memblock: reduce number of parameters in for_each_mem_range()
authorMike Rapoport <rppt@linux.ibm.com>
Tue, 13 Oct 2020 23:57:59 +0000 (16:57 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Wed, 14 Oct 2020 01:38:35 +0000 (18:38 -0700)
Currently for_each_mem_range() and for_each_mem_range_rev() iterators are
the most generic way to traverse memblock regions.  As such, they have 8
parameters and they are hardly convenient to users.  Most users choose to
utilize one of their wrappers and the only user that actually needs most
of the parameters is memblock itself.

To avoid yet another naming for memblock iterators, rename the existing
for_each_mem_range[_rev]() to __for_each_mem_range[_rev]() and add a new
for_each_mem_range[_rev]() wrappers with only index, start and end
parameters.

The new wrapper nicely fits into init_unavailable_mem() and will be used
in upcoming changes to simplify memblock traversals.

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de> [MIPS]
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Baoquan He <bhe@redhat.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Daniel Axtens <dja@axtens.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Emil Renner Berthing <kernel@esmil.dk>
Cc: Hari Bathini <hbathini@linux.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Link: https://lkml.kernel.org/r/20200818151634.14343-11-rppt@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
.clang-format
arch/arm64/kernel/machine_kexec_file.c
arch/powerpc/kexec/file_load_64.c
include/linux/memblock.h
mm/page_alloc.c

index badfc1ba440afc1c87f056ee61c8e38105d60022..0366a3d2e561b3957cf1880cc349fccc0bee14ce 100644 (file)
@@ -207,7 +207,9 @@ ForEachMacros:
   - 'for_each_memblock_type'
   - 'for_each_memcg_cache_index'
   - 'for_each_mem_pfn_range'
+  - '__for_each_mem_range'
   - 'for_each_mem_range'
+  - '__for_each_mem_range_rev'
   - 'for_each_mem_range_rev'
   - 'for_each_migratetype_order'
   - 'for_each_msi_entry'
index 361a1143e09ee8a65437825c1e8a417621d4a56a..5b0e67b93cdcda8c839e7b52fccdff04c3151d10 100644 (file)
@@ -215,8 +215,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
        phys_addr_t start, end;
 
        nr_ranges = 1; /* for exclusion of crashkernel region */
-       for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
-                                       MEMBLOCK_NONE, &start, &end, NULL)
+       for_each_mem_range(i, &start, &end)
                nr_ranges++;
 
        cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL);
@@ -225,8 +224,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
 
        cmem->max_nr_ranges = nr_ranges;
        cmem->nr_ranges = 0;
-       for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
-                                       MEMBLOCK_NONE, &start, &end, NULL) {
+       for_each_mem_range(i, &start, &end) {
                cmem->ranges[cmem->nr_ranges].start = start;
                cmem->ranges[cmem->nr_ranges].end = end - 1;
                cmem->nr_ranges++;
index 53bb71e3a2e1ceb965d93b4923162f4058eeb933..2c9d908eab96e95b941be04ce58cd7e3565e7a0c 100644 (file)
@@ -250,8 +250,7 @@ static int __locate_mem_hole_top_down(struct kexec_buf *kbuf,
        phys_addr_t start, end;
        u64 i;
 
-       for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE,
-                              MEMBLOCK_NONE, &start, &end, NULL) {
+       for_each_mem_range_rev(i, &start, &end) {
                /*
                 * memblock uses [start, end) convention while it is
                 * [start, end] here. Fix the off-by-one to have the
@@ -350,8 +349,7 @@ static int __locate_mem_hole_bottom_up(struct kexec_buf *kbuf,
        phys_addr_t start, end;
        u64 i;
 
-       for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
-                          MEMBLOCK_NONE, &start, &end, NULL) {
+       for_each_mem_range(i, &start, &end) {
                /*
                 * memblock uses [start, end) convention while it is
                 * [start, end] here. Fix the off-by-one to have the
index 47a76e237fca4f46bc9c7d93ca096f0ab2c6f73e..27c3b84d1615d3f7f44c86e714d3243a878598be 100644 (file)
@@ -162,7 +162,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
 #endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */
 
 /**
- * for_each_mem_range - iterate through memblock areas from type_a and not
+ * __for_each_mem_range - iterate through memblock areas from type_a and not
  * included in type_b. Or just type_a if type_b is NULL.
  * @i: u64 used as loop variable
  * @type_a: ptr to memblock_type to iterate
@@ -173,7 +173,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
  * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
  * @p_nid: ptr to int for nid of the range, can be %NULL
  */
-#define for_each_mem_range(i, type_a, type_b, nid, flags,              \
+#define __for_each_mem_range(i, type_a, type_b, nid, flags,            \
                           p_start, p_end, p_nid)                       \
        for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,    \
                                     p_start, p_end, p_nid);            \
@@ -182,7 +182,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
                              p_start, p_end, p_nid))
 
 /**
- * for_each_mem_range_rev - reverse iterate through memblock areas from
+ * __for_each_mem_range_rev - reverse iterate through memblock areas from
  * type_a and not included in type_b. Or just type_a if type_b is NULL.
  * @i: u64 used as loop variable
  * @type_a: ptr to memblock_type to iterate
@@ -193,15 +193,36 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
  * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
  * @p_nid: ptr to int for nid of the range, can be %NULL
  */
-#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,          \
-                              p_start, p_end, p_nid)                   \
+#define __for_each_mem_range_rev(i, type_a, type_b, nid, flags,                \
+                                p_start, p_end, p_nid)                 \
        for (i = (u64)ULLONG_MAX,                                       \
-                    __next_mem_range_rev(&i, nid, flags, type_a, type_b,\
+                    __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
                                          p_start, p_end, p_nid);       \
             i != (u64)ULLONG_MAX;                                      \
             __next_mem_range_rev(&i, nid, flags, type_a, type_b,       \
                                  p_start, p_end, p_nid))
 
+/**
+ * for_each_mem_range - iterate through memory areas.
+ * @i: u64 used as loop variable
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ */
+#define for_each_mem_range(i, p_start, p_end) \
+       __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,   \
+                            MEMBLOCK_NONE, p_start, p_end, NULL)
+
+/**
+ * for_each_mem_range_rev - reverse iterate through memory areas,
+ * from the highest to the lowest address.
+ * @i: u64 used as loop variable
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ */
+#define for_each_mem_range_rev(i, p_start, p_end)                      \
+       __for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
+                                MEMBLOCK_NONE, p_start, p_end, NULL)
+
 /**
  * for_each_reserved_mem_region - iterate over all reserved memblock areas
  * @i: u64 used as loop variable
@@ -307,8 +328,8 @@ int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);
  * soon as memblock is initialized.
  */
 #define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)  \
-       for_each_mem_range(i, &memblock.memory, &memblock.reserved,     \
-                          nid, flags, p_start, p_end, p_nid)
+       __for_each_mem_range(i, &memblock.memory, &memblock.reserved,   \
+                            nid, flags, p_start, p_end, p_nid)
 
 /**
  * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
@@ -324,8 +345,8 @@ int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);
  */
 #define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end, \
                                        p_nid)                          \
-       for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
-                              nid, flags, p_start, p_end, p_nid)
+       __for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
+                                nid, flags, p_start, p_end, p_nid)
 
 int memblock_set_node(phys_addr_t base, phys_addr_t size,
                      struct memblock_type *type, int nid);
index 73e33ab6d249939ed8c024c8a5d02fe27c85f426..34ac7127d1e6a5f23dfd360ebcf484880b58f2af 100644 (file)
@@ -6990,8 +6990,7 @@ static void __init init_unavailable_mem(void)
         * Loop through unavailable ranges not covered by memblock.memory.
         */
        pgcnt = 0;
-       for_each_mem_range(i, &memblock.memory, NULL,
-                       NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL) {
+       for_each_mem_range(i, &start, &end) {
                if (next < start)
                        pgcnt += init_unavailable_range(PFN_DOWN(next),
                                                        PFN_UP(start));