Commit 6e245ad4 authored by Mike Rapoport, committed by Linus Torvalds

memblock: reduce number of parameters in for_each_mem_range()

Currently for_each_mem_range() and for_each_mem_range_rev() iterators are
the most generic way to traverse memblock regions.  As such, they have 8
parameters and they are hardly convenient to users.  Most users choose to
utilize one of their wrappers and the only user that actually needs most
of the parameters is memblock itself.

To avoid yet another naming for memblock iterators, rename the existing
for_each_mem_range[_rev]() to __for_each_mem_range[_rev]() and add a new
for_each_mem_range[_rev]() wrappers with only index, start and end
parameters.

The new wrapper nicely fits into init_unavailable_mem() and will be used
in upcoming changes to simplify memblock traversals.
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>	[MIPS]
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Baoquan He <bhe@redhat.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Daniel Axtens <dja@axtens.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Emil Renner Berthing <kernel@esmil.dk>
Cc: Hari Bathini <hbathini@linux.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Link: https://lkml.kernel.org/r/20200818151634.14343-11-rppt@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 87c55870
...@@ -207,7 +207,9 @@ ForEachMacros: ...@@ -207,7 +207,9 @@ ForEachMacros:
- 'for_each_memblock_type' - 'for_each_memblock_type'
- 'for_each_memcg_cache_index' - 'for_each_memcg_cache_index'
- 'for_each_mem_pfn_range' - 'for_each_mem_pfn_range'
- '__for_each_mem_range'
- 'for_each_mem_range' - 'for_each_mem_range'
- '__for_each_mem_range_rev'
- 'for_each_mem_range_rev' - 'for_each_mem_range_rev'
- 'for_each_migratetype_order' - 'for_each_migratetype_order'
- 'for_each_msi_entry' - 'for_each_msi_entry'
......
...@@ -215,8 +215,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz) ...@@ -215,8 +215,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
phys_addr_t start, end; phys_addr_t start, end;
nr_ranges = 1; /* for exclusion of crashkernel region */ nr_ranges = 1; /* for exclusion of crashkernel region */
for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, for_each_mem_range(i, &start, &end)
MEMBLOCK_NONE, &start, &end, NULL)
nr_ranges++; nr_ranges++;
cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL); cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL);
...@@ -225,8 +224,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz) ...@@ -225,8 +224,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
cmem->max_nr_ranges = nr_ranges; cmem->max_nr_ranges = nr_ranges;
cmem->nr_ranges = 0; cmem->nr_ranges = 0;
for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, for_each_mem_range(i, &start, &end) {
MEMBLOCK_NONE, &start, &end, NULL) {
cmem->ranges[cmem->nr_ranges].start = start; cmem->ranges[cmem->nr_ranges].start = start;
cmem->ranges[cmem->nr_ranges].end = end - 1; cmem->ranges[cmem->nr_ranges].end = end - 1;
cmem->nr_ranges++; cmem->nr_ranges++;
......
...@@ -250,8 +250,7 @@ static int __locate_mem_hole_top_down(struct kexec_buf *kbuf, ...@@ -250,8 +250,7 @@ static int __locate_mem_hole_top_down(struct kexec_buf *kbuf,
phys_addr_t start, end; phys_addr_t start, end;
u64 i; u64 i;
for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, for_each_mem_range_rev(i, &start, &end) {
MEMBLOCK_NONE, &start, &end, NULL) {
/* /*
* memblock uses [start, end) convention while it is * memblock uses [start, end) convention while it is
* [start, end] here. Fix the off-by-one to have the * [start, end] here. Fix the off-by-one to have the
...@@ -350,8 +349,7 @@ static int __locate_mem_hole_bottom_up(struct kexec_buf *kbuf, ...@@ -350,8 +349,7 @@ static int __locate_mem_hole_bottom_up(struct kexec_buf *kbuf,
phys_addr_t start, end; phys_addr_t start, end;
u64 i; u64 i;
for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, for_each_mem_range(i, &start, &end) {
MEMBLOCK_NONE, &start, &end, NULL) {
/* /*
* memblock uses [start, end) convention while it is * memblock uses [start, end) convention while it is
* [start, end] here. Fix the off-by-one to have the * [start, end] here. Fix the off-by-one to have the
......
...@@ -162,7 +162,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type, ...@@ -162,7 +162,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
#endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */ #endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */
/** /**
* for_each_mem_range - iterate through memblock areas from type_a and not * __for_each_mem_range - iterate through memblock areas from type_a and not
* included in type_b. Or just type_a if type_b is NULL. * included in type_b. Or just type_a if type_b is NULL.
* @i: u64 used as loop variable * @i: u64 used as loop variable
* @type_a: ptr to memblock_type to iterate * @type_a: ptr to memblock_type to iterate
...@@ -173,7 +173,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type, ...@@ -173,7 +173,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @p_nid: ptr to int for nid of the range, can be %NULL * @p_nid: ptr to int for nid of the range, can be %NULL
*/ */
#define for_each_mem_range(i, type_a, type_b, nid, flags, \ #define __for_each_mem_range(i, type_a, type_b, nid, flags, \
p_start, p_end, p_nid) \ p_start, p_end, p_nid) \
for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b, \ for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b, \
p_start, p_end, p_nid); \ p_start, p_end, p_nid); \
...@@ -182,7 +182,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type, ...@@ -182,7 +182,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
p_start, p_end, p_nid)) p_start, p_end, p_nid))
/** /**
* for_each_mem_range_rev - reverse iterate through memblock areas from * __for_each_mem_range_rev - reverse iterate through memblock areas from
* type_a and not included in type_b. Or just type_a if type_b is NULL. * type_a and not included in type_b. Or just type_a if type_b is NULL.
* @i: u64 used as loop variable * @i: u64 used as loop variable
* @type_a: ptr to memblock_type to iterate * @type_a: ptr to memblock_type to iterate
...@@ -193,15 +193,36 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type, ...@@ -193,15 +193,36 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @p_nid: ptr to int for nid of the range, can be %NULL * @p_nid: ptr to int for nid of the range, can be %NULL
*/ */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags, \ #define __for_each_mem_range_rev(i, type_a, type_b, nid, flags, \
p_start, p_end, p_nid) \ p_start, p_end, p_nid) \
for (i = (u64)ULLONG_MAX, \ for (i = (u64)ULLONG_MAX, \
__next_mem_range_rev(&i, nid, flags, type_a, type_b,\ __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
p_start, p_end, p_nid); \ p_start, p_end, p_nid); \
i != (u64)ULLONG_MAX; \ i != (u64)ULLONG_MAX; \
__next_mem_range_rev(&i, nid, flags, type_a, type_b, \ __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
p_start, p_end, p_nid)) p_start, p_end, p_nid))
/**
* for_each_mem_range - iterate through memory areas.
* @i: u64 used as loop variable
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
*/
#define for_each_mem_range(i, p_start, p_end) \
__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, \
MEMBLOCK_NONE, p_start, p_end, NULL)
/**
* for_each_mem_range_rev - reverse iterate through memblock areas from
* type_a and not included in type_b. Or just type_a if type_b is NULL.
* @i: u64 used as loop variable
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
*/
#define for_each_mem_range_rev(i, p_start, p_end) \
__for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
MEMBLOCK_NONE, p_start, p_end, NULL)
/** /**
* for_each_reserved_mem_region - iterate over all reserved memblock areas * for_each_reserved_mem_region - iterate over all reserved memblock areas
* @i: u64 used as loop variable * @i: u64 used as loop variable
...@@ -307,7 +328,7 @@ int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask); ...@@ -307,7 +328,7 @@ int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);
* soon as memblock is initialized. * soon as memblock is initialized.
*/ */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid) \ #define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid) \
for_each_mem_range(i, &memblock.memory, &memblock.reserved, \ __for_each_mem_range(i, &memblock.memory, &memblock.reserved, \
nid, flags, p_start, p_end, p_nid) nid, flags, p_start, p_end, p_nid)
/** /**
...@@ -324,7 +345,7 @@ int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask); ...@@ -324,7 +345,7 @@ int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);
*/ */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end, \ #define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end, \
p_nid) \ p_nid) \
for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \ __for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
nid, flags, p_start, p_end, p_nid) nid, flags, p_start, p_end, p_nid)
int memblock_set_node(phys_addr_t base, phys_addr_t size, int memblock_set_node(phys_addr_t base, phys_addr_t size,
......
...@@ -6990,8 +6990,7 @@ static void __init init_unavailable_mem(void) ...@@ -6990,8 +6990,7 @@ static void __init init_unavailable_mem(void)
* Loop through unavailable ranges not covered by memblock.memory. * Loop through unavailable ranges not covered by memblock.memory.
*/ */
pgcnt = 0; pgcnt = 0;
for_each_mem_range(i, &memblock.memory, NULL, for_each_mem_range(i, &start, &end) {
NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL) {
if (next < start) if (next < start)
pgcnt += init_unavailable_range(PFN_DOWN(next), pgcnt += init_unavailable_range(PFN_DOWN(next),
PFN_UP(start)); PFN_UP(start));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment