Commit 34e18455 authored by Tejun Heo, committed by H. Peter Anvin

memblock: Make memblock_alloc_[try_]nid() top-down

NUMA-aware memblock alloc functions - memblock_alloc_[try_]nid() -
weren't properly top-down because memblock_nid_range() scanned
forward.  This patch reverses the scan direction of
memblock_nid_range(), renames it to memblock_nid_range_rev(), and
updates the related functions to implement proper top-down allocation.

Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1310460395-30913-7-git-send-email-tj@kernel.org
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent f9b18db3
@@ -499,27 +499,26 @@ phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
 /*
- * Additional node-local allocators. Search for node memory is bottom up
- * and walks memblock regions within that node bottom-up as well, but allocation
- * within an memblock region is top-down. XXX I plan to fix that at some stage
+ * Additional node-local top-down allocators.
  *
  * WARNING: Only available after early_node_map[] has been populated,
  * on some architectures, that is after all the calls to add_active_range()
  * have been done to populate it.
  */
-static phys_addr_t __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
+static phys_addr_t __init memblock_nid_range_rev(phys_addr_t start,
+                                                 phys_addr_t end, int *nid)
 {
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
         unsigned long start_pfn, end_pfn;
         int i;

         for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, nid)
-                if (start >= PFN_PHYS(start_pfn) && start < PFN_PHYS(end_pfn))
-                        return min(end, PFN_PHYS(end_pfn));
+                if (end > PFN_PHYS(start_pfn) && end <= PFN_PHYS(end_pfn))
+                        return max(start, PFN_PHYS(start_pfn));
 #endif
         *nid = 0;
-        return end;
+        return start;
 }

 static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
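
For intuition: the old helper found the node owning 'start' and returned where
that node's coverage of the range ends (min(end, PFN_PHYS(end_pfn))); the
reversed helper finds the node owning the last byte below 'end' and returns
where that node's coverage of the range begins (max(start, PFN_PHYS(start_pfn))).
A minimal userspace sketch of the reversed logic, with a hypothetical two-node
map (all names and values below are illustrative, not the kernel code):

/* toy_nid_range_rev.c - illustrative model of the reversed node scan.
 * The node_map table and every value below are hypothetical. */
#include <stdio.h>

typedef unsigned long long phys_addr_t;

struct node_range { phys_addr_t base, end; int nid; };

/* pretend node 0 owns [0x0000, 0x1000) and node 1 owns [0x1000, 0x3000) */
static const struct node_range node_map[] = {
        { 0x0000, 0x1000, 0 },
        { 0x1000, 0x3000, 1 },
};

/* Like memblock_nid_range_rev(): find the node containing the last byte
 * of [start, end), set *nid, and return where that node's part of the
 * range begins (clamped to start).  Unknown ranges fall back to nid 0. */
static phys_addr_t nid_range_rev(phys_addr_t start, phys_addr_t end, int *nid)
{
        for (unsigned int i = 0; i < sizeof(node_map) / sizeof(node_map[0]); i++)
                if (end > node_map[i].base && end <= node_map[i].end) {
                        *nid = node_map[i].nid;
                        return start > node_map[i].base ? start : node_map[i].base;
                }
        *nid = 0;
        return start;
}

int main(void)
{
        int nid;
        phys_addr_t this_start = nid_range_rev(0x0800, 0x2000, &nid);

        /* prints "top chunk [0x1000, 0x2000) on nid 1" */
        printf("top chunk [%#llx, 0x2000) on nid %d\n", this_start, nid);
        return 0;
}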
@@ -531,21 +530,19 @@ static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
         start = mp->base;
         end = start + mp->size;

-        start = round_up(start, align);
         while (start < end) {
-                phys_addr_t this_end;
+                phys_addr_t this_start;
                 int this_nid;

-                this_end = memblock_nid_range(start, end, &this_nid);
+                this_start = memblock_nid_range_rev(start, end, &this_nid);
                 if (this_nid == nid) {
-                        phys_addr_t ret = memblock_find_region(start, this_end, size, align);
+                        phys_addr_t ret = memblock_find_region(this_start, end, size, align);
                         if (ret &&
                             !memblock_add_region(&memblock.reserved, ret, size))
                                 return ret;
                 }
-                start = this_end;
+                end = this_start;
         }

         return 0;
 }
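
With the helper reversed, the loop above walks a region from the top: each pass
takes the node chunk that ends at 'end', tries the allocation there via
memblock_find_region() (which receives the alignment itself, so the up-front
round_up() of 'start' is dropped), and then lowers 'end' to 'this_start'.
Continuing the toy sketch above, a sketch of that top-down chunk walk:

/* Walks [start, end) top-down in per-node chunks, mirroring the rewritten
 * memblock_alloc_nid_region() loop; reuses the toy nid_range_rev() above. */
static void walk_topdown(phys_addr_t start, phys_addr_t end)
{
        while (start < end) {
                int this_nid;
                phys_addr_t this_start = nid_range_rev(start, end, &this_nid);

                printf("try nid %d in [%#llx, %#llx)\n", this_nid, this_start, end);
                end = this_start;       /* continue below this node's chunk */
        }
}

/* walk_topdown(0x0800, 0x3000) tries nid 1 over [0x1000, 0x3000) first,
 * then nid 0 over [0x0800, 0x1000) - highest chunks before lower ones. */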
@@ -561,11 +558,7 @@ phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int n
          */
         size = round_up(size, align);

-        /* We do a bottom-up search for a region with the right
-         * nid since that's easier considering how memblock_nid_range()
-         * works
-         */
-        for (i = 0; i < mem->cnt; i++) {
+        for (i = mem->cnt - 1; i >= 0; i--) {
                 phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
                                                             size, align, nid);
                 if (ret)
...
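
The caller is reversed to match: memblock_alloc_nid() now tries memblock
regions from the last (highest) one down, and the stale comment excusing the
old bottom-up order goes away. One pitfall with such countdowns is that the
index must be signed (here the kernel's existing 'int i'); an unsigned
'i >= 0' never becomes false. A sketch of the reversed iteration, where
'struct region', 'regions', 'cnt', and try_region() are hypothetical stand-ins:

/* Reversed region walk, mirroring the patched memblock_alloc_nid();
 * every name below is an illustrative stand-in. */
static phys_addr_t alloc_nid_topdown(struct region *regions, long cnt,
                                     phys_addr_t size, phys_addr_t align, int nid)
{
        for (long i = cnt - 1; i >= 0; i--) {   /* signed so i >= 0 can fail */
                phys_addr_t ret = try_region(&regions[i], size, align, nid);
                if (ret)
                        return ret;
        }
        return 0;       /* no region could satisfy the request */
}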