Commit eb40c4c2 authored by Tejun Heo, committed by H. Peter Anvin

memblock, x86: Replace memblock_x86_find_in_range_node() with generic memblock calls

With the previous changes, the generic NUMA-aware memblock API has
feature parity with memblock_x86_find_in_range_node().  There are
currently two users: x86's setup_node_data() and
__alloc_memory_core_early() in nobootmem.c.

This patch converts the former to use memblock_alloc_nid() and the
latter to use memblock_find_in_range_node(), and kills
memblock_x86_find_in_range_node() and related functions, including
find_memory_core_early() in page_alloc.c.

Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1310460395-30913-9-git-send-email-tj@kernel.org
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent e6498040
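
For reference, the two generic calls this patch switches to have roughly
the following shapes, inferred from the call sites in the hunks below
(a sketch; the authoritative prototypes live in include/linux/memblock.h
of this series):

        /* find a free range in @nid's memory; find only, nothing is reserved */
        phys_addr_t memblock_find_in_range_node(phys_addr_t start, phys_addr_t end,
                                                phys_addr_t size, phys_addr_t align,
                                                int nid);

        /* allocate from @nid's memory: find a fitting range and reserve it */
        phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align,
                                       int nid);
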
--- a/arch/x86/include/asm/memblock.h
+++ b/arch/x86/include/asm/memblock.h
@@ -15,7 +15,6 @@ int get_free_all_memory_range(struct range **rangep, int nodeid);
 void memblock_x86_register_active_regions(int nid, unsigned long start_pfn,
                                          unsigned long last_pfn);
 u64 memblock_x86_hole_size(u64 start, u64 end);
-u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align);
 u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit);
 u64 memblock_x86_memory_in_range(u64 addr, u64 limit);
 bool memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align);

--- a/arch/x86/mm/memblock.c
+++ b/arch/x86/mm/memblock.c
@@ -251,21 +251,6 @@ void __init memblock_x86_free_range(u64 start, u64 end)
         memblock_free(start, end - start);
 }
 
-/*
- * Need to call this function after memblock_x86_register_active_regions,
- * so early_node_map[] is filled already.
- */
-u64 __init memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align)
-{
-        u64 addr;
-        addr = find_memory_core_early(nid, size, align, start, end);
-        if (addr)
-                return addr;
-
-        /* Fallback, should already have start end within node range */
-        return memblock_find_in_range(start, end, size, align);
-}
-
 /*
  * Finds an active region in the address range from start_pfn to last_pfn and
  * returns its range in ei_startpfn and ei_endpfn for the memblock entry.

--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -192,8 +192,6 @@ int __init numa_add_memblk(int nid, u64 start, u64 end)
 /* Initialize NODE_DATA for a node on the local memory */
 static void __init setup_node_data(int nid, u64 start, u64 end)
 {
-        const u64 nd_low = PFN_PHYS(MAX_DMA_PFN);
-        const u64 nd_high = PFN_PHYS(max_pfn_mapped);
         const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
         bool remapped = false;
         u64 nd_pa;
@@ -224,17 +222,12 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
                 nd_pa = __pa(nd);
                 remapped = true;
         } else {
-                nd_pa = memblock_x86_find_in_range_node(nid, nd_low, nd_high,
-                                                        nd_size, SMP_CACHE_BYTES);
-                if (!nd_pa)
-                        nd_pa = memblock_find_in_range(nd_low, nd_high,
-                                                       nd_size, SMP_CACHE_BYTES);
+                nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
                 if (!nd_pa) {
                         pr_err("Cannot find %zu bytes in node %d\n",
                                nd_size, nid);
                         return;
                 }
-                memblock_x86_reserve_range(nd_pa, nd_pa + nd_size, "NODE_DATA");
                 nd = __va(nd_pa);
         }
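
The second hunk above also drops the explicit memblock_x86_reserve_range()
call. The reason is the find vs. alloc split: the old primitive only
located a candidate range, while memblock_alloc_nid() finds and reserves
in one step. A sketch of the two sequences (alloc_node_data_old/new are
hypothetical helpers for illustration, not part of the patch):

        /* old: find near the node, fall back, then reserve by hand */
        static u64 __init alloc_node_data_old(int nid, u64 lo, u64 hi, size_t size)
        {
                u64 pa = memblock_x86_find_in_range_node(nid, lo, hi, size,
                                                         SMP_CACHE_BYTES);
                if (!pa)
                        pa = memblock_find_in_range(lo, hi, size, SMP_CACHE_BYTES);
                if (pa) /* find-only API: caller must reserve explicitly */
                        memblock_x86_reserve_range(pa, pa + size, "NODE_DATA");
                return pa;
        }

        /* new: one node-aware call that finds and reserves */
        static u64 __init alloc_node_data_new(int nid, size_t size)
        {
                return memblock_alloc_nid(size, SMP_CACHE_BYTES, nid);
        }
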
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1325,8 +1325,6 @@ extern void free_bootmem_with_active_regions(int nid,
                                                 unsigned long max_low_pfn);
 int add_from_early_node_map(struct range *range, int az,
                                    int nr_range, int nid);
-u64 __init find_memory_core_early(int nid, u64 size, u64 align,
-                        u64 goal, u64 limit);
 extern void sparse_memory_present_with_active_regions(int nid);
 extern void __next_mem_pfn_range(int *idx, int nid,

--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -41,8 +41,7 @@ static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
         if (limit > memblock.current_limit)
                 limit = memblock.current_limit;
 
-        addr = find_memory_core_early(nid, size, align, goal, limit);
-
+        addr = memblock_find_in_range_node(goal, limit, size, align, nid);
         if (!addr)
                 return NULL;
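
A bare "find" is the right primitive here because the surrounding
__alloc_memory_core_early() reserves and zeroes the block itself once a
candidate range is found. A simplified sketch of that caller-side
pattern, based on the mm/nobootmem.c of this era (kmemleak bookkeeping
omitted; details may differ):

        static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
                                                       u64 goal, u64 limit)
        {
                void *ptr;
                u64 addr;

                if (limit > memblock.current_limit)
                        limit = memblock.current_limit;

                /* find only: returns a fitting physical range, reserves nothing */
                addr = memblock_find_in_range_node(goal, limit, size, align, nid);
                if (!addr)
                        return NULL;

                ptr = phys_to_virt(addr);       /* already mapped this early */
                memset(ptr, 0, size);           /* bootmem memory is zeroed */
                /* the caller reserves the range itself */
                memblock_x86_reserve_range(addr, addr + size, "BOOTMEM");
                return ptr;
        }
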
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3779,73 +3779,6 @@ void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
         }
 }
 
-#ifdef CONFIG_HAVE_MEMBLOCK
-/*
- * Basic iterator support. Return the last range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns last region regardless of node
- */
-static int __meminit last_active_region_index_in_nid(int nid)
-{
-        int i;
-
-        for (i = nr_nodemap_entries - 1; i >= 0; i--)
-                if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
-                        return i;
-
-        return -1;
-}
-
-/*
- * Basic iterator support. Return the previous active range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns next region regardless of node
- */
-static int __meminit previous_active_region_index_in_nid(int index, int nid)
-{
-        for (index = index - 1; index >= 0; index--)
-                if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
-                        return index;
-
-        return -1;
-}
-
-#define for_each_active_range_index_in_nid_reverse(i, nid) \
-        for (i = last_active_region_index_in_nid(nid); i != -1; \
-                i = previous_active_region_index_in_nid(i, nid))
-
-u64 __init find_memory_core_early(int nid, u64 size, u64 align,
-                                        u64 goal, u64 limit)
-{
-        int i;
-
-        /* Need to go over early_node_map to find out good range for node */
-        for_each_active_range_index_in_nid_reverse(i, nid) {
-                u64 addr;
-                u64 ei_start, ei_last;
-                u64 final_start, final_end;
-
-                ei_last = early_node_map[i].end_pfn;
-                ei_last <<= PAGE_SHIFT;
-                ei_start = early_node_map[i].start_pfn;
-                ei_start <<= PAGE_SHIFT;
-
-                final_start = max(ei_start, goal);
-                final_end = min(ei_last, limit);
-
-                if (final_start >= final_end)
-                        continue;
-
-                addr = memblock_find_in_range(final_start, final_end, size, align);
-
-                if (!addr)
-                        continue;
-
-                return addr;
-        }
-
-        return 0;
-}
-#endif
-
 int __init add_from_early_node_map(struct range *range, int az,
                                         int nr_range, int nid)
 {
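
What replaces the removed scan lives inside generic memblock: the
node-aware finder walks free memory belonging to @nid and picks an
aligned range that fits, so page_alloc.c no longer needs its own
early_node_map iterator. A pseudocode sketch of the idea (the iterator
name is illustrative, not the actual mm/memblock.c implementation):

        u64 find_in_range_node_sketch(u64 goal, u64 limit, u64 size,
                                      u64 align, int nid)
        {
                u64 this_start, this_end, cand;

                /* hypothetical iterator over free ranges that belong to @nid */
                for_each_free_range_in_nid(nid, &this_start, &this_end) {
                        this_start = max(this_start, goal);
                        this_end = min(this_end, limit);
                        if (this_start >= this_end || this_end - this_start < size)
                                continue;
                        cand = round_down(this_end - size, align); /* top-down */
                        if (cand >= this_start)
                                return cand;    /* fitting, aligned range */
                }
                return 0;       /* nothing found */
        }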