Commit d9adb3fa authored by Jesse Barnes, committed by David Mosberger

[PATCH] ia64: initialize bootmem maps in reverse order

The arch-independent bootmem code now requires that arches initialize
their bootmem maps in reverse order (in particular, from high to low
addresses), otherwise alloc_bootmem_pages_low() won't work.  This change
makes the ia64 code do just that, so that machines without an IOMMU can
allocate their bounce buffers in low memory at early boot.  It also adds
a sanity check to the early init code to make sure that each node has a
local data area, because if they don't, many things will break later on
and may be hard to track down.
parent e6d4485d
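In short, the patch makes find_memory() register the per-node bootmem maps from the highest-numbered node down to node 0, and panics if a node never got its per-node area. A condensed sketch of that loop, using only names that appear in the hunks below (mem_data[], numnodes, init_bootmem_node(); the bootmem bitmap is placed directly after the per-node area, as the patch does):

	int node;

	/* Highest node first; node 0 (lowest addresses) is registered last. */
	for (node = numnodes - 1; node >= 0; node--) {
		struct bootmem_data *bdp = &mem_data[node].bootmem_data;
		unsigned long map;

		/* Sanity check: every node must have gotten a pernode area. */
		if (!mem_data[node].pernode_addr)
			panic("pernode space for node %d could not be allocated!",
			      node);

		/* The bootmem bitmap sits right after the pernode area. */
		map = mem_data[node].pernode_addr + mem_data[node].pernode_size;
		init_bootmem_node(mem_data[node].pgdat, map >> PAGE_SHIFT,
				  bdp->node_boot_start >> PAGE_SHIFT,
				  bdp->node_low_pfn);
	}

The full context, including the matching change to find_pernode_space() that reserves room for the bitmap, is in the diff below.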
@@ -134,94 +134,69 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
 			     int node)
 {
 	unsigned long epfn, cpu, cpus;
-	unsigned long pernodesize = 0, pernode;
+	unsigned long pernodesize = 0, pernode, pages, mapsize;
 	void *cpu_data;
 	struct bootmem_data *bdp = &mem_data[node].bootmem_data;
 
 	epfn = (start + len) >> PAGE_SHIFT;
 
+	pages = bdp->node_low_pfn - (bdp->node_boot_start >> PAGE_SHIFT);
+	mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
+
 	/*
 	 * Make sure this memory falls within this node's usable memory
 	 * since we may have thrown some away in build_maps().
 	 */
-	if (start < bdp->node_boot_start ||
-	    epfn > bdp->node_low_pfn)
+	if (start < bdp->node_boot_start || epfn > bdp->node_low_pfn)
 		return 0;
 
 	/* Don't setup this node's local space twice... */
-	if (!mem_data[node].pernode_addr) {
-		/*
-		 * Calculate total size needed, incl. what's necessary
-		 * for good alignment and alias prevention.
-		 */
-		cpus = early_nr_cpus_node(node);
-		pernodesize += PERCPU_PAGE_SIZE * cpus;
-		pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
-		pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
-		pernodesize = PAGE_ALIGN(pernodesize);
-		pernode = NODEDATA_ALIGN(start, node);
-
-		/* Is this range big enough for what we want to store here? */
-		if (start + len > (pernode + pernodesize)) {
-			mem_data[node].pernode_addr = pernode;
-			mem_data[node].pernode_size = pernodesize;
-			memset(__va(pernode), 0, pernodesize);
-
-			cpu_data = (void *)pernode;
-			pernode += PERCPU_PAGE_SIZE * cpus;
-
-			mem_data[node].pgdat = __va(pernode);
-			pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
-
-			mem_data[node].node_data = __va(pernode);
-			pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
-
-			mem_data[node].pgdat->bdata = bdp;
-			pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
-
-			/*
-			 * Copy the static per-cpu data into the region we
-			 * just set aside and then setup __per_cpu_offset
-			 * for each CPU on this node.
-			 */
-			for (cpu = 0; cpu < NR_CPUS; cpu++) {
-				if (node == node_cpuid[cpu].nid) {
-					memcpy(__va(cpu_data), __phys_per_cpu_start,
-					       __per_cpu_end-__per_cpu_start);
-					__per_cpu_offset[cpu] =
-						(char*)__va(cpu_data) -
-						__per_cpu_start;
-					cpu_data += PERCPU_PAGE_SIZE;
-				}
-			}
-		}
-	}
+	if (mem_data[node].pernode_addr)
+		return 0;
 
-	pernode = mem_data[node].pernode_addr;
-	pernodesize = mem_data[node].pernode_size;
-	if (pernode && !bdp->node_bootmem_map) {
-		unsigned long pages, mapsize, map = 0;
+	/*
+	 * Calculate total size needed, incl. what's necessary
+	 * for good alignment and alias prevention.
+	 */
+	cpus = early_nr_cpus_node(node);
+	pernodesize += PERCPU_PAGE_SIZE * cpus;
+	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
+	pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
+	pernodesize = PAGE_ALIGN(pernodesize);
+	pernode = NODEDATA_ALIGN(start, node);
 
-		pages = bdp->node_low_pfn -
-			(bdp->node_boot_start >> PAGE_SHIFT);
-		mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
+	/* Is this range big enough for what we want to store here? */
+	if (start + len > (pernode + pernodesize + mapsize)) {
+		mem_data[node].pernode_addr = pernode;
+		mem_data[node].pernode_size = pernodesize;
+		memset(__va(pernode), 0, pernodesize);
 
-		/*
-		 * The map will either contain the pernode area or begin
-		 * after it.
-		 */
-		if (pernode - start > mapsize)
-			map = start;
-		else if (start + len - pernode - pernodesize > mapsize)
-			map = pernode + pernodesize;
+		cpu_data = (void *)pernode;
+		pernode += PERCPU_PAGE_SIZE * cpus;
 
-		if (map) {
-			init_bootmem_node(mem_data[node].pgdat,
-					  map>>PAGE_SHIFT,
-					  bdp->node_boot_start>>PAGE_SHIFT,
-					  bdp->node_low_pfn);
-		}
+		mem_data[node].pgdat = __va(pernode);
+		pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
 
-	}
+		mem_data[node].node_data = __va(pernode);
+		pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
+
+		mem_data[node].pgdat->bdata = bdp;
+		pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
+
+		/*
+		 * Copy the static per-cpu data into the region we
+		 * just set aside and then setup __per_cpu_offset
+		 * for each CPU on this node.
+		 */
+		for (cpu = 0; cpu < NR_CPUS; cpu++) {
+			if (node == node_cpuid[cpu].nid) {
+				memcpy(__va(cpu_data), __phys_per_cpu_start,
+				       __per_cpu_end - __per_cpu_start);
+				__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
+					__per_cpu_start;
+				cpu_data += PERCPU_PAGE_SIZE;
+			}
+		}
+	}
 
 	return 0;
@@ -314,6 +289,8 @@ static void __init initialize_pernode_data(void)
  */
 void __init find_memory(void)
 {
+	int node;
+
 	reserve_memory();
 
 	if (numnodes == 0) {
@@ -327,6 +304,31 @@ void __init find_memory(void)
 	/* These actually end up getting called by call_pernode_memory() */
 	efi_memmap_walk(filter_rsvd_memory, build_node_maps);
 	efi_memmap_walk(filter_rsvd_memory, find_pernode_space);
+
+	/*
+	 * Initialize the boot memory maps in reverse order since that's
+	 * what the bootmem allocator expects
+	 */
+	for (node = numnodes - 1; node >= 0; node--) {
+		unsigned long pernode, pernodesize, map;
+		struct bootmem_data *bdp;
+
+		bdp = &mem_data[node].bootmem_data;
+		pernode = mem_data[node].pernode_addr;
+		pernodesize = mem_data[node].pernode_size;
+		map = pernode + pernodesize;
+
+		/* Sanity check... */
+		if (!pernode)
+			panic("pernode space for node %d "
+			      "could not be allocated!", node);
+
+		init_bootmem_node(mem_data[node].pgdat,
+				  map>>PAGE_SHIFT,
+				  bdp->node_boot_start>>PAGE_SHIFT,
+				  bdp->node_low_pfn);
+	}
+
 	efi_memmap_walk(filter_rsvd_memory, free_node_bootmem);
 
 	reserve_pernode_space();