Commit e2bf3cae authored by Johannes Weiner, committed by Linus Torvalds

bootmem: factor out the marking of a PFN range

Introduce new helpers that mark either a range residing completely on one
node, or a node-agnostic range that may also span node boundaries.

The free/reserve API functions will then directly use these helpers.

Note that the free/reserve semantics become more strict: while the prior
code took basically arbitrary range arguments and marked the PFNs that
happened to fall into that range, the new code requires node-specific ranges
to reside completely on their node.  The node-agnostic requests may span
node boundaries as long as the nodes involved are contiguous.

Passing ranges that do not satisfy these criteria is a bug.

[akpm@linux-foundation.org: fix printk warnings]
Signed-off-by: Johannes Weiner <hannes@saeurebad.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Yinghai Lu <yhlu.kernel@gmail.com>
Cc: Andi Kleen <andi@firstfloor.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d747fa4b
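
To make the new calling convention concrete, here is a minimal usage sketch.
The two-node layout, the PFN values and the example function below are
hypothetical, chosen only to illustrate the node-specific versus
node-agnostic rules described above:

#include <linux/bootmem.h>
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/pfn.h>

/*
 * Hypothetical layout: node 0 covers PFNs [0x0, 0x40000) and node 1 is
 * contiguous with it, covering PFNs [0x40000, 0x80000).
 */
static void __init bootmem_marking_example(void)
{
	/*
	 * Node-specific call: the whole range must lie on node 0; a
	 * range leaking off the node now trips the BUG_ON()s in
	 * mark_bootmem_node().
	 */
	free_bootmem_node(NODE_DATA(0), PFN_PHYS(0x100), PFN_PHYS(0x200));

	/*
	 * Node-agnostic call: the range may cross the node 0/1 boundary
	 * because the two nodes are contiguous.  With BOOTMEM_EXCLUSIVE,
	 * an already-reserved page makes the whole request fail with
	 * -EBUSY, and anything reserved so far is rolled back.
	 */
	if (reserve_bootmem(PFN_PHYS(0x3ff00), PFN_PHYS(0x200),
			    BOOTMEM_EXCLUSIVE))
		printk(KERN_WARNING "example range already reserved\n");
}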
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -234,6 +234,9 @@ static void __init __free(bootmem_data_t *bdata,
 		sidx + PFN_DOWN(bdata->node_boot_start),
 		eidx + PFN_DOWN(bdata->node_boot_start));
 
+	if (bdata->hint_idx > sidx)
+		bdata->hint_idx = sidx;
+
 	for (idx = sidx; idx < eidx; idx++)
 		if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
 			BUG();
@@ -263,40 +266,57 @@ static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
 	return 0;
 }
 
-static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
-				     unsigned long size)
+static int __init mark_bootmem_node(bootmem_data_t *bdata,
+				unsigned long start, unsigned long end,
+				int reserve, int flags)
 {
 	unsigned long sidx, eidx;
-	unsigned long i;
 
-	BUG_ON(!size);
+	bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
+		bdata - bootmem_node_data, start, end, reserve, flags);
 
-	/* out range */
-	if (addr + size < bdata->node_boot_start ||
-	    PFN_DOWN(addr) > bdata->node_low_pfn)
-		return;
-	/*
-	 * round down end of usable mem, partially free pages are
-	 * considered reserved.
-	 */
+	BUG_ON(start < PFN_DOWN(bdata->node_boot_start));
+	BUG_ON(end > bdata->node_low_pfn);
 
-	if (addr >= bdata->node_boot_start &&
-	    PFN_DOWN(addr - bdata->node_boot_start) < bdata->hint_idx)
-		bdata->hint_idx = PFN_DOWN(addr - bdata->node_boot_start);
+	sidx = start - PFN_DOWN(bdata->node_boot_start);
+	eidx = end - PFN_DOWN(bdata->node_boot_start);
 
-	/*
-	 * Round up to index to the range.
-	 */
-	if (PFN_UP(addr) > PFN_DOWN(bdata->node_boot_start))
-		sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start);
+	if (reserve)
+		return __reserve(bdata, sidx, eidx, flags);
 	else
-		sidx = 0;
+		__free(bdata, sidx, eidx);
+	return 0;
+}
+
+static int __init mark_bootmem(unsigned long start, unsigned long end,
+				int reserve, int flags)
+{
+	unsigned long pos;
+	bootmem_data_t *bdata;
 
-	eidx = PFN_DOWN(addr + size - bdata->node_boot_start);
-	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
-		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
+	pos = start;
+	list_for_each_entry(bdata, &bdata_list, list) {
+		int err;
+		unsigned long max;
 
-	__free(bdata, sidx, eidx);
+		if (pos < PFN_DOWN(bdata->node_boot_start)) {
+			BUG_ON(pos != start);
+			continue;
+		}
+
+		max = min(bdata->node_low_pfn, end);
+
+		err = mark_bootmem_node(bdata, pos, max, reserve, flags);
+		if (reserve && err) {
+			mark_bootmem(start, pos, 0, 0);
+			return err;
+		}
+
+		if (max == end)
+			return 0;
+		pos = bdata->node_low_pfn;
+	}
+	BUG();
 }
 
 /**
@@ -307,12 +327,17 @@ static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
  *
  * Partial pages will be considered reserved and left as they are.
  *
- * Only physical pages that actually reside on @pgdat are marked.
+ * The range must reside completely on the specified node.
  */
 void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 			      unsigned long size)
 {
-	free_bootmem_core(pgdat->bdata, physaddr, size);
+	unsigned long start, end;
+
+	start = PFN_UP(physaddr);
+	end = PFN_DOWN(physaddr + size);
+
+	mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
 }
 
 /**
@@ -322,83 +347,16 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
  *
  * Partial pages will be considered reserved and left as they are.
  *
- * All physical pages within the range are marked, no matter what
- * node they reside on.
+ * The range must be contiguous but may span node boundaries.
  */
 void __init free_bootmem(unsigned long addr, unsigned long size)
 {
-	bootmem_data_t *bdata;
+	unsigned long start, end;
 
-	list_for_each_entry(bdata, &bdata_list, list)
-		free_bootmem_core(bdata, addr, size);
-}
-
-/*
- * Marks a particular physical memory range as unallocatable. Usable RAM
- * might be used for boot-time allocations - or it might get added
- * to the free page pool later on.
- */
-static int __init can_reserve_bootmem_core(bootmem_data_t *bdata,
-			unsigned long addr, unsigned long size, int flags)
-{
-	unsigned long sidx, eidx;
-	unsigned long i;
-
-	BUG_ON(!size);
-
-	/* out of range, don't hold other */
-	if (addr + size < bdata->node_boot_start ||
-	    PFN_DOWN(addr) > bdata->node_low_pfn)
-		return 0;
-
-	/*
-	 * Round up to index to the range.
-	 */
-	if (addr > bdata->node_boot_start)
-		sidx= PFN_DOWN(addr - bdata->node_boot_start);
-	else
-		sidx = 0;
-
-	eidx = PFN_UP(addr + size - bdata->node_boot_start);
-	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
-		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
-
-	for (i = sidx; i < eidx; i++) {
-		if (test_bit(i, bdata->node_bootmem_map)) {
-			if (flags & BOOTMEM_EXCLUSIVE)
-				return -EBUSY;
-		}
-	}
-
-	return 0;
-}
-
-static void __init reserve_bootmem_core(bootmem_data_t *bdata,
-			unsigned long addr, unsigned long size, int flags)
-{
-	unsigned long sidx, eidx;
-	unsigned long i;
-
-	BUG_ON(!size);
-
-	/* out of range */
-	if (addr + size < bdata->node_boot_start ||
-	    PFN_DOWN(addr) > bdata->node_low_pfn)
-		return;
-
-	/*
-	 * Round up to index to the range.
-	 */
-	if (addr > bdata->node_boot_start)
-		sidx= PFN_DOWN(addr - bdata->node_boot_start);
-	else
-		sidx = 0;
-
-	eidx = PFN_UP(addr + size - bdata->node_boot_start);
-	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
-		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
+	start = PFN_UP(addr);
+	end = PFN_DOWN(addr + size);
 
-	return __reserve(bdata, sidx, eidx, flags);
+	mark_bootmem(start, end, 0, 0);
 }
 
 /**
@@ -410,18 +368,17 @@ static void __init reserve_bootmem_core(bootmem_data_t *bdata,
  *
  * Partial pages will be reserved.
  *
- * Only physical pages that actually reside on @pgdat are marked.
+ * The range must reside completely on the specified node.
  */
 int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 				unsigned long size, int flags)
 {
-	int ret;
+	unsigned long start, end;
 
-	ret = can_reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
-	if (ret < 0)
-		return -ENOMEM;
-	reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
+	start = PFN_DOWN(physaddr);
+	end = PFN_UP(physaddr + size);
 
-	return 0;
+	return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
 }
 
 #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
@@ -433,24 +390,17 @@ int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
  *
  * Partial pages will be reserved.
  *
- * All physical pages within the range are marked, no matter what
- * node they reside on.
+ * The range must be contiguous but may span node boundaries.
  */
 int __init reserve_bootmem(unsigned long addr, unsigned long size,
 			    int flags)
 {
-	bootmem_data_t *bdata;
-	int ret;
+	unsigned long start, end;
 
-	list_for_each_entry(bdata, &bdata_list, list) {
-		ret = can_reserve_bootmem_core(bdata, addr, size, flags);
-		if (ret < 0)
-			return ret;
-	}
-	list_for_each_entry(bdata, &bdata_list, list)
-		reserve_bootmem_core(bdata, addr, size, flags);
+	start = PFN_DOWN(addr);
+	end = PFN_UP(addr + size);
 
-	return 0;
+	return mark_bootmem(start, end, 1, flags);
 }
 
 #endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
@@ -663,7 +613,7 @@ void * __init alloc_bootmem_section(unsigned long size,
 	if (start_nr != section_nr || end_nr != section_nr) {
 		printk(KERN_WARNING "alloc_bootmem failed on section %ld.\n",
 			section_nr);
-		free_bootmem_core(pgdat->bdata, __pa(ptr), size);
+		free_bootmem_node(pgdat, __pa(ptr), size);
 		ptr = NULL;
 	}
...