Commit a2f3aa02 authored by Dave Hansen's avatar Dave Hansen Committed by Linus Torvalds

[PATCH] Fix sparsemem on Cell

Fix an oops experienced on the Cell architecture when init-time functions,
early_*(), are called at runtime.  It alters the call paths to make sure
that the callers explicitly say whether the call is being made on behalf of
a hotplug event, or happening at boot-time.

It has been compile tested on ppc64, ia64, s390, i386 and x86_64.
Acked-by: Arnd Bergmann <arndb@de.ibm.com>
Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Cc: Yasunori Goto <y-goto@jp.fujitsu.com>
Acked-by: Andy Whitcroft <apw@shadowen.org>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 47a4d5be
...@@ -543,7 +543,8 @@ virtual_memmap_init (u64 start, u64 end, void *arg) ...@@ -543,7 +543,8 @@ virtual_memmap_init (u64 start, u64 end, void *arg)
if (map_start < map_end) if (map_start < map_end)
memmap_init_zone((unsigned long)(map_end - map_start), memmap_init_zone((unsigned long)(map_end - map_start),
args->nid, args->zone, page_to_pfn(map_start)); args->nid, args->zone, page_to_pfn(map_start),
MEMMAP_EARLY);
return 0; return 0;
} }
...@@ -552,7 +553,7 @@ memmap_init (unsigned long size, int nid, unsigned long zone, ...@@ -552,7 +553,7 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
unsigned long start_pfn) unsigned long start_pfn)
{ {
if (!vmem_map) if (!vmem_map)
memmap_init_zone(size, nid, zone, start_pfn); memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
else { else {
struct page *start; struct page *start;
struct memmap_init_callback_data args; struct memmap_init_callback_data args;
......
...@@ -61,7 +61,8 @@ void memmap_init(unsigned long size, int nid, unsigned long zone, ...@@ -61,7 +61,8 @@ void memmap_init(unsigned long size, int nid, unsigned long zone,
if (map_start < map_end) if (map_start < map_end)
memmap_init_zone((unsigned long)(map_end - map_start), memmap_init_zone((unsigned long)(map_end - map_start),
nid, zone, page_to_pfn(map_start)); nid, zone, page_to_pfn(map_start),
MEMMAP_EARLY);
} }
} }
......
...@@ -978,7 +978,8 @@ extern int early_pfn_to_nid(unsigned long pfn); ...@@ -978,7 +978,8 @@ extern int early_pfn_to_nid(unsigned long pfn);
#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
extern void set_dma_reserve(unsigned long new_dma_reserve); extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long); extern void memmap_init_zone(unsigned long, int, unsigned long,
unsigned long, enum memmap_context);
extern void setup_per_zone_pages_min(void); extern void setup_per_zone_pages_min(void);
extern void mem_init(void); extern void mem_init(void);
extern void show_mem(void); extern void show_mem(void);
......
...@@ -450,9 +450,13 @@ void build_all_zonelists(void); ...@@ -450,9 +450,13 @@ void build_all_zonelists(void);
void wakeup_kswapd(struct zone *zone, int order); void wakeup_kswapd(struct zone *zone, int order);
int zone_watermark_ok(struct zone *z, int order, unsigned long mark, int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
int classzone_idx, int alloc_flags); int classzone_idx, int alloc_flags);
enum memmap_context {
MEMMAP_EARLY,
MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
unsigned long size); unsigned long size,
enum memmap_context context);
#ifdef CONFIG_HAVE_MEMORY_PRESENT #ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end); void memory_present(int nid, unsigned long start, unsigned long end);
......
...@@ -67,11 +67,13 @@ static int __add_zone(struct zone *zone, unsigned long phys_start_pfn) ...@@ -67,11 +67,13 @@ static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
zone_type = zone - pgdat->node_zones; zone_type = zone - pgdat->node_zones;
if (!populated_zone(zone)) { if (!populated_zone(zone)) {
int ret = 0; int ret = 0;
ret = init_currently_empty_zone(zone, phys_start_pfn, nr_pages); ret = init_currently_empty_zone(zone, phys_start_pfn,
nr_pages, MEMMAP_HOTPLUG);
if (ret < 0) if (ret < 0)
return ret; return ret;
} }
memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn); memmap_init_zone(nr_pages, nid, zone_type,
phys_start_pfn, MEMMAP_HOTPLUG);
return 0; return 0;
} }
......
...@@ -1956,17 +1956,24 @@ static inline unsigned long wait_table_bits(unsigned long size) ...@@ -1956,17 +1956,24 @@ static inline unsigned long wait_table_bits(unsigned long size)
* done. Non-atomic initialization, single-pass. * done. Non-atomic initialization, single-pass.
*/ */
void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
unsigned long start_pfn) unsigned long start_pfn, enum memmap_context context)
{ {
struct page *page; struct page *page;
unsigned long end_pfn = start_pfn + size; unsigned long end_pfn = start_pfn + size;
unsigned long pfn; unsigned long pfn;
for (pfn = start_pfn; pfn < end_pfn; pfn++) { for (pfn = start_pfn; pfn < end_pfn; pfn++) {
if (!early_pfn_valid(pfn)) /*
continue; * There can be holes in boot-time mem_map[]s
if (!early_pfn_in_nid(pfn, nid)) * handed to this function. They do not
continue; * exist on hotplugged memory.
*/
if (context == MEMMAP_EARLY) {
if (!early_pfn_valid(pfn))
continue;
if (!early_pfn_in_nid(pfn, nid))
continue;
}
page = pfn_to_page(pfn); page = pfn_to_page(pfn);
set_page_links(page, zone, nid, pfn); set_page_links(page, zone, nid, pfn);
init_page_count(page); init_page_count(page);
...@@ -1993,7 +2000,7 @@ void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone, ...@@ -1993,7 +2000,7 @@ void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
#ifndef __HAVE_ARCH_MEMMAP_INIT #ifndef __HAVE_ARCH_MEMMAP_INIT
#define memmap_init(size, nid, zone, start_pfn) \ #define memmap_init(size, nid, zone, start_pfn) \
memmap_init_zone((size), (nid), (zone), (start_pfn)) memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
#endif #endif
static int __cpuinit zone_batchsize(struct zone *zone) static int __cpuinit zone_batchsize(struct zone *zone)
...@@ -2239,7 +2246,8 @@ static __meminit void zone_pcp_init(struct zone *zone) ...@@ -2239,7 +2246,8 @@ static __meminit void zone_pcp_init(struct zone *zone)
__meminit int init_currently_empty_zone(struct zone *zone, __meminit int init_currently_empty_zone(struct zone *zone,
unsigned long zone_start_pfn, unsigned long zone_start_pfn,
unsigned long size) unsigned long size,
enum memmap_context context)
{ {
struct pglist_data *pgdat = zone->zone_pgdat; struct pglist_data *pgdat = zone->zone_pgdat;
int ret; int ret;
...@@ -2683,7 +2691,8 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat, ...@@ -2683,7 +2691,8 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
if (!size) if (!size)
continue; continue;
ret = init_currently_empty_zone(zone, zone_start_pfn, size); ret = init_currently_empty_zone(zone, zone_start_pfn,
size, MEMMAP_EARLY);
BUG_ON(ret); BUG_ON(ret);
zone_start_pfn += size; zone_start_pfn += size;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment