Commit 05039b92 authored by Dave Hansen, committed by Linus Torvalds

[PATCH] memory hotplug: i386 addition functions

Adds the functions necessary for non-NUMA hot-add of highmem to an existing zone on i386.
Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 61b13993
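
For orientation, and not part of the patch itself: the change wires i386 into the generic memory hotplug path by providing add_memory()/remove_memory() and an online_page() callback. A minimal, hypothetical caller is sketched below; example_probe_region() is an invented name used only to show the flow, while add_memory() and the behaviour described in the comments come from this patch and the generic hotplug code of this era that it plugs into.

/*
 * Illustrative sketch only (not part of this commit).
 * example_probe_region() is a made-up caller; add_memory() is the
 * i386 arch hook added by the patch below.
 */
#include <linux/types.h>
#include <linux/memory_hotplug.h>

static int example_probe_region(u64 start, u64 size)
{
	int ret;

	/* Hand the physical range to the arch hook, which calls __add_pages() */
	ret = add_memory(start, size);
	if (ret)
		return ret;

	/*
	 * The new struct pages come up reserved and offline.  When the range
	 * is later onlined, the generic code calls the arch's online_page()
	 * on each page; on i386 that frees the page into highmem and bumps
	 * totalram_pages/num_physpages (see add_one_highpage_hotplug()).
	 */
	return 0;
}

The design choice visible in the diff: add_memory() always targets the last zone (ZONE_HIGHMEM on i386 of this era), and remove_memory() simply returns -EINVAL, so hot-remove is not supported by this patch.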
@@ -98,7 +98,7 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
 extern unsigned long find_max_low_pfn(void);
 extern void find_max_pfn(void);
-extern void one_highpage_init(struct page *, int, int);
+extern void add_one_highpage_init(struct page *, int, int);
 extern struct e820map e820;
 extern unsigned long init_pg_tables_end;
@@ -427,7 +427,7 @@ void __init set_highmem_pages_init(int bad_ppro)
 			if (!pfn_valid(node_pfn))
 				continue;
 			page = pfn_to_page(node_pfn);
-			one_highpage_init(page, node_pfn, bad_ppro);
+			add_one_highpage_init(page, node_pfn, bad_ppro);
 		}
 	}
 	totalram_pages += totalhigh_pages;
......
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/efi.h>
+#include <linux/memory_hotplug.h>
 #include <asm/processor.h>
 #include <asm/system.h>
@@ -266,17 +267,46 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
 	pkmap_page_table = pte;
 }
 
-void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
+void __devinit free_new_highpage(struct page *page)
+{
+	set_page_count(page, 1);
+	__free_page(page);
+	totalhigh_pages++;
+}
+
+void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
 {
 	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
 		ClearPageReserved(page);
-		set_page_count(page, 1);
-		__free_page(page);
-		totalhigh_pages++;
+		free_new_highpage(page);
 	} else
 		SetPageReserved(page);
 }
 
+static int add_one_highpage_hotplug(struct page *page, unsigned long pfn)
+{
+	free_new_highpage(page);
+	totalram_pages++;
+#ifdef CONFIG_FLATMEM
+	max_mapnr = max(pfn, max_mapnr);
+#endif
+	num_physpages++;
+	return 0;
+}
+
+/*
+ * Not currently handling the NUMA case.
+ * Assuming single node and all memory that
+ * has been added dynamically that would be
+ * onlined here is in HIGHMEM
+ */
+void online_page(struct page *page)
+{
+	ClearPageReserved(page);
+	add_one_highpage_hotplug(page, page_to_pfn(page));
+}
+
 #ifdef CONFIG_NUMA
 extern void set_highmem_pages_init(int);
 #else
@@ -284,7 +314,7 @@ static void __init set_highmem_pages_init(int bad_ppro)
 {
 	int pfn;
 	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
-		one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
+		add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
 	totalram_pages += totalhigh_pages;
 }
 #endif /* CONFIG_FLATMEM */
@@ -615,6 +645,28 @@ void __init mem_init(void)
 #endif
 }
 
+/*
+ * this is for the non-NUMA, single node SMP system case.
+ * Specifically, in the case of x86, we will always add
+ * memory to the highmem for now.
+ */
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+int add_memory(u64 start, u64 size)
+{
+	struct pglist_data *pgdata = &contig_page_data;
+	struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
+	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long nr_pages = size >> PAGE_SHIFT;
+
+	return __add_pages(zone, start_pfn, nr_pages);
+}
+
+int remove_memory(u64 start, u64 size)
+{
+	return -EINVAL;
+}
+#endif
+
 kmem_cache_t *pgd_cache;
 kmem_cache_t *pmd_cache;
......