Commit cc9f7a0c authored by Yinghai Lu, committed by Ingo Molnar

x86: kill bad_ppro

Handle the Pentium Pro RAM bug by reserving the affected range in the e820 map at setup time, so we don't punish all the other CPUs without that problem when initializing highmem.
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 41c094fd
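Note (not part of the original commit): the e820 range reserved in the setup_arch() hunk below covers exactly the 64-page window that the removed page_kills_ppro() check used to skip. A minimal userspace sketch, assuming the usual 4 KiB page size, that confirms pfns 0x70000..0x7003F correspond to the physical range passed to e820_update_range():

/* Illustrative sketch only, not part of this commit: verify that the
 * pfn window covered by the removed page_kills_ppro() (0x70000..0x7003F)
 * equals the physical range reserved by the new e820_update_range()
 * call (start 0x70000000, size 0x40000).  Assumes 4 KiB pages. */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        unsigned long long first_pfn = 0x70000, last_pfn = 0x7003F;
        unsigned long long start = first_pfn << PAGE_SHIFT;
        unsigned long long size  = (last_pfn - first_pfn + 1) << PAGE_SHIFT;

        /* prints: start=0x70000000 size=0x40000 pages=64 */
        printf("start=%#llx size=%#llx pages=%llu\n",
               start, size, size >> PAGE_SHIFT);
        return 0;
}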
@@ -68,6 +68,7 @@
 #include <asm/cacheflush.h>
 #include <asm/processor.h>
 #include <asm/efi.h>
+#include <asm/bugs.h>
 
 /* This value is set up by the early boot code to point to the value
    immediately after the boot time page tables.  It contains a *physical*
@@ -764,6 +765,14 @@ void __init setup_arch(char **cmdline_p)
        if (efi_enabled)
                efi_init();
 
+       if (ppro_with_ram_bug()) {
+               e820_update_range(0x70000000ULL, 0x40000ULL, E820_RAM,
+                                 E820_RESERVED);
+               sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+               printk(KERN_INFO "fixed physical RAM map:\n");
+               e820_print_map("bad_ppro");
+       }
+
        e820_register_active_regions(0, 0, -1UL);
        /*
         * partially used pages are not usable - thus
...
@@ -427,7 +427,7 @@ void __init zone_sizes_init(void)
        return;
 }
 
-void __init set_highmem_pages_init(int bad_ppro)
+void __init set_highmem_pages_init(void)
 {
 #ifdef CONFIG_HIGHMEM
        struct zone *zone;
@@ -447,7 +447,7 @@ void __init set_highmem_pages_init(int bad_ppro)
                        zone->name, nid, zone_start_pfn, zone_end_pfn);
 
                add_highpages_with_active_regions(nid, zone_start_pfn,
-                       zone_end_pfn, bad_ppro);
+                       zone_end_pfn);
        }
        totalram_pages += totalhigh_pages;
 #endif
...
@@ -220,13 +220,6 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
        }
 }
 
-static inline int page_kills_ppro(unsigned long pagenr)
-{
-       if (pagenr >= 0x70000 && pagenr <= 0x7003F)
-               return 1;
-       return 0;
-}
-
 /*
  * devmem_is_allowed() checks to see if /dev/mem access to a certain address
  * is valid. The argument is a physical page number.
@@ -287,22 +280,17 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
        pkmap_page_table = pte;
 }
 
-static void __init
-add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
+static void __init add_one_highpage_init(struct page *page, int pfn)
 {
-       if (!(bad_ppro && page_kills_ppro(pfn))) {
-               ClearPageReserved(page);
-               init_page_count(page);
-               __free_page(page);
-               totalhigh_pages++;
-       } else
-               SetPageReserved(page);
+       ClearPageReserved(page);
+       init_page_count(page);
+       __free_page(page);
+       totalhigh_pages++;
 }
 
 struct add_highpages_data {
        unsigned long start_pfn;
        unsigned long end_pfn;
-       int bad_ppro;
 };
 
 static void __init add_highpages_work_fn(unsigned long start_pfn,
@@ -312,10 +300,8 @@ static void __init add_highpages_work_fn(unsigned long start_pfn,
        struct page *page;
        unsigned long final_start_pfn, final_end_pfn;
        struct add_highpages_data *data;
-       int bad_ppro;
 
        data = (struct add_highpages_data *)datax;
-       bad_ppro = data->bad_ppro;
 
        final_start_pfn = max(start_pfn, data->start_pfn);
        final_end_pfn = min(end_pfn, data->end_pfn);
@@ -327,29 +313,26 @@ static void __init add_highpages_work_fn(unsigned long start_pfn,
                if (!pfn_valid(node_pfn))
                        continue;
                page = pfn_to_page(node_pfn);
-               add_one_highpage_init(page, node_pfn, bad_ppro);
+               add_one_highpage_init(page, node_pfn);
        }
 }
 
 void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
-                                             unsigned long end_pfn,
-                                             int bad_ppro)
+                                             unsigned long end_pfn)
 {
        struct add_highpages_data data;
 
        data.start_pfn = start_pfn;
        data.end_pfn = end_pfn;
-       data.bad_ppro = bad_ppro;
 
        work_with_active_regions(nid, add_highpages_work_fn, &data);
 }
 
 #ifndef CONFIG_NUMA
-static void __init set_highmem_pages_init(int bad_ppro)
+static void __init set_highmem_pages_init(void)
 {
-       add_highpages_with_active_regions(0, highstart_pfn, highend_pfn,
-                                         bad_ppro);
+       add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);
        totalram_pages += totalhigh_pages;
 }
@@ -358,7 +341,7 @@ static void __init set_highmem_pages_init(int bad_ppro)
 #else
 # define kmap_init()                           do { } while (0)
 # define permanent_kmaps_init(pgd_base)                do { } while (0)
-# define set_highmem_pages_init(bad_ppro)      do { } while (0)
+# define set_highmem_pages_init()      do { } while (0)
 #endif /* CONFIG_HIGHMEM */
 
 pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
@@ -605,13 +588,11 @@ static struct kcore_list kcore_mem, kcore_vmalloc;
 void __init mem_init(void)
 {
        int codesize, reservedpages, datasize, initsize;
-       int tmp, bad_ppro;
+       int tmp;
 
 #ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
 #endif
-       bad_ppro = ppro_with_ram_bug();
-
 #ifdef CONFIG_HIGHMEM
        /* check that fixmap and pkmap do not overlap */
        if (PKMAP_BASE + LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
@@ -634,7 +615,7 @@ void __init mem_init(void)
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;
 
-       set_highmem_pages_init(bad_ppro);
+       set_highmem_pages_init();
 
        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
        datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
...
@@ -75,7 +75,7 @@ struct page *kmap_atomic_to_page(void *ptr);
 #define flush_cache_kmaps()    do { } while (0)
 
 extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
-                                       unsigned long end_pfn, int bad_ppro);
+                                       unsigned long end_pfn);
 
 #endif /* __KERNEL__ */
...
@@ -5,7 +5,7 @@ extern int pxm_to_nid(int pxm);
 
 #ifdef CONFIG_NUMA
 extern void __init remap_numa_kva(void);
-extern void set_highmem_pages_init(int);
+extern void set_highmem_pages_init(void);
 #else
 static inline void remap_numa_kva(void)
 {
...