Commit fd89a137 authored by Joerg Roedel, committed by H. Peter Anvin

x86-32: Separate 1:1 pagetables from swapper_pg_dir

This patch fixes machine crashes which occur when heavily exercising the
CPU hotplug codepaths on a 32-bit kernel. These crashes are caused by
AMD Erratum 383 and result in a fatal machine check exception. Here's
the scenario:

1. On 32-bit, the swapper_pg_dir page table is used as the initial page
table for booting a secondary CPU.

2. To make this work, swapper_pg_dir needs a direct mapping of physical
memory in it (the low mappings). By adding those low, large page (2M)
mappings (PAE kernel), we create the necessary conditions for Erratum
383 to occur.

3. Other CPUs which do not participate in the off- and onlining game may
use swapper_pg_dir while the low mappings are present (when leave_mm is
called). For all steps below, the CPU referred to is a CPU that is using
swapper_pg_dir, and not the CPU which is being onlined.

4. The presence of the low mappings in swapper_pg_dir can result in TLB
entries for addresses below __PAGE_OFFSET being established
speculatively. These TLB entries are marked global and large.

5. When the CPU with such TLB entry switches to another page table, this
TLB entry remains because it is global.

6. The process then generates an access to an address covered by the
above TLB entry, but there is a permission mismatch - the TLB entry
covers a large global page not accessible to userspace.

7. Due to this permission mismatch, a new 4kb user TLB entry gets
established. Further, Erratum 383 provides for a small window of time
where both TLB entries are present. This results in an uncorrectable
machine check exception signalling a TLB multimatch which panics the
machine.
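
The low mappings referred to in steps 2-4 were, before this patch, set
up by native_cpu_up() cloning the kernel half of swapper_pg_dir into
its own low slots; this is the code removed in the native_cpu_up() hunk
further down, condensed here:

        #ifdef CONFIG_X86_32
                /* init low mem mapping: mirror the kernel PGD entries
                 * into the low slots of swapper_pg_dir itself */
                clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                        min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
                flush_tlb_all();
                low_mappings = 1;

                err = do_boot_cpu(apicid, cpu);

                zap_low_mappings(false);   /* tear the low mappings down again */
                low_mappings = 0;
        #endif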

There are two ways to fix this issue:

        1. Always do a global TLB flush when a new cr3 is loaded and the
        old page table was swapper_pg_dir. I consider this a hack which is
        hard to understand and which has performance implications.

        2. Do not use swapper_pg_dir to boot secondary CPUs, as is already
        done on 64-bit.
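
For comparison, option 1 would amount to something like the following
sketch on the cr3-switch path (purely illustrative, not part of this
patch; the helper name is made up):

        /*
         * Hypothetical sketch of option 1: a plain cr3 reload does not
         * drop global TLB entries, so leaving swapper_pg_dir would need
         * an explicit global flush on top.
         */
        static void switch_away_from_init_pgd(pgd_t *new_pgd, pgd_t *old_pgd)
        {
                load_cr3(new_pgd);
                if (old_pgd == swapper_pg_dir)
                        __flush_tlb_all();  /* drops global entries as well */
        }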

This patch implements solution 2. It introduces a trampoline_pg_dir
which has the same layout as swapper_pg_dir plus the low mappings. This
page table is used as the initial page table of the booting CPU. Later
in the bringup process, the CPU switches to swapper_pg_dir and does a
global TLB flush. This fixes the crashes in our test cases.
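
In outline, the new 32-bit bringup sequence (condensed from the hunks
below) is:

        /* do_boot_cpu(), on the boot CPU: hand the trampoline page table
         * to the secondary's startup code */
        initial_page_table = __pa(&trampoline_pg_dir);

        /* startup_32_smp then enables paging with it:
         *      movl pa(initial_page_table), %eax
         *      movl %eax, %cr3
         */

        /* start_secondary(), first thing on the new CPU: switch to
         * swapper_pg_dir and flush before touching per-cpu data */
        load_cr3(swapper_pg_dir);
        __flush_tlb_all();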

-v2: switch to swapper_pg_dir right after entering start_secondary() so
that we are able to access percpu data which might not be mapped in the
trampoline page table.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
LKML-Reference: <20100816123833.GB28147@aftab>
Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
parent 07a7795c
...
@@ -26,6 +26,7 @@ struct mm_struct;
 struct vm_area_struct;
 
 extern pgd_t swapper_pg_dir[1024];
+extern pgd_t trampoline_pg_dir[1024];
 
 static inline void pgtable_cache_init(void) { }
 static inline void check_pgt_cache(void) { }
...
@@ -13,14 +13,17 @@ extern unsigned char *trampoline_base;
 extern unsigned long init_rsp;
 extern unsigned long initial_code;
+extern unsigned long initial_page_table;
 extern unsigned long initial_gs;
 
 #define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
 
 extern unsigned long setup_trampoline(void);
+extern void __init setup_trampoline_page_table(void);
 extern void __init reserve_trampoline_memory(void);
 #else
 static inline void reserve_trampoline_memory(void) {};
+extern void __init setup_trampoline_page_table(void) {};
 #endif /* CONFIG_X86_TRAMPOLINE */
 
 #endif /* __ASSEMBLY__ */
...
@@ -334,7 +334,7 @@ ENTRY(startup_32_smp)
 /*
  * Enable paging
  */
-	movl $pa(swapper_pg_dir),%eax
+	movl pa(initial_page_table), %eax
 	movl %eax,%cr3		/* set the page table pointer.. */
 	movl %cr0,%eax
 	orl $X86_CR0_PG,%eax
@@ -614,6 +614,8 @@ ignore_int:
 	.align 4
 ENTRY(initial_code)
 	.long i386_start_kernel
+ENTRY(initial_page_table)
+	.long pa(swapper_pg_dir)
 
 /*
  * BSS section
@@ -629,6 +631,10 @@ ENTRY(swapper_pg_dir)
 #endif
 swapper_pg_fixmap:
 	.fill 1024,4,0
+#ifdef CONFIG_X86_TRAMPOLINE
+ENTRY(trampoline_pg_dir)
+	.fill 1024,4,0
+#endif
 ENTRY(empty_zero_page)
 	.fill 4096,1,0
...
@@ -1014,6 +1014,8 @@ void __init setup_arch(char **cmdline_p)
 	paging_init();
 	x86_init.paging.pagetable_setup_done(swapper_pg_dir);
 
+	setup_trampoline_page_table();
+
 	tboot_probe();
 
 #ifdef CONFIG_X86_64
...
@@ -73,7 +73,6 @@
 #ifdef CONFIG_X86_32
 u8 apicid_2_node[MAX_APICID];
-static int low_mappings;
 #endif
 
 /* State of each CPU */
@@ -281,6 +280,18 @@ notrace static void __cpuinit start_secondary(void *unused)
 	 * fragile that we want to limit the things done here to the
 	 * most necessary things.
 	 */
+
+#ifdef CONFIG_X86_32
+	/*
+	 * Switch away from the trampoline page-table
+	 *
+	 * Do this before cpu_init() because it needs to access per-cpu
+	 * data which may not be mapped in the trampoline page-table.
+	 */
+	load_cr3(swapper_pg_dir);
+	__flush_tlb_all();
+#endif
+
 	vmi_bringup();
 	cpu_init();
 	preempt_disable();
@@ -299,12 +310,6 @@ notrace static void __cpuinit start_secondary(void *unused)
 		legacy_pic->chip->unmask(0);
 	}
 
-#ifdef CONFIG_X86_32
-	while (low_mappings)
-		cpu_relax();
-	__flush_tlb_all();
-#endif
-
 	/* This must be done before setting cpu_online_mask */
 	set_cpu_sibling_map(raw_smp_processor_id());
 	wmb();
@@ -750,6 +755,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 #ifdef CONFIG_X86_32
 	/* Stack for startup_32 can be just as for start_secondary onwards */
 	irq_ctx_init(cpu);
+	initial_page_table = __pa(&trampoline_pg_dir);
 #else
 	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
 	initial_gs = per_cpu_offset(cpu);
@@ -897,20 +903,8 @@ int __cpuinit native_cpu_up(unsigned int cpu)
 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 
-#ifdef CONFIG_X86_32
-	/* init low mem mapping */
-	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-		min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
-	flush_tlb_all();
-	low_mappings = 1;
-
 	err = do_boot_cpu(apicid, cpu);
 
-	zap_low_mappings(false);
-	low_mappings = 0;
-#else
-	err = do_boot_cpu(apicid, cpu);
-#endif
 	if (err) {
 		pr_debug("do_boot_cpu failed %d\n", err);
 		return -EIO;
...
 #include <linux/io.h>
 #include <asm/trampoline.h>
+#include <asm/pgtable.h>
 #include <asm/e820.h>
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP)
@@ -37,3 +38,20 @@ unsigned long __trampinit setup_trampoline(void)
 	memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
 	return virt_to_phys(trampoline_base);
 }
+
+void __init setup_trampoline_page_table(void)
+{
+#ifdef CONFIG_X86_32
+	/* Copy kernel address range */
+	clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
+			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+			min_t(unsigned long, KERNEL_PGD_PTRS,
+			      KERNEL_PGD_BOUNDARY));
+
+	/* Initialize low mappings */
+	clone_pgd_range(trampoline_pg_dir,
+			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+			min_t(unsigned long, KERNEL_PGD_PTRS,
+			      KERNEL_PGD_BOUNDARY));
+#endif
+}