Commit 36423a5e authored by Linus Torvalds


Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, apic: Fix apic=debug boot crash
  x86, hotplug: Serialize CPU hotplug to avoid bringup concurrency issues
  x86-32: Fix dummy trampoline-related inline stubs
  x86-32: Separate 1:1 pagetables from swapper_pg_dir
  x86, cpu: Fix regression in AMD errata checking code
parents f6143a9b 05e40760
@@ -245,6 +245,11 @@ config ARCH_HWEIGHT_CFLAGS
 config KTIME_SCALAR
        def_bool X86_32
+config ARCH_CPU_PROBE_RELEASE
+       def_bool y
+       depends on HOTPLUG_CPU
 source "init/Kconfig"
 source "kernel/Kconfig.freezer"
...
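The new ARCH_CPU_PROBE_RELEASE symbol is what lets the generic CPU sysfs code hand its probe/release writes to arch-supplied hooks (the x86 arch_cpu_probe()/arch_cpu_release() stubs appear in the smpboot.c hunk further down). As a rough illustration of the gating idea only, here is a userspace-style sketch; demo_probe_store(), the long return type, and the stub body are stand-ins, not the driver core's real interface.

/*
 * Illustrative sketch only: how a CONFIG_ARCH_CPU_PROBE_RELEASE-style
 * switch gates arch-provided probe hooks.  demo_probe_store() is a
 * stand-in for a sysfs "probe" handler; the real wiring lives in the
 * driver core.
 */
#include <stdio.h>

#define CONFIG_ARCH_CPU_PROBE_RELEASE 1 /* pretend the arch selected it */

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
/* Arch override: the x86 stub in this series just rejects the request. */
static long arch_cpu_probe(const char *buf, size_t count)
{
        (void)buf;
        (void)count;
        return -1;
}
#endif

static long demo_probe_store(const char *buf, size_t count)
{
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
        return arch_cpu_probe(buf, count);     /* defer to the architecture */
#else
        return -1;                             /* no probe support at all */
#endif
}

int main(void)
{
        printf("probe returned %ld\n", demo_probe_store("1", 1));
        return 0;
}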
@@ -26,6 +26,7 @@ struct mm_struct;
 struct vm_area_struct;
 extern pgd_t swapper_pg_dir[1024];
+extern pgd_t trampoline_pg_dir[1024];
 static inline void pgtable_cache_init(void) { }
 static inline void check_pgt_cache(void) { }
...
@@ -13,14 +13,17 @@ extern unsigned char *trampoline_base;
 extern unsigned long init_rsp;
 extern unsigned long initial_code;
+extern unsigned long initial_page_table;
 extern unsigned long initial_gs;
 #define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
 extern unsigned long setup_trampoline(void);
+extern void __init setup_trampoline_page_table(void);
 extern void __init reserve_trampoline_memory(void);
 #else
-static inline void reserve_trampoline_memory(void) {};
+static inline void setup_trampoline_page_table(void) {}
+static inline void reserve_trampoline_memory(void) {}
 #endif /* CONFIG_X86_TRAMPOLINE */
 #endif /* __ASSEMBLY__ */
...
@@ -1728,6 +1728,8 @@ __apicdebuginit(void) print_IO_APIC(void)
                struct irq_pin_list *entry;
                cfg = desc->chip_data;
+               if (!cfg)
+                       continue;
                entry = cfg->irq_2_pin;
                if (!entry)
                        continue;
...
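For context on the apic=debug fix: print_IO_APIC() walks every IRQ descriptor, and with sparse IRQs some descriptors have no chip_data yet, so the unconditional cfg->irq_2_pin dereference oopsed during boot. The added two-line check simply skips such entries. A standalone analogue of that defensive pattern is sketched below; the struct names are simplified stand-ins for the kernel types, not the real definitions.

/*
 * Standalone analogue of the fix: when dumping a table for debugging,
 * skip entries whose per-entry data was never initialized instead of
 * dereferencing a NULL pointer.
 */
#include <stdio.h>
#include <stddef.h>

struct irq_cfg {
        int vector;
};

struct irq_desc {
        struct irq_cfg *chip_data;      /* may be NULL for unused IRQs */
};

static void print_io_apic_like(struct irq_desc *descs, size_t n)
{
        for (size_t irq = 0; irq < n; irq++) {
                struct irq_cfg *cfg = descs[irq].chip_data;

                if (!cfg)               /* the added check: skip, don't crash */
                        continue;

                printf("IRQ %zu -> vector 0x%02x\n", irq, cfg->vector);
        }
}

int main(void)
{
        struct irq_cfg cfg0 = { .vector = 0x31 };
        struct irq_desc descs[3] = {
                { .chip_data = &cfg0 },
                { .chip_data = NULL },  /* sparse/uninitialized descriptor */
                { .chip_data = &cfg0 },
        };

        print_io_apic_like(descs, 3);
        return 0;
}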
@@ -669,7 +669,7 @@ bool cpu_has_amd_erratum(const int *erratum)
        }
        /* OSVW unavailable or ID unknown, match family-model-stepping range */
-       ms = (cpu->x86_model << 8) | cpu->x86_mask;
+       ms = (cpu->x86_model << 4) | cpu->x86_mask;
        while ((range = *erratum++))
                if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
                    (ms >= AMD_MODEL_RANGE_START(range)) &&
...
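The AMD errata regression is a units mismatch: the erratum table packs (model << 4) | stepping into 12-bit range start/end fields, so the value computed from the running CPU has to use the same 4-bit shift. The sketch below shows the effect; the AMD_MODEL_RANGE* macros are re-declared here as I recall them from the kernel headers, and the specific family/model/stepping values are illustrative, not an authoritative erratum entry.

/*
 * Demo of why the shift must be 4, not 8: range start/end encode
 * (model << 4) | stepping in 12-bit fields, so the value being
 * compared has to be packed the same way.
 */
#include <stdio.h>

#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
        (((f) << 24) | ((m_start) << 16) | ((s_start) << 12) | \
         ((m_end) << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)   (((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)    (((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)      ((range) & 0xfff)

int main(void)
{
        /* Erratum-400-style range: family 0xf, models 0x41/2 .. 0xff/f */
        const int range = AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf);
        const unsigned int family = 0xf, model = 0x6b, stepping = 0x2;

        unsigned int ms_ok  = (model << 4) | stepping;  /* matches the encoding */
        unsigned int ms_bad = (model << 8) | stepping;  /* the regression */

        printf("start=0x%03x end=0x%03x\n",
               AMD_MODEL_RANGE_START(range), AMD_MODEL_RANGE_END(range));
        printf("ms_ok=0x%03x  in range: %d\n", ms_ok,
               family == AMD_MODEL_RANGE_FAMILY(range) &&
               ms_ok >= AMD_MODEL_RANGE_START(range) &&
               ms_ok <= AMD_MODEL_RANGE_END(range));
        printf("ms_bad=0x%04x in range: %d\n", ms_bad,
               family == AMD_MODEL_RANGE_FAMILY(range) &&
               ms_bad >= AMD_MODEL_RANGE_START(range) &&
               ms_bad <= AMD_MODEL_RANGE_END(range));
        return 0;
}

With the 8-bit shift the packed value overflows the 12-bit end field, so an affected CPU silently falls outside every range and the erratum workaround is never applied.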
@@ -334,7 +334,7 @@ ENTRY(startup_32_smp)
 /*
  * Enable paging
  */
-       movl $pa(swapper_pg_dir),%eax
+       movl pa(initial_page_table), %eax
        movl %eax,%cr3          /* set the page table pointer.. */
        movl %cr0,%eax
        orl $X86_CR0_PG,%eax
@@ -614,6 +614,8 @@ ignore_int:
        .align 4
 ENTRY(initial_code)
        .long i386_start_kernel
+ENTRY(initial_page_table)
+       .long pa(swapper_pg_dir)
 /*
  * BSS section
@@ -629,6 +631,10 @@ ENTRY(swapper_pg_dir)
 #endif
 swapper_pg_fixmap:
        .fill 1024,4,0
+#ifdef CONFIG_X86_TRAMPOLINE
+ENTRY(trampoline_pg_dir)
+       .fill 1024,4,0
+#endif
 ENTRY(empty_zero_page)
        .fill 4096,1,0
...
@@ -1014,6 +1014,8 @@ void __init setup_arch(char **cmdline_p)
        paging_init();
        x86_init.paging.pagetable_setup_done(swapper_pg_dir);
+       setup_trampoline_page_table();
        tboot_probe();
 #ifdef CONFIG_X86_64
...
@@ -73,7 +73,6 @@
 #ifdef CONFIG_X86_32
 u8 apicid_2_node[MAX_APICID];
-static int low_mappings;
 #endif
 /* State of each CPU */
@@ -91,6 +90,25 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
 static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
 #define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
 #define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
+
+/*
+ * We need this for trampoline_base protection from concurrent accesses when
+ * off- and onlining cores wildly.
+ */
+static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
+
+void cpu_hotplug_driver_lock()
+{
+       mutex_lock(&x86_cpu_hotplug_driver_mutex);
+}
+
+void cpu_hotplug_driver_unlock()
+{
+       mutex_unlock(&x86_cpu_hotplug_driver_mutex);
+}
+
+ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
+ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
 #else
 static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
 #define get_idle_for_cpu(x)      (idle_thread_array[(x)])
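Why the mutex: every 32-bit bring-up now stages code and a page-table pointer through the same shared low-memory trampoline, so two concurrent probe/online operations must not interleave. The following is only a userspace analogue of that serialization, with a pthread mutex standing in for x86_cpu_hotplug_driver_mutex and a plain buffer standing in for trampoline_base; none of it is kernel code.

/*
 * Userspace analogue: concurrent "bring-up" threads stage data into one
 * shared buffer, so the copy-and-use sequence must be serialized.
 * Build with: cc demo.c -pthread
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static char trampoline_base[32];                /* shared staging area */
static pthread_mutex_t hotplug_mutex = PTHREAD_MUTEX_INITIALIZER;

static void *bring_up_cpu(void *arg)
{
        const char *payload = arg;

        pthread_mutex_lock(&hotplug_mutex);     /* cpu_hotplug_driver_lock() */
        /* copy-then-use must not interleave with another bring-up */
        memcpy(trampoline_base, payload, strlen(payload) + 1);
        printf("booted with trampoline contents: %s\n", trampoline_base);
        pthread_mutex_unlock(&hotplug_mutex);   /* cpu_hotplug_driver_unlock() */

        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, bring_up_cpu, "payload for cpu1");
        pthread_create(&t2, NULL, bring_up_cpu, "payload for cpu2");
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
}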
@@ -281,6 +299,18 @@ notrace static void __cpuinit start_secondary(void *unused)
         * fragile that we want to limit the things done here to the
         * most necessary things.
         */
+
+#ifdef CONFIG_X86_32
+       /*
+        * Switch away from the trampoline page-table
+        *
+        * Do this before cpu_init() because it needs to access per-cpu
+        * data which may not be mapped in the trampoline page-table.
+        */
+       load_cr3(swapper_pg_dir);
+       __flush_tlb_all();
+#endif
+
        vmi_bringup();
        cpu_init();
        preempt_disable();
@@ -299,12 +329,6 @@ notrace static void __cpuinit start_secondary(void *unused)
                        legacy_pic->chip->unmask(0);
        }
-#ifdef CONFIG_X86_32
-       while (low_mappings)
-               cpu_relax();
-       __flush_tlb_all();
-#endif
        /* This must be done before setting cpu_online_mask */
        set_cpu_sibling_map(raw_smp_processor_id());
        wmb();
@@ -750,6 +774,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 #ifdef CONFIG_X86_32
        /* Stack for startup_32 can be just as for start_secondary onwards */
        irq_ctx_init(cpu);
+       initial_page_table = __pa(&trampoline_pg_dir);
 #else
        clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
        initial_gs = per_cpu_offset(cpu);
@@ -897,20 +922,8 @@ int __cpuinit native_cpu_up(unsigned int cpu)
        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
-#ifdef CONFIG_X86_32
-       /* init low mem mapping */
-       clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-               min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
-       flush_tlb_all();
-       low_mappings = 1;
        err = do_boot_cpu(apicid, cpu);
-       zap_low_mappings(false);
-       low_mappings = 0;
-#else
-       err = do_boot_cpu(apicid, cpu);
-#endif
        if (err) {
                pr_debug("do_boot_cpu failed %d\n", err);
                return -EIO;
...
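Taken together, the smpboot.c changes replace the old low_mappings dance (temporarily cloning low mappings into swapper_pg_dir, then zapping them) with a per-bring-up page table: do_boot_cpu() points initial_page_table at trampoline_pg_dir, the AP enables paging on it in startup_32_smp, and start_secondary() switches to swapper_pg_dir before cpu_init(). The toy model below only mirrors that ordering with plain pointers standing in for CR3 values; it is not real paging code.

/*
 * Toy model of the new 32-bit bring-up handoff: the BSP publishes the
 * page-table address in initial_page_table, the AP enables "paging" on
 * whatever that points to, then switches to the real kernel page table
 * before touching per-CPU data.
 */
#include <stdio.h>

static int swapper_pg_dir[4];                   /* kernel page table */
static int trampoline_pg_dir[4];                /* 1:1 boot page table */
static int *initial_page_table = swapper_pg_dir;/* default: BSP boot path */

static int *cr3;                                /* "current page table" */

static void startup_32_smp(void)
{
        cr3 = initial_page_table;               /* movl pa(initial_page_table), %eax */
        printf("AP enabled paging on %s\n",
               cr3 == trampoline_pg_dir ? "trampoline_pg_dir" : "swapper_pg_dir");
}

static void start_secondary(void)
{
        cr3 = swapper_pg_dir;                   /* load_cr3(swapper_pg_dir) */
        printf("AP switched to swapper_pg_dir before cpu_init()\n");
}

static void do_boot_cpu(void)
{
        initial_page_table = trampoline_pg_dir; /* done by the BSP per bring-up */
        startup_32_smp();                       /* normally runs on the new CPU */
        start_secondary();
}

int main(void)
{
        do_boot_cpu();
        return 0;
}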
 #include <linux/io.h>
 #include <asm/trampoline.h>
+#include <asm/pgtable.h>
 #include <asm/e820.h>
 #if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP)
@@ -37,3 +38,20 @@ unsigned long __trampinit setup_trampoline(void)
        memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
        return virt_to_phys(trampoline_base);
 }
+
+void __init setup_trampoline_page_table(void)
+{
+#ifdef CONFIG_X86_32
+       /* Copy kernel address range */
+       clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
+                       swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+                       min_t(unsigned long, KERNEL_PGD_PTRS,
+                             KERNEL_PGD_BOUNDARY));
+
+       /* Initialize low mappings */
+       clone_pgd_range(trampoline_pg_dir,
+                       swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+                       min_t(unsigned long, KERNEL_PGD_PTRS,
+                             KERNEL_PGD_BOUNDARY));
+#endif
+}
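setup_trampoline_page_table() fills the dedicated trampoline PGD from swapper_pg_dir twice: once at the kernel offset (KERNEL_PGD_BOUNDARY, i.e. pgd_index(PAGE_OFFSET)) and once at index 0, so the AP gets 1:1-style low mappings without ever touching swapper_pg_dir; clone_pgd_range() is essentially a memcpy of PGD entries, and the min_t() clamp keeps the copy inside the low half. Below is a standalone sketch of the resulting layout; the PGD size and boundary are shrunken, illustrative stand-ins for the real constants from the pgtable headers.

/*
 * Standalone sketch of what setup_trampoline_page_table() produces:
 * the kernel half of swapper_pg_dir is copied into the trampoline PGD
 * both at the kernel offset and at index 0.
 */
#include <stdio.h>
#include <string.h>

typedef unsigned long pgd_t;

#define PTRS_PER_PGD            8       /* illustrative; non-PAE x86-32 uses 1024 */
#define KERNEL_PGD_BOUNDARY     6       /* illustrative; really pgd_index(PAGE_OFFSET) */
#define KERNEL_PGD_PTRS         (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

static pgd_t swapper_pg_dir[PTRS_PER_PGD];
static pgd_t trampoline_pg_dir[PTRS_PER_PGD];

/* clone_pgd_range() in the kernel is essentially this memcpy of entries */
static void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
{
        memcpy(dst, src, count * sizeof(pgd_t));
}

int main(void)
{
        /* pretend the kernel half of swapper_pg_dir holds real entries */
        for (int i = KERNEL_PGD_BOUNDARY; i < PTRS_PER_PGD; i++)
                swapper_pg_dir[i] = 0x1000ul * (i + 1);

        /*
         * Copy the kernel range, then mirror it into the low slots; the
         * kernel clamps the count with min_t() so the second copy never
         * spills past the low half (here KERNEL_PGD_PTRS is small enough).
         */
        clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
                        swapper_pg_dir + KERNEL_PGD_BOUNDARY, KERNEL_PGD_PTRS);
        clone_pgd_range(trampoline_pg_dir,
                        swapper_pg_dir + KERNEL_PGD_BOUNDARY, KERNEL_PGD_PTRS);

        for (int i = 0; i < PTRS_PER_PGD; i++)
                printf("trampoline_pg_dir[%d] = %#lx\n", i, trampoline_pg_dir[i]);
        return 0;
}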