Commit 19e00d59 authored by Linus Torvalds

Merge branch 'x86-boot-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 bootup updates from Ingo Molnar:
 "The changes in this cycle were:

   - Fix rare SMP-boot hang (mostly in virtual environments)

   - Fix build warning with certain (rare) toolchains"

* 'x86-boot-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/relocs: Make per_cpu_load_addr static
  x86/smpboot: Initialize secondary CPU only if master CPU will wait for it
parents 197fe6b0 eeeda4cd
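Note on the SMP-boot fix in the diff below: it inverts the boot handshake. A secondary CPU (AP) now announces itself in cpu_initialized_mask and then spins in wait_for_master_cpu() until the boot CPU (BSP) acknowledges it via cpu_callout_mask; the BSP in do_boot_cpu() waits up to 10 seconds for that announcement, acks, and then waits for the AP to check in via cpu_callin_mask. The following is an illustration only, a userspace model with pthreads and C11 atomics under invented names that merely mirror the cpumask names in the diff, not kernel code:

/*
 * Illustration only -- userspace model of the new BSP/AP handshake.
 * "initialized", "callout" and "callin" stand in for
 * cpu_initialized_mask, cpu_callout_mask and cpu_callin_mask.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static atomic_int initialized, callout, callin;

static void *ap_thread(void *arg)
{
	(void)arg;

	/* AP side, like wait_for_master_cpu(): announce ourselves ... */
	atomic_store(&initialized, 1);

	/* ... and do nothing else until the BSP acknowledges us. */
	while (!atomic_load(&callout))
		;				/* cpu_relax() in the kernel */

	/* per-CPU initialization would happen here; then report in */
	atomic_store(&callin, 1);
	return NULL;
}

int main(void)
{
	pthread_t ap;
	time_t deadline = time(NULL) + 10;	/* like timeout = jiffies + 10*HZ */

	pthread_create(&ap, NULL, ap_thread, NULL);

	/* BSP side, like do_boot_cpu(): wait for a sign of life from the AP */
	while (time(NULL) < deadline) {
		if (atomic_load(&initialized)) {
			atomic_store(&callout, 1);	/* tell the AP to proceed */
			break;
		}
		usleep(100);
	}

	if (!atomic_load(&callout)) {
		/* The AP was never told to proceed, so it is still parked
		 * in its wait loop and cannot touch shared state. */
		fprintf(stderr, "AP did not respond in time\n");
		return 1;
	}

	while (!atomic_load(&callin))		/* wait till the AP checks in */
		usleep(100);

	pthread_join(ap, NULL);
	printf("AP online\n");
	return 0;
}

The point of the ordering, as the added comments in the diff describe, is that an AP which never receives the callout acknowledgement never proceeds past wait_for_master_cpu(), so a timed-out bring-up attempt no longer leaves a half-initialized CPU running behind the boot CPU's back.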
@@ -1266,6 +1266,19 @@ static void dbg_restore_debug_regs(void)
 #define dbg_restore_debug_regs()
 #endif /* ! CONFIG_KGDB */
 
+static void wait_for_master_cpu(int cpu)
+{
+#ifdef CONFIG_SMP
+	/*
+	 * wait for ACK from master CPU before continuing
+	 * with AP initialization
+	 */
+	WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
+	while (!cpumask_test_cpu(cpu, cpu_callout_mask))
+		cpu_relax();
+#endif
+}
+
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
  * initialized (naturally) in the bootstrap process, such as the GDT
@@ -1281,16 +1294,17 @@ void cpu_init(void)
 	struct task_struct *me;
 	struct tss_struct *t;
 	unsigned long v;
-	int cpu;
+	int cpu = stack_smp_processor_id();
 	int i;
 
+	wait_for_master_cpu(cpu);
+
 	/*
 	 * Load microcode on this cpu if a valid microcode is available.
 	 * This is early microcode loading procedure.
 	 */
 	load_ucode_ap();
 
-	cpu = stack_smp_processor_id();
 	t = &per_cpu(init_tss, cpu);
 	oist = &per_cpu(orig_ist, cpu);
@@ -1302,9 +1316,6 @@ void cpu_init(void)
 	me = current;
 
-	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
-		panic("CPU#%d already initialized!\n", cpu);
-
 	pr_debug("Initializing CPU#%d\n", cpu);
 
 	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
@@ -1381,13 +1392,9 @@ void cpu_init(void)
 	struct tss_struct *t = &per_cpu(init_tss, cpu);
 	struct thread_struct *thread = &curr->thread;
 
-	show_ucode_info_early();
+	wait_for_master_cpu(cpu);
 
-	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
-		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
-		for (;;)
-			local_irq_enable();
-	}
+	show_ucode_info_early();
 
 	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
......
@@ -111,7 +111,6 @@ atomic_t init_deasserted;
 static void smp_callin(void)
 {
 	int cpuid, phys_id;
-	unsigned long timeout;
 
 	/*
 	 * If waken up by an INIT in an 82489DX configuration
@@ -130,37 +129,6 @@ static void smp_callin(void)
 	 * (This works even if the APIC is not enabled.)
 	 */
 	phys_id = read_apic_id();
-	if (cpumask_test_cpu(cpuid, cpu_callin_mask)) {
-		panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
-					phys_id, cpuid);
-	}
-	pr_debug("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
-
-	/*
-	 * STARTUP IPIs are fragile beasts as they might sometimes
-	 * trigger some glue motherboard logic. Complete APIC bus
-	 * silence for 1 second, this overestimates the time the
-	 * boot CPU is spending to send the up to 2 STARTUP IPIs
-	 * by a factor of two. This should be enough.
-	 */
-
-	/*
-	 * Waiting 2s total for startup (udelay is not yet working)
-	 */
-	timeout = jiffies + 2*HZ;
-	while (time_before(jiffies, timeout)) {
-		/*
-		 * Has the boot CPU finished it's STARTUP sequence?
-		 */
-		if (cpumask_test_cpu(cpuid, cpu_callout_mask))
-			break;
-		cpu_relax();
-	}
-
-	if (!time_before(jiffies, timeout)) {
-		panic("%s: CPU%d started up but did not get a callout!\n",
-		      __func__, cpuid);
-	}
 
 	/*
 	 * the boot CPU has finished the init stage and is spinning
@@ -790,8 +758,8 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 	unsigned long start_ip = real_mode_header->trampoline_start;
 
 	unsigned long boot_error = 0;
-	int timeout;
 	int cpu0_nmi_registered = 0;
+	unsigned long timeout;
 
 	/* Just in case we booted with a single CPU. */
 	alternatives_enable_smp();
@@ -838,6 +806,15 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 		}
 	}
 
+	/*
+	 * AP might wait on cpu_callout_mask in cpu_init() with
+	 * cpu_initialized_mask set if previous attempt to online
+	 * it timed-out. Clear cpu_initialized_mask so that after
+	 * INIT/SIPI it could start with a clean state.
+	 */
+	cpumask_clear_cpu(cpu, cpu_initialized_mask);
+	smp_mb();
+
 	/*
 	 * Wake up a CPU in difference cases:
 	 * - Use the method in the APIC driver if it's defined
@@ -852,53 +829,38 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 	if (!boot_error) {
 		/*
-		 * allow APs to start initializing.
+		 * Wait 10s total for a response from AP
 		 */
-		pr_debug("Before Callout %d\n", cpu);
-		cpumask_set_cpu(cpu, cpu_callout_mask);
-		pr_debug("After Callout %d\n", cpu);
+		boot_error = -1;
+		timeout = jiffies + 10*HZ;
+		while (time_before(jiffies, timeout)) {
+			if (cpumask_test_cpu(cpu, cpu_initialized_mask)) {
+				/*
+				 * Tell AP to proceed with initialization
+				 */
+				cpumask_set_cpu(cpu, cpu_callout_mask);
+				boot_error = 0;
+				break;
+			}
+			udelay(100);
+			schedule();
+		}
+	}
 
+	if (!boot_error) {
 		/*
-		 * Wait 5s total for a response
+		 * Wait till AP completes initial initialization
 		 */
-		for (timeout = 0; timeout < 50000; timeout++) {
-			if (cpumask_test_cpu(cpu, cpu_callin_mask))
-				break;	/* It has booted */
-			udelay(100);
+		while (!cpumask_test_cpu(cpu, cpu_callin_mask)) {
 			/*
 			 * Allow other tasks to run while we wait for the
 			 * AP to come online. This also gives a chance
 			 * for the MTRR work(triggered by the AP coming online)
 			 * to be completed in the stop machine context.
 			 */
+			udelay(100);
 			schedule();
 		}
-
-		if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
-			print_cpu_msr(&cpu_data(cpu));
-			pr_debug("CPU%d: has booted.\n", cpu);
-		} else {
-			boot_error = 1;
-			if (*trampoline_status == 0xA5A5A5A5)
-				/* trampoline started but...? */
-				pr_err("CPU%d: Stuck ??\n", cpu);
-			else
-				/* trampoline code not run */
-				pr_err("CPU%d: Not responding\n", cpu);
-			if (apic->inquire_remote_apic)
-				apic->inquire_remote_apic(apicid);
-		}
-	}
-
-	if (boot_error) {
-		/* Try to put things back the way they were before ... */
-		numa_remove_cpu(cpu); /* was set by numa_add_cpu */
-
-		/* was set by do_boot_cpu() */
-		cpumask_clear_cpu(cpu, cpu_callout_mask);
-
-		/* was set by cpu_init() */
-		cpumask_clear_cpu(cpu, cpu_initialized_mask);
 	}
 
 	/* mark "stuck" area as not stuck */
......
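In the do_boot_cpu() hunk above, the fixed budget of 50000 udelay(100) iterations ("Wait 5s total") is replaced by a jiffies deadline (timeout = jiffies + 10*HZ with time_before()), which keeps the limit a wall-clock bound even though each iteration may sleep in schedule() for an unpredictable stretch. As a hedged userspace analogue of that loop shape only, with CLOCK_MONOTONIC standing in for jiffies and all names invented for the example:

/* Illustration only: deadline-based polling, the same shape as the
 * "timeout = jiffies + 10*HZ; while (time_before(jiffies, timeout))"
 * loop in do_boot_cpu() above. */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* returns true if cond() became true before the deadline expired */
static bool poll_until(bool (*cond)(void), int timeout_sec)
{
	struct timespec now, deadline;

	clock_gettime(CLOCK_MONOTONIC, &deadline);
	deadline.tv_sec += timeout_sec;

	for (;;) {
		if (cond())
			return true;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if (now.tv_sec > deadline.tv_sec ||
		    (now.tv_sec == deadline.tv_sec &&
		     now.tv_nsec >= deadline.tv_nsec))
			return false;
		usleep(100);		/* like udelay(100) + schedule() above */
	}
}

static bool never_ready(void)
{
	return false;
}

int main(void)
{
	/* a condition that never comes true times out after ~1 second */
	printf("came true before deadline: %d\n", poll_until(never_ready, 1));
	return 0;
}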
@@ -695,7 +695,7 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
  *
  */
 static int per_cpu_shndx = -1;
-Elf_Addr per_cpu_load_addr;
+static Elf_Addr per_cpu_load_addr;
 
 static void percpu_init(void)
 {
......
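The one-line relocs.c change above is the "build warning with certain (rare) toolchains" fix from the merge summary. The exact diagnostic is not quoted here; the general pattern is that a file-scope definition with external linkage and no visible declaration can trigger warnings such as clang's -Wmissing-variable-declarations, and marking it static gives it internal linkage. A minimal, hypothetical stand-alone illustration (not the kernel tool; names invented):

/* demo.c -- hypothetical illustration of the linkage difference */

int unused_extern_addr;		/* external linkage, no prior declaration:
				 * some toolchains warn about this */

static int private_addr;	/* internal linkage, as per_cpu_load_addr
				 * becomes in the patch above: no warning */

int main(void)
{
	private_addr = 1;
	unused_extern_addr = private_addr;
	return 0;
}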
@@ -365,6 +365,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	struct desc_struct *gdt;
 	unsigned long gdt_mfn;
 
+	/* used to tell cpu_init() that it can proceed with initialization */
+	cpumask_set_cpu(cpu, cpu_callout_mask);
 	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
 		return 0;
......