Commit 0671b767 authored by Linus Torvalds

Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  percpu: Remove the multi-page alignment facility
  x86-32: Allocate irq stacks seperate from percpu area
  x86-32, mm: Remove duplicated #include
  x86, printk: Get rid of <0> from stack output
  x86, kexec: Make sure to stop all CPUs before exiting the kernel
  x86/vsmp: Eliminate kconfig dependency warning
parents 0b2d8d9e 47f19a08

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
@@ -347,6 +347,7 @@ endif
 config X86_VSMP
 	bool "ScaleMP vSMP"
+	select PARAVIRT_GUEST
 	select PARAVIRT
 	depends on X86_64 && PCI
 	depends on X86_EXTENDED_PLATFORM
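
For context on the vSMP item: PARAVIRT lives under the PARAVIRT_GUEST menu in arch/x86/Kconfig, so an entry that selects PARAVIRT without also selecting PARAVIRT_GUEST triggers kconfig's "unmet direct dependencies" warning at configuration time. Selecting both, as the hunk above now does, satisfies the select chain and silences the warning.
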
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
@@ -21,10 +21,8 @@ static inline int irq_canonicalize(int irq)
 #ifdef CONFIG_X86_32
 extern void irq_ctx_init(int cpu);
-extern void irq_ctx_exit(int cpu);
 #else
 # define irq_ctx_init(cpu) do { } while (0)
-# define irq_ctx_exit(cpu) do { } while (0)
 #endif
 
 #define __ARCH_HAS_DO_SOFTIRQ
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
@@ -50,7 +50,7 @@ struct smp_ops {
 	void (*smp_prepare_cpus)(unsigned max_cpus);
 	void (*smp_cpus_done)(unsigned max_cpus);
 
-	void (*smp_send_stop)(void);
+	void (*stop_other_cpus)(int wait);
 	void (*smp_send_reschedule)(int cpu);
 
 	int (*cpu_up)(unsigned cpu);
@@ -73,7 +73,12 @@ extern struct smp_ops smp_ops;
 
 static inline void smp_send_stop(void)
 {
-	smp_ops.smp_send_stop();
+	smp_ops.stop_other_cpus(0);
+}
+
+static inline void stop_other_cpus(void)
+{
+	smp_ops.stop_other_cpus(1);
 }
 
 static inline void smp_prepare_boot_cpu(void)
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
@@ -82,11 +82,11 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
 		if (kstack_end(stack))
 			break;
 		if (i && ((i % STACKSLOTS_PER_LINE) == 0))
-			printk("\n%s", log_lvl);
-		printk(" %08lx", *stack++);
+			printk(KERN_CONT "\n");
+		printk(KERN_CONT " %08lx", *stack++);
 		touch_nmi_watchdog();
 	}
-	printk("\n");
+	printk(KERN_CONT "\n");
 	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
 }
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
@@ -265,20 +265,20 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
 		if (stack >= irq_stack && stack <= irq_stack_end) {
 			if (stack == irq_stack_end) {
 				stack = (unsigned long *) (irq_stack_end[-1]);
-				printk(" <EOI> ");
+				printk(KERN_CONT " <EOI> ");
 			}
 		} else {
 		if (((long) stack & (THREAD_SIZE-1)) == 0)
 			break;
 		}
 		if (i && ((i % STACKSLOTS_PER_LINE) == 0))
-			printk("\n%s", log_lvl);
-		printk(" %016lx", *stack++);
+			printk(KERN_CONT "\n");
+		printk(KERN_CONT " %016lx", *stack++);
 		touch_nmi_watchdog();
 	}
 	preempt_enable();
 
-	printk("\n");
+	printk(KERN_CONT "\n");
 	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
 }
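
The two dumpstack hunks above are the "Get rid of <0> from stack output" fix. The old code printed the log-level string by hand at every line break (printk("\n%s", log_lvl)); since the level marker is only interpreted at the very start of a message, a dump running at KERN_EMERG emitted the literal "<0>" mid-stream. The replacement uses the standard continuation idiom: open a line at a real level once, then append every fragment with KERN_CONT. A minimal sketch of that idiom (print_stack_line() and the "Stack:" prefix are made up for the example, not taken from the patch):

/*
 * Illustrative sketch of the KERN_CONT idiom; not code from the patch.
 */
static void print_stack_line(const unsigned long *stack, int n)
{
	int i;

	printk(KERN_DEFAULT "Stack:");		/* open the record at a real level */
	for (i = 0; i < n; i++)
		printk(KERN_CONT " %08lx", stack[i]);	/* same line, no new header */
	printk(KERN_CONT "\n");			/* terminate the record */
}
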
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
@@ -60,9 +60,6 @@ union irq_ctx {
 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
 
-static DEFINE_PER_CPU_MULTIPAGE_ALIGNED(union irq_ctx, hardirq_stack, THREAD_SIZE);
-static DEFINE_PER_CPU_MULTIPAGE_ALIGNED(union irq_ctx, softirq_stack, THREAD_SIZE);
-
 static void call_on_stack(void *func, void *stack)
 {
 	asm volatile("xchgl	%%ebx,%%esp	\n"
@@ -128,7 +125,7 @@ void __cpuinit irq_ctx_init(int cpu)
 	if (per_cpu(hardirq_ctx, cpu))
 		return;
 
-	irqctx = &per_cpu(hardirq_stack, cpu);
+	irqctx = (union irq_ctx *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER);
 	irqctx->tinfo.task		= NULL;
 	irqctx->tinfo.exec_domain	= NULL;
 	irqctx->tinfo.cpu		= cpu;
@@ -137,7 +134,7 @@ void __cpuinit irq_ctx_init(int cpu)
 	per_cpu(hardirq_ctx, cpu) = irqctx;
 
-	irqctx = &per_cpu(softirq_stack, cpu);
+	irqctx = (union irq_ctx *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER);
 	irqctx->tinfo.task		= NULL;
 	irqctx->tinfo.exec_domain	= NULL;
 	irqctx->tinfo.cpu		= cpu;
@@ -150,11 +147,6 @@ void __cpuinit irq_ctx_init(int cpu)
 	       cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
 }
 
-void irq_ctx_exit(int cpu)
-{
-	per_cpu(hardirq_ctx, cpu) = NULL;
-}
-
 asmlinkage void do_softirq(void)
 {
 	unsigned long flags;
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
@@ -635,7 +635,7 @@ void native_machine_shutdown(void)
 	/* O.K Now that I'm on the appropriate processor,
 	 * stop all of the others.
 	 */
-	smp_send_stop();
+	stop_other_cpus();
 #endif
 
 	lapic_shutdown();
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
@@ -159,10 +159,10 @@ asmlinkage void smp_reboot_interrupt(void)
 	irq_exit();
 }
 
-static void native_smp_send_stop(void)
+static void native_stop_other_cpus(int wait)
 {
 	unsigned long flags;
-	unsigned long wait;
+	unsigned long timeout;
 
 	if (reboot_force)
 		return;
@@ -179,9 +179,12 @@ static void native_smp_send_stop(void)
 	if (num_online_cpus() > 1) {
 		apic->send_IPI_allbutself(REBOOT_VECTOR);
 
-		/* Don't wait longer than a second */
-		wait = USEC_PER_SEC;
-		while (num_online_cpus() > 1 && wait--)
+		/*
+		 * Don't wait longer than a second if the caller
+		 * didn't ask us to wait.
+		 */
+		timeout = USEC_PER_SEC;
+		while (num_online_cpus() > 1 && (wait || timeout--))
 			udelay(1);
 	}
 
@@ -227,7 +230,7 @@ struct smp_ops smp_ops = {
 	.smp_prepare_cpus	= native_smp_prepare_cpus,
 	.smp_cpus_done		= native_smp_cpus_done,
 
-	.smp_send_stop		= native_smp_send_stop,
+	.stop_other_cpus	= native_stop_other_cpus,
 	.smp_send_reschedule	= native_smp_send_reschedule,
 
 	.cpu_up			= native_cpu_up,
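
The rename from native_smp_send_stop() to native_stop_other_cpus() carries one semantic change, visible in the loop above: with wait == 0 (the smp_send_stop() wrapper) the spin is still capped at roughly one second, while wait != 0 (the new stop_other_cpus() wrapper used on the kexec path) keeps spinning until every other CPU has actually gone offline. A self-contained userspace sketch of the same pattern, with threads standing in for CPUs (all names are illustrative; this is not kernel code):

/*
 * Userspace sketch of the stop_other_cpus(wait) semantics above.
 * Build with: cc -pthread stop_sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NCPUS 4

static atomic_int stop_requested;	/* stands in for the REBOOT_VECTOR IPI */
static atomic_int online = NCPUS;	/* stands in for num_online_cpus() */

static void *cpu_worker(void *arg)
{
	(void)arg;
	while (!atomic_load(&stop_requested))
		usleep(10);			/* "doing work" */
	atomic_fetch_sub(&online, 1);		/* this "CPU" goes offline */
	return NULL;
}

static void stop_other_cpus(int wait)
{
	long timeout = 1000000;			/* ~1s, like USEC_PER_SEC */

	if (atomic_load(&online) > 1) {
		atomic_store(&stop_requested, 1);
		/*
		 * Enforce the timeout only when the caller did not ask
		 * to wait -- the same condition as the kernel loop above.
		 */
		while (atomic_load(&online) > 1 && (wait || timeout--))
			usleep(1);
	}
}

int main(void)
{
	pthread_t t[NCPUS - 1];
	int i;

	for (i = 0; i < NCPUS - 1; i++)
		pthread_create(&t[i], NULL, cpu_worker, NULL);

	stop_other_cpus(1);			/* kexec-style: wait for all */
	printf("online after stop: %d\n", atomic_load(&online));

	for (i = 0; i < NCPUS - 1; i++)
		pthread_join(t[i], NULL);
	return 0;
}

The kexec path needs the waiting variant: returning while another CPU is still executing kernel code would let it race with the image that is about to replace the running kernel.
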
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
@@ -1373,7 +1373,6 @@ void play_dead_common(void)
 {
 	idle_task_exit();
 	reset_lazy_tlbstate();
-	irq_ctx_exit(raw_smp_processor_id());
 	c1e_remove_cpu(raw_smp_processor_id());
 
 	mb();
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
@@ -1016,7 +1016,7 @@ static void xen_reboot(int reason)
 	struct sched_shutdown r = { .reason = reason };
 
 #ifdef CONFIG_SMP
-	smp_send_stop();
+	stop_other_cpus();
 #endif
 
 	if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
@@ -400,9 +400,9 @@ static void stop_self(void *v)
 	BUG();
 }
 
-static void xen_smp_send_stop(void)
+static void xen_stop_other_cpus(int wait)
 {
-	smp_call_function(stop_self, NULL, 0);
+	smp_call_function(stop_self, NULL, wait);
 }
 
 static void xen_smp_send_reschedule(int cpu)
@@ -470,7 +470,7 @@ static const struct smp_ops xen_smp_ops __initdata = {
 	.cpu_disable = xen_cpu_disable,
 	.play_dead = xen_play_dead,
 
-	.smp_send_stop = xen_smp_send_stop,
+	.stop_other_cpus = xen_stop_other_cpus,
 	.smp_send_reschedule = xen_smp_send_reschedule,
 
 	.send_call_func_ipi = xen_smp_send_call_function_ipi,
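
Xen needs no timeout loop of its own because smp_call_function() already takes a wait flag: when it is non-zero, the caller blocks until the cross-call function has run on all other CPUs, so xen_stop_other_cpus() can simply forward its argument. For reference, the shape of that helper as declared in include/linux/smp.h around this release (quoted from memory, so treat the exact signature as approximate):

int smp_call_function(void (*func)(void *info), void *info, int wait);
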
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
@@ -147,18 +147,6 @@
 #define DEFINE_PER_CPU_READ_MOSTLY(type, name)				\
 	DEFINE_PER_CPU_SECTION(type, name, "..readmostly")
 
-/*
- * Declaration/definition used for large per-CPU variables that must be
- * aligned to something larger than the pagesize.
- */
-#define DECLARE_PER_CPU_MULTIPAGE_ALIGNED(type, name, size)		\
-	DECLARE_PER_CPU_SECTION(type, name, "..page_aligned")		\
-	__aligned(size)
-
-#define DEFINE_PER_CPU_MULTIPAGE_ALIGNED(type, name, size)		\
-	DEFINE_PER_CPU_SECTION(type, name, "..page_aligned")		\
-	__aligned(size)
-
 /*
  * Intermodule exports for per-CPU variables.  sparse forgets about
  * address space across EXPORT_SYMBOL(), change EXPORT_SYMBOL() to
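
This last hunk is the "percpu: Remove the multi-page alignment facility" part of the merge. The macros existed for a single user, the 32-bit per-CPU IRQ stacks, which needed THREAD_SIZE alignment (8 KB on x86-32, i.e. larger than one page) inside the percpu area; with those stacks now taken from the page allocator in irq_ctx_init() above, nothing uses them any more. For reference, these were the only definitions, as removed in the arch/x86/kernel/irq_32.c hunk above:

/* Removed by this merge -- shown for reference only. */
static DEFINE_PER_CPU_MULTIPAGE_ALIGNED(union irq_ctx, hardirq_stack, THREAD_SIZE);
static DEFINE_PER_CPU_MULTIPAGE_ALIGNED(union irq_ctx, softirq_stack, THREAD_SIZE);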