Commit b8d490c3 authored by Ingo Molnar

Merge branch 'irq/core-v6' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks into irq/core

Pull hardirq and softirq nesting updates from Frederic Weisbecker,
which fix nesting-related stack overruns such as:

  http://lkml.kernel.org/r/1378330796.4321.50.camel%40pasglop

Beyond being a fix, this series also optimizes and reorganizes arch
hardirq/softirq stack processing to be faster and more robust.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 8a60d42d 62d26c82
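
The series boils down to one generic entry point plus a per-arch stack-switch
hook. Condensed from the hunks below (a paraphrase, not the verbatim code):

    /* include/linux/interrupt.h: arch hook with an inline fallback */
    #ifdef __ARCH_HAS_DO_SOFTIRQ
    void do_softirq_own_stack(void);    /* arch switches to its softirq stack */
    #else
    static inline void do_softirq_own_stack(void)
    {
        __do_softirq();                 /* no dedicated stack: run inline */
    }
    #endif

    /* kernel/softirq.c: the checks every arch used to duplicate */
    asmlinkage void do_softirq(void)
    {
        unsigned long flags;

        if (in_interrupt())
            return;

        local_irq_save(flags);
        if (local_softirq_pending())
            do_softirq_own_stack();
        local_irq_restore(flags);
    }

Each architecture now implements only the stack switch itself in
do_softirq_own_stack(); the in_interrupt() and pending checks live in one
place instead of being copied into every arch's do_softirq().
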
@@ -390,6 +390,16 @@ config HAVE_UNDERSCORE_SYMBOL_PREFIX
 	  Some architectures generate an _ in front of C symbols; things like
 	  module loading and assembly files need to know about this.
 
+config HAVE_IRQ_EXIT_ON_IRQ_STACK
+	bool
+	help
+	  The architecture executes not only the irq handler on the irq
+	  stack but also irq_exit(). This way we can process softirqs on
+	  this irq stack instead of switching to a new one when we call
+	  __do_softirq() at the end of a hardirq.
+	  This spares a stack switch and improves cache usage on softirq
+	  processing.
+
 #
 # ABI hall of shame
 #
......
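
For reference, the new symbol is consumed at irq_exit() time; the
kernel/softirq.c hunk at the end of this diff restructures invoke_softirq()
roughly like this (condensed sketch, early-return form):

    static inline void invoke_softirq(void)
    {
        if (force_irqthreads) {
            wakeup_softirqd();
            return;
        }
    #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
        /* Still on the nearly empty irq stack: run softirqs inline. */
        __do_softirq();
    #else
        /* On a possibly deep task stack: switch to the softirq stack. */
        do_softirq_own_stack();
    #endif
    }

Only architectures that really run irq_exit() on the irq stack (powerpc and
x86-64 below) may select this; selecting it elsewhere would reintroduce the
overruns this series fixes.
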
@@ -159,44 +159,30 @@ void irq_ctx_exit(int cpu)
 
 extern asmlinkage void __do_softirq(void);
 
-asmlinkage void do_softirq(void)
+void do_softirq_own_stack(void)
 {
-	unsigned long flags;
 	struct thread_info *curctx;
 	union irq_ctx *irqctx;
 	u32 *isp;
 
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
-
-	if (local_softirq_pending()) {
-		curctx = current_thread_info();
-		irqctx = softirq_ctx[smp_processor_id()];
-		irqctx->tinfo.task = curctx->task;
-
-		/* build the stack frame on the softirq stack */
-		isp = (u32 *) ((char *)irqctx + sizeof(struct thread_info));
-
-		asm volatile (
-			"MOV D0.5,%0\n"
-			"SWAP A0StP,D0.5\n"
-			"CALLR D1RtP,___do_softirq\n"
-			"MOV A0StP,D0.5\n"
-			:
-			: "r" (isp)
-			: "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4",
-			  "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP",
-			  "D0.5"
-			);
-
-		/*
-		 * Shouldn't happen, we returned above if in_interrupt():
-		 */
-		WARN_ON_ONCE(softirq_count());
-	}
-
-	local_irq_restore(flags);
+	curctx = current_thread_info();
+	irqctx = softirq_ctx[smp_processor_id()];
+	irqctx->tinfo.task = curctx->task;
+
+	/* build the stack frame on the softirq stack */
+	isp = (u32 *) ((char *)irqctx + sizeof(struct thread_info));
+
+	asm volatile (
+		"MOV D0.5,%0\n"
+		"SWAP A0StP,D0.5\n"
+		"CALLR D1RtP,___do_softirq\n"
+		"MOV A0StP,D0.5\n"
+		:
+		: "r" (isp)
+		: "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4",
+		  "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP",
+		  "D0.5"
+		);
 }
 #endif
......
@@ -499,22 +499,9 @@ static void execute_on_irq_stack(void *func, unsigned long param1)
 	*irq_stack_in_use = 1;
 }
 
-asmlinkage void do_softirq(void)
+void do_softirq_own_stack(void)
 {
-	__u32 pending;
-	unsigned long flags;
-
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
-
-	pending = local_softirq_pending();
-
-	if (pending)
-		execute_on_irq_stack(__do_softirq, 0);
-
-	local_irq_restore(flags);
+	execute_on_irq_stack(__do_softirq, 0);
 }
 #endif /* CONFIG_IRQSTACKS */
......
@@ -138,6 +138,7 @@ config PPC
 	select OLD_SIGSUSPEND
 	select OLD_SIGACTION if PPC32
 	select HAVE_DEBUG_STACKOVERFLOW
+	select HAVE_IRQ_EXIT_ON_IRQ_STACK
 
 config EARLY_PRINTK
 	bool
......
@@ -593,7 +593,7 @@ void irq_ctx_init(void)
 	}
 }
 
-static inline void do_softirq_onstack(void)
+void do_softirq_own_stack(void)
 {
 	struct thread_info *curtp, *irqtp;
 
@@ -611,21 +611,6 @@ static inline void do_softirq_onstack(void)
 	set_bits(irqtp->flags, &curtp->flags);
 }
 
-void do_softirq(void)
-{
-	unsigned long flags;
-
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
-
-	if (local_softirq_pending())
-		do_softirq_onstack();
-
-	local_irq_restore(flags);
-}
-
 irq_hw_number_t virq_to_hw(unsigned int virq)
 {
 	struct irq_data *irq_data = irq_get_irq_data(virq);
......
@@ -157,39 +157,29 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 /*
  * Switch to the asynchronous interrupt stack for softirq execution.
  */
-asmlinkage void do_softirq(void)
+void do_softirq_own_stack(void)
 {
-	unsigned long flags, old, new;
-
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
-
-	if (local_softirq_pending()) {
-		/* Get current stack pointer. */
-		asm volatile("la %0,0(15)" : "=a" (old));
-		/* Check against async. stack address range. */
-		new = S390_lowcore.async_stack;
-		if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) {
-			/* Need to switch to the async. stack. */
-			new -= STACK_FRAME_OVERHEAD;
-			((struct stack_frame *) new)->back_chain = old;
-			asm volatile(" la 15,0(%0)\n"
-				     " basr 14,%2\n"
-				     " la 15,0(%1)\n"
-				     : : "a" (new), "a" (old),
-					 "a" (__do_softirq)
-				     : "0", "1", "2", "3", "4", "5", "14",
-				       "cc", "memory" );
-		} else {
-			/* We are already on the async stack. */
-			__do_softirq();
-		}
-	}
-
-	local_irq_restore(flags);
+	unsigned long old, new;
+
+	/* Get current stack pointer. */
+	asm volatile("la %0,0(15)" : "=a" (old));
+	/* Check against async. stack address range. */
+	new = S390_lowcore.async_stack;
+	if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) {
+		/* Need to switch to the async. stack. */
+		new -= STACK_FRAME_OVERHEAD;
+		((struct stack_frame *) new)->back_chain = old;
+		asm volatile(" la 15,0(%0)\n"
+			     " basr 14,%2\n"
+			     " la 15,0(%1)\n"
+			     : : "a" (new), "a" (old),
+				 "a" (__do_softirq)
+			     : "0", "1", "2", "3", "4", "5", "14",
+			       "cc", "memory" );
+	} else {
+		/* We are already on the async stack. */
+		__do_softirq();
+	}
 }
 
 /*
......
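
Note that s390 keeps a runtime check rather than the compile-time Kconfig
symbol: if the stack pointer already lies within the async stack range, it
calls __do_softirq() inline, which is the same "already on the irq stack"
reasoning that HAVE_IRQ_EXIT_ON_IRQ_STACK encodes statically elsewhere.
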
@@ -149,47 +149,32 @@ void irq_ctx_exit(int cpu)
 	hardirq_ctx[cpu] = NULL;
 }
 
-asmlinkage void do_softirq(void)
+void do_softirq_own_stack(void)
 {
-	unsigned long flags;
 	struct thread_info *curctx;
 	union irq_ctx *irqctx;
 	u32 *isp;
 
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
-
-	if (local_softirq_pending()) {
-		curctx = current_thread_info();
-		irqctx = softirq_ctx[smp_processor_id()];
-		irqctx->tinfo.task = curctx->task;
-		irqctx->tinfo.previous_sp = current_stack_pointer;
-
-		/* build the stack frame on the softirq stack */
-		isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
-
-		__asm__ __volatile__ (
-			"mov r15, r9 \n"
-			"jsr @%0 \n"
-			/* switch to the softirq stack */
-			" mov %1, r15 \n"
-			/* restore the thread stack */
-			"mov r9, r15 \n"
-			: /* no outputs */
-			: "r" (__do_softirq), "r" (isp)
-			: "memory", "r0", "r1", "r2", "r3", "r4",
-			  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
-		);
-
-		/*
-		 * Shouldn't happen, we returned above if in_interrupt():
-		 */
-		WARN_ON_ONCE(softirq_count());
-	}
-
-	local_irq_restore(flags);
+	curctx = current_thread_info();
+	irqctx = softirq_ctx[smp_processor_id()];
+	irqctx->tinfo.task = curctx->task;
+	irqctx->tinfo.previous_sp = current_stack_pointer;
+
+	/* build the stack frame on the softirq stack */
+	isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
+
+	__asm__ __volatile__ (
+		"mov r15, r9 \n"
+		"jsr @%0 \n"
+		/* switch to the softirq stack */
+		" mov %1, r15 \n"
+		/* restore the thread stack */
+		"mov r9, r15 \n"
+		: /* no outputs */
+		: "r" (__do_softirq), "r" (isp)
+		: "memory", "r0", "r1", "r2", "r3", "r4",
+		  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
+	);
 }
 #else
 static inline void handle_one_irq(unsigned int irq)
......
@@ -698,30 +698,19 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
 	set_irq_regs(old_regs);
 }
 
-void do_softirq(void)
+void do_softirq_own_stack(void)
 {
-	unsigned long flags;
-
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
+	void *orig_sp, *sp = softirq_stack[smp_processor_id()];
 
-	if (local_softirq_pending()) {
-		void *orig_sp, *sp = softirq_stack[smp_processor_id()];
-
-		sp += THREAD_SIZE - 192 - STACK_BIAS;
+	sp += THREAD_SIZE - 192 - STACK_BIAS;
 
-		__asm__ __volatile__("mov %%sp, %0\n\t"
-				     "mov %1, %%sp"
-				     : "=&r" (orig_sp)
-				     : "r" (sp));
-		__do_softirq();
-		__asm__ __volatile__("mov %0, %%sp"
-				     : : "r" (orig_sp));
-	}
-
-	local_irq_restore(flags);
+	__asm__ __volatile__("mov %%sp, %0\n\t"
+			     "mov %1, %%sp"
+			     : "=&r" (orig_sp)
+			     : "r" (sp));
+	__do_softirq();
+	__asm__ __volatile__("mov %0, %%sp"
+			     : : "r" (orig_sp));
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
......
@@ -123,6 +123,7 @@ config X86
 	select COMPAT_OLD_SIGACTION if IA32_EMULATION
 	select RTC_LIB
 	select HAVE_DEBUG_STACKOVERFLOW
+	select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
 
 config INSTRUCTION_DECODER
 	def_bool y
......
@@ -1342,7 +1342,7 @@ bad_gs:
 	.previous
 
 /* Call softirq on interrupt stack. Interrupts are off. */
-ENTRY(call_softirq)
+ENTRY(do_softirq_own_stack)
 	CFI_STARTPROC
 	pushq_cfi %rbp
 	CFI_REL_OFFSET rbp,0
@@ -1359,7 +1359,7 @@ ENTRY(call_softirq)
 	decl PER_CPU_VAR(irq_count)
 	ret
 	CFI_ENDPROC
-END(call_softirq)
+END(do_softirq_own_stack)
 
 #ifdef CONFIG_XEN
 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
......
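
With the rename above, the existing assembly stack-switch routine becomes the
x86-64 implementation of do_softirq_own_stack() directly, so the old C
wrapper around call_softirq() (deleted two hunks below) is no longer needed.
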
@@ -149,35 +149,21 @@ void irq_ctx_init(int cpu)
 	       cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
 }
 
-asmlinkage void do_softirq(void)
+void do_softirq_own_stack(void)
 {
-	unsigned long flags;
 	struct thread_info *curctx;
 	union irq_ctx *irqctx;
 	u32 *isp;
 
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
-
-	if (local_softirq_pending()) {
-		curctx = current_thread_info();
-		irqctx = __this_cpu_read(softirq_ctx);
-		irqctx->tinfo.task = curctx->task;
-		irqctx->tinfo.previous_esp = current_stack_pointer;
-
-		/* build the stack frame on the softirq stack */
-		isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
-
-		call_on_stack(__do_softirq, isp);
-		/*
-		 * Shouldn't happen, we returned above if in_interrupt():
-		 */
-		WARN_ON_ONCE(softirq_count());
-	}
-
-	local_irq_restore(flags);
+	curctx = current_thread_info();
+	irqctx = __this_cpu_read(softirq_ctx);
+	irqctx->tinfo.task = curctx->task;
+	irqctx->tinfo.previous_esp = current_stack_pointer;
+
+	/* build the stack frame on the softirq stack */
+	isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
+
+	call_on_stack(__do_softirq, isp);
 }
 
 bool handle_irq(unsigned irq, struct pt_regs *regs)
......
@@ -87,24 +87,3 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
 	generic_handle_irq_desc(irq, desc);
 	return true;
 }
-
-extern void call_softirq(void);
-
-asmlinkage void do_softirq(void)
-{
-	__u32 pending;
-	unsigned long flags;
-
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
-	pending = local_softirq_pending();
-	/* Switch to interrupt stack */
-	if (pending) {
-		call_softirq();
-		WARN_ON_ONCE(softirq_count());
-	}
-	local_irq_restore(flags);
-}
@@ -19,6 +19,7 @@
 #include <linux/atomic.h>
 #include <asm/ptrace.h>
+#include <asm/irq.h>
 
 /*
  * These correspond to the IORESOURCE_IRQ_* defines in
@@ -374,6 +375,16 @@ struct softirq_action
 asmlinkage void do_softirq(void);
 asmlinkage void __do_softirq(void);
 
+#ifdef __ARCH_HAS_DO_SOFTIRQ
+void do_softirq_own_stack(void);
+#else
+static inline void do_softirq_own_stack(void)
+{
+	__do_softirq();
+}
+#endif
+
 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
 extern void softirq_init(void);
 extern void __raise_softirq_irqoff(unsigned int nr);
......
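
A minimal sketch of how an architecture opts in to the out-of-line variant
(the macro is real; the "foo" paths and body are illustrative placeholders):

    /* arch/foo/include/asm/irq.h */
    #define __ARCH_HAS_DO_SOFTIRQ

    /* arch/foo/kernel/irq.c */
    void do_softirq_own_stack(void)
    {
        /* switch the stack pointer to the per-CPU softirq stack ... */
        __do_softirq();
        /* ... and switch back to the original stack */
    }

Architectures without a dedicated softirq stack leave __ARCH_HAS_DO_SOFTIRQ
undefined and inherit the static inline fallback, which is exactly the old
behavior of running __do_softirq() on whatever stack the caller is on.
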
@@ -29,7 +29,6 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/irq.h>
 
-#include <asm/irq.h>
 /*
    - No shared variables, all the data are CPU local.
    - If a softirq needs serialization, let it serialize itself
@@ -134,7 +133,6 @@ EXPORT_SYMBOL(local_bh_disable);
 
 static void __local_bh_enable(unsigned int cnt)
 {
-	WARN_ON_ONCE(in_irq());
 	WARN_ON_ONCE(!irqs_disabled());
 
 	if (softirq_count() == cnt)
@@ -149,6 +147,7 @@ static void __local_bh_enable(unsigned int cnt)
  */
 void _local_bh_enable(void)
 {
+	WARN_ON_ONCE(in_irq());
 	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
 }
 
@@ -171,8 +170,13 @@ static inline void _local_bh_enable_ip(unsigned long ip)
 	 */
 	sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);
 
-	if (unlikely(!in_interrupt() && local_softirq_pending()))
+	if (unlikely(!in_interrupt() && local_softirq_pending())) {
+		/*
+		 * Run softirq if any is pending, and do it on its own stack,
+		 * as we may already be deep in a task call stack here.
+		 */
 		do_softirq();
+	}
 
 	dec_preempt_count();
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -280,10 +284,11 @@ asmlinkage void __do_softirq(void)
 
 	account_irq_exit_time(current);
 	__local_bh_enable(SOFTIRQ_OFFSET);
+	WARN_ON_ONCE(in_interrupt());
 	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
 }
 
-#ifndef __ARCH_HAS_DO_SOFTIRQ
-
 asmlinkage void do_softirq(void)
 {
@@ -298,13 +303,11 @@ asmlinkage void do_softirq(void)
 	pending = local_softirq_pending();
 
 	if (pending)
-		__do_softirq();
+		do_softirq_own_stack();
 
 	local_irq_restore(flags);
 }
 
-#endif
-
 /*
  * Enter an interrupt context.
  */
@@ -328,10 +331,25 @@ void irq_enter(void)
 
 static inline void invoke_softirq(void)
 {
-	if (!force_irqthreads)
+	if (!force_irqthreads) {
+#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
+		/*
+		 * We can safely execute the softirq on the current stack if
+		 * it is the irq stack, because it should be near empty
+		 * at this stage.
+		 */
 		__do_softirq();
-	else
+#else
+		/*
+		 * Otherwise, irq_exit() is called on the task stack, which
+		 * can potentially be deep already, so run the softirq on its
+		 * own stack to prevent any overrun.
+		 */
+		do_softirq_own_stack();
+#endif
+	} else {
 		wakeup_softirqd();
+	}
 }
 
 static inline void tick_irq_exit(void)
@@ -762,6 +780,10 @@ static void run_ksoftirqd(unsigned int cpu)
 {
 	local_irq_disable();
 	if (local_softirq_pending()) {
+		/*
+		 * We can safely run softirq on the current stack, as we are
+		 * not deep in a task stack here.
+		 */
 		__do_softirq();
 		rcu_note_context_switch(cpu);
 		local_irq_enable();
......
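
Taken together, the three softirq entry points now make their stack choice
explicit: local_bh_enable() goes through do_softirq() and thus
do_softirq_own_stack(), since the task stack may already be deep; irq_exit()
runs __do_softirq() inline only when CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
guarantees it is still on the near-empty irq stack, and switches stacks
otherwise; and ksoftirqd runs __do_softirq() inline because a kthread stack
is known to be shallow.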