Commit 1006fae3 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull IRQ changes from Ingo Molnar:
 "The biggest change this cycle are the softirq/hardirq stack
  interaction and nesting fixes, cleanups and reorganizations from
  Frederic.  This is the longer followup story to the softirq nesting
  fix that is already upstream (commit ded79754: "irq: Force hardirq
  exit's softirq processing on its own stack")"

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  irqchip: bcm2835: Convert to use IRQCHIP_DECLARE macro
  powerpc: Tell about irq stack coverage
  x86: Tell about irq stack coverage
  irq: Optimize softirq stack selection in irq exit
  irq: Justify the various softirq stack choices
  irq: Improve a bit softirq debugging
  irq: Optimize call to softirq on hardirq exit
  irq: Consolidate do_softirq() arch overridden implementations
  x86/irq: Correct comment about i8259 initialization
parents 70fdcb83 5702941e
...@@ -390,6 +390,16 @@ config HAVE_UNDERSCORE_SYMBOL_PREFIX ...@@ -390,6 +390,16 @@ config HAVE_UNDERSCORE_SYMBOL_PREFIX
Some architectures generate an _ in front of C symbols; things like Some architectures generate an _ in front of C symbols; things like
module loading and assembly files need to know about this. module loading and assembly files need to know about this.
config HAVE_IRQ_EXIT_ON_IRQ_STACK
bool
help
	  Architecture executes not only the irq handler but also
	  irq_exit() on the irq stack. This way we can process softirqs
	  on this irq stack instead of switching to a new one when we
	  call __do_softirq() at the end of a hardirq.
This spares a stack switch and improves cache usage on softirq
processing.
# #
# ABI hall of shame # ABI hall of shame
# #
......
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/irqchip/bcm2835.h> #include <linux/irqchip.h>
#include <linux/of_address.h> #include <linux/of_address.h>
#include <linux/of_platform.h> #include <linux/of_platform.h>
#include <linux/clk/bcm2835.h> #include <linux/clk/bcm2835.h>
...@@ -130,8 +130,7 @@ static const char * const bcm2835_compat[] = { ...@@ -130,8 +130,7 @@ static const char * const bcm2835_compat[] = {
DT_MACHINE_START(BCM2835, "BCM2835") DT_MACHINE_START(BCM2835, "BCM2835")
.map_io = bcm2835_map_io, .map_io = bcm2835_map_io,
.init_irq = bcm2835_init_irq, .init_irq = irqchip_init,
.handle_irq = bcm2835_handle_irq,
.init_machine = bcm2835_init, .init_machine = bcm2835_init,
.restart = bcm2835_restart, .restart = bcm2835_restart,
.dt_compat = bcm2835_compat .dt_compat = bcm2835_compat
......
...@@ -159,44 +159,30 @@ void irq_ctx_exit(int cpu) ...@@ -159,44 +159,30 @@ void irq_ctx_exit(int cpu)
extern asmlinkage void __do_softirq(void); extern asmlinkage void __do_softirq(void);
asmlinkage void do_softirq(void) void do_softirq_own_stack(void)
{ {
unsigned long flags;
struct thread_info *curctx; struct thread_info *curctx;
union irq_ctx *irqctx; union irq_ctx *irqctx;
u32 *isp; u32 *isp;
if (in_interrupt()) curctx = current_thread_info();
return; irqctx = softirq_ctx[smp_processor_id()];
irqctx->tinfo.task = curctx->task;
local_irq_save(flags);
/* build the stack frame on the softirq stack */
if (local_softirq_pending()) { isp = (u32 *) ((char *)irqctx + sizeof(struct thread_info));
curctx = current_thread_info();
irqctx = softirq_ctx[smp_processor_id()]; asm volatile (
irqctx->tinfo.task = curctx->task; "MOV D0.5,%0\n"
"SWAP A0StP,D0.5\n"
/* build the stack frame on the softirq stack */ "CALLR D1RtP,___do_softirq\n"
isp = (u32 *) ((char *)irqctx + sizeof(struct thread_info)); "MOV A0StP,D0.5\n"
:
asm volatile ( : "r" (isp)
"MOV D0.5,%0\n" : "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4",
"SWAP A0StP,D0.5\n" "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP",
"CALLR D1RtP,___do_softirq\n" "D0.5"
"MOV A0StP,D0.5\n" );
:
: "r" (isp)
: "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4",
"D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP",
"D0.5"
);
/*
* Shouldn't happen, we returned above if in_interrupt():
*/
WARN_ON_ONCE(softirq_count());
}
local_irq_restore(flags);
} }
#endif #endif
......
...@@ -495,22 +495,9 @@ static void execute_on_irq_stack(void *func, unsigned long param1) ...@@ -495,22 +495,9 @@ static void execute_on_irq_stack(void *func, unsigned long param1)
*irq_stack_in_use = 1; *irq_stack_in_use = 1;
} }
asmlinkage void do_softirq(void) void do_softirq_own_stack(void)
{ {
__u32 pending; execute_on_irq_stack(__do_softirq, 0);
unsigned long flags;
if (in_interrupt())
return;
local_irq_save(flags);
pending = local_softirq_pending();
if (pending)
execute_on_irq_stack(__do_softirq, 0);
local_irq_restore(flags);
} }
#endif /* CONFIG_IRQSTACKS */ #endif /* CONFIG_IRQSTACKS */
......
...@@ -138,6 +138,7 @@ config PPC ...@@ -138,6 +138,7 @@ config PPC
select OLD_SIGSUSPEND select OLD_SIGSUSPEND
select OLD_SIGACTION if PPC32 select OLD_SIGACTION if PPC32
select HAVE_DEBUG_STACKOVERFLOW select HAVE_DEBUG_STACKOVERFLOW
select HAVE_IRQ_EXIT_ON_IRQ_STACK
config EARLY_PRINTK config EARLY_PRINTK
bool bool
......
...@@ -594,7 +594,7 @@ void irq_ctx_init(void) ...@@ -594,7 +594,7 @@ void irq_ctx_init(void)
} }
} }
static inline void do_softirq_onstack(void) void do_softirq_own_stack(void)
{ {
struct thread_info *curtp, *irqtp; struct thread_info *curtp, *irqtp;
...@@ -612,21 +612,6 @@ static inline void do_softirq_onstack(void) ...@@ -612,21 +612,6 @@ static inline void do_softirq_onstack(void)
set_bits(irqtp->flags, &curtp->flags); set_bits(irqtp->flags, &curtp->flags);
} }
void do_softirq(void)
{
unsigned long flags;
if (in_interrupt())
return;
local_irq_save(flags);
if (local_softirq_pending())
do_softirq_onstack();
local_irq_restore(flags);
}
irq_hw_number_t virq_to_hw(unsigned int virq) irq_hw_number_t virq_to_hw(unsigned int virq)
{ {
struct irq_data *irq_data = irq_get_irq_data(virq); struct irq_data *irq_data = irq_get_irq_data(virq);
......
...@@ -157,39 +157,29 @@ int arch_show_interrupts(struct seq_file *p, int prec) ...@@ -157,39 +157,29 @@ int arch_show_interrupts(struct seq_file *p, int prec)
/* /*
* Switch to the asynchronous interrupt stack for softirq execution. * Switch to the asynchronous interrupt stack for softirq execution.
*/ */
asmlinkage void do_softirq(void) void do_softirq_own_stack(void)
{ {
unsigned long flags, old, new; unsigned long old, new;
if (in_interrupt()) /* Get current stack pointer. */
return; asm volatile("la %0,0(15)" : "=a" (old));
/* Check against async. stack address range. */
local_irq_save(flags); new = S390_lowcore.async_stack;
if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) {
if (local_softirq_pending()) { /* Need to switch to the async. stack. */
/* Get current stack pointer. */ new -= STACK_FRAME_OVERHEAD;
asm volatile("la %0,0(15)" : "=a" (old)); ((struct stack_frame *) new)->back_chain = old;
/* Check against async. stack address range. */ asm volatile(" la 15,0(%0)\n"
new = S390_lowcore.async_stack; " basr 14,%2\n"
if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) { " la 15,0(%1)\n"
/* Need to switch to the async. stack. */ : : "a" (new), "a" (old),
new -= STACK_FRAME_OVERHEAD; "a" (__do_softirq)
((struct stack_frame *) new)->back_chain = old; : "0", "1", "2", "3", "4", "5", "14",
"cc", "memory" );
asm volatile(" la 15,0(%0)\n" } else {
" basr 14,%2\n" /* We are already on the async stack. */
" la 15,0(%1)\n" __do_softirq();
: : "a" (new), "a" (old),
"a" (__do_softirq)
: "0", "1", "2", "3", "4", "5", "14",
"cc", "memory" );
} else {
/* We are already on the async stack. */
__do_softirq();
}
} }
local_irq_restore(flags);
} }
/* /*
......
...@@ -149,47 +149,32 @@ void irq_ctx_exit(int cpu) ...@@ -149,47 +149,32 @@ void irq_ctx_exit(int cpu)
hardirq_ctx[cpu] = NULL; hardirq_ctx[cpu] = NULL;
} }
asmlinkage void do_softirq(void) void do_softirq_own_stack(void)
{ {
unsigned long flags;
struct thread_info *curctx; struct thread_info *curctx;
union irq_ctx *irqctx; union irq_ctx *irqctx;
u32 *isp; u32 *isp;
if (in_interrupt()) curctx = current_thread_info();
return; irqctx = softirq_ctx[smp_processor_id()];
irqctx->tinfo.task = curctx->task;
local_irq_save(flags); irqctx->tinfo.previous_sp = current_stack_pointer;
if (local_softirq_pending()) { /* build the stack frame on the softirq stack */
curctx = current_thread_info(); isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
irqctx = softirq_ctx[smp_processor_id()];
irqctx->tinfo.task = curctx->task; __asm__ __volatile__ (
irqctx->tinfo.previous_sp = current_stack_pointer; "mov r15, r9 \n"
"jsr @%0 \n"
/* build the stack frame on the softirq stack */ /* switch to the softirq stack */
isp = (u32 *)((char *)irqctx + sizeof(*irqctx)); " mov %1, r15 \n"
/* restore the thread stack */
__asm__ __volatile__ ( "mov r9, r15 \n"
"mov r15, r9 \n" : /* no outputs */
"jsr @%0 \n" : "r" (__do_softirq), "r" (isp)
/* switch to the softirq stack */ : "memory", "r0", "r1", "r2", "r3", "r4",
" mov %1, r15 \n" "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
/* restore the thread stack */ );
"mov r9, r15 \n"
: /* no outputs */
: "r" (__do_softirq), "r" (isp)
: "memory", "r0", "r1", "r2", "r3", "r4",
"r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
);
/*
* Shouldn't happen, we returned above if in_interrupt():
*/
WARN_ON_ONCE(softirq_count());
}
local_irq_restore(flags);
} }
#else #else
static inline void handle_one_irq(unsigned int irq) static inline void handle_one_irq(unsigned int irq)
......
...@@ -698,30 +698,19 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs) ...@@ -698,30 +698,19 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
set_irq_regs(old_regs); set_irq_regs(old_regs);
} }
void do_softirq(void) void do_softirq_own_stack(void)
{ {
unsigned long flags; void *orig_sp, *sp = softirq_stack[smp_processor_id()];
if (in_interrupt())
return;
local_irq_save(flags);
if (local_softirq_pending()) { sp += THREAD_SIZE - 192 - STACK_BIAS;
void *orig_sp, *sp = softirq_stack[smp_processor_id()];
sp += THREAD_SIZE - 192 - STACK_BIAS;
__asm__ __volatile__("mov %%sp, %0\n\t"
"mov %1, %%sp"
: "=&r" (orig_sp)
: "r" (sp));
__do_softirq();
__asm__ __volatile__("mov %0, %%sp"
: : "r" (orig_sp));
}
local_irq_restore(flags); __asm__ __volatile__("mov %%sp, %0\n\t"
"mov %1, %%sp"
: "=&r" (orig_sp)
: "r" (sp));
__do_softirq();
__asm__ __volatile__("mov %0, %%sp"
: : "r" (orig_sp));
} }
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
......
...@@ -123,6 +123,7 @@ config X86 ...@@ -123,6 +123,7 @@ config X86
select COMPAT_OLD_SIGACTION if IA32_EMULATION select COMPAT_OLD_SIGACTION if IA32_EMULATION
select RTC_LIB select RTC_LIB
select HAVE_DEBUG_STACKOVERFLOW select HAVE_DEBUG_STACKOVERFLOW
select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
config INSTRUCTION_DECODER config INSTRUCTION_DECODER
def_bool y def_bool y
......
...@@ -1342,7 +1342,7 @@ bad_gs: ...@@ -1342,7 +1342,7 @@ bad_gs:
.previous .previous
/* Call softirq on interrupt stack. Interrupts are off. */ /* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq) ENTRY(do_softirq_own_stack)
CFI_STARTPROC CFI_STARTPROC
pushq_cfi %rbp pushq_cfi %rbp
CFI_REL_OFFSET rbp,0 CFI_REL_OFFSET rbp,0
...@@ -1359,7 +1359,7 @@ ENTRY(call_softirq) ...@@ -1359,7 +1359,7 @@ ENTRY(call_softirq)
decl PER_CPU_VAR(irq_count) decl PER_CPU_VAR(irq_count)
ret ret
CFI_ENDPROC CFI_ENDPROC
END(call_softirq) END(do_softirq_own_stack)
#ifdef CONFIG_XEN #ifdef CONFIG_XEN
zeroentry xen_hypervisor_callback xen_do_hypervisor_callback zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
......
...@@ -312,8 +312,7 @@ static void init_8259A(int auto_eoi) ...@@ -312,8 +312,7 @@ static void init_8259A(int auto_eoi)
*/ */
outb_pic(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */ outb_pic(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */
/* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 on x86-64, /* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 */
to 0x20-0x27 on i386 */
outb_pic(IRQ0_VECTOR, PIC_MASTER_IMR); outb_pic(IRQ0_VECTOR, PIC_MASTER_IMR);
/* 8259A-1 (the master) has a slave on IR2 */ /* 8259A-1 (the master) has a slave on IR2 */
......
...@@ -149,35 +149,21 @@ void irq_ctx_init(int cpu) ...@@ -149,35 +149,21 @@ void irq_ctx_init(int cpu)
cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu)); cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
} }
asmlinkage void do_softirq(void) void do_softirq_own_stack(void)
{ {
unsigned long flags;
struct thread_info *curctx; struct thread_info *curctx;
union irq_ctx *irqctx; union irq_ctx *irqctx;
u32 *isp; u32 *isp;
if (in_interrupt()) curctx = current_thread_info();
return; irqctx = __this_cpu_read(softirq_ctx);
irqctx->tinfo.task = curctx->task;
local_irq_save(flags); irqctx->tinfo.previous_esp = current_stack_pointer;
if (local_softirq_pending()) {
curctx = current_thread_info();
irqctx = __this_cpu_read(softirq_ctx);
irqctx->tinfo.task = curctx->task;
irqctx->tinfo.previous_esp = current_stack_pointer;
/* build the stack frame on the softirq stack */
isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
call_on_stack(__do_softirq, isp); /* build the stack frame on the softirq stack */
/* isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
* Shouldn't happen, we returned above if in_interrupt():
*/
WARN_ON_ONCE(softirq_count());
}
local_irq_restore(flags); call_on_stack(__do_softirq, isp);
} }
bool handle_irq(unsigned irq, struct pt_regs *regs) bool handle_irq(unsigned irq, struct pt_regs *regs)
......
...@@ -87,24 +87,3 @@ bool handle_irq(unsigned irq, struct pt_regs *regs) ...@@ -87,24 +87,3 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
generic_handle_irq_desc(irq, desc); generic_handle_irq_desc(irq, desc);
return true; return true;
} }
extern void call_softirq(void);
asmlinkage void do_softirq(void)
{
__u32 pending;
unsigned long flags;
if (in_interrupt())
return;
local_irq_save(flags);
pending = local_softirq_pending();
/* Switch to interrupt stack */
if (pending) {
call_softirq();
WARN_ON_ONCE(softirq_count());
}
local_irq_restore(flags);
}
...@@ -49,9 +49,11 @@ ...@@ -49,9 +49,11 @@
#include <linux/of_address.h> #include <linux/of_address.h>
#include <linux/of_irq.h> #include <linux/of_irq.h>
#include <linux/irqdomain.h> #include <linux/irqdomain.h>
#include <linux/irqchip/bcm2835.h>
#include <asm/exception.h> #include <asm/exception.h>
#include <asm/mach/irq.h>
#include "irqchip.h"
/* Put the bank and irq (32 bits) into the hwirq */ /* Put the bank and irq (32 bits) into the hwirq */
#define MAKE_HWIRQ(b, n) ((b << 5) | (n)) #define MAKE_HWIRQ(b, n) ((b << 5) | (n))
...@@ -93,6 +95,8 @@ struct armctrl_ic { ...@@ -93,6 +95,8 @@ struct armctrl_ic {
}; };
static struct armctrl_ic intc __read_mostly; static struct armctrl_ic intc __read_mostly;
static asmlinkage void __exception_irq_entry bcm2835_handle_irq(
struct pt_regs *regs);
static void armctrl_mask_irq(struct irq_data *d) static void armctrl_mask_irq(struct irq_data *d)
{ {
...@@ -164,17 +168,9 @@ static int __init armctrl_of_init(struct device_node *node, ...@@ -164,17 +168,9 @@ static int __init armctrl_of_init(struct device_node *node,
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
} }
} }
return 0;
}
static struct of_device_id irq_of_match[] __initconst = {
{ .compatible = "brcm,bcm2835-armctrl-ic", .data = armctrl_of_init },
{ }
};
void __init bcm2835_init_irq(void) set_handle_irq(bcm2835_handle_irq);
{ return 0;
of_irq_init(irq_of_match);
} }
/* /*
...@@ -200,7 +196,7 @@ static void armctrl_handle_shortcut(int bank, struct pt_regs *regs, ...@@ -200,7 +196,7 @@ static void armctrl_handle_shortcut(int bank, struct pt_regs *regs,
handle_IRQ(irq_linear_revmap(intc.domain, irq), regs); handle_IRQ(irq_linear_revmap(intc.domain, irq), regs);
} }
asmlinkage void __exception_irq_entry bcm2835_handle_irq( static asmlinkage void __exception_irq_entry bcm2835_handle_irq(
struct pt_regs *regs) struct pt_regs *regs)
{ {
u32 stat, irq; u32 stat, irq;
...@@ -222,3 +218,5 @@ asmlinkage void __exception_irq_entry bcm2835_handle_irq( ...@@ -222,3 +218,5 @@ asmlinkage void __exception_irq_entry bcm2835_handle_irq(
} }
} }
} }
IRQCHIP_DECLARE(bcm2835_armctrl_ic, "brcm,bcm2835-armctrl-ic", armctrl_of_init);
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#include <linux/atomic.h> #include <linux/atomic.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/irq.h>
/* /*
* These correspond to the IORESOURCE_IRQ_* defines in * These correspond to the IORESOURCE_IRQ_* defines in
...@@ -374,6 +375,16 @@ struct softirq_action ...@@ -374,6 +375,16 @@ struct softirq_action
asmlinkage void do_softirq(void); asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void); asmlinkage void __do_softirq(void);
#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
{
__do_softirq();
}
#endif
extern void open_softirq(int nr, void (*action)(struct softirq_action *)); extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void); extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr); extern void __raise_softirq_irqoff(unsigned int nr);
......
/*
* Copyright (C) 2010 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_IRQCHIP_BCM2835_H_
#define __LINUX_IRQCHIP_BCM2835_H_
#include <asm/exception.h>
extern void bcm2835_init_irq(void);
extern asmlinkage void __exception_irq_entry bcm2835_handle_irq(
struct pt_regs *regs);
#endif
...@@ -29,7 +29,6 @@ ...@@ -29,7 +29,6 @@
#define CREATE_TRACE_POINTS #define CREATE_TRACE_POINTS
#include <trace/events/irq.h> #include <trace/events/irq.h>
#include <asm/irq.h>
/* /*
- No shared variables, all the data are CPU local. - No shared variables, all the data are CPU local.
- If a softirq needs serialization, let it serialize itself - If a softirq needs serialization, let it serialize itself
...@@ -134,7 +133,6 @@ EXPORT_SYMBOL(local_bh_disable); ...@@ -134,7 +133,6 @@ EXPORT_SYMBOL(local_bh_disable);
static void __local_bh_enable(unsigned int cnt) static void __local_bh_enable(unsigned int cnt)
{ {
WARN_ON_ONCE(in_irq());
WARN_ON_ONCE(!irqs_disabled()); WARN_ON_ONCE(!irqs_disabled());
if (softirq_count() == cnt) if (softirq_count() == cnt)
...@@ -149,6 +147,7 @@ static void __local_bh_enable(unsigned int cnt) ...@@ -149,6 +147,7 @@ static void __local_bh_enable(unsigned int cnt)
*/ */
void _local_bh_enable(void) void _local_bh_enable(void)
{ {
WARN_ON_ONCE(in_irq());
__local_bh_enable(SOFTIRQ_DISABLE_OFFSET); __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
} }
...@@ -171,8 +170,13 @@ static inline void _local_bh_enable_ip(unsigned long ip) ...@@ -171,8 +170,13 @@ static inline void _local_bh_enable_ip(unsigned long ip)
*/ */
sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1); sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);
if (unlikely(!in_interrupt() && local_softirq_pending())) if (unlikely(!in_interrupt() && local_softirq_pending())) {
/*
* Run softirq if any pending. And do it in its own stack
* as we may be calling this deep in a task call stack already.
*/
do_softirq(); do_softirq();
}
dec_preempt_count(); dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS #ifdef CONFIG_TRACE_IRQFLAGS
...@@ -280,10 +284,11 @@ asmlinkage void __do_softirq(void) ...@@ -280,10 +284,11 @@ asmlinkage void __do_softirq(void)
account_irq_exit_time(current); account_irq_exit_time(current);
__local_bh_enable(SOFTIRQ_OFFSET); __local_bh_enable(SOFTIRQ_OFFSET);
WARN_ON_ONCE(in_interrupt());
tsk_restore_flags(current, old_flags, PF_MEMALLOC); tsk_restore_flags(current, old_flags, PF_MEMALLOC);
} }
#ifndef __ARCH_HAS_DO_SOFTIRQ
asmlinkage void do_softirq(void) asmlinkage void do_softirq(void)
{ {
...@@ -298,13 +303,11 @@ asmlinkage void do_softirq(void) ...@@ -298,13 +303,11 @@ asmlinkage void do_softirq(void)
pending = local_softirq_pending(); pending = local_softirq_pending();
if (pending) if (pending)
__do_softirq(); do_softirq_own_stack();
local_irq_restore(flags); local_irq_restore(flags);
} }
#endif
/* /*
* Enter an interrupt context. * Enter an interrupt context.
*/ */
...@@ -329,15 +332,21 @@ void irq_enter(void) ...@@ -329,15 +332,21 @@ void irq_enter(void)
static inline void invoke_softirq(void) static inline void invoke_softirq(void)
{ {
if (!force_irqthreads) { if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
/* /*
* We can safely execute softirq on the current stack if * We can safely execute softirq on the current stack if
* it is the irq stack, because it should be near empty * it is the irq stack, because it should be near empty
* at this stage. But we have no way to know if the arch * at this stage.
* calls irq_exit() on the irq stack. So call softirq
* in its own stack to prevent from any overrun on top
* of a potentially deep task stack.
*/ */
do_softirq(); __do_softirq();
#else
/*
* Otherwise, irq_exit() is called on the task stack that can
* be potentially deep already. So call softirq in its own stack
* to prevent from any overrun.
*/
do_softirq_own_stack();
#endif
} else { } else {
wakeup_softirqd(); wakeup_softirqd();
} }
...@@ -771,6 +780,10 @@ static void run_ksoftirqd(unsigned int cpu) ...@@ -771,6 +780,10 @@ static void run_ksoftirqd(unsigned int cpu)
{ {
local_irq_disable(); local_irq_disable();
if (local_softirq_pending()) { if (local_softirq_pending()) {
/*
* We can safely run softirq on inline stack, as we are not deep
* in the task stack here.
*/
__do_softirq(); __do_softirq();
rcu_note_context_switch(cpu); rcu_note_context_switch(cpu);
local_irq_enable(); local_irq_enable();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment