Commit f02fc963 authored by Thomas Gleixner

softirq: Move various protections into inline helpers

To allow reuse of the bulk of softirq processing code for RT and to avoid
#ifdeffery all over the place, split protections for various code sections
out into inline helpers so the RT variant can just replace them in one go.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Tested-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210309085727.310118772@linutronix.de
parent 6516b386
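The pattern the patch introduces is easy to demonstrate outside the kernel. The standalone C sketch below is illustrative only, not kernel code: the bulk of the processing code calls small inline begin/end helpers, and a compile-time switch replaces the helpers in one go without touching the shared code. The names VARIANT_RT, run_begin() and run_end() are made up for this example.

/*
 * Standalone sketch, not kernel code: the shared bulk of the work
 * calls small inline begin/end helpers, and a compile-time switch
 * replaces the helpers in one go without touching the shared code.
 * The "RT" side below (a pthread mutex) is a made-up stand-in, not
 * the actual PREEMPT_RT implementation.
 */
#include <stdio.h>
#include <pthread.h>

#ifdef VARIANT_RT
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
/* RT-style: keep the section preemptible, serialize with a lock. */
static inline void run_begin(void) { pthread_mutex_lock(&lock); }
static inline void run_end(void)   { pthread_mutex_unlock(&lock); }
#else
/* non-RT-style: stands in for local_irq_disable()/local_irq_enable() */
static inline void run_begin(void) { puts("protection on"); }
static inline void run_end(void)   { puts("protection off"); }
#endif

/* The bulk of the processing code is identical for both variants. */
static void process_pending(void)
{
	run_begin();
	puts("handling pending work");
	run_end();
}

int main(void)
{
	process_pending();	/* build with -DVARIANT_RT to swap helpers */
	return 0;
}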
@@ -207,6 +207,32 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 }
 EXPORT_SYMBOL(__local_bh_enable_ip);
 
+static inline void softirq_handle_begin(void)
+{
+	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
+}
+
+static inline void softirq_handle_end(void)
+{
+	__local_bh_enable(SOFTIRQ_OFFSET);
+	WARN_ON_ONCE(in_interrupt());
+}
+
+static inline void ksoftirqd_run_begin(void)
+{
+	local_irq_disable();
+}
+
+static inline void ksoftirqd_run_end(void)
+{
+	local_irq_enable();
+}
+
+static inline bool should_wake_ksoftirqd(void)
+{
+	return true;
+}
+
 static inline void invoke_softirq(void)
 {
 	if (ksoftirqd_running(local_softirq_pending()))
@@ -319,7 +345,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 
 	pending = local_softirq_pending();
 
-	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
+	softirq_handle_begin();
 	in_hardirq = lockdep_softirq_start();
 	account_softirq_enter(current);
 
@@ -370,8 +396,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 
 	account_softirq_exit(current);
 	lockdep_softirq_end(in_hardirq);
-	__local_bh_enable(SOFTIRQ_OFFSET);
-	WARN_ON_ONCE(in_interrupt());
+	softirq_handle_end();
 	current_restore_flags(old_flags, PF_MEMALLOC);
 }
@@ -466,7 +491,7 @@ inline void raise_softirq_irqoff(unsigned int nr)
 	 * Otherwise we wake up ksoftirqd to make sure we
 	 * schedule the softirq soon.
 	 */
-	if (!in_interrupt())
+	if (!in_interrupt() && should_wake_ksoftirqd())
 		wakeup_softirqd();
 }
@@ -698,18 +723,18 @@ static int ksoftirqd_should_run(unsigned int cpu)
 
 static void run_ksoftirqd(unsigned int cpu)
 {
-	local_irq_disable();
+	ksoftirqd_run_begin();
 	if (local_softirq_pending()) {
 		/*
 		 * We can safely run softirq on inline stack, as we are not deep
 		 * in the task stack here.
 		 */
 		__do_softirq();
-		local_irq_enable();
+		ksoftirqd_run_end();
 		cond_resched();
 		return;
 	}
-	local_irq_enable();
+	ksoftirqd_run_end();
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
...
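For context on where the helpers are headed: a follow-up RT change can supply its own definitions, which is the point of factoring them out here. The sketch below is illustrative only and is not part of this commit; in particular, softirq_ctrl.cnt is a hypothetical per-CPU counter standing in for whatever state the real RT variant tracks.

/*
 * Illustrative only -- NOT part of this commit. A PREEMPT_RT build
 * could replace the helpers along these lines, so that ksoftirqd
 * serializes via the (preemptible) bottom-half machinery instead of
 * relying on hard interrupt disabling alone. softirq_ctrl.cnt is a
 * hypothetical per-CPU counter of bottom-half-disabled sections.
 */
#ifdef CONFIG_PREEMPT_RT
static inline void ksoftirqd_run_begin(void)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	local_irq_disable();
}

static inline void ksoftirqd_run_end(void)
{
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	local_irq_enable();
}

static inline bool should_wake_ksoftirqd(void)
{
	/* Don't wake ksoftirqd while bottom halves are held off. */
	return !this_cpu_read(softirq_ctrl.cnt);
}
#endif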