Commit 7c9906ca authored by Paul E. McKenney

rcu: Don't redundantly disable irqs in rcu_irq_{enter,exit}()

This commit replaces a local_irq_save()/local_irq_restore() pair with
a lockdep assertion that interrupts are already disabled.  This should
remove the corresponding overhead from the interrupt entry/exit fastpaths.

This change was inspired by Iftekhar Ahmed's mutation testing, which
showed that removing rcu_irq_enter()'s call to local_irq_restore()
had no effect, suggesting that interrupts were already disabled at
these call sites anyway.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent d117c8aa
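
For orientation, here is a minimal, self-contained C sketch of the pattern the diff below applies in two places. Everything suffixed _sketch, plus the boolean standing in for the CPU interrupt flag, is invented for illustration and is not a kernel API: the fast-path hook now assumes its caller has already disabled interrupts and merely asserts that, while a new *_irqson() wrapper keeps the old save/disable/restore behavior for call sites that run with interrupts enabled.

/*
 * Userspace sketch only: a boolean stands in for the hardware interrupt
 * state, and assert() plays the role of the lockdep assertion.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool irqs_disabled_sketch;              /* stand-in for irqs_disabled() */

static void local_irq_save_sketch(bool *flags)
{
        *flags = irqs_disabled_sketch;         /* remember the previous state... */
        irqs_disabled_sketch = true;           /* ...and disable "interrupts" */
}

static void local_irq_restore_sketch(bool flags)
{
        irqs_disabled_sketch = flags;          /* put the previous state back */
}

/* Fast path: the caller must have interrupts disabled; only assert it. */
static void irq_enter_hook_sketch(void)
{
        assert(irqs_disabled_sketch);          /* role of RCU_LOCKDEP_WARN() */
        /* ...per-CPU nesting bookkeeping would go here... */
}

/* Wrapper for call sites that may still run with interrupts enabled. */
static void irq_enter_hook_irqson_sketch(void)
{
        bool flags;

        local_irq_save_sketch(&flags);
        irq_enter_hook_sketch();
        local_irq_restore_sketch(flags);
}

int main(void)
{
        irq_enter_hook_irqson_sketch();        /* safe: wrapper disables first */
        printf("wrapper path ok\n");
        return 0;
}

In the actual patch, rcu_irq_enter()/rcu_irq_exit() keep only their bookkeeping plus an RCU_LOCKDEP_WARN() assertion, and rcu_irq_enter_irqson()/rcu_irq_exit_irqson() supply the save/restore wrapper for call sites such as RCU_NONIDLE() and the tracepoint glue.
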
@@ -379,9 +379,9 @@ static inline void rcu_init_nohz(void)
  */
 #define RCU_NONIDLE(a) \
         do { \
-                rcu_irq_enter(); \
+                rcu_irq_enter_irqson(); \
                 do { a; } while (0); \
-                rcu_irq_exit(); \
+                rcu_irq_exit_irqson(); \
         } while (0)
 
 /*
...
@@ -181,6 +181,14 @@ static inline void rcu_irq_enter(void)
 {
 }
 
+static inline void rcu_irq_exit_irqson(void)
+{
+}
+
+static inline void rcu_irq_enter_irqson(void)
+{
+}
+
 static inline void rcu_irq_exit(void)
 {
 }
...
@@ -97,6 +97,8 @@ void rcu_idle_enter(void);
 void rcu_idle_exit(void);
 void rcu_irq_enter(void);
 void rcu_irq_exit(void);
+void rcu_irq_enter_irqson(void);
+void rcu_irq_exit_irqson(void);
 
 void exit_rcu(void);
...
@@ -171,8 +171,8 @@ extern void syscall_unregfunc(void);
                                 TP_PROTO(data_proto), \
                                 TP_ARGS(data_args), \
                                 TP_CONDITION(cond), \
-                                rcu_irq_enter(), \
-                                rcu_irq_exit()); \
+                                rcu_irq_enter_irqson(), \
+                                rcu_irq_exit_irqson()); \
         }
 #else
 #define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args)
...
@@ -732,7 +732,7 @@ void rcu_user_enter(void)
  *
  * Exit from an interrupt handler, which might possibly result in entering
  * idle mode, in other words, leaving the mode in which read-side critical
- * sections can occur.
+ * sections can occur.  The caller must have disabled interrupts.
  *
  * This code assumes that the idle loop never does anything that might
  * result in unbalanced calls to irq_enter() and irq_exit().  If your
@@ -745,11 +745,10 @@ void rcu_user_enter(void)
  */
 void rcu_irq_exit(void)
 {
-        unsigned long flags;
         long long oldval;
         struct rcu_dynticks *rdtp;
 
-        local_irq_save(flags);
+        RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
         rdtp = this_cpu_ptr(&rcu_dynticks);
         oldval = rdtp->dynticks_nesting;
         rdtp->dynticks_nesting--;
@@ -760,6 +759,17 @@ void rcu_irq_exit(void)
         else
                 rcu_eqs_enter_common(oldval, true);
         rcu_sysidle_enter(1);
+}
+
+/*
+ * Wrapper for rcu_irq_exit() where interrupts are enabled.
+ */
+void rcu_irq_exit_irqson(void)
+{
+        unsigned long flags;
+
+        local_irq_save(flags);
+        rcu_irq_exit();
         local_irq_restore(flags);
 }
@@ -857,7 +867,7 @@ void rcu_user_exit(void)
  *
  * Enter an interrupt handler, which might possibly result in exiting
  * idle mode, in other words, entering the mode in which read-side critical
- * sections can occur.
+ * sections can occur.  The caller must have disabled interrupts.
  *
  * Note that the Linux kernel is fully capable of entering an interrupt
  * handler that it never exits, for example when doing upcalls to
@@ -873,11 +883,10 @@ void rcu_user_exit(void)
  */
 void rcu_irq_enter(void)
 {
-        unsigned long flags;
         struct rcu_dynticks *rdtp;
         long long oldval;
 
-        local_irq_save(flags);
+        RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!");
         rdtp = this_cpu_ptr(&rcu_dynticks);
         oldval = rdtp->dynticks_nesting;
         rdtp->dynticks_nesting++;
@@ -888,6 +897,17 @@ void rcu_irq_enter(void)
         else
                 rcu_eqs_exit_common(oldval, true);
         rcu_sysidle_exit(1);
+}
+
+/*
+ * Wrapper for rcu_irq_enter() where interrupts are enabled.
+ */
+void rcu_irq_enter_irqson(void)
+{
+        unsigned long flags;
+
+        local_irq_save(flags);
+        rcu_irq_enter();
         local_irq_restore(flags);
 }
...
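
A usage note on the rcupdate.h and tracepoint.h hunks above: those call sites may run with interrupts enabled, which is why they switch to the *_irqson() wrappers instead of relying on the new assertion. Below is a hedged, userspace-flavored sketch of the macro shape; the *_SKETCH names are illustrative stand-ins, not the kernel's symbols.

#include <stdio.h>

/* Illustrative stand-ins for the wrappers introduced by this commit. */
static void rcu_irq_enter_irqson_sketch(void) { puts("enter: irqs saved and disabled inside"); }
static void rcu_irq_exit_irqson_sketch(void)  { puts("exit:  irqs restored inside"); }

/* Same shape as the RCU_NONIDLE() hunk above, using the sketch wrappers. */
#define RCU_NONIDLE_SKETCH(a) \
        do { \
                rcu_irq_enter_irqson_sketch(); \
                do { a; } while (0); \
                rcu_irq_exit_irqson_sketch(); \
        } while (0)

int main(void)
{
        RCU_NONIDLE_SKETCH(puts("statement that must be visible to RCU"));
        return 0;
}
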