Commit e6b80a3b authored by Frederic Weisbecker, committed by Paul E. McKenney

rcu: Detect illegal rcu dereference in extended quiescent state

Report that none of the rcu read lock maps are held while in an RCU
extended quiescent state (the section between rcu_idle_enter()
and rcu_idle_exit()). This helps detect any use of rcu_dereference()
and friends from within the section in idle where RCU is not allowed.

This way we can guarantee an extended quiescent window where the CPU
can be put in dyntick idle mode or can simply avoid being part of any
global grace period completion while in the idle loop.

Uses of RCU from such mode are totally ignored by RCU, hence the
importance of these checks.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
parent a0f8eefb
...@@ -228,6 +228,15 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head) ...@@ -228,6 +228,15 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
#ifdef CONFIG_DEBUG_LOCK_ALLOC #ifdef CONFIG_DEBUG_LOCK_ALLOC
#ifdef CONFIG_PROVE_RCU
extern int rcu_is_cpu_idle(void);
#else /* !CONFIG_PROVE_RCU */
static inline int rcu_is_cpu_idle(void)
{
return 0;
}
#endif /* else !CONFIG_PROVE_RCU */
extern struct lockdep_map rcu_lock_map; extern struct lockdep_map rcu_lock_map;
# define rcu_read_acquire() \ # define rcu_read_acquire() \
lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
...@@ -262,6 +271,8 @@ static inline int rcu_read_lock_held(void) ...@@ -262,6 +271,8 @@ static inline int rcu_read_lock_held(void)
{ {
if (!debug_lockdep_rcu_enabled()) if (!debug_lockdep_rcu_enabled())
return 1; return 1;
if (rcu_is_cpu_idle())
return 0;
return lock_is_held(&rcu_lock_map); return lock_is_held(&rcu_lock_map);
} }
...@@ -285,6 +296,19 @@ extern int rcu_read_lock_bh_held(void); ...@@ -285,6 +296,19 @@ extern int rcu_read_lock_bh_held(void);
* *
* Check debug_lockdep_rcu_enabled() to prevent false positives during boot * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
* and while lockdep is disabled. * and while lockdep is disabled.
*
* Note that if the CPU is in the idle loop from an RCU point of
* view (ie: that we are in the section between rcu_idle_enter() and
* rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
* did an rcu_read_lock(). The reason for this is that RCU ignores CPUs
* that are in such a section, considering these as in extended quiescent
* state, so such a CPU is effectively never in an RCU read-side critical
* section regardless of what RCU primitives it invokes. This state of
* affairs is required --- we need to keep an RCU-free window in idle
* where the CPU may possibly enter into low power mode. This way we can
* notice an extended quiescent state to other CPUs that started a grace
* period. Otherwise we would delay any grace period as long as we run in
* the idle task.
*/ */
#ifdef CONFIG_PREEMPT_COUNT #ifdef CONFIG_PREEMPT_COUNT
static inline int rcu_read_lock_sched_held(void) static inline int rcu_read_lock_sched_held(void)
...@@ -293,6 +317,8 @@ static inline int rcu_read_lock_sched_held(void) ...@@ -293,6 +317,8 @@ static inline int rcu_read_lock_sched_held(void)
if (!debug_lockdep_rcu_enabled()) if (!debug_lockdep_rcu_enabled())
return 1; return 1;
if (rcu_is_cpu_idle())
return 0;
if (debug_locks) if (debug_locks)
lockdep_opinion = lock_is_held(&rcu_sched_lock_map); lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
return lockdep_opinion || preempt_count() != 0 || irqs_disabled(); return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
......
...@@ -93,6 +93,8 @@ int rcu_read_lock_bh_held(void) ...@@ -93,6 +93,8 @@ int rcu_read_lock_bh_held(void)
{ {
if (!debug_lockdep_rcu_enabled()) if (!debug_lockdep_rcu_enabled())
return 1; return 1;
if (rcu_is_cpu_idle())
return 0;
return in_softirq() || irqs_disabled(); return in_softirq() || irqs_disabled();
} }
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
......
...@@ -157,6 +157,7 @@ int rcu_is_cpu_idle(void) ...@@ -157,6 +157,7 @@ int rcu_is_cpu_idle(void)
{ {
return !rcu_dynticks_nesting; return !rcu_dynticks_nesting;
} }
EXPORT_SYMBOL(rcu_is_cpu_idle);
#endif /* #ifdef CONFIG_PROVE_RCU */ #endif /* #ifdef CONFIG_PROVE_RCU */
......
...@@ -567,6 +567,7 @@ int rcu_is_cpu_idle(void) ...@@ -567,6 +567,7 @@ int rcu_is_cpu_idle(void)
preempt_enable(); preempt_enable();
return ret; return ret;
} }
EXPORT_SYMBOL(rcu_is_cpu_idle);
#endif /* #ifdef CONFIG_PROVE_RCU */ #endif /* #ifdef CONFIG_PROVE_RCU */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment