Commit ac0e3202 authored by Ingo Molnar's avatar Ingo Molnar

Merge branch 'rcu/srcu' of...

Merge branch 'rcu/srcu' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu

Pull SRCU changes from Paul E. McKenney.

  " These include debugging aids, updates that move towards the goal
    of permitting srcu_read_lock() and srcu_read_unlock() to be used
    from idle and offline CPUs, and a few small fixes. "
Signed-off-by: default avatarIngo Molnar <mingo@kernel.org>
parents 0351096e 7a6b55e7
...@@ -151,30 +151,14 @@ void srcu_barrier(struct srcu_struct *sp); ...@@ -151,30 +151,14 @@ void srcu_barrier(struct srcu_struct *sp);
* Checks debug_lockdep_rcu_enabled() to prevent false positives during boot * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
* and while lockdep is disabled. * and while lockdep is disabled.
* *
* Note that if the CPU is in the idle loop from an RCU point of view * Note that SRCU is based on its own state machine and it doesn't
* (ie: that we are in the section between rcu_idle_enter() and * rely on normal RCU; it can be called from the CPU which
* rcu_idle_exit()) then srcu_read_lock_held() returns false even if * is in the idle loop from an RCU point of view or offline.
* the CPU did an srcu_read_lock(). The reason for this is that RCU
* ignores CPUs that are in such a section, considering these as in
* extended quiescent state, so such a CPU is effectively never in an
* RCU read-side critical section regardless of what RCU primitives it
* invokes. This state of affairs is required --- we need to keep an
* RCU-free window in idle where the CPU may possibly enter into low
* power mode. This way we can notice an extended quiescent state to
* other CPUs that started a grace period. Otherwise we would delay any
* grace period as long as we run in the idle task.
*
* Similarly, we avoid claiming an SRCU read lock held if the current
* CPU is offline.
*/ */
static inline int srcu_read_lock_held(struct srcu_struct *sp) static inline int srcu_read_lock_held(struct srcu_struct *sp)
{ {
if (!debug_lockdep_rcu_enabled()) if (!debug_lockdep_rcu_enabled())
return 1; return 1;
if (rcu_is_cpu_idle())
return 0;
if (!rcu_lockdep_current_cpu_online())
return 0;
return lock_is_held(&sp->dep_map); return lock_is_held(&sp->dep_map);
} }
...@@ -236,8 +220,6 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) ...@@ -236,8 +220,6 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
int retval = __srcu_read_lock(sp); int retval = __srcu_read_lock(sp);
rcu_lock_acquire(&(sp)->dep_map); rcu_lock_acquire(&(sp)->dep_map);
rcu_lockdep_assert(!rcu_is_cpu_idle(),
"srcu_read_lock() used illegally while idle");
return retval; return retval;
} }
...@@ -251,8 +233,6 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) ...@@ -251,8 +233,6 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
static inline void srcu_read_unlock(struct srcu_struct *sp, int idx) static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
__releases(sp) __releases(sp)
{ {
rcu_lockdep_assert(!rcu_is_cpu_idle(),
"srcu_read_unlock() used illegally while idle");
rcu_lock_release(&(sp)->dep_map); rcu_lock_release(&(sp)->dep_map);
__srcu_read_unlock(sp, idx); __srcu_read_unlock(sp, idx);
} }
......
...@@ -282,12 +282,8 @@ static int srcu_readers_active(struct srcu_struct *sp) ...@@ -282,12 +282,8 @@ static int srcu_readers_active(struct srcu_struct *sp)
*/ */
void cleanup_srcu_struct(struct srcu_struct *sp) void cleanup_srcu_struct(struct srcu_struct *sp)
{ {
int sum; if (WARN_ON(srcu_readers_active(sp)))
return; /* Leakage unless caller handles error. */
sum = srcu_readers_active(sp);
WARN_ON(sum); /* Leakage unless caller handles error. */
if (sum != 0)
return;
free_percpu(sp->per_cpu_ref); free_percpu(sp->per_cpu_ref);
sp->per_cpu_ref = NULL; sp->per_cpu_ref = NULL;
} }
...@@ -302,9 +298,8 @@ int __srcu_read_lock(struct srcu_struct *sp) ...@@ -302,9 +298,8 @@ int __srcu_read_lock(struct srcu_struct *sp)
{ {
int idx; int idx;
idx = ACCESS_ONCE(sp->completed) & 0x1;
preempt_disable(); preempt_disable();
idx = rcu_dereference_index_check(sp->completed,
rcu_read_lock_sched_held()) & 0x1;
ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1; ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
smp_mb(); /* B */ /* Avoid leaking the critical section. */ smp_mb(); /* B */ /* Avoid leaking the critical section. */
ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1; ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
...@@ -321,10 +316,8 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock); ...@@ -321,10 +316,8 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
*/ */
void __srcu_read_unlock(struct srcu_struct *sp, int idx) void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{ {
preempt_disable();
smp_mb(); /* C */ /* Avoid leaking the critical section. */ smp_mb(); /* C */ /* Avoid leaking the critical section. */
ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1; this_cpu_dec(sp->per_cpu_ref->c[idx]);
preempt_enable();
} }
EXPORT_SYMBOL_GPL(__srcu_read_unlock); EXPORT_SYMBOL_GPL(__srcu_read_unlock);
...@@ -423,6 +416,7 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount) ...@@ -423,6 +416,7 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
!lock_is_held(&rcu_sched_lock_map), !lock_is_held(&rcu_sched_lock_map),
"Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section"); "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section");
might_sleep();
init_completion(&rcu.completion); init_completion(&rcu.completion);
head->next = NULL; head->next = NULL;
...@@ -455,10 +449,12 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount) ...@@ -455,10 +449,12 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
* synchronize_srcu - wait for prior SRCU read-side critical-section completion * synchronize_srcu - wait for prior SRCU read-side critical-section completion
* @sp: srcu_struct with which to synchronize. * @sp: srcu_struct with which to synchronize.
* *
* Flip the completed counter, and wait for the old count to drain to zero. * Wait for the counts of both indexes to drain to zero. To avoid the
* As with classic RCU, the updater must use some separate means of * possible starvation of synchronize_srcu(), it waits for the count of
* synchronizing concurrent updates. Can block; must be called from * the index=((->completed & 1) ^ 1) to drain to zero at first,
* process context. * and then flip the completed and wait for the count of the other index.
*
* Can block; must be called from process context.
* *
* Note that it is illegal to call synchronize_srcu() from the corresponding * Note that it is illegal to call synchronize_srcu() from the corresponding
* SRCU read-side critical section; doing so will result in deadlock. * SRCU read-side critical section; doing so will result in deadlock.
...@@ -480,12 +476,11 @@ EXPORT_SYMBOL_GPL(synchronize_srcu); ...@@ -480,12 +476,11 @@ EXPORT_SYMBOL_GPL(synchronize_srcu);
* Wait for an SRCU grace period to elapse, but be more aggressive about * Wait for an SRCU grace period to elapse, but be more aggressive about
* spinning rather than blocking when waiting. * spinning rather than blocking when waiting.
* *
* Note that it is illegal to call this function while holding any lock * Note that it is also illegal to call synchronize_srcu_expedited()
* that is acquired by a CPU-hotplug notifier. It is also illegal to call * from the corresponding SRCU read-side critical section;
* synchronize_srcu_expedited() from the corresponding SRCU read-side * doing so will result in deadlock. However, it is perfectly legal
* critical section; doing so will result in deadlock. However, it is * to call synchronize_srcu_expedited() on one srcu_struct from some
* perfectly legal to call synchronize_srcu_expedited() on one srcu_struct * other srcu_struct's read-side critical section, as long as
* from some other srcu_struct's read-side critical section, as long as
* the resulting graph of srcu_structs is acyclic. * the resulting graph of srcu_structs is acyclic.
*/ */
void synchronize_srcu_expedited(struct srcu_struct *sp) void synchronize_srcu_expedited(struct srcu_struct *sp)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment