Commit a30489c5 authored by Paul E. McKenney's avatar Paul E. McKenney Committed by Paul E. McKenney

rcu: Instrument synchronize_rcu_expedited() for debugfs tracing

This commit adds counters to rcu_state and updates them in
synchronize_rcu_expedited() to provide the data needed for debugfs
tracing.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 40694d66
...@@ -2321,6 +2321,7 @@ void synchronize_sched_expedited(void) ...@@ -2321,6 +2321,7 @@ void synchronize_sched_expedited(void)
(ulong)atomic_long_read(&rsp->expedited_done) + (ulong)atomic_long_read(&rsp->expedited_done) +
ULONG_MAX / 8)) { ULONG_MAX / 8)) {
synchronize_sched(); synchronize_sched();
atomic_long_inc(&rsp->expedited_wrap);
return; return;
} }
...@@ -2341,11 +2342,14 @@ void synchronize_sched_expedited(void) ...@@ -2341,11 +2342,14 @@ void synchronize_sched_expedited(void)
synchronize_sched_expedited_cpu_stop, synchronize_sched_expedited_cpu_stop,
NULL) == -EAGAIN) { NULL) == -EAGAIN) {
put_online_cpus(); put_online_cpus();
atomic_long_inc(&rsp->expedited_tryfail);
/* Check to see if someone else did our work for us. */ /* Check to see if someone else did our work for us. */
s = atomic_long_read(&rsp->expedited_done); s = atomic_long_read(&rsp->expedited_done);
if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) { if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
smp_mb(); /* ensure test happens before caller kfree */ /* ensure test happens before caller kfree */
smp_mb__before_atomic_inc(); /* ^^^ */
atomic_long_inc(&rsp->expedited_workdone1);
return; return;
} }
...@@ -2354,13 +2358,16 @@ void synchronize_sched_expedited(void) ...@@ -2354,13 +2358,16 @@ void synchronize_sched_expedited(void)
udelay(trycount * num_online_cpus()); udelay(trycount * num_online_cpus());
} else { } else {
synchronize_sched(); synchronize_sched();
atomic_long_inc(&rsp->expedited_normal);
return; return;
} }
/* Recheck to see if someone else did our work for us. */ /* Recheck to see if someone else did our work for us. */
s = atomic_long_read(&rsp->expedited_done); s = atomic_long_read(&rsp->expedited_done);
if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) { if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
smp_mb(); /* ensure test happens before caller kfree */ /* ensure test happens before caller kfree */
smp_mb__before_atomic_inc(); /* ^^^ */
atomic_long_inc(&rsp->expedited_workdone2);
return; return;
} }
...@@ -2375,6 +2382,7 @@ void synchronize_sched_expedited(void) ...@@ -2375,6 +2382,7 @@ void synchronize_sched_expedited(void)
snap = atomic_long_read(&rsp->expedited_start); snap = atomic_long_read(&rsp->expedited_start);
smp_mb(); /* ensure read is before try_stop_cpus(). */ smp_mb(); /* ensure read is before try_stop_cpus(). */
} }
atomic_long_inc(&rsp->expedited_stoppedcpus);
/* /*
* Everyone up to our most recent fetch is covered by our grace * Everyone up to our most recent fetch is covered by our grace
...@@ -2383,12 +2391,16 @@ void synchronize_sched_expedited(void) ...@@ -2383,12 +2391,16 @@ void synchronize_sched_expedited(void)
* than we did already did their update. * than we did already did their update.
*/ */
do { do {
atomic_long_inc(&rsp->expedited_done_tries);
s = atomic_long_read(&rsp->expedited_done); s = atomic_long_read(&rsp->expedited_done);
if (ULONG_CMP_GE((ulong)s, (ulong)snap)) { if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
smp_mb(); /* ensure test happens before caller kfree */ /* ensure test happens before caller kfree */
smp_mb__before_atomic_inc(); /* ^^^ */
atomic_long_inc(&rsp->expedited_done_lost);
break; break;
} }
} while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s); } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
atomic_long_inc(&rsp->expedited_done_exit);
put_online_cpus(); put_online_cpus();
} }
......
...@@ -406,6 +406,15 @@ struct rcu_state { ...@@ -406,6 +406,15 @@ struct rcu_state {
atomic_long_t expedited_start; /* Starting ticket. */ atomic_long_t expedited_start; /* Starting ticket. */
atomic_long_t expedited_done; /* Done ticket. */ atomic_long_t expedited_done; /* Done ticket. */
atomic_long_t expedited_wrap; /* # near-wrap incidents. */
atomic_long_t expedited_tryfail; /* # acquisition failures. */
atomic_long_t expedited_workdone1; /* # done by others #1. */
atomic_long_t expedited_workdone2; /* # done by others #2. */
atomic_long_t expedited_normal; /* # fallbacks to normal. */
atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
atomic_long_t expedited_done_tries; /* # tries to update _done. */
atomic_long_t expedited_done_lost; /* # times beaten to _done. */
atomic_long_t expedited_done_exit; /* # times exited _done loop. */
unsigned long jiffies_force_qs; /* Time at which to invoke */ unsigned long jiffies_force_qs; /* Time at which to invoke */
/* force_quiescent_state(). */ /* force_quiescent_state(). */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment