Commit a83eff0a authored by Paul E. McKenney, committed by Paul E. McKenney

rcu: Add tracing for _rcu_barrier()

This commit adds event tracing for _rcu_barrier() execution.  The new
rcu_barrier tracepoint is defined only if RCU_TRACE=y.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
parent cf3a9c48
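As background for the diff below: _rcu_barrier() posts one rcu_barrier_callback() per CPU with queued callbacks, counts the postings in ->barrier_cpu_count, and the callback that drops the counter to zero wakes the waiting task. The new "IRQ", "CB", and "LastCB" trace events mark exactly those steps. The following is a minimal userspace sketch of the same count-then-complete pattern, with POSIX threads standing in for per-CPU callbacks; all names here are invented for illustration and are not part of the patch.

/*
 * Userspace analogue (a sketch, not kernel code) of the pattern the
 * "IRQ"/"CB"/"LastCB" phases trace: post one callback per worker,
 * count the postings, and let the final decrementer signal completion.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NWORKERS 4

static atomic_int barrier_cpu_count;		/* like rsp->barrier_cpu_count */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_cv = PTHREAD_COND_INITIALIZER;
static int done;

static void *barrier_callback(void *arg)
{
	long id = (long)arg;

	/* "CB" vs. "LastCB": only the final decrementer completes. */
	if (atomic_fetch_sub(&barrier_cpu_count, 1) == 1) {
		printf("worker %ld: LastCB\n", id);
		pthread_mutex_lock(&lock);
		done = 1;
		pthread_cond_signal(&done_cv);
		pthread_mutex_unlock(&lock);
	} else {
		printf("worker %ld: CB\n", id);
	}
	return NULL;
}

int main(void)
{
	pthread_t tid[NWORKERS];
	long i;

	/* Like "IRQ": one count per posted callback, before any can run. */
	atomic_store(&barrier_cpu_count, NWORKERS);
	for (i = 0; i < NWORKERS; i++)
		pthread_create(&tid[i], NULL, barrier_callback, (void *)i);

	/* Like wait_for_completion() in _rcu_barrier(). */
	pthread_mutex_lock(&lock);
	while (!done)
		pthread_cond_wait(&done_cv, &lock);
	pthread_mutex_unlock(&lock);

	for (i = 0; i < NWORKERS; i++)
		pthread_join(tid[i], NULL);
	puts("barrier complete");
	return 0;
}

In the kernel, the wakeup is complete(&rsp->barrier_completion); the sketch's condition variable plays the role of that completion.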
@@ -541,6 +541,50 @@ TRACE_EVENT(rcu_torture_read,
		  __entry->rcutorturename, __entry->rhp)
);
/*
 * Tracepoint for _rcu_barrier() execution.  The string "s" describes
 * the _rcu_barrier phase:
 *	"Begin": _rcu_barrier() started.
 *	"Check": _rcu_barrier() checking for piggybacking.
 *	"EarlyExit": _rcu_barrier() piggybacked, thus early exit.
 *	"Inc1": _rcu_barrier() piggyback check counter incremented.
 *	"Offline": _rcu_barrier() found offline CPU.
 *	"OnlineQ": _rcu_barrier() found online CPU with callbacks.
 *	"OnlineNQ": _rcu_barrier() found online CPU, no callbacks.
 *	"IRQ": An rcu_barrier_callback() callback posted on remote CPU.
 *	"CB": An rcu_barrier_callback() invoked a callback, not the last.
 *	"LastCB": An rcu_barrier_callback() invoked the last callback.
 *	"Inc2": _rcu_barrier() piggyback check counter incremented.
 * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
 * is the count of remaining callbacks, and "done" is the piggybacking count.
 */
TRACE_EVENT(rcu_barrier,

	TP_PROTO(char *rcuname, char *s, int cpu, int cnt, unsigned long done),

	TP_ARGS(rcuname, s, cpu, cnt, done),

	TP_STRUCT__entry(
		__field(char *, rcuname)
		__field(char *, s)
		__field(int, cpu)
		__field(int, cnt)
		__field(unsigned long, done)
	),

	TP_fast_assign(
		__entry->rcuname = rcuname;
		__entry->s = s;
		__entry->cpu = cpu;
		__entry->cnt = cnt;
		__entry->done = done;
	),

	TP_printk("%s %s cpu %d remaining %d # %lu",
		  __entry->rcuname, __entry->s, __entry->cpu, __entry->cnt,
		  __entry->done)
);
#else /* #ifdef CONFIG_RCU_TRACE */
#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
@@ -564,6 +608,7 @@ TRACE_EVENT(rcu_torture_read,
#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
	do { } while (0)
#define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
#define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
#endif /* #else #ifdef CONFIG_RCU_TRACE */
...
@@ -2259,6 +2259,17 @@ static int rcu_cpu_has_callbacks(int cpu)
	       rcu_preempt_cpu_has_callbacks(cpu);
}
/*
* Helper function for _rcu_barrier() tracing. If tracing is disabled,
* the compiler is expected to optimize this away.
*/
static void _rcu_barrier_trace(struct rcu_state *rsp, char *s,
			       int cpu, unsigned long done)
{
	trace_rcu_barrier(rsp->name, s, cpu,
			  atomic_read(&rsp->barrier_cpu_count), done);
}
/*
 * RCU callback function for _rcu_barrier().  If we are last, wake
 * up the task executing _rcu_barrier().
@@ -2268,8 +2279,12 @@ static void rcu_barrier_callback(struct rcu_head *rhp)
	struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
	struct rcu_state *rsp = rdp->rsp;

	if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
		_rcu_barrier_trace(rsp, "LastCB", -1, rsp->n_barrier_done);
		complete(&rsp->barrier_completion);
	} else {
		_rcu_barrier_trace(rsp, "CB", -1, rsp->n_barrier_done);
	}
}
/*
@@ -2280,6 +2295,7 @@ static void rcu_barrier_func(void *type)
	struct rcu_state *rsp = type;
	struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);

	_rcu_barrier_trace(rsp, "IRQ", -1, rsp->n_barrier_done);
	atomic_inc(&rsp->barrier_cpu_count);
	rsp->call(&rdp->barrier_head, rcu_barrier_callback);
}
@@ -2298,6 +2314,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
	unsigned long snap_done;

	init_rcu_head_on_stack(&rd.barrier_head);
	_rcu_barrier_trace(rsp, "Begin", -1, snap);

	/* Take mutex to serialize concurrent rcu_barrier() requests. */
	mutex_lock(&rsp->barrier_mutex);
@@ -2315,7 +2332,9 @@ static void _rcu_barrier(struct rcu_state *rsp)
	 * value up to the next even number and adds two before comparing.
	 */
	snap_done = ACCESS_ONCE(rsp->n_barrier_done);
	_rcu_barrier_trace(rsp, "Check", -1, snap_done);
	if (ULONG_CMP_GE(snap_done, ((snap + 1) & ~0x1) + 2)) {
		_rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
		smp_mb(); /* caller's subsequent code after above check. */
		mutex_unlock(&rsp->barrier_mutex);
		return;
@@ -2328,6 +2347,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
	 */
	ACCESS_ONCE(rsp->n_barrier_done)++;
	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
	_rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
	smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */

	/*
@@ -2364,13 +2384,19 @@ static void _rcu_barrier(struct rcu_state *rsp)
		preempt_disable();
		rdp = per_cpu_ptr(rsp->rda, cpu);
		if (cpu_is_offline(cpu)) {
			_rcu_barrier_trace(rsp, "Offline", cpu,
					   rsp->n_barrier_done);
			preempt_enable();
			while (cpu_is_offline(cpu) && ACCESS_ONCE(rdp->qlen))
				schedule_timeout_interruptible(1);
		} else if (ACCESS_ONCE(rdp->qlen)) {
			_rcu_barrier_trace(rsp, "OnlineQ", cpu,
					   rsp->n_barrier_done);
			smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
			preempt_enable();
		} else {
			_rcu_barrier_trace(rsp, "OnlineNQ", cpu,
					   rsp->n_barrier_done);
			preempt_enable();
		}
	}
@@ -2403,6 +2429,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
	smp_mb(); /* Keep increment after above mechanism. */
	ACCESS_ONCE(rsp->n_barrier_done)++;
	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
	_rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
	smp_mb(); /* Keep increment before caller's subsequent code. */

	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
...
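The "Check"/"EarlyExit" path above decides whether a caller may piggyback on a barrier that completed entirely after its snapshot of ->n_barrier_done. The counter is incremented once at the start ("Inc1", making it odd) and once at the end ("Inc2", making it even), so a caller with snapshot snap may skip the work only once the counter reaches ((snap + 1) & ~0x1) + 2. Below is a minimal userspace sketch of that arithmetic; can_piggyback() is a name invented for this illustration, and ULONG_CMP_GE() mirrors the kernel's wraparound-safe "a >= b" comparison for unsigned long counters.

/*
 * Minimal userspace sketch (not kernel code) of the piggybacking check
 * traced as "Check"/"EarlyExit" above.
 */
#include <limits.h>
#include <stdio.h>

/* Wraparound-safe "a >= b" for unsigned long counters. */
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

static int can_piggyback(unsigned long snap, unsigned long snap_done)
{
	/*
	 * ->n_barrier_done is even when idle and odd while a barrier
	 * runs.  Round the snapshot up to even, then add 2: a barrier
	 * must have both started and finished after the snapshot.
	 */
	return ULONG_CMP_GE(snap_done, ((snap + 1) & ~0x1UL) + 2);
}

int main(void)
{
	printf("%d\n", can_piggyback(4, 5));	/* 0: barrier still in flight */
	printf("%d\n", can_piggyback(4, 6));	/* 1: full barrier since snap */
	printf("%d\n", can_piggyback(5, 6));	/* 0: snap taken mid-barrier */
	printf("%d\n", can_piggyback(5, 8));	/* 1: next full barrier done */
	return 0;
}

An "EarlyExit" trace thus means some other caller's barrier fully covered this caller's needs, leaving only the smp_mb() and the mutex release before returning.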