Commit a278d471 authored by Paul E. McKenney, committed by Steven Rostedt (VMware)

rcu: Fix dyntick-idle tracing

The tracing subsystem started using rcu_irq_enter() and rcu_irq_exit()
(with my blessing) to allow the current _rcuidle alternative tracepoint
name to be dispensed with while still maintaining good performance.
Unfortunately, this causes RCU's dyntick-idle entry code's tracing to
appear to RCU like an interrupt that occurs where RCU is not designed
to handle interrupts.
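
For context, the pre-patch ordering (condensed from the lines this patch removes in the diff below; declarations and unrelated code are omitted, so this is an illustrative sketch rather than a verbatim excerpt) was roughly:

	/* Old rcu_eqs_enter(), on the path into idle: */
	oldval = rdtp->dynticks_nesting;
	rdtp->dynticks_nesting = 0;		/* RCU now considers this CPU idle. */
	rcu_eqs_enter_common(oldval, user);

	/* Old rcu_eqs_enter_common(): */
	trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
	/*
	 * If the tracepoint above results in calls to rcu_irq_enter() and
	 * rcu_irq_exit(), RCU sees an "interrupt" arriving while
	 * ->dynticks_nesting is already zero, which is exactly the case it
	 * is not designed to handle.
	 */
	rcu_dynticks_eqs_enter();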

This commit fixes this problem by moving the zeroing of ->dynticks_nesting
after the offending trace_rcu_dyntick() statement, which narrows the
window of vulnerability to a pair of adjacent statements that are now
marked with comments to that effect.
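
Concretely, the zeroing and the actual entry into the extended quiescent state now sit back to back in rcu_eqs_enter_common(), bracketed by stack_tracer_disable()/stack_tracer_enable(); a condensed view of the corresponding hunk below:

	trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0);	/* Tracing still safe here. */
	...
	stack_tracer_disable();
	rdtp->dynticks_nesting = 0;	/* Breaks tracing momentarily. */
	rcu_dynticks_eqs_enter();	/* After this, tracing works again. */
	stack_tracer_enable();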

Link: http://lkml.kernel.org/r/20170405093207.404f8deb@gandalf.local.home
Link: http://lkml.kernel.org/r/20170405193928.GM1600@linux.vnet.ibm.com
Reported-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
parent 8aaf1ee7
@@ -57,6 +57,7 @@
 #include <linux/random.h>
 #include <linux/trace_events.h>
 #include <linux/suspend.h>
+#include <linux/ftrace.h>
 
 #include "tree.h"
 #include "rcu.h"
@@ -771,25 +772,24 @@ cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
 }
 
 /*
- * rcu_eqs_enter_common - current CPU is moving towards extended quiescent state
+ * rcu_eqs_enter_common - current CPU is entering an extended quiescent state
  *
- * If the new value of the ->dynticks_nesting counter now is zero,
- * we really have entered idle, and must do the appropriate accounting.
- * The caller must have disabled interrupts.
+ * Enter idle, doing appropriate accounting.  The caller must have
+ * disabled interrupts.
  */
-static void rcu_eqs_enter_common(long long oldval, bool user)
+static void rcu_eqs_enter_common(bool user)
 {
 	struct rcu_state *rsp;
 	struct rcu_data *rdp;
-	RCU_TRACE(struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);)
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
-	trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
+	trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0);
 	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
 	    !user && !is_idle_task(current)) {
 		struct task_struct *idle __maybe_unused =
 			idle_task(smp_processor_id());
 
-		trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
+		trace_rcu_dyntick(TPS("Error on entry: not idle task"), rdtp->dynticks_nesting, 0);
 		rcu_ftrace_dump(DUMP_ORIG);
 		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
 			  current->pid, current->comm,
@@ -800,7 +800,10 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
 		do_nocb_deferred_wakeup(rdp);
 	}
 	rcu_prepare_for_idle();
-	rcu_dynticks_eqs_enter();
+	stack_tracer_disable();
+	rdtp->dynticks_nesting = 0; /* Breaks tracing momentarily. */
+	rcu_dynticks_eqs_enter(); /* After this, tracing works again. */
+	stack_tracer_enable();
 	rcu_dynticks_task_enter();
 
 	/*
@@ -821,19 +824,15 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
  */
 static void rcu_eqs_enter(bool user)
 {
-	long long oldval;
 	struct rcu_dynticks *rdtp;
 
 	rdtp = this_cpu_ptr(&rcu_dynticks);
-	oldval = rdtp->dynticks_nesting;
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-		     (oldval & DYNTICK_TASK_NEST_MASK) == 0);
-	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
-		rdtp->dynticks_nesting = 0;
-		rcu_eqs_enter_common(oldval, user);
-	} else {
+		     (rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
+	if ((rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
+		rcu_eqs_enter_common(user);
+	else
 		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
-	}
 }
 
 /**
@@ -892,19 +891,18 @@ void rcu_user_enter(void)
  */
 void rcu_irq_exit(void)
 {
-	long long oldval;
 	struct rcu_dynticks *rdtp;
 
 	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
 	rdtp = this_cpu_ptr(&rcu_dynticks);
-	oldval = rdtp->dynticks_nesting;
-	rdtp->dynticks_nesting--;
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-		     rdtp->dynticks_nesting < 0);
-	if (rdtp->dynticks_nesting)
-		trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
-	else
-		rcu_eqs_enter_common(oldval, true);
+		     rdtp->dynticks_nesting < 1);
+	if (rdtp->dynticks_nesting <= 1) {
+		rcu_eqs_enter_common(true);
+	} else {
+		trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nesting, rdtp->dynticks_nesting - 1);
+		rdtp->dynticks_nesting--;
+	}
 	rcu_sysidle_enter(1);
 }