Commit 4aa35e0d authored by Valentin Schneider, committed by Neeraj Upadhyay

context_tracking, rcu: Rename RCU_DYNTICKS_IDX into CT_RCU_WATCHING

The symbols relating to the CT_STATE part of context_tracking.state are now
all prefixed with CT_STATE.

The RCU dynticks counter part of that atomic variable still involves
symbols with different prefixes; align them all to be prefixed with
CT_RCU_WATCHING.
Suggested-by: "Paul E. McKenney" <paulmck@kernel.org>
Signed-off-by: Valentin Schneider <vschneid@redhat.com>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Neeraj Upadhyay <neeraj.upadhyay@kernel.org>
parent d65d411c
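
For reference, a minimal standalone sketch (plain userspace C, not kernel code; the main() driver is purely illustrative) of how the renamed symbols carve up the context_tracking.state word, using the values taken from the diff below: the low two bits hold the CT_STATE value, and CT_RCU_WATCHING is the lowest bit of the RCU watching counter occupying the remaining bits.

#include <stdio.h>

#define CT_STATE_MAX		4
#define CT_RCU_WATCHING		CT_STATE_MAX		/* bit 2 */
#define CT_STATE_MASK		(CT_STATE_MAX - 1)	/* bits 0-1 */
#define CT_RCU_WATCHING_MASK	(~CT_STATE_MASK)	/* bits 2 and up */

int main(void)
{
	int state = CT_RCU_WATCHING;	/* boot-time init: RCU is watching */

	printf("ct_state bits: %d\n", state & CT_STATE_MASK);
	printf("RCU watching:  %d\n", !!(state & CT_RCU_WATCHING));

	state += CT_RCU_WATCHING;	/* ct_state_inc(CT_RCU_WATCHING): enter EQS */
	printf("RCU watching after EQS entry: %d\n", !!(state & CT_RCU_WATCHING));
	return 0;
}

Adding CT_RCU_WATCHING thus both toggles the watching bit and advances the counter by one, which is what the ct_state_inc() call sites in the hunks below rely on.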
@@ -119,7 +119,7 @@ extern void ct_idle_exit(void);
  */
 static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
 {
-	return !(raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX);
+	return !(raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING);
 }
 
 /*
@@ -142,7 +142,7 @@ static __always_inline bool warn_rcu_enter(void)
 	preempt_disable_notrace();
 	if (rcu_dynticks_curr_cpu_in_eqs()) {
 		ret = true;
-		ct_state_inc(RCU_DYNTICKS_IDX);
+		ct_state_inc(CT_RCU_WATCHING);
 	}
 
 	return ret;
@@ -151,7 +151,7 @@ static __always_inline bool warn_rcu_enter(void)
 
 static __always_inline void warn_rcu_exit(bool rcu)
 {
 	if (rcu)
-		ct_state_inc(RCU_DYNTICKS_IDX);
+		ct_state_inc(CT_RCU_WATCHING);
 	preempt_enable_notrace();
 }
...
@@ -18,11 +18,11 @@ enum ctx_state {
 	CT_STATE_MAX		= 4,
 };
 
-/* Even value for idle, else odd. */
-#define RCU_DYNTICKS_IDX	CT_STATE_MAX
+/* Odd value for watching, else even. */
+#define CT_RCU_WATCHING		CT_STATE_MAX
 
 #define CT_STATE_MASK (CT_STATE_MAX - 1)
-#define CT_DYNTICKS_MASK (~CT_STATE_MASK)
+#define CT_RCU_WATCHING_MASK (~CT_STATE_MASK)
 
 struct context_tracking {
 #ifdef CONFIG_CONTEXT_TRACKING_USER
@@ -58,21 +58,21 @@ static __always_inline int __ct_state(void)
 #ifdef CONFIG_CONTEXT_TRACKING_IDLE
 static __always_inline int ct_dynticks(void)
 {
-	return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_DYNTICKS_MASK;
+	return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING_MASK;
 }
 
 static __always_inline int ct_dynticks_cpu(int cpu)
 {
 	struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
 
-	return atomic_read(&ct->state) & CT_DYNTICKS_MASK;
+	return atomic_read(&ct->state) & CT_RCU_WATCHING_MASK;
 }
 
 static __always_inline int ct_dynticks_cpu_acquire(int cpu)
 {
 	struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
 
-	return atomic_read_acquire(&ct->state) & CT_DYNTICKS_MASK;
+	return atomic_read_acquire(&ct->state) & CT_RCU_WATCHING_MASK;
 }
 
 static __always_inline long ct_dynticks_nesting(void)
...
@@ -31,7 +31,7 @@ DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
 	.dynticks_nesting = 1,
 	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
 #endif
-	.state = ATOMIC_INIT(RCU_DYNTICKS_IDX),
+	.state = ATOMIC_INIT(CT_RCU_WATCHING),
 };
 EXPORT_SYMBOL_GPL(context_tracking);
@@ -90,7 +90,7 @@ static noinstr void ct_kernel_exit_state(int offset)
 	rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
 	seq = ct_state_inc(offset);
 	// RCU is no longer watching.  Better be in extended quiescent state!
-	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (seq & RCU_DYNTICKS_IDX));
+	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (seq & CT_RCU_WATCHING));
 }
 
 /*
@@ -110,7 +110,7 @@ static noinstr void ct_kernel_enter_state(int offset)
 	seq = ct_state_inc(offset);
 	// RCU is now watching.  Better not be in an extended quiescent state!
 	rcu_dynticks_task_trace_exit();  // After ->dynticks update!
-	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & RCU_DYNTICKS_IDX));
+	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & CT_RCU_WATCHING));
 }
 
 /*
@@ -236,7 +236,7 @@ void noinstr ct_nmi_exit(void)
 	instrumentation_end();
 
 	// RCU is watching here ...
-	ct_kernel_exit_state(RCU_DYNTICKS_IDX);
+	ct_kernel_exit_state(CT_RCU_WATCHING);
 	// ... but is no longer watching here.
 
 	if (!in_nmi())
@@ -277,7 +277,7 @@ void noinstr ct_nmi_enter(void)
 	rcu_dynticks_task_exit();
 
 	// RCU is not watching here ...
-	ct_kernel_enter_state(RCU_DYNTICKS_IDX);
+	ct_kernel_enter_state(CT_RCU_WATCHING);
 	// ... but is watching here.
 
 	instrumentation_begin();
@@ -317,7 +317,7 @@ void noinstr ct_nmi_enter(void)
 
 void noinstr ct_idle_enter(void)
 {
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !raw_irqs_disabled());
-	ct_kernel_exit(false, RCU_DYNTICKS_IDX + CT_STATE_IDLE);
+	ct_kernel_exit(false, CT_RCU_WATCHING + CT_STATE_IDLE);
 }
 EXPORT_SYMBOL_GPL(ct_idle_enter);
@@ -335,7 +335,7 @@ void noinstr ct_idle_exit(void)
 	unsigned long flags;
 
 	raw_local_irq_save(flags);
-	ct_kernel_enter(false, RCU_DYNTICKS_IDX - CT_STATE_IDLE);
+	ct_kernel_enter(false, CT_RCU_WATCHING - CT_STATE_IDLE);
 	raw_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(ct_idle_exit);
@@ -504,7 +504,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
 			 * CPU doesn't need to maintain the tick for RCU maintenance purposes
 			 * when the CPU runs in userspace.
 			 */
-			ct_kernel_exit(true, RCU_DYNTICKS_IDX + state);
+			ct_kernel_exit(true, CT_RCU_WATCHING + state);
 
 			/*
 			 * Special case if we only track user <-> kernel transitions for tickless
@@ -534,7 +534,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
 			/*
 			 * Tracking for vtime and RCU EQS. Make sure we don't race
 			 * with NMIs. OTOH we don't care about ordering here since
-			 * RCU only requires RCU_DYNTICKS_IDX increments to be fully
+			 * RCU only requires CT_RCU_WATCHING increments to be fully
 			 * ordered.
 			 */
 			raw_atomic_add(state, &ct->state);
@@ -620,7 +620,7 @@ void noinstr __ct_user_exit(enum ctx_state state)
 		 * Exit RCU idle mode while entering the kernel because it can
 		 * run a RCU read side critical section anytime.
 		 */
-		ct_kernel_enter(true, RCU_DYNTICKS_IDX - state);
+		ct_kernel_enter(true, CT_RCU_WATCHING - state);
 		if (state == CT_STATE_USER) {
 			instrumentation_begin();
 			vtime_user_exit(current);
@@ -644,7 +644,7 @@ void noinstr __ct_user_exit(enum ctx_state state)
 		/*
 		 * Tracking for vtime and RCU EQS. Make sure we don't race
 		 * with NMIs. OTOH we don't care about ordering here since
-		 * RCU only requires RCU_DYNTICKS_IDX increments to be fully
+		 * RCU only requires CT_RCU_WATCHING increments to be fully
 		 * ordered.
 		 */
 		raw_atomic_sub(state, &ct->state);
...
@@ -294,9 +294,9 @@ void rcu_softirq_qs(void)
  */
 static void rcu_dynticks_eqs_online(void)
 {
-	if (ct_dynticks() & RCU_DYNTICKS_IDX)
+	if (ct_dynticks() & CT_RCU_WATCHING)
 		return;
-	ct_state_inc(RCU_DYNTICKS_IDX);
+	ct_state_inc(CT_RCU_WATCHING);
 }
 
 /*
@@ -305,7 +305,7 @@ static void rcu_dynticks_eqs_online(void)
  */
 static bool rcu_dynticks_in_eqs(int snap)
 {
-	return !(snap & RCU_DYNTICKS_IDX);
+	return !(snap & CT_RCU_WATCHING);
 }
 
 /*
@@ -335,7 +335,7 @@ bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
 	int snap;
 
 	// If not quiescent, force back to earlier extended quiescent state.
-	snap = ct_dynticks_cpu(cpu) & ~RCU_DYNTICKS_IDX;
+	snap = ct_dynticks_cpu(cpu) & ~CT_RCU_WATCHING;
 	smp_rmb(); // Order ->dynticks and *vp reads.
 	if (READ_ONCE(*vp))
 		return false;  // Non-zero, so report failure;
@@ -361,9 +361,9 @@ notrace void rcu_momentary_dyntick_idle(void)
 	int seq;
 
 	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
-	seq = ct_state_inc(2 * RCU_DYNTICKS_IDX);
+	seq = ct_state_inc(2 * CT_RCU_WATCHING);
 	/* It is illegal to call this from idle state. */
-	WARN_ON_ONCE(!(seq & RCU_DYNTICKS_IDX));
+	WARN_ON_ONCE(!(seq & CT_RCU_WATCHING));
 	rcu_preempt_deferred_qs(current);
 }
 EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
...
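
As a sanity check on that last hunk, a hypothetical standalone sketch (not kernel code) of why rcu_momentary_dyntick_idle() adds 2 * CT_RCU_WATCHING: bumping the watching counter by two publishes a fresh counter value for grace-period detection to observe while leaving the CT_RCU_WATCHING bit itself, and hence the "watching" state, unchanged.

#include <assert.h>

#define CT_RCU_WATCHING 4

int main(void)
{
	int seq = CT_RCU_WATCHING;	/* watching: counter bit set */

	seq += 2 * CT_RCU_WATCHING;	/* the rcu_momentary_dyntick_idle() increment */
	assert(seq & CT_RCU_WATCHING);	/* still watching, as the WARN_ON_ONCE expects */
	return 0;
}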