Commit 02a5c550 authored by Paul E. McKenney

rcu: Abstract extended quiescent state determination

This commit is the fourth step towards full abstraction of all accesses
to the ->dynticks counter, implementing previously open-coded checks and
comparisons in new rcu_dynticks_in_eqs() and rcu_dynticks_in_eqs_since()
functions.  This abstraction will ease changes to the ->dynticks counter
operation.
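
For readers following the diff, the convention these helpers encode is
that ->dynticks is incremented on every transition into and out of an
extended quiescent state (EQS), so an even counter value means the CPU
is in an EQS and an odd value means it is not.  A minimal userspace
sketch of that parity rule (the model_in_eqs() name is illustrative,
not a kernel API):

	#include <stdbool.h>
	#include <stdio.h>

	/* Even counter value => CPU is in an extended quiescent state. */
	static bool model_in_eqs(int snap)
	{
		return !(snap & 0x1);
	}

	int main(void)
	{
		printf("%d\n", model_in_eqs(4));	/* 1: even => in EQS */
		printf("%d\n", model_in_eqs(5));	/* 0: odd  => not in EQS */
		return 0;
	}
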
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
parent 2625d469
@@ -27,6 +27,12 @@
 #include <linux/cache.h>
 
+struct rcu_dynticks;
+static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
+{
+	return 0;
+}
+
 static inline unsigned long get_state_synchronize_rcu(void)
 {
 	return 0;
...
@@ -336,17 +336,48 @@ static void rcu_dynticks_eqs_online(void)
 	atomic_add(0x1, &rdtp->dynticks);
 }
 
+/*
+ * Is the current CPU in an extended quiescent state?
+ *
+ * No ordering, as we are sampling CPU-local information.
+ */
+bool rcu_dynticks_curr_cpu_in_eqs(void)
+{
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+
+	return !(atomic_read(&rdtp->dynticks) & 0x1);
+}
+
 /*
  * Snapshot the ->dynticks counter with full ordering so as to allow
  * stable comparison of this counter with past and future snapshots.
  */
-static int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
+int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
 {
 	int snap = atomic_add_return(0, &rdtp->dynticks);
 
 	return snap;
 }
 
+/*
+ * Return true if the snapshot returned from rcu_dynticks_snap()
+ * indicates that RCU is in an extended quiescent state.
+ */
+static bool rcu_dynticks_in_eqs(int snap)
+{
+	return !(snap & 0x1);
+}
+
+/*
+ * Return true if the CPU corresponding to the specified rcu_dynticks
+ * structure has spent some time in an extended quiescent state since
+ * rcu_dynticks_snap() returned the specified snapshot.
+ */
+static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap)
+{
+	return snap != rcu_dynticks_snap(rdtp);
+}
+
 /*
  * Do a double-increment of the ->dynticks counter to emulate a
  * momentary idle-CPU quiescent state.
@@ -1045,7 +1076,7 @@ void rcu_nmi_enter(void)
 	 * to be in the outermost NMI handler that interrupted an RCU-idle
 	 * period (observation due to Andy Lutomirski).
 	 */
-	if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
+	if (rcu_dynticks_curr_cpu_in_eqs()) {
 		rcu_dynticks_eqs_exit();
 		incby = 1;
 	}
@@ -1071,7 +1102,7 @@ void rcu_nmi_exit(void)
 	 * to us!)
 	 */
 	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
-	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+	WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());
 
 	/*
 	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
@@ -1097,9 +1128,7 @@
  */
 bool notrace __rcu_is_watching(void)
 {
-	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
-
-	return atomic_read(&rdtp->dynticks) & 0x1;
+	return !rcu_dynticks_curr_cpu_in_eqs();
 }
 
 /**
@@ -1184,7 +1213,7 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
 {
 	rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
 	rcu_sysidle_check_cpu(rdp, isidle, maxj);
-	if ((rdp->dynticks_snap & 0x1) == 0) {
+	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
 		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
 		if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
 				 rdp->mynode->gpnum))
@@ -1203,12 +1232,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
 				    bool *isidle, unsigned long *maxj)
 {
-	unsigned int curr;
 	int *rcrmp;
-	unsigned int snap;
-
-	curr = (unsigned int)rcu_dynticks_snap(rdp->dynticks);
-	snap = (unsigned int)rdp->dynticks_snap;
 
 	/*
 	 * If the CPU passed through or entered a dynticks idle phase with
@@ -1218,7 +1242,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
 	 * read-side critical section that started before the beginning
 	 * of the current RCU grace period.
 	 */
-	if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
+	if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) {
 		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
 		rdp->dynticks_fqs++;
 		return 1;
@@ -3807,7 +3831,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
 	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
-	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
+	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
 	rdp->cpu = cpu;
 	rdp->rsp = rsp;
 	rcu_boot_init_nocb_percpu_data(rdp);
...
@@ -595,6 +595,8 @@ extern struct rcu_state rcu_bh_state;
 extern struct rcu_state rcu_preempt_state;
 #endif /* #ifdef CONFIG_PREEMPT_RCU */
 
+int rcu_dynticks_snap(struct rcu_dynticks *rdtp);
+
 #ifdef CONFIG_RCU_BOOST
 DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
...
@@ -360,7 +360,7 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 		rdp->exp_dynticks_snap =
 			rcu_dynticks_snap(rdp->dynticks);
 		if (raw_smp_processor_id() == cpu ||
-		    !(rdp->exp_dynticks_snap & 0x1) ||
+		    rcu_dynticks_in_eqs(rdp->exp_dynticks_snap) ||
 		    !(rnp->qsmaskinitnext & rdp->grpmask))
 			mask_ofl_test |= rdp->grpmask;
 	}
@@ -383,8 +383,8 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 		if (!(mask_ofl_ipi & mask))
 			continue;
 retry_ipi:
-		if (rcu_dynticks_snap(rdp->dynticks) !=
-		    rdp->exp_dynticks_snap) {
+		if (rcu_dynticks_in_eqs_since(rdp->dynticks,
+					      rdp->exp_dynticks_snap)) {
 			mask_ofl_test |= mask;
 			continue;
 		}
...
@@ -1643,7 +1643,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
 	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
 	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
 	       ticks_value, ticks_title,
-	       atomic_read(&rdtp->dynticks) & 0xfff,
+	       rcu_dynticks_snap(rdtp) & 0xfff,
 	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
 	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
 	       READ_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
...
@@ -124,7 +124,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
 		   rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu),
 		   rdp->core_needs_qs);
 	seq_printf(m, " dt=%d/%llx/%d df=%lu",
-		   atomic_read(&rdp->dynticks->dynticks),
+		   rcu_dynticks_snap(rdp->dynticks),
 		   rdp->dynticks->dynticks_nesting,
 		   rdp->dynticks->dynticks_nmi_nesting,
 		   rdp->dynticks_fqs);
...
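
One subtlety in the rcu_implicit_dynticks_qs() hunk above: the old
open-coded test accepted either an even current counter value or a
counter that had advanced by at least two, whereas the new
rcu_dynticks_in_eqs_since() reports any movement away from the
snapshot.  The two agree because this path is reached only when the
saved snapshot was odd (the CPU was not in an EQS), so any increment
implies an intervening EQS.  A hedged standalone model of that
equivalence, with UINT_CMP_GE() approximated by a signed wraparound
comparison:

	#include <assert.h>
	#include <stdbool.h>

	/* Old open-coded test: even now, or advanced by at least two. */
	static bool old_test(unsigned int curr, unsigned int snap)
	{
		return (curr & 0x1) == 0 || (int)(curr - (snap + 2)) >= 0;
	}

	/* New test, as in rcu_dynticks_in_eqs_since(): any movement. */
	static bool new_test(unsigned int curr, unsigned int snap)
	{
		return curr != snap;
	}

	int main(void)
	{
		unsigned int snap = 5;	/* odd: CPU was not in an EQS */

		assert(old_test(6, snap) == new_test(6, snap)); /* entered EQS */
		assert(old_test(7, snap) == new_test(7, snap)); /* EQS entry+exit */
		assert(old_test(5, snap) == new_test(5, snap)); /* no movement */
		return 0;
	}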