Commit 031aeee0 authored by Paul E. McKenney

srcu: Improve rcu_seq grace-period-counter abstraction

The expedited grace-period code contains several open-coded shifts
that know the format of an rcu_seq grace-period counter, which is not
particularly good style.  This commit therefore creates a new
rcu_seq_ctr() function that extracts the counter portion of the
sequence number, and an rcu_seq_state() function that extracts the
low-order state bit.  This commit prepares for SRCU callback
parallelization, which will require two state bits.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 91e27c35
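For readers unfamiliar with the rcu_seq encoding, the following is a minimal
user-space sketch (illustrative only, not part of this commit) of the layout
the new macros formalize: the low-order RCU_SEQ_CTR_SHIFT bit(s) of the
sequence number hold the grace-period state, and the remaining bits hold the
grace-period count.

/* Minimal user-space sketch of the rcu_seq encoding; illustrative only. */
#include <assert.h>
#include <stdio.h>

#define RCU_SEQ_CTR_SHIFT       1
#define RCU_SEQ_STATE_MASK      ((1UL << RCU_SEQ_CTR_SHIFT) - 1)

static unsigned long rcu_seq_ctr(unsigned long s)
{
        return s >> RCU_SEQ_CTR_SHIFT;  /* counter lives above the state bit(s) */
}

static unsigned long rcu_seq_state(unsigned long s)
{
        return s & RCU_SEQ_STATE_MASK;  /* state lives in the low-order bit(s) */
}

int main(void)
{
        unsigned long seq = 0;

        seq = seq + 1;                          /* as in rcu_seq_start(): GP in flight */
        assert(rcu_seq_state(seq) == 1);
        seq = (seq | RCU_SEQ_STATE_MASK) + 1;   /* as in rcu_seq_end(): count advances */
        assert(rcu_seq_state(seq) == 0);
        assert(rcu_seq_ctr(seq) == 1);
        printf("ctr=%lu state=%lu\n", rcu_seq_ctr(seq), rcu_seq_state(seq));
        return 0;
}

Widening RCU_SEQ_CTR_SHIFT to 2 (the two state bits the commit message
anticipates for SRCU) changes nothing else in this sketch, which is the point
of the abstraction.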
@@ -61,20 +61,41 @@
  * Grace-period counter management.
  */
 
+#define RCU_SEQ_CTR_SHIFT       1
+#define RCU_SEQ_STATE_MASK      ((1 << RCU_SEQ_CTR_SHIFT) - 1)
+
+/*
+ * Return the counter portion of a sequence number previously returned
+ * by rcu_seq_snap() or rcu_seq_current().
+ */
+static inline unsigned long rcu_seq_ctr(unsigned long s)
+{
+        return s >> RCU_SEQ_CTR_SHIFT;
+}
+
+/*
+ * Return the state portion of a sequence number previously returned
+ * by rcu_seq_snap() or rcu_seq_current().
+ */
+static inline int rcu_seq_state(unsigned long s)
+{
+        return s & RCU_SEQ_STATE_MASK;
+}
+
 /* Adjust sequence number for start of update-side operation. */
 static inline void rcu_seq_start(unsigned long *sp)
 {
         WRITE_ONCE(*sp, *sp + 1);
         smp_mb(); /* Ensure update-side operation after counter increment. */
-        WARN_ON_ONCE(!(*sp & 0x1));
+        WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
 }
 
 /* Adjust sequence number for end of update-side operation. */
 static inline void rcu_seq_end(unsigned long *sp)
 {
         smp_mb(); /* Ensure update-side operation before counter increment. */
-        WARN_ON_ONCE(!(*sp & 0x1));
-        WRITE_ONCE(*sp, *sp + 1);
+        WARN_ON_ONCE(!rcu_seq_state(*sp));
+        WRITE_ONCE(*sp, (*sp | RCU_SEQ_STATE_MASK) + 1);
 }
 
 /* Take a snapshot of the update side's sequence number. */
@@ -82,7 +103,7 @@ static inline unsigned long rcu_seq_snap(unsigned long *sp)
 {
         unsigned long s;
 
-        s = (READ_ONCE(*sp) + 3) & ~0x1;
+        s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
         smp_mb(); /* Above access must not bleed into critical section. */
         return s;
 }
...
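A worked example for the new rcu_seq_snap() arithmetic: with
RCU_SEQ_CTR_SHIFT == 1, RCU_SEQ_STATE_MASK is 1 and the new expression reduces
exactly to the old (s + 3) & ~0x1.  If *sp == 4 (counter 2, no grace period in
flight), the snapshot is 6, the value *sp will reach after one full grace
period.  If *sp == 5 (counter 2, grace period in flight), the snapshot is 8,
since the in-flight grace period must end and one more must complete.  Written
in terms of RCU_SEQ_STATE_MASK, the same round-up-past-one-full-grace-period
logic stays correct for any width of the state field.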
@@ -292,7 +292,7 @@ static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
                         trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
                                                   rnp->grplo, rnp->grphi,
                                                   TPS("wait"));
-                        wait_event(rnp->exp_wq[(s >> 1) & 0x3],
+                        wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
                                    sync_exp_work_done(rsp,
                                                       &rdp->exp_workdone2, s));
                         return true;
@@ -534,7 +534,7 @@ static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
                         spin_unlock(&rnp->exp_lock);
                 }
                 smp_mb(); /* All above changes before wakeup. */
-                wake_up_all(&rnp->exp_wq[(rsp->expedited_sequence >> 1) & 0x3]);
+                wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rsp->expedited_sequence) & 0x3]);
         }
         trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
         mutex_unlock(&rsp->exp_wake_mutex);
@@ -612,9 +612,8 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
         /* Wait for expedited grace period to complete. */
         rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
         rnp = rcu_get_root(rsp);
-        wait_event(rnp->exp_wq[(s >> 1) & 0x3],
-                   sync_exp_work_done(rsp,
-                                      &rdp->exp_workdone0, s));
+        wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
+                   sync_exp_work_done(rsp, &rdp->exp_workdone0, s));
         smp_mb(); /* Workqueue actions happen before return. */
 
         /* Let the next expedited grace period start. */
...
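A note on the second file's hunks: the & 0x3 folds the grace-period count onto
a four-entry array of expedited wait queues in each rcu_node, so waiters on
successive expedited grace periods land on different queues.  Replacing the
open-coded s >> 1 with rcu_seq_ctr(s) keeps that indexing correct when the
state field later grows to the two bits that SRCU callback parallelization
will require.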