Commit de8cd0a5 authored by Paul E. McKenney

rcu: Update tree_exp.h function-header comments

The function-header comments in kernel/rcu/tree_exp.h have gotten a bit
out of date, so this commit updates a number of them.
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent 6c7d7dbf
@@ -21,7 +21,7 @@ static void rcu_exp_gp_seq_start(void)
 }
 
 /*
- * Return then value that expedited-grace-period counter will have
+ * Return the value that the expedited-grace-period counter will have
  * at the end of the current grace period.
  */
 static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
@@ -39,7 +39,9 @@ static void rcu_exp_gp_seq_end(void)
 }
 
 /*
- * Take a snapshot of the expedited-grace-period counter.
+ * Take a snapshot of the expedited-grace-period counter, which is the
+ * earliest value that will indicate that a full grace period has
+ * elapsed since the current time.
  */
 static unsigned long rcu_exp_gp_seq_snap(void)
 {
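The snapshot semantics described by that new comment can be modeled outside the kernel. Below is a minimal user-space sketch, not the kernel's implementation (the real helpers live in kernel/rcu/rcu.h and use two phase bits): here a single low-order bit marks a grace period in progress, exp_seq_snap() computes the earliest counter value proving a full grace period has elapsed since the call, and exp_seq_done() tests for it. All names are hypothetical.

#include <stdbool.h>

static unsigned long exp_sequence;	/* models rcu_state.expedited_sequence */

static void exp_seq_start(void) { exp_sequence++; }	/* counter now odd: GP running */
static void exp_seq_end(void)   { exp_sequence++; }	/* counter now even: GP complete */

/*
 * Earliest counter value implying a full grace period has elapsed since
 * this call.  If a GP is already running, it may have begun before we
 * got here, so we must wait for the end of the *next* one.
 */
static unsigned long exp_seq_snap(void)
{
	return (exp_sequence + 3) & ~0x1UL;
}

/* Has the counter reached snapshot @s, allowing for wraparound? */
static bool exp_seq_done(unsigned long s)
{
	return (long)(exp_sequence - s) >= 0;
}

For an idle counter of 4, exp_seq_snap() returns 6, the end of the next grace period; for an in-progress counter of 5, it returns 8, skipping past the grace period already underway.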
@@ -143,22 +145,18 @@ static void __maybe_unused sync_exp_reset_tree(void)
  * Return non-zero if there is no RCU expedited grace period in progress
  * for the specified rcu_node structure, in other words, if all CPUs and
  * tasks covered by the specified rcu_node structure have done their bit
- * for the current expedited grace period.  Works only for preemptible
- * RCU -- other RCU implementation use other means.
- *
- * Caller must hold the specificed rcu_node structure's ->lock
+ * for the current expedited grace period.
  */
 static bool sync_rcu_exp_done(struct rcu_node *rnp)
 {
 	raw_lockdep_assert_held_rcu_node(rnp);
-
 	return rnp->exp_tasks == NULL &&
 	       READ_ONCE(rnp->expmask) == 0;
 }
 
 /*
- * Like sync_rcu_exp_done(), but this function assumes the caller doesn't
- * hold the rcu_node's ->lock, and will acquire and release the lock itself
+ * Like sync_rcu_exp_done(), but where the caller does not hold the
+ * rcu_node's ->lock.
  */
 static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
 {
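The body of sync_rcu_exp_done_unlocked() is elided from this hunk. Its shape is the usual locked-wrapper pattern, sketched below under the assumption that it simply brackets the locked variant with the rcu_node lock (raw_spin_lock_irqsave_rcu_node() is the kernel's acquisition wrapper for rcu_node structures); treat this as a plausible reconstruction, not the verbatim source.

static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
{
	unsigned long flags;
	bool ret;

	/* Take the lock that the locked variant asserts, then delegate. */
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	ret = sync_rcu_exp_done(rnp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	return ret;
}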
@@ -180,8 +178,6 @@ static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
  * which the task was queued or to one of that rcu_node structure's ancestors,
  * recursively up the tree.  (Calm down, calm down, we do the recursion
  * iteratively!)
- *
- * Caller must hold the specified rcu_node structure's ->lock.
  */
 static void __rcu_report_exp_rnp(struct rcu_node *rnp,
 				 bool wake, unsigned long flags)
@@ -189,6 +185,7 @@ static void __rcu_report_exp_rnp(struct rcu_node *rnp,
 {
 	unsigned long mask;
 
+	raw_lockdep_assert_held_rcu_node(rnp);
 	for (;;) {
 		if (!sync_rcu_exp_done(rnp)) {
 			if (!rnp->expmask)
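The pattern in the hunks above, deleting a "Caller must hold" comment because a lockdep assertion now enforces the rule, generalizes beyond RCU. A minimal sketch using the generic lockdep_assert_held() (raw_lockdep_assert_held_rcu_node() is an rcu_node-specific wrapper around the same machinery); the foo structure and function are hypothetical:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct foo {			/* hypothetical example structure */
	raw_spinlock_t lock;
	int state;
};

/*
 * Caller must hold fp->lock -- but rather than saying so only in a
 * comment, assert it so CONFIG_PROVE_LOCKING kernels splat on misuse.
 */
static void foo_update(struct foo *fp)
{
	lockdep_assert_held(&fp->lock);
	fp->state++;
}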
@@ -452,6 +449,10 @@ static void sync_rcu_exp_select_cpus(void)
 		flush_work(&rnp->rew.rew_work);
 }
 
+/*
+ * Wait for the expedited grace period to elapse, issuing any needed
+ * RCU CPU stall warnings along the way.
+ */
 static void synchronize_sched_expedited_wait(void)
 {
 	int cpu;
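The new comment describes a wait loop that doubles as a stall detector: sleep in bounded chunks and warn each time a chunk expires without completion. A generic sketch of that shape, not the kernel function's actual body (which is elided here); the wait queue, flag, function name, and timeout below are all hypothetical:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static DECLARE_WAIT_QUEUE_HEAD(exp_wq);	/* hypothetical wait queue */
static bool exp_done_flag;			/* set when the GP completes */

static void wait_for_exp_gp(void)
{
	unsigned long stall_timeout = 21 * HZ;	/* arbitrary stall threshold */

	for (;;) {
		/* Nonzero return means the condition became true in time. */
		if (wait_event_timeout(exp_wq, READ_ONCE(exp_done_flag),
				       stall_timeout))
			return;
		pr_err("expedited grace period stall\n");	/* warn, keep waiting */
	}
}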
@@ -781,7 +782,7 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
  * implementations, it is still unfriendly to real-time workloads, so is
  * thus not recommended for any sort of common-case code.  In fact, if
  * you are using synchronize_rcu_expedited() in a loop, please restructure
- * your code to batch your updates, and then Use a single synchronize_rcu()
+ * your code to batch your updates, and then use a single synchronize_rcu()
  * instead.
  *
  * This has the same semantics as (but is more brutal than) synchronize_rcu().
...
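The batching advice in that final hunk is worth illustrating. A minimal sketch, assuming an RCU-protected list whose update side is serialized by a caller-held lock; struct item, its fields, and remove_matching() are hypothetical:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {				/* hypothetical list element */
	struct list_head node;		/* linkage on the RCU-protected list */
	struct list_head gc;		/* private linkage for deferred freeing */
	int key;
};

/* Remove every matching item, waiting for just one grace period total. */
static void remove_matching(struct list_head *head, int key)
{
	struct item *ip, *tmp;
	LIST_HEAD(doomed);

	/* Assumption: caller holds the update-side lock for @head. */
	list_for_each_entry_safe(ip, tmp, head, node) {
		if (ip->key != key)
			continue;
		list_del_rcu(&ip->node);	/* readers may still traverse ip */
		list_add(&ip->gc, &doomed);	/* ->gc is never seen by readers */
	}

	synchronize_rcu();	/* one grace period covers the whole batch... */

	list_for_each_entry_safe(ip, tmp, &doomed, gc)
		kfree(ip);	/* ...after which all of it can be freed */
}

One synchronize_rcu() here replaces what might otherwise have been one synchronize_rcu_expedited() per removed item, which is exactly the restructuring the comment asks for.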