Commit aacb5d91 authored by Paul E. McKenney

srcu: Use "ssp" instead of "sp" for srcu_struct pointer

In RCU, the distinction between "rsp", "rnp", and "rdp" has served well
for a great many years, but in SRCU, "sp" vs. "sdp" has proven confusing.
This commit therefore renames SRCU's "sp" pointers to "ssp", so that there
is "ssp" for srcu_struct pointer, "snp" for srcu_node pointer, and "sdp"
for srcu_data pointer.
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
parent eb4c2382
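
For illustration only (this sketch is not part of the commit), here is how the renamed convention reads at a call site. The SRCU API calls are the real ones touched by this patch; my_srcu, struct foo, and shared_foo are hypothetical names:

	#include <linux/srcu.h>
	#include <linux/slab.h>

	struct foo { int a; };				/* Hypothetical payload. */

	static DEFINE_SRCU(my_srcu);			/* Hypothetical SRCU domain. */
	static struct foo __rcu *shared_foo;		/* Hypothetical protected pointer. */

	static void reader(void)
	{
		struct srcu_struct *ssp = &my_srcu;	/* "ssp" names the srcu_struct pointer. */
		struct foo *p;
		int idx;

		idx = srcu_read_lock(ssp);		/* Enter SRCU read-side critical section. */
		p = srcu_dereference(shared_foo, ssp);	/* Lockdep-checked pointer fetch. */
		/* ... use p; SRCU readers, unlike plain RCU readers, may block ... */
		srcu_read_unlock(ssp, idx);		/* Pass back idx from srcu_read_lock(). */
	}

	static void updater(struct foo *newp)
	{
		struct foo *oldp;

		oldp = rcu_dereference_protected(shared_foo, 1);
		rcu_assign_pointer(shared_foo, newp);
		synchronize_srcu(&my_srcu);		/* Wait for pre-existing readers. */
		kfree(oldp);
	}
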
@@ -38,20 +38,20 @@ struct srcu_struct;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-int __init_srcu_struct(struct srcu_struct *sp, const char *name,
+int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
 		       struct lock_class_key *key);
-#define init_srcu_struct(sp) \
+#define init_srcu_struct(ssp) \
 ({ \
 	static struct lock_class_key __srcu_key; \
 	\
-	__init_srcu_struct((sp), #sp, &__srcu_key); \
+	__init_srcu_struct((ssp), #ssp, &__srcu_key); \
 })
 #define __SRCU_DEP_MAP_INIT(srcu_name)	.dep_map = { .name = #srcu_name },
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-int init_srcu_struct(struct srcu_struct *sp);
+int init_srcu_struct(struct srcu_struct *ssp);
 #define __SRCU_DEP_MAP_INIT(srcu_name)
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
@@ -67,28 +67,28 @@ int init_srcu_struct(struct srcu_struct *sp);
 struct srcu_struct { };
 #endif
-void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
+void call_srcu(struct srcu_struct *ssp, struct rcu_head *head,
 		void (*func)(struct rcu_head *head));
-void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced);
-int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
-void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
-void synchronize_srcu(struct srcu_struct *sp);
+void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced);
+int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
+void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
+void synchronize_srcu(struct srcu_struct *ssp);
 /**
  * cleanup_srcu_struct - deconstruct a sleep-RCU structure
- * @sp: structure to clean up.
+ * @ssp: structure to clean up.
  *
  * Must invoke this after you are finished using a given srcu_struct that
  * was initialized via init_srcu_struct(), else you leak memory.
  */
-static inline void cleanup_srcu_struct(struct srcu_struct *sp)
+static inline void cleanup_srcu_struct(struct srcu_struct *ssp)
 {
-	_cleanup_srcu_struct(sp, false);
+	_cleanup_srcu_struct(ssp, false);
 }
 /**
  * cleanup_srcu_struct_quiesced - deconstruct a quiesced sleep-RCU structure
- * @sp: structure to clean up.
+ * @ssp: structure to clean up.
  *
  * Must invoke this after you are finished using a given srcu_struct that
  * was initialized via init_srcu_struct(), else you leak memory. Also,
@@ -103,16 +103,16 @@ static inline void cleanup_srcu_struct(struct srcu_struct *sp)
  * (with high probability, anyway), and will also cause the srcu_struct
  * to be leaked.
  */
-static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *sp)
+static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *ssp)
 {
-	_cleanup_srcu_struct(sp, true);
+	_cleanup_srcu_struct(ssp, true);
 }
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 /**
  * srcu_read_lock_held - might we be in SRCU read-side critical section?
- * @sp: The srcu_struct structure to check
+ * @ssp: The srcu_struct structure to check
  *
  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
  * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
@@ -126,16 +126,16 @@ static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *sp)
  * relies on normal RCU, it can be called from the CPU which
  * is in the idle loop from an RCU point of view or offline.
  */
-static inline int srcu_read_lock_held(const struct srcu_struct *sp)
+static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
 {
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
-	return lock_is_held(&sp->dep_map);
+	return lock_is_held(&ssp->dep_map);
 }
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-static inline int srcu_read_lock_held(const struct srcu_struct *sp)
+static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
 {
 	return 1;
 }
@@ -145,7 +145,7 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp)
 /**
  * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
  * @p: the pointer to fetch and protect for later dereferencing
- * @sp: pointer to the srcu_struct, which is used to check that we
+ * @ssp: pointer to the srcu_struct, which is used to check that we
  *	really are in an SRCU read-side critical section.
  * @c: condition to check for update-side use
  *
@@ -154,32 +154,32 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp)
  * to 1. The @c argument will normally be a logical expression containing
  * lockdep_is_held() calls.
  */
-#define srcu_dereference_check(p, sp, c) \
-	__rcu_dereference_check((p), (c) || srcu_read_lock_held(sp), __rcu)
+#define srcu_dereference_check(p, ssp, c) \
+	__rcu_dereference_check((p), (c) || srcu_read_lock_held(ssp), __rcu)
 /**
  * srcu_dereference - fetch SRCU-protected pointer for later dereferencing
  * @p: the pointer to fetch and protect for later dereferencing
- * @sp: pointer to the srcu_struct, which is used to check that we
+ * @ssp: pointer to the srcu_struct, which is used to check that we
  *	really are in an SRCU read-side critical section.
  *
  * Makes rcu_dereference_check() do the dirty work. If PROVE_RCU
  * is enabled, invoking this outside of an RCU read-side critical
  * section will result in an RCU-lockdep splat.
  */
-#define srcu_dereference(p, sp) srcu_dereference_check((p), (sp), 0)
+#define srcu_dereference(p, ssp) srcu_dereference_check((p), (ssp), 0)
 /**
  * srcu_dereference_notrace - no tracing and no lockdep calls from here
  * @p: the pointer to fetch and protect for later dereferencing
- * @sp: pointer to the srcu_struct, which is used to check that we
+ * @ssp: pointer to the srcu_struct, which is used to check that we
  *	really are in an SRCU read-side critical section.
  */
-#define srcu_dereference_notrace(p, sp) srcu_dereference_check((p), (sp), 1)
+#define srcu_dereference_notrace(p, ssp) srcu_dereference_check((p), (ssp), 1)
 /**
  * srcu_read_lock - register a new reader for an SRCU-protected structure.
- * @sp: srcu_struct in which to register the new reader.
+ * @ssp: srcu_struct in which to register the new reader.
  *
  * Enter an SRCU read-side critical section. Note that SRCU read-side
  * critical sections may be nested. However, it is illegal to
@@ -194,44 +194,44 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp)
  * srcu_read_unlock() in an irq handler if the matching srcu_read_lock()
  * was invoked in process context.
  */
-static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
+static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
 {
 	int retval;
-	retval = __srcu_read_lock(sp);
-	rcu_lock_acquire(&(sp)->dep_map);
+	retval = __srcu_read_lock(ssp);
+	rcu_lock_acquire(&(ssp)->dep_map);
 	return retval;
 }
 /* Used by tracing, cannot be traced and cannot invoke lockdep. */
 static inline notrace int
-srcu_read_lock_notrace(struct srcu_struct *sp) __acquires(sp)
+srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
 {
 	int retval;
-	retval = __srcu_read_lock(sp);
+	retval = __srcu_read_lock(ssp);
 	return retval;
 }
 /**
  * srcu_read_unlock - unregister a old reader from an SRCU-protected structure.
- * @sp: srcu_struct in which to unregister the old reader.
+ * @ssp: srcu_struct in which to unregister the old reader.
  * @idx: return value from corresponding srcu_read_lock().
  *
  * Exit an SRCU read-side critical section.
  */
-static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
-	__releases(sp)
+static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
+	__releases(ssp)
 {
-	rcu_lock_release(&(sp)->dep_map);
-	__srcu_read_unlock(sp, idx);
+	rcu_lock_release(&(ssp)->dep_map);
+	__srcu_read_unlock(ssp, idx);
 }
 /* Used by tracing, cannot be traced and cannot call lockdep. */
 static inline notrace void
-srcu_read_unlock_notrace(struct srcu_struct *sp, int idx) __releases(sp)
+srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
 {
-	__srcu_read_unlock(sp, idx);
+	__srcu_read_unlock(ssp, idx);
 }
 /**
@@ -60,7 +60,7 @@ void srcu_drive_gp(struct work_struct *wp);
 #define DEFINE_STATIC_SRCU(name) \
 	static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name)
-void synchronize_srcu(struct srcu_struct *sp);
+void synchronize_srcu(struct srcu_struct *ssp);
 /*
  * Counts the new reader in the appropriate per-CPU element of the
@@ -68,36 +68,36 @@ void synchronize_srcu(struct srcu_struct *sp);
  * __srcu_read_unlock() must be in the same handler instance. Returns an
  * index that must be passed to the matching srcu_read_unlock().
  */
-static inline int __srcu_read_lock(struct srcu_struct *sp)
+static inline int __srcu_read_lock(struct srcu_struct *ssp)
 {
 	int idx;
-	idx = READ_ONCE(sp->srcu_idx);
-	WRITE_ONCE(sp->srcu_lock_nesting[idx], sp->srcu_lock_nesting[idx] + 1);
+	idx = READ_ONCE(ssp->srcu_idx);
+	WRITE_ONCE(ssp->srcu_lock_nesting[idx], ssp->srcu_lock_nesting[idx] + 1);
 	return idx;
 }
-static inline void synchronize_srcu_expedited(struct srcu_struct *sp)
+static inline void synchronize_srcu_expedited(struct srcu_struct *ssp)
 {
-	synchronize_srcu(sp);
+	synchronize_srcu(ssp);
 }
-static inline void srcu_barrier(struct srcu_struct *sp)
+static inline void srcu_barrier(struct srcu_struct *ssp)
 {
-	synchronize_srcu(sp);
+	synchronize_srcu(ssp);
 }
 /* Defined here to avoid size increase for non-torture kernels. */
-static inline void srcu_torture_stats_print(struct srcu_struct *sp,
+static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
 					    char *tt, char *tf)
 {
 	int idx;
-	idx = READ_ONCE(sp->srcu_idx) & 0x1;
+	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
 	pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n",
 		 tt, tf, idx,
-		 READ_ONCE(sp->srcu_lock_nesting[!idx]),
-		 READ_ONCE(sp->srcu_lock_nesting[idx]));
+		 READ_ONCE(ssp->srcu_lock_nesting[!idx]),
+		 READ_ONCE(ssp->srcu_lock_nesting[idx]));
 }
 #endif
@@ -51,7 +51,7 @@ struct srcu_data {
 	unsigned long grpmask;			/* Mask for leaf srcu_node */
 						/*  ->srcu_data_have_cbs[]. */
 	int cpu;
-	struct srcu_struct *sp;
+	struct srcu_struct *ssp;
 };
 /*
@@ -138,8 +138,8 @@ struct srcu_struct {
 #define DEFINE_SRCU(name)		__DEFINE_SRCU(name, /* not static */)
 #define DEFINE_STATIC_SRCU(name)	__DEFINE_SRCU(name, static)
-void synchronize_srcu_expedited(struct srcu_struct *sp);
-void srcu_barrier(struct srcu_struct *sp);
-void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf);
+void synchronize_srcu_expedited(struct srcu_struct *ssp);
+void srcu_barrier(struct srcu_struct *ssp);
+void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf);
 #endif
@@ -37,30 +37,30 @@ int rcu_scheduler_active __read_mostly;
 static LIST_HEAD(srcu_boot_list);
 static bool srcu_init_done;
-static int init_srcu_struct_fields(struct srcu_struct *sp)
+static int init_srcu_struct_fields(struct srcu_struct *ssp)
 {
-	sp->srcu_lock_nesting[0] = 0;
-	sp->srcu_lock_nesting[1] = 0;
-	init_swait_queue_head(&sp->srcu_wq);
-	sp->srcu_cb_head = NULL;
-	sp->srcu_cb_tail = &sp->srcu_cb_head;
-	sp->srcu_gp_running = false;
-	sp->srcu_gp_waiting = false;
-	sp->srcu_idx = 0;
-	INIT_WORK(&sp->srcu_work, srcu_drive_gp);
-	INIT_LIST_HEAD(&sp->srcu_work.entry);
+	ssp->srcu_lock_nesting[0] = 0;
+	ssp->srcu_lock_nesting[1] = 0;
+	init_swait_queue_head(&ssp->srcu_wq);
+	ssp->srcu_cb_head = NULL;
+	ssp->srcu_cb_tail = &ssp->srcu_cb_head;
+	ssp->srcu_gp_running = false;
+	ssp->srcu_gp_waiting = false;
+	ssp->srcu_idx = 0;
+	INIT_WORK(&ssp->srcu_work, srcu_drive_gp);
+	INIT_LIST_HEAD(&ssp->srcu_work.entry);
 	return 0;
 }
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-int __init_srcu_struct(struct srcu_struct *sp, const char *name,
+int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
 		       struct lock_class_key *key)
 {
 	/* Don't re-initialize a lock while it is held. */
-	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
-	lockdep_init_map(&sp->dep_map, name, key, 0);
-	return init_srcu_struct_fields(sp);
+	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
+	lockdep_init_map(&ssp->dep_map, name, key, 0);
+	return init_srcu_struct_fields(ssp);
 }
 EXPORT_SYMBOL_GPL(__init_srcu_struct);
@@ -68,15 +68,15 @@ EXPORT_SYMBOL_GPL(__init_srcu_struct);
 /*
  * init_srcu_struct - initialize a sleep-RCU structure
- * @sp: structure to initialize.
+ * @ssp: structure to initialize.
  *
  * Must invoke this on a given srcu_struct before passing that srcu_struct
  * to any other function. Each srcu_struct represents a separate domain
  * of SRCU protection.
  */
-int init_srcu_struct(struct srcu_struct *sp)
+int init_srcu_struct(struct srcu_struct *ssp)
 {
-	return init_srcu_struct_fields(sp);
+	return init_srcu_struct_fields(ssp);
 }
 EXPORT_SYMBOL_GPL(init_srcu_struct);
@@ -84,22 +84,22 @@ EXPORT_SYMBOL_GPL(init_srcu_struct);
 /*
  * cleanup_srcu_struct - deconstruct a sleep-RCU structure
- * @sp: structure to clean up.
+ * @ssp: structure to clean up.
  *
  * Must invoke this after you are finished using a given srcu_struct that
  * was initialized via init_srcu_struct(), else you leak memory.
  */
-void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced)
+void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced)
 {
-	WARN_ON(sp->srcu_lock_nesting[0] || sp->srcu_lock_nesting[1]);
+	WARN_ON(ssp->srcu_lock_nesting[0] || ssp->srcu_lock_nesting[1]);
 	if (quiesced)
-		WARN_ON(work_pending(&sp->srcu_work));
+		WARN_ON(work_pending(&ssp->srcu_work));
 	else
-		flush_work(&sp->srcu_work);
-	WARN_ON(sp->srcu_gp_running);
-	WARN_ON(sp->srcu_gp_waiting);
-	WARN_ON(sp->srcu_cb_head);
-	WARN_ON(&sp->srcu_cb_head != sp->srcu_cb_tail);
+		flush_work(&ssp->srcu_work);
+	WARN_ON(ssp->srcu_gp_running);
+	WARN_ON(ssp->srcu_gp_waiting);
+	WARN_ON(ssp->srcu_cb_head);
+	WARN_ON(&ssp->srcu_cb_head != ssp->srcu_cb_tail);
 }
 EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
@@ -107,13 +107,13 @@ EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
  * Removes the count for the old reader from the appropriate element of
  * the srcu_struct.
  */
-void __srcu_read_unlock(struct srcu_struct *sp, int idx)
+void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
 {
-	int newval = sp->srcu_lock_nesting[idx] - 1;
-	WRITE_ONCE(sp->srcu_lock_nesting[idx], newval);
-	if (!newval && READ_ONCE(sp->srcu_gp_waiting))
-		swake_up_one(&sp->srcu_wq);
+	int newval = ssp->srcu_lock_nesting[idx] - 1;
+	WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval);
+	if (!newval && READ_ONCE(ssp->srcu_gp_waiting))
+		swake_up_one(&ssp->srcu_wq);
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
@@ -127,24 +127,24 @@ void srcu_drive_gp(struct work_struct *wp)
 	int idx;
 	struct rcu_head *lh;
 	struct rcu_head *rhp;
-	struct srcu_struct *sp;
-	sp = container_of(wp, struct srcu_struct, srcu_work);
-	if (sp->srcu_gp_running || !READ_ONCE(sp->srcu_cb_head))
+	struct srcu_struct *ssp;
+	ssp = container_of(wp, struct srcu_struct, srcu_work);
+	if (ssp->srcu_gp_running || !READ_ONCE(ssp->srcu_cb_head))
 		return; /* Already running or nothing to do. */
 	/* Remove recently arrived callbacks and wait for readers. */
-	WRITE_ONCE(sp->srcu_gp_running, true);
+	WRITE_ONCE(ssp->srcu_gp_running, true);
 	local_irq_disable();
-	lh = sp->srcu_cb_head;
-	sp->srcu_cb_head = NULL;
-	sp->srcu_cb_tail = &sp->srcu_cb_head;
+	lh = ssp->srcu_cb_head;
+	ssp->srcu_cb_head = NULL;
+	ssp->srcu_cb_tail = &ssp->srcu_cb_head;
 	local_irq_enable();
-	idx = sp->srcu_idx;
-	WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx);
-	WRITE_ONCE(sp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
-	swait_event_exclusive(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
-	WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
+	idx = ssp->srcu_idx;
+	WRITE_ONCE(ssp->srcu_idx, !ssp->srcu_idx);
+	WRITE_ONCE(ssp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
+	swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx]));
+	WRITE_ONCE(ssp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
 	/* Invoke the callbacks we removed above. */
 	while (lh) {
@@ -161,9 +161,9 @@ void srcu_drive_gp(struct work_struct *wp)
 	 * at interrupt level, but the ->srcu_gp_running checks will
 	 * straighten that out.
 	 */
-	WRITE_ONCE(sp->srcu_gp_running, false);
-	if (READ_ONCE(sp->srcu_cb_head))
-		schedule_work(&sp->srcu_work);
+	WRITE_ONCE(ssp->srcu_gp_running, false);
+	if (READ_ONCE(ssp->srcu_cb_head))
+		schedule_work(&ssp->srcu_work);
 }
 EXPORT_SYMBOL_GPL(srcu_drive_gp);
@@ -171,7 +171,7 @@ EXPORT_SYMBOL_GPL(srcu_drive_gp);
  * Enqueue an SRCU callback on the specified srcu_struct structure,
  * initiating grace-period processing if it is not already running.
  */
-void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
+void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
 	       rcu_callback_t func)
 {
 	unsigned long flags;
@@ -179,14 +179,14 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
 	rhp->func = func;
 	rhp->next = NULL;
 	local_irq_save(flags);
-	*sp->srcu_cb_tail = rhp;
-	sp->srcu_cb_tail = &rhp->next;
+	*ssp->srcu_cb_tail = rhp;
+	ssp->srcu_cb_tail = &rhp->next;
 	local_irq_restore(flags);
-	if (!READ_ONCE(sp->srcu_gp_running)) {
+	if (!READ_ONCE(ssp->srcu_gp_running)) {
 		if (likely(srcu_init_done))
-			schedule_work(&sp->srcu_work);
-		else if (list_empty(&sp->srcu_work.entry))
-			list_add(&sp->srcu_work.entry, &srcu_boot_list);
+			schedule_work(&ssp->srcu_work);
+		else if (list_empty(&ssp->srcu_work.entry))
+			list_add(&ssp->srcu_work.entry, &srcu_boot_list);
 	}
 }
 EXPORT_SYMBOL_GPL(call_srcu);
@@ -194,13 +194,13 @@ EXPORT_SYMBOL_GPL(call_srcu);
 /*
  * synchronize_srcu - wait for prior SRCU read-side critical-section completion
  */
-void synchronize_srcu(struct srcu_struct *sp)
+void synchronize_srcu(struct srcu_struct *ssp)
 {
 	struct rcu_synchronize rs;
 	init_rcu_head_on_stack(&rs.head);
 	init_completion(&rs.completion);
-	call_srcu(sp, &rs.head, wakeme_after_rcu);
+	call_srcu(ssp, &rs.head, wakeme_after_rcu);
 	wait_for_completion(&rs.completion);
 	destroy_rcu_head_on_stack(&rs.head);
 }
@@ -219,13 +219,13 @@ void __init rcu_scheduler_starting(void)
  */
 void __init srcu_init(void)
 {
-	struct srcu_struct *sp;
+	struct srcu_struct *ssp;
 	srcu_init_done = true;
 	while (!list_empty(&srcu_boot_list)) {
-		sp = list_first_entry(&srcu_boot_list,
+		ssp = list_first_entry(&srcu_boot_list,
 				      struct srcu_struct, srcu_work.entry);
-		list_del_init(&sp->srcu_work.entry);
-		schedule_work(&sp->srcu_work);
+		list_del_init(&ssp->srcu_work.entry);
+		schedule_work(&ssp->srcu_work);
 	}
 }
@@ -56,7 +56,7 @@ static LIST_HEAD(srcu_boot_list);
 static bool __read_mostly srcu_init_done;
 static void srcu_invoke_callbacks(struct work_struct *work);
-static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);
+static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
 static void process_srcu(struct work_struct *work);
 /* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
@@ -92,7 +92,7 @@ do { \
  * srcu_read_unlock() running against them. So if the is_static parameter
  * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
  */
-static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
+static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
 {
 	int cpu;
 	int i;
@@ -103,13 +103,13 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
 	struct srcu_node *snp_first;
 	/* Work out the overall tree geometry. */
-	sp->level[0] = &sp->node[0];
+	ssp->level[0] = &ssp->node[0];
 	for (i = 1; i < rcu_num_lvls; i++)
-		sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1];
+		ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
 	rcu_init_levelspread(levelspread, num_rcu_lvl);
 	/* Each pass through this loop initializes one srcu_node structure. */
-	srcu_for_each_node_breadth_first(sp, snp) {
+	srcu_for_each_node_breadth_first(ssp, snp) {
 		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
 		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
 			     ARRAY_SIZE(snp->srcu_data_have_cbs));
@@ -120,17 +120,17 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
 		snp->srcu_gp_seq_needed_exp = 0;
 		snp->grplo = -1;
 		snp->grphi = -1;
-		if (snp == &sp->node[0]) {
+		if (snp == &ssp->node[0]) {
 			/* Root node, special case. */
 			snp->srcu_parent = NULL;
 			continue;
 		}
 		/* Non-root node. */
-		if (snp == sp->level[level + 1])
+		if (snp == ssp->level[level + 1])
 			level++;
-		snp->srcu_parent = sp->level[level - 1] +
-				   (snp - sp->level[level]) /
+		snp->srcu_parent = ssp->level[level - 1] +
+				   (snp - ssp->level[level]) /
 				   levelspread[level - 1];
 	}
@@ -141,14 +141,14 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
 	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
 		     ARRAY_SIZE(sdp->srcu_unlock_count));
 	level = rcu_num_lvls - 1;
-	snp_first = sp->level[level];
+	snp_first = ssp->level[level];
 	for_each_possible_cpu(cpu) {
-		sdp = per_cpu_ptr(sp->sda, cpu);
+		sdp = per_cpu_ptr(ssp->sda, cpu);
 		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
 		rcu_segcblist_init(&sdp->srcu_cblist);
 		sdp->srcu_cblist_invoking = false;
-		sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
-		sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq;
+		sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
+		sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
 		sdp->mynode = &snp_first[cpu / levelspread[level]];
 		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
 			if (snp->grplo < 0)
@@ -157,7 +157,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
 		}
 		sdp->cpu = cpu;
 		INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
-		sdp->sp = sp;
+		sdp->ssp = ssp;
 		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
 		if (is_static)
 			continue;
@@ -176,35 +176,35 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
  * parameter is passed through to init_srcu_struct_nodes(), and
  * also tells us that ->sda has already been wired up to srcu_data.
  */
-static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static)
+static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
 {
-	mutex_init(&sp->srcu_cb_mutex);
-	mutex_init(&sp->srcu_gp_mutex);
-	sp->srcu_idx = 0;
-	sp->srcu_gp_seq = 0;
-	sp->srcu_barrier_seq = 0;
-	mutex_init(&sp->srcu_barrier_mutex);
-	atomic_set(&sp->srcu_barrier_cpu_cnt, 0);
-	INIT_DELAYED_WORK(&sp->work, process_srcu);
+	mutex_init(&ssp->srcu_cb_mutex);
+	mutex_init(&ssp->srcu_gp_mutex);
+	ssp->srcu_idx = 0;
+	ssp->srcu_gp_seq = 0;
+	ssp->srcu_barrier_seq = 0;
+	mutex_init(&ssp->srcu_barrier_mutex);
+	atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
+	INIT_DELAYED_WORK(&ssp->work, process_srcu);
 	if (!is_static)
-		sp->sda = alloc_percpu(struct srcu_data);
-	init_srcu_struct_nodes(sp, is_static);
-	sp->srcu_gp_seq_needed_exp = 0;
-	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
-	smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */
-	return sp->sda ? 0 : -ENOMEM;
+		ssp->sda = alloc_percpu(struct srcu_data);
+	init_srcu_struct_nodes(ssp, is_static);
+	ssp->srcu_gp_seq_needed_exp = 0;
+	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
+	smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
+	return ssp->sda ? 0 : -ENOMEM;
 }
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-int __init_srcu_struct(struct srcu_struct *sp, const char *name,
+int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
 		       struct lock_class_key *key)
 {
 	/* Don't re-initialize a lock while it is held. */
-	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
-	lockdep_init_map(&sp->dep_map, name, key, 0);
-	spin_lock_init(&ACCESS_PRIVATE(sp, lock));
-	return init_srcu_struct_fields(sp, false);
+	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
+	lockdep_init_map(&ssp->dep_map, name, key, 0);
+	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
+	return init_srcu_struct_fields(ssp, false);
 }
 EXPORT_SYMBOL_GPL(__init_srcu_struct);
@@ -212,16 +212,16 @@ EXPORT_SYMBOL_GPL(__init_srcu_struct);
 /**
  * init_srcu_struct - initialize a sleep-RCU structure
- * @sp: structure to initialize.
+ * @ssp: structure to initialize.
  *
  * Must invoke this on a given srcu_struct before passing that srcu_struct
  * to any other function. Each srcu_struct represents a separate domain
  * of SRCU protection.
  */
-int init_srcu_struct(struct srcu_struct *sp)
+int init_srcu_struct(struct srcu_struct *ssp)
 {
-	spin_lock_init(&ACCESS_PRIVATE(sp, lock));
-	return init_srcu_struct_fields(sp, false);
+	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
+	return init_srcu_struct_fields(ssp, false);
 }
 EXPORT_SYMBOL_GPL(init_srcu_struct);
@@ -231,37 +231,37 @@ EXPORT_SYMBOL_GPL(init_srcu_struct);
  * First-use initialization of statically allocated srcu_struct
  * structure. Wiring up the combining tree is more than can be
  * done with compile-time initialization, so this check is added
- * to each update-side SRCU primitive. Use sp->lock, which -is-
+ * to each update-side SRCU primitive. Use ssp->lock, which -is-
  * compile-time initialized, to resolve races involving multiple
  * CPUs trying to garner first-use privileges.
  */
-static void check_init_srcu_struct(struct srcu_struct *sp)
+static void check_init_srcu_struct(struct srcu_struct *ssp)
 {
 	unsigned long flags;
 	/* The smp_load_acquire() pairs with the smp_store_release(). */
-	if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
+	if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
 		return; /* Already initialized. */
-	spin_lock_irqsave_rcu_node(sp, flags);
-	if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
-		spin_unlock_irqrestore_rcu_node(sp, flags);
+	spin_lock_irqsave_rcu_node(ssp, flags);
+	if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
+		spin_unlock_irqrestore_rcu_node(ssp, flags);
 		return;
 	}
-	init_srcu_struct_fields(sp, true);
-	spin_unlock_irqrestore_rcu_node(sp, flags);
+	init_srcu_struct_fields(ssp, true);
+	spin_unlock_irqrestore_rcu_node(ssp, flags);
 }
 /*
  * Returns approximate total of the readers' ->srcu_lock_count[] values
  * for the rank of per-CPU counters specified by idx.
  */
-static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
+static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
 {
 	int cpu;
 	unsigned long sum = 0;
 	for_each_possible_cpu(cpu) {
-		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
+		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
 		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
 	}
@@ -272,13 +272,13 @@ static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
  * Returns approximate total of the readers' ->srcu_unlock_count[] values
  * for the rank of per-CPU counters specified by idx.
  */
-static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
+static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
 {
 	int cpu;
 	unsigned long sum = 0;
 	for_each_possible_cpu(cpu) {
-		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
+		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
 		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
 	}
@@ -289,11 +289,11 @@ static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
  * Return true if the number of pre-existing readers is determined to
  * be zero.
  */
-static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
+static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
 {
 	unsigned long unlocks;
-	unlocks = srcu_readers_unlock_idx(sp, idx);
+	unlocks = srcu_readers_unlock_idx(ssp, idx);
 	/*
 	 * Make sure that a lock is always counted if the corresponding
@@ -329,25 +329,25 @@ static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
 	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
 	 * especially on 64-bit systems.
 	 */
-	return srcu_readers_lock_idx(sp, idx) == unlocks;
+	return srcu_readers_lock_idx(ssp, idx) == unlocks;
 }
 /**
  * srcu_readers_active - returns true if there are readers. and false
  *                       otherwise
- * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
+ * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
  *
  * Note that this is not an atomic primitive, and can therefore suffer
  * severe errors when invoked on an active srcu_struct. That said, it
  * can be useful as an error check at cleanup time.
  */
-static bool srcu_readers_active(struct srcu_struct *sp)
+static bool srcu_readers_active(struct srcu_struct *ssp)
 {
 	int cpu;
 	unsigned long sum = 0;
 	for_each_possible_cpu(cpu) {
-		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
+		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
 		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
 		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
@@ -363,44 +363,44 @@ static bool srcu_readers_active(struct srcu_struct *sp)
  * Return grace-period delay, zero if there are expedited grace
  * periods pending, SRCU_INTERVAL otherwise.
  */
-static unsigned long srcu_get_delay(struct srcu_struct *sp)
+static unsigned long srcu_get_delay(struct srcu_struct *ssp)
 {
-	if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq),
-			 READ_ONCE(sp->srcu_gp_seq_needed_exp)))
+	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
+			 READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
 		return 0;
 	return SRCU_INTERVAL;
 }
 /* Helper for cleanup_srcu_struct() and cleanup_srcu_struct_quiesced(). */
-void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced)
+void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced)
 {
 	int cpu;
-	if (WARN_ON(!srcu_get_delay(sp)))
+	if (WARN_ON(!srcu_get_delay(ssp)))
 		return; /* Just leak it! */
-	if (WARN_ON(srcu_readers_active(sp)))
+	if (WARN_ON(srcu_readers_active(ssp)))
 		return; /* Just leak it! */
 	if (quiesced) {
-		if (WARN_ON(delayed_work_pending(&sp->work)))
+		if (WARN_ON(delayed_work_pending(&ssp->work)))
 			return; /* Just leak it! */
 	} else {
-		flush_delayed_work(&sp->work);
+		flush_delayed_work(&ssp->work);
 	}
 	for_each_possible_cpu(cpu)
 		if (quiesced) {
-			if (WARN_ON(delayed_work_pending(&per_cpu_ptr(sp->sda, cpu)->work)))
+			if (WARN_ON(delayed_work_pending(&per_cpu_ptr(ssp->sda, cpu)->work)))
 				return; /* Just leak it! */
 		} else {
-			flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work);
+			flush_delayed_work(&per_cpu_ptr(ssp->sda, cpu)->work);
 		}
-	if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
-	    WARN_ON(srcu_readers_active(sp))) {
+	if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
+	    WARN_ON(srcu_readers_active(ssp))) {
 		pr_info("%s: Active srcu_struct %p state: %d\n",
-			__func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
+			__func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)));
 		return; /* Caller forgot to stop doing call_srcu()? */
 	}
-	free_percpu(sp->sda);
-	sp->sda = NULL;
+	free_percpu(ssp->sda);
+	ssp->sda = NULL;
 }
 EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
@@ -409,12 +409,12 @@ EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
  * srcu_struct.
  * Returns an index that must be passed to the matching srcu_read_unlock().
  */
-int __srcu_read_lock(struct srcu_struct *sp)
+int __srcu_read_lock(struct srcu_struct *ssp)
 {
 	int idx;
-	idx = READ_ONCE(sp->srcu_idx) & 0x1;
-	this_cpu_inc(sp->sda->srcu_lock_count[idx]);
+	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
+	this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
 	smp_mb(); /* B */  /* Avoid leaking the critical section. */
 	return idx;
 }
@@ -425,10 +425,10 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
  * element of the srcu_struct. Note that this may well be a different
  * CPU than that which was incremented by the corresponding srcu_read_lock().
  */
-void __srcu_read_unlock(struct srcu_struct *sp, int idx)
+void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
 {
 	smp_mb(); /* C */  /* Avoid leaking the critical section. */
-	this_cpu_inc(sp->sda->srcu_unlock_count[idx]);
+	this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
@@ -444,22 +444,22 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 /*
  * Start an SRCU grace period.
  */
-static void srcu_gp_start(struct srcu_struct *sp)
+static void srcu_gp_start(struct srcu_struct *ssp)
 {
-	struct srcu_data *sdp = this_cpu_ptr(sp->sda);
+	struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
 	int state;
-	lockdep_assert_held(&ACCESS_PRIVATE(sp, lock));
-	WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
+	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
+	WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
 	spin_lock_rcu_node(sdp); /* Interrupts already disabled. */
 	rcu_segcblist_advance(&sdp->srcu_cblist,
-			      rcu_seq_current(&sp->srcu_gp_seq));
+			      rcu_seq_current(&ssp->srcu_gp_seq));
 	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
-				       rcu_seq_snap(&sp->srcu_gp_seq));
+				       rcu_seq_snap(&ssp->srcu_gp_seq));
 	spin_unlock_rcu_node(sdp); /* Interrupts remain disabled. */
 	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
-	rcu_seq_start(&sp->srcu_gp_seq);
-	state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
+	rcu_seq_start(&ssp->srcu_gp_seq);
+	state = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
 	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
 }
@@ -513,7 +513,7 @@ static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
  * just-completed grace period, the one corresponding to idx. If possible,
  * schedule this invocation on the corresponding CPUs.
  */
-static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
+static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
 				  unsigned long mask, unsigned long delay)
 {
 	int cpu;
@@ -521,7 +521,7 @@ static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
 	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
 		if (!(mask & (1 << (cpu - snp->grplo))))
 			continue;
-		srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay);
+		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
 	}
 }
@@ -534,7 +534,7 @@ static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
  * are initiating callback invocation. This allows the ->srcu_have_cbs[]
  * array to have a finite number of elements.
  */
-static void srcu_gp_end(struct srcu_struct *sp)
+static void srcu_gp_end(struct srcu_struct *ssp)
 {
 	unsigned long cbdelay;
 	bool cbs;
@@ -548,28 +548,28 @@ static void srcu_gp_end(struct srcu_struct *sp)
 	struct srcu_node *snp;
 	/* Prevent more than one additional grace period. */
-	mutex_lock(&sp->srcu_cb_mutex);
+	mutex_lock(&ssp->srcu_cb_mutex);
 	/* End the current grace period. */
-	spin_lock_irq_rcu_node(sp);
-	idx = rcu_seq_state(sp->srcu_gp_seq);
+	spin_lock_irq_rcu_node(ssp);
+	idx = rcu_seq_state(ssp->srcu_gp_seq);
 	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
-	cbdelay = srcu_get_delay(sp);
-	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
-	rcu_seq_end(&sp->srcu_gp_seq);
-	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
-	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
-		sp->srcu_gp_seq_needed_exp = gpseq;
-	spin_unlock_irq_rcu_node(sp);
-	mutex_unlock(&sp->srcu_gp_mutex);
+	cbdelay = srcu_get_delay(ssp);
+	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
+	rcu_seq_end(&ssp->srcu_gp_seq);
+	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
+	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
+		ssp->srcu_gp_seq_needed_exp = gpseq;
+	spin_unlock_irq_rcu_node(ssp);
+	mutex_unlock(&ssp->srcu_gp_mutex);
 	/* A new grace period can start at this point. But only one. */
 	/* Initiate callback invocation as needed. */
 	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
-	srcu_for_each_node_breadth_first(sp, snp) {
+	srcu_for_each_node_breadth_first(ssp, snp) {
 		spin_lock_irq_rcu_node(snp);
 		cbs = false;
-		last_lvl = snp >= sp->level[rcu_num_lvls - 1];
+		last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
 		if (last_lvl)
 			cbs = snp->srcu_have_cbs[idx] == gpseq;
 		snp->srcu_have_cbs[idx] = gpseq;
@@ -580,12 +580,12 @@ static void srcu_gp_end(struct srcu_struct *sp)
 		snp->srcu_data_have_cbs[idx] = 0;
 		spin_unlock_irq_rcu_node(snp);
 		if (cbs)
-			srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);
+			srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
 		/* Occasionally prevent srcu_data counter wrap. */
 		if (!(gpseq & counter_wrap_check) && last_lvl)
 			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
-				sdp = per_cpu_ptr(sp->sda, cpu);
+				sdp = per_cpu_ptr(ssp->sda, cpu);
 				spin_lock_irqsave_rcu_node(sdp, flags);
 				if (ULONG_CMP_GE(gpseq,
 						 sdp->srcu_gp_seq_needed + 100))
@@ -598,18 +598,18 @@ static void srcu_gp_end(struct srcu_struct *sp)
 	}
 	/* Callback initiation done, allow grace periods after next. */
-	mutex_unlock(&sp->srcu_cb_mutex);
+	mutex_unlock(&ssp->srcu_cb_mutex);
 	/* Start a new grace period if needed. */
-	spin_lock_irq_rcu_node(sp);
-	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
+	spin_lock_irq_rcu_node(ssp);
+	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
 	if (!rcu_seq_state(gpseq) &&
-	    ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
-		srcu_gp_start(sp);
-		spin_unlock_irq_rcu_node(sp);
-		srcu_reschedule(sp, 0);
+	    ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
+		srcu_gp_start(ssp);
+		spin_unlock_irq_rcu_node(ssp);
+		srcu_reschedule(ssp, 0);
 	} else {
-		spin_unlock_irq_rcu_node(sp);
+		spin_unlock_irq_rcu_node(ssp);
 	}
 }
@@ -620,13 +620,13 @@ static void srcu_gp_end(struct srcu_struct *sp)
  * but without expediting. To start a completely new grace period,
  * whether expedited or not, use srcu_funnel_gp_start() instead.
  */
-static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
+static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
 				  unsigned long s)
 {
 	unsigned long flags;
 	for (; snp != NULL; snp = snp->srcu_parent) {
-		if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
+		if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
 		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
 			return;
 		spin_lock_irqsave_rcu_node(snp, flags);
@@ -637,10 +637,10 @@ static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
 		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
 		spin_unlock_irqrestore_rcu_node(snp, flags);
 	}
-	spin_lock_irqsave_rcu_node(sp, flags);
-	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
-		sp->srcu_gp_seq_needed_exp = s;
-	spin_unlock_irqrestore_rcu_node(sp, flags);
+	spin_lock_irqsave_rcu_node(ssp, flags);
+	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
+		ssp->srcu_gp_seq_needed_exp = s;
+	spin_unlock_irqrestore_rcu_node(ssp, flags);
 }
 /*
@@ -653,7 +653,7 @@ static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
  * Note that this function also does the work of srcu_funnel_exp_start(),
  * in some cases by directly invoking it.
  */
-static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
+static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
 				 unsigned long s, bool do_norm)
 {
 	unsigned long flags;
@@ -663,7 +663,7 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
 	/* Each pass through the loop does one level of the srcu_node tree. */
 	for (; snp != NULL; snp = snp->srcu_parent) {
-		if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
+		if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
 			return; /* GP already done and CBs recorded. */
 		spin_lock_irqsave_rcu_node(snp, flags);
 		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
@@ -678,7 +678,7 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
 				return;
 			}
 			if (!do_norm)
-				srcu_funnel_exp_start(sp, snp, s);
+				srcu_funnel_exp_start(ssp, snp, s);
 			return;
 		}
 		snp->srcu_have_cbs[idx] = s;
@@ -690,29 +690,29 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
 	}
 
 	/* Top of tree, must ensure the grace period will be started. */
-	spin_lock_irqsave_rcu_node(sp, flags);
-	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
+	spin_lock_irqsave_rcu_node(ssp, flags);
+	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
 		/*
 		 * Record need for grace period s.  Pair with load
 		 * acquire setting up for initialization.
 		 */
-		smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/
+		smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
 	}
-	if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
-		sp->srcu_gp_seq_needed_exp = s;
+	if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
+		ssp->srcu_gp_seq_needed_exp = s;
 
 	/* If grace period not already done and none in progress, start it. */
-	if (!rcu_seq_done(&sp->srcu_gp_seq, s) &&
-	    rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) {
-		WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
-		srcu_gp_start(sp);
+	if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
+	    rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
+		WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
+		srcu_gp_start(ssp);
 		if (likely(srcu_init_done))
-			queue_delayed_work(rcu_gp_wq, &sp->work,
-					   srcu_get_delay(sp));
-		else if (list_empty(&sp->work.work.entry))
-			list_add(&sp->work.work.entry, &srcu_boot_list);
+			queue_delayed_work(rcu_gp_wq, &ssp->work,
+					   srcu_get_delay(ssp));
+		else if (list_empty(&ssp->work.work.entry))
+			list_add(&ssp->work.work.entry, &srcu_boot_list);
 	}
-	spin_unlock_irqrestore_rcu_node(sp, flags);
+	spin_unlock_irqrestore_rcu_node(ssp, flags);
 }
 
 /*
@@ -720,12 +720,12 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
  * loop an additional time if there is an expedited grace period pending.
  * The caller must ensure that ->srcu_idx is not changed while checking.
  */
-static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
+static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
 {
 	for (;;) {
-		if (srcu_readers_active_idx_check(sp, idx))
+		if (srcu_readers_active_idx_check(ssp, idx))
 			return true;
-		if (--trycount + !srcu_get_delay(sp) <= 0)
+		if (--trycount + !srcu_get_delay(ssp) <= 0)
 			return false;
 		udelay(SRCU_RETRY_CHECK_DELAY);
 	}
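The termination test above is subtle: srcu_get_delay() returns zero only when an expedited grace period is pending, so the !srcu_get_delay(ssp) term grants exactly one extra retry in the expedited case; otherwise the scan gives up once trycount passes are exhausted.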
@@ -736,7 +736,7 @@ static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
  * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
  * us to wait for pre-existing readers in a starvation-free manner.
  */
-static void srcu_flip(struct srcu_struct *sp)
+static void srcu_flip(struct srcu_struct *ssp)
 {
 	/*
 	 * Ensure that if this updater saw a given reader's increment
@@ -748,7 +748,7 @@ static void srcu_flip(struct srcu_struct *sp)
 	 */
 	smp_mb(); /* E */  /* Pairs with B and C. */
 
-	WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1);
+	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
 
 	/*
 	 * Ensure that if the updater misses an __srcu_read_unlock()
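For context on the counter ranks being flipped here, a minimal read-side sketch (my_srcu and example_reader are hypothetical, not part of this patch): the index returned by srcu_read_lock() records which rank of ->srcu_(un)lock_count[] the reader incremented, and the same index must be handed back to srcu_read_unlock().

	#include <linux/srcu.h>

	DEFINE_SRCU(my_srcu);	/* hypothetical statically-defined srcu_struct */

	static void example_reader(void)
	{
		int idx;

		idx = srcu_read_lock(&my_srcu);
		/* Read-side critical section; unlike RCU, SRCU readers may block. */
		srcu_read_unlock(&my_srcu, idx);
	}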
@@ -781,7 +781,7 @@ static void srcu_flip(struct srcu_struct *sp)
  * negligible when amortized over that time period, and the extra latency
  * of a needlessly non-expedited grace period is similarly negligible.
  */
-static bool srcu_might_be_idle(struct srcu_struct *sp)
+static bool srcu_might_be_idle(struct srcu_struct *ssp)
 {
 	unsigned long curseq;
 	unsigned long flags;
@@ -790,7 +790,7 @@ static bool srcu_might_be_idle(struct srcu_struct *sp)
 	/* If the local srcu_data structure has callbacks, not idle. */
 	local_irq_save(flags);
-	sdp = this_cpu_ptr(sp->sda);
+	sdp = this_cpu_ptr(ssp->sda);
 	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
 		local_irq_restore(flags);
 		return false; /* Callbacks already present, so not idle. */
@@ -806,17 +806,17 @@ static bool srcu_might_be_idle(struct srcu_struct *sp)
 	/* First, see if enough time has passed since the last GP. */
 	t = ktime_get_mono_fast_ns();
 	if (exp_holdoff == 0 ||
-	    time_in_range_open(t, sp->srcu_last_gp_end,
-			       sp->srcu_last_gp_end + exp_holdoff))
+	    time_in_range_open(t, ssp->srcu_last_gp_end,
+			       ssp->srcu_last_gp_end + exp_holdoff))
 		return false; /* Too soon after last GP. */
 
 	/* Next, check for probable idleness. */
-	curseq = rcu_seq_current(&sp->srcu_gp_seq);
+	curseq = rcu_seq_current(&ssp->srcu_gp_seq);
 	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
-	if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed)))
+	if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
 		return false; /* Grace period in progress, so not idle. */
 	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
-	if (curseq != rcu_seq_current(&sp->srcu_gp_seq))
+	if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
 		return false; /* GP # changed, so not idle. */
 	return true; /* With reasonable probability, idle! */
 }
@@ -856,7 +856,7 @@ static void srcu_leak_callback(struct rcu_head *rhp)
  * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
  * srcu_struct structure.
  */
-void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
+void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
 		 rcu_callback_t func, bool do_norm)
 {
 	unsigned long flags;
@@ -866,7 +866,7 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
 	unsigned long s;
 	struct srcu_data *sdp;
 
-	check_init_srcu_struct(sp);
+	check_init_srcu_struct(ssp);
 	if (debug_rcu_head_queue(rhp)) {
 		/* Probable double call_srcu(), so leak the callback. */
 		WRITE_ONCE(rhp->func, srcu_leak_callback);
@@ -874,14 +874,14 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
 		return;
 	}
 	rhp->func = func;
-	idx = srcu_read_lock(sp);
+	idx = srcu_read_lock(ssp);
 	local_irq_save(flags);
-	sdp = this_cpu_ptr(sp->sda);
+	sdp = this_cpu_ptr(ssp->sda);
 	spin_lock_rcu_node(sdp);
 	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
 	rcu_segcblist_advance(&sdp->srcu_cblist,
-			      rcu_seq_current(&sp->srcu_gp_seq));
-	s = rcu_seq_snap(&sp->srcu_gp_seq);
+			      rcu_seq_current(&ssp->srcu_gp_seq));
+	s = rcu_seq_snap(&ssp->srcu_gp_seq);
 	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
 	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
 		sdp->srcu_gp_seq_needed = s;
@@ -893,15 +893,15 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
 	}
 	spin_unlock_irqrestore_rcu_node(sdp, flags);
 	if (needgp)
-		srcu_funnel_gp_start(sp, sdp, s, do_norm);
+		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
 	else if (needexp)
-		srcu_funnel_exp_start(sp, sdp->mynode, s);
-	srcu_read_unlock(sp, idx);
+		srcu_funnel_exp_start(ssp, sdp->mynode, s);
+	srcu_read_unlock(ssp, idx);
 }
 
 /**
  * call_srcu() - Queue a callback for invocation after an SRCU grace period
- * @sp: srcu_struct in queue the callback
+ * @ssp: srcu_struct on which to queue the callback
  * @rhp: structure to be used for queueing the SRCU callback.
  * @func: function to be invoked after the SRCU grace period
  *
@@ -916,21 +916,21 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
  * The callback will be invoked from process context, but must nevertheless
  * be fast and must not block.
  */
-void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
+void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
 	       rcu_callback_t func)
 {
-	__call_srcu(sp, rhp, func, true);
+	__call_srcu(ssp, rhp, func, true);
 }
 EXPORT_SYMBOL_GPL(call_srcu);
 
 /*
  * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
  */
-static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
+static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
 {
 	struct rcu_synchronize rcu;
 
-	RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
+	RCU_LOCKDEP_WARN(lock_is_held(&ssp->dep_map) ||
 			 lock_is_held(&rcu_bh_lock_map) ||
 			 lock_is_held(&rcu_lock_map) ||
 			 lock_is_held(&rcu_sched_lock_map),
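As a usage sketch of the call_srcu() API being renamed above (struct foo, foo_srcu, foo_free_cb, and foo_retire are hypothetical, not part of this patch):

	#include <linux/slab.h>
	#include <linux/srcu.h>

	struct foo {
		struct rcu_head rh;	/* storage for the SRCU callback */
		int data;
	};

	DEFINE_SRCU(foo_srcu);

	/* Runs from process context once a grace period has elapsed. */
	static void foo_free_cb(struct rcu_head *rhp)
	{
		kfree(container_of(rhp, struct foo, rh));
	}

	/* Retire a foo without waiting: pre-existing readers stay safe. */
	static void foo_retire(struct foo *fp)
	{
		call_srcu(&foo_srcu, &fp->rh, foo_free_cb);
	}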
@@ -939,10 +939,10 @@ static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
 		return;
 	might_sleep();
-	check_init_srcu_struct(sp);
+	check_init_srcu_struct(ssp);
 	init_completion(&rcu.completion);
 	init_rcu_head_on_stack(&rcu.head);
-	__call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm);
+	__call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
 	wait_for_completion(&rcu.completion);
 	destroy_rcu_head_on_stack(&rcu.head);
@@ -958,7 +958,7 @@ static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
 /**
  * synchronize_srcu_expedited - Brute-force SRCU grace period
- * @sp: srcu_struct with which to synchronize.
+ * @ssp: srcu_struct with which to synchronize.
  *
  * Wait for an SRCU grace period to elapse, but be more aggressive about
  * spinning rather than blocking when waiting.
@@ -966,15 +966,15 @@ static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
  * Note that synchronize_srcu_expedited() has the same deadlock and
  * memory-ordering properties as does synchronize_srcu().
  */
-void synchronize_srcu_expedited(struct srcu_struct *sp)
+void synchronize_srcu_expedited(struct srcu_struct *ssp)
 {
-	__synchronize_srcu(sp, rcu_gp_is_normal());
+	__synchronize_srcu(ssp, rcu_gp_is_normal());
 }
 EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
 
 /**
  * synchronize_srcu - wait for prior SRCU read-side critical-section completion
- * @sp: srcu_struct with which to synchronize.
+ * @ssp: srcu_struct with which to synchronize.
  *
  * Wait for the count to drain to zero of both indexes. To avoid the
  * possible starvation of synchronize_srcu(), it waits for the count of
@@ -1016,12 +1016,12 @@ EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
  * SRCU must also provide it.  Note that detecting idleness is heuristic
  * and subject to both false positives and negatives.
  */
-void synchronize_srcu(struct srcu_struct *sp)
+void synchronize_srcu(struct srcu_struct *ssp)
 {
-	if (srcu_might_be_idle(sp) || rcu_gp_is_expedited())
-		synchronize_srcu_expedited(sp);
+	if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
+		synchronize_srcu_expedited(ssp);
 	else
-		__synchronize_srcu(sp, true);
+		__synchronize_srcu(ssp, true);
 }
 EXPORT_SYMBOL_GPL(synchronize_srcu);
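The usual update-side pairing, continuing the hypothetical foo example above (and assuming struct foo also embeds a list_head named list, plus a foo_lock spinlock): unpublish the object, wait out pre-existing readers, then free it.

	static void foo_remove(struct foo *fp)
	{
		spin_lock(&foo_lock);		/* hypothetical update-side lock */
		list_del_rcu(&fp->list);	/* unpublish from the readers' list */
		spin_unlock(&foo_lock);
		synchronize_srcu(&foo_srcu);	/* wait for pre-existing readers */
		kfree(fp);			/* no reader can still hold fp */
	}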
@@ -1031,36 +1031,36 @@ EXPORT_SYMBOL_GPL(synchronize_srcu);
 static void srcu_barrier_cb(struct rcu_head *rhp)
 {
 	struct srcu_data *sdp;
-	struct srcu_struct *sp;
+	struct srcu_struct *ssp;
 
 	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
-	sp = sdp->sp;
-	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
-		complete(&sp->srcu_barrier_completion);
+	ssp = sdp->ssp;
+	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
+		complete(&ssp->srcu_barrier_completion);
 }
 
 /**
  * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
- * @sp: srcu_struct on which to wait for in-flight callbacks.
+ * @ssp: srcu_struct on which to wait for in-flight callbacks.
  */
-void srcu_barrier(struct srcu_struct *sp)
+void srcu_barrier(struct srcu_struct *ssp)
 {
 	int cpu;
 	struct srcu_data *sdp;
-	unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq);
+	unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);
 
-	check_init_srcu_struct(sp);
-	mutex_lock(&sp->srcu_barrier_mutex);
-	if (rcu_seq_done(&sp->srcu_barrier_seq, s)) {
+	check_init_srcu_struct(ssp);
+	mutex_lock(&ssp->srcu_barrier_mutex);
+	if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
 		smp_mb(); /* Force ordering following return. */
-		mutex_unlock(&sp->srcu_barrier_mutex);
+		mutex_unlock(&ssp->srcu_barrier_mutex);
 		return; /* Someone else did our work for us. */
 	}
-	rcu_seq_start(&sp->srcu_barrier_seq);
-	init_completion(&sp->srcu_barrier_completion);
+	rcu_seq_start(&ssp->srcu_barrier_seq);
+	init_completion(&ssp->srcu_barrier_completion);
 
 	/* Initial count prevents reaching zero until all CBs are posted. */
-	atomic_set(&sp->srcu_barrier_cpu_cnt, 1);
+	atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
 
 	/*
 	 * Each pass through this loop enqueues a callback, but only
@@ -1071,39 +1071,39 @@ void srcu_barrier(struct srcu_struct *sp)
 	 * grace period as the last callback already in the queue.
 	 */
 	for_each_possible_cpu(cpu) {
-		sdp = per_cpu_ptr(sp->sda, cpu);
+		sdp = per_cpu_ptr(ssp->sda, cpu);
 		spin_lock_irq_rcu_node(sdp);
-		atomic_inc(&sp->srcu_barrier_cpu_cnt);
+		atomic_inc(&ssp->srcu_barrier_cpu_cnt);
 		sdp->srcu_barrier_head.func = srcu_barrier_cb;
 		debug_rcu_head_queue(&sdp->srcu_barrier_head);
 		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
 					   &sdp->srcu_barrier_head, 0)) {
 			debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
-			atomic_dec(&sp->srcu_barrier_cpu_cnt);
+			atomic_dec(&ssp->srcu_barrier_cpu_cnt);
 		}
 		spin_unlock_irq_rcu_node(sdp);
 	}
 
 	/* Remove the initial count, at which point reaching zero can happen. */
-	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
-		complete(&sp->srcu_barrier_completion);
-	wait_for_completion(&sp->srcu_barrier_completion);
+	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
+		complete(&ssp->srcu_barrier_completion);
+	wait_for_completion(&ssp->srcu_barrier_completion);
 
-	rcu_seq_end(&sp->srcu_barrier_seq);
-	mutex_unlock(&sp->srcu_barrier_mutex);
+	rcu_seq_end(&ssp->srcu_barrier_seq);
+	mutex_unlock(&ssp->srcu_barrier_mutex);
 }
 EXPORT_SYMBOL_GPL(srcu_barrier);
 
 /**
  * srcu_batches_completed - return batches completed.
- * @sp: srcu_struct on which to report batch completion.
+ * @ssp: srcu_struct on which to report batch completion.
  *
  * Report the number of batches, correlated with, but not necessarily
  * precisely the same as, the number of grace periods that have elapsed.
  */
-unsigned long srcu_batches_completed(struct srcu_struct *sp)
+unsigned long srcu_batches_completed(struct srcu_struct *ssp)
 {
-	return sp->srcu_idx;
+	return ssp->srcu_idx;
 }
 EXPORT_SYMBOL_GPL(srcu_batches_completed);
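srcu_barrier() matters chiefly at teardown. A minimal sketch, assuming a dynamically initialized srcu_struct and that the caller has already prevented further call_srcu() invocations (foo_srcu_dyn, foo_init, and foo_exit are hypothetical):

	static struct srcu_struct foo_srcu_dyn;	/* hypothetical dynamic instance */

	static int __init foo_init(void)
	{
		return init_srcu_struct(&foo_srcu_dyn);
	}

	static void __exit foo_exit(void)
	{
		/* No further call_srcu() on foo_srcu_dyn can occur past here. */
		srcu_barrier(&foo_srcu_dyn);		/* flush in-flight callbacks */
		cleanup_srcu_struct(&foo_srcu_dyn);	/* now safe to deconstruct */
	}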
@@ -1112,11 +1112,11 @@ EXPORT_SYMBOL_GPL(srcu_batches_completed);
  * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
  * completed in that state.
  */
-static void srcu_advance_state(struct srcu_struct *sp)
+static void srcu_advance_state(struct srcu_struct *ssp)
 {
 	int idx;
 
-	mutex_lock(&sp->srcu_gp_mutex);
+	mutex_lock(&ssp->srcu_gp_mutex);
 
 	/*
 	 * Because readers might be delayed for an extended period after
@@ -1128,47 +1128,47 @@ static void srcu_advance_state(struct srcu_struct *sp)
 	 * The load-acquire ensures that we see the accesses performed
 	 * by the prior grace period.
 	 */
-	idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */
+	idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
 	if (idx == SRCU_STATE_IDLE) {
-		spin_lock_irq_rcu_node(sp);
-		if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
-			WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));
-			spin_unlock_irq_rcu_node(sp);
-			mutex_unlock(&sp->srcu_gp_mutex);
+		spin_lock_irq_rcu_node(ssp);
+		if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
+			WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
+			spin_unlock_irq_rcu_node(ssp);
+			mutex_unlock(&ssp->srcu_gp_mutex);
 			return;
 		}
-		idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
+		idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
 		if (idx == SRCU_STATE_IDLE)
-			srcu_gp_start(sp);
-		spin_unlock_irq_rcu_node(sp);
+			srcu_gp_start(ssp);
+		spin_unlock_irq_rcu_node(ssp);
 		if (idx != SRCU_STATE_IDLE) {
-			mutex_unlock(&sp->srcu_gp_mutex);
+			mutex_unlock(&ssp->srcu_gp_mutex);
 			return; /* Someone else started the grace period. */
 		}
 	}
 
-	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
-		idx = 1 ^ (sp->srcu_idx & 1);
-		if (!try_check_zero(sp, idx, 1)) {
-			mutex_unlock(&sp->srcu_gp_mutex);
+	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
+		idx = 1 ^ (ssp->srcu_idx & 1);
+		if (!try_check_zero(ssp, idx, 1)) {
+			mutex_unlock(&ssp->srcu_gp_mutex);
 			return; /* readers present, retry later. */
 		}
-		srcu_flip(sp);
-		rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2);
+		srcu_flip(ssp);
+		rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
 	}
 
-	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
+	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
 
 		/*
 		 * SRCU read-side critical sections are normally short,
 		 * so check at least twice in quick succession after a flip.
 		 */
-		idx = 1 ^ (sp->srcu_idx & 1);
-		if (!try_check_zero(sp, idx, 2)) {
-			mutex_unlock(&sp->srcu_gp_mutex);
+		idx = 1 ^ (ssp->srcu_idx & 1);
+		if (!try_check_zero(ssp, idx, 2)) {
+			mutex_unlock(&ssp->srcu_gp_mutex);
 			return; /* readers present, retry later. */
 		}
-		srcu_gp_end(sp);  /* Releases ->srcu_gp_mutex. */
+		srcu_gp_end(ssp);  /* Releases ->srcu_gp_mutex. */
 	}
 }
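In summary, srcu_advance_state() drives the following sequence (names taken from the code above):
SRCU_STATE_IDLE: srcu_gp_start() advances to SRCU_STATE_SCAN1.
SRCU_STATE_SCAN1: try_check_zero() on the inactive index; on success, srcu_flip() and advance to SRCU_STATE_SCAN2.
SRCU_STATE_SCAN2: try_check_zero() again on the newly inactive index; on success, srcu_gp_end() completes the grace period and returns to SRCU_STATE_IDLE.
A failed try_check_zero() leaves the state unchanged; srcu_reschedule() requeues the work item and the scan is retried later.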
@@ -1184,14 +1184,14 @@ static void srcu_invoke_callbacks(struct work_struct *work)
 	struct rcu_cblist ready_cbs;
 	struct rcu_head *rhp;
 	struct srcu_data *sdp;
-	struct srcu_struct *sp;
+	struct srcu_struct *ssp;
 
 	sdp = container_of(work, struct srcu_data, work.work);
-	sp = sdp->sp;
+	ssp = sdp->ssp;
 	rcu_cblist_init(&ready_cbs);
 	spin_lock_irq_rcu_node(sdp);
 	rcu_segcblist_advance(&sdp->srcu_cblist,
-			      rcu_seq_current(&sp->srcu_gp_seq));
+			      rcu_seq_current(&ssp->srcu_gp_seq));
 	if (sdp->srcu_cblist_invoking ||
 	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
 		spin_unlock_irq_rcu_node(sdp);
@@ -1217,7 +1217,7 @@ static void srcu_invoke_callbacks(struct work_struct *work)
 	spin_lock_irq_rcu_node(sdp);
 	rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
 	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
-				       rcu_seq_snap(&sp->srcu_gp_seq));
+				       rcu_seq_snap(&ssp->srcu_gp_seq));
 	sdp->srcu_cblist_invoking = false;
 	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
 	spin_unlock_irq_rcu_node(sdp);
@@ -1229,24 +1229,24 @@ static void srcu_invoke_callbacks(struct work_struct *work)
  * Finished one round of SRCU grace period.  Start another if there are
  * more SRCU callbacks queued, otherwise put SRCU into not-running state.
  */
-static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
+static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
 {
 	bool pushgp = true;
 
-	spin_lock_irq_rcu_node(sp);
-	if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
-		if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
+	spin_lock_irq_rcu_node(ssp);
+	if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
+		if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
 			/* All requests fulfilled, time to go idle. */
 			pushgp = false;
 		}
-	} else if (!rcu_seq_state(sp->srcu_gp_seq)) {
+	} else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
 		/* Outstanding request and no GP.  Start one. */
-		srcu_gp_start(sp);
+		srcu_gp_start(ssp);
 	}
-	spin_unlock_irq_rcu_node(sp);
+	spin_unlock_irq_rcu_node(ssp);
 
 	if (pushgp)
-		queue_delayed_work(rcu_gp_wq, &sp->work, delay);
+		queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
 }
 
 /*
@@ -1254,41 +1254,41 @@ static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
  */
 static void process_srcu(struct work_struct *work)
 {
-	struct srcu_struct *sp;
+	struct srcu_struct *ssp;
 
-	sp = container_of(work, struct srcu_struct, work.work);
+	ssp = container_of(work, struct srcu_struct, work.work);
 
-	srcu_advance_state(sp);
-	srcu_reschedule(sp, srcu_get_delay(sp));
+	srcu_advance_state(ssp);
+	srcu_reschedule(ssp, srcu_get_delay(ssp));
 }
 
 void srcutorture_get_gp_data(enum rcutorture_type test_type,
-			     struct srcu_struct *sp, int *flags,
+			     struct srcu_struct *ssp, int *flags,
 			     unsigned long *gp_seq)
 {
 	if (test_type != SRCU_FLAVOR)
 		return;
 	*flags = 0;
-	*gp_seq = rcu_seq_current(&sp->srcu_gp_seq);
+	*gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
 }
 EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
 
-void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
+void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
 {
 	int cpu;
 	int idx;
 	unsigned long s0 = 0, s1 = 0;
 
-	idx = sp->srcu_idx & 0x1;
+	idx = ssp->srcu_idx & 0x1;
 	pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
-		 tt, tf, rcu_seq_current(&sp->srcu_gp_seq), idx);
+		 tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx);
 	for_each_possible_cpu(cpu) {
 		unsigned long l0, l1;
 		unsigned long u0, u1;
 		long c0, c1;
 		struct srcu_data *sdp;
 
-		sdp = per_cpu_ptr(sp->sda, cpu);
+		sdp = per_cpu_ptr(ssp->sda, cpu);
 		u0 = sdp->srcu_unlock_count[!idx];
 		u1 = sdp->srcu_unlock_count[idx];
@@ -1323,14 +1323,14 @@ early_initcall(srcu_bootup_announce);
 void __init srcu_init(void)
 {
-	struct srcu_struct *sp;
+	struct srcu_struct *ssp;
 
 	srcu_init_done = true;
 	while (!list_empty(&srcu_boot_list)) {
-		sp = list_first_entry(&srcu_boot_list, struct srcu_struct,
+		ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
 				      work.work.entry);
-		check_init_srcu_struct(sp);
-		list_del_init(&sp->work.work.entry);
-		queue_work(rcu_gp_wq, &sp->work.work);
+		check_init_srcu_struct(ssp);
+		list_del_init(&ssp->work.work.entry);
+		queue_work(rcu_gp_wq, &ssp->work.work);
 	}
 }
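srcu_init() is the late stage of the boot-time dance visible in srcu_funnel_gp_start() above: grace periods requested before workqueues are available are parked on srcu_boot_list, and once srcu_init_done is set the pending work items are finally queued on rcu_gp_wq.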