Commit 7d0ae808 authored by Paul E. McKenney

rcu: Convert ACCESS_ONCE() to READ_ONCE() and WRITE_ONCE()

This commit moves from the old ACCESS_ONCE() API to the new READ_ONCE()
and WRITE_ONCE() APIs.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
[ paulmck:  Updated to include kernel/torture.c as suggested by Jason Low. ]
parent 030bbdbf
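
For readers unfamiliar with the two APIs, here is a minimal, scalar-only sketch of what the conversion amounts to. These are illustrative definitions, not the kernel's exact ones (the real READ_ONCE()/WRITE_ONCE() also dispatch on operand size so that non-scalar types work, which ACCESS_ONCE()'s volatile cast could not do reliably):

	/* Old API: a single lvalue macro served both loads and stores. */
	#define ACCESS_ONCE(x)   (*(volatile typeof(x) *)&(x))

	/* New API: loads and stores are split (scalar-only sketch). */
	#define READ_ONCE(x)     (*(const volatile typeof(x) *)&(x))
	#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

The conversion applied throughout the diff below is therefore mechanical:

	r = ACCESS_ONCE(x);    /* becomes */    r = READ_ONCE(x);
	ACCESS_ONCE(x) = v;    /* becomes */    WRITE_ONCE(x, v);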
@@ -29,8 +29,8 @@
  */
 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
 {
-	ACCESS_ONCE(list->next) = list;
-	ACCESS_ONCE(list->prev) = list;
+	WRITE_ONCE(list->next, list);
+	WRITE_ONCE(list->prev, list);
 }
 
 /*
@@ -288,7 +288,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
 #define list_first_or_null_rcu(ptr, type, member) \
 ({ \
 	struct list_head *__ptr = (ptr); \
-	struct list_head *__next = ACCESS_ONCE(__ptr->next); \
+	struct list_head *__next = READ_ONCE(__ptr->next); \
 	likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
 })
@@ -364,8 +364,8 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
 #define rcu_note_voluntary_context_switch(t) \
 	do { \
 		rcu_all_qs(); \
-		if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
-			ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
+		if (READ_ONCE((t)->rcu_tasks_holdout)) \
+			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
 	} while (0)
 #else /* #ifdef CONFIG_TASKS_RCU */
 #define TASKS_RCU(x) do { } while (0)
@@ -609,7 +609,7 @@ static inline void rcu_preempt_sleep_check(void)
 #define __rcu_access_pointer(p, space) \
 ({ \
-	typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
+	typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
 	rcu_dereference_sparse(p, space); \
 	((typeof(*p) __force __kernel *)(_________p1)); \
 })
@@ -630,7 +630,7 @@ static inline void rcu_preempt_sleep_check(void)
 #define __rcu_access_index(p, space) \
 ({ \
-	typeof(p) _________p1 = ACCESS_ONCE(p); \
+	typeof(p) _________p1 = READ_ONCE(p); \
 	rcu_dereference_sparse(p, space); \
 	(_________p1); \
 })
@@ -659,7 +659,7 @@ static inline void rcu_preempt_sleep_check(void)
  */
 #define lockless_dereference(p) \
 ({ \
-	typeof(p) _________p1 = ACCESS_ONCE(p); \
+	typeof(p) _________p1 = READ_ONCE(p); \
 	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
 	(_________p1); \
 })
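
As an aside, a hypothetical use of lockless_dereference() (the names gp, struct foo, and do_something_with() are illustrative, not taken from this commit):

	/* Pick up an RCU-protected pointer; the embedded READ_ONCE()
	 * plus smp_read_barrier_depends() order later dereferences of
	 * p after the pointer load, even on DEC Alpha. */
	struct foo *p = lockless_dereference(gp);

	if (p)
		do_something_with(p->a);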
@@ -702,7 +702,7 @@ static inline void rcu_preempt_sleep_check(void)
  * @p: The pointer to read
  *
  * Return the value of the specified RCU-protected pointer, but omit the
- * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
+ * smp_read_barrier_depends() and keep the READ_ONCE(). This is useful
  * when the value of this pointer is accessed, but the pointer is not
  * dereferenced, for example, when testing an RCU-protected pointer against
  * NULL. Although rcu_access_pointer() may also be used in cases where
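
To make the comment's example concrete, a hypothetical NULL test (gp is an illustrative RCU-protected pointer, not part of this commit):

	/* No dereference follows, so the READ_ONCE() inside
	 * rcu_access_pointer() suffices; no dependency barrier is paid. */
	if (!rcu_access_pointer(gp))
		return -ENOENT;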
@@ -791,7 +791,7 @@ static inline void rcu_preempt_sleep_check(void)
  * @p: The index to read
  *
  * Return the value of the specified RCU-protected index, but omit the
- * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
+ * smp_read_barrier_depends() and keep the READ_ONCE(). This is useful
  * when the value of this index is accessed, but the index is not
  * dereferenced, for example, when testing an RCU-protected index against
  * -1. Although rcu_access_index() may also be used in cases where
@@ -827,7 +827,7 @@ static inline void rcu_preempt_sleep_check(void)
  * @c: The conditions under which the dereference will take place
  *
  * Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This
+ * both the smp_read_barrier_depends() and the READ_ONCE(). This
  * is useful in cases where update-side locks prevent the value of the
  * pointer from changing. Please note that this primitive does -not-
  * prevent the compiler from repeating this reference or combining it
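
And a hypothetical update-side use matching this comment (gp and gp_lock are illustrative names):

	/* gp_lock is held, so gp cannot change out from under us;
	 * neither the volatile load nor the dependency barrier is needed. */
	p = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));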
@@ -1413,7 +1413,7 @@ static int rcu_torture_barrier_cbs(void *arg)
 	do {
 		wait_event(barrier_cbs_wq[myid],
 			   (newphase =
-			    ACCESS_ONCE(barrier_phase)) != lastphase ||
+			    READ_ONCE(barrier_phase)) != lastphase ||
 			   torture_must_stop());
 		lastphase = newphase;
 		smp_mb(); /* ensure barrier_phase load before ->call(). */
@@ -151,7 +151,7 @@ static unsigned long srcu_readers_seq_idx(struct srcu_struct *sp, int idx)
 	unsigned long t;
 
 	for_each_possible_cpu(cpu) {
-		t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->seq[idx]);
+		t = READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->seq[idx]);
 		sum += t;
 	}
 	return sum;
@@ -168,7 +168,7 @@ static unsigned long srcu_readers_active_idx(struct srcu_struct *sp, int idx)
 	unsigned long t;
 
 	for_each_possible_cpu(cpu) {
-		t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]);
+		t = READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]);
 		sum += t;
 	}
 	return sum;
@@ -265,8 +265,8 @@ static int srcu_readers_active(struct srcu_struct *sp)
 	unsigned long sum = 0;
 
 	for_each_possible_cpu(cpu) {
-		sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[0]);
-		sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[1]);
+		sum += READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[0]);
+		sum += READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[1]);
 	}
 	return sum;
 }
@@ -296,7 +296,7 @@ int __srcu_read_lock(struct srcu_struct *sp)
 {
 	int idx;
 
-	idx = ACCESS_ONCE(sp->completed) & 0x1;
+	idx = READ_ONCE(sp->completed) & 0x1;
 	preempt_disable();
 	__this_cpu_inc(sp->per_cpu_ref->c[idx]);
 	smp_mb(); /* B */ /* Avoid leaking the critical section. */
@@ -144,16 +144,17 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
 		return;
 	rcp->ticks_this_gp++;
 	j = jiffies;
-	js = ACCESS_ONCE(rcp->jiffies_stall);
+	js = READ_ONCE(rcp->jiffies_stall);
 	if (rcp->rcucblist && ULONG_CMP_GE(j, js)) {
 		pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n",
 		       rcp->name, rcp->ticks_this_gp, DYNTICK_TASK_EXIT_IDLE,
 		       jiffies - rcp->gp_start, rcp->qlen);
 		dump_stack();
-		ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
-			3 * rcu_jiffies_till_stall_check() + 3;
+		WRITE_ONCE(rcp->jiffies_stall,
+			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
 	} else if (ULONG_CMP_GE(j, js)) {
-		ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
+		WRITE_ONCE(rcp->jiffies_stall,
+			   jiffies + rcu_jiffies_till_stall_check());
 	}
 }
@@ -161,7 +162,8 @@ static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
 {
 	rcp->ticks_this_gp = 0;
 	rcp->gp_start = jiffies;
-	ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
+	WRITE_ONCE(rcp->jiffies_stall,
+		   jiffies + rcu_jiffies_till_stall_check());
 }
 
 static void check_cpu_stalls(void)
(Two larger diffs in this commit are collapsed and not shown in this view.)
@@ -277,7 +277,7 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
 	seq_printf(m, "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n",
 		   rsp->n_force_qs, rsp->n_force_qs_ngp,
 		   rsp->n_force_qs - rsp->n_force_qs_ngp,
-		   ACCESS_ONCE(rsp->n_force_qs_lh), rsp->qlen_lazy, rsp->qlen);
+		   READ_ONCE(rsp->n_force_qs_lh), rsp->qlen_lazy, rsp->qlen);
 	for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < rcu_num_nodes; rnp++) {
 		if (rnp->level != level) {
 			seq_puts(m, "\n");
@@ -323,8 +323,8 @@ static void show_one_rcugp(struct seq_file *m, struct rcu_state *rsp)
 	struct rcu_node *rnp = &rsp->node[0];
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
-	completed = ACCESS_ONCE(rsp->completed);
-	gpnum = ACCESS_ONCE(rsp->gpnum);
+	completed = READ_ONCE(rsp->completed);
+	gpnum = READ_ONCE(rsp->gpnum);
 	if (completed == gpnum)
 		gpage = 0;
 	else
@@ -150,14 +150,14 @@ void __rcu_read_unlock(void)
 		barrier(); /* critical section before exit code. */
 		t->rcu_read_lock_nesting = INT_MIN;
 		barrier(); /* assign before ->rcu_read_unlock_special load */
-		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special.s)))
+		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
 			rcu_read_unlock_special(t);
 		barrier(); /* ->rcu_read_unlock_special load before assign */
 		t->rcu_read_lock_nesting = 0;
 	}
 #ifdef CONFIG_PROVE_LOCKING
 	{
-		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
+		int rrln = READ_ONCE(t->rcu_read_lock_nesting);
 
 		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
 	}
@@ -389,17 +389,17 @@ module_param(rcu_cpu_stall_timeout, int, 0644);
 
 int rcu_jiffies_till_stall_check(void)
 {
-	int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout);
+	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);
 
 	/*
 	 * Limit check must be consistent with the Kconfig limits
 	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
 	 */
 	if (till_stall_check < 3) {
-		ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
+		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
 		till_stall_check = 3;
 	} else if (till_stall_check > 300) {
-		ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
+		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
 		till_stall_check = 300;
 	}
 	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
@@ -550,12 +550,12 @@ static void check_holdout_task(struct task_struct *t,
 {
 	int cpu;
 
-	if (!ACCESS_ONCE(t->rcu_tasks_holdout) ||
-	    t->rcu_tasks_nvcsw != ACCESS_ONCE(t->nvcsw) ||
-	    !ACCESS_ONCE(t->on_rq) ||
+	if (!READ_ONCE(t->rcu_tasks_holdout) ||
+	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
+	    !READ_ONCE(t->on_rq) ||
 	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
 	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
-		ACCESS_ONCE(t->rcu_tasks_holdout) = false;
+		WRITE_ONCE(t->rcu_tasks_holdout, false);
 		list_del_init(&t->rcu_tasks_holdout_list);
 		put_task_struct(t);
 		return;
@@ -639,11 +639,11 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 		 */
 		rcu_read_lock();
 		for_each_process_thread(g, t) {
-			if (t != current && ACCESS_ONCE(t->on_rq) &&
+			if (t != current && READ_ONCE(t->on_rq) &&
 			    !is_idle_task(t)) {
 				get_task_struct(t);
-				t->rcu_tasks_nvcsw = ACCESS_ONCE(t->nvcsw);
-				ACCESS_ONCE(t->rcu_tasks_holdout) = true;
+				t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
+				WRITE_ONCE(t->rcu_tasks_holdout, true);
 				list_add(&t->rcu_tasks_holdout_list,
 					 &rcu_tasks_holdouts);
 			}
@@ -672,7 +672,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 			struct task_struct *t1;
 
 			schedule_timeout_interruptible(HZ);
-			rtst = ACCESS_ONCE(rcu_task_stall_timeout);
+			rtst = READ_ONCE(rcu_task_stall_timeout);
 			needreport = rtst > 0 &&
 				     time_after(jiffies, lastreport + rtst);
 			if (needreport)
@@ -728,7 +728,7 @@ static void rcu_spawn_tasks_kthread(void)
 	static struct task_struct *rcu_tasks_kthread_ptr;
 	struct task_struct *t;
 
-	if (ACCESS_ONCE(rcu_tasks_kthread_ptr)) {
+	if (READ_ONCE(rcu_tasks_kthread_ptr)) {
 		smp_mb(); /* Ensure caller sees full kthread. */
 		return;
 	}
@@ -740,7 +740,7 @@ static void rcu_spawn_tasks_kthread(void)
 	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
 	BUG_ON(IS_ERR(t));
 	smp_mb(); /* Ensure others see full kthread. */
-	ACCESS_ONCE(rcu_tasks_kthread_ptr) = t;
+	WRITE_ONCE(rcu_tasks_kthread_ptr, t);
 	mutex_unlock(&rcu_tasks_kthread_mutex);
 }
@@ -409,7 +409,7 @@ static void (*torture_shutdown_hook)(void);
  */
 void torture_shutdown_absorb(const char *title)
 {
-	while (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
+	while (READ_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
 		pr_notice("torture thread %s parking due to system shutdown\n",
 			  title);
 		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
@@ -480,9 +480,9 @@ static int torture_shutdown_notify(struct notifier_block *unused1,
 				   unsigned long unused2, void *unused3)
 {
 	mutex_lock(&fullstop_mutex);
-	if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
+	if (READ_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
 		VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
-		ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
+		WRITE_ONCE(fullstop, FULLSTOP_SHUTDOWN);
 	} else {
 		pr_warn("Concurrent rmmod and shutdown illegal!\n");
 	}
@@ -523,13 +523,13 @@ static int stutter;
  */
 void stutter_wait(const char *title)
 {
-	while (ACCESS_ONCE(stutter_pause_test) ||
-	       (torture_runnable && !ACCESS_ONCE(*torture_runnable))) {
+	while (READ_ONCE(stutter_pause_test) ||
+	       (torture_runnable && !READ_ONCE(*torture_runnable))) {
 		if (stutter_pause_test)
-			if (ACCESS_ONCE(stutter_pause_test) == 1)
+			if (READ_ONCE(stutter_pause_test) == 1)
 				schedule_timeout_interruptible(1);
 			else
-				while (ACCESS_ONCE(stutter_pause_test))
+				while (READ_ONCE(stutter_pause_test))
 					cond_resched();
 		else
 			schedule_timeout_interruptible(round_jiffies_relative(HZ));
@@ -549,14 +549,14 @@ static int torture_stutter(void *arg)
 		if (!torture_must_stop()) {
 			if (stutter > 1) {
 				schedule_timeout_interruptible(stutter - 1);
-				ACCESS_ONCE(stutter_pause_test) = 2;
+				WRITE_ONCE(stutter_pause_test, 2);
 			}
 			schedule_timeout_interruptible(1);
-			ACCESS_ONCE(stutter_pause_test) = 1;
+			WRITE_ONCE(stutter_pause_test, 1);
 		}
 		if (!torture_must_stop())
 			schedule_timeout_interruptible(stutter);
-		ACCESS_ONCE(stutter_pause_test) = 0;
+		WRITE_ONCE(stutter_pause_test, 0);
 		torture_shutdown_absorb("torture_stutter");
 	} while (!torture_must_stop());
 	torture_kthread_stopping("torture_stutter");
@@ -642,13 +642,13 @@ EXPORT_SYMBOL_GPL(torture_init_end);
 bool torture_cleanup_begin(void)
 {
 	mutex_lock(&fullstop_mutex);
-	if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
+	if (READ_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
 		pr_warn("Concurrent rmmod and shutdown illegal!\n");
 		mutex_unlock(&fullstop_mutex);
 		schedule_timeout_uninterruptible(10);
 		return true;
 	}
-	ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
+	WRITE_ONCE(fullstop, FULLSTOP_RMMOD);
 	mutex_unlock(&fullstop_mutex);
 	torture_shutdown_cleanup();
 	torture_shuffle_cleanup();
@@ -681,7 +681,7 @@ EXPORT_SYMBOL_GPL(torture_must_stop);
  */
 bool torture_must_stop_irq(void)
 {
-	return ACCESS_ONCE(fullstop) != FULLSTOP_DONTSTOP;
+	return READ_ONCE(fullstop) != FULLSTOP_DONTSTOP;
 }
 EXPORT_SYMBOL_GPL(torture_must_stop_irq);