Commit 4d3199e4 authored by Davidlohr Bueso, committed by Ingo Molnar

locking: Remove ACCESS_ONCE() usage

With the new standardized functions, we can replace all
ACCESS_ONCE() calls across the relevant locking code; while
at it, this also covers lockref and seqlock.

ACCESS_ONCE() does not work reliably on non-scalar types.
For example, gcc 4.6 and 4.7 might remove the volatile tag
for such accesses during the SRA (scalar replacement of
aggregates) step:

  https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145

Convert to the new calls regardless of whether the type is
scalar; this is cleaner than having three alternatives.
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/1424662301.6539.18.camel@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 2ae79026
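
Background illustration (not part of the commit): a minimal, self-contained user-space sketch of the calling-convention change the diff below applies. The macro definitions here are simplified stand-ins and struct mcs_node is a hypothetical example type; the kernel's real READ_ONCE()/WRITE_ONCE() live in include/linux/compiler.h and, unlike ACCESS_ONCE(), also handle non-scalar types through size-aware helpers, which is what sidesteps the gcc SRA issue cited above.

#include <stdio.h>

/*
 * Simplified stand-ins for illustration only -- NOT the kernel definitions.
 * They merely show the old vs. new calling convention being converted below.
 */
#define ACCESS_ONCE(x)          (*(volatile __typeof__(x) *)&(x))
#define READ_ONCE(x)            ACCESS_ONCE(x)
#define WRITE_ONCE(x, val)      (ACCESS_ONCE(x) = (val))

struct mcs_node {               /* hypothetical node, mirroring the MCS usage */
        struct mcs_node *next;
        int locked;
};

int main(void)
{
        struct mcs_node prev = { 0 }, node = { 0 };
        struct mcs_node *next;

        WRITE_ONCE(prev.next, &node);   /* was: ACCESS_ONCE(prev.next) = &node; */
        next = READ_ONCE(prev.next);    /* was: next = ACCESS_ONCE(prev.next);  */
        WRITE_ONCE(node.locked, 1);     /* was: ACCESS_ONCE(node.locked) = 1;   */

        printf("next == &node: %d, locked = %d\n",
               next == &node, READ_ONCE(node.locked));
        return 0;
}
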
@@ -108,7 +108,7 @@ static inline unsigned __read_seqcount_begin(const seqcount_t *s)
         unsigned ret;
 
 repeat:
-        ret = ACCESS_ONCE(s->sequence);
+        ret = READ_ONCE(s->sequence);
         if (unlikely(ret & 1)) {
                 cpu_relax();
                 goto repeat;
@@ -127,7 +127,7 @@ static inline unsigned __read_seqcount_begin(const seqcount_t *s)
  */
 static inline unsigned raw_read_seqcount(const seqcount_t *s)
 {
-        unsigned ret = ACCESS_ONCE(s->sequence);
+        unsigned ret = READ_ONCE(s->sequence);
         smp_rmb();
         return ret;
 }
@@ -179,7 +179,7 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s)
  */
 static inline unsigned raw_seqcount_begin(const seqcount_t *s)
 {
-        unsigned ret = ACCESS_ONCE(s->sequence);
+        unsigned ret = READ_ONCE(s->sequence);
         smp_rmb();
         return ret & ~1;
 }
......
@@ -78,7 +78,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
                  */
                 return;
         }
-        ACCESS_ONCE(prev->next) = node;
+        WRITE_ONCE(prev->next, node);
 
         /* Wait until the lock holder passes the lock down. */
         arch_mcs_spin_lock_contended(&node->locked);
@@ -91,7 +91,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 static inline
 void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 {
-        struct mcs_spinlock *next = ACCESS_ONCE(node->next);
+        struct mcs_spinlock *next = READ_ONCE(node->next);
 
         if (likely(!next)) {
                 /*
@@ -100,7 +100,7 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
                 if (likely(cmpxchg(lock, node, NULL) == node))
                         return;
                 /* Wait until the next pointer is set */
-                while (!(next = ACCESS_ONCE(node->next)))
+                while (!(next = READ_ONCE(node->next)))
                         cpu_relax_lowlatency();
         }
......
@@ -266,7 +266,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
                 return 0;
 
         rcu_read_lock();
-        owner = ACCESS_ONCE(lock->owner);
+        owner = READ_ONCE(lock->owner);
         if (owner)
                 retval = owner->on_cpu;
         rcu_read_unlock();
@@ -340,7 +340,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
                          * As such, when deadlock detection needs to be
                          * performed the optimistic spinning cannot be done.
                          */
-                        if (ACCESS_ONCE(ww->ctx))
+                        if (READ_ONCE(ww->ctx))
                                 break;
                 }
 
@@ -348,7 +348,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
                  * If there's an owner, wait for it to either
                  * release the lock or go to sleep.
                  */
-                owner = ACCESS_ONCE(lock->owner);
+                owner = READ_ONCE(lock->owner);
                 if (owner && !mutex_spin_on_owner(lock, owner))
                         break;
 
@@ -487,7 +487,7 @@ static inline int __sched
 __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
 {
         struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
-        struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
+        struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
 
         if (!hold_ctx)
                 return 0;
......
@@ -98,7 +98,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 
         prev = decode_cpu(old);
         node->prev = prev;
-        ACCESS_ONCE(prev->next) = node;
+        WRITE_ONCE(prev->next, node);
 
         /*
          * Normally @prev is untouchable after the above store; because at that
@@ -109,7 +109,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
          * cmpxchg in an attempt to undo our queueing.
          */
 
-        while (!ACCESS_ONCE(node->locked)) {
+        while (!READ_ONCE(node->locked)) {
                 /*
                  * If we need to reschedule bail... so we can block.
                  */
@@ -148,7 +148,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
                  * Or we race against a concurrent unqueue()'s step-B, in which
                  * case its step-C will write us a new @node->prev pointer.
                  */
-                prev = ACCESS_ONCE(node->prev);
+                prev = READ_ONCE(node->prev);
         }
 
         /*
@@ -170,8 +170,8 @@ bool osq_lock(struct optimistic_spin_queue *lock)
          * it will wait in Step-A.
          */
 
-        ACCESS_ONCE(next->prev) = prev;
-        ACCESS_ONCE(prev->next) = next;
+        WRITE_ONCE(next->prev, prev);
+        WRITE_ONCE(prev->next, next);
 
         return false;
 }
@@ -193,11 +193,11 @@ void osq_unlock(struct optimistic_spin_queue *lock)
         node = this_cpu_ptr(&osq_node);
         next = xchg(&node->next, NULL);
         if (next) {
-                ACCESS_ONCE(next->locked) = 1;
+                WRITE_ONCE(next->locked, 1);
                 return;
         }
 
         next = osq_wait_next(lock, node, NULL);
         if (next)
-                ACCESS_ONCE(next->locked) = 1;
+                WRITE_ONCE(next->locked, 1);
 }
@@ -279,7 +279,7 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
  */
 static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 {
-        long old, count = ACCESS_ONCE(sem->count);
+        long old, count = READ_ONCE(sem->count);
 
         while (true) {
                 if (!(count == 0 || count == RWSEM_WAITING_BIAS))
@@ -304,9 +304,9 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
                 return false;
 
         rcu_read_lock();
-        owner = ACCESS_ONCE(sem->owner);
+        owner = READ_ONCE(sem->owner);
         if (!owner) {
-                long count = ACCESS_ONCE(sem->count);
+                long count = READ_ONCE(sem->count);
                 /*
                  * If sem->owner is not set, yet we have just recently entered the
                  * slowpath with the lock being active, then there is a possibility
@@ -385,7 +385,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
                 goto done;
 
         while (true) {
-                owner = ACCESS_ONCE(sem->owner);
+                owner = READ_ONCE(sem->owner);
                 if (owner && !rwsem_spin_on_owner(sem, owner))
                         break;
 
@@ -459,7 +459,7 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 
         /* we're now waiting on the lock, but no longer actively locking */
         if (waiting) {
-                count = ACCESS_ONCE(sem->count);
+                count = READ_ONCE(sem->count);
 
                 /*
                  * If there were already threads queued before us and there are
......
@@ -18,7 +18,7 @@
 #define CMPXCHG_LOOP(CODE, SUCCESS) do { \
         struct lockref old; \
         BUILD_BUG_ON(sizeof(old) != 8); \
-        old.lock_count = ACCESS_ONCE(lockref->lock_count); \
+        old.lock_count = READ_ONCE(lockref->lock_count); \
         while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \
                 struct lockref new = old, prev = old; \
                 CODE \
......