Commit 81a43ada authored by Davidlohr Bueso, committed by Ingo Molnar

locking/mutex: Use acquire/release semantics

As of 654672d4 (locking/atomics: Add _{acquire|release|relaxed}()
variants of some atomic operations) and 6d79ef2d (locking, asm-generic:
Add _{relaxed|acquire|release}() variants for 'atomic_long_t'), weakly
ordered archs can benefit from more relaxed use of barriers when locking
and unlocking, instead of regular full barrier semantics. While currently
only arm64 supports such optimizations, updating corresponding locking
primitives serves for other archs to immediately benefit as well, once the
necessary machinery is implemented of course.
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/1443643395-17016-3-git-send-email-dave@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 63ab7bd0
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
/*
 * __mutex_fastpath_lock - try to take the lock by decrementing the count
 * @count: mutex count, 1 means unlocked
 * @fail_fn: slowpath function to call if the count became negative
 *
 * Decrement @count; if the result is negative the mutex was contended
 * and @fail_fn (the slowpath) is invoked.  ACQUIRE ordering on the
 * decrement pairs with the RELEASE in the unlock path, per the commit
 * message above ("acquire/release semantics").
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return_acquire(count) < 0))
		fail_fn(count);
}
...@@ -35,7 +35,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) ...@@ -35,7 +35,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
/*
 * __mutex_fastpath_lock_retval - try to take the lock by decrementing the count
 * @count: mutex count, 1 means unlocked
 *
 * Returns 0 on success, -1 if the fastpath failed (count went negative,
 * i.e. the mutex was contended).  Uses an ACQUIRE decrement so the
 * critical section cannot leak above the lock operation.
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count)
{
	if (unlikely(atomic_dec_return_acquire(count) < 0))
		return -1;
	return 0;
}
...@@ -56,7 +56,7 @@ __mutex_fastpath_lock_retval(atomic_t *count) ...@@ -56,7 +56,7 @@ __mutex_fastpath_lock_retval(atomic_t *count)
/*
 * __mutex_fastpath_unlock - release the lock by incrementing the count
 * @count: mutex count
 * @fail_fn: slowpath function to call if the count did not become positive
 *
 * Increment @count; if the result is <= 0 there are waiters and the
 * slowpath @fail_fn must wake them.  RELEASE ordering on the increment
 * pairs with the ACQUIRE in the lock paths.
 */
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_inc_return_release(count) <= 0))
		fail_fn(count);
}
...@@ -80,7 +80,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) ...@@ -80,7 +80,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
/*
 * __mutex_fastpath_trylock - try to acquire the mutex without blocking
 * @count: mutex count, 1 means unlocked
 * @fail_fn: fallback (unused in the fastpath success case)
 *
 * Returns 1 if the lock was obtained (count moved 1 -> 0), 0 otherwise.
 * The cmpxchg uses ACQUIRE ordering; a failed trylock needs no ordering.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	if (likely(atomic_cmpxchg_acquire(count, 1, 0) == 1))
		return 1;
	return 0;
}
......
...@@ -31,7 +31,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) ...@@ -31,7 +31,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
* to ensure that any waiting tasks are woken up by the * to ensure that any waiting tasks are woken up by the
* unlock slow path. * unlock slow path.
*/ */
if (likely(atomic_xchg(count, -1) != 1)) if (likely(atomic_xchg_acquire(count, -1) != 1))
fail_fn(count); fail_fn(count);
} }
...@@ -46,7 +46,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) ...@@ -46,7 +46,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
static inline int static inline int
__mutex_fastpath_lock_retval(atomic_t *count) __mutex_fastpath_lock_retval(atomic_t *count)
{ {
if (unlikely(atomic_xchg(count, 0) != 1)) if (unlikely(atomic_xchg_acquire(count, 0) != 1))
if (likely(atomic_xchg(count, -1) != 1)) if (likely(atomic_xchg(count, -1) != 1))
return -1; return -1;
return 0; return 0;
...@@ -67,7 +67,7 @@ __mutex_fastpath_lock_retval(atomic_t *count) ...@@ -67,7 +67,7 @@ __mutex_fastpath_lock_retval(atomic_t *count)
/*
 * __mutex_fastpath_unlock - release the lock (xchg-based variant)
 * @count: mutex count
 * @fail_fn: slowpath function to call if the old count was not 0
 *
 * Swap the count to 1 (unlocked); if the previous value was not 0
 * there are waiters, so call @fail_fn to run the wakeup slowpath.
 * RELEASE ordering on the xchg pairs with the ACQUIRE in the lock paths.
 */
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_xchg_release(count, 1) != 0))
		fail_fn(count);
}
...@@ -91,7 +91,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) ...@@ -91,7 +91,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
static inline int static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{ {
int prev = atomic_xchg(count, 0); int prev = atomic_xchg_acquire(count, 0);
if (unlikely(prev < 0)) { if (unlikely(prev < 0)) {
/* /*
...@@ -105,7 +105,7 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) ...@@ -105,7 +105,7 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
* owner's unlock path needlessly, but that's not a problem * owner's unlock path needlessly, but that's not a problem
* in practice. ] * in practice. ]
*/ */
prev = atomic_xchg(count, prev); prev = atomic_xchg_acquire(count, prev);
if (prev < 0) if (prev < 0)
prev = 0; prev = 0;
} }
......
...@@ -277,7 +277,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock) ...@@ -277,7 +277,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
static inline bool mutex_try_to_acquire(struct mutex *lock) static inline bool mutex_try_to_acquire(struct mutex *lock)
{ {
return !mutex_is_locked(lock) && return !mutex_is_locked(lock) &&
(atomic_cmpxchg(&lock->count, 1, 0) == 1); (atomic_cmpxchg_acquire(&lock->count, 1, 0) == 1);
} }
/* /*
...@@ -529,7 +529,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, ...@@ -529,7 +529,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
* Once more, try to acquire the lock. Only try-lock the mutex if * Once more, try to acquire the lock. Only try-lock the mutex if
* it is unlocked to reduce unnecessary xchg() operations. * it is unlocked to reduce unnecessary xchg() operations.
*/ */
if (!mutex_is_locked(lock) && (atomic_xchg(&lock->count, 0) == 1)) if (!mutex_is_locked(lock) &&
(atomic_xchg_acquire(&lock->count, 0) == 1))
goto skip_wait; goto skip_wait;
debug_mutex_lock_common(lock, &waiter); debug_mutex_lock_common(lock, &waiter);
...@@ -553,7 +554,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, ...@@ -553,7 +554,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
* non-negative in order to avoid unnecessary xchg operations: * non-negative in order to avoid unnecessary xchg operations:
*/ */
if (atomic_read(&lock->count) >= 0 && if (atomic_read(&lock->count) >= 0 &&
(atomic_xchg(&lock->count, -1) == 1)) (atomic_xchg_acquire(&lock->count, -1) == 1))
break; break;
/* /*
...@@ -867,7 +868,7 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count) ...@@ -867,7 +868,7 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
spin_lock_mutex(&lock->wait_lock, flags); spin_lock_mutex(&lock->wait_lock, flags);
prev = atomic_xchg(&lock->count, -1); prev = atomic_xchg_acquire(&lock->count, -1);
if (likely(prev == 1)) { if (likely(prev == 1)) {
mutex_set_owner(lock); mutex_set_owner(lock);
mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment