Commit 2f6560e6 authored by Nicholas Piggin, committed by Michael Ellerman

powerpc/qspinlock: Optimised atomic_try_cmpxchg_lock() that adds the lock hint

This brings the behaviour of the uncontended fast path back to being roughly
equivalent to simple spinlocks -- a single atomic op with a lock hint.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Acked-by: Waiman Long <longman@redhat.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200724131423.1362108-6-npiggin@gmail.com
parent 20c0e826
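For context: the generic fast path this change replaces is a plain try-cmpxchg with acquire ordering, which gives the CPU no indication that the access is acquiring a lock. Below is a minimal userspace sketch of that pattern, using GCC's __atomic builtins purely for illustration; it is not the kernel's generic fallback verbatim, and the function name is made up for this sketch.

#include <stdbool.h>

/*
 * Illustrative stand-in for a try-cmpxchg-acquire: a compare-and-swap with
 * acquire ordering on success.  At this level there is no way to pass the
 * lwarx "EH" lock-acquisition hint, which is exactly what the powerpc
 * helper added in the diff below provides.
 */
static inline bool try_cmpxchg_acquire_sketch(int *v, int *old, int new)
{
	return __atomic_compare_exchange_n(v, old, new, false,
					   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}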
@@ -193,6 +193,34 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
/*
* Don't want to override the generic atomic_try_cmpxchg_acquire, because
* we add a lock hint to the lwarx, which may not be wanted for the
* _acquire case (and is not used by the other _acquire variants so it
* would be a surprise).
*/
static __always_inline bool
atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
{
	int r, o = *old;

	__asm__ __volatile__ (
"1:\t" PPC_LWARX(%0,0,%2,1) "	# atomic_try_cmpxchg_acquire	\n"
"	cmpw	0,%0,%3		\n"
"	bne-	2f		\n"
"	stwcx.	%4,0,%2		\n"
"	bne-	1b		\n"
"\t" PPC_ACQUIRE_BARRIER "	\n"
"2:				\n"
	: "=&r" (r), "+m" (v->counter)
	: "r" (&v->counter), "r" (o), "r" (new)
	: "cr0", "memory");

	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
}
/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
...
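One detail worth noting in the helper above is the try_cmpxchg calling convention it preserves: on failure, *old is updated with the value actually observed by the lwarx, so the caller already holds the current lock word and does not need to re-read it. A small standalone C11 demo of that convention follows; atomic_compare_exchange_strong behaves the same way. This is illustrative userspace code, not kernel code.

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_int lock = 42;	/* pretend another CPU already holds the lock */
	int expected = 0;

	/* Fails because the lock word is 42, and writes 42 back into 'expected'. */
	if (!atomic_compare_exchange_strong(&lock, &expected, 1))
		printf("fast path failed, observed lock word %d\n", expected);

	return 0;
}

This is why queued_spin_lock() below can pass val straight to the slowpath after a failed fast-path attempt.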
@@ -37,7 +37,7 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val = 0;

-	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
+	if (likely(atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	queued_spin_lock_slowpath(lock, val);
...
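The rename in the fast path raises an obvious question for architectures that only provide the generic _acquire variant. A hedged sketch of how a generic header could map the new name back to the plain acquire form where no lock-hinted helper exists; the exact define is an assumption for illustration, not something visible in the hunks above:

/* Fallback sketch: architectures without a lock-hinted try-cmpxchg keep
 * using the ordinary acquire variant, so queued_spin_lock() still builds.
 */
#ifndef atomic_try_cmpxchg_lock
#define atomic_try_cmpxchg_lock(l, o, n)	atomic_try_cmpxchg_acquire(l, o, n)
#endif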