Commit 53bf57fa authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar

locking/qspinlock: Re-order code

Flip the branch condition after atomic_fetch_or_acquire(_Q_PENDING_VAL)
such that we lose the indent. This also results in a more natural code
flow IMO.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: andrea.parri@amarulasolutions.com
Cc: longman@redhat.com
Link: https://lkml.kernel.org/r/20181003130257.156322446@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent ec57e2f0
...@@ -330,11 +330,19 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) ...@@ -330,11 +330,19 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
* 0,0,1 -> 0,1,1 ; pending * 0,0,1 -> 0,1,1 ; pending
*/ */
val = atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val); val = atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
if (!(val & ~_Q_LOCKED_MASK)) { /*
* If we observe any contention; undo and queue.
*/
if (unlikely(val & ~_Q_LOCKED_MASK)) {
if (!(val & _Q_PENDING_MASK))
clear_pending(lock);
goto queue;
}
/* /*
* We're pending, wait for the owner to go away. * We're pending, wait for the owner to go away.
* *
* *,1,1 -> *,1,0 * 0,1,1 -> 0,1,0
* *
* this wait loop must be a load-acquire such that we match the * this wait loop must be a load-acquire such that we match the
* store-release that clears the locked bit and create lock * store-release that clears the locked bit and create lock
...@@ -342,27 +350,17 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) ...@@ -342,27 +350,17 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
* clear_pending_set_locked() implementations imply full * clear_pending_set_locked() implementations imply full
* barriers. * barriers.
*/ */
if (val & _Q_LOCKED_MASK) { if (val & _Q_LOCKED_MASK)
atomic_cond_read_acquire(&lock->val, atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_MASK));
!(VAL & _Q_LOCKED_MASK));
}
/* /*
* take ownership and clear the pending bit. * take ownership and clear the pending bit.
* *
* *,1,0 -> *,0,1 * 0,1,0 -> 0,0,1
*/ */
clear_pending_set_locked(lock); clear_pending_set_locked(lock);
qstat_inc(qstat_lock_pending, true); qstat_inc(qstat_lock_pending, true);
return; return;
}
/*
* If pending was clear but there are waiters in the queue, then
* we need to undo our setting of pending before we queue ourselves.
*/
if (!(val & _Q_PENDING_MASK))
clear_pending(lock);
/* /*
* End of pending bit optimistic spinning and beginning of MCS * End of pending bit optimistic spinning and beginning of MCS
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment