Commit b89aa12c authored by Vineet Gupta

ARCv2: spinlock/rwlock: Reset retry delay when starting a new spin-wait cycle

The previous commit, which added delayed retry of SCOND, needs some fine-tuning
for spin locks.

The backoff from delayed retry, in conjunction with the spin looping on the
lock itself, can potentially cause the delay counter to reach high values.
So, to provide fairness to any lock operation, once the lock "seems"
available (i.e. just before the first SCOND try), reset the delay counter
back to its starting value of 1.

Essentially reset delay to 1 for a new spin-wait-loop-acquire cycle.
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
parent e78fdfef
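
For readers unfamiliar with the LLOCK/SCOND spin-wait pattern, here is a minimal C sketch of the control flow the inline assembly below implements, and of what this commit changes. It is an illustration only, not the kernel implementation: llock_sketch(), scond_sketch(), cpu_relax_sketch() and LOCKED_VAL are hypothetical stand-ins for the ARCv2 LLOCK/SCOND primitives and the lock value.

    /* Illustrative sketch only - not the kernel code. */
    #include <stdbool.h>

    #define LOCKED_VAL	1U

    /* hypothetical models of LLOCK / SCOND / cpu_relax() */
    static inline unsigned int llock_sketch(volatile unsigned int *p) { return *p; }
    static inline bool scond_sketch(volatile unsigned int *p, unsigned int v)
    {
    	*p = v;			/* a real SCOND may fail if the reservation was lost */
    	return true;
    }
    static inline void cpu_relax_sketch(void) { }

    static void spin_lock_sketch(volatile unsigned int *slock)
    {
    	unsigned int delay, val;

    	for (;;) {
    		delay = 1;		/* label "0:": new spin-wait cycle, backoff reset */

    		for (;;) {
    			val = llock_sketch(slock);		/* label "1:": LLOCK */

    			if (val == LOCKED_VAL)			/* lock held */
    				break;				/* -> back to "0:", delay reset */

    			if (scond_sketch(slock, LOCKED_VAL))	/* SCOND: try to acquire */
    				return;				/* label "4:": done */

    			/* SCOND failed: back off, double delay, retry LLOCK at "1:" */
    			for (unsigned int t = delay; t; t--)
    				cpu_relax_sketch();
    			delay <<= 1;
    		}
    	}
    }

The change below corresponds to the break back to the outer loop (the branch target becoming 0b instead of 1b): whenever the lock is observed held, the next acquire cycle starts with the backoff delay reset to 1 rather than whatever large value a previous run of failed SCONDs left behind.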
@@ -279,7 +279,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	__asm__ __volatile__(
 	"0:	mov	%[delay], 1		\n"
 	"1:	llock	%[val], [%[slock]]	\n"
-	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
+	"	breq	%[val], %[LOCKED], 0b	\n"	/* spin while LOCKED */
 	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
 	"	bz	4f			\n"	/* done */
 	"					\n"
@@ -358,7 +358,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 	__asm__ __volatile__(
 	"0:	mov	%[delay], 1		\n"
 	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
+	"	brls	%[val], %[WR_LOCKED], 0b\n"	/* <= 0: spin while write locked */
 	"	sub	%[val], %[val], 1	\n"	/* reader lock */
 	"	scond	%[val], [%[rwlock]]	\n"
 	"	bz	4f			\n"	/* done */
@@ -427,7 +427,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 	__asm__ __volatile__(
 	"0:	mov	%[delay], 1		\n"
 	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
+	"	brne	%[val], %[UNLOCKED], 0b	\n"	/* while !UNLOCKED spin */
 	"	mov	%[val], %[WR_LOCKED]	\n"
 	"	scond	%[val], [%[rwlock]]	\n"
 	"	bz	4f			\n"