Commit 2e927c64 authored by Michal Hocko, committed by Ingo Molnar

locking/rwsem: Drop explicit memory barriers

sh and xtensa seem to be the only architectures which use explicit
memory barriers for rw_semaphore operations even though they are not
really needed: a full memory barrier is always implied by
atomic_{inc,dec,add,sub}_return() and by cmpxchg(). Remove them.
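
Illustration for this changelog (a sketch, not part of the patch; the
comments paraphrase Documentation/memory-barriers.txt): any
value-returning atomic acts as a full memory barrier, as if smp_mb()
were issued both before and after the operation, so a neighbouring
smp_wmb() cannot add any ordering that is not already guaranteed:

	/*
	 * Sketch of the sh __down_read() fast path after this patch;
	 * only the comments are added here.
	 */
	static inline void __down_read(struct rw_semaphore *sem)
	{
		/*
		 * atomic_inc_return() implies a full smp_mb() before
		 * and after the increment, so accesses to the data the
		 * lock protects cannot be reordered before the lock is
		 * taken ...
		 */
		if (atomic_inc_return((atomic_t *)(&sem->count)) <= 0)
			rwsem_down_read_failed(sem);
		/*
		 * ... which is strictly stronger than the dropped
		 * smp_wmb(): smp_wmb() orders stores only, while the
		 * implied full barrier orders loads and stores alike.
		 */
	}
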
Signed-off-by: Michal Hocko <mhocko@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Chris Zankel <chris@zankel.net>
Cc: David S. Miller <davem@davemloft.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Davidlohr Bueso <dbueso@suse.de>
Cc: Jason Low <jason.low2@hp.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: linux-alpha@vger.kernel.org
Cc: linux-arch@vger.kernel.org
Cc: linux-ia64@vger.kernel.org
Cc: linux-s390@vger.kernel.org
Cc: linux-sh@vger.kernel.org
Cc: linux-xtensa@linux-xtensa.org
Cc: sparclinux@vger.kernel.org
Link: http://lkml.kernel.org/r/1460041951-22347-3-git-send-email-mhocko@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent f8e04d85
--- a/arch/sh/include/asm/rwsem.h
+++ b/arch/sh/include/asm/rwsem.h
@@ -24,9 +24,7 @@
  */
 static inline void __down_read(struct rw_semaphore *sem)
 {
-	if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
-		smp_wmb();
-	else
+	if (atomic_inc_return((atomic_t *)(&sem->count)) <= 0)
 		rwsem_down_read_failed(sem);
 }
 
@@ -37,7 +35,6 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 	while ((tmp = sem->count) >= 0) {
 		if (tmp == cmpxchg(&sem->count, tmp,
 				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
-			smp_wmb();
 			return 1;
 		}
 	}
@@ -53,9 +50,7 @@ static inline void __down_write(struct rw_semaphore *sem)
 
 	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
 				(atomic_t *)(&sem->count));
-	if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
-		smp_wmb();
-	else
+	if (tmp != RWSEM_ACTIVE_WRITE_BIAS)
 		rwsem_down_write_failed(sem);
 }
 
@@ -65,7 +60,6 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
 
 	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
 		      RWSEM_ACTIVE_WRITE_BIAS);
-	smp_wmb();
 	return tmp == RWSEM_UNLOCKED_VALUE;
 }
 
@@ -76,7 +70,6 @@ static inline void __up_read(struct rw_semaphore *sem)
 {
 	int tmp;
 
-	smp_wmb();
 	tmp = atomic_dec_return((atomic_t *)(&sem->count));
 	if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
 		rwsem_wake(sem);
@@ -87,7 +80,6 @@ static inline void __up_read(struct rw_semaphore *sem)
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
-	smp_wmb();
 	if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
 			      (atomic_t *)(&sem->count)) < 0)
 		rwsem_wake(sem);
@@ -108,7 +100,6 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 {
 	int tmp;
 
-	smp_wmb();
 	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
 	if (tmp < 0)
 		rwsem_downgrade_wake(sem);
@@ -119,7 +110,6 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
  */
 static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
 {
-	smp_mb();
 	return atomic_add_return(delta, (atomic_t *)(&sem->count));
 }
--- a/arch/xtensa/include/asm/rwsem.h
+++ b/arch/xtensa/include/asm/rwsem.h
@@ -29,9 +29,7 @@
  */
 static inline void __down_read(struct rw_semaphore *sem)
 {
-	if (atomic_add_return(1,(atomic_t *)(&sem->count)) > 0)
-		smp_wmb();
-	else
+	if (atomic_add_return(1,(atomic_t *)(&sem->count)) <= 0)
 		rwsem_down_read_failed(sem);
 }
 
@@ -42,7 +40,6 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 	while ((tmp = sem->count) >= 0) {
 		if (tmp == cmpxchg(&sem->count, tmp,
 				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
-			smp_wmb();
 			return 1;
 		}
 	}
@@ -58,9 +55,7 @@ static inline void __down_write(struct rw_semaphore *sem)
 
 	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
 				(atomic_t *)(&sem->count));
-	if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
-		smp_wmb();
-	else
+	if (tmp != RWSEM_ACTIVE_WRITE_BIAS)
 		rwsem_down_write_failed(sem);
 }
 
@@ -70,7 +65,6 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
 
 	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
 		      RWSEM_ACTIVE_WRITE_BIAS);
-	smp_wmb();
 	return tmp == RWSEM_UNLOCKED_VALUE;
 }
 
@@ -81,7 +75,6 @@ static inline void __up_read(struct rw_semaphore *sem)
 {
 	int tmp;
 
-	smp_wmb();
 	tmp = atomic_sub_return(1,(atomic_t *)(&sem->count));
 	if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
 		rwsem_wake(sem);
@@ -92,7 +85,6 @@ static inline void __up_read(struct rw_semaphore *sem)
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
-	smp_wmb();
 	if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
 			      (atomic_t *)(&sem->count)) < 0)
 		rwsem_wake(sem);
@@ -113,7 +105,6 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 {
 	int tmp;
 
-	smp_wmb();
 	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
 	if (tmp < 0)
 		rwsem_downgrade_wake(sem);
@@ -124,7 +115,6 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
  */
 static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
 {
-	smp_mb();
 	return atomic_add_return(delta, (atomic_t *)(&sem->count));
 }