Commit 710adaa9 authored by Peter Zijlstra, committed by Ingo Molnar

arch,avr32: Convert smp_mb__*()

AVR32's mb() implementation is just a compiler barrier(), so the
arch-specific smp_mb__*() definitions make no difference; rely fully
on whatever asm-generic/barrier.h generates.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/n/tip-8gow97a7mapmnec0pvf729pj@git.kernel.org
Cc: Haavard Skinnemoen <hskinnemoen@gmail.com>
Cc: Hans-Christian Egtvedt <egtvedt@samfundet.no>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 8715466b
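As the changelog notes, the conversion is safe because the old per-operation
macros and the new generic smp_mb__before_atomic()/smp_mb__after_atomic()
pair both collapse to a plain compiler barrier on AVR32. A minimal sketch of
the fallback pattern (illustrative only, not the exact kernel source): an
architecture that does not override the macros gets smp_mb()-based
definitions, and smp_mb() on a UP-only architecture such as AVR32 is itself
just barrier().

    /* Generic fallback pattern (sketch): used unless the arch overrides it. */
    #ifndef smp_mb__before_atomic
    #define smp_mb__before_atomic()	smp_mb()
    #endif

    #ifndef smp_mb__after_atomic
    #define smp_mb__after_atomic()	smp_mb()
    #endif

    /*
     * On !SMP, smp_mb() is only a compiler barrier, so on AVR32 these
     * expand to barrier(), which is exactly what the removed
     * AVR32-specific smp_mb__{before,after}_atomic_{dec,inc}() macros
     * provided.
     */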
arch/avr32/include/asm/atomic.h
@@ -183,9 +183,4 @@ static inline int atomic_sub_if_positive(int i, atomic_t *v)
 
 #define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
 
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
 #endif /* __ASM_AVR32_ATOMIC_H */
arch/avr32/include/asm/bitops.h
@@ -13,12 +13,7 @@
 #endif
 
 #include <asm/byteorder.h>
-
-/*
- * clear_bit() doesn't provide any barrier for the compiler
- */
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
+#include <asm/barrier.h>
 
 /*
  * set_bit - Atomically set a bit in memory
@@ -67,7 +62,7 @@ static inline void set_bit(int nr, volatile void * addr)
  *
  * clear_bit() is atomic and may not be reordered. However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static inline void clear_bit(int nr, volatile void * addr)
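For context, the comment updated in the last hunk describes how the new
macros are meant to be used around clear_bit() when it acts as a lock
release. A hedged illustration follows; the flag word, bit number and
function name are made up for this example, only the
smp_mb__before_atomic()/clear_bit() pairing comes from the comment above.

    #include <linux/bitops.h>

    /* Hypothetical flag word and lock bit, for illustration only. */
    static unsigned long my_flags;
    #define MY_LOCK_BIT	0

    static void my_unlock(void)
    {
    	/*
    	 * clear_bit() is atomic but implies no memory barrier, so order
    	 * all stores made while the bit was held before the bit is
    	 * cleared and the "lock" becomes visible as free to other CPUs.
    	 */
    	smp_mb__before_atomic();
    	clear_bit(MY_LOCK_BIT, &my_flags);
    }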