Commit 89607d5e authored by Peter Zijlstra, committed by Ingo Molnar

arch,m32r: Convert smp_mb__*()

M32r uses asm-generic/barrier.h and its smp_mb() is barrier();
therefore we can use the generic versions, which default to smp_mb().
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/n/tip-wh6xljltyvmpy9t0bc80k1fy@git.kernel.org
Cc: Hirokazu Takata <takata@linux-m32r.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org
Cc: linux-m32r-ja@ml.linux-m32r.org
Cc: linux-m32r@ml.linux-m32r.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 0cd64efb
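
For context (not part of the patch): a minimal sketch, assuming the standard generic-header layout of this series, of why the conversion is a behavioural no-op on m32r. The generic smp_mb__before_atomic()/smp_mb__after_atomic() fall back to smp_mb(), and since m32r does not override mb(), the asm-generic smp_mb() reduces to barrier():

/* Generic fallbacks (approximate placement in the generic headers): */
#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	smp_mb()
#endif
#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	smp_mb()
#endif

/* asm-generic/barrier.h defaults, with no m32r override of mb(): */
#ifndef mb
#define mb()		barrier()
#endif
#ifdef CONFIG_SMP
#define smp_mb()	mb()		/* i.e. barrier() on m32r */
#else
#define smp_mb()	barrier()
#endif

/*
 * Net effect: smp_mb__{before,after}_atomic() expand to barrier(), which is
 * exactly what the removed m32r-private macros expanded to.
 */
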
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -13,6 +13,7 @@
 #include <asm/assembler.h>
 #include <asm/cmpxchg.h>
 #include <asm/dcache_clear.h>
+#include <asm/barrier.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
@@ -308,10 +309,4 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr)
 	local_irq_restore(flags);
 }
 
-/* Atomic operations are already serializing on m32r */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
-
 #endif	/* _ASM_M32R_ATOMIC_H */
--- a/arch/m32r/include/asm/bitops.h
+++ b/arch/m32r/include/asm/bitops.h
@@ -21,6 +21,7 @@
 #include <asm/byteorder.h>
 #include <asm/dcache_clear.h>
 #include <asm/types.h>
+#include <asm/barrier.h>
 
 /*
  * These have to be done with inline assembly: that way the bit-setting
@@ -73,7 +74,7 @@ static __inline__ void set_bit(int nr, volatile void * addr)
  *
  * clear_bit() is atomic and may not be reordered.  However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static __inline__ void clear_bit(int nr, volatile void * addr)
@@ -103,9 +104,6 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
 	local_irq_restore(flags);
 }
 
-#define smp_mb__before_clear_bit()	barrier()
-#define smp_mb__after_clear_bit()	barrier()
-
 /**
  * change_bit - Toggle a bit in memory
  * @nr: Bit to clear
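
A hedged usage sketch of the rule stated in the bitops.h comment above: when clear_bit() is used for locking, the caller supplies the ordering. The helper name and flag word below are made up for illustration; only clear_bit(), smp_mb__before_atomic() and smp_mb__after_atomic() are kernel API:

#include <linux/bitops.h>

static unsigned long resource_flags;	/* hypothetical flag word */

/* Hypothetical "unlock": release bit 0 with the required ordering. */
static void release_resource_bit(void)
{
	smp_mb__before_atomic();	/* order prior stores before the clear */
	clear_bit(0, &resource_flags);	/* atomic RMW, but no implied barrier */
	smp_mb__after_atomic();		/* order the clear before later accesses */
}
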