Commit ab3f02fc authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar

locking/arch: Add WRITE_ONCE() to set_mb()

Since we assume set_mb() to result in a single store followed by a
full memory barrier, employ WRITE_ONCE().
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 6ce47fd9
@@ -81,7 +81,7 @@ do { \
 #define read_barrier_depends() do { } while(0)
 #define smp_read_barrier_depends() do { } while(0)
-#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+#define set_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 #define smp_mb__before_atomic() smp_mb()
 #define smp_mb__after_atomic() smp_mb()
@@ -114,7 +114,7 @@ do { \
 #define read_barrier_depends() do { } while(0)
 #define smp_read_barrier_depends() do { } while(0)
-#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+#define set_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 #define nop() asm volatile("nop");
 #define smp_mb__before_atomic() smp_mb()
@@ -82,7 +82,7 @@ do { \
  * acquire vs release semantics but we can't discuss this stuff with
  * Linus just yet. Grrr...
  */
-#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
+#define set_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
 /*
  * The group barrier in front of the rsm & ssm are necessary to ensure
@@ -84,7 +84,7 @@ static inline void fence(void)
 #define read_barrier_depends() do { } while (0)
 #define smp_read_barrier_depends() do { } while (0)
-#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+#define set_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 #define smp_store_release(p, v) \
 do { \
@@ -113,7 +113,7 @@
 #endif
 #define set_mb(var, value) \
-	do { var = value; smp_mb(); } while (0)
+	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 #define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
@@ -34,7 +34,7 @@
 #define rmb() __asm__ __volatile__ ("sync" : : : "memory")
 #define wmb() __asm__ __volatile__ ("sync" : : : "memory")
-#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define set_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
 #ifdef __SUBARCH_HAS_LWSYNC
 #    define SMPWMB LWSYNC
@@ -36,7 +36,7 @@
 #define smp_mb__before_atomic() smp_mb()
 #define smp_mb__after_atomic() smp_mb()
-#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define set_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
 #define smp_store_release(p, v) \
 do { \
@@ -41,7 +41,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
 #define dma_wmb() wmb()
 #define set_mb(__var, __value) \
-	do { __var = __value; membar_safe("#StoreLoad"); } while(0)
+	do { WRITE_ONCE(__var, __value); membar_safe("#StoreLoad"); } while(0)
 #ifdef CONFIG_SMP
 #define smp_mb() mb()
@@ -40,7 +40,7 @@
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#define set_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
 #endif /* SMP */
 #define read_barrier_depends() do { } while (0)
@@ -39,7 +39,7 @@
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#define set_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
 #define read_barrier_depends() do { } while (0)
 #define smp_read_barrier_depends() do { } while (0)
@@ -67,7 +67,7 @@
 #endif
 #ifndef set_mb
-#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
+#define set_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
 #endif
 #ifndef smp_mb__before_atomic
@@ -250,7 +250,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 	({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
 #define WRITE_ONCE(x, val) \
-	({ typeof(x) __val = (val); __write_once_size(&(x), &__val, sizeof(__val)); __val; })
+	({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
 #endif /* __KERNEL__ */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment