Commit eebd1b92 authored by Michael S. Tsirkin

ia64: define __smp_xxx

This defines __smp_xxx barriers for ia64,
for use by virtualization.

smp_xxx barriers are removed as they are now defined
correctly by asm-generic/barrier.h.

This reduces the amount of arch-specific boilerplate code.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Tony Luck <tony.luck@intel.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
parent 27f6cabc
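For context (not part of the commit): after this change the generic header supplies the smp_* and virt_* wrappers on top of the arch-provided __smp_* primitives. The sketch below is a simplified illustration of that layering, assuming the usual CONFIG_SMP split and the virt_mb() mapping in asm-generic/barrier.h; the real header covers more barrier flavours and guards each definition with #ifndef.

/* Simplified layering sketch (illustrative, not the actual header text). */
#ifdef CONFIG_SMP
#define smp_mb()	__smp_mb()	/* arch-provided real fence */
#else
#define smp_mb()	barrier()	/* UP build: a compiler barrier suffices */
#endif

/*
 * Virtualization helpers want the real fence even on !CONFIG_SMP,
 * since the other side (hypervisor or device) runs on another CPU.
 */
#define virt_mb()	__smp_mb()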
@@ -42,28 +42,24 @@
 #define dma_rmb()	mb()
 #define dma_wmb()	mb()
 
-#ifdef CONFIG_SMP
-# define smp_mb()	mb()
-#else
-# define smp_mb()	barrier()
-#endif
+# define __smp_mb()	mb()
 
-#define smp_mb__before_atomic()	barrier()
-#define smp_mb__after_atomic()	barrier()
+#define __smp_mb__before_atomic()	barrier()
+#define __smp_mb__after_atomic()	barrier()
 
 /*
  * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no
  * need for asm trickery!
  */
-#define smp_store_release(p, v)					\
+#define __smp_store_release(p, v)				\
 do {								\
 	compiletime_assert_atomic_type(*p);			\
 	barrier();						\
 	WRITE_ONCE(*p, v);					\
 } while (0)
 
-#define smp_load_acquire(p)					\
+#define __smp_load_acquire(p)					\
 ({								\
 	typeof(*p) ___p1 = READ_ONCE(*p);			\
 	compiletime_assert_atomic_type(*p);			\
...
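Illustrative usage (hypothetical driver code, not part of this commit): the comment in the hunk above is why ia64 only needs barrier() around WRITE_ONCE()/READ_ONCE() — the volatile accesses themselves compile to st.rel and ld.acq. A typical pairing of the two primitives looks like the sketch below, assuming kernel context; struct ring, producer() and consumer() are made-up names.

#include <linux/compiler.h>
#include <asm/barrier.h>

struct ring {
	int data;
	int ready;
};

static void producer(struct ring *r, int v)
{
	r->data = v;
	/* release: data is visible before ready == 1; st.rel on ia64 */
	smp_store_release(&r->ready, 1);
}

static int consumer(struct ring *r)
{
	/* acquire: observing ready == 1 guarantees observing data; ld.acq on ia64 */
	if (smp_load_acquire(&r->ready))
		return r->data;
	return -1;
}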