Commit 4532b305 authored by H. Peter Anvin

x86, asm: Clean up and simplify <asm/cmpxchg.h>

Remove the __xg() hack to create a memory barrier near xchg and
cmpxchg; it has been there since 1.3.11 but should not be necessary
with "asm volatile" and a "memory" clobber, neither of which were
there in the original implementation.

However, we *should* make this a volatile reference.
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
LKML-Reference: <AANLkTikAmaDPji-TVDarmG1yD=fwbffcsmEU=YEuP+8r@mail.gmail.com>
parent 69309a05
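
For readers unfamiliar with the old hack: __xg() cast the pointer to a dummy struct of 100 longs so that the memory operand appeared to cover a large region, serving as a crude barrier against the compiler caching or reordering nearby memory around the xchg/cmpxchg. The same guarantee now comes directly from "asm volatile" plus a "memory" clobber on a correctly sized volatile pointer. Below is a minimal user-space sketch of the resulting pattern; the name my_xchg32 and the <stdint.h> types are illustrative only, not part of the patch.

        #include <stdint.h>

        /* Atomically exchange *ptr with val and return the old value.
         * xchg with a memory operand is implicitly locked; "asm volatile"
         * keeps the statement from being removed or reordered by the
         * compiler, and the "memory" clobber stops gcc from caching other
         * memory values across it, replacing the old __xg() dummy-struct cast.
         */
        static inline uint32_t my_xchg32(volatile uint32_t *ptr, uint32_t val)
        {
                asm volatile("xchgl %0,%1"
                             : "=r" (val), "+m" (*ptr)
                             : "0" (val)
                             : "memory");
                return val;
        }
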
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -11,38 +11,42 @@
 extern void __xchg_wrong_size(void);
 /*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
- * Note 2: xchg has side effect, so that attribute volatile is necessary,
- *        but generally the primitive is invalid, *ptr is output argument. --ANK
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
+ * Since this is generally used to protect other memory information, we
+ * use "asm volatile" and "memory" clobbers to prevent gcc from moving
+ * information around.
  */
-struct __xchg_dummy {
-        unsigned long a[100];
-};
-#define __xg(x) ((struct __xchg_dummy *)(x))
 #define __xchg(x, ptr, size) \
 ({ \
         __typeof(*(ptr)) __x = (x); \
         switch (size) { \
         case 1: \
-                asm volatile("xchgb %b0,%1" \
-                             : "=q" (__x), "+m" (*__xg(ptr)) \
+        { \
+                volatile u8 *__ptr = (volatile u8 *)(ptr); \
+                asm volatile("xchgb %0,%1" \
+                             : "=q" (__x), "+m" (*__ptr) \
                              : "0" (__x) \
                              : "memory"); \
                 break; \
+        } \
         case 2: \
-                asm volatile("xchgw %w0,%1" \
-                             : "=r" (__x), "+m" (*__xg(ptr)) \
+        { \
+                volatile u16 *__ptr = (volatile u16 *)(ptr); \
+                asm volatile("xchgw %0,%1" \
+                             : "=r" (__x), "+m" (*__ptr) \
                              : "0" (__x) \
                              : "memory"); \
                 break; \
+        } \
         case 4: \
+        { \
+                volatile u32 *__ptr = (volatile u32 *)(ptr); \
                 asm volatile("xchgl %0,%1" \
-                             : "=r" (__x), "+m" (*__xg(ptr)) \
+                             : "=r" (__x), "+m" (*__ptr) \
                              : "0" (__x) \
                              : "memory"); \
                 break; \
+        } \
         default: \
                 __xchg_wrong_size(); \
         } \
@@ -94,23 +98,32 @@ extern void __cmpxchg_wrong_size(void);
         __typeof__(*(ptr)) __new = (new); \
         switch (size) { \
         case 1: \
-                asm volatile(lock "cmpxchgb %b2,%1" \
-                             : "=a" (__ret), "+m" (*__xg(ptr)) \
+        { \
+                volatile u8 *__ptr = (volatile u8 *)(ptr); \
+                asm volatile(lock "cmpxchgb %2,%1" \
+                             : "=a" (__ret), "+m" (*__ptr) \
                              : "q" (__new), "0" (__old) \
                              : "memory"); \
                 break; \
+        } \
         case 2: \
-                asm volatile(lock "cmpxchgw %w2,%1" \
-                             : "=a" (__ret), "+m" (*__xg(ptr)) \
+        { \
+                volatile u16 *__ptr = (volatile u16 *)(ptr); \
+                asm volatile(lock "cmpxchgw %2,%1" \
+                             : "=a" (__ret), "+m" (*__ptr) \
                              : "r" (__new), "0" (__old) \
                              : "memory"); \
                 break; \
+        } \
         case 4: \
+        { \
+                volatile u32 *__ptr = (volatile u32 *)(ptr); \
                 asm volatile(lock "cmpxchgl %2,%1" \
-                             : "=a" (__ret), "+m" (*__xg(ptr)) \
+                             : "=a" (__ret), "+m" (*__ptr) \
                              : "r" (__new), "0" (__old) \
                              : "memory"); \
                 break; \
+        } \
         default: \
                 __cmpxchg_wrong_size(); \
         } \
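
The __cmpxchg cases use the same block-scoped volatile pointer; the constraint layout reflects how cmpxchg itself works: the expected old value must sit in al/ax/eax, and the previous memory value comes back in that same register, hence "=a" (__ret) tied to "0" (__old). A stand-alone sketch of the 4-byte case, with the hypothetical name my_cmpxchg32 and <stdint.h> types standing in for the kernel's:

        #include <stdint.h>

        /* If *ptr equals old, store new into *ptr; either way return the
         * value *ptr held before the instruction.  cmpxchg compares the
         * memory operand against %eax and leaves the previous value in
         * %eax, so the output is pinned to "a" and seeded via "0" (old).
         */
        static inline uint32_t my_cmpxchg32(volatile uint32_t *ptr,
                                            uint32_t old, uint32_t new)
        {
                uint32_t ret;

                asm volatile("lock; cmpxchgl %2,%1"
                             : "=a" (ret), "+m" (*ptr)
                             : "r" (new), "0" (old)
                             : "memory");
                return ret;
        }
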
@@ -148,31 +161,27 @@ extern void __cmpxchg_wrong_size(void);
                                        (unsigned long long)(n)))
 #endif
-static inline unsigned long long __cmpxchg64(volatile void *ptr,
-                                             unsigned long long old,
-                                             unsigned long long new)
+static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
 {
-        unsigned long long prev;
+        u64 prev;
         asm volatile(LOCK_PREFIX "cmpxchg8b %1"
                      : "=A" (prev),
-                       "+m" (*__xg(ptr))
-                     : "b" ((unsigned long)new),
-                       "c" ((unsigned long)(new >> 32)),
+                       "+m" (*ptr)
+                     : "b" ((u32)new),
+                       "c" ((u32)(new >> 32)),
                        "0" (old)
                      : "memory");
         return prev;
 }
-static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
-                                                   unsigned long long old,
-                                                   unsigned long long new)
+static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
 {
-        unsigned long long prev;
+        u64 prev;
         asm volatile("cmpxchg8b %1"
                      : "=A" (prev),
-                       "+m" (*__xg(ptr))
-                     : "b" ((unsigned long)new),
-                       "c" ((unsigned long)(new >> 32)),
+                       "+m" (*ptr)
+                     : "b" ((u32)new),
+                       "c" ((u32)(new >> 32)),
                        "0" (old)
                      : "memory");
         return prev;
...
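
__cmpxchg64 has no choice of registers: cmpxchg8b compares the 64-bit memory operand against EDX:EAX and, on a match, stores ECX:EBX. That is why the result uses the "=A" (EDX:EAX pair) constraint seeded from "0" (old), and why new is split into (u32)new for "b" and (u32)(new >> 32) for "c". A 32-bit-only user-space sketch of the same pattern, with the hypothetical name my_cmpxchg64:

        #include <stdint.h>

        /* 32-bit x86 only: 64-bit compare-and-swap via cmpxchg8b.
         * "=A" binds the 64-bit result to the EDX:EAX pair, "0" (old)
         * preloads the expected value there, and the replacement value
         * is split across EBX (low half, "b") and ECX (high half, "c").
         */
        static inline uint64_t my_cmpxchg64(volatile uint64_t *ptr,
                                            uint64_t old, uint64_t new)
        {
                uint64_t prev;

                asm volatile("lock; cmpxchg8b %1"
                             : "=A" (prev), "+m" (*ptr)
                             : "b" ((uint32_t)new),
                               "c" ((uint32_t)(new >> 32)),
                               "0" (old)
                             : "memory");
                return prev;
        }
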
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -3,8 +3,6 @@
 #include <asm/alternative.h> /* Provides LOCK_PREFIX */
-#define __xg(x) ((volatile long *)(x))
 static inline void set_64bit(volatile u64 *ptr, u64 val)
 {
         *ptr = val;
@@ -14,38 +12,51 @@ extern void __xchg_wrong_size(void);
 extern void __cmpxchg_wrong_size(void);
 /*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
- * Note 2: xchg has side effect, so that attribute volatile is necessary,
- *        but generally the primitive is invalid, *ptr is output argument. --ANK
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
+ * Since this is generally used to protect other memory information, we
+ * use "asm volatile" and "memory" clobbers to prevent gcc from moving
+ * information around.
  */
 #define __xchg(x, ptr, size) \
 ({ \
         __typeof(*(ptr)) __x = (x); \
         switch (size) { \
         case 1: \
-                asm volatile("xchgb %b0,%1" \
-                             : "=q" (__x), "+m" (*__xg(ptr)) \
+        { \
+                volatile u8 *__ptr = (volatile u8 *)(ptr); \
+                asm volatile("xchgb %0,%1" \
+                             : "=q" (__x), "+m" (*__ptr) \
                              : "0" (__x) \
                              : "memory"); \
                 break; \
+        } \
         case 2: \
-                asm volatile("xchgw %w0,%1" \
-                             : "=r" (__x), "+m" (*__xg(ptr)) \
+        { \
+                volatile u16 *__ptr = (volatile u16 *)(ptr); \
+                asm volatile("xchgw %0,%1" \
+                             : "=r" (__x), "+m" (*__ptr) \
                              : "0" (__x) \
                              : "memory"); \
                 break; \
+        } \
         case 4: \
-                asm volatile("xchgl %k0,%1" \
-                             : "=r" (__x), "+m" (*__xg(ptr)) \
+        { \
+                volatile u32 *__ptr = (volatile u32 *)(ptr); \
+                asm volatile("xchgl %0,%1" \
+                             : "=r" (__x), "+m" (*__ptr) \
                              : "0" (__x) \
                              : "memory"); \
                 break; \
+        } \
         case 8: \
+        { \
+                volatile u64 *__ptr = (volatile u64 *)(ptr); \
                 asm volatile("xchgq %0,%1" \
-                             : "=r" (__x), "+m" (*__xg(ptr)) \
+                             : "=r" (__x), "+m" (*__ptr) \
                              : "0" (__x) \
                              : "memory"); \
                 break; \
+        } \
         default: \
                 __xchg_wrong_size(); \
         } \
@@ -69,29 +80,41 @@ extern void __cmpxchg_wrong_size(void);
         __typeof__(*(ptr)) __new = (new); \
         switch (size) { \
         case 1: \
-                asm volatile(lock "cmpxchgb %b2,%1" \
-                             : "=a" (__ret), "+m" (*__xg(ptr)) \
+        { \
+                volatile u8 *__ptr = (volatile u8 *)(ptr); \
+                asm volatile(lock "cmpxchgb %2,%1" \
+                             : "=a" (__ret), "+m" (*__ptr) \
                              : "q" (__new), "0" (__old) \
                              : "memory"); \
                 break; \
+        } \
         case 2: \
-                asm volatile(lock "cmpxchgw %w2,%1" \
-                             : "=a" (__ret), "+m" (*__xg(ptr)) \
+        { \
+                volatile u16 *__ptr = (volatile u16 *)(ptr); \
+                asm volatile(lock "cmpxchgw %2,%1" \
+                             : "=a" (__ret), "+m" (*__ptr) \
                              : "r" (__new), "0" (__old) \
                              : "memory"); \
                 break; \
+        } \
         case 4: \
-                asm volatile(lock "cmpxchgl %k2,%1" \
-                             : "=a" (__ret), "+m" (*__xg(ptr)) \
+        { \
+                volatile u32 *__ptr = (volatile u32 *)(ptr); \
+                asm volatile(lock "cmpxchgl %2,%1" \
+                             : "=a" (__ret), "+m" (*__ptr) \
                              : "r" (__new), "0" (__old) \
                              : "memory"); \
                 break; \
+        } \
         case 8: \
+        { \
+                volatile u64 *__ptr = (volatile u64 *)(ptr); \
                 asm volatile(lock "cmpxchgq %2,%1" \
-                             : "=a" (__ret), "+m" (*__xg(ptr)) \
+                             : "=a" (__ret), "+m" (*__ptr) \
                              : "r" (__new), "0" (__old) \
                              : "memory"); \
                 break; \
+        } \
         default: \
                 __cmpxchg_wrong_size(); \
         } \
...
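
For completeness, callers of a cmpxchg-style primitive normally wrap it in a retry loop: read the current value, compute the update, and retry if another CPU changed the location in between. A sketch built on the my_cmpxchg32 helper from the earlier example (again illustrative, not kernel code):

        /* Atomically add delta to *ptr using a compare-and-swap retry
         * loop; returns the new value.  The loop repeats whenever the
         * cmpxchg reports that *ptr changed between the load and the swap.
         */
        static inline uint32_t atomic_add_u32(volatile uint32_t *ptr,
                                              uint32_t delta)
        {
                uint32_t cur, prev;

                do {
                        cur = *ptr;
                        prev = my_cmpxchg32(ptr, cur, cur + delta);
                } while (prev != cur);

                return cur + delta;
        }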