Commit c0df1081 authored by Mark Rutland, committed by Ingo Molnar

arm64, locking/atomics: Use instrumented atomics

Now that the generic atomic headers provide instrumented wrappers of all
the atomics implemented by arm64, let's migrate arm64 over to these.

The additional instrumentation will help to find bugs (e.g. when fuzzing
with Syzkaller).
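
For reference, the instrumented wrappers are thin shims which sanitize the
access and then defer to the arch_-prefixed implementation. Roughly (a
simplified sketch of the pattern used by include/asm-generic/atomic-instrumented.h;
the real header covers every operation and ordering variant):

#include <linux/kasan-checks.h>

static __always_inline void atomic_add(int i, atomic_t *v)
{
	/* Instrumentation hook: lets KASAN (and fuzzers) see the access. */
	kasan_check_write(v, sizeof(*v));
	/* Arch-specific implementation, renamed by this patch. */
	arch_atomic_add(i, v);
}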

Mostly this change involves adding an arch_ prefix to a number of
function names and macro definitions. When LSE atomics are used, the
out-of-line LL/SC atomics will be named __ll_sc_arch_atomic_${OP}.
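
For example, with the existing helpers in arch/arm64/include/asm/lse.h
(assuming they keep their current shape; shown here only to illustrate the
resulting symbol names, not changed by this patch):

#define __LL_SC_PREFIX(x)	__ll_sc_##x
#define __LL_SC_EXPORT(x)	EXPORT_SYMBOL(__LL_SC_PREFIX(x))

so ATOMIC_OP(add, ...) in atomic_ll_sc.h now defines and exports
__ll_sc_arch_atomic_add(), which the LSE header branches to via
__LL_SC_ATOMIC(add) on CPUs without LSE atomics.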

Adding the arch_ prefix requires some whitespace fixups to keep things
aligned. Some other unusual whitespace is fixed up at the same time
(e.g. in the cmpxchg wrappers).

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: linuxdrivers@attotech.com
Cc: dvyukov@google.com
Cc: boqun.feng@gmail.com
Cc: arnd@arndb.de
Cc: aryabinin@virtuozzo.com
Cc: glider@google.com
Link: http://lkml.kernel.org/r/20180904104830.2975-7-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 8d325880
@@ -39,7 +39,7 @@
 #define ATOMIC_OP(op, asm_op) \
 __LL_SC_INLINE void \
-__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
+__LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v)) \
 { \
 unsigned long tmp; \
 int result; \
@@ -53,11 +53,11 @@ __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
 : "Ir" (i)); \
 } \
-__LL_SC_EXPORT(atomic_##op);
+__LL_SC_EXPORT(arch_atomic_##op);
 #define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
 __LL_SC_INLINE int \
-__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \
+__LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v)) \
 { \
 unsigned long tmp; \
 int result; \
@@ -75,11 +75,11 @@ __LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \
 \
 return result; \
 } \
-__LL_SC_EXPORT(atomic_##op##_return##name);
+__LL_SC_EXPORT(arch_atomic_##op##_return##name);
 #define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
 __LL_SC_INLINE int \
-__LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \
+__LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v)) \
 { \
 unsigned long tmp; \
 int val, result; \
@@ -97,7 +97,7 @@ __LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \
 \
 return result; \
 } \
-__LL_SC_EXPORT(atomic_fetch_##op##name);
+__LL_SC_EXPORT(arch_atomic_fetch_##op##name);
 #define ATOMIC_OPS(...) \
 ATOMIC_OP(__VA_ARGS__) \
@@ -133,7 +133,7 @@ ATOMIC_OPS(xor, eor)
 #define ATOMIC64_OP(op, asm_op) \
 __LL_SC_INLINE void \
-__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
+__LL_SC_PREFIX(arch_atomic64_##op(long i, atomic64_t *v)) \
 { \
 long result; \
 unsigned long tmp; \
@@ -147,11 +147,11 @@ __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
 : "Ir" (i)); \
 } \
-__LL_SC_EXPORT(atomic64_##op);
+__LL_SC_EXPORT(arch_atomic64_##op);
 #define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
 __LL_SC_INLINE long \
-__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
+__LL_SC_PREFIX(arch_atomic64_##op##_return##name(long i, atomic64_t *v))\
 { \
 long result; \
 unsigned long tmp; \
@@ -169,11 +169,11 @@ __LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
 \
 return result; \
 } \
-__LL_SC_EXPORT(atomic64_##op##_return##name);
+__LL_SC_EXPORT(arch_atomic64_##op##_return##name);
 #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
 __LL_SC_INLINE long \
-__LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \
+__LL_SC_PREFIX(arch_atomic64_fetch_##op##name(long i, atomic64_t *v)) \
 { \
 long result, val; \
 unsigned long tmp; \
@@ -191,7 +191,7 @@ __LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \
 \
 return result; \
 } \
-__LL_SC_EXPORT(atomic64_fetch_##op##name);
+__LL_SC_EXPORT(arch_atomic64_fetch_##op##name);
 #define ATOMIC64_OPS(...) \
 ATOMIC64_OP(__VA_ARGS__) \
@@ -226,7 +226,7 @@ ATOMIC64_OPS(xor, eor)
 #undef ATOMIC64_OP
 __LL_SC_INLINE long
-__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
+__LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v))
 {
 long result;
 unsigned long tmp;
@@ -246,7 +246,7 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
 return result;
 }
-__LL_SC_EXPORT(atomic64_dec_if_positive);
+__LL_SC_EXPORT(arch_atomic64_dec_if_positive);
 #define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl) \
 __LL_SC_INLINE unsigned long \
...
@@ -25,9 +25,9 @@
 #error "please don't include this file directly"
 #endif
-#define __LL_SC_ATOMIC(op) __LL_SC_CALL(atomic_##op)
+#define __LL_SC_ATOMIC(op) __LL_SC_CALL(arch_atomic_##op)
 #define ATOMIC_OP(op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
 { \
 register int w0 asm ("w0") = i; \
 register atomic_t *x1 asm ("x1") = v; \
@@ -47,7 +47,7 @@ ATOMIC_OP(add, stadd)
 #undef ATOMIC_OP
 #define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \
-static inline int atomic_fetch_##op##name(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op##name(int i, atomic_t *v) \
 { \
 register int w0 asm ("w0") = i; \
 register atomic_t *x1 asm ("x1") = v; \
@@ -79,7 +79,7 @@ ATOMIC_FETCH_OPS(add, ldadd)
 #undef ATOMIC_FETCH_OPS
 #define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \
-static inline int atomic_add_return##name(int i, atomic_t *v) \
+static inline int arch_atomic_add_return##name(int i, atomic_t *v) \
 { \
 register int w0 asm ("w0") = i; \
 register atomic_t *x1 asm ("x1") = v; \
@@ -105,7 +105,7 @@ ATOMIC_OP_ADD_RETURN( , al, "memory")
 #undef ATOMIC_OP_ADD_RETURN
-static inline void atomic_and(int i, atomic_t *v)
+static inline void arch_atomic_and(int i, atomic_t *v)
 {
 register int w0 asm ("w0") = i;
 register atomic_t *x1 asm ("x1") = v;
@@ -123,7 +123,7 @@ static inline void atomic_and(int i, atomic_t *v)
 }
 #define ATOMIC_FETCH_OP_AND(name, mb, cl...) \
-static inline int atomic_fetch_and##name(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_and##name(int i, atomic_t *v) \
 { \
 register int w0 asm ("w0") = i; \
 register atomic_t *x1 asm ("x1") = v; \
@@ -149,7 +149,7 @@ ATOMIC_FETCH_OP_AND( , al, "memory")
 #undef ATOMIC_FETCH_OP_AND
-static inline void atomic_sub(int i, atomic_t *v)
+static inline void arch_atomic_sub(int i, atomic_t *v)
 {
 register int w0 asm ("w0") = i;
 register atomic_t *x1 asm ("x1") = v;
@@ -167,7 +167,7 @@ static inline void atomic_sub(int i, atomic_t *v)
 }
 #define ATOMIC_OP_SUB_RETURN(name, mb, cl...) \
-static inline int atomic_sub_return##name(int i, atomic_t *v) \
+static inline int arch_atomic_sub_return##name(int i, atomic_t *v) \
 { \
 register int w0 asm ("w0") = i; \
 register atomic_t *x1 asm ("x1") = v; \
@@ -195,7 +195,7 @@ ATOMIC_OP_SUB_RETURN( , al, "memory")
 #undef ATOMIC_OP_SUB_RETURN
 #define ATOMIC_FETCH_OP_SUB(name, mb, cl...) \
-static inline int atomic_fetch_sub##name(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_sub##name(int i, atomic_t *v) \
 { \
 register int w0 asm ("w0") = i; \
 register atomic_t *x1 asm ("x1") = v; \
@@ -222,9 +222,9 @@ ATOMIC_FETCH_OP_SUB( , al, "memory")
 #undef ATOMIC_FETCH_OP_SUB
 #undef __LL_SC_ATOMIC
-#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(atomic64_##op)
+#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(arch_atomic64_##op)
 #define ATOMIC64_OP(op, asm_op) \
-static inline void atomic64_##op(long i, atomic64_t *v) \
+static inline void arch_atomic64_##op(long i, atomic64_t *v) \
 { \
 register long x0 asm ("x0") = i; \
 register atomic64_t *x1 asm ("x1") = v; \
@@ -244,7 +244,7 @@ ATOMIC64_OP(add, stadd)
 #undef ATOMIC64_OP
 #define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \
-static inline long atomic64_fetch_##op##name(long i, atomic64_t *v) \
+static inline long arch_atomic64_fetch_##op##name(long i, atomic64_t *v)\
 { \
 register long x0 asm ("x0") = i; \
 register atomic64_t *x1 asm ("x1") = v; \
@@ -276,7 +276,7 @@ ATOMIC64_FETCH_OPS(add, ldadd)
 #undef ATOMIC64_FETCH_OPS
 #define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \
-static inline long atomic64_add_return##name(long i, atomic64_t *v) \
+static inline long arch_atomic64_add_return##name(long i, atomic64_t *v)\
 { \
 register long x0 asm ("x0") = i; \
 register atomic64_t *x1 asm ("x1") = v; \
@@ -302,7 +302,7 @@ ATOMIC64_OP_ADD_RETURN( , al, "memory")
 #undef ATOMIC64_OP_ADD_RETURN
-static inline void atomic64_and(long i, atomic64_t *v)
+static inline void arch_atomic64_and(long i, atomic64_t *v)
 {
 register long x0 asm ("x0") = i;
 register atomic64_t *x1 asm ("x1") = v;
@@ -320,7 +320,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
 }
 #define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
-static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
+static inline long arch_atomic64_fetch_and##name(long i, atomic64_t *v) \
 { \
 register long x0 asm ("x0") = i; \
 register atomic64_t *x1 asm ("x1") = v; \
@@ -346,7 +346,7 @@ ATOMIC64_FETCH_OP_AND( , al, "memory")
 #undef ATOMIC64_FETCH_OP_AND
-static inline void atomic64_sub(long i, atomic64_t *v)
+static inline void arch_atomic64_sub(long i, atomic64_t *v)
 {
 register long x0 asm ("x0") = i;
 register atomic64_t *x1 asm ("x1") = v;
@@ -364,7 +364,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
 }
 #define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \
-static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
+static inline long arch_atomic64_sub_return##name(long i, atomic64_t *v)\
 { \
 register long x0 asm ("x0") = i; \
 register atomic64_t *x1 asm ("x1") = v; \
@@ -392,7 +392,7 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory")
 #undef ATOMIC64_OP_SUB_RETURN
 #define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \
-static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
+static inline long arch_atomic64_fetch_sub##name(long i, atomic64_t *v) \
 { \
 register long x0 asm ("x0") = i; \
 register atomic64_t *x1 asm ("x1") = v; \
@@ -418,7 +418,7 @@ ATOMIC64_FETCH_OP_SUB( , al, "memory")
 #undef ATOMIC64_FETCH_OP_SUB
-static inline long atomic64_dec_if_positive(atomic64_t *v)
+static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
 {
 register long x0 asm ("x0") = (long)v;
...
@@ -110,10 +110,10 @@ __XCHG_GEN(_mb)
 })
 /* xchg */
-#define xchg_relaxed(...) __xchg_wrapper( , __VA_ARGS__)
-#define xchg_acquire(...) __xchg_wrapper(_acq, __VA_ARGS__)
-#define xchg_release(...) __xchg_wrapper(_rel, __VA_ARGS__)
-#define xchg(...) __xchg_wrapper( _mb, __VA_ARGS__)
+#define arch_xchg_relaxed(...) __xchg_wrapper( , __VA_ARGS__)
+#define arch_xchg_acquire(...) __xchg_wrapper(_acq, __VA_ARGS__)
+#define arch_xchg_release(...) __xchg_wrapper(_rel, __VA_ARGS__)
+#define arch_xchg(...) __xchg_wrapper( _mb, __VA_ARGS__)
 #define __CMPXCHG_GEN(sfx) \
 static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
@@ -154,18 +154,18 @@ __CMPXCHG_GEN(_mb)
 })
 /* cmpxchg */
-#define cmpxchg_relaxed(...) __cmpxchg_wrapper( , __VA_ARGS__)
-#define cmpxchg_acquire(...) __cmpxchg_wrapper(_acq, __VA_ARGS__)
-#define cmpxchg_release(...) __cmpxchg_wrapper(_rel, __VA_ARGS__)
-#define cmpxchg(...) __cmpxchg_wrapper( _mb, __VA_ARGS__)
-#define cmpxchg_local cmpxchg_relaxed
+#define arch_cmpxchg_relaxed(...) __cmpxchg_wrapper( , __VA_ARGS__)
+#define arch_cmpxchg_acquire(...) __cmpxchg_wrapper(_acq, __VA_ARGS__)
+#define arch_cmpxchg_release(...) __cmpxchg_wrapper(_rel, __VA_ARGS__)
+#define arch_cmpxchg(...) __cmpxchg_wrapper( _mb, __VA_ARGS__)
+#define arch_cmpxchg_local arch_cmpxchg_relaxed
 /* cmpxchg64 */
-#define cmpxchg64_relaxed cmpxchg_relaxed
-#define cmpxchg64_acquire cmpxchg_acquire
-#define cmpxchg64_release cmpxchg_release
-#define cmpxchg64 cmpxchg
-#define cmpxchg64_local cmpxchg_local
+#define arch_cmpxchg64_relaxed arch_cmpxchg_relaxed
+#define arch_cmpxchg64_acquire arch_cmpxchg_acquire
+#define arch_cmpxchg64_release arch_cmpxchg_release
+#define arch_cmpxchg64 arch_cmpxchg
+#define arch_cmpxchg64_local arch_cmpxchg_local
 /* cmpxchg_double */
 #define system_has_cmpxchg_double() 1
@@ -177,9 +177,9 @@ __CMPXCHG_GEN(_mb)
 VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1); \
 })
-#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
-({\
-int __ret;\
+#define arch_cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
+({ \
+int __ret; \
 __cmpxchg_double_check(ptr1, ptr2); \
 __ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
 (unsigned long)(n1), (unsigned long)(n2), \
@@ -187,9 +187,9 @@ __CMPXCHG_GEN(_mb)
 __ret; \
 })
-#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
-({\
-int __ret;\
+#define arch_cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
+({ \
+int __ret; \
 __cmpxchg_double_check(ptr1, ptr2); \
 __ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
 (unsigned long)(n1), (unsigned long)(n2), \
...
@@ -22,6 +22,6 @@
 #define sync_test_and_clear_bit(nr, p) test_and_clear_bit(nr, p)
 #define sync_test_and_change_bit(nr, p) test_and_change_bit(nr, p)
 #define sync_test_bit(nr, addr) test_bit(nr, addr)
-#define sync_cmpxchg cmpxchg
+#define arch_sync_cmpxchg arch_cmpxchg
 #endif