Commit 4ec45856 authored by Peter Zijlstra, committed by Ingo Molnar

locking/atomic, arch/mips: Convert to _relaxed atomics

Generic code will construct {,_acquire,_release} versions by adding the
required smp_mb__{before,after}_atomic() calls.
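
For reference (not part of this patch), the generic fallbacks in
include/linux/atomic.h that perform this construction look roughly like the
sketch below: each ordered variant is built by running the architecture's
_relaxed op and adding the appropriate barrier around it.

#ifndef __atomic_op_acquire
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	smp_mb__after_atomic();	/* order against later accesses */	\
	__ret;								\
})
#endif

#ifndef __atomic_op_release
#define __atomic_op_release(op, args...)				\
({									\
	smp_mb__before_atomic();	/* order against earlier accesses */ \
	op##_relaxed(args);						\
})
#endif

#ifndef __atomic_op_fence
#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	smp_mb__before_atomic();					\
	__ret = op##_relaxed(args);					\
	smp_mb__after_atomic();						\
	__ret;								\
})
#endif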

XXX: if/when MIPS starts using its new SYNCxx instructions, it can provide
custom __atomic_op_{acquire,release}() macros, as per the powerpc example.
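
For comparison (quoted roughly, not part of this diff), the powerpc override
referred to above lives in arch/powerpc/include/asm/atomic.h and uses its own
lighter-weight barriers instead of full smp_mb():

/* powerpc example: acquire/release built around the _relaxed op */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory");	\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory");	\
	op##_relaxed(args);						\
})

A MIPS version would presumably substitute its lightweight SYNC-stype barriers
for PPC_ACQUIRE_BARRIER/PPC_RELEASE_BARRIER; that substitution is only a
suggestion here, not something this patch implements.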
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: linux-mips@linux-mips.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent fe14d2f1
arch/mips/include/asm/atomic.h

@@ -79,12 +79,10 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
 }
 
 #define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
+static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
 { \
 	int result; \
 	\
-	smp_mb__before_llsc(); \
-	\
 	if (kernel_uses_llsc && R10000_LLSC_WAR) { \
 		int temp; \
 
@@ -125,18 +123,14 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
 		raw_local_irq_restore(flags); \
 	} \
 	\
-	smp_llsc_mb(); \
-	\
 	return result; \
 }
 
 #define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static __inline__ int atomic_fetch_##op(int i, atomic_t * v) \
+static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
 { \
 	int result; \
 	\
-	smp_mb__before_llsc(); \
-	\
 	if (kernel_uses_llsc && R10000_LLSC_WAR) { \
 		int temp; \
 
@@ -176,8 +170,6 @@ static __inline__ int atomic_fetch_##op(int i, atomic_t * v) \
 		raw_local_irq_restore(flags); \
 	} \
 	\
-	smp_llsc_mb(); \
-	\
 	return result; \
 }
 
@@ -189,6 +181,11 @@ static __inline__ int atomic_fetch_##op(int i, atomic_t * v) \
 ATOMIC_OPS(add, +=, addu)
 ATOMIC_OPS(sub, -=, subu)
 
+#define atomic_add_return_relaxed	atomic_add_return_relaxed
+#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
+#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
+#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
+
 #undef ATOMIC_OPS
 #define ATOMIC_OPS(op, c_op, asm_op) \
 	ATOMIC_OP(op, c_op, asm_op) \
@@ -198,6 +195,10 @@ ATOMIC_OPS(and, &=, and)
 ATOMIC_OPS(or, |=, or)
 ATOMIC_OPS(xor, ^=, xor)
 
+#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
+#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
+#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
+
 #undef ATOMIC_OPS
 #undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
@@ -420,12 +421,10 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
 }
 
 #define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
+static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
 { \
 	long result; \
 	\
-	smp_mb__before_llsc(); \
-	\
 	if (kernel_uses_llsc && R10000_LLSC_WAR) { \
 		long temp; \
 
@@ -467,18 +466,14 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
 		raw_local_irq_restore(flags); \
 	} \
 	\
-	smp_llsc_mb(); \
-	\
 	return result; \
 }
 
 #define ATOMIC64_FETCH_OP(op, c_op, asm_op) \
-static __inline__ long atomic64_fetch_##op(long i, atomic64_t * v) \
+static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
 { \
 	long result; \
 	\
-	smp_mb__before_llsc(); \
-	\
 	if (kernel_uses_llsc && R10000_LLSC_WAR) { \
 		long temp; \
 
@@ -519,8 +514,6 @@ static __inline__ long atomic64_fetch_##op(long i, atomic64_t * v) \
 		raw_local_irq_restore(flags); \
 	} \
 	\
-	smp_llsc_mb(); \
-	\
 	return result; \
 }
 
@@ -532,6 +525,11 @@ static __inline__ long atomic64_fetch_##op(long i, atomic64_t * v) \
 ATOMIC64_OPS(add, +=, daddu)
 ATOMIC64_OPS(sub, -=, dsubu)
 
+#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
+#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
+#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
+#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
+
 #undef ATOMIC64_OPS
 #define ATOMIC64_OPS(op, c_op, asm_op) \
 	ATOMIC64_OP(op, c_op, asm_op) \
@@ -541,6 +539,10 @@ ATOMIC64_OPS(and, &=, and)
 ATOMIC64_OPS(or, |=, or)
 ATOMIC64_OPS(xor, ^=, xor)
 
+#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
+#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
+#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
+
 #undef ATOMIC64_OPS
 #undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
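
With only the _relaxed forms defined above (and the #define self-mappings
advertising them), include/linux/atomic.h generates the remaining names,
roughly as in this sketch of the generic layer (not part of the diff):

#ifndef atomic_add_return
#define atomic_add_return(...)						\
	__atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return_acquire
#define atomic_add_return_acquire(...)					\
	__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return_release
#define atomic_add_return_release(...)					\
	__atomic_op_release(atomic_add_return, __VA_ARGS__)
#endif

Since MIPS maps smp_mb__before_atomic()/smp_mb__after_atomic() onto
smp_mb__before_llsc()/smp_llsc_mb(), callers of the fully ordered
atomic_add_return() and friends still see the same barriers that used to be
open-coded in the macros above.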