Commit 4936084c authored by Joshua Kinard, committed by Paul Burton

MIPS: Cleanup R10000_LLSC_WAR logic in atomic.h

This patch reduces the conditionals in the MIPS atomic code that deal
with a silicon bug in early R10000 CPUs, which required a workaround of
a branch-likely instruction following a store-conditional in order to
guarantee the whole ll/sc sequence is atomic.  As the only real
difference is a branch-likely instruction (beqzl) over a standard
branch (beqz), the conditional is reduced to a single preprocessor
check at the top that picks the required instruction.
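
For illustration, a minimal sketch of the pattern (simplified: a plain
int instead of atomic_t, ISA directives omitted; not the actual kernel
header).  Adjacent string literals concatenate in C, so the selected
mnemonic is pasted straight into every inline-asm template:

  /* Sketch only; R10000_LLSC_WAR comes from <asm/war.h> in the kernel. */
  #if R10000_LLSC_WAR
  # define __scbeqz "beqzl"  /* branch-likely: pre-rev-3.0 R10000 workaround */
  #else
  # define __scbeqz "beqz"   /* ordinary branch on unaffected CPUs */
  #endif

  static inline void atomic_add_sketch(int i, int *counter)
  {
          int temp;

          /* The loop retries until the store-conditional succeeds; only
           * the branch mnemonic differs between the two configurations. */
          __asm__ __volatile__(
          "1:     ll      %0, %1          \n"     /* load-linked */
          "       addu    %0, %2          \n"     /* apply the operation */
          "       sc      %0, %1          \n"     /* store-conditional */
          "\t" __scbeqz " %0, 1b          \n"     /* loop back if sc failed */
          : "=&r" (temp), "+m" (*counter)
          : "Ir" (i));
  }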

This requires writing the affected sequences entirely in assembler, so
we discard the non-R10000 case, which used a mixture of a C do...while
loop with embedded assembler and was added back in commit 7837314d
("MIPS: Get rid of branches to .subsections.").  A note found in the
git log for commit 5999eca25c1f ("[MIPS] Improve branch prediction in
ll/sc atomic operations.") is also addressed.

The macro definition for the branch instruction and the code comment
derive from a patch sent in earlier by Paul Burton for various cmpxchg
cleanups.

[paul.burton@mips.com:
  - Minor whitespace fix for checkpatch.]
Signed-off-by: Joshua Kinard <kumba@gentoo.org>
Signed-off-by: Paul Burton <paul.burton@mips.com>
Patchwork: https://patchwork.linux-mips.org/patch/17736/
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: James Hogan <james.hogan@mips.com>
Cc: "Maciej W. Rozycki" <macro@mips.com>
Cc: linux-mips@linux-mips.org
parent a0a5ac3c
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -22,6 +22,17 @@
 #include <asm/cmpxchg.h>
 #include <asm/war.h>
 
+/*
+ * Using a branch-likely instruction to check the result of an sc instruction
+ * works around a bug present in R10000 CPUs prior to revision 3.0 that could
+ * cause ll-sc sequences to execute non-atomically.
+ */
+#if R10000_LLSC_WAR
+# define __scbeqz "beqzl"
+#else
+# define __scbeqz "beqz"
+#endif
+
 #define ATOMIC_INIT(i)	  { (i) }
 
 /*
@@ -44,31 +55,18 @@
 #define ATOMIC_OP(op, c_op, asm_op)					      \
 static __inline__ void atomic_##op(int i, atomic_t * v)		      \
 {									      \
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {			      \
+	if (kernel_uses_llsc) {						      \
 		int temp;						      \
 									      \
 		__asm__ __volatile__(					      \
-		"	.set	arch=r4000			\n"	      \
+		"	.set	"MIPS_ISA_LEVEL"		\n"	      \
 		"1:	ll	%0, %1		# atomic_" #op "\n"	      \
 		"	" #asm_op " %0, %2			\n"	      \
 		"	sc	%0, %1				\n"	      \
-		"	beqzl	%0, 1b				\n"	      \
+		"\t" __scbeqz "	%0, 1b				\n"	      \
 		"	.set	mips0				\n"	      \
 		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)	      \
 		: "Ir" (i));						      \
-	} else if (kernel_uses_llsc) {					      \
-		int temp;						      \
-									      \
-		do {							      \
-			__asm__ __volatile__(				      \
-			"	.set	"MIPS_ISA_LEVEL"	\n"	      \
-			"	ll	%0, %1	# atomic_" #op "\n"	      \
-			"	" #asm_op " %0, %2		\n"	      \
-			"	sc	%0, %1			\n"	      \
-			"	.set	mips0			\n"	      \
-			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
-			: "Ir" (i));					      \
-		} while (unlikely(!temp));				      \
 	} else {							      \
 		unsigned long flags;					      \
 									      \
@@ -83,36 +81,20 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
 {									      \
 	int result;							      \
 									      \
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {			      \
+	if (kernel_uses_llsc) {						      \
 		int temp;						      \
 									      \
 		__asm__ __volatile__(					      \
-		"	.set	arch=r4000			\n"	      \
+		"	.set	"MIPS_ISA_LEVEL"		\n"	      \
 		"1:	ll	%1, %2		# atomic_" #op "_return	\n"   \
 		"	" #asm_op " %0, %1, %3			\n"	      \
 		"	sc	%0, %2				\n"	      \
-		"	beqzl	%0, 1b				\n"	      \
+		"\t" __scbeqz "	%0, 1b				\n"	      \
 		"	" #asm_op " %0, %1, %3			\n"	      \
 		"	.set	mips0				\n"	      \
 		: "=&r" (result), "=&r" (temp),				      \
 		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
 		: "Ir" (i));						      \
-	} else if (kernel_uses_llsc) {					      \
-		int temp;						      \
-									      \
-		do {							      \
-			__asm__ __volatile__(				      \
-			"	.set	"MIPS_ISA_LEVEL"	\n"	      \
-			"	ll	%1, %2	# atomic_" #op "_return	\n"   \
-			"	" #asm_op " %0, %1, %3		\n"	      \
-			"	sc	%0, %2			\n"	      \
-			"	.set	mips0			\n"	      \
-			: "=&r" (result), "=&r" (temp),			      \
-			  "+" GCC_OFF_SMALL_ASM() (v->counter)		      \
-			: "Ir" (i));					      \
-		} while (unlikely(!result));				      \
-									      \
-		result = temp; result c_op i;				      \
 	} else {							      \
 		unsigned long flags;					      \
 									      \
@@ -131,36 +113,20 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
 {									      \
 	int result;							      \
 									      \
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {			      \
+	if (kernel_uses_llsc) {						      \
 		int temp;						      \
 									      \
 		__asm__ __volatile__(					      \
-		"	.set	arch=r4000			\n"	      \
+		"	.set	"MIPS_ISA_LEVEL"		\n"	      \
 		"1:	ll	%1, %2		# atomic_fetch_" #op "	\n"   \
 		"	" #asm_op " %0, %1, %3			\n"	      \
 		"	sc	%0, %2				\n"	      \
-		"	beqzl	%0, 1b				\n"	      \
+		"\t" __scbeqz "	%0, 1b				\n"	      \
 		"	move	%0, %1				\n"	      \
 		"	.set	mips0				\n"	      \
 		: "=&r" (result), "=&r" (temp),				      \
 		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
 		: "Ir" (i));						      \
-	} else if (kernel_uses_llsc) {					      \
-		int temp;						      \
-									      \
-		do {							      \
-			__asm__ __volatile__(				      \
-			"	.set	"MIPS_ISA_LEVEL"	\n"	      \
-			"	ll	%1, %2	# atomic_fetch_" #op "	\n"   \
-			"	" #asm_op " %0, %1, %3		\n"	      \
-			"	sc	%0, %2			\n"	      \
-			"	.set	mips0			\n"	      \
-			: "=&r" (result), "=&r" (temp),			      \
-			  "+" GCC_OFF_SMALL_ASM() (v->counter)		      \
-			: "Ir" (i));					      \
-		} while (unlikely(!result));				      \
-									      \
-		result = temp;						      \
 	} else {							      \
 		unsigned long flags;					      \
 									      \
@@ -218,24 +184,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 
 	smp_mb__before_llsc();
 
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {
-		int temp;
-
-		__asm__ __volatile__(
-		"	.set	arch=r4000				\n"
-		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
-		"	subu	%0, %1, %3				\n"
-		"	move	%1, %0					\n"
-		"	bltz	%0, 1f					\n"
-		"	sc	%1, %2					\n"
-		"	beqzl	%1, 1b					\n"
-		"1:							\n"
-		"	.set	mips0					\n"
-		: "=&r" (result), "=&r" (temp),
-		  "+" GCC_OFF_SMALL_ASM() (v->counter)
-		: "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
-		: "memory");
-	} else if (kernel_uses_llsc) {
+	if (kernel_uses_llsc) {
 		int temp;
 
 		__asm__ __volatile__(
@@ -245,7 +194,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		"	move	%1, %0					\n"
 		"	bltz	%0, 1f					\n"
 		"	sc	%1, %2					\n"
-		"	beqz	%1, 1b					\n"
+		"\t" __scbeqz "	%1, 1b					\n"
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp),
@@ -382,31 +331,18 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 #define ATOMIC64_OP(op, c_op, asm_op)					      \
 static __inline__ void atomic64_##op(long i, atomic64_t * v)		      \
 {									      \
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {			      \
+	if (kernel_uses_llsc) {						      \
 		long temp;						      \
 									      \
 		__asm__ __volatile__(					      \
-		"	.set	arch=r4000			\n"	      \
+		"	.set	"MIPS_ISA_LEVEL"		\n"	      \
 		"1:	lld	%0, %1		# atomic64_" #op "\n"	      \
 		"	" #asm_op " %0, %2			\n"	      \
 		"	scd	%0, %1				\n"	      \
-		"	beqzl	%0, 1b				\n"	      \
+		"\t" __scbeqz "	%0, 1b				\n"	      \
 		"	.set	mips0				\n"	      \
 		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)	      \
 		: "Ir" (i));						      \
-	} else if (kernel_uses_llsc) {					      \
-		long temp;						      \
-									      \
-		do {							      \
-			__asm__ __volatile__(				      \
-			"	.set	"MIPS_ISA_LEVEL"	\n"	      \
-			"	lld	%0, %1	# atomic64_" #op "\n"	      \
-			"	" #asm_op " %0, %2		\n"	      \
-			"	scd	%0, %1			\n"	      \
-			"	.set	mips0			\n"	      \
-			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
-			: "Ir" (i));					      \
-		} while (unlikely(!temp));				      \
 	} else {							      \
 		unsigned long flags;					      \
 									      \
@@ -421,37 +357,20 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
 {									      \
 	long result;							      \
 									      \
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {			      \
+	if (kernel_uses_llsc) {						      \
 		long temp;						      \
 									      \
 		__asm__ __volatile__(					      \
-		"	.set	arch=r4000			\n"	      \
+		"	.set	"MIPS_ISA_LEVEL"		\n"	      \
 		"1:	lld	%1, %2		# atomic64_" #op "_return\n"  \
 		"	" #asm_op " %0, %1, %3			\n"	      \
 		"	scd	%0, %2				\n"	      \
-		"	beqzl	%0, 1b				\n"	      \
+		"\t" __scbeqz "	%0, 1b				\n"	      \
 		"	" #asm_op " %0, %1, %3			\n"	      \
 		"	.set	mips0				\n"	      \
 		: "=&r" (result), "=&r" (temp),				      \
 		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
 		: "Ir" (i));						      \
-	} else if (kernel_uses_llsc) {					      \
-		long temp;						      \
-									      \
-		do {							      \
-			__asm__ __volatile__(				      \
-			"	.set	"MIPS_ISA_LEVEL"	\n"	      \
-			"	lld	%1, %2	# atomic64_" #op "_return\n"  \
-			"	" #asm_op " %0, %1, %3		\n"	      \
-			"	scd	%0, %2			\n"	      \
-			"	.set	mips0			\n"	      \
-			: "=&r" (result), "=&r" (temp),			      \
-			  "=" GCC_OFF_SMALL_ASM() (v->counter)		      \
-			: "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)	      \
-			: "memory");					      \
-		} while (unlikely(!result));				      \
-									      \
-		result = temp; result c_op i;				      \
 	} else {							      \
 		unsigned long flags;					      \
 									      \
@@ -474,33 +393,16 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
 		long temp;						      \
 									      \
 		__asm__ __volatile__(					      \
-		"	.set	arch=r4000			\n"	      \
+		"	.set	"MIPS_ISA_LEVEL"		\n"	      \
 		"1:	lld	%1, %2		# atomic64_fetch_" #op "\n"   \
 		"	" #asm_op " %0, %1, %3			\n"	      \
 		"	scd	%0, %2				\n"	      \
-		"	beqzl	%0, 1b				\n"	      \
+		"\t" __scbeqz "	%0, 1b				\n"	      \
 		"	move	%0, %1				\n"	      \
 		"	.set	mips0				\n"	      \
 		: "=&r" (result), "=&r" (temp),				      \
 		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
 		: "Ir" (i));						      \
-	} else if (kernel_uses_llsc) {					      \
-		long temp;						      \
-									      \
-		do {							      \
-			__asm__ __volatile__(				      \
-			"	.set	"MIPS_ISA_LEVEL"	\n"	      \
-			"	lld	%1, %2	# atomic64_fetch_" #op "\n"   \
-			"	" #asm_op " %0, %1, %3		\n"	      \
-			"	scd	%0, %2			\n"	      \
-			"	.set	mips0			\n"	      \
-			: "=&r" (result), "=&r" (temp),			      \
-			  "=" GCC_OFF_SMALL_ASM() (v->counter)		      \
-			: "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)	      \
-			: "memory");					      \
-		} while (unlikely(!result));				      \
-									      \
-		result = temp;						      \
 	} else {							      \
 		unsigned long flags;					      \
 									      \
@@ -559,24 +461,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 
 	smp_mb__before_llsc();
 
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {
-		long temp;
-
-		__asm__ __volatile__(
-		"	.set	arch=r4000				\n"
-		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
-		"	dsubu	%0, %1, %3				\n"
-		"	move	%1, %0					\n"
-		"	bltz	%0, 1f					\n"
-		"	scd	%1, %2					\n"
-		"	beqzl	%1, 1b					\n"
-		"1:							\n"
-		"	.set	mips0					\n"
-		: "=&r" (result), "=&r" (temp),
-		  "=" GCC_OFF_SMALL_ASM() (v->counter)
-		: "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
-		: "memory");
-	} else if (kernel_uses_llsc) {
+	if (kernel_uses_llsc) {
 		long temp;
 
 		__asm__ __volatile__(
@@ -586,7 +471,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		"	move	%1, %0					\n"
 		"	bltz	%0, 1f					\n"
 		"	scd	%1, %2					\n"
-		"	beqz	%1, 1b					\n"
+		"\t" __scbeqz "	%1, 1b					\n"
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp),