Commit 878f75c7 authored by Paul Burton

MIPS: Unify sc beqz definition

We currently duplicate the definition of __scbeqz in asm/atomic.h &
asm/cmpxchg.h. Move it to asm/llsc.h & rename it to __SC_BEQZ to fit
better with the existing __SC macro provided there.

We include a trailing tab in the string so that users don't need to add
whitespace of their own after the instruction mnemonic.
Signed-off-by: Paul Burton <paul.burton@mips.com>
Cc: linux-mips@vger.kernel.org
Cc: Huacai Chen <chenhc@lemote.com>
Cc: Jiaxun Yang <jiaxun.yang@flygoat.com>
Cc: linux-kernel@vger.kernel.org
parent 376357ac
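
For context, the sketch below (not part of the patch) illustrates the usage pattern the rename is aimed at: __SC_BEQZ already ends in whitespace, so callers write their operands directly after the macro, exactly as the hunks below do. The helper name example_atomic_add, the simplified asm body, and the bare "memory" clobber are illustrative assumptions; the real users are the atomic_*() and cmpxchg() routines being patched.

/*
 * Illustrative sketch only (not part of this commit): how a caller uses
 * __SC_BEQZ after this patch. The macro expands to "beqz" (or "beqzl"
 * when the R10000 LL/SC workaround is enabled) followed by whitespace,
 * so the operand string starts immediately after it. The real kernel
 * routines additionally wrap the asm in .set push/pop and ISA-level
 * directives; those are omitted here for brevity.
 */
#include <asm/compiler.h>	/* GCC_OFF_SMALL_ASM() */
#include <asm/llsc.h>		/* __SC_BEQZ, after this patch */

static inline void example_atomic_add(int i, int *counter)
{
	int temp;

	__asm__ __volatile__(
	"1:	ll	%0, %1		\n"	/* load-linked the old value  */
	"	addu	%0, %2		\n"	/* apply the operation        */
	"	sc	%0, %1		\n"	/* try to store it back       */
	"\t" __SC_BEQZ "%0, 1b		\n"	/* sc failed (wrote 0): retry */
	: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*counter)
	: "Ir" (i)
	: "memory");
}
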
@@ -20,19 +20,9 @@
 #include <asm/compiler.h>
 #include <asm/cpu-features.h>
 #include <asm/cmpxchg.h>
+#include <asm/llsc.h>
 #include <asm/war.h>
 
-/*
- * Using a branch-likely instruction to check the result of an sc instruction
- * works around a bug present in R10000 CPUs prior to revision 3.0 that could
- * cause ll-sc sequences to execute non-atomically.
- */
-#if R10000_LLSC_WAR
-# define __scbeqz "beqzl"
-#else
-# define __scbeqz "beqz"
-#endif
-
 #define ATOMIC_INIT(i) { (i) }
 
 /*
@@ -65,7 +55,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
 "1: ll %0, %1 # atomic_" #op " \n" \
 " " #asm_op " %0, %2 \n" \
 " sc %0, %1 \n" \
-"\t" __scbeqz " %0, 1b \n" \
+"\t" __SC_BEQZ "%0, 1b \n" \
 " .set pop \n" \
 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
 : "Ir" (i) : __LLSC_CLOBBER); \
@@ -93,7 +83,7 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
 "1: ll %1, %2 # atomic_" #op "_return \n" \
 " " #asm_op " %0, %1, %3 \n" \
 " sc %0, %2 \n" \
-"\t" __scbeqz " %0, 1b \n" \
+"\t" __SC_BEQZ "%0, 1b \n" \
 " " #asm_op " %0, %1, %3 \n" \
 " .set pop \n" \
 : "=&r" (result), "=&r" (temp), \
@@ -127,7 +117,7 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
 "1: ll %1, %2 # atomic_fetch_" #op " \n" \
 " " #asm_op " %0, %1, %3 \n" \
 " sc %0, %2 \n" \
-"\t" __scbeqz " %0, 1b \n" \
+"\t" __SC_BEQZ "%0, 1b \n" \
 " .set pop \n" \
 " move %0, %1 \n" \
 : "=&r" (result), "=&r" (temp), \
@@ -205,7 +195,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 " .set push \n"
 " .set "MIPS_ISA_LEVEL" \n"
 " sc %1, %2 \n"
-"\t" __scbeqz " %1, 1b \n"
+"\t" __SC_BEQZ "%1, 1b \n"
 "2: \n"
 " .set pop \n"
 : "=&r" (result), "=&r" (temp),
@@ -267,7 +257,7 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
 "1: lld %0, %1 # atomic64_" #op " \n" \
 " " #asm_op " %0, %2 \n" \
 " scd %0, %1 \n" \
-"\t" __scbeqz " %0, 1b \n" \
+"\t" __SC_BEQZ "%0, 1b \n" \
 " .set pop \n" \
 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
 : "Ir" (i) : __LLSC_CLOBBER); \
@@ -295,7 +285,7 @@ static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
 "1: lld %1, %2 # atomic64_" #op "_return\n" \
 " " #asm_op " %0, %1, %3 \n" \
 " scd %0, %2 \n" \
-"\t" __scbeqz " %0, 1b \n" \
+"\t" __SC_BEQZ "%0, 1b \n" \
 " " #asm_op " %0, %1, %3 \n" \
 " .set pop \n" \
 : "=&r" (result), "=&r" (temp), \
@@ -329,7 +319,7 @@ static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
 "1: lld %1, %2 # atomic64_fetch_" #op "\n" \
 " " #asm_op " %0, %1, %3 \n" \
 " scd %0, %2 \n" \
-"\t" __scbeqz " %0, 1b \n" \
+"\t" __SC_BEQZ "%0, 1b \n" \
 " move %0, %1 \n" \
 " .set pop \n" \
 : "=&r" (result), "=&r" (temp), \
@@ -404,7 +394,7 @@ static __inline__ s64 atomic64_sub_if_positive(s64 i, atomic64_t * v)
 " move %1, %0 \n"
 " bltz %0, 1f \n"
 " scd %1, %2 \n"
-"\t" __scbeqz " %1, 1b \n"
+"\t" __SC_BEQZ "%1, 1b \n"
 "1: \n"
 " .set pop \n"
 : "=&r" (result), "=&r" (temp),
@@ -11,19 +11,9 @@
 #include <linux/bug.h>
 #include <linux/irqflags.h>
 #include <asm/compiler.h>
+#include <asm/llsc.h>
 #include <asm/war.h>
 
-/*
- * Using a branch-likely instruction to check the result of an sc instruction
- * works around a bug present in R10000 CPUs prior to revision 3.0 that could
- * cause ll-sc sequences to execute non-atomically.
- */
-#if R10000_LLSC_WAR
-# define __scbeqz "beqzl"
-#else
-# define __scbeqz "beqz"
-#endif
-
 /*
  * These functions doesn't exist, so if they are called you'll either:
  *
@@ -57,7 +47,7 @@ extern unsigned long __xchg_called_with_bad_pointer(void)
 " move $1, %z3 \n" \
 " .set " MIPS_ISA_ARCH_LEVEL " \n" \
 " " st " $1, %1 \n" \
-"\t" __scbeqz " $1, 1b \n" \
+"\t" __SC_BEQZ "$1, 1b \n" \
 " .set pop \n" \
 : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
 : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) \
@@ -130,7 +120,7 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
 " move $1, %z4 \n" \
 " .set "MIPS_ISA_ARCH_LEVEL" \n" \
 " " st " $1, %1 \n" \
-"\t" __scbeqz " $1, 1b \n" \
+"\t" __SC_BEQZ "$1, 1b \n" \
 " .set pop \n" \
 "2: \n" \
 : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
@@ -268,7 +258,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
 /* Attempt to store new at ptr */
 " scd %L1, %2 \n"
 /* If we failed, loop! */
-"\t" __scbeqz " %L1, 1b \n"
+"\t" __SC_BEQZ "%L1, 1b \n"
 " .set pop \n"
 "2: \n"
 : "=&r"(ret),
@@ -311,6 +301,4 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
 # endif /* !CONFIG_SMP */
 #endif /* !CONFIG_64BIT */
 
-#undef __scbeqz
-
 #endif /* __ASM_CMPXCHG_H */
@@ -25,4 +25,15 @@
 #define __EXT "dext "
 #endif
 
+/*
+ * Using a branch-likely instruction to check the result of an sc instruction
+ * works around a bug present in R10000 CPUs prior to revision 3.0 that could
+ * cause ll-sc sequences to execute non-atomically.
+ */
+#if R10000_LLSC_WAR
+# define __SC_BEQZ "beqzl "
+#else
+# define __SC_BEQZ "beqz "
+#endif
+
 #endif /* __ASM_LLSC_H */