Commit 378ed6f0 authored by Paul Burton

MIPS: Avoid using .set mips0 to restore ISA

We currently have 2 commonly used methods for switching ISA within
assembly code, then restoring the original ISA.

  1) Using a pair of .set push & .set pop directives. For example:

     .set	push
     .set	mips32r2
     <some_insn>
     .set	pop

  2) Using .set mips0 to restore the ISA originally specified on the
     command line. For example:

     .set	mips32r2
     <some_insn>
     .set	mips0

Unfortunately method 2 does not work with nanoMIPS toolchains, where the
assembler rejects the .set mips0 directive like so:

     Error: cannot change ISA from nanoMIPS to mips0

In preparation for supporting nanoMIPS builds, switch all instances of
method 2 in generic non-platform-specific code to use push & pop as in
method 1 instead. The .set push & .set pop approach is arguably cleaner anyway,
and if nothing else it's good to consistently use one method.
Signed-off-by: Paul Burton <paul.burton@mips.com>
Patchwork: https://patchwork.linux-mips.org/patch/21037/
Cc: linux-mips@linux-mips.org
parent 183b40f9
...@@ -59,12 +59,13 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \ ...@@ -59,12 +59,13 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
int temp; \ int temp; \
\ \
__asm__ __volatile__( \ __asm__ __volatile__( \
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \ " .set "MIPS_ISA_LEVEL" \n" \
"1: ll %0, %1 # atomic_" #op " \n" \ "1: ll %0, %1 # atomic_" #op " \n" \
" " #asm_op " %0, %2 \n" \ " " #asm_op " %0, %2 \n" \
" sc %0, %1 \n" \ " sc %0, %1 \n" \
"\t" __scbeqz " %0, 1b \n" \ "\t" __scbeqz " %0, 1b \n" \
" .set mips0 \n" \ " .set pop \n" \
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \ : "Ir" (i)); \
} else { \ } else { \
...@@ -85,13 +86,14 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \ ...@@ -85,13 +86,14 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
int temp; \ int temp; \
\ \
__asm__ __volatile__( \ __asm__ __volatile__( \
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \ " .set "MIPS_ISA_LEVEL" \n" \
"1: ll %1, %2 # atomic_" #op "_return \n" \ "1: ll %1, %2 # atomic_" #op "_return \n" \
" " #asm_op " %0, %1, %3 \n" \ " " #asm_op " %0, %1, %3 \n" \
" sc %0, %2 \n" \ " sc %0, %2 \n" \
"\t" __scbeqz " %0, 1b \n" \ "\t" __scbeqz " %0, 1b \n" \
" " #asm_op " %0, %1, %3 \n" \ " " #asm_op " %0, %1, %3 \n" \
" .set mips0 \n" \ " .set pop \n" \
: "=&r" (result), "=&r" (temp), \ : "=&r" (result), "=&r" (temp), \
"+" GCC_OFF_SMALL_ASM() (v->counter) \ "+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \ : "Ir" (i)); \
...@@ -117,12 +119,13 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \ ...@@ -117,12 +119,13 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
int temp; \ int temp; \
\ \
__asm__ __volatile__( \ __asm__ __volatile__( \
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \ " .set "MIPS_ISA_LEVEL" \n" \
"1: ll %1, %2 # atomic_fetch_" #op " \n" \ "1: ll %1, %2 # atomic_fetch_" #op " \n" \
" " #asm_op " %0, %1, %3 \n" \ " " #asm_op " %0, %1, %3 \n" \
" sc %0, %2 \n" \ " sc %0, %2 \n" \
"\t" __scbeqz " %0, 1b \n" \ "\t" __scbeqz " %0, 1b \n" \
" .set mips0 \n" \ " .set pop \n" \
" move %0, %1 \n" \ " move %0, %1 \n" \
: "=&r" (result), "=&r" (temp), \ : "=&r" (result), "=&r" (temp), \
"+" GCC_OFF_SMALL_ASM() (v->counter) \ "+" GCC_OFF_SMALL_ASM() (v->counter) \
...@@ -188,17 +191,19 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) ...@@ -188,17 +191,19 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
int temp; int temp;
__asm__ __volatile__( __asm__ __volatile__(
" .set push \n"
" .set "MIPS_ISA_LEVEL" \n" " .set "MIPS_ISA_LEVEL" \n"
"1: ll %1, %2 # atomic_sub_if_positive\n" "1: ll %1, %2 # atomic_sub_if_positive\n"
" .set mips0 \n" " .set pop \n"
" subu %0, %1, %3 \n" " subu %0, %1, %3 \n"
" move %1, %0 \n" " move %1, %0 \n"
" bltz %0, 1f \n" " bltz %0, 1f \n"
" .set push \n"
" .set "MIPS_ISA_LEVEL" \n" " .set "MIPS_ISA_LEVEL" \n"
" sc %1, %2 \n" " sc %1, %2 \n"
"\t" __scbeqz " %1, 1b \n" "\t" __scbeqz " %1, 1b \n"
"1: \n" "1: \n"
" .set mips0 \n" " .set pop \n"
: "=&r" (result), "=&r" (temp), : "=&r" (result), "=&r" (temp),
"+" GCC_OFF_SMALL_ASM() (v->counter) "+" GCC_OFF_SMALL_ASM() (v->counter)
: "Ir" (i)); : "Ir" (i));
...@@ -252,12 +257,13 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \ ...@@ -252,12 +257,13 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
long temp; \ long temp; \
\ \
__asm__ __volatile__( \ __asm__ __volatile__( \
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \ " .set "MIPS_ISA_LEVEL" \n" \
"1: lld %0, %1 # atomic64_" #op " \n" \ "1: lld %0, %1 # atomic64_" #op " \n" \
" " #asm_op " %0, %2 \n" \ " " #asm_op " %0, %2 \n" \
" scd %0, %1 \n" \ " scd %0, %1 \n" \
"\t" __scbeqz " %0, 1b \n" \ "\t" __scbeqz " %0, 1b \n" \
" .set mips0 \n" \ " .set pop \n" \
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \ : "Ir" (i)); \
} else { \ } else { \
...@@ -278,13 +284,14 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \ ...@@ -278,13 +284,14 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
long temp; \ long temp; \
\ \
__asm__ __volatile__( \ __asm__ __volatile__( \
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \ " .set "MIPS_ISA_LEVEL" \n" \
"1: lld %1, %2 # atomic64_" #op "_return\n" \ "1: lld %1, %2 # atomic64_" #op "_return\n" \
" " #asm_op " %0, %1, %3 \n" \ " " #asm_op " %0, %1, %3 \n" \
" scd %0, %2 \n" \ " scd %0, %2 \n" \
"\t" __scbeqz " %0, 1b \n" \ "\t" __scbeqz " %0, 1b \n" \
" " #asm_op " %0, %1, %3 \n" \ " " #asm_op " %0, %1, %3 \n" \
" .set mips0 \n" \ " .set pop \n" \
: "=&r" (result), "=&r" (temp), \ : "=&r" (result), "=&r" (temp), \
"+" GCC_OFF_SMALL_ASM() (v->counter) \ "+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \ : "Ir" (i)); \
...@@ -310,13 +317,14 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \ ...@@ -310,13 +317,14 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
long temp; \ long temp; \
\ \
__asm__ __volatile__( \ __asm__ __volatile__( \
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \ " .set "MIPS_ISA_LEVEL" \n" \
"1: lld %1, %2 # atomic64_fetch_" #op "\n" \ "1: lld %1, %2 # atomic64_fetch_" #op "\n" \
" " #asm_op " %0, %1, %3 \n" \ " " #asm_op " %0, %1, %3 \n" \
" scd %0, %2 \n" \ " scd %0, %2 \n" \
"\t" __scbeqz " %0, 1b \n" \ "\t" __scbeqz " %0, 1b \n" \
" move %0, %1 \n" \ " move %0, %1 \n" \
" .set mips0 \n" \ " .set pop \n" \
: "=&r" (result), "=&r" (temp), \ : "=&r" (result), "=&r" (temp), \
"+" GCC_OFF_SMALL_ASM() (v->counter) \ "+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \ : "Ir" (i)); \
...@@ -382,6 +390,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) ...@@ -382,6 +390,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
long temp; long temp;
__asm__ __volatile__( __asm__ __volatile__(
" .set push \n"
" .set "MIPS_ISA_LEVEL" \n" " .set "MIPS_ISA_LEVEL" \n"
"1: lld %1, %2 # atomic64_sub_if_positive\n" "1: lld %1, %2 # atomic64_sub_if_positive\n"
" dsubu %0, %1, %3 \n" " dsubu %0, %1, %3 \n"
...@@ -390,7 +399,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) ...@@ -390,7 +399,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
" scd %1, %2 \n" " scd %1, %2 \n"
"\t" __scbeqz " %1, 1b \n" "\t" __scbeqz " %1, 1b \n"
"1: \n" "1: \n"
" .set mips0 \n" " .set pop \n"
: "=&r" (result), "=&r" (temp), : "=&r" (result), "=&r" (temp),
"+" GCC_OFF_SMALL_ASM() (v->counter) "+" GCC_OFF_SMALL_ASM() (v->counter)
: "Ir" (i)); : "Ir" (i));
......
...@@ -58,12 +58,13 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) ...@@ -58,12 +58,13 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
if (kernel_uses_llsc && R10000_LLSC_WAR) { if (kernel_uses_llsc && R10000_LLSC_WAR) {
__asm__ __volatile__( __asm__ __volatile__(
" .set push \n"
" .set arch=r4000 \n" " .set arch=r4000 \n"
"1: " __LL "%0, %1 # set_bit \n" "1: " __LL "%0, %1 # set_bit \n"
" or %0, %2 \n" " or %0, %2 \n"
" " __SC "%0, %1 \n" " " __SC "%0, %1 \n"
" beqzl %0, 1b \n" " beqzl %0, 1b \n"
" .set mips0 \n" " .set pop \n"
: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m) : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)); : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
...@@ -80,11 +81,12 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) ...@@ -80,11 +81,12 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
} else if (kernel_uses_llsc) { } else if (kernel_uses_llsc) {
do { do {
__asm__ __volatile__( __asm__ __volatile__(
" .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n" " .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 # set_bit \n" " " __LL "%0, %1 # set_bit \n"
" or %0, %2 \n" " or %0, %2 \n"
" " __SC "%0, %1 \n" " " __SC "%0, %1 \n"
" .set mips0 \n" " .set pop \n"
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
: "ir" (1UL << bit)); : "ir" (1UL << bit));
} while (unlikely(!temp)); } while (unlikely(!temp));
...@@ -110,12 +112,13 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) ...@@ -110,12 +112,13 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
if (kernel_uses_llsc && R10000_LLSC_WAR) { if (kernel_uses_llsc && R10000_LLSC_WAR) {
__asm__ __volatile__( __asm__ __volatile__(
" .set push \n"
" .set arch=r4000 \n" " .set arch=r4000 \n"
"1: " __LL "%0, %1 # clear_bit \n" "1: " __LL "%0, %1 # clear_bit \n"
" and %0, %2 \n" " and %0, %2 \n"
" " __SC "%0, %1 \n" " " __SC "%0, %1 \n"
" beqzl %0, 1b \n" " beqzl %0, 1b \n"
" .set mips0 \n" " .set pop \n"
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
: "ir" (~(1UL << bit))); : "ir" (~(1UL << bit)));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
...@@ -132,11 +135,12 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) ...@@ -132,11 +135,12 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
} else if (kernel_uses_llsc) { } else if (kernel_uses_llsc) {
do { do {
__asm__ __volatile__( __asm__ __volatile__(
" .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n" " .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 # clear_bit \n" " " __LL "%0, %1 # clear_bit \n"
" and %0, %2 \n" " and %0, %2 \n"
" " __SC "%0, %1 \n" " " __SC "%0, %1 \n"
" .set mips0 \n" " .set pop \n"
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
: "ir" (~(1UL << bit))); : "ir" (~(1UL << bit)));
} while (unlikely(!temp)); } while (unlikely(!temp));
...@@ -176,12 +180,13 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) ...@@ -176,12 +180,13 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
unsigned long temp; unsigned long temp;
__asm__ __volatile__( __asm__ __volatile__(
" .set push \n"
" .set arch=r4000 \n" " .set arch=r4000 \n"
"1: " __LL "%0, %1 # change_bit \n" "1: " __LL "%0, %1 # change_bit \n"
" xor %0, %2 \n" " xor %0, %2 \n"
" " __SC "%0, %1 \n" " " __SC "%0, %1 \n"
" beqzl %0, 1b \n" " beqzl %0, 1b \n"
" .set mips0 \n" " .set pop \n"
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
: "ir" (1UL << bit)); : "ir" (1UL << bit));
} else if (kernel_uses_llsc) { } else if (kernel_uses_llsc) {
...@@ -190,11 +195,12 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) ...@@ -190,11 +195,12 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
do { do {
__asm__ __volatile__( __asm__ __volatile__(
" .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n" " .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 # change_bit \n" " " __LL "%0, %1 # change_bit \n"
" xor %0, %2 \n" " xor %0, %2 \n"
" " __SC "%0, %1 \n" " " __SC "%0, %1 \n"
" .set mips0 \n" " .set pop \n"
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
: "ir" (1UL << bit)); : "ir" (1UL << bit));
} while (unlikely(!temp)); } while (unlikely(!temp));
...@@ -223,13 +229,14 @@ static inline int test_and_set_bit(unsigned long nr, ...@@ -223,13 +229,14 @@ static inline int test_and_set_bit(unsigned long nr,
unsigned long temp; unsigned long temp;
__asm__ __volatile__( __asm__ __volatile__(
" .set push \n"
" .set arch=r4000 \n" " .set arch=r4000 \n"
"1: " __LL "%0, %1 # test_and_set_bit \n" "1: " __LL "%0, %1 # test_and_set_bit \n"
" or %2, %0, %3 \n" " or %2, %0, %3 \n"
" " __SC "%2, %1 \n" " " __SC "%2, %1 \n"
" beqzl %2, 1b \n" " beqzl %2, 1b \n"
" and %2, %0, %3 \n" " and %2, %0, %3 \n"
" .set mips0 \n" " .set pop \n"
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
: "r" (1UL << bit) : "r" (1UL << bit)
: "memory"); : "memory");
...@@ -239,11 +246,12 @@ static inline int test_and_set_bit(unsigned long nr, ...@@ -239,11 +246,12 @@ static inline int test_and_set_bit(unsigned long nr,
do { do {
__asm__ __volatile__( __asm__ __volatile__(
" .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n" " .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 # test_and_set_bit \n" " " __LL "%0, %1 # test_and_set_bit \n"
" or %2, %0, %3 \n" " or %2, %0, %3 \n"
" " __SC "%2, %1 \n" " " __SC "%2, %1 \n"
" .set mips0 \n" " .set pop \n"
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
: "r" (1UL << bit) : "r" (1UL << bit)
: "memory"); : "memory");
...@@ -277,13 +285,14 @@ static inline int test_and_set_bit_lock(unsigned long nr, ...@@ -277,13 +285,14 @@ static inline int test_and_set_bit_lock(unsigned long nr,
unsigned long temp; unsigned long temp;
__asm__ __volatile__( __asm__ __volatile__(
" .set push \n"
" .set arch=r4000 \n" " .set arch=r4000 \n"
"1: " __LL "%0, %1 # test_and_set_bit \n" "1: " __LL "%0, %1 # test_and_set_bit \n"
" or %2, %0, %3 \n" " or %2, %0, %3 \n"
" " __SC "%2, %1 \n" " " __SC "%2, %1 \n"
" beqzl %2, 1b \n" " beqzl %2, 1b \n"
" and %2, %0, %3 \n" " and %2, %0, %3 \n"
" .set mips0 \n" " .set pop \n"
: "=&r" (temp), "+m" (*m), "=&r" (res) : "=&r" (temp), "+m" (*m), "=&r" (res)
: "r" (1UL << bit) : "r" (1UL << bit)
: "memory"); : "memory");
...@@ -293,11 +302,12 @@ static inline int test_and_set_bit_lock(unsigned long nr, ...@@ -293,11 +302,12 @@ static inline int test_and_set_bit_lock(unsigned long nr,
do { do {
__asm__ __volatile__( __asm__ __volatile__(
" .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n" " .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 # test_and_set_bit \n" " " __LL "%0, %1 # test_and_set_bit \n"
" or %2, %0, %3 \n" " or %2, %0, %3 \n"
" " __SC "%2, %1 \n" " " __SC "%2, %1 \n"
" .set mips0 \n" " .set pop \n"
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
: "r" (1UL << bit) : "r" (1UL << bit)
: "memory"); : "memory");
...@@ -332,6 +342,7 @@ static inline int test_and_clear_bit(unsigned long nr, ...@@ -332,6 +342,7 @@ static inline int test_and_clear_bit(unsigned long nr,
unsigned long temp; unsigned long temp;
__asm__ __volatile__( __asm__ __volatile__(
" .set push \n"
" .set arch=r4000 \n" " .set arch=r4000 \n"
"1: " __LL "%0, %1 # test_and_clear_bit \n" "1: " __LL "%0, %1 # test_and_clear_bit \n"
" or %2, %0, %3 \n" " or %2, %0, %3 \n"
...@@ -339,7 +350,7 @@ static inline int test_and_clear_bit(unsigned long nr, ...@@ -339,7 +350,7 @@ static inline int test_and_clear_bit(unsigned long nr,
" " __SC "%2, %1 \n" " " __SC "%2, %1 \n"
" beqzl %2, 1b \n" " beqzl %2, 1b \n"
" and %2, %0, %3 \n" " and %2, %0, %3 \n"
" .set mips0 \n" " .set pop \n"
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
: "r" (1UL << bit) : "r" (1UL << bit)
: "memory"); : "memory");
...@@ -365,12 +376,13 @@ static inline int test_and_clear_bit(unsigned long nr, ...@@ -365,12 +376,13 @@ static inline int test_and_clear_bit(unsigned long nr,
do { do {
__asm__ __volatile__( __asm__ __volatile__(
" .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n" " .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 # test_and_clear_bit \n" " " __LL "%0, %1 # test_and_clear_bit \n"
" or %2, %0, %3 \n" " or %2, %0, %3 \n"
" xor %2, %3 \n" " xor %2, %3 \n"
" " __SC "%2, %1 \n" " " __SC "%2, %1 \n"
" .set mips0 \n" " .set pop \n"
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
: "r" (1UL << bit) : "r" (1UL << bit)
: "memory"); : "memory");
...@@ -406,13 +418,14 @@ static inline int test_and_change_bit(unsigned long nr, ...@@ -406,13 +418,14 @@ static inline int test_and_change_bit(unsigned long nr,
unsigned long temp; unsigned long temp;
__asm__ __volatile__( __asm__ __volatile__(
" .set push \n"
" .set arch=r4000 \n" " .set arch=r4000 \n"
"1: " __LL "%0, %1 # test_and_change_bit \n" "1: " __LL "%0, %1 # test_and_change_bit \n"
" xor %2, %0, %3 \n" " xor %2, %0, %3 \n"
" " __SC "%2, %1 \n" " " __SC "%2, %1 \n"
" beqzl %2, 1b \n" " beqzl %2, 1b \n"
" and %2, %0, %3 \n" " and %2, %0, %3 \n"
" .set mips0 \n" " .set pop \n"
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
: "r" (1UL << bit) : "r" (1UL << bit)
: "memory"); : "memory");
...@@ -422,11 +435,12 @@ static inline int test_and_change_bit(unsigned long nr, ...@@ -422,11 +435,12 @@ static inline int test_and_change_bit(unsigned long nr,
do { do {
__asm__ __volatile__( __asm__ __volatile__(
" .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n" " .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 # test_and_change_bit \n" " " __LL "%0, %1 # test_and_change_bit \n"
" xor %2, %0, %3 \n" " xor %2, %0, %3 \n"
" " __SC "\t%2, %1 \n" " " __SC "\t%2, %1 \n"
" .set mips0 \n" " .set pop \n"
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
: "r" (1UL << bit) : "r" (1UL << bit)
: "memory"); : "memory");
......
...@@ -47,9 +47,10 @@ extern unsigned long __xchg_called_with_bad_pointer(void) ...@@ -47,9 +47,10 @@ extern unsigned long __xchg_called_with_bad_pointer(void)
__asm__ __volatile__( \ __asm__ __volatile__( \
" .set push \n" \ " .set push \n" \
" .set noat \n" \ " .set noat \n" \
" .set push \n" \
" .set " MIPS_ISA_ARCH_LEVEL " \n" \ " .set " MIPS_ISA_ARCH_LEVEL " \n" \
"1: " ld " %0, %2 # __xchg_asm \n" \ "1: " ld " %0, %2 # __xchg_asm \n" \
" .set mips0 \n" \ " .set pop \n" \
" move $1, %z3 \n" \ " move $1, %z3 \n" \
" .set " MIPS_ISA_ARCH_LEVEL " \n" \ " .set " MIPS_ISA_ARCH_LEVEL " \n" \
" " st " $1, %1 \n" \ " " st " $1, %1 \n" \
...@@ -117,10 +118,11 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x, ...@@ -117,10 +118,11 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
__asm__ __volatile__( \ __asm__ __volatile__( \
" .set push \n" \ " .set push \n" \
" .set noat \n" \ " .set noat \n" \
" .set push \n" \
" .set "MIPS_ISA_ARCH_LEVEL" \n" \ " .set "MIPS_ISA_ARCH_LEVEL" \n" \
"1: " ld " %0, %2 # __cmpxchg_asm \n" \ "1: " ld " %0, %2 # __cmpxchg_asm \n" \
" bne %0, %z3, 2f \n" \ " bne %0, %z3, 2f \n" \
" .set mips0 \n" \ " .set pop \n" \
" move $1, %z4 \n" \ " move $1, %z4 \n" \
" .set "MIPS_ISA_ARCH_LEVEL" \n" \ " .set "MIPS_ISA_ARCH_LEVEL" \n" \
" " st " $1, %1 \n" \ " " st " $1, %1 \n" \
......
...@@ -21,12 +21,13 @@ static inline void edac_atomic_scrub(void *va, u32 size) ...@@ -21,12 +21,13 @@ static inline void edac_atomic_scrub(void *va, u32 size)
*/ */
__asm__ __volatile__ ( __asm__ __volatile__ (
" .set push \n"
" .set mips2 \n" " .set mips2 \n"
"1: ll %0, %1 # edac_atomic_scrub \n" "1: ll %0, %1 # edac_atomic_scrub \n"
" addu %0, $0 \n" " addu %0, $0 \n"
" sc %0, %1 \n" " sc %0, %1 \n"
" beqz %0, 1b \n" " beqz %0, 1b \n"
" .set mips0 \n" " .set pop \n"
: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*virt_addr) : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*virt_addr)
: GCC_OFF_SMALL_ASM() (*virt_addr)); : GCC_OFF_SMALL_ASM() (*virt_addr));
......
...@@ -24,9 +24,10 @@ ...@@ -24,9 +24,10 @@
__asm__ __volatile__( \ __asm__ __volatile__( \
" .set push \n" \ " .set push \n" \
" .set noat \n" \ " .set noat \n" \
" .set push \n" \
" .set arch=r4000 \n" \ " .set arch=r4000 \n" \
"1: ll %1, %4 # __futex_atomic_op \n" \ "1: ll %1, %4 # __futex_atomic_op \n" \
" .set mips0 \n" \ " .set pop \n" \
" " insn " \n" \ " " insn " \n" \
" .set arch=r4000 \n" \ " .set arch=r4000 \n" \
"2: sc $1, %2 \n" \ "2: sc $1, %2 \n" \
...@@ -35,7 +36,6 @@ ...@@ -35,7 +36,6 @@
"3: \n" \ "3: \n" \
" .insn \n" \ " .insn \n" \
" .set pop \n" \ " .set pop \n" \
" .set mips0 \n" \
" .section .fixup,\"ax\" \n" \ " .section .fixup,\"ax\" \n" \
"4: li %0, %6 \n" \ "4: li %0, %6 \n" \
" j 3b \n" \ " j 3b \n" \
...@@ -53,9 +53,10 @@ ...@@ -53,9 +53,10 @@
__asm__ __volatile__( \ __asm__ __volatile__( \
" .set push \n" \ " .set push \n" \
" .set noat \n" \ " .set noat \n" \
" .set push \n" \
" .set "MIPS_ISA_ARCH_LEVEL" \n" \ " .set "MIPS_ISA_ARCH_LEVEL" \n" \
"1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \ "1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \
" .set mips0 \n" \ " .set pop \n" \
" " insn " \n" \ " " insn " \n" \
" .set "MIPS_ISA_ARCH_LEVEL" \n" \ " .set "MIPS_ISA_ARCH_LEVEL" \n" \
"2: "user_sc("$1", "%2")" \n" \ "2: "user_sc("$1", "%2")" \n" \
...@@ -64,7 +65,6 @@ ...@@ -64,7 +65,6 @@
"3: \n" \ "3: \n" \
" .insn \n" \ " .insn \n" \
" .set pop \n" \ " .set pop \n" \
" .set mips0 \n" \
" .section .fixup,\"ax\" \n" \ " .section .fixup,\"ax\" \n" \
"4: li %0, %6 \n" \ "4: li %0, %6 \n" \
" j 3b \n" \ " j 3b \n" \
...@@ -137,10 +137,11 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, ...@@ -137,10 +137,11 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
"# futex_atomic_cmpxchg_inatomic \n" "# futex_atomic_cmpxchg_inatomic \n"
" .set push \n" " .set push \n"
" .set noat \n" " .set noat \n"
" .set push \n"
" .set arch=r4000 \n" " .set arch=r4000 \n"
"1: ll %1, %3 \n" "1: ll %1, %3 \n"
" bne %1, %z4, 3f \n" " bne %1, %z4, 3f \n"
" .set mips0 \n" " .set pop \n"
" move $1, %z5 \n" " move $1, %z5 \n"
" .set arch=r4000 \n" " .set arch=r4000 \n"
"2: sc $1, %2 \n" "2: sc $1, %2 \n"
...@@ -166,10 +167,11 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, ...@@ -166,10 +167,11 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
"# futex_atomic_cmpxchg_inatomic \n" "# futex_atomic_cmpxchg_inatomic \n"
" .set push \n" " .set push \n"
" .set noat \n" " .set noat \n"
" .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n" " .set "MIPS_ISA_ARCH_LEVEL" \n"
"1: "user_ll("%1", "%3")" \n" "1: "user_ll("%1", "%3")" \n"
" bne %1, %z4, 3f \n" " bne %1, %z4, 3f \n"
" .set mips0 \n" " .set pop \n"
" move $1, %z5 \n" " move $1, %z5 \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n" " .set "MIPS_ISA_ARCH_LEVEL" \n"
"2: "user_sc("$1", "%2")" \n" "2: "user_sc("$1", "%2")" \n"
......
...@@ -66,10 +66,11 @@ do { \ ...@@ -66,10 +66,11 @@ do { \
unsigned long tmp; \ unsigned long tmp; \
\ \
__asm__ __volatile__( \ __asm__ __volatile__( \
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \ " .set "MIPS_ISA_LEVEL" \n" \
" dla %0, 1f \n" \ " dla %0, 1f \n" \
" jr.hb %0 \n" \ " jr.hb %0 \n" \
" .set mips0 \n" \ " .set pop \n" \
"1: \n" \ "1: \n" \
: "=r" (tmp)); \ : "=r" (tmp)); \
} while (0) } while (0)
...@@ -141,10 +142,11 @@ do { \ ...@@ -141,10 +142,11 @@ do { \
unsigned long tmp; \ unsigned long tmp; \
\ \
__asm__ __volatile__( \ __asm__ __volatile__( \
" .set push \n" \
" .set mips64r2 \n" \ " .set mips64r2 \n" \
" dla %0, 1f \n" \ " dla %0, 1f \n" \
" jr.hb %0 \n" \ " jr.hb %0 \n" \
" .set mips0 \n" \ " .set pop \n" \
"1: \n" \ "1: \n" \
: "=r" (tmp)); \ : "=r" (tmp)); \
} while (0) } while (0)
......
...@@ -354,13 +354,14 @@ static inline void pfx##write##bwlq(type val, \ ...@@ -354,13 +354,14 @@ static inline void pfx##write##bwlq(type val, \
if (irq) \ if (irq) \
local_irq_save(__flags); \ local_irq_save(__flags); \
__asm__ __volatile__( \ __asm__ __volatile__( \
".set arch=r4000" "\t\t# __writeq""\n\t" \ ".set push" "\t\t# __writeq""\n\t" \
".set arch=r4000" "\n\t" \
"dsll32 %L0, %L0, 0" "\n\t" \ "dsll32 %L0, %L0, 0" "\n\t" \
"dsrl32 %L0, %L0, 0" "\n\t" \ "dsrl32 %L0, %L0, 0" "\n\t" \
"dsll32 %M0, %M0, 0" "\n\t" \ "dsll32 %M0, %M0, 0" "\n\t" \
"or %L0, %L0, %M0" "\n\t" \ "or %L0, %L0, %M0" "\n\t" \
"sd %L0, %2" "\n\t" \ "sd %L0, %2" "\n\t" \
".set mips0" "\n" \ ".set pop" "\n" \
: "=r" (__tmp) \ : "=r" (__tmp) \
: "0" (__val), "m" (*__mem)); \ : "0" (__val), "m" (*__mem)); \
if (irq) \ if (irq) \
...@@ -387,11 +388,12 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \ ...@@ -387,11 +388,12 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
if (irq) \ if (irq) \
local_irq_save(__flags); \ local_irq_save(__flags); \
__asm__ __volatile__( \ __asm__ __volatile__( \
".set arch=r4000" "\t\t# __readq" "\n\t" \ ".set push" "\t\t# __readq" "\n\t" \
".set arch=r4000" "\n\t" \
"ld %L0, %1" "\n\t" \ "ld %L0, %1" "\n\t" \
"dsra32 %M0, %L0, 0" "\n\t" \ "dsra32 %M0, %L0, 0" "\n\t" \
"sll %L0, %L0, 0" "\n\t" \ "sll %L0, %L0, 0" "\n\t" \
".set mips0" "\n" \ ".set pop" "\n" \
: "=r" (__val) \ : "=r" (__val) \
: "m" (*__mem)); \ : "m" (*__mem)); \
if (irq) \ if (irq) \
......
...@@ -411,11 +411,12 @@ static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg, ...@@ -411,11 +411,12 @@ static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
unsigned long temp; unsigned long temp;
do { do {
__asm__ __volatile__( __asm__ __volatile__(
" .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n" " .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 \n" " " __LL "%0, %1 \n"
" or %0, %2 \n" " or %0, %2 \n"
" " __SC "%0, %1 \n" " " __SC "%0, %1 \n"
" .set mips0 \n" " .set pop \n"
: "=&r" (temp), "+m" (*reg) : "=&r" (temp), "+m" (*reg)
: "r" (val)); : "r" (val));
} while (unlikely(!temp)); } while (unlikely(!temp));
...@@ -427,11 +428,12 @@ static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg, ...@@ -427,11 +428,12 @@ static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
unsigned long temp; unsigned long temp;
do { do {
__asm__ __volatile__( __asm__ __volatile__(
" .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n" " .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 \n" " " __LL "%0, %1 \n"
" and %0, %2 \n" " and %0, %2 \n"
" " __SC "%0, %1 \n" " " __SC "%0, %1 \n"
" .set mips0 \n" " .set pop \n"
: "=&r" (temp), "+m" (*reg) : "=&r" (temp), "+m" (*reg)
: "r" (~val)); : "r" (~val));
} while (unlikely(!temp)); } while (unlikely(!temp));
...@@ -444,12 +446,13 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg, ...@@ -444,12 +446,13 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
unsigned long temp; unsigned long temp;
do { do {
__asm__ __volatile__( __asm__ __volatile__(
" .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n" " .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 \n" " " __LL "%0, %1 \n"
" and %0, %2 \n" " and %0, %2 \n"
" or %0, %3 \n" " or %0, %3 \n"
" " __SC "%0, %1 \n" " " __SC "%0, %1 \n"
" .set mips0 \n" " .set pop \n"
: "=&r" (temp), "+m" (*reg) : "=&r" (temp), "+m" (*reg)
: "r" (~change), "r" (val & change)); : "r" (~change), "r" (val & change));
} while (unlikely(!temp)); } while (unlikely(!temp));
......
...@@ -35,13 +35,14 @@ static __inline__ long local_add_return(long i, local_t * l) ...@@ -35,13 +35,14 @@ static __inline__ long local_add_return(long i, local_t * l)
unsigned long temp; unsigned long temp;
__asm__ __volatile__( __asm__ __volatile__(
" .set push \n"
" .set arch=r4000 \n" " .set arch=r4000 \n"
"1:" __LL "%1, %2 # local_add_return \n" "1:" __LL "%1, %2 # local_add_return \n"
" addu %0, %1, %3 \n" " addu %0, %1, %3 \n"
__SC "%0, %2 \n" __SC "%0, %2 \n"
" beqzl %0, 1b \n" " beqzl %0, 1b \n"
" addu %0, %1, %3 \n" " addu %0, %1, %3 \n"
" .set mips0 \n" " .set pop \n"
: "=&r" (result), "=&r" (temp), "=m" (l->a.counter) : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
: "Ir" (i), "m" (l->a.counter) : "Ir" (i), "m" (l->a.counter)
: "memory"); : "memory");
...@@ -49,13 +50,14 @@ static __inline__ long local_add_return(long i, local_t * l) ...@@ -49,13 +50,14 @@ static __inline__ long local_add_return(long i, local_t * l)
unsigned long temp; unsigned long temp;
__asm__ __volatile__( __asm__ __volatile__(
" .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n" " .set "MIPS_ISA_ARCH_LEVEL" \n"
"1:" __LL "%1, %2 # local_add_return \n" "1:" __LL "%1, %2 # local_add_return \n"
" addu %0, %1, %3 \n" " addu %0, %1, %3 \n"
__SC "%0, %2 \n" __SC "%0, %2 \n"
" beqz %0, 1b \n" " beqz %0, 1b \n"
" addu %0, %1, %3 \n" " addu %0, %1, %3 \n"
" .set mips0 \n" " .set pop \n"
: "=&r" (result), "=&r" (temp), "=m" (l->a.counter) : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
: "Ir" (i), "m" (l->a.counter) : "Ir" (i), "m" (l->a.counter)
: "memory"); : "memory");
...@@ -80,13 +82,14 @@ static __inline__ long local_sub_return(long i, local_t * l) ...@@ -80,13 +82,14 @@ static __inline__ long local_sub_return(long i, local_t * l)
unsigned long temp; unsigned long temp;
__asm__ __volatile__( __asm__ __volatile__(
" .set push \n"
" .set arch=r4000 \n" " .set arch=r4000 \n"
"1:" __LL "%1, %2 # local_sub_return \n" "1:" __LL "%1, %2 # local_sub_return \n"
" subu %0, %1, %3 \n" " subu %0, %1, %3 \n"
__SC "%0, %2 \n" __SC "%0, %2 \n"
" beqzl %0, 1b \n" " beqzl %0, 1b \n"
" subu %0, %1, %3 \n" " subu %0, %1, %3 \n"
" .set mips0 \n" " .set pop \n"
: "=&r" (result), "=&r" (temp), "=m" (l->a.counter) : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
: "Ir" (i), "m" (l->a.counter) : "Ir" (i), "m" (l->a.counter)
: "memory"); : "memory");
...@@ -94,13 +97,14 @@ static __inline__ long local_sub_return(long i, local_t * l) ...@@ -94,13 +97,14 @@ static __inline__ long local_sub_return(long i, local_t * l)
unsigned long temp; unsigned long temp;
__asm__ __volatile__( __asm__ __volatile__(
" .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n" " .set "MIPS_ISA_ARCH_LEVEL" \n"
"1:" __LL "%1, %2 # local_sub_return \n" "1:" __LL "%1, %2 # local_sub_return \n"
" subu %0, %1, %3 \n" " subu %0, %1, %3 \n"
__SC "%0, %2 \n" __SC "%0, %2 \n"
" beqz %0, 1b \n" " beqz %0, 1b \n"
" subu %0, %1, %3 \n" " subu %0, %1, %3 \n"
" .set mips0 \n" " .set pop \n"
: "=&r" (result), "=&r" (temp), "=m" (l->a.counter) : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
: "Ir" (i), "m" (l->a.counter) : "Ir" (i), "m" (l->a.counter)
: "memory"); : "memory");
......
...@@ -255,12 +255,12 @@ static inline unsigned int dmt(void) ...@@ -255,12 +255,12 @@ static inline unsigned int dmt(void)
static inline void __raw_emt(void) static inline void __raw_emt(void)
{ {
__asm__ __volatile__( __asm__ __volatile__(
" .set push \n"
" .set noreorder \n" " .set noreorder \n"
" .set mips32r2 \n" " .set mips32r2 \n"
" .word 0x41600be1 # emt \n" " .word 0x41600be1 # emt \n"
" ehb \n" " ehb \n"
" .set mips0 \n" " .set pop");
" .set reorder");
} }
/* enable multi-threaded execution if previous suggested it should be. /* enable multi-threaded execution if previous suggested it should be.
...@@ -277,9 +277,10 @@ static inline void emt(int previous) ...@@ -277,9 +277,10 @@ static inline void emt(int previous)
static inline void ehb(void) static inline void ehb(void)
{ {
__asm__ __volatile__( __asm__ __volatile__(
" .set push \n"
" .set mips32r2 \n" " .set mips32r2 \n"
" ehb \n" " ehb \n"
" .set mips0 \n"); " .set pop \n");
} }
#define mftc0(rt,sel) \ #define mftc0(rt,sel) \
......
...@@ -1345,9 +1345,10 @@ do { \ ...@@ -1345,9 +1345,10 @@ do { \
: "=r" (__res)); \ : "=r" (__res)); \
else \ else \
__asm__ vol( \ __asm__ vol( \
".set\tpush\n\t" \
".set\tmips32\n\t" \ ".set\tmips32\n\t" \
"mfc0\t%0, " #source ", " #sel "\n\t" \ "mfc0\t%0, " #source ", " #sel "\n\t" \
".set\tmips0\n\t" \ ".set\tpop\n\t" \
: "=r" (__res)); \ : "=r" (__res)); \
__res; \ __res; \
}) })
...@@ -1358,15 +1359,17 @@ do { \ ...@@ -1358,15 +1359,17 @@ do { \
__res = __read_64bit_c0_split(source, sel, vol); \ __res = __read_64bit_c0_split(source, sel, vol); \
else if (sel == 0) \ else if (sel == 0) \
__asm__ vol( \ __asm__ vol( \
".set\tpush\n\t" \
".set\tmips3\n\t" \ ".set\tmips3\n\t" \
"dmfc0\t%0, " #source "\n\t" \ "dmfc0\t%0, " #source "\n\t" \
".set\tmips0" \ ".set\tpop" \
: "=r" (__res)); \ : "=r" (__res)); \
else \ else \
__asm__ vol( \ __asm__ vol( \
".set\tpush\n\t" \
".set\tmips64\n\t" \ ".set\tmips64\n\t" \
"dmfc0\t%0, " #source ", " #sel "\n\t" \ "dmfc0\t%0, " #source ", " #sel "\n\t" \
".set\tmips0" \ ".set\tpop" \
: "=r" (__res)); \ : "=r" (__res)); \
__res; \ __res; \
}) })
...@@ -1391,9 +1394,10 @@ do { \ ...@@ -1391,9 +1394,10 @@ do { \
: : "Jr" ((unsigned int)(value))); \ : : "Jr" ((unsigned int)(value))); \
else \ else \
__asm__ __volatile__( \ __asm__ __volatile__( \
".set\tpush\n\t" \
".set\tmips32\n\t" \ ".set\tmips32\n\t" \
"mtc0\t%z0, " #register ", " #sel "\n\t" \ "mtc0\t%z0, " #register ", " #sel "\n\t" \
".set\tmips0" \ ".set\tpop" \
: : "Jr" ((unsigned int)(value))); \ : : "Jr" ((unsigned int)(value))); \
} while (0) } while (0)
...@@ -1403,15 +1407,17 @@ do { \ ...@@ -1403,15 +1407,17 @@ do { \
__write_64bit_c0_split(register, sel, value); \ __write_64bit_c0_split(register, sel, value); \
else if (sel == 0) \ else if (sel == 0) \
__asm__ __volatile__( \ __asm__ __volatile__( \
".set\tpush\n\t" \
".set\tmips3\n\t" \ ".set\tmips3\n\t" \
"dmtc0\t%z0, " #register "\n\t" \ "dmtc0\t%z0, " #register "\n\t" \
".set\tmips0" \ ".set\tpop" \
: : "Jr" (value)); \ : : "Jr" (value)); \
else \ else \
__asm__ __volatile__( \ __asm__ __volatile__( \
".set\tpush\n\t" \
".set\tmips64\n\t" \ ".set\tmips64\n\t" \
"dmtc0\t%z0, " #register ", " #sel "\n\t" \ "dmtc0\t%z0, " #register ", " #sel "\n\t" \
".set\tmips0" \ ".set\tpop" \
: : "Jr" (value)); \ : : "Jr" (value)); \
} while (0) } while (0)
...@@ -1463,19 +1469,21 @@ do { \ ...@@ -1463,19 +1469,21 @@ do { \
local_irq_save(__flags); \ local_irq_save(__flags); \
if (sel == 0) \ if (sel == 0) \
__asm__ vol( \ __asm__ vol( \
".set\tpush\n\t" \
".set\tmips64\n\t" \ ".set\tmips64\n\t" \
"dmfc0\t%L0, " #source "\n\t" \ "dmfc0\t%L0, " #source "\n\t" \
"dsra\t%M0, %L0, 32\n\t" \ "dsra\t%M0, %L0, 32\n\t" \
"sll\t%L0, %L0, 0\n\t" \ "sll\t%L0, %L0, 0\n\t" \
".set\tmips0" \ ".set\tpop" \
: "=r" (__val)); \ : "=r" (__val)); \
else \ else \
__asm__ vol( \ __asm__ vol( \
".set\tpush\n\t" \
".set\tmips64\n\t" \ ".set\tmips64\n\t" \
"dmfc0\t%L0, " #source ", " #sel "\n\t" \ "dmfc0\t%L0, " #source ", " #sel "\n\t" \
"dsra\t%M0, %L0, 32\n\t" \ "dsra\t%M0, %L0, 32\n\t" \
"sll\t%L0, %L0, 0\n\t" \ "sll\t%L0, %L0, 0\n\t" \
".set\tmips0" \ ".set\tpop" \
: "=r" (__val)); \ : "=r" (__val)); \
local_irq_restore(__flags); \ local_irq_restore(__flags); \
\ \
...@@ -1498,23 +1506,25 @@ do { \ ...@@ -1498,23 +1506,25 @@ do { \
: "+r" (__tmp)); \ : "+r" (__tmp)); \
else if (sel == 0) \ else if (sel == 0) \
__asm__ __volatile__( \ __asm__ __volatile__( \
".set\tpush\n\t" \
".set\tmips64\n\t" \ ".set\tmips64\n\t" \
"dsll\t%L0, %L0, 32\n\t" \ "dsll\t%L0, %L0, 32\n\t" \
"dsrl\t%L0, %L0, 32\n\t" \ "dsrl\t%L0, %L0, 32\n\t" \
"dsll\t%M0, %M0, 32\n\t" \ "dsll\t%M0, %M0, 32\n\t" \
"or\t%L0, %L0, %M0\n\t" \ "or\t%L0, %L0, %M0\n\t" \
"dmtc0\t%L0, " #source "\n\t" \ "dmtc0\t%L0, " #source "\n\t" \
".set\tmips0" \ ".set\tpop" \
: "+r" (__tmp)); \ : "+r" (__tmp)); \
else \ else \
__asm__ __volatile__( \ __asm__ __volatile__( \
".set\tpush\n\t" \
".set\tmips64\n\t" \ ".set\tmips64\n\t" \
"dsll\t%L0, %L0, 32\n\t" \ "dsll\t%L0, %L0, 32\n\t" \
"dsrl\t%L0, %L0, 32\n\t" \ "dsrl\t%L0, %L0, 32\n\t" \
"dsll\t%M0, %M0, 32\n\t" \ "dsll\t%M0, %M0, 32\n\t" \
"or\t%L0, %L0, %M0\n\t" \ "or\t%L0, %L0, %M0\n\t" \
"dmtc0\t%L0, " #source ", " #sel "\n\t" \ "dmtc0\t%L0, " #source ", " #sel "\n\t" \
".set\tmips0" \ ".set\tpop" \
: "+r" (__tmp)); \ : "+r" (__tmp)); \
local_irq_restore(__flags); \ local_irq_restore(__flags); \
} while (0) } while (0)
......
...@@ -214,8 +214,8 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) ...@@ -214,8 +214,8 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
if (kernel_uses_llsc && R10000_LLSC_WAR) { if (kernel_uses_llsc && R10000_LLSC_WAR) {
__asm__ __volatile__ ( __asm__ __volatile__ (
" .set arch=r4000 \n"
" .set push \n" " .set push \n"
" .set arch=r4000 \n"
" .set noreorder \n" " .set noreorder \n"
"1:" __LL "%[tmp], %[buddy] \n" "1:" __LL "%[tmp], %[buddy] \n"
" bnez %[tmp], 2f \n" " bnez %[tmp], 2f \n"
...@@ -225,13 +225,12 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) ...@@ -225,13 +225,12 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
" nop \n" " nop \n"
"2: \n" "2: \n"
" .set pop \n" " .set pop \n"
" .set mips0 \n"
: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
: [global] "r" (page_global)); : [global] "r" (page_global));
} else if (kernel_uses_llsc) { } else if (kernel_uses_llsc) {
__asm__ __volatile__ ( __asm__ __volatile__ (
" .set "MIPS_ISA_ARCH_LEVEL" \n"
" .set push \n" " .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
" .set noreorder \n" " .set noreorder \n"
"1:" __LL "%[tmp], %[buddy] \n" "1:" __LL "%[tmp], %[buddy] \n"
" bnez %[tmp], 2f \n" " bnez %[tmp], 2f \n"
...@@ -241,7 +240,6 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) ...@@ -241,7 +240,6 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
" nop \n" " nop \n"
"2: \n" "2: \n"
" .set pop \n" " .set pop \n"
" .set mips0 \n"
: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
: [global] "r" (page_global)); : [global] "r" (page_global));
} }
......
...@@ -427,9 +427,10 @@ ...@@ -427,9 +427,10 @@
#ifdef CONFIG_CPU_MIPSR6 #ifdef CONFIG_CPU_MIPSR6
eretnc eretnc
#else #else
.set push
.set arch=r4000 .set arch=r4000
eret eret
.set mips0 .set pop
#endif #endif
.endm .endm
......
...@@ -652,9 +652,10 @@ isrdhwr: ...@@ -652,9 +652,10 @@ isrdhwr:
ori k1, _THREAD_MASK ori k1, _THREAD_MASK
xori k1, _THREAD_MASK xori k1, _THREAD_MASK
LONG_L v1, TI_TP_VALUE(k1) LONG_L v1, TI_TP_VALUE(k1)
.set push
.set arch=r4000 .set arch=r4000
eret eret
.set mips0 .set pop
#endif #endif
.set pop .set pop
END(handle_ri_rdhwr) END(handle_ri_rdhwr)
......
...@@ -101,7 +101,8 @@ static void __cpuidle au1k_wait(void) ...@@ -101,7 +101,8 @@ static void __cpuidle au1k_wait(void)
unsigned long c0status = read_c0_status() | 1; /* irqs on */ unsigned long c0status = read_c0_status() | 1; /* irqs on */
__asm__( __asm__(
" .set arch=r4000 \n" " .set push \n"
" .set arch=r4000 \n"
" cache 0x14, 0(%0) \n" " cache 0x14, 0(%0) \n"
" cache 0x14, 32(%0) \n" " cache 0x14, 32(%0) \n"
" sync \n" " sync \n"
...@@ -111,7 +112,7 @@ static void __cpuidle au1k_wait(void) ...@@ -111,7 +112,7 @@ static void __cpuidle au1k_wait(void)
" nop \n" " nop \n"
" nop \n" " nop \n"
" nop \n" " nop \n"
" .set mips0 \n" " .set pop \n"
: : "r" (au1k_wait), "r" (c0status)); : : "r" (au1k_wait), "r" (c0status));
} }
......
...@@ -106,6 +106,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) ...@@ -106,6 +106,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
if (cpu_has_llsc && R10000_LLSC_WAR) { if (cpu_has_llsc && R10000_LLSC_WAR) {
__asm__ __volatile__ ( __asm__ __volatile__ (
" .set push \n"
" .set arch=r4000 \n" " .set arch=r4000 \n"
" li %[err], 0 \n" " li %[err], 0 \n"
"1: ll %[old], (%[addr]) \n" "1: ll %[old], (%[addr]) \n"
...@@ -122,7 +123,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) ...@@ -122,7 +123,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
" "STR(PTR)" 1b, 4b \n" " "STR(PTR)" 1b, 4b \n"
" "STR(PTR)" 2b, 4b \n" " "STR(PTR)" 2b, 4b \n"
" .previous \n" " .previous \n"
" .set mips0 \n" " .set pop \n"
: [old] "=&r" (old), : [old] "=&r" (old),
[err] "=&r" (err), [err] "=&r" (err),
[tmp] "=&r" (tmp) [tmp] "=&r" (tmp)
...@@ -132,6 +133,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) ...@@ -132,6 +133,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
: "memory"); : "memory");
} else if (cpu_has_llsc) { } else if (cpu_has_llsc) {
__asm__ __volatile__ ( __asm__ __volatile__ (
" .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n" " .set "MIPS_ISA_ARCH_LEVEL" \n"
" li %[err], 0 \n" " li %[err], 0 \n"
"1: \n" "1: \n"
...@@ -150,7 +152,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) ...@@ -150,7 +152,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
" "STR(PTR)" 1b, 5b \n" " "STR(PTR)" 1b, 5b \n"
" "STR(PTR)" 2b, 5b \n" " "STR(PTR)" 2b, 5b \n"
" .previous \n" " .previous \n"
" .set mips0 \n" " .set pop \n"
: [old] "=&r" (old), : [old] "=&r" (old),
[err] "=&r" (err), [err] "=&r" (err),
[tmp] "=&r" (tmp) [tmp] "=&r" (tmp)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment