Commit eda8dd12 authored by Max Filippov

xtensa: use a14 instead of a15 in inline assembly

a15 is the frame pointer in the call0 xtensa ABI, so don't use it
explicitly in inline assembly. Use a14 instead; it has the same
properties as a15 w.r.t. window overflow.
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
parent e369953a
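
For context, the idiom being changed looks like this when written out as a
standalone sketch. This is illustrative only: the interrupt level 15 stands
in for the kernel's TOPLEVEL constant, and the function name is invented for
the example.

/* Sketch of the xtensa critical-section idiom this commit adjusts:
 * rsil raises the interrupt level and saves the old PS into a named
 * register; wsr/rsync restore it. The hard-coded register must stay
 * valid across the whole sequence, which is why one register is named
 * explicitly instead of letting the compiler pick. */
static inline unsigned int atomic_inc_sketch(volatile int *counter)
{
	unsigned int result;

	__asm__ __volatile__(
	"	rsil	a14, 15\n"		/* raise IRQ level; old PS -> a14 */
	"	l32i	%[result], %[mem]\n"	/* load counter */
	"	addi	%[result], %[result], 1\n"
	"	s32i	%[result], %[mem]\n"	/* store back */
	"	wsr	a14, ps\n"		/* restore PS */
	"	rsync\n"
	: [result] "=&a" (result), [mem] "+m" (*counter)
	:
	: "a14", "memory");
	return result;
}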
arch/xtensa/include/asm/atomic.h
@@ -25,15 +25,15 @@
  *
  * Locking interrupts looks like this:
  *
- *    rsil a15, TOPLEVEL
+ *    rsil a14, TOPLEVEL
  *    <code>
- *    wsr  a15, PS
+ *    wsr  a14, PS
  *    rsync
  *
- * Note that a15 is used here because the register allocation
+ * Note that a14 is used here because the register allocation
  * done by the compiler is not guaranteed and a window overflow
  * may not occur between the rsil and wsr instructions. By using
- * a15 in the rsil, the machine is guaranteed to be in a state
+ * a14 in the rsil, the machine is guaranteed to be in a state
  * where no register reference will cause an overflow.
  */
@@ -185,15 +185,15 @@ static inline void arch_atomic_##op(int i, atomic_t * v) \
 	unsigned int vval; \
 	\
 	__asm__ __volatile__( \
-	" rsil a15, "__stringify(TOPLEVEL)"\n" \
+	" rsil a14, "__stringify(TOPLEVEL)"\n" \
 	" l32i %[result], %[mem]\n" \
 	" " #op " %[result], %[result], %[i]\n" \
 	" s32i %[result], %[mem]\n" \
-	" wsr a15, ps\n" \
+	" wsr a14, ps\n" \
 	" rsync\n" \
 	: [result] "=&a" (vval), [mem] "+m" (*v) \
 	: [i] "a" (i) \
-	: "a15", "memory" \
+	: "a14", "memory" \
 	); \
 } \
@@ -203,15 +203,15 @@ static inline int arch_atomic_##op##_return(int i, atomic_t * v) \
 	unsigned int vval; \
 	\
 	__asm__ __volatile__( \
-	" rsil a15,"__stringify(TOPLEVEL)"\n" \
+	" rsil a14,"__stringify(TOPLEVEL)"\n" \
 	" l32i %[result], %[mem]\n" \
 	" " #op " %[result], %[result], %[i]\n" \
 	" s32i %[result], %[mem]\n" \
-	" wsr a15, ps\n" \
+	" wsr a14, ps\n" \
 	" rsync\n" \
 	: [result] "=&a" (vval), [mem] "+m" (*v) \
 	: [i] "a" (i) \
-	: "a15", "memory" \
+	: "a14", "memory" \
 	); \
 	\
 	return vval; \
@@ -223,16 +223,16 @@ static inline int arch_atomic_fetch_##op(int i, atomic_t * v) \
 	unsigned int tmp, vval; \
 	\
 	__asm__ __volatile__( \
-	" rsil a15,"__stringify(TOPLEVEL)"\n" \
+	" rsil a14,"__stringify(TOPLEVEL)"\n" \
 	" l32i %[result], %[mem]\n" \
 	" " #op " %[tmp], %[result], %[i]\n" \
 	" s32i %[tmp], %[mem]\n" \
-	" wsr a15, ps\n" \
+	" wsr a14, ps\n" \
 	" rsync\n" \
 	: [result] "=&a" (vval), [tmp] "=&a" (tmp), \
 	  [mem] "+m" (*v) \
 	: [i] "a" (i) \
-	: "a15", "memory" \
+	: "a14", "memory" \
 	); \
 	\
 	return vval; \
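
For orientation between the two files: expanded by hand for op = add, the
updated ATOMIC_OP macro above yields roughly the following. This expansion
is illustrative, assuming the file instantiates ATOMIC_OP(add) as the macro
pattern suggests; the real code is generated by the macro.

/* Hand expansion of ATOMIC_OP(add) after this change (illustrative). */
static inline void arch_atomic_add(int i, atomic_t *v)
{
	unsigned int vval;

	__asm__ __volatile__(
	"	rsil	a14, "__stringify(TOPLEVEL)"\n"
	"	l32i	%[result], %[mem]\n"
	"	add	%[result], %[result], %[i]\n"
	"	s32i	%[result], %[mem]\n"
	"	wsr	a14, ps\n"
	"	rsync\n"
	: [result] "=&a" (vval), [mem] "+m" (*v)
	: [i] "a" (i)
	: "a14", "memory");
}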
arch/xtensa/include/asm/cmpxchg.h
@@ -52,16 +52,16 @@ __cmpxchg_u32(volatile int *p, int old, int new)
 	return new;
 #else
 	__asm__ __volatile__(
-	" rsil a15, "__stringify(TOPLEVEL)"\n"
+	" rsil a14, "__stringify(TOPLEVEL)"\n"
 	" l32i %[old], %[mem]\n"
 	" bne %[old], %[cmp], 1f\n"
 	" s32i %[new], %[mem]\n"
 	"1:\n"
-	" wsr a15, ps\n"
+	" wsr a14, ps\n"
 	" rsync\n"
 	: [old] "=&a" (old), [mem] "+m" (*p)
 	: [cmp] "a" (old), [new] "r" (new)
-	: "a15", "memory");
+	: "a14", "memory");
 	return old;
 #endif
 }
@@ -116,10 +116,10 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 /*
  * xchg_u32
  *
- * Note that a15 is used here because the register allocation
+ * Note that a14 is used here because the register allocation
  * done by the compiler is not guaranteed and a window overflow
  * may not occur between the rsil and wsr instructions. By using
- * a15 in the rsil, the machine is guaranteed to be in a state
+ * a14 in the rsil, the machine is guaranteed to be in a state
  * where no register reference will cause an overflow.
  */
@@ -157,14 +157,14 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
 #else
 	unsigned long tmp;
 	__asm__ __volatile__(
-	" rsil a15, "__stringify(TOPLEVEL)"\n"
+	" rsil a14, "__stringify(TOPLEVEL)"\n"
 	" l32i %[tmp], %[mem]\n"
 	" s32i %[val], %[mem]\n"
-	" wsr a15, ps\n"
+	" wsr a14, ps\n"
 	" rsync\n"
 	: [tmp] "=&a" (tmp), [mem] "+m" (*m)
 	: [val] "a" (val)
-	: "a15", "memory");
+	: "a14", "memory");
 	return tmp;
 #endif
 }
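
As a cross-check of the semantics the __cmpxchg_u32 hunk preserves:
atomically, if *p equals old, store new, and in all cases return the prior
value. Below is a hedged user-space model using C11 atomics in place of the
rsil/wsr critical section; the function name is invented for illustration
and this is not the kernel implementation.

/* User-space model of __cmpxchg_u32 semantics using C11 atomics. */
#include <stdatomic.h>
#include <stdio.h>

static int cmpxchg_u32_model(_Atomic int *p, int old, int new)
{
	/* On failure, atomic_compare_exchange_strong writes the current
	 * value into 'old', so 'old' holds the prior value either way. */
	atomic_compare_exchange_strong(p, &old, new);
	return old;
}

int main(void)
{
	_Atomic int v = 1;
	int prev = cmpxchg_u32_model(&v, 1, 2);	/* succeeds */
	printf("prev=%d, v=%d\n", prev, atomic_load(&v));	/* prev=1, v=2 */
	prev = cmpxchg_u32_model(&v, 1, 3);	/* fails: v is 2 */
	printf("prev=%d, v=%d\n", prev, atomic_load(&v));	/* prev=2, v=2 */
	return 0;
}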