Commit 3a0310eb authored by Will Deacon's avatar Will Deacon Committed by Catalin Marinas

arm64: atomics: fix grossly inconsistent asm constraints for exclusives

Our uses of inline asm constraints for atomic operations are fairly
wild and varied. We basically need to guarantee the following:

  1. Any instructions with barrier implications
     (load-acquire/store-release) have a "memory" clobber

  2. When performing exclusive accesses, the addressing mode is generated
     using the "Q" constraint

  3. Atomic blocks which use the condition flags have a "cc" clobber

This patch addresses these concerns which, as well as fixing the
semantics of the code, stops GCC complaining about impossible asm
constraints.
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent c0e01d5d
...@@ -49,12 +49,12 @@ static inline void atomic_add(int i, atomic_t *v) ...@@ -49,12 +49,12 @@ static inline void atomic_add(int i, atomic_t *v)
int result; int result;
asm volatile("// atomic_add\n" asm volatile("// atomic_add\n"
"1: ldxr %w0, [%3]\n" "1: ldxr %w0, %2\n"
" add %w0, %w0, %w4\n" " add %w0, %w0, %w3\n"
" stxr %w1, %w0, [%3]\n" " stxr %w1, %w0, %2\n"
" cbnz %w1, 1b" " cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+o" (v->counter) : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "r" (&v->counter), "Ir" (i) : "Ir" (i)
: "cc"); : "cc");
} }
...@@ -64,13 +64,13 @@ static inline int atomic_add_return(int i, atomic_t *v) ...@@ -64,13 +64,13 @@ static inline int atomic_add_return(int i, atomic_t *v)
int result; int result;
asm volatile("// atomic_add_return\n" asm volatile("// atomic_add_return\n"
"1: ldaxr %w0, [%3]\n" "1: ldaxr %w0, %2\n"
" add %w0, %w0, %w4\n" " add %w0, %w0, %w3\n"
" stlxr %w1, %w0, [%3]\n" " stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b" " cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+o" (v->counter) : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "r" (&v->counter), "Ir" (i) : "Ir" (i)
: "cc"); : "cc", "memory");
return result; return result;
} }
...@@ -81,12 +81,12 @@ static inline void atomic_sub(int i, atomic_t *v) ...@@ -81,12 +81,12 @@ static inline void atomic_sub(int i, atomic_t *v)
int result; int result;
asm volatile("// atomic_sub\n" asm volatile("// atomic_sub\n"
"1: ldxr %w0, [%3]\n" "1: ldxr %w0, %2\n"
" sub %w0, %w0, %w4\n" " sub %w0, %w0, %w3\n"
" stxr %w1, %w0, [%3]\n" " stxr %w1, %w0, %2\n"
" cbnz %w1, 1b" " cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+o" (v->counter) : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "r" (&v->counter), "Ir" (i) : "Ir" (i)
: "cc"); : "cc");
} }
...@@ -96,13 +96,13 @@ static inline int atomic_sub_return(int i, atomic_t *v) ...@@ -96,13 +96,13 @@ static inline int atomic_sub_return(int i, atomic_t *v)
int result; int result;
asm volatile("// atomic_sub_return\n" asm volatile("// atomic_sub_return\n"
"1: ldaxr %w0, [%3]\n" "1: ldaxr %w0, %2\n"
" sub %w0, %w0, %w4\n" " sub %w0, %w0, %w3\n"
" stlxr %w1, %w0, [%3]\n" " stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b" " cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+o" (v->counter) : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "r" (&v->counter), "Ir" (i) : "Ir" (i)
: "cc"); : "cc", "memory");
return result; return result;
} }
...@@ -113,15 +113,15 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) ...@@ -113,15 +113,15 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
int oldval; int oldval;
asm volatile("// atomic_cmpxchg\n" asm volatile("// atomic_cmpxchg\n"
"1: ldaxr %w1, [%3]\n" "1: ldaxr %w1, %2\n"
" cmp %w1, %w4\n" " cmp %w1, %w3\n"
" b.ne 2f\n" " b.ne 2f\n"
" stlxr %w0, %w5, [%3]\n" " stlxr %w0, %w4, %2\n"
" cbnz %w0, 1b\n" " cbnz %w0, 1b\n"
"2:" "2:"
: "=&r" (tmp), "=&r" (oldval), "+o" (ptr->counter) : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
: "r" (&ptr->counter), "Ir" (old), "r" (new) : "Ir" (old), "r" (new)
: "cc"); : "cc", "memory");
return oldval; return oldval;
} }
...@@ -131,12 +131,12 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) ...@@ -131,12 +131,12 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
unsigned long tmp, tmp2; unsigned long tmp, tmp2;
asm volatile("// atomic_clear_mask\n" asm volatile("// atomic_clear_mask\n"
"1: ldxr %0, [%3]\n" "1: ldxr %0, %2\n"
" bic %0, %0, %4\n" " bic %0, %0, %3\n"
" stxr %w1, %0, [%3]\n" " stxr %w1, %0, %2\n"
" cbnz %w1, 1b" " cbnz %w1, 1b"
: "=&r" (tmp), "=&r" (tmp2), "+o" (*addr) : "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr)
: "r" (addr), "Ir" (mask) : "Ir" (mask)
: "cc"); : "cc");
} }
...@@ -182,12 +182,12 @@ static inline void atomic64_add(u64 i, atomic64_t *v) ...@@ -182,12 +182,12 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
unsigned long tmp; unsigned long tmp;
asm volatile("// atomic64_add\n" asm volatile("// atomic64_add\n"
"1: ldxr %0, [%3]\n" "1: ldxr %0, %2\n"
" add %0, %0, %4\n" " add %0, %0, %3\n"
" stxr %w1, %0, [%3]\n" " stxr %w1, %0, %2\n"
" cbnz %w1, 1b" " cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+o" (v->counter) : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "r" (&v->counter), "Ir" (i) : "Ir" (i)
: "cc"); : "cc");
} }
...@@ -197,13 +197,13 @@ static inline long atomic64_add_return(long i, atomic64_t *v) ...@@ -197,13 +197,13 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
unsigned long tmp; unsigned long tmp;
asm volatile("// atomic64_add_return\n" asm volatile("// atomic64_add_return\n"
"1: ldaxr %0, [%3]\n" "1: ldaxr %0, %2\n"
" add %0, %0, %4\n" " add %0, %0, %3\n"
" stlxr %w1, %0, [%3]\n" " stlxr %w1, %0, %2\n"
" cbnz %w1, 1b" " cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+o" (v->counter) : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "r" (&v->counter), "Ir" (i) : "Ir" (i)
: "cc"); : "cc", "memory");
return result; return result;
} }
...@@ -214,12 +214,12 @@ static inline void atomic64_sub(u64 i, atomic64_t *v) ...@@ -214,12 +214,12 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
unsigned long tmp; unsigned long tmp;
asm volatile("// atomic64_sub\n" asm volatile("// atomic64_sub\n"
"1: ldxr %0, [%3]\n" "1: ldxr %0, %2\n"
" sub %0, %0, %4\n" " sub %0, %0, %3\n"
" stxr %w1, %0, [%3]\n" " stxr %w1, %0, %2\n"
" cbnz %w1, 1b" " cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+o" (v->counter) : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "r" (&v->counter), "Ir" (i) : "Ir" (i)
: "cc"); : "cc");
} }
...@@ -229,13 +229,13 @@ static inline long atomic64_sub_return(long i, atomic64_t *v) ...@@ -229,13 +229,13 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
unsigned long tmp; unsigned long tmp;
asm volatile("// atomic64_sub_return\n" asm volatile("// atomic64_sub_return\n"
"1: ldaxr %0, [%3]\n" "1: ldaxr %0, %2\n"
" sub %0, %0, %4\n" " sub %0, %0, %3\n"
" stlxr %w1, %0, [%3]\n" " stlxr %w1, %0, %2\n"
" cbnz %w1, 1b" " cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+o" (v->counter) : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "r" (&v->counter), "Ir" (i) : "Ir" (i)
: "cc"); : "cc", "memory");
return result; return result;
} }
...@@ -246,15 +246,15 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new) ...@@ -246,15 +246,15 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
unsigned long res; unsigned long res;
asm volatile("// atomic64_cmpxchg\n" asm volatile("// atomic64_cmpxchg\n"
"1: ldaxr %1, [%3]\n" "1: ldaxr %1, %2\n"
" cmp %1, %4\n" " cmp %1, %3\n"
" b.ne 2f\n" " b.ne 2f\n"
" stlxr %w0, %5, [%3]\n" " stlxr %w0, %4, %2\n"
" cbnz %w0, 1b\n" " cbnz %w0, 1b\n"
"2:" "2:"
: "=&r" (res), "=&r" (oldval), "+o" (ptr->counter) : "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
: "r" (&ptr->counter), "Ir" (old), "r" (new) : "Ir" (old), "r" (new)
: "cc"); : "cc", "memory");
return oldval; return oldval;
} }
...@@ -267,15 +267,15 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) ...@@ -267,15 +267,15 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
unsigned long tmp; unsigned long tmp;
asm volatile("// atomic64_dec_if_positive\n" asm volatile("// atomic64_dec_if_positive\n"
"1: ldaxr %0, [%3]\n" "1: ldaxr %0, %2\n"
" subs %0, %0, #1\n" " subs %0, %0, #1\n"
" b.mi 2f\n" " b.mi 2f\n"
" stlxr %w1, %0, [%3]\n" " stlxr %w1, %0, %2\n"
" cbnz %w1, 1b\n" " cbnz %w1, 1b\n"
"2:" "2:"
: "=&r" (result), "=&r" (tmp), "+o" (v->counter) : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "r" (&v->counter) :
: "cc"); : "cc", "memory");
return result; return result;
} }
......
...@@ -29,39 +29,39 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size ...@@ -29,39 +29,39 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
switch (size) { switch (size) {
case 1: case 1:
asm volatile("// __xchg1\n" asm volatile("// __xchg1\n"
"1: ldaxrb %w0, [%3]\n" "1: ldaxrb %w0, %2\n"
" stlxrb %w1, %w2, [%3]\n" " stlxrb %w1, %w3, %2\n"
" cbnz %w1, 1b\n" " cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp) : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
: "r" (x), "r" (ptr) : "r" (x)
: "memory", "cc"); : "cc", "memory");
break; break;
case 2: case 2:
asm volatile("// __xchg2\n" asm volatile("// __xchg2\n"
"1: ldaxrh %w0, [%3]\n" "1: ldaxrh %w0, %2\n"
" stlxrh %w1, %w2, [%3]\n" " stlxrh %w1, %w3, %2\n"
" cbnz %w1, 1b\n" " cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp) : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
: "r" (x), "r" (ptr) : "r" (x)
: "memory", "cc"); : "cc", "memory");
break; break;
case 4: case 4:
asm volatile("// __xchg4\n" asm volatile("// __xchg4\n"
"1: ldaxr %w0, [%3]\n" "1: ldaxr %w0, %2\n"
" stlxr %w1, %w2, [%3]\n" " stlxr %w1, %w3, %2\n"
" cbnz %w1, 1b\n" " cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp) : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
: "r" (x), "r" (ptr) : "r" (x)
: "memory", "cc"); : "cc", "memory");
break; break;
case 8: case 8:
asm volatile("// __xchg8\n" asm volatile("// __xchg8\n"
"1: ldaxr %0, [%3]\n" "1: ldaxr %0, %2\n"
" stlxr %w1, %2, [%3]\n" " stlxr %w1, %3, %2\n"
" cbnz %w1, 1b\n" " cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp) : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
: "r" (x), "r" (ptr) : "r" (x)
: "memory", "cc"); : "cc", "memory");
break; break;
default: default:
BUILD_BUG(); BUILD_BUG();
...@@ -82,14 +82,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, ...@@ -82,14 +82,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
case 1: case 1:
do { do {
asm volatile("// __cmpxchg1\n" asm volatile("// __cmpxchg1\n"
" ldxrb %w1, [%2]\n" " ldxrb %w1, %2\n"
" mov %w0, #0\n" " mov %w0, #0\n"
" cmp %w1, %w3\n" " cmp %w1, %w3\n"
" b.ne 1f\n" " b.ne 1f\n"
" stxrb %w0, %w4, [%2]\n" " stxrb %w0, %w4, %2\n"
"1:\n" "1:\n"
: "=&r" (res), "=&r" (oldval) : "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
: "r" (ptr), "Ir" (old), "r" (new) : "Ir" (old), "r" (new)
: "cc"); : "cc");
} while (res); } while (res);
break; break;
...@@ -97,29 +97,29 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, ...@@ -97,29 +97,29 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
case 2: case 2:
do { do {
asm volatile("// __cmpxchg2\n" asm volatile("// __cmpxchg2\n"
" ldxrh %w1, [%2]\n" " ldxrh %w1, %2\n"
" mov %w0, #0\n" " mov %w0, #0\n"
" cmp %w1, %w3\n" " cmp %w1, %w3\n"
" b.ne 1f\n" " b.ne 1f\n"
" stxrh %w0, %w4, [%2]\n" " stxrh %w0, %w4, %2\n"
"1:\n" "1:\n"
: "=&r" (res), "=&r" (oldval) : "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
: "r" (ptr), "Ir" (old), "r" (new) : "Ir" (old), "r" (new)
: "memory", "cc"); : "cc");
} while (res); } while (res);
break; break;
case 4: case 4:
do { do {
asm volatile("// __cmpxchg4\n" asm volatile("// __cmpxchg4\n"
" ldxr %w1, [%2]\n" " ldxr %w1, %2\n"
" mov %w0, #0\n" " mov %w0, #0\n"
" cmp %w1, %w3\n" " cmp %w1, %w3\n"
" b.ne 1f\n" " b.ne 1f\n"
" stxr %w0, %w4, [%2]\n" " stxr %w0, %w4, %2\n"
"1:\n" "1:\n"
: "=&r" (res), "=&r" (oldval) : "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
: "r" (ptr), "Ir" (old), "r" (new) : "Ir" (old), "r" (new)
: "cc"); : "cc");
} while (res); } while (res);
break; break;
...@@ -127,14 +127,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, ...@@ -127,14 +127,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
case 8: case 8:
do { do {
asm volatile("// __cmpxchg8\n" asm volatile("// __cmpxchg8\n"
" ldxr %1, [%2]\n" " ldxr %1, %2\n"
" mov %w0, #0\n" " mov %w0, #0\n"
" cmp %1, %3\n" " cmp %1, %3\n"
" b.ne 1f\n" " b.ne 1f\n"
" stxr %w0, %4, [%2]\n" " stxr %w0, %4, %2\n"
"1:\n" "1:\n"
: "=&r" (res), "=&r" (oldval) : "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
: "r" (ptr), "Ir" (old), "r" (new) : "Ir" (old), "r" (new)
: "cc"); : "cc");
} while (res); } while (res);
break; break;
......
...@@ -39,7 +39,7 @@ ...@@ -39,7 +39,7 @@
" .popsection\n" \ " .popsection\n" \
: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \ : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
: "r" (oparg), "Ir" (-EFAULT) \ : "r" (oparg), "Ir" (-EFAULT) \
: "cc") : "cc", "memory")
static inline int static inline int
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
......
...@@ -45,13 +45,13 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) ...@@ -45,13 +45,13 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
asm volatile( asm volatile(
" sevl\n" " sevl\n"
"1: wfe\n" "1: wfe\n"
"2: ldaxr %w0, [%1]\n" "2: ldaxr %w0, %1\n"
" cbnz %w0, 1b\n" " cbnz %w0, 1b\n"
" stxr %w0, %w2, [%1]\n" " stxr %w0, %w2, %1\n"
" cbnz %w0, 2b\n" " cbnz %w0, 2b\n"
: "=&r" (tmp) : "=&r" (tmp), "+Q" (lock->lock)
: "r" (&lock->lock), "r" (1) : "r" (1)
: "memory"); : "cc", "memory");
} }
static inline int arch_spin_trylock(arch_spinlock_t *lock) static inline int arch_spin_trylock(arch_spinlock_t *lock)
...@@ -59,13 +59,13 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) ...@@ -59,13 +59,13 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
unsigned int tmp; unsigned int tmp;
asm volatile( asm volatile(
" ldaxr %w0, [%1]\n" " ldaxr %w0, %1\n"
" cbnz %w0, 1f\n" " cbnz %w0, 1f\n"
" stxr %w0, %w2, [%1]\n" " stxr %w0, %w2, %1\n"
"1:\n" "1:\n"
: "=&r" (tmp) : "=&r" (tmp), "+Q" (lock->lock)
: "r" (&lock->lock), "r" (1) : "r" (1)
: "memory"); : "cc", "memory");
return !tmp; return !tmp;
} }
...@@ -73,8 +73,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) ...@@ -73,8 +73,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock) static inline void arch_spin_unlock(arch_spinlock_t *lock)
{ {
asm volatile( asm volatile(
" stlr %w1, [%0]\n" " stlr %w1, %0\n"
: : "r" (&lock->lock), "r" (0) : "memory"); : "=Q" (lock->lock) : "r" (0) : "memory");
} }
/* /*
...@@ -94,13 +94,13 @@ static inline void arch_write_lock(arch_rwlock_t *rw) ...@@ -94,13 +94,13 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
asm volatile( asm volatile(
" sevl\n" " sevl\n"
"1: wfe\n" "1: wfe\n"
"2: ldaxr %w0, [%1]\n" "2: ldaxr %w0, %1\n"
" cbnz %w0, 1b\n" " cbnz %w0, 1b\n"
" stxr %w0, %w2, [%1]\n" " stxr %w0, %w2, %1\n"
" cbnz %w0, 2b\n" " cbnz %w0, 2b\n"
: "=&r" (tmp) : "=&r" (tmp), "+Q" (rw->lock)
: "r" (&rw->lock), "r" (0x80000000) : "r" (0x80000000)
: "memory"); : "cc", "memory");
} }
static inline int arch_write_trylock(arch_rwlock_t *rw) static inline int arch_write_trylock(arch_rwlock_t *rw)
...@@ -108,13 +108,13 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) ...@@ -108,13 +108,13 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
unsigned int tmp; unsigned int tmp;
asm volatile( asm volatile(
" ldaxr %w0, [%1]\n" " ldaxr %w0, %1\n"
" cbnz %w0, 1f\n" " cbnz %w0, 1f\n"
" stxr %w0, %w2, [%1]\n" " stxr %w0, %w2, %1\n"
"1:\n" "1:\n"
: "=&r" (tmp) : "=&r" (tmp), "+Q" (rw->lock)
: "r" (&rw->lock), "r" (0x80000000) : "r" (0x80000000)
: "memory"); : "cc", "memory");
return !tmp; return !tmp;
} }
...@@ -122,8 +122,8 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) ...@@ -122,8 +122,8 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
static inline void arch_write_unlock(arch_rwlock_t *rw) static inline void arch_write_unlock(arch_rwlock_t *rw)
{ {
asm volatile( asm volatile(
" stlr %w1, [%0]\n" " stlr %w1, %0\n"
: : "r" (&rw->lock), "r" (0) : "memory"); : "=Q" (rw->lock) : "r" (0) : "memory");
} }
/* write_can_lock - would write_trylock() succeed? */ /* write_can_lock - would write_trylock() succeed? */
...@@ -148,14 +148,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw) ...@@ -148,14 +148,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
asm volatile( asm volatile(
" sevl\n" " sevl\n"
"1: wfe\n" "1: wfe\n"
"2: ldaxr %w0, [%2]\n" "2: ldaxr %w0, %2\n"
" add %w0, %w0, #1\n" " add %w0, %w0, #1\n"
" tbnz %w0, #31, 1b\n" " tbnz %w0, #31, 1b\n"
" stxr %w1, %w0, [%2]\n" " stxr %w1, %w0, %2\n"
" cbnz %w1, 2b\n" " cbnz %w1, 2b\n"
: "=&r" (tmp), "=&r" (tmp2) : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
: "r" (&rw->lock) :
: "memory"); : "cc", "memory");
} }
static inline void arch_read_unlock(arch_rwlock_t *rw) static inline void arch_read_unlock(arch_rwlock_t *rw)
...@@ -163,13 +163,13 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) ...@@ -163,13 +163,13 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
unsigned int tmp, tmp2; unsigned int tmp, tmp2;
asm volatile( asm volatile(
"1: ldxr %w0, [%2]\n" "1: ldxr %w0, %2\n"
" sub %w0, %w0, #1\n" " sub %w0, %w0, #1\n"
" stlxr %w1, %w0, [%2]\n" " stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b\n" " cbnz %w1, 1b\n"
: "=&r" (tmp), "=&r" (tmp2) : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
: "r" (&rw->lock) :
: "memory"); : "cc", "memory");
} }
static inline int arch_read_trylock(arch_rwlock_t *rw) static inline int arch_read_trylock(arch_rwlock_t *rw)
...@@ -177,14 +177,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) ...@@ -177,14 +177,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
unsigned int tmp, tmp2 = 1; unsigned int tmp, tmp2 = 1;
asm volatile( asm volatile(
" ldaxr %w0, [%2]\n" " ldaxr %w0, %2\n"
" add %w0, %w0, #1\n" " add %w0, %w0, #1\n"
" tbnz %w0, #31, 1f\n" " tbnz %w0, #31, 1f\n"
" stxr %w1, %w0, [%2]\n" " stxr %w1, %w0, %2\n"
"1:\n" "1:\n"
: "=&r" (tmp), "+r" (tmp2) : "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
: "r" (&rw->lock) :
: "memory"); : "cc", "memory");
return !tmp2; return !tmp2;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment