Commit 95c41896 authored by Will Deacon, committed by Catalin Marinas

arm64: asm: remove redundant "cc" clobbers

cbnz/tbnz don't update the condition flags, so remove the "cc" clobbers
from inline asm blocks that only use these instructions to implement
conditional branches.
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 8e86f0b4
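
For context: in GCC inline assembly, a "cc" clobber tells the compiler that the condition flags (NZCV on arm64) may be overwritten by the asm block. cbnz and tbnz branch on a register value or a single bit directly and never write the flags, so an exclusive-load/store retry loop built only from them leaves the flags intact. A minimal standalone sketch of the post-patch pattern, modelled on the atomic_add() hunk below (illustrative only, not part of the patch):

	#include <stdint.h>

	typedef struct { int counter; } atomic_t;

	static inline void atomic_add_sketch(int i, atomic_t *v)
	{
		unsigned long tmp;
		int result;

		asm volatile("// atomic_add\n"
		"1:	ldxr	%w0, %2\n"		/* load-exclusive v->counter */
		"	add	%w0, %w0, %w3\n"	/* add, not adds: flags untouched */
		"	stxr	%w1, %w0, %2\n"		/* store-exclusive; %w1 == 0 on success */
		"	cbnz	%w1, 1b"		/* retry on failure; tests a register, not flags */
		: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
		: "Ir" (i));				/* no "cc" clobber needed */
	}

Note that the plain add/sub variants also carry no "memory" clobber (they have no ordering requirements), while the _return variants in the hunks below keep "memory" because they are documented full barriers.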
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -54,8 +54,7 @@ static inline void atomic_add(int i, atomic_t *v)
 "	stxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "cc");
+	: "Ir" (i));
 }

 static inline int atomic_add_return(int i, atomic_t *v)
@@ -70,7 +69,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");

 	smp_mb();
 	return result;
@@ -87,8 +86,7 @@ static inline void atomic_sub(int i, atomic_t *v)
 "	stxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "cc");
+	: "Ir" (i));
 }

 static inline int atomic_sub_return(int i, atomic_t *v)
@@ -103,7 +101,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");

 	smp_mb();
 	return result;
@@ -125,7 +123,7 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 "2:"
 	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
 	: "Ir" (old), "r" (new)
-	: "cc", "memory");
+	: "cc");

 	smp_mb();
 	return oldval;
@@ -178,8 +176,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
 "	stxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "cc");
+	: "Ir" (i));
 }

 static inline long atomic64_add_return(long i, atomic64_t *v)
@@ -194,7 +191,7 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");

 	smp_mb();
 	return result;
@@ -211,8 +208,7 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
 "	stxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "cc");
+	: "Ir" (i));
 }

 static inline long atomic64_sub_return(long i, atomic64_t *v)
@@ -227,7 +223,7 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");

 	smp_mb();
 	return result;
@@ -249,7 +245,7 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
 "2:"
 	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
 	: "Ir" (old), "r" (new)
-	: "cc", "memory");
+	: "cc");

 	smp_mb();
 	return oldval;
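
Note that the two cmpxchg hunks above keep "cc" and instead drop "memory": those loops compare with cmp/b.ne, and cmp does write the NZCV flags, while the "+Q" operand already tells the compiler the counter is read and written and the surrounding smp_mb() calls presumably provide the ordering. A contrasting sketch (the instructions outside the visible context lines are reconstructed, so treat them as an assumption; the kernel's real version also has smp_mb() barriers around the loop):

	typedef struct { int counter; } atomic_t;

	static inline int atomic_cmpxchg_sketch(atomic_t *ptr, int old, int new)
	{
		unsigned long tmp;
		int oldval;

		asm volatile("// atomic_cmpxchg\n"
		"1:	ldxr	%w1, %2\n"
		"	cmp	%w1, %w3\n"		/* cmp writes NZCV... */
		"	b.ne	2f\n"			/* ...and b.ne reads it */
		"	stxr	%w0, %w4, %2\n"
		"	cbnz	%w0, 1b\n"
		"2:"
		: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
		: "Ir" (old), "r" (new)
		: "cc");			/* "cc" must stay: the flags are clobbered */

		return oldval;
	}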
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -34,7 +34,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
 "	cbnz	%w1, 1b\n"
 	: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
 	: "r" (x)
-	: "cc", "memory");
+	: "memory");
 		break;
 	case 2:
 		asm volatile("// __xchg2\n"
@@ -43,7 +43,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
 "	cbnz	%w1, 1b\n"
 	: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
 	: "r" (x)
-	: "cc", "memory");
+	: "memory");
 		break;
 	case 4:
 		asm volatile("// __xchg4\n"
@@ -52,7 +52,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
 "	cbnz	%w1, 1b\n"
 	: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
 	: "r" (x)
-	: "cc", "memory");
+	: "memory");
 		break;
 	case 8:
 		asm volatile("// __xchg8\n"
@@ -61,7 +61,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
 "	cbnz	%w1, 1b\n"
 	: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
 	: "r" (x)
-	: "cc", "memory");
+	: "memory");
 		break;
 	default:
 		BUILD_BUG();
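
The four hunks above are the size-dispatched cases of __xchg(), one per operand width (u8/u16/u32/u64), each built around the same cbnz-only retry loop. A standalone sketch of the 4-byte case after the change (the load/store-exclusive lines fall outside the visible context and are reconstructed as a simplified, unordered variant, so treat them as an assumption):

	#include <stdint.h>

	static inline uint32_t xchg32_sketch(uint32_t x, volatile uint32_t *ptr)
	{
		uint32_t ret;
		unsigned long tmp;

		asm volatile("// __xchg4\n"
		"1:	ldxr	%w0, %2\n"	/* load-exclusive the old value */
		"	stxr	%w1, %w3, %2\n"	/* try to store the new value */
		"	cbnz	%w1, 1b\n"	/* retry until the exclusive store succeeds */
		: "=&r" (ret), "=&r" (tmp), "+Q" (*ptr)
		: "r" (x)
		: "memory");		/* "memory" stays; only "cc" was redundant */

		return ret;
	}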
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -41,7 +41,7 @@
 "	.popsection\n"							\
 	: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp)	\
 	: "r" (oparg), "Ir" (-EFAULT)					\
-	: "cc", "memory")
+	: "memory")

 static inline int
 futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
@@ -129,7 +129,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 "	.popsection\n"
 	: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
 	: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
-	: "cc", "memory");
+	: "memory");

 	*uval = val;
 	return ret;
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -132,7 +132,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 "	cbnz	%w0, 2b\n"
 	: "=&r" (tmp), "+Q" (rw->lock)
 	: "r" (0x80000000)
-	: "cc", "memory");
+	: "memory");
 }

 static inline int arch_write_trylock(arch_rwlock_t *rw)
@@ -146,7 +146,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 "1:\n"
 	: "=&r" (tmp), "+Q" (rw->lock)
 	: "r" (0x80000000)
-	: "cc", "memory");
+	: "memory");

 	return !tmp;
 }
@@ -187,7 +187,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 "	cbnz	%w1, 2b\n"
 	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
 	:
-	: "cc", "memory");
+	: "memory");
 }

 static inline void arch_read_unlock(arch_rwlock_t *rw)
@@ -201,7 +201,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 "	cbnz	%w1, 1b\n"
 	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
 	:
-	: "cc", "memory");
+	: "memory");
 }

 static inline int arch_read_trylock(arch_rwlock_t *rw)
@@ -216,7 +216,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 "1:\n"
 	: "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
 	:
-	: "cc", "memory");
+	: "memory");

 	return !tmp2;
 }
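
The rwlock routines follow the same rule: per the surrounding context these loops branch with cbnz only, so "cc" goes while "memory" stays (the lock operations must remain compiler ordering barriers). A quick way to convince yourself the clobber is redundant is a standalone compile test (hypothetical, not from the patch):

	#include <stdint.h>

	/* Spin until *lock reads 0, then store 1. Nothing here writes the
	 * NZCV flags, so no "cc" clobber is declared. */
	static inline void lock_sketch(volatile uint32_t *lock)
	{
		uint32_t tmp, status;

		asm volatile(
		"1:	ldaxr	%w0, %2\n"	/* load-acquire the current value */
		"	cbnz	%w0, 1b\n"	/* spin while held */
		"	stxr	%w1, %w3, %2\n"	/* try to take the lock */
		"	cbnz	%w1, 1b\n"	/* retry if we lost the exclusive */
		: "=&r" (tmp), "=&r" (status), "+Q" (*lock)
		: "r" (1)
		: "memory");		/* lock acquire: keep the compiler barrier */
	}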