Commit b4f9209b authored by Will Deacon

arm64: Avoid masking "old" for LSE cmpxchg() implementation

The CAS instructions implicitly access only the relevant bits of the "old"
argument, so there is no need for explicit masking via type-casting as
there is in the LL/SC implementation.

Move the casting into the LL/SC code and remove it altogether for the LSE
implementation.
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 5ef3fe4c
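
For illustration only (not part of this commit): a minimal, host-runnable C sketch of the hazard described above. The helper name cmpxchg8_llsc_style is made up; the plain load and store stand in for the exclusive ld(a)xrb/st(l)xrb pair, which zero-extends the loaded byte into a full register before the compare.

#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;

/* Illustrative stand-in for the LL/SC sequence: the load zero-extends
 * the byte into a full register, so the comparison sees all 64 bits of
 * "old". Without the (u8) truncation, stale upper bits make the compare
 * fail even though the low byte matches. */
static u8 cmpxchg8_llsc_style(volatile u8 *ptr, unsigned long old, u8 new)
{
	u8 oldval = *ptr;	/* plays the role of ld(a)xrb */

	old = (u8)old;		/* the cast this commit moves into LL/SC */
	if (oldval == old)	/* full-register compare, as in the asm */
		*ptr = new;	/* plays the role of st(l)xrb */
	return oldval;
}

int main(void)
{
	volatile u8 v = 0xff;
	/* "old" arrives with non-zero upper bits, e.g. after sign extension. */
	unsigned long old = ~0UL;

	cmpxchg8_llsc_style(&v, old, 0x01);
	printf("v = %#x\n", (unsigned)v);	/* prints 0x1: the exchange took place */
	return 0;
}

Without the (u8) truncation of "old", the compare in this sketch would pit 0xffffffffffffffff against 0xff and the exchange would never happen; that is exactly the masking the LL/SC change below makes explicit.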
@@ -257,6 +257,14 @@ __LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \
 	unsigned long tmp; \
 	u##sz oldval; \
 	 \
+	/* \
+	 * Sub-word sizes require explicit casting so that the compare \
+	 * part of the cmpxchg doesn't end up interpreting non-zero \
+	 * upper bits of the register containing "old". \
+	 */ \
+	if (sz < 32) \
+		old = (u##sz)old; \
+	 \
 	asm volatile( \
 	"	prfm	pstl1strm, %[v]\n" \
 	"1:	ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \
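
A side note on the new prologue (an assumption about how the macro is instantiated, not shown in this hunk): sz arrives as a literal 8, 16, 32 or 64, so the "if (sz < 32)" test is a compile-time constant and the cast only survives in the sub-word cases. A rough sketch with made-up names:

#include <stdint.h>

typedef uint8_t  u8;
typedef uint64_t u64;

/* Hypothetical reduced version of the prologue; CMPXCHG_PROLOGUE and the
 * cmpxchg_case_*_prologue names are illustrative, not the kernel macros. */
#define CMPXCHG_PROLOGUE(sz, old)				\
	do {							\
		if (sz < 32)	/* constant after expansion */	\
			(old) = (u##sz)(old);			\
	} while (0)

unsigned long cmpxchg_case_8_prologue(unsigned long old)
{
	CMPXCHG_PROLOGUE(8, old);	/* old is truncated to its low byte */
	return old;
}

unsigned long cmpxchg_case_64_prologue(unsigned long old)
{
	CMPXCHG_PROLOGUE(64, old);	/* condition is constant-false, the cast never runs */
	return old;
}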
@@ -448,11 +448,11 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 #define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...) \
 static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr, \
-					      unsigned long old, \
+					      u##sz old, \
 					      u##sz new) \
 { \
 	register unsigned long x0 asm ("x0") = (unsigned long)ptr; \
-	register unsigned long x1 asm ("x1") = old; \
+	register u##sz x1 asm ("x1") = old; \
 	register u##sz x2 asm ("x2") = new; \
 	 \
 	asm volatile(ARM64_LSE_ATOMIC_INSN( \
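
For contrast, a sketch (not from the kernel tree, names made up) of why the LSE side needs no masking at all: once the expected value lives in a u8, a byte-sized compare-and-swap can only ever look at the low 8 bits. Compiled with LSE atomics enabled (for example -march=armv8.1-a), the builtin below would typically lower to a byte CAS such as casalb.

#include <stdint.h>
#include <stdbool.h>

typedef uint8_t u8;

/* Illustrative only: with the expected value typed as u8, any upper bits
 * of the incoming "old" are already gone by the time the hardware CAS
 * compares, mirroring what the u##sz parameter now does for the LSE
 * implementation. */
u8 cmpxchg8_cas_style(volatile u8 *ptr, unsigned long old, u8 new)
{
	u8 expected = old;	/* truncation happens at the type boundary */

	__atomic_compare_exchange_n(ptr, &expected, new, false,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return expected;	/* the value observed in memory */
}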
@@ -123,9 +123,9 @@ static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
 { \
 	switch (size) { \
 	case 1: \
-		return __cmpxchg_case##sfx##_8(ptr, (u8)old, new); \
+		return __cmpxchg_case##sfx##_8(ptr, old, new); \
 	case 2: \
-		return __cmpxchg_case##sfx##_16(ptr, (u16)old, new); \
+		return __cmpxchg_case##sfx##_16(ptr, old, new); \
 	case 4: \
 		return __cmpxchg_case##sfx##_32(ptr, old, new); \
 	case 8: \
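
Finally, why the (u8)/(u16) casts can simply disappear from these call sites: with the callee parameter now typed u##sz, the implicit conversion at the call boundary performs the same truncation. A tiny sketch with a hypothetical helper:

#include <stdint.h>
#include <assert.h>

typedef uint8_t u8;

/* Callee parameter typed u8: passing an unsigned long here truncates it
 * exactly as an explicit (u8) cast at the call site would. */
static u8 take_u8(u8 old)
{
	return old;
}

int main(void)
{
	unsigned long old = 0xffffffffffffff2aUL;

	/* Identical results with and without the explicit cast. */
	assert(take_u8(old) == take_u8((u8)old));
	assert(take_u8(old) == 0x2a);
	return 0;
}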