Commit 3e0f5a15 authored by Will Deacon, committed by Russell King

ARM: 7404/1: cmpxchg64: use atomic64 and local64 routines for cmpxchg64

The cmpxchg64 routines for ARMv6+ CPUs replicate inline assembly that
already exists for atomic64 operations. Furthermore, the cmpxchg64 code
uses the "memory" constraint in the clobber list rather than identifying
the region of memory that is actually modified.

This patch replaces the ARMv6+ cmpxchg64 code with macros that expand to
the atomic64_ and local64_ variants, casting the pointer parameter to
the appropriate container type.

Cc: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 0bd82ade
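
For readers unfamiliar with the container_of() trick the new macros rely on, here is a minimal, userspace-only sketch. It is not the kernel implementation: the container_of() and atomic64_t definitions below are simplified stand-ins, the stand-in atomic64_cmpxchg() is not actually atomic, and the (unsigned long) casts from the real macro are omitted. The sketch only shows how a plain pointer to a 64-bit value is converted back into a pointer to its enclosing atomic64_t before the compare-and-exchange is performed.

/*
 * Minimal userspace sketch (not the kernel code) of the container_of()
 * cast used by the new cmpxchg64() macro. container_of(), atomic64_t and
 * atomic64_cmpxchg() below are simplified stand-ins; the stand-in
 * atomic64_cmpxchg() is NOT atomic and only illustrates the pointer
 * conversion.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

typedef struct {
	long long counter;	/* mirrors the kernel's atomic64_t member name */
} atomic64_t;

/* stand-in: compare-and-exchange on the counter, without any atomicity */
static long long atomic64_cmpxchg(atomic64_t *v, long long old, long long new)
{
	long long prev = v->counter;

	if (prev == old)
		v->counter = new;
	return prev;
}

/* same shape as the macro added by the patch, minus the (unsigned long) casts */
#define cmpxchg64(ptr, o, n)						\
	((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr),	\
						atomic64_t,		\
						counter),		\
					      (o), (n)))

int main(void)
{
	atomic64_t v = { .counter = 42 };
	long long *p = &v.counter;	/* caller only holds a pointer to the 64-bit value */

	long long prev = cmpxchg64(p, 42, 100);
	printf("prev=%lld, now=%lld\n", prev, v.counter);	/* prev=42, now=100 */
	return 0;
}

Since counter is the first (and only) member of atomic64_t, the container_of() cast amounts to a type pun at offset zero, which is what allows cmpxchg64() to be applied to any suitably aligned 64-bit variable rather than only to genuine atomic64_t objects; the same reasoning applies to the local64_t/a case in cmpxchg64_local().
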
@@ -229,66 +229,19 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 					 (unsigned long)(n),		\
 					 sizeof(*(ptr))))
 
-#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
-
-/*
- * Note : ARMv7-M (currently unsupported by Linux) does not support
- * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
- * not be allowed to use __cmpxchg64.
- */
-static inline unsigned long long __cmpxchg64(volatile void *ptr,
-					     unsigned long long old,
-					     unsigned long long new)
-{
-	register unsigned long long oldval asm("r0");
-	register unsigned long long __old asm("r2") = old;
-	register unsigned long long __new asm("r4") = new;
-	unsigned long res;
-
-	do {
-		asm volatile(
-		"	@ __cmpxchg8\n"
-		"	ldrexd	%1, %H1, [%2]\n"
-		"	mov	%0, #0\n"
-		"	teq	%1, %3\n"
-		"	teqeq	%H1, %H3\n"
-		"	strexdeq %0, %4, %H4, [%2]\n"
-		: "=&r" (res), "=&r" (oldval)
-		: "r" (ptr), "Ir" (__old), "r" (__new)
-		: "memory", "cc");
-	} while (res);
-
-	return oldval;
-}
-
-static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
-						unsigned long long old,
-						unsigned long long new)
-{
-	unsigned long long ret;
-
-	smp_mb();
-	ret = __cmpxchg64(ptr, old, new);
-	smp_mb();
-
-	return ret;
-}
-
-#define cmpxchg64(ptr,o,n)						\
-	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
-					(unsigned long long)(o),	\
-					(unsigned long long)(n)))
-
-#define cmpxchg64_local(ptr,o,n)					\
-	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
-					(unsigned long long)(o),	\
-					(unsigned long long)(n)))
-
-#else /* min ARCH = ARMv6 */
-
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-#endif
+#define cmpxchg64(ptr, o, n)						\
+	((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr),	\
+						atomic64_t,		\
+						counter),		\
+					      (unsigned long)(o),	\
+					      (unsigned long)(n)))
+
+#define cmpxchg64_local(ptr, o, n)					\
+	((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr),	\
+						local64_t,		\
+						a),			\
+					     (unsigned long)(o),	\
+					     (unsigned long)(n)))
 
 #endif	/* __LINUX_ARM_ARCH__ >= 6 */