Commit c32ffce0 authored by Will Deacon, committed by Russell King

ARM: 7984/1: prefetch: add prefetchw invocations for barriered atomics

After a bunch of benchmarking on the interaction between dmb and pldw,
it turns out that issuing the pldw *after* the dmb instruction can
give modest performance gains (~3% atomic_add_return improvement on a
dual A15).

This patch adds prefetchw invocations to our barriered atomic operations, including cmpxchg, test_and_xxx and futexes.
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 6ea41c80
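
For reference, the shape each barriered atomic takes after this patch, sketched as C using atomic_add_return(); this is reconstructed from the first hunk below plus the surrounding kernel source, so details outside the hunk (such as the asm constraints) should be read as illustrative rather than authoritative:

static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        smp_mb();                       /* dmb: order against preceding accesses */
        prefetchw(&v->counter);         /* pldw issued after the dmb, per the benchmark result */

        __asm__ __volatile__("@ atomic_add_return\n"
"1:     ldrex   %0, [%3]\n"             /* load-exclusive the counter */
"       add     %0, %0, %4\n"
"       strex   %1, %0, [%3]\n"         /* tmp != 0 means the exclusive store lost the race */
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");

        smp_mb();                       /* order against subsequent accesses */

        return result;
}

Every hunk below applies the same ordering: the barrier first, then prefetchw() (a pldw where the CPU supports it), and only then the ldrex/strex loop.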
@@ -60,6 +60,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
         int result;
         smp_mb();
+        prefetchw(&v->counter);
         __asm__ __volatile__("@ atomic_add_return\n"
 "1: ldrex %0, [%3]\n"
@@ -99,6 +100,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
         int result;
         smp_mb();
+        prefetchw(&v->counter);
         __asm__ __volatile__("@ atomic_sub_return\n"
 "1: ldrex %0, [%3]\n"
@@ -121,6 +123,7 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
         unsigned long res;
         smp_mb();
+        prefetchw(&ptr->counter);
         do {
                 __asm__ __volatile__("@ atomic_cmpxchg\n"
@@ -299,6 +302,7 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
         unsigned long tmp;
         smp_mb();
+        prefetchw(&v->counter);
         __asm__ __volatile__("@ atomic64_add_return\n"
 "1: ldrexd %0, %H0, [%3]\n"
@@ -340,6 +344,7 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
         unsigned long tmp;
         smp_mb();
+        prefetchw(&v->counter);
         __asm__ __volatile__("@ atomic64_sub_return\n"
 "1: ldrexd %0, %H0, [%3]\n"
@@ -364,6 +369,7 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
         unsigned long res;
         smp_mb();
+        prefetchw(&ptr->counter);
         do {
                 __asm__ __volatile__("@ atomic64_cmpxchg\n"
@@ -388,6 +394,7 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
         unsigned long tmp;
         smp_mb();
+        prefetchw(&ptr->counter);
         __asm__ __volatile__("@ atomic64_xchg\n"
 "1: ldrexd %0, %H0, [%3]\n"
@@ -409,6 +416,7 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
         unsigned long tmp;
         smp_mb();
+        prefetchw(&v->counter);
         __asm__ __volatile__("@ atomic64_dec_if_positive\n"
 "1: ldrexd %0, %H0, [%3]\n"
@@ -436,6 +444,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
         int ret = 1;
         smp_mb();
+        prefetchw(&v->counter);
         __asm__ __volatile__("@ atomic64_add_unless\n"
 "1: ldrexd %0, %H0, [%4]\n"
...
@@ -2,6 +2,7 @@
 #define __ASM_ARM_CMPXCHG_H
 #include <linux/irqflags.h>
+#include <linux/prefetch.h>
 #include <asm/barrier.h>
 #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
@@ -35,6 +36,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 #endif
         smp_mb();
+        prefetchw((const void *)ptr);
         switch (size) {
 #if __LINUX_ARM_ARCH__ >= 6
@@ -138,6 +140,8 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 {
         unsigned long oldval, res;
+        prefetchw((const void *)ptr);
         switch (size) {
 #ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */
         case 1:
@@ -230,6 +234,8 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
         unsigned long long oldval;
         unsigned long res;
+        prefetchw(ptr);
         __asm__ __volatile__(
 "1: ldrexd %1, %H1, [%3]\n"
 " teq %1, %4\n"
...
@@ -23,6 +23,7 @@
 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
         smp_mb(); \
+        prefetchw(uaddr); \
         __asm__ __volatile__( \
 "1: ldrex %1, [%3]\n" \
 " " insn "\n" \
@@ -46,6 +47,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                 return -EFAULT;
         smp_mb();
+        /* Prefetching cannot fault */
+        prefetchw(uaddr);
         __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 "1: ldrex %1, [%4]\n"
 " teq %1, %2\n"
...
@@ -37,6 +37,11 @@ UNWIND( .fnstart )
         add     r1, r1, r0, lsl #2      @ Get word offset
         mov     r3, r2, lsl r3          @ create mask
         smp_dmb
+#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
+        .arch_extension mp
+        ALT_SMP(W(pldw) [r1])
+        ALT_UP(W(nop))
+#endif
 1:      ldrex   r2, [r1]
         ands    r0, r2, r3              @ save old value of bit
         \instr  r2, r2, r3              @ toggle bit
...