Commit 3405d230 authored by Michael Ellerman, committed by Benjamin Herrenschmidt

powerpc: Add support for the optimised lockref implementation

This commit adds the architecture support required to enable the
optimised implementation of lockrefs.

That's as simple as defining arch_spin_value_unlocked() and selecting
the Kconfig option.

We also define cmpxchg64_relaxed(), because the lockref code does not
need the cmpxchg to have barrier semantics.
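
For context, both of those hooks are consumed by the generic fast path in lib/lockref.c, which looks roughly like the sketch below (a simplified paraphrase of the kernel code of this era, with config guards and casts trimmed, not the verbatim source). The idea is to snapshot the combined lock+count word and attempt a lockless 64-bit cmpxchg on the count only while the embedded spinlock reads as unlocked in that snapshot:

/* Simplified sketch of lib/lockref.c, not the verbatim kernel source. */
struct lockref {
	union {
		aligned_u64 lock_count;	/* lock + count as one 64-bit word */
		struct {
			spinlock_t lock;
			unsigned int count;
		};
	};
};

#define CMPXCHG_LOOP(CODE, SUCCESS) do {				\
	struct lockref old;						\
	BUILD_BUG_ON(sizeof(old) != 8);					\
	old.lock_count = ACCESS_ONCE(lockref->lock_count);		\
	/* arch_spin_value_unlocked() tests the snapshot by value */	\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \
		struct lockref new = old, prev = old;			\
		CODE							\
		/* relaxed: the count update needs no barrier */	\
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count, \
						   old.lock_count,	\
						   new.lock_count);	\
		if (likely(old.lock_count == prev.lock_count)) {	\
			SUCCESS;					\
		}							\
		cpu_relax();						\
	}								\
} while (0)

Because the unlocked test runs on a copied value it costs no extra memory traffic, and the relaxed cmpxchg avoids the heavyweight entry/exit barriers that the ordinary powerpc cmpxchg() would emit.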

Using Linus' test case[1] on one system I see a 4x improvement for the
basic enablement, and a further 1.3x for cmpxchg64_relaxed(), for a
total of 5.3x vs the baseline.

On another system I see more like 2x improvement.

[1]: http://marc.info/?l=linux-fsdevel&m=137782380714721&w=4

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent f7d98d18
arch/powerpc/Kconfig
@@ -139,6 +139,7 @@ config PPC
 	select OLD_SIGACTION if PPC32
 	select HAVE_DEBUG_STACKOVERFLOW
 	select HAVE_IRQ_EXIT_ON_IRQ_STACK
+	select ARCH_USE_CMPXCHG_LOCKREF if PPC64

 config GENERIC_CSUM
 	def_bool CPU_LITTLE_ENDIAN
arch/powerpc/include/asm/cmpxchg.h
@@ -300,6 +300,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
 	cmpxchg_local((ptr), (o), (n));					\
 })
+#define cmpxchg64_relaxed	cmpxchg64_local
 #else
 #include <asm-generic/cmpxchg-local.h>
 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
arch/powerpc/include/asm/spinlock.h
@@ -54,6 +54,11 @@
 #define SYNC_IO
 #endif

+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return lock.slock == 0;
+}
+
 /*
  * This returns the old value in the lock, so we succeeded
  * in getting the lock if the return value is 0.
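
For reference, an operation such as lockref_get() is then built on that loop, roughly as below (again a simplified sketch of lib/lockref.c): it tries the lockless update first and falls back to taking the spinlock only once the lock has been observed held:

/* Simplified sketch of lockref_get() from lib/lockref.c. */
void lockref_get(struct lockref *lockref)
{
	/* Fast path: bump the count with a single 64-bit cmpxchg. */
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	/* Slow path: the lock was held, take it the ordinary way. */
	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}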