Commit 335d7afb authored by Gerald Schaefer, committed by Ingo Molnar

mutexes, sched: Introduce arch_mutex_cpu_relax()

The spinning mutex implementation uses cpu_relax() in busy loops as a
compiler barrier. Depending on the architecture, cpu_relax() may do more
than is needed in these specific mutex spin loops. On System z, for
instance, cpu_relax() also gives up the time slice of the virtual CPU,
which prevents effective spinning on the mutex.

This patch replaces cpu_relax() in the spinning mutex code with
arch_mutex_cpu_relax(), which can be defined by each architecture that
selects HAVE_ARCH_MUTEX_CPU_RELAX. The default is still cpu_relax(), so
this patch should not affect architectures other than System z for now.
Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1290437256.7455.4.camel@thinkpad>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 22a867d8
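For orientation, the new hook can be summarized in one consolidated sketch (illustrative only, not an extra hunk from this commit: in the patch itself the default definition sits in the generic mutex header and the s390 override in that architecture's asm/mutex.h, rather than in a single #ifdef/#else block):

/*
 * Consolidated sketch of arch_mutex_cpu_relax().  Architectures that
 * select HAVE_ARCH_MUTEX_CPU_RELAX provide their own definition; all
 * others keep the previous behaviour.
 */
#ifdef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
/* s390 variant: a pure compiler barrier, so a spinning waiter does not
 * give up the virtual CPU's time slice the way cpu_relax() does there. */
#define arch_mutex_cpu_relax()	barrier()
#else
/* Default: unchanged behaviour, simply fall back to cpu_relax(). */
#define arch_mutex_cpu_relax()	cpu_relax()
#endif

The mutex spin loops in the hunks below then call arch_mutex_cpu_relax() instead of cpu_relax() directly.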
@@ -175,4 +175,7 @@ config HAVE_PERF_EVENTS_NMI
 config HAVE_ARCH_JUMP_LABEL
 	bool
 
+config HAVE_ARCH_MUTEX_CPU_RELAX
+	bool
+
 source "kernel/gcov/Kconfig"
@@ -99,6 +99,7 @@ config S390
 	select HAVE_KERNEL_LZMA
 	select HAVE_KERNEL_LZO
 	select HAVE_GET_USER_PAGES_FAST
+	select HAVE_ARCH_MUTEX_CPU_RELAX
 	select ARCH_INLINE_SPIN_TRYLOCK
 	select ARCH_INLINE_SPIN_TRYLOCK_BH
 	select ARCH_INLINE_SPIN_LOCK
......
@@ -7,3 +7,5 @@
  */
 
 #include <asm-generic/mutex-dec.h>
+
+#define arch_mutex_cpu_relax()	barrier()
@@ -160,4 +160,8 @@ extern int mutex_trylock(struct mutex *lock);
 extern void mutex_unlock(struct mutex *lock);
 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
+#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
+#define arch_mutex_cpu_relax()	cpu_relax()
+#endif
+
 #endif
@@ -199,7 +199,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * memory barriers as we'll eventually observe the right
 		 * values at the cost of a few extra spins.
 		 */
-		cpu_relax();
+		arch_mutex_cpu_relax();
 	}
 #endif
 	spin_lock_mutex(&lock->wait_lock, flags);
......
@@ -75,6 +75,7 @@
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
+#include <asm/mutex.h>
 
 #include "sched_cpupri.h"
 #include "workqueue_sched.h"
@@ -3888,7 +3889,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 		if (task_thread_info(rq->curr) != owner || need_resched())
 			return 0;
 
-		cpu_relax();
+		arch_mutex_cpu_relax();
 	}
 
 	return 1;
......