Commit 90631822 authored by Jason Low, committed by Ingo Molnar

locking/spinlocks/mcs: Convert osq lock to atomic_t to reduce overhead

The cancellable MCS spinlock is currently used to queue threads that are
doing optimistic spinning. It uses per-cpu nodes: a thread obtaining the
lock accesses and queues the local node corresponding to the CPU it is
running on. Currently, the cancellable MCS lock is implemented using
pointers to these nodes.

In this patch, instead of operating on pointers to the per-cpu nodes, we
store the CPU numbers to which the per-cpu nodes correspond in an atomic_t.
A similar concept is used with the qspinlock.

By operating on the CPU # of the nodes through an atomic_t instead of through
pointers to those nodes, this reduces the overhead of the cancellable MCS
spinlock by 32 bits on 64-bit systems (an atomic_t is 4 bytes, whereas a
pointer is 8).
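
To make the size saving concrete, here is a minimal sketch (illustration only,
not part of the patch; "old_osq_head" is a hypothetical name used purely for
the before/after comparison):

  #include <linux/atomic.h>

  /* Before: each lock embeds a pointer to the tail node (8 bytes on 64-bit). */
  struct old_osq_head {
  	struct optimistic_spin_node *tail;
  };

  /*
   * After: each lock embeds the encoded CPU # of the tail node (4 bytes),
   * with 0 (OSQ_UNLOCKED_VAL) meaning the queue is empty.
   */
  struct optimistic_spin_queue {
  	atomic_t tail;
  };
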
Signed-off-by: Jason Low <jason.low2@hp.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Scott Norton <scott.norton@hp.com>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Waiman Long <waiman.long@hp.com>
Cc: Davidlohr Bueso <davidlohr@hp.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Aswin Chandramouleeswaran <aswin@hp.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Chris Mason <clm@fb.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Josef Bacik <jbacik@fusionio.com>
Link: http://lkml.kernel.org/r/1405358872-3732-3-git-send-email-jason.low2@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 046a619d
@@ -17,6 +17,7 @@
 #include <linux/lockdep.h>
 #include <linux/atomic.h>
 #include <asm/processor.h>
+#include <linux/osq_lock.h>
 
 /*
  * Simple, straightforward mutexes with strict semantics:
@@ -46,7 +47,6 @@
  * - detects multi-task circular deadlocks and prints out all affected
  *   locks and tasks (and only those tasks)
  */
-struct optimistic_spin_node;
 struct mutex {
 	/* 1: unlocked, 0: locked, negative: locked, possible waiters */
 	atomic_t		count;
@@ -56,7 +56,7 @@ struct mutex {
 	struct task_struct	*owner;
 #endif
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-	struct optimistic_spin_node	*osq;	/* Spinner MCS lock */
+	struct optimistic_spin_queue	osq;	/* Spinner MCS lock */
 #endif
 #ifdef CONFIG_DEBUG_MUTEXES
 	const char		*name;
...
#ifndef __LINUX_OSQ_LOCK_H
#define __LINUX_OSQ_LOCK_H

/*
 * An MCS like lock especially tailored for optimistic spinning for sleeping
 * lock implementations (mutex, rwsem, etc).
 */

#define OSQ_UNLOCKED_VAL (0)

struct optimistic_spin_queue {
	/*
	 * Stores an encoded value of the CPU # of the tail node in the queue.
	 * If the queue is empty, then it's set to OSQ_UNLOCKED_VAL.
	 */
	atomic_t tail;
};

#endif
@@ -13,10 +13,9 @@
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
-
 #include <linux/atomic.h>
+#include <linux/osq_lock.h>
 
-struct optimistic_spin_node;
 struct rw_semaphore;
 
 #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
@@ -33,7 +32,7 @@ struct rw_semaphore {
	 * if the owner is running on the cpu.
	 */
	struct task_struct *owner;
-	struct optimistic_spin_node *osq; /* spinner MCS lock */
+	struct optimistic_spin_queue osq; /* spinner MCS lock */
 #endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
@@ -70,7 +69,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
	  __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),	\
	  LIST_HEAD_INIT((name).wait_list),		\
	  NULL, /* owner */				\
-	  NULL /* mcs lock */				\
+	  { ATOMIC_INIT(OSQ_UNLOCKED_VAL) } /* osq */	\
	  __RWSEM_DEP_MAP_INIT(name) }
 #else
 #define __RWSEM_INITIALIZER(name) \
...
@@ -16,19 +16,45 @@
  */
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
 
+/*
+ * We use the value 0 to represent "no CPU", thus the encoded value
+ * will be the CPU number incremented by 1.
+ */
+static inline int encode_cpu(int cpu_nr)
+{
+	return cpu_nr + 1;
+}
+
+static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
+{
+	int cpu_nr = encoded_cpu_val - 1;
+
+	return per_cpu_ptr(&osq_node, cpu_nr);
+}
+
 /*
  * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
  * Can return NULL in case we were the last queued and we updated @lock instead.
  */
 static inline struct optimistic_spin_node *
-osq_wait_next(struct optimistic_spin_node **lock,
+osq_wait_next(struct optimistic_spin_queue *lock,
 	      struct optimistic_spin_node *node,
 	      struct optimistic_spin_node *prev)
 {
 	struct optimistic_spin_node *next = NULL;
+	int curr = encode_cpu(smp_processor_id());
+	int old;
+
+	/*
+	 * If there is a prev node in queue, then the 'old' value will be
+	 * the prev node's CPU #, else it's set to OSQ_UNLOCKED_VAL since if
+	 * we're currently last in queue, then the queue will then become empty.
+	 */
+	old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;
 
 	for (;;) {
-		if (*lock == node && cmpxchg(lock, node, prev) == node) {
+		if (atomic_read(&lock->tail) == curr &&
+		    atomic_cmpxchg(&lock->tail, curr, old) == curr) {
 			/*
 			 * We were the last queued, we moved @lock back. @prev
 			 * will now observe @lock and will complete its
@@ -59,18 +85,23 @@ osq_wait_next(struct optimistic_spin_node **lock,
 	return next;
 }
 
-bool osq_lock(struct optimistic_spin_node **lock)
+bool osq_lock(struct optimistic_spin_queue *lock)
 {
 	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
 	struct optimistic_spin_node *prev, *next;
+	int curr = encode_cpu(smp_processor_id());
+	int old;
 
 	node->locked = 0;
 	node->next = NULL;
+	node->cpu = curr;
 
-	node->prev = prev = xchg(lock, node);
-	if (likely(prev == NULL))
+	old = atomic_xchg(&lock->tail, curr);
+	if (old == OSQ_UNLOCKED_VAL)
 		return true;
 
+	prev = decode_cpu(old);
+	node->prev = prev;
 	ACCESS_ONCE(prev->next) = node;
 
 	/*
@@ -149,15 +180,16 @@ bool osq_lock(struct optimistic_spin_node **lock)
 	return false;
 }
 
-void osq_unlock(struct optimistic_spin_node **lock)
+void osq_unlock(struct optimistic_spin_queue *lock)
 {
 	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
 	struct optimistic_spin_node *next;
+	int curr = encode_cpu(smp_processor_id());
 
 	/*
 	 * Fast path for the uncontended case.
 	 */
-	if (likely(cmpxchg(lock, node, NULL) == node))
+	if (likely(atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL) == curr))
 		return;
 
 	/*
...
@@ -121,9 +121,10 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 struct optimistic_spin_node {
 	struct optimistic_spin_node *next, *prev;
 	int locked; /* 1 if lock acquired */
+	int cpu; /* encoded CPU # value */
 };
 
-extern bool osq_lock(struct optimistic_spin_node **lock);
-extern void osq_unlock(struct optimistic_spin_node **lock);
+extern bool osq_lock(struct optimistic_spin_queue *lock);
+extern void osq_unlock(struct optimistic_spin_queue *lock);
 
 #endif /* __LINUX_MCS_SPINLOCK_H */
@@ -60,7 +60,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 	INIT_LIST_HEAD(&lock->wait_list);
 	mutex_clear_owner(lock);
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-	lock->osq = NULL;
+	atomic_set(&lock->osq.tail, OSQ_UNLOCKED_VAL);
 #endif
 
 	debug_mutex_init(lock, name, key);
...
@@ -84,7 +84,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
 	INIT_LIST_HEAD(&sem->wait_list);
 #ifdef CONFIG_SMP
 	sem->owner = NULL;
-	sem->osq = NULL;
+	atomic_set(&sem->osq.tail, OSQ_UNLOCKED_VAL);
 #endif
 }
...