Commit 046a619d authored by Jason Low, committed by Ingo Molnar

locking/spinlocks/mcs: Rename optimistic_spin_queue() to optimistic_spin_node()

Currently, the per-CPU node structure for the cancellable MCS spinlock is
named "optimistic_spin_queue". However, a follow-up patch in this series
will introduce a new structure that serves as the new "handle" for the
lock, and it is that structure which ought to be named
"optimistic_spin_queue". Additionally, since the current
"optimistic_spin_queue" structures are used as queue "nodes", renaming
them to "node" is more accurate anyway.

This preparatory patch renames every current "optimistic_spin_queue"
to "optimistic_spin_node".
Signed-off-by: Jason Low <jason.low2@hp.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Scott Norton <scott.norton@hp.com>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Waiman Long <waiman.long@hp.com>
Cc: Davidlohr Bueso <davidlohr@hp.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Aswin Chandramouleeswaran <aswin@hp.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Chris Mason <clm@fb.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Josef Bacik <jbacik@fusionio.com>
Link: http://lkml.kernel.org/r/1405358872-3732-2-git-send-email-jason.low2@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 37e95624
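
For context, the "handle" the commit message alludes to is not part of this
patch. What follows is a minimal, hypothetical sketch of the direction the
series takes, assuming the follow-up patch replaces the per-CPU node pointer
embedded in each lock with a small per-lock tail value; the exact layout is
defined by that later patch, not here:

/*
 * Hypothetical sketch only -- the real definition arrives in the
 * follow-up patch. Once the old name is freed up, the per-lock
 * "handle" can become a compact value type instead of a pointer
 * to a per-CPU node:
 */
struct optimistic_spin_queue {
	/* Encodes the tail CPU of the spinner queue; 0 means "empty". */
	atomic_t tail;
};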
@@ -46,7 +46,7 @@
  * - detects multi-task circular deadlocks and prints out all affected
  *   locks and tasks (and only those tasks)
  */
-struct optimistic_spin_queue;
+struct optimistic_spin_node;
 struct mutex {
 	/* 1: unlocked, 0: locked, negative: locked, possible waiters */
 	atomic_t		count;
@@ -56,7 +56,7 @@ struct mutex {
 	struct task_struct	*owner;
 #endif
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-	struct optimistic_spin_queue	*osq;	/* Spinner MCS lock */
+	struct optimistic_spin_node	*osq;	/* Spinner MCS lock */
 #endif
 #ifdef CONFIG_DEBUG_MUTEXES
 	const char		*name;
@@ -16,7 +16,7 @@
 #include <linux/atomic.h>

-struct optimistic_spin_queue;
+struct optimistic_spin_node;
 struct rw_semaphore;

 #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
@@ -33,7 +33,7 @@ struct rw_semaphore {
 	 * if the owner is running on the cpu.
 	 */
 	struct task_struct *owner;
-	struct optimistic_spin_queue *osq; /* spinner MCS lock */
+	struct optimistic_spin_node *osq; /* spinner MCS lock */
 #endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map	dep_map;
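
Both header hunks above get away with a one-line change because each header
only forward-declares the structure and stores a pointer to it; the full
definition lives behind the pointer (see the mcs_spinlock.h hunk below).
A generic illustration of that C pattern, with hypothetical names:

/* A pointer member only needs an incomplete (forward-declared) type. */
struct bar;			/* size unknown here, and that is fine */

struct foo {
	struct bar *b;		/* legal: pointers to incomplete types */
};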
@@ -14,18 +14,18 @@
  * called from interrupt context and we have preemption disabled while
  * spinning.
  */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_queue, osq_node);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);

 /*
  * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
  * Can return NULL in case we were the last queued and we updated @lock instead.
  */
-static inline struct optimistic_spin_queue *
-osq_wait_next(struct optimistic_spin_queue **lock,
-	      struct optimistic_spin_queue *node,
-	      struct optimistic_spin_queue *prev)
+static inline struct optimistic_spin_node *
+osq_wait_next(struct optimistic_spin_node **lock,
+	      struct optimistic_spin_node *node,
+	      struct optimistic_spin_node *prev)
 {
-	struct optimistic_spin_queue *next = NULL;
+	struct optimistic_spin_node *next = NULL;

 	for (;;) {
 		if (*lock == node && cmpxchg(lock, node, prev) == node) {
@@ -59,10 +59,10 @@ osq_wait_next(struct optimistic_spin_queue **lock,
 	return next;
 }

-bool osq_lock(struct optimistic_spin_queue **lock)
+bool osq_lock(struct optimistic_spin_node **lock)
 {
-	struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
-	struct optimistic_spin_queue *prev, *next;
+	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
+	struct optimistic_spin_node *prev, *next;

 	node->locked = 0;
 	node->next = NULL;
@@ -149,10 +149,10 @@ bool osq_lock(struct optimistic_spin_queue **lock)
 	return false;
 }

-void osq_unlock(struct optimistic_spin_queue **lock)
+void osq_unlock(struct optimistic_spin_node **lock)
 {
-	struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
-	struct optimistic_spin_queue *next;
+	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
+	struct optimistic_spin_node *next;

 	/*
 	 * Fast path for the uncontended case.
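
The code above is the node side of a cancellable MCS lock: every CPU owns one
statically allocated osq_node, spinners append themselves to a per-lock tail
with an atomic operation and then spin only on their own node's locked flag,
and osq_wait_next() exists so a spinner can unlink itself when it decides to
stop spinning. Below is a simplified userspace sketch of the basic
(non-cancellable) MCS queueing idea, using C11 atomics and hypothetical
names; the kernel version additionally supports the unqueue path that
osq_lock()/osq_wait_next() implement:

#include <stdatomic.h>
#include <stddef.h>

struct spin_node {
	struct spin_node *_Atomic next;
	atomic_bool locked;		/* becomes true when we own the lock */
};

/* One node per thread stands in for the kernel's per-CPU osq_node. */
static _Thread_local struct spin_node my_node;

static void node_lock(struct spin_node *_Atomic *tail)
{
	struct spin_node *prev;

	atomic_store(&my_node.next, NULL);
	atomic_store(&my_node.locked, false);

	/* Atomically append ourselves; the old tail is our predecessor. */
	prev = atomic_exchange(tail, &my_node);
	if (prev == NULL)
		return;			/* queue was empty: lock acquired */

	atomic_store(&prev->next, &my_node);
	while (!atomic_load(&my_node.locked))
		;			/* spin on our own cache line only */
}

static void node_unlock(struct spin_node *_Atomic *tail)
{
	struct spin_node *next = atomic_load(&my_node.next);

	if (next == NULL) {
		/* Nobody visible behind us: try to empty the queue. */
		struct spin_node *expected = &my_node;
		if (atomic_compare_exchange_strong(tail, &expected, NULL))
			return;
		/* A successor is mid-enqueue; wait for its next-link. */
		while ((next = atomic_load(&my_node.next)) == NULL)
			;
	}
	atomic_store(&next->locked, true);	/* hand the lock over */
}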
@@ -118,12 +118,12 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
  * mutex_lock()/rwsem_down_{read,write}() etc.
  */
-struct optimistic_spin_queue {
-	struct optimistic_spin_queue *next, *prev;
+struct optimistic_spin_node {
+	struct optimistic_spin_node *next, *prev;
 	int locked; /* 1 if lock acquired */
 };

-extern bool osq_lock(struct optimistic_spin_queue **lock);
-extern void osq_unlock(struct optimistic_spin_queue **lock);
+extern bool osq_lock(struct optimistic_spin_node **lock);
+extern void osq_unlock(struct optimistic_spin_node **lock);

 #endif /* __LINUX_MCS_SPINLOCK_H */
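
With those declarations, a sleeping lock's slow path uses the OSQ as an
admission ticket for optimistic spinning: only the osq_lock() winner
busy-waits on the lock owner, so at most one spinner per lock pounds on the
owner field while the rest queue on their per-CPU nodes. A hedged sketch of
the call pattern, modeled on how the mutex slow path uses it; owner_running()
and mutex_try_acquire() are stand-ins, not real kernel APIs:

/* Inside a mutex/rwsem slow path, with preemption disabled. */
static bool try_optimistic_spin(struct mutex *lock)
{
	if (!osq_lock(&lock->osq))
		return false;		/* could not join the spinner queue */

	while (owner_running(lock)) {	/* hypothetical owner check */
		if (mutex_try_acquire(lock)) {	/* hypothetical trylock */
			osq_unlock(&lock->osq);
			return true;
		}
		cpu_relax();		/* be polite to the sibling thread */
	}

	osq_unlock(&lock->osq);
	return false;			/* fall back to sleeping */
}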