Commit 6e1e5196 authored by Davidlohr Bueso, committed by Ingo Molnar

locking/qrwlock: Rename ->lock to ->wait_lock

... trivial, but reads a little nicer when we name our
actual primitive 'lock'.
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <Waiman.Long@hpe.com>
Link: http://lkml.kernel.org/r/1442216244-4409-1-git-send-email-dave@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent e58cdf58
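
The rename is cosmetic but removes an awkward spelling: the qrwlock embeds an internal arch_spinlock_t whose only job is to serialize contenders in the slowpaths, and with the old member name every such site read arch_spin_lock(&lock->lock). A minimal sketch of the resulting shape (condensed from the diff below; comments ours, not from the commit):

typedef struct qrwlock {
	atomic_t	cnts;		/* reader count / writer state */
	arch_spinlock_t	wait_lock;	/* serializes slowpath contenders */
} arch_rwlock_t;

/* before: arch_spin_lock(&lock->lock);      -- which lock?      */
/* after:  arch_spin_lock(&lock->wait_lock); -- self-documenting */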
include/asm-generic/qrwlock_types.h
@@ -10,12 +10,12 @@
 typedef struct qrwlock {
 	atomic_t		cnts;
-	arch_spinlock_t		lock;
+	arch_spinlock_t		wait_lock;
 } arch_rwlock_t;
 
 #define	__ARCH_RW_LOCK_UNLOCKED {		\
 	.cnts = ATOMIC_INIT(0),			\
-	.lock = __ARCH_SPIN_LOCK_UNLOCKED,	\
+	.wait_lock = __ARCH_SPIN_LOCK_UNLOCKED,	\
 }
 
 #endif /* __ASM_GENERIC_QRWLOCK_TYPES_H */

kernel/locking/qrwlock.c
@@ -86,7 +86,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
 	/*
 	 * Put the reader into the wait queue
 	 */
-	arch_spin_lock(&lock->lock);
+	arch_spin_lock(&lock->wait_lock);
 
 	/*
 	 * The ACQUIRE semantics of the following spinning code ensure
@@ -99,7 +99,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
 	/*
 	 * Signal the next one in queue to become queue head
 	 */
-	arch_spin_unlock(&lock->lock);
+	arch_spin_unlock(&lock->wait_lock);
 }
 EXPORT_SYMBOL(queued_read_lock_slowpath);
@@ -112,7 +112,7 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
 	u32 cnts;
 
 	/* Put the writer into the wait queue */
-	arch_spin_lock(&lock->lock);
+	arch_spin_lock(&lock->wait_lock);
 
 	/* Try to acquire the lock directly if no reader is present */
 	if (!atomic_read(&lock->cnts) &&
@@ -144,6 +144,6 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
 		cpu_relax_lowlatency();
 	}
 unlock:
-	arch_spin_unlock(&lock->lock);
+	arch_spin_unlock(&lock->wait_lock);
 }
 EXPORT_SYMBOL(queued_write_lock_slowpath);
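
For readers outside the kernel tree, a rough userspace model of what the slowpaths above do with wait_lock (a sketch under assumed names, with POSIX primitives standing in for the kernel's arch_spinlock_t; the bit layout here is invented and differs from the kernel's):

#include <pthread.h>
#include <stdatomic.h>

#define MODEL_WRITER	0x100u	/* writer-held bit (hypothetical layout) */

struct rwlock_model {
	atomic_uint		cnts;		/* readers low, writer bit above */
	pthread_spinlock_t	wait_lock;	/* serializes slowpath contenders;
						 * set up with pthread_spin_init()
						 * before first use */
};

/* Writer slowpath: queue on wait_lock, claim cnts once it drains to zero. */
static void write_lock_slow(struct rwlock_model *lock)
{
	unsigned int idle = 0;

	pthread_spin_lock(&lock->wait_lock);
	while (!atomic_compare_exchange_weak(&lock->cnts, &idle, MODEL_WRITER))
		idle = 0;	/* CAS stored the observed value; reset and retry */
	pthread_spin_unlock(&lock->wait_lock);
}

static void write_unlock(struct rwlock_model *lock)
{
	atomic_fetch_sub(&lock->cnts, MODEL_WRITER);
}

As in the kernel code, wait_lock is held across the whole acquisition: once one contender owns it, later arrivals queue behind it rather than racing on cnts, which is what keeps writers from being starved.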