Commit e0d02285 authored by Will Deacon, committed by Ingo Molnar

locking/qrwlock: Use 'struct qrwlock' instead of 'struct __qrwlock'

There's no good reason to keep the internal structure of struct qrwlock
hidden from qrwlock.h, particularly as it's actually needed for unlock
and ends up being abstracted independently behind the __qrwlock_write_byte()
function.

Stop pretending we can hide this stuff, and move the __qrwlock definition
into qrwlock, removing the __qrwlock_write_byte() nastiness and using the
same struct definition everywhere instead.
Signed-off-by: Will Deacon <will.deacon@arm.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Jeremy.Linton@arm.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <longman@redhat.com>
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/1507810851-306-2-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 5a8897cc
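
To make the layout concrete before the diff: a minimal user-space sketch (not part of the commit) showing that the old __qrwlock_write_byte() arithmetic and the new wmode union field name the same byte. struct demo_qrwlock is a hypothetical stand-in for the kernel's struct qrwlock, and the __BYTE_ORDER__ compile-time check stands in for CONFIG_CPU_BIG_ENDIAN.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's struct qrwlock: an anonymous union lays the
 * writer-mode byte and the three reader-count bytes over the 32-bit
 * lock word, with the byte order chosen by endianness. */
struct demo_qrwlock {
	union {
		uint32_t cnts;			/* whole lock word */
		struct {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
			uint8_t wmode;		/* Writer mode   */
			uint8_t rcnts[3];	/* Reader counts */
#else
			uint8_t rcnts[3];	/* Reader counts */
			uint8_t wmode;		/* Writer mode   */
#endif
		};
	};
};

int main(void)
{
	struct demo_qrwlock lock = { .cnts = 0 };
	int big_endian = (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__);

	/* Old scheme: locate the writer byte by pointer arithmetic, as
	 * __qrwlock_write_byte() did via IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN). */
	uint8_t *old_wmode = (uint8_t *)&lock + 3 * big_endian;

	/* New scheme: name the same byte directly through the union. */
	assert(old_wmode == &lock.wmode);
	printf("writer byte at offset %td\n", old_wmode - (uint8_t *)&lock);
	return 0;
}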
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -128,23 +128,13 @@ static inline void queued_read_unlock(struct qrwlock *lock)
 	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
 }
 
-/**
- * __qrwlock_write_byte - retrieve the write byte address of a queue rwlock
- * @lock : Pointer to queue rwlock structure
- * Return: the write byte address of a queue rwlock
- */
-static inline u8 *__qrwlock_write_byte(struct qrwlock *lock)
-{
-	return (u8 *)lock + 3 * IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN);
-}
-
 /**
  * queued_write_unlock - release write lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  */
 static inline void queued_write_unlock(struct qrwlock *lock)
 {
-	smp_store_release(__qrwlock_write_byte(lock), 0);
+	smp_store_release(&lock->wmode, 0);
 }
 
 /*
--- a/include/asm-generic/qrwlock_types.h
+++ b/include/asm-generic/qrwlock_types.h
@@ -9,12 +9,23 @@
  */
 
 typedef struct qrwlock {
-	atomic_t		cnts;
+	union {
+		atomic_t cnts;
+		struct {
+#ifdef __LITTLE_ENDIAN
+			u8 wmode;	/* Writer mode   */
+			u8 rcnts[3];	/* Reader counts */
+#else
+			u8 rcnts[3];	/* Reader counts */
+			u8 wmode;	/* Writer mode   */
+#endif
+		};
+	};
 	arch_spinlock_t		wait_lock;
 } arch_rwlock_t;
 
 #define	__ARCH_RW_LOCK_UNLOCKED {		\
-	.cnts = ATOMIC_INIT(0),			\
+	{ .cnts = ATOMIC_INIT(0), },		\
 	.wait_lock = __ARCH_SPIN_LOCK_UNLOCKED,	\
 }
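
One subtlety in the hunk above: because cnts now sits inside an anonymous union, the __ARCH_RW_LOCK_UNLOCKED initializer gains a set of braces around that member. A standalone sketch of the same pattern, with hypothetical names, little-endian layout only, and a plain int standing in for arch_spinlock_t:

#include <stdint.h>
#include <stdio.h>

typedef struct {
	union {
		uint32_t cnts;
		struct {
			uint8_t wmode;	/* little-endian layout only */
			uint8_t rcnts[3];
		};
	};
	int wait_lock;			/* stand-in for arch_spinlock_t */
} demo_rwlock_t;

/* Mirrors the __ARCH_RW_LOCK_UNLOCKED change: the anonymous union's
 * initializer is wrapped in its own braces. */
#define DEMO_RW_LOCK_UNLOCKED {		\
	{ .cnts = 0, },			\
	.wait_lock = 0,			\
}

int main(void)
{
	demo_rwlock_t lock = DEMO_RW_LOCK_UNLOCKED;
	printf("cnts=%u wait_lock=%d\n", (unsigned)lock.cnts, lock.wait_lock);
	return 0;
}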
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -23,26 +23,6 @@
 #include <linux/spinlock.h>
 #include <asm/qrwlock.h>
 
-/*
- * This internal data structure is used for optimizing access to some of
- * the subfields within the atomic_t cnts.
- */
-struct __qrwlock {
-	union {
-		atomic_t cnts;
-		struct {
-#ifdef __LITTLE_ENDIAN
-			u8 wmode;	/* Writer mode   */
-			u8 rcnts[3];	/* Reader counts */
-#else
-			u8 rcnts[3];	/* Reader counts */
-			u8 wmode;	/* Writer mode   */
-#endif
-		};
-	};
-	arch_spinlock_t	lock;
-};
-
 /**
  * rspin_until_writer_unlock - inc reader count & spin until writer is gone
  * @lock  : Pointer to queue rwlock structure
@@ -125,10 +105,8 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
 	 * or wait for a previous writer to go away.
 	 */
 	for (;;) {
-		struct __qrwlock *l = (struct __qrwlock *)lock;
-
-		if (!READ_ONCE(l->wmode) &&
-		    (cmpxchg_relaxed(&l->wmode, 0, _QW_WAITING) == 0))
+		if (!READ_ONCE(lock->wmode) &&
+		    (cmpxchg_relaxed(&lock->wmode, 0, _QW_WAITING) == 0))
 			break;
 
 		cpu_relax();
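
For illustration, a user-space analogue (not from the commit) of the two byte-wide operations the patch simplifies: the slowpath's attempt to claim the writer-mode byte with a relaxed compare-and-swap, and the unlock's release store to that same byte. GCC __atomic builtins stand in for READ_ONCE()/cmpxchg_relaxed()/smp_store_release(); struct demo_qrwlock is a hypothetical stand-in, and _QW_WAITING's value of 1 matches the kernel's qrwlock.h.

#include <stdbool.h>
#include <stdint.h>

#define _QW_WAITING 1	/* value taken from the kernel's qrwlock.h */

struct demo_qrwlock {
	union {
		uint32_t cnts;
		struct {
			uint8_t wmode;	/* little-endian layout only */
			uint8_t rcnts[3];
		};
	};
};

/* One pass of the slowpath loop body: claim the writer-mode byte with a
 * relaxed compare-and-swap, as the patched code does with
 * cmpxchg_relaxed(&lock->wmode, 0, _QW_WAITING). */
static bool demo_try_set_waiting(struct demo_qrwlock *lock)
{
	uint8_t expected = 0;

	if (__atomic_load_n(&lock->wmode, __ATOMIC_RELAXED) != 0)
		return false;
	return __atomic_compare_exchange_n(&lock->wmode, &expected,
					   _QW_WAITING, false,
					   __ATOMIC_RELAXED, __ATOMIC_RELAXED);
}

/* Analogue of the new queued_write_unlock(): release the lock by
 * storing 0 to the writer-mode byte with release ordering. */
static void demo_write_unlock(struct demo_qrwlock *lock)
{
	__atomic_store_n(&lock->wmode, 0, __ATOMIC_RELEASE);
}

int main(void)
{
	struct demo_qrwlock lock = { .cnts = 0 };

	if (demo_try_set_waiting(&lock))
		demo_write_unlock(&lock);
	return 0;
}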