Commit c71fd893 authored by Waiman Long, committed by Ingo Molnar

locking/rwsem: Make owner available even if !CONFIG_RWSEM_SPIN_ON_OWNER

The owner field in the rw_semaphore structure is used primarily for
optimistic spinning. However, identifying the rwsem owner can also be
helpful for debugging and for tracing locking-related issues when
analyzing crash dumps. The owner field may also store state information
that can be important to the operation of the rwsem.

So the owner field is now made a permanent member of the rw_semaphore
structure irrespective of CONFIG_RWSEM_SPIN_ON_OWNER.
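
Illustration, not part of the patch: a condensed view of what struct
rw_semaphore looks like after this change, assembled from the hunks
below. Members not touched by the patch (such as the optional lockdep
map) are elided.

struct rw_semaphore {
        atomic_long_t count;
        /*
         * Write owner or one of the read owners. Can be used as a
         * speculative check to see if the owner is running on the cpu.
         */
        struct task_struct *owner;
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
        struct optimistic_spin_queue osq;       /* spinner MCS lock */
#endif
        raw_spinlock_t wait_lock;
        struct list_head wait_list;
        /* optional debug/lockdep members follow */
};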
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: huang ying <huang.ying.caritas@gmail.com>
Link: https://lkml.kernel.org/r/20190520205918.22251-2-longman@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 69d927bb
@@ -34,12 +34,12 @@
  */
 struct rw_semaphore {
         atomic_long_t count;
-#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
         /*
-         * Write owner. Used as a speculative check to see
-         * if the owner is running on the cpu.
+         * Write owner or one of the read owners. Can be used as a
+         * speculative check to see if the owner is running on the cpu.
          */
         struct task_struct *owner;
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
         struct optimistic_spin_queue osq; /* spinner MCS lock */
 #endif
         raw_spinlock_t wait_lock;

@@ -73,13 +73,14 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
 #endif
 
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
-#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED, .owner = NULL
+#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED
 #else
 #define __RWSEM_OPT_INIT(lockname)
 #endif
 
 #define __RWSEM_INITIALIZER(name)                               \
         { __RWSEM_INIT_COUNT(name),                             \
+          .owner = NULL,                                        \
           .wait_list = LIST_HEAD_INIT((name).wait_list),        \
           .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
           __RWSEM_OPT_INIT(name)                                \

@@ -86,8 +86,8 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
         atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
         raw_spin_lock_init(&sem->wait_lock);
         INIT_LIST_HEAD(&sem->wait_list);
-#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
         sem->owner = NULL;
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
         osq_lock_init(&sem->osq);
 #endif
 }

@@ -61,7 +61,6 @@
 #define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
-#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 /*
  * All writes to owner are protected by WRITE_ONCE() to make sure that
  * store tearing can't happen as optimistic spinners may read and use

@@ -126,7 +125,6 @@ static inline bool rwsem_has_anonymous_owner(struct task_struct *owner)
  * real owner or one of the real owners. The only exception is when the
  * unlock is done by up_read_non_owner().
  */
-#define rwsem_clear_reader_owned rwsem_clear_reader_owned
 static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
 {
         unsigned long val = (unsigned long)current | RWSEM_READER_OWNED

@@ -135,28 +133,7 @@ static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
                 cmpxchg_relaxed((unsigned long *)&sem->owner, val,
                                 RWSEM_READER_OWNED | RWSEM_ANONYMOUSLY_OWNED);
 }
-#endif
-
 #else
-static inline void rwsem_set_owner(struct rw_semaphore *sem)
-{
-}
-
-static inline void rwsem_clear_owner(struct rw_semaphore *sem)
-{
-}
-
-static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
-                                            struct task_struct *owner)
-{
-}
-
-static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
-{
-}
-#endif
-
-#ifndef rwsem_clear_reader_owned
 static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
 {
 }

@@ -1095,7 +1095,7 @@ config PROVE_LOCKING
         select DEBUG_SPINLOCK
         select DEBUG_MUTEXES
         select DEBUG_RT_MUTEXES if RT_MUTEXES
-        select DEBUG_RWSEMS if RWSEM_SPIN_ON_OWNER
+        select DEBUG_RWSEMS
         select DEBUG_WW_MUTEX_SLOWPATH
         select DEBUG_LOCK_ALLOC
         select TRACE_IRQFLAGS

@@ -1199,10 +1199,10 @@ config DEBUG_WW_MUTEX_SLOWPATH
 
 config DEBUG_RWSEMS
         bool "RW Semaphore debugging: basic checks"
-        depends on DEBUG_KERNEL && RWSEM_SPIN_ON_OWNER
+        depends on DEBUG_KERNEL
         help
-          This debugging feature allows mismatched rw semaphore locks and unlocks
-          to be detected and reported.
+          This debugging feature allows mismatched rw semaphore locks
+          and unlocks to be detected and reported.
 
 config DEBUG_LOCK_ALLOC
         bool "Lock debugging: detect incorrect freeing of live locks"
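
As a rough illustration of the debugging/crash-dump use mentioned in
the commit message, a helper along the following lines could decode the
now always-present owner field. The helper name rwsem_describe_owner()
is hypothetical and not part of this patch; RWSEM_READER_OWNED and
RWSEM_ANONYMOUSLY_OWNED are the flag macros used in the hunks above,
assumed here to be low-order bits OR'ed into the owner task pointer.

/* Hypothetical debug helper (not part of this patch): report who owns a rwsem. */
static void rwsem_describe_owner(struct rw_semaphore *sem)
{
        unsigned long owner = (unsigned long)READ_ONCE(sem->owner);

        if (!owner) {
                pr_info("rwsem %px: not owned\n", sem);
        } else if (owner & RWSEM_READER_OWNED) {
                /* Reader-owned: the stored task is only one of the readers. */
                pr_info("rwsem %px: reader-owned%s\n", sem,
                        (owner & RWSEM_ANONYMOUSLY_OWNED) ? " (anonymous)" : "");
        } else {
                /* Writer-owned: the field holds the owning task pointer. */
                pr_info("rwsem %px: write-owned by task %px\n",
                        sem, (struct task_struct *)owner);
        }
}

Because the owner field is no longer hidden behind
CONFIG_RWSEM_SPIN_ON_OWNER, such a check can be built, and DEBUG_RWSEMS
enabled per the Kconfig hunks above, in all configurations.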