Commit 6c2787f2 authored by Yanfei Xu, committed by Peter Zijlstra

locking: Remove rcu_read_{,un}lock() for preempt_{dis,en}able()

preempt_disable/enable() is equivalent to an RCU read-side critical
section, and the spinning code in mutex and rwsem already ensures that
preemption is disabled. So remove the unnecessary rcu_read_lock/unlock()
calls to save some cycles in these hot paths.

Signed-off-by: Yanfei Xu <yanfei.xu@windriver.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Waiman Long <longman@redhat.com>
Link: https://lore.kernel.org/r/20211013134154.1085649-2-yanfei.xu@windriver.com
parent 7cdacc5f
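
For background, the equivalence the changelog leans on comes from the RCU
flavor consolidation (v4.20 and later): any region that runs with preemption
disabled is also an RCU read-side critical section, so an explicit
rcu_read_lock()/rcu_read_unlock() pair inside such a region is redundant.
Below is a minimal sketch of that pattern; the example_owner pointer and
example_owner_on_cpu() helper are hypothetical and not part of this commit:

	#include <linux/preempt.h>
	#include <linux/rcupdate.h>
	#include <linux/sched.h>

	/* Hypothetical RCU-managed pointer, for illustration only. */
	static struct task_struct __rcu *example_owner;

	static bool example_owner_on_cpu(void)
	{
		struct task_struct *owner;
		bool ret = false;

		preempt_disable();	/* implies an RCU read-side critical section */
		owner = rcu_dereference_sched(example_owner);
		if (owner)
			ret = owner->on_cpu;	/* CONFIG_SMP field; owner can't be freed while preemption is off */
		preempt_enable();	/* a concurrent update may now free the old owner */

		return ret;
	}
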
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -351,13 +351,16 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
 {
 	bool ret = true;
 
-	rcu_read_lock();
+	lockdep_assert_preemption_disabled();
+
 	while (__mutex_owner(lock) == owner) {
 		/*
 		 * Ensure we emit the owner->on_cpu, dereference _after_
-		 * checking lock->owner still matches owner. If that fails,
-		 * owner might point to freed memory. If it still matches,
-		 * the rcu_read_lock() ensures the memory stays valid.
+		 * checking lock->owner still matches owner. We already
+		 * disabled preemption, which is equivalent to an RCU
+		 * read-side critical section in the optimistic spinning
+		 * code, so the task_struct won't go away during the
+		 * spinning period.
 		 */
 		barrier();
@@ -377,7 +380,6 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
 
 		cpu_relax();
 	}
-	rcu_read_unlock();
 
 	return ret;
 }
@@ -390,19 +392,25 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
 	struct task_struct *owner;
 	int retval = 1;
 
+	lockdep_assert_preemption_disabled();
+
 	if (need_resched())
 		return 0;
 
-	rcu_read_lock();
+	/*
+	 * We already disabled preemption, which is equivalent to an RCU
+	 * read-side critical section in the optimistic spinning code.
+	 * Thus the task_struct won't go away during the spinning period.
+	 */
 	owner = __mutex_owner(lock);
 
 	/*
 	 * As lock holder preemption issue, we both skip spinning if task is not
 	 * on cpu or its cpu is preempted
 	 */
 	if (owner)
 		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
-	rcu_read_unlock();
 
 	/*
 	 * If lock->owner is not set, the mutex has been released. Return true
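
The mutex hunks above move the lifetime guarantee from rcu_read_lock() to a
preemption-disabled region that the caller must supply;
lockdep_assert_preemption_disabled() documents that contract and, when
lockdep is enabled, warns if a caller breaks it. A hedged sketch of the
caller shape this implies; example_try_optimistic_spin() and
example_spin_helper() are hypothetical stand-ins, not the actual mutex call
sites:

	#include <linux/mutex.h>
	#include <linux/preempt.h>

	/* Hypothetical spin routine; assumed to require preemption disabled. */
	static bool example_spin_helper(struct mutex *lock);

	static bool example_try_optimistic_spin(struct mutex *lock)
	{
		bool ok;

		preempt_disable();		/* provides the RCU read-side critical section */
		ok = example_spin_helper(lock);	/* may safely dereference the owner task_struct */
		preempt_enable();		/* the previous owner may be freed after this */

		return ok;
	}
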
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -635,7 +635,10 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 	}
 
 	preempt_disable();
-	rcu_read_lock();
+	/*
+	 * Disabling preemption is equivalent to an RCU read-side critical
+	 * section, thus the task_struct won't go away.
+	 */
 	owner = rwsem_owner_flags(sem, &flags);
 	/*
 	 * Don't check the read-owner as the entry may be stale.
@@ -643,7 +646,6 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 	if ((flags & RWSEM_NONSPINNABLE) ||
 	    (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
 		ret = false;
-	rcu_read_unlock();
 	preempt_enable();
 
 	lockevent_cond_inc(rwsem_opt_fail, !ret);
@@ -671,12 +673,13 @@ rwsem_spin_on_owner(struct rw_semaphore *sem)
 	unsigned long flags, new_flags;
 	enum owner_state state;
 
+	lockdep_assert_preemption_disabled();
+
 	owner = rwsem_owner_flags(sem, &flags);
 	state = rwsem_owner_state(owner, flags);
 	if (state != OWNER_WRITER)
 		return state;
 
-	rcu_read_lock();
 	for (;;) {
 		/*
 		 * When a waiting writer set the handoff flag, it may spin
@@ -694,7 +697,9 @@ rwsem_spin_on_owner(struct rw_semaphore *sem)
 		 * Ensure we emit the owner->on_cpu, dereference _after_
 		 * checking sem->owner still matches owner, if that fails,
 		 * owner might point to free()d memory, if it still matches,
-		 * the rcu_read_lock() ensures the memory stays valid.
+		 * our spinning context already disabled preemption, which
+		 * is equivalent to an RCU read-side critical section and
+		 * ensures the memory stays valid.
 		 */
 		barrier();
 
@@ -705,7 +710,6 @@ rwsem_spin_on_owner(struct rw_semaphore *sem)
 
 		cpu_relax();
 	}
-	rcu_read_unlock();
 
 	return state;
 }
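
For reference, the ordering argument repeated in the comments above condenses
into a generic spin-wait pattern. The sketch below is hypothetical
(example_lock and its plain owner field are not the rwsem types): the owner
pointer is re-checked before every dereference, barrier() stops the compiler
from hoisting the owner->on_cpu load above that re-check, and the
preemption-disabled region keeps the task_struct allocated while the pointers
still match:

	#include <linux/lockdep.h>
	#include <linux/preempt.h>
	#include <linux/sched.h>

	/* Hypothetical lock type with a plain owner pointer, for illustration. */
	struct example_lock {
		struct task_struct *owner;
	};

	static bool example_spin_on_owner(struct example_lock *lock,
					  struct task_struct *owner)
	{
		bool ret = true;

		lockdep_assert_preemption_disabled();

		while (READ_ONCE(lock->owner) == owner) {
			/*
			 * Order the owner re-check above against the on_cpu
			 * load below; if the check failed, owner could point
			 * at freed memory, but while it matches, disabled
			 * preemption (an RCU read-side critical section)
			 * keeps it valid.
			 */
			barrier();

			if (!owner->on_cpu || need_resched()) {
				ret = false;
				break;
			}
			cpu_relax();
		}

		return ret;
	}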