Commit c0bed69d authored by Kefeng Wang, committed by Peter Zijlstra

locking: Make owner_on_cpu() into <linux/sched.h>

Move owner_on_cpu() from kernel/locking/rwsem.c into
include/linux/sched.h under CONFIG_SMP, then use it in
mutex/rwsem/rtmutex to simplify the code.
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20211203075935.136808-2-wangkefeng.wang@huawei.com
parent 9a75bd0c
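In a nutshell, the helper being added encodes the lock-holder-preemption heuristic: optimistic spinning only pays off while the lock owner is actually executing, and under virtualization the owner's vCPU may itself be preempted by the hypervisor, making further spinning pointless. Below is a minimal user-space model of the predicate; the struct task, its cpu field, and the stubbed vcpu_is_preempted() are illustrative stand-ins, not the kernel API.

    #include <stdbool.h>

    /* Hypothetical stand-ins for the kernel's task_struct and paravirt
     * hook; the real kernel code uses task_cpu(owner) and proper
     * memory-ordering rules rather than a plain cached cpu field. */
    struct task { int on_cpu; int cpu; };

    static bool vcpu_is_preempted(int cpu)
    {
            (void)cpu;
            return false;   /* stub: assume bare metal, no hypervisor */
    }

    /* Worth spinning only while the owner runs on a CPU whose vCPU
     * has not been preempted. */
    static bool owner_on_cpu(struct task *owner)
    {
            return owner->on_cpu && !vcpu_is_preempted(owner->cpu);
    }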
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2171,6 +2171,15 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 #endif
 
 #ifdef CONFIG_SMP
+static inline bool owner_on_cpu(struct task_struct *owner)
+{
+	/*
+	 * As lock holder preemption issue, we both skip spinning if
+	 * task is not on cpu or its cpu is preempted
+	 */
+	return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
+}
+
 /* Returns effective CPU energy utilization, as seen by the scheduler */
 unsigned long sched_cpu_util(int cpu, unsigned long max);
 #endif /* CONFIG_SMP */
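To see what the call sites in the following hunks reduce to, here is a sketch of the optimistic-spin pattern in the same user-space model as above; struct lock, spin_on_owner(), and the use of sched_yield() in place of cpu_relax() are assumptions for illustration, and the kernel's need_resched()/RCU handling is elided.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <sched.h>

    /* Builds on struct task and owner_on_cpu() from the sketch above. */
    struct lock { _Atomic(struct task *) owner; };

    /* Spin while the lock is held and its holder is on a CPU; bail out
     * to the slow (blocking) path as soon as the holder is preempted. */
    static bool spin_on_owner(struct lock *l)
    {
            struct task *owner;

            while ((owner = atomic_load(&l->owner)) != NULL) {
                    if (!owner_on_cpu(owner))
                            return false;   /* holder off-CPU: stop spinning */
                    sched_yield();          /* stand-in for cpu_relax() */
            }
            return true;                    /* released while we spun */
    }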
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -367,8 +367,7 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
 		/*
 		 * Use vcpu_is_preempted to detect lock holder preemption issue.
 		 */
-		if (!owner->on_cpu || need_resched() ||
-		    vcpu_is_preempted(task_cpu(owner))) {
+		if (!owner_on_cpu(owner) || need_resched()) {
 			ret = false;
 			break;
 		}
@@ -403,14 +402,8 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
 	 * structure won't go away during the spinning period.
 	 */
 	owner = __mutex_owner(lock);
-
-	/*
-	 * As lock holder preemption issue, we both skip spinning if task is not
-	 * on cpu or its cpu is preempted
-	 */
 	if (owner)
-		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
+		retval = owner_on_cpu(owner);
 
 	/*
 	 * If lock->owner is not set, the mutex has been released. Return true
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1382,9 +1382,8 @@ static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
 		 *    for CONFIG_PREEMPT_RCU=y)
 		 *  - the VCPU on which owner runs is preempted
 		 */
-		if (!owner->on_cpu || need_resched() ||
-		    rt_mutex_waiter_is_top_waiter(lock, waiter) ||
-		    vcpu_is_preempted(task_cpu(owner))) {
+		if (!owner_on_cpu(owner) || need_resched() ||
+		    rt_mutex_waiter_is_top_waiter(lock, waiter)) {
 			res = false;
 			break;
 		}
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -613,15 +613,6 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 	return false;
 }
 
-static inline bool owner_on_cpu(struct task_struct *owner)
-{
-	/*
-	 * As lock holder preemption issue, we both skip spinning if
-	 * task is not on cpu or its cpu is preempted
-	 */
-	return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
-}
-
 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 {
 	struct task_struct *owner;