Commit 07879c6a authored by Davidlohr Bueso, committed by Ingo Molnar

sched/wake_q: Reduce reference counting for special users

Some users, specifically futexes and rwsems, required fixes
that allowed the callers to be safe when wakeups occur before
they are expected by wake_up_q(). Such scenarios also play
games with reference counting, and until now were relying on
wake_q doing it for them. With the wake_q_add() call being
moved down, this can no longer be the case. As such we end up
with a double task-refcounting overhead, and these callers
(being rather core-ish) care enough about it.

This patch introduces a wake_q_add_safe() call for callers
that have already done the refcounting and for whom the task is
therefore 'safe' from wake_q's point of view (in that a
reference is held throughout the entire queue/wakeup cycle).
With wake_q_add() the reference counting is done internally by
wake_q; with wake_q_add_safe() the caller's reference is consumed.
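
To make the two calling conventions concrete, a minimal caller-side sketch
(the example_* helpers are hypothetical; only the wake_q and task refcount
APIs are from the kernel):

/* Illustrative sketch only; example_wake_plain()/example_wake_with_ref()
 * are made-up helpers, the wake_q/task refcount APIs are the real ones. */
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/wake_q.h>

/* Caller without its own task reference: wake_q_add() internally takes
 * one when it queues the task, and wake_up_q() drops it after waking. */
static void example_wake_plain(struct task_struct *p)
{
	DEFINE_WAKE_Q(wake_q);

	wake_q_add(&wake_q, p);
	wake_up_q(&wake_q);
}

/* Caller that already holds a reference (the futex/rwsem case): hand that
 * reference to the wake_q instead of taking a second one and immediately
 * dropping our own, which is the double refcount this patch avoids. */
static void example_wake_with_ref(struct task_struct *p)
{
	DEFINE_WAKE_Q(wake_q);

	get_task_struct(p);		/* reference owned by this caller */
	wake_q_add_safe(&wake_q, p);	/* consumes that reference */
	wake_up_q(&wake_q);
}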
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <longman@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Xie Yongji <xieyongji@baidu.com>
Cc: Yongji Xie <elohimes@gmail.com>
Cc: andrea.parri@amarulasolutions.com
Cc: lilin24@baidu.com
Cc: liuqi16@baidu.com
Cc: nixun@baidu.com
Cc: yuanlinsi01@baidu.com
Cc: zhangyu31@baidu.com
Link: https://lkml.kernel.org/r/20181218195352.7orq3upiwfdbrdne@linux-r8p5
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 513e1073
@@ -51,8 +51,8 @@ static inline void wake_q_init(struct wake_q_head *head)
 	head->lastp = &head->first;
 }
 
-extern void wake_q_add(struct wake_q_head *head,
-		       struct task_struct *task);
+extern void wake_q_add(struct wake_q_head *head, struct task_struct *task);
+extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task);
 extern void wake_up_q(struct wake_q_head *head);
 
 #endif /* _LINUX_SCHED_WAKE_Q_H */
@@ -1463,8 +1463,7 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
 	 * Queue the task for later wakeup for after we've released
 	 * the hb->lock. wake_q_add() grabs reference to p.
 	 */
-	wake_q_add(wake_q, p);
-	put_task_struct(p);
+	wake_q_add_safe(wake_q, p);
 }
 
 /*
@@ -211,9 +211,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
 		 * Ensure issuing the wakeup (either by us or someone else)
 		 * after setting the reader waiter to nil.
 		 */
-		wake_q_add(wake_q, tsk);
-		/* wake_q_add() already take the task ref */
-		put_task_struct(tsk);
+		wake_q_add_safe(wake_q, tsk);
 	}
 
 	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
@@ -396,19 +396,7 @@ static bool set_nr_if_polling(struct task_struct *p)
 #endif
 #endif
 
-/**
- * wake_q_add() - queue a wakeup for 'later' waking.
- * @head: the wake_q_head to add @task to
- * @task: the task to queue for 'later' wakeup
- *
- * Queue a task for later wakeup, most likely by the wake_up_q() call in the
- * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
- * instantly.
- *
- * This function must be used as-if it were wake_up_process(); IOW the task
- * must be ready to be woken at this location.
- */
-void wake_q_add(struct wake_q_head *head, struct task_struct *task)
+static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
 {
 	struct wake_q_node *node = &task->wake_q;
 
@@ -422,15 +410,55 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
 	 */
 	smp_mb__before_atomic();
 	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
-		return;
-
-	get_task_struct(task);
+		return false;
 
 	/*
 	 * The head is context local, there can be no concurrency.
 	 */
 	*head->lastp = node;
 	head->lastp = &node->next;
+
+	return true;
+}
+
+/**
+ * wake_q_add() - queue a wakeup for 'later' waking.
+ * @head: the wake_q_head to add @task to
+ * @task: the task to queue for 'later' wakeup
+ *
+ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
+ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
+ * instantly.
+ *
+ * This function must be used as-if it were wake_up_process(); IOW the task
+ * must be ready to be woken at this location.
+ */
+void wake_q_add(struct wake_q_head *head, struct task_struct *task)
+{
+	if (__wake_q_add(head, task))
+		get_task_struct(task);
+}
+
+/**
+ * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
+ * @head: the wake_q_head to add @task to
+ * @task: the task to queue for 'later' wakeup
+ *
+ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
+ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
+ * instantly.
+ *
+ * This function must be used as-if it were wake_up_process(); IOW the task
+ * must be ready to be woken at this location.
+ *
+ * This function is essentially a task-safe equivalent to wake_q_add(). Callers
+ * that already hold reference to @task can call the 'safe' version and trust
+ * wake_q to do the right thing depending whether or not the @task is already
+ * queued for wakeup.
+ */
+void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
+{
+	if (!__wake_q_add(head, task))
+		put_task_struct(task);
 }
 
 void wake_up_q(struct wake_q_head *head)
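
The queue-once contract that both wrappers rely on can also be poked at
outside the kernel. Below is a rough, self-contained userspace model of the
__wake_q_add() idea; it is not kernel code, every name in it is made up, and
the kernel's cmpxchg_relaxed()/smp_mb__before_atomic() ordering is simplified
to C11 seq_cst atomics:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Sentinel marking "queued, currently last in the list". */
#define MODEL_Q_TAIL ((struct model_node *)0x1)

struct model_node {
	_Atomic(struct model_node *) next;
};

struct model_task {
	struct model_node wake_q;
	atomic_int refcount;
	const char *name;
};

struct model_q_head {
	_Atomic(struct model_node *) first;
	_Atomic(struct model_node *) *lastp;
};

/* Claim-and-queue: returns false if the task was already queued, which is
 * what lets the wrappers decide whether to take or drop a reference. */
static bool model_q_add(struct model_q_head *head, struct model_task *t)
{
	struct model_node *expected = NULL;

	if (!atomic_compare_exchange_strong(&t->wake_q.next, &expected,
					    MODEL_Q_TAIL))
		return false;	/* someone already queued this task */

	*head->lastp = &t->wake_q;
	head->lastp = &t->wake_q.next;
	return true;
}

int main(void)
{
	struct model_task t = { .refcount = 1, .name = "worker" };
	struct model_q_head q = { .first = NULL, .lastp = &q.first };

	/* wake_q_add() style: take an extra reference only if we queued it. */
	if (model_q_add(&q, &t))
		atomic_fetch_add(&t.refcount, 1);

	/* wake_q_add_safe() style: the caller's reference is handed over;
	 * when the task turns out to be queued already, it is dropped. */
	if (!model_q_add(&q, &t))
		atomic_fetch_sub(&t.refcount, 1);

	printf("%s: refcount after both adds = %d\n", t.name,
	       atomic_load(&t.refcount));
	return 0;
}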