Commit 916671c0 authored by Mike Galbraith's avatar Mike Galbraith Committed by Ingo Molnar

sched: Set skip_clock_update in yield_task_fair()

This is another case where we are on our way to schedule(),
so can save a useless clock update and resulting microscopic
vruntime update.
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1321971686.6855.18.camel@marge.simson.net
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 76854c7e
@@ -4547,6 +4547,13 @@ bool __sched yield_to(struct task_struct *p, bool preempt)
 		 */
 		if (preempt && rq != p_rq)
 			resched_task(p_rq->curr);
+	} else {
+		/*
+		 * We might have set it in task_yield_fair(), but are
+		 * not going to schedule(), so don't want to skip
+		 * the next update.
+		 */
+		rq->skip_clock_update = 0;
 	}
 out:
...
@@ -3075,6 +3075,12 @@ static void yield_task_fair(struct rq *rq)
 		 * Update run-time statistics of the 'current'.
 		 */
 		update_curr(cfs_rq);
+		/*
+		 * Tell update_rq_clock() that we've just updated,
+		 * so we don't do microscopic update in schedule()
+		 * and double the fastpath cost.
+		 */
+		rq->skip_clock_update = 1;
 	}
 	set_skip_buddy(se);
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.