Commit adcc8da8 authored by Davidlohr Bueso, committed by Ingo Molnar

sched/core: Simplify helpers for rq clock update skip requests

By renaming the functions we can get rid of the skip parameter
and have better code readability. It makes zero sense to have
things such as:

  rq_clock_skip_update(rq, false)

when the skip request is in fact not going to happen. Ever. Rename
things such that we end up with:

  rq_clock_skip_update(rq)
  rq_clock_cancel_skipupdate(rq)
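
For reference, the flag these helpers manipulate is one of the
rq::clock_update_flags bits defined in kernel/sched/sched.h; the
values below are quoted for context as of this kernel version and
are not part of the patch:

  #define RQCF_REQ_SKIP	0x01
  #define RQCF_ACT_SKIP	0x02
  #define RQCF_UPDATED	0x04
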
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Cc: matt@codeblueprint.co.uk
Cc: rostedt@goodmis.org
Link: http://lkml.kernel.org/r/20180404161539.nhadkff2aats74jh@linux-n805
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent d29a2064
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -874,7 +874,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * this case, we can save a useless back to back clock update.
 	 */
 	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
-		rq_clock_skip_update(rq, true);
+		rq_clock_skip_update(rq);
 }
 
 #ifdef CONFIG_SMP
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1560,7 +1560,7 @@ static void yield_task_dl(struct rq *rq)
 	 * so we don't do microscopic update in schedule()
 	 * and double the fastpath cost.
 	 */
-	rq_clock_skip_update(rq, true);
+	rq_clock_skip_update(rq);
 }
 
 #ifdef CONFIG_SMP
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7089,7 +7089,7 @@ static void yield_task_fair(struct rq *rq)
 		 * so we don't do microscopic update in schedule()
 		 * and double the fastpath cost.
 		 */
-		rq_clock_skip_update(rq, true);
+		rq_clock_skip_update(rq);
 	}
 
 	set_skip_buddy(se);
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -861,7 +861,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
				 * 'runtime'.
				 */
				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
-					rq_clock_skip_update(rq, false);
+					rq_clock_cancel_skipupdate(rq);
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -976,12 +976,19 @@ static inline u64 rq_clock_task(struct rq *rq)
 	return rq->clock_task;
 }
 
-static inline void rq_clock_skip_update(struct rq *rq, bool skip)
+static inline void rq_clock_skip_update(struct rq *rq)
 {
 	lockdep_assert_held(&rq->lock);
-	if (skip)
-		rq->clock_update_flags |= RQCF_REQ_SKIP;
-	else
-		rq->clock_update_flags &= ~RQCF_REQ_SKIP;
+	rq->clock_update_flags |= RQCF_REQ_SKIP;
+}
+
+/*
+ * See rt task throttling, which is the only time a skip
+ * request is cancelled.
+ */
+static inline void rq_clock_cancel_skipupdate(struct rq *rq)
+{
+	lockdep_assert_held(&rq->lock);
+	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
 }
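
Not visible in this patch is how the request is honoured: __schedule()
promotes a pending RQCF_REQ_SKIP to RQCF_ACT_SKIP by shifting
rq->clock_update_flags left by one, and update_rq_clock() returns early
while RQCF_ACT_SKIP is set. Below is a minimal, self-contained userspace
model of that flow; the helper names mirror the kernel's, but struct rq
and the clock bookkeeping are illustrative stand-ins, not kernel code:

#include <stdio.h>

#define RQCF_REQ_SKIP	0x01	/* skip requested */
#define RQCF_ACT_SKIP	0x02	/* skip active for this clock update */

struct rq {
	unsigned int clock_update_flags;
	unsigned long clock;
};

static void rq_clock_skip_update(struct rq *rq)
{
	rq->clock_update_flags |= RQCF_REQ_SKIP;
}

static void rq_clock_cancel_skipupdate(struct rq *rq)
{
	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
}

static void update_rq_clock(struct rq *rq)
{
	if (rq->clock_update_flags & RQCF_ACT_SKIP)
		return;			/* honour an active skip */
	rq->clock++;			/* stand-in for reading the clock */
}

int main(void)
{
	struct rq rq = { 0, 0 };

	rq_clock_skip_update(&rq);	/* e.g. from yield_task_fair() */
	rq.clock_update_flags <<= 1;	/* __schedule(): promote REQ to ACT */
	update_rq_clock(&rq);		/* skipped, clock stays at 0 */
	printf("after skipped update: %lu\n", rq.clock);

	rq.clock_update_flags = 0;	/* model only: fresh state for round two */
	rq_clock_skip_update(&rq);	/* request a skip... */
	rq_clock_cancel_skipupdate(&rq);/* ...then cancel it (rt unthrottle case) */
	rq.clock_update_flags <<= 1;	/* nothing to promote */
	update_rq_clock(&rq);		/* clock advances normally */
	printf("after normal update:  %lu\n", rq.clock);
	return 0;
}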