Commit 80f5c1b8 authored by Peter Zijlstra, committed by Ingo Molnar

sched/core: Add missing update_rq_clock() in detach_task_cfs_rq()

Instead of adding the update_rq_clock() call all the way at the bottom of
the callstack, add one at the top; this aids the later effort to
minimize the number of update_rq_clock() calls.
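
For illustration only, below is a minimal standalone C sketch of the pattern this
change applies: take the rq lock once at the top-level entry point, update the
clock there, and let deeper helpers merely assert that the clock is fresh. The
struct rq, rq_lock()/rq_unlock(), detach_task() and set_scheduler() here are mock
stand-ins, not the real kernel interfaces.

/*
 * Mock sketch (not kernel code): update the runqueue clock once, right
 * after taking the rq lock at the top of the callstack, so the deeper
 * helpers can rely on a fresh clock instead of each updating it.
 */
#include <stdio.h>

struct rq {
	unsigned long long clock;   /* stand-in for rq->clock */
	int clock_updated;          /* stand-in for rq->clock_update_flags */
};

static void rq_lock(struct rq *rq)   { (void)rq; /* pretend to take rq->lock */ }
static void rq_unlock(struct rq *rq) { (void)rq; /* pretend to drop rq->lock */ }

static void update_rq_clock(struct rq *rq)
{
	rq->clock++;                /* stand-in for reading the scheduler clock */
	rq->clock_updated = 1;
}

/* Deep helper: asserts freshness but never updates, like detach_task_cfs_rq(). */
static void detach_task(struct rq *rq)
{
	if (!rq->clock_updated)
		fprintf(stderr, "WARNING: rq clock is stale\n");
	/* ... would consume rq->clock here ... */
}

/* Top-level entry, like __sched_setscheduler(): lock, update once, call down. */
static void set_scheduler(struct rq *rq)
{
	rq_lock(rq);
	update_rq_clock(rq);        /* the single call added at the top */
	detach_task(rq);            /* sees a fresh clock, no warning */
	rq_unlock(rq);
}

int main(void)
{
	struct rq rq = { 0, 0 };
	set_scheduler(&rq);
	return 0;
}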

  WARNING: CPU: 0 PID: 1 at ../kernel/sched/sched.h:797 detach_task_cfs_rq()
  rq->clock_update_flags < RQCF_ACT_SKIP

  Call Trace:
    dump_stack()
    __warn()
    warn_slowpath_fmt()
    detach_task_cfs_rq()
    switched_from_fair()
    __sched_setscheduler()
    _sched_setscheduler()
    sched_set_stop_task()
    cpu_stop_create()
    __smpboot_create_thread.part.2()
    smpboot_register_percpu_thread_cpumask()
    cpu_stop_init()
    do_one_initcall()
    ? print_cpu_info()
    kernel_init_freeable()
    ? rest_init()
    kernel_init()
    ret_from_fork()
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 4126bad6
@@ -3655,6 +3655,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	BUG_ON(prio > MAX_PRIO);
 
 	rq = __task_rq_lock(p, &rf);
+	update_rq_clock(rq);
 
 	/*
 	 * Idle task boosting is a nono in general. There is one
@@ -4183,6 +4184,7 @@ static int __sched_setscheduler(struct task_struct *p,
 	 * runqueue lock must be held.
 	 */
 	rq = task_rq_lock(p, &rf);
+	update_rq_clock(rq);
 
 	/*
 	 * Changing the policy of the stop threads its a very bad idea
@@ -8435,6 +8437,7 @@ static void cpu_cgroup_fork(struct task_struct *task)
 
 	rq = task_rq_lock(task, &rf);
+	update_rq_clock(rq);
 
 	sched_change_group(task, TASK_SET_GROUP);
 
 	task_rq_unlock(rq, task, &rf);