Commit 0e1f3483 authored by Hiroshi Shimamoto, committed by Ingo Molnar

sched: fix race in schedule()

Fix a hard-to-trigger crash seen in the -rt kernel that also affects
the vanilla scheduler.

There is a race condition between schedule() and several dequeue/enqueue
functions: rt_mutex_setprio(), __setscheduler() and sched_move_task().

When scheduling to idle, idle_balance() is called to pull tasks from
other busy processors, and it may drop the rq lock. In that window the
three functions above can observe the current task with on_rq=0 and
running=1; a task that is still running must be put with
put_prev_task() even though it is no longer on the runqueue.
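
The lock drop happens inside double_lock_balance(), which has to take
two rq locks in address order and may need to release the one it
already holds. A simplified sketch of that logic, assuming the sched.c
of this era (paraphrased for illustration, not part of this patch):

	/* Sketch: acquire busiest->lock while already holding
	 * this_rq->lock, respecting address-based lock ordering.
	 * Releasing this_rq->lock is what opens the race window. */
	static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
	{
		int ret = 0;

		if (unlikely(!spin_trylock(&busiest->lock))) {
			if (busiest < this_rq) {
				spin_unlock(&this_rq->lock);
				spin_lock(&busiest->lock);
				spin_lock(&this_rq->lock);
				ret = 1;	/* the rq lock was dropped */
			} else
				spin_lock(&busiest->lock);
		}
		return ret;
	}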

Here is a possible scenario:

   CPU0                               CPU1
    |                              schedule()
    |                              ->deactivate_task()
    |                              ->idle_balance()
    |                              -->load_balance_newidle()
rt_mutex_setprio()                     |
    |                              --->double_lock_balance()
    *get lock                          *rel lock
    * on_rq=0, running=1               |
    * sched_class is changed           |
    *rel lock                          *get lock
    :                                  |
                                       :
                                   ->put_prev_task_rt()
                                   ->pick_next_task_fair()
                                       => panic

The current process of CPU1 (P1) is scheduling. P1 has been
deactivated, and the scheduler looks for a process to pull from another
CPU's runqueue because CPU1 is about to go idle. idle_balance(),
load_balance_newidle() and double_lock_balance() are called, and
double_lock_balance() can drop the rq lock. Meanwhile, CPU0 is boosting
the priority of P1. Because the boost lands in this window, only P1's
prio and sched_class are changed to RT; the sched entities of P1 and
P1's group are never put. That leaves the cfs_rq in an invalid state:
it still has curr set but no leaf entity, so when pick_next_task_fair()
is called the kernel panics.
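
The fix is the same in all three functions: move put_prev_task() out of
the on_rq block, so a task that is current but already dequeued still
gets put. A minimal sketch, condensed from the hunks below (not
verbatim kernel code):

	/* before: put_prev_task() is unreachable when on_rq == 0 */
	if (on_rq) {
		dequeue_task(rq, p, 0);
		if (running)
			p->sched_class->put_prev_task(rq, p);
	}

	/* after: a running task is put even when off the runqueue */
	if (on_rq)
		dequeue_task(rq, p, 0);
	if (running)
		p->sched_class->put_prev_task(rq, p);

The enqueue side is reordered symmetrically: set_curr_task() is now
called whenever the task is running, independent of the on_rq
re-enqueue.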
Signed-off-by: Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 4faa8496
@@ -4268,11 +4268,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	oldprio = p->prio;
 	on_rq = p->se.on_rq;
 	running = task_current(rq, p);
-	if (on_rq) {
+	if (on_rq)
 		dequeue_task(rq, p, 0);
-		if (running)
-			p->sched_class->put_prev_task(rq, p);
-	}
+	if (running)
+		p->sched_class->put_prev_task(rq, p);
 
 	if (rt_prio(prio))
 		p->sched_class = &rt_sched_class;
@@ -4281,10 +4280,9 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
 	p->prio = prio;
 
+	if (running)
+		p->sched_class->set_curr_task(rq);
 	if (on_rq) {
-		if (running)
-			p->sched_class->set_curr_task(rq);
 		enqueue_task(rq, p, 0);
 
 		check_class_changed(rq, p, prev_class, oldprio, running);
@@ -4581,19 +4579,17 @@ int sched_setscheduler(struct task_struct *p, int policy,
 	update_rq_clock(rq);
 	on_rq = p->se.on_rq;
 	running = task_current(rq, p);
-	if (on_rq) {
+	if (on_rq)
 		deactivate_task(rq, p, 0);
-		if (running)
-			p->sched_class->put_prev_task(rq, p);
-	}
+	if (running)
+		p->sched_class->put_prev_task(rq, p);
 
 	oldprio = p->prio;
 	__setscheduler(rq, p, policy, param->sched_priority);
 
+	if (running)
+		p->sched_class->set_curr_task(rq);
 	if (on_rq) {
-		if (running)
-			p->sched_class->set_curr_task(rq);
 		activate_task(rq, p, 0);
 
 		check_class_changed(rq, p, prev_class, oldprio, running);
@@ -7618,11 +7614,10 @@ void sched_move_task(struct task_struct *tsk)
 	running = task_current(rq, tsk);
 	on_rq = tsk->se.on_rq;
 
-	if (on_rq) {
+	if (on_rq)
 		dequeue_task(rq, tsk, 0);
-		if (unlikely(running))
-			tsk->sched_class->put_prev_task(rq, tsk);
-	}
+	if (unlikely(running))
+		tsk->sched_class->put_prev_task(rq, tsk);
 
 	set_task_rq(tsk, task_cpu(tsk));
 
@@ -7631,11 +7626,10 @@ void sched_move_task(struct task_struct *tsk)
 		tsk->sched_class->moved_group(tsk);
 #endif
 
-	if (on_rq) {
-		if (unlikely(running))
-			tsk->sched_class->set_curr_task(rq);
+	if (unlikely(running))
+		tsk->sched_class->set_curr_task(rq);
+	if (on_rq)
 		enqueue_task(rq, tsk, 0);
-	}
 
 	task_rq_unlock(rq, &flags);
 }