Commit 4c6c4e38 authored by Kirill Tkhai, committed by Ingo Molnar

sched/core: Fix endless loop in pick_next_task()

1) Single-CPU machine case.

When the rq has only RT tasks, but none of them can be picked
because of throttling, we enter an endless loop.

pick_next_task_{dl,rt} return NULL.

In pick_next_task_fair() we permanently hit the retry path:
	if (rq->nr_running != rq->cfs.h_nr_running)
		return RETRY_TASK;

(rq->nr_running is not being decremented when rt_rq becomes
throttled).

There is no chance to unthrottle any rt_rq or to wake a fair task
here, because the rq stays locked permanently and interrupts are
disabled.
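
For reference, the class-iteration loop in pick_next_task()
(kernel/sched/core.c) looked roughly like this at the time
(paraphrased sketch, not part of this patch): a class that keeps
answering RETRY_TASK while the rq stays locked spins here forever.

	again:
		for_each_class(class) {
			p = class->pick_next_task(rq, prev);
			if (p) {
				if (unlikely(p == RETRY_TASK))
					goto again;
				return p;
			}
		}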

2) In the SMP case this can cause a hang too: although we unlock
   the rq in idle_balance(), interrupts are still disabled.

The solution is to check explicitly for runnable tasks in the DL and
RT classes, instead of relying only on the nr_running sum comparison.
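
In effect, pick_next_task_fair() should only return RETRY_TASK when
idle_balance() signals (via pulled_task = -1) that a DL task or an
unthrottled RT task is really runnable. The caller side, paraphrased
(not part of this patch; details may differ from the actual source):

	new_tasks = idle_balance(rq);
	if (new_tasks < 0)	/* a higher class really has work: let the core loop retry */
		return RETRY_TASK;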
Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1394098321.19290.11.camel@tkhai
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent e4aa358b
kernel/sched/fair.c
@@ -6728,7 +6728,9 @@ static int idle_balance(struct rq *this_rq)
 out:
 	/* Is there a task of a high priority class? */
-	if (this_rq->nr_running != this_rq->cfs.h_nr_running)
+	if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
+	    (this_rq->dl.dl_nr_running ||
+	     (this_rq->rt.rt_nr_running && !rt_rq_throttled(&this_rq->rt))))
 		pulled_task = -1;
 
 	if (pulled_task) {
...
kernel/sched/rt.c
@@ -470,11 +470,6 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 		dequeue_rt_entity(rt_se);
 }
 
-static inline int rt_rq_throttled(struct rt_rq *rt_rq)
-{
-	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
-}
-
 static int rt_se_boosted(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
@@ -545,11 +540,6 @@ static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
 }
 
-static inline int rt_rq_throttled(struct rt_rq *rt_rq)
-{
-	return rt_rq->rt_throttled;
-}
-
 static inline const struct cpumask *sched_rt_period_mask(void)
 {
 	return cpu_online_mask;
...
kernel/sched/sched.h
@@ -423,6 +423,18 @@ struct rt_rq {
 #endif
 };
 
+#ifdef CONFIG_RT_GROUP_SCHED
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
+}
+#else
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+	return rt_rq->rt_throttled;
+}
+#endif
+
 /* Deadline class' related fields in a runqueue */
 struct dl_rq {
 	/* runqueue is an rbtree, ordered by deadline */
...