Commit 0c3b9168 authored by Balbir Singh, committed by Ingo Molnar

sched: Fix sched rt group scheduling when hierarchy is enabled

The current sched rt code is broken when it comes to hierarchical
scheduling; this patch fixes two problems:

1. It adds enqueueing (harmless, at worst redundant) when the period
   timer finds a queue that has tasks enqueued but has no run time
   and is not throttled.

2. The most important change is in sched_rt_rq_enqueue/dequeue.
   The old code simply picks the rt_rq belonging to the CPU on
   which the period timer happens to run; the patch fixes it so
   that the rt_se belonging to the rt_rq's own CPU is
   enqueued/dequeued (see the sketch after this list).
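
To make problem 2 concrete, here is a minimal userspace sketch of the
indexing bug; the structures and names below are illustrative stand-ins,
not the kernel's. The bandwidth timer fires on one CPU but services the
rt_rq of every CPU, so indexing the group's per-CPU entity array with
the timer's CPU picks the wrong entity for every other CPU.

/* Illustrative userspace model, not kernel code: a "task group" keeps
 * one scheduling entity per CPU, and the period timer walks all of
 * them from whichever CPU it happens to fire on. */
#include <stdio.h>

#define NR_CPUS 4

struct rt_rq { int cpu; };                          /* knows its own CPU */
struct task_group { const char *rt_se[NR_CPUS]; };  /* per-CPU entities  */

int main(void)
{
	struct task_group tg = {
		{ "se[cpu0]", "se[cpu1]", "se[cpu2]", "se[cpu3]" }
	};
	struct rt_rq rqs[NR_CPUS] = { {0}, {1}, {2}, {3} };
	int timer_cpu = 0; /* CPU the bandwidth timer fires on */

	for (int i = 0; i < NR_CPUS; i++) {
		/* buggy: indexes by the timer's CPU (smp_processor_id()) */
		const char *wrong = tg.rt_se[timer_cpu];
		/* fixed: indexes by the CPU the rt_rq belongs to,
		 * i.e. cpu_of(rq_of_rt_rq(rt_rq)) in the patch */
		const char *right = tg.rt_se[rqs[i].cpu];

		printf("rt_rq of cpu%d: buggy lookup -> %s, fixed -> %s\n",
		       i, wrong, right);
	}
	return 0;
}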

Tested with a simple hierarchy /c/d: c and d are assigned similar
runtimes of 50,000, and a while(1) loop runs within d. Both c and d
get throttled. Without the patch the task simply stops running and
never runs again (depending on which CPU the sched_rt bandwidth timer
runs on); with the patch the task is throttled and resumes as
expected.
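
For reference, a sketch of that setup as a small C helper. It assumes a
v1 cpu cgroup mounted at /sys/fs/cgroup/cpu and treats the 50,000 as
microseconds written to cpu.rt_runtime_us; both are assumptions about
the test machine rather than details from the original report.

/* Hypothetical helper reproducing the /c/d test: both groups get an RT
 * runtime of 50000 via cpu.rt_runtime_us (assumed microseconds); a
 * SCHED_FIFO while(1) task placed in c/d should then throttle and
 * resume each period instead of stopping forever as it did pre-patch.
 * Requires root; the cgroup mount point is an assumption. */
#include <stdio.h>
#include <sys/stat.h>

static int set_rt_runtime(const char *group, long us)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/fs/cgroup/cpu/%s/cpu.rt_runtime_us", group);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%ld\n", us);
	return fclose(f);
}

int main(void)
{
	/* build the /c/d hierarchy from the commit message */
	mkdir("/sys/fs/cgroup/cpu/c", 0755);
	mkdir("/sys/fs/cgroup/cpu/c/d", 0755);

	if (set_rt_runtime("c", 50000) || set_rt_runtime("c/d", 50000)) {
		perror("cpu.rt_runtime_us");
		return 1;
	}

	/* next step (manual): start the while(1) loop as an RT task and
	 * write its pid into /sys/fs/cgroup/cpu/c/d/tasks, then watch it
	 * throttle and resume once per period */
	return 0;
}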

[ bharata: suggestions on how to pick the rt_se belonging to the
  rt_rq and the correct cpu ]
Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: stable@kernel.org
LKML-Reference: <20110303113435.GA2868@balbir.in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent b65a0e0c
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -210,11 +210,12 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
 
 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
-	int this_cpu = smp_processor_id();
 	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
 	struct sched_rt_entity *rt_se;
 
-	rt_se = rt_rq->tg->rt_se[this_cpu];
+	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
+
+	rt_se = rt_rq->tg->rt_se[cpu];
 
 	if (rt_rq->rt_nr_running) {
 		if (rt_se && !on_rt_rq(rt_se))
@@ -226,10 +227,10 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 
 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
-	int this_cpu = smp_processor_id();
 	struct sched_rt_entity *rt_se;
+	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
 
-	rt_se = rt_rq->tg->rt_se[this_cpu];
+	rt_se = rt_rq->tg->rt_se[cpu];
 
 	if (rt_se && on_rt_rq(rt_se))
 		dequeue_rt_entity(rt_se);
@@ -565,8 +566,11 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
 				idle = 0;
 			raw_spin_unlock(&rt_rq->rt_runtime_lock);
-		} else if (rt_rq->rt_nr_running)
+		} else if (rt_rq->rt_nr_running) {
 			idle = 0;
+			if (!rt_rq_throttled(rt_rq))
+				enqueue = 1;
+		}
 
 		if (enqueue)
 			sched_rt_rq_enqueue(rt_rq);
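
Finally, a condensed restatement of the branch the last hunk fixes, as
a standalone userspace model; the field and helper names mirror the
kernel's but are local stand-ins, not kernel code.

/* Userspace model of the fixed decision in do_sched_rt_period_timer():
 * a group that has consumed no runtime this period yet still has tasks
 * queued must be re-enqueued, provided it is not throttled. */
#include <stdbool.h>
#include <stdio.h>

struct rt_rq_model {
	unsigned long rt_time;       /* runtime consumed this period */
	unsigned int  rt_nr_running; /* tasks queued on this rt_rq   */
	bool          rt_throttled;  /* currently over budget?       */
};

/* should the period timer (re)enqueue this rt_rq's entity? */
static bool period_timer_should_enqueue(const struct rt_rq_model *rt_rq)
{
	if (rt_rq->rt_time)
		return false; /* handled by the runtime-replenish branch */
	/* the case the patch adds: queued tasks, no consumed runtime,
	 * and not throttled -> enqueue so the group keeps running */
	return rt_rq->rt_nr_running && !rt_rq->rt_throttled;
}

int main(void)
{
	struct rt_rq_model rq = { 0, 1, false };

	printf("enqueue? %s\n",
	       period_timer_should_enqueue(&rq) ? "yes" : "no");
	return 0;
}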