Commit 940959e9 authored by Peter Zijlstra, committed by Ingo Molnar

sched: fixlet for group load balance

We should not only correct the increment for the initial group, but should
be consistent and do so for all the groups we encounter.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 63e5c398
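For orientation before the diff: below is a standalone, simplified sketch of the loop structure effective_load() ends up with after this patch. The struct grp type, its field names, effective_load_sketch(), and all numbers are made up for illustration and are not the kernel's data structures or values; in the kernel the walk is for_each_sched_entity(se) starting from tg->se[cpu]. The sketch only shows the two things the patch changes: the share-update correction (more_w) is applied at every level of the group hierarchy, and the divide is guarded explicitly instead of through the old D() macro.

/*
 * Standalone sketch, not the kernel code: models the shape of the
 * patched effective_load() walk.  struct grp and all numbers below
 * are hypothetical.
 */
#include <stdio.h>

struct grp {
	long shares;		/* S:  shares configured for the group        */
	long cpu_shares;	/* s:  this cpu's current share of the group  */
	long rq_weight;		/* rw: this cpu's runqueue weight             */
	long more_w;		/* correction since shares were last updated  */
	struct grp *parent;
};

static long effective_load_sketch(struct grp *g, long wl, long wg)
{
	for (; g; g = g->parent) {
		long S = g->shares, s = g->cpu_shares, rw = g->rq_weight;
		long a, b;

		/* the fixlet: apply the correction at every level,
		 * not just for the initial group */
		wl += g->more_w;
		wg += g->more_w;

		a = S * (rw + wl);
		b = S * rw + s * wg;

		wl = s * (a - b);
		if (b)		/* explicit guard replaces the old D() macro */
			wl /= b;

		/* higher levels already account for this group's weight */
		wg = 0;
	}
	return wl;
}

int main(void)
{
	/* two nested groups with made-up numbers */
	struct grp root = { 1024, 512, 2048, 16, NULL };
	struct grp leaf = { 1024, 256, 1024, 32, &root };

	printf("effective load of adding wl=512: %ld\n",
	       effective_load_sketch(&leaf, 512, 512));
	return 0;
}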
@@ -1027,7 +1027,6 @@ static long effective_load(struct task_group *tg, int cpu,
 		long wl, long wg)
 {
 	struct sched_entity *se = tg->se[cpu];
-	long more_w;
 
 	if (!tg->parent)
 		return wl;
@@ -1039,6 +1038,10 @@ static long effective_load(struct task_group *tg, int cpu,
 	if (!wl && sched_feat(ASYM_EFF_LOAD))
 		return wl;
 
+	for_each_sched_entity(se) {
+		long S, rw, s, a, b;
+		long more_w;
+
 	/*
 	 * Instead of using this increment, also add the difference
 	 * between when the shares were last updated and now.
@@ -1047,11 +1050,6 @@ static long effective_load(struct task_group *tg, int cpu,
 	wl += more_w;
 	wg += more_w;
 
-	for_each_sched_entity(se) {
-#define D(n) (likely(n) ? (n) : 1)
-
-		long S, rw, s, a, b;
-
 		S = se->my_q->tg->shares;
 		s = se->my_q->shares;
 		rw = se->my_q->rq_weight;
@@ -1059,7 +1057,11 @@ static long effective_load(struct task_group *tg, int cpu,
 		a = S*(rw + wl);
 		b = S*rw + s*wg;
 
-		wl = s*(a-b)/D(b);
+		wl = s*(a-b);
+
+		if (likely(b))
+			wl /= b;
+
 	/*
 	 * Assume the group is already running and will
 	 * thus already be accounted for in the weight.
@@ -1068,7 +1070,6 @@ static long effective_load(struct task_group *tg, int cpu,
 	 * alter the group weight.
 	 */
 		wg = 0;
-#undef D
 	}
 
 	return wl;