Commit 94fe0f3e authored by Darren Hart, committed by Linus Torvalds

[PATCH] sched: active_load_balance fixes

The following patch against the latest mm fixes several problems with
active_load_balance().

Rather than starting with the highest allowable domain (the highest one that
still has SD_LOAD_BALANCE set) and depending on the order of the cpu groups,
we start at the lowest domain and work up until we find a suitable CPU or run
out of options (SD_LOAD_BALANCE is no longer set).  This is a more robust
approach, as it is explicit and not subject to the construction order of the
cpu groups.
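
In sketch form, the new approach is just a bottom-up walk with an explicit
flag test: for_each_domain() iterates from the lowest domain upward, and the
walk stops as soon as SD_LOAD_BALANCE is clear (excerpted from the patch
below):

	for_each_domain(busiest_cpu, sd) {
		if (!(sd->flags & SD_LOAD_BALANCE) || busiest_rq->nr_running <= 1)
			break;	/* out of domains, or out of tasks to move */
		/* ... scan this domain's groups for a suitable target CPU ... */
	}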

We move the test for busiest_rq->nr_running <= 1 into the domain loop so we
don't keep trying to move tasks when there are none left to move.  This new
logic (testing nr_running inside the domain loop) should make the
busiest_rq == target_rq condition truly impossible, so we have replaced the
graceful continue-on-fail with a BUG_ON.  (Bjorn Helgaas, please confirm.)
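
Concretely, nr_running now gates both the domain loop and the inner group
loop, and the old bail-out on a matching runqueue becomes an assertion
(excerpted from the patch below):

	if (!(sd->flags & SD_LOAD_BALANCE) || busiest_rq->nr_running <= 1)
		break;	/* no more domains to search or no more tasks to move */
	...
	BUG_ON(busiest_rq == target_rq);
	...
	} while (cpu_group != sd->groups && busiest_rq->nr_running > 1);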

We no longer exclude the busiest_cpu's own group from the pool of candidate
groups to push to: it is in fact the ideal destination, even if it is
unlikely to have an available CPU.  Note that removing the
group == busy_group test and instead letting that group be checked for
suitability like any other leaves the running time nearly unchanged.
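
Every CPU in every group, including the busiest_cpu's own group, is now
screened by one per-CPU test; only already-visited CPUs, the busiest_cpu
itself, and CPUs with busy siblings are skipped (excerpted from the patch
below):

	for_each_cpu_mask(cpu, cpu_group->cpumask) {
		if (cpu_isset(cpu, visited_cpus) || cpu == busiest_cpu ||
		    !cpu_and_siblings_are_idle(cpu)) {
			cpu_set(cpu, visited_cpus);
			continue;
		}
		target_cpu = cpu;
		break;
	}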

We no longer force the destination CPU to be in a group of completely idle
CPUs, nor to be the last in that group.
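
Instead, a destination CPU merely has to be idle along with all of its SMT
siblings; the patch adds a small helper for this test, which reduces to
plain idle_cpu() on non-SMT configurations (from the patch below):

#ifdef CONFIG_SCHED_SMT
static int cpu_and_siblings_are_idle(int cpu)
{
	int sib;

	for_each_cpu_mask(sib, cpu_sibling_map[cpu]) {
		if (idle_cpu(sib))
			continue;
		return 0;
	}

	return 1;
}
#else
#define cpu_and_siblings_are_idle(A) idle_cpu(A)
#endif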
Signed-off-by: Darren Hart <dvhltc@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 7e063818
...
@@ -2060,70 +2060,85 @@ static inline void idle_balance(int this_cpu, runqueue_t *this_rq)
 	}
 }
 
+#ifdef CONFIG_SCHED_SMT
+static int cpu_and_siblings_are_idle(int cpu)
+{
+	int sib;
+
+	for_each_cpu_mask(sib, cpu_sibling_map[cpu]) {
+		if (idle_cpu(sib))
+			continue;
+		return 0;
+	}
+
+	return 1;
+}
+#else
+#define cpu_and_siblings_are_idle(A) idle_cpu(A)
+#endif
+
 /*
- * active_load_balance is run by migration threads. It pushes a running
- * task off the cpu. It can be required to correctly have at least 1 task
- * running on each physical CPU where possible, and not have a physical /
- * logical imbalance.
+ * active_load_balance is run by migration threads. It pushes running tasks
+ * off the busiest CPU onto idle CPUs. It requires at least 1 task to be
+ * running on each physical CPU where possible, and avoids physical /
+ * logical imbalances.
  *
- * Called with busiest locked.
+ * Called with busiest_rq locked.
  */
-static void active_load_balance(runqueue_t *busiest, int busiest_cpu)
+static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu)
 {
 	struct sched_domain *sd;
-	struct sched_group *group, *busy_group;
-	int i;
+	struct sched_group *cpu_group;
+	cpumask_t visited_cpus;
 
-	schedstat_inc(busiest, alb_cnt);
-	if (busiest->nr_running <= 1)
-		return;
-
-	for_each_domain(busiest_cpu, sd)
-		if (cpu_isset(busiest->push_cpu, sd->span))
-			break;
-	if (!sd)
-		return;
-
-	group = sd->groups;
-	while (!cpu_isset(busiest_cpu, group->cpumask))
-		group = group->next;
-	busy_group = group;
-
-	group = sd->groups;
-	do {
-		runqueue_t *rq;
-		int push_cpu = 0;
-
-		if (group == busy_group)
-			goto next_group;
-
-		for_each_cpu_mask(i, group->cpumask) {
-			if (!idle_cpu(i))
-				goto next_group;
-			push_cpu = i;
-		}
-
-		rq = cpu_rq(push_cpu);
-
-		/*
-		 * This condition is "impossible", but since load
-		 * balancing is inherently a bit racy and statistical,
-		 * it can trigger.. Reported by Bjorn Helgaas on a
-		 * 128-cpu setup.
-		 */
-		if (unlikely(busiest == rq))
-			goto next_group;
-
-		double_lock_balance(busiest, rq);
-		if (move_tasks(rq, push_cpu, busiest, 1, sd, SCHED_IDLE)) {
-			schedstat_inc(busiest, alb_lost);
-			schedstat_inc(rq, alb_gained);
-		} else {
-			schedstat_inc(busiest, alb_failed);
-		}
-		spin_unlock(&rq->lock);
+	schedstat_inc(busiest_rq, alb_cnt);
+	/*
+	 * Search for suitable CPUs to push tasks to in successively higher
+	 * domains with SD_LOAD_BALANCE set.
+	 */
+	visited_cpus = CPU_MASK_NONE;
+	for_each_domain(busiest_cpu, sd) {
+		if (!(sd->flags & SD_LOAD_BALANCE) || busiest_rq->nr_running <= 1)
+			break; /* no more domains to search or no more tasks to move */
+
+		cpu_group = sd->groups;
+		do { /* sched_groups should either use list_heads or be merged into the domains structure */
+			int cpu, target_cpu = -1;
+			runqueue_t *target_rq;
+
+			for_each_cpu_mask(cpu, cpu_group->cpumask) {
+				if (cpu_isset(cpu, visited_cpus) || cpu == busiest_cpu ||
+				    !cpu_and_siblings_are_idle(cpu)) {
+					cpu_set(cpu, visited_cpus);
+					continue;
+				}
+				target_cpu = cpu;
+				break;
+			}
+			if (target_cpu == -1)
+				goto next_group; /* failed to find a suitable target cpu in this domain */
+
+			target_rq = cpu_rq(target_cpu);
+
+			/*
+			 * This condition is "impossible", if it occurs we need to fix it.
+			 * Reported by Bjorn Helgaas on a 128-cpu setup.
+			 */
+			BUG_ON(busiest_rq == target_rq);
+
+			/* move a task from busiest_rq to target_rq */
+			double_lock_balance(busiest_rq, target_rq);
+			if (move_tasks(target_rq, target_cpu, busiest_rq, 1, sd, SCHED_IDLE)) {
+				schedstat_inc(busiest_rq, alb_lost);
+				schedstat_inc(target_rq, alb_gained);
+			} else {
+				schedstat_inc(busiest_rq, alb_failed);
+			}
+			spin_unlock(&target_rq->lock);
 next_group:
-		group = group->next;
-	} while (group != sd->groups);
+			cpu_group = cpu_group->next;
+		} while (cpu_group != sd->groups && busiest_rq->nr_running > 1);
+	}
 }
 
 /*
...