Commit 11f10e54 authored by Vincent Guittot, committed by Ingo Molnar

sched/fair: Use load instead of runnable load in wakeup path

Runnable load was originally introduced to take into account the case where
blocked load biases the wake up path, which may end up selecting an overloaded
CPU with a large number of runnable tasks instead of an underutilized
CPU with a huge blocked load.

The wake up path now starts by looking for idle CPUs before comparing
runnable load, so it is worth aligning the wake up path with the
load_balance() logic.
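
The distinction the message relies on can be sketched with a small userspace
toy model. Everything below (the struct, helpers and numbers) is invented for
illustration and is not the kernel's PELT code; it only shows why a CPU whose
load is mostly blocked tasks looks heavy to a plain load comparison but light
to a runnable-load comparison:

    /*
     * Toy model, not kernel code: "load" keeps a decaying contribution from
     * blocked tasks, while "runnable load" only counts queued tasks, so an
     * underutilized CPU with a large blocked component compares differently
     * under the two metrics.  All names and numbers are made up.
     */
    #include <stdio.h>

    struct toy_cpu {
            unsigned long runnable_load;    /* contribution of queued tasks */
            unsigned long blocked_load;     /* decaying contribution of tasks
                                               that went to sleep on this CPU */
    };

    static unsigned long toy_cpu_load(const struct toy_cpu *c)
    {
            /* analogous role to cpu_load(): runnable + blocked contributions */
            return c->runnable_load + c->blocked_load;
    }

    static unsigned long toy_cpu_runnable_load(const struct toy_cpu *c)
    {
            /* analogous role to cpu_runnable_load(): queued tasks only */
            return c->runnable_load;
    }

    int main(void)
    {
            struct toy_cpu busy  = { .runnable_load = 800, .blocked_load =  50 };
            struct toy_cpu quiet = { .runnable_load = 100, .blocked_load = 900 };

            /* runnable-load view: "quiet" wins despite its blocked load */
            printf("runnable: busy=%lu quiet=%lu\n",
                   toy_cpu_runnable_load(&busy), toy_cpu_runnable_load(&quiet));

            /* load view: the blocked component makes "quiet" look heavier */
            printf("load:     busy=%lu quiet=%lu\n",
                   toy_cpu_load(&busy), toy_cpu_load(&quiet));
            return 0;
    }
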
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Ben Segall <bsegall@google.com>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Morten.Rasmussen@arm.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: hdanton@sina.com
Cc: parth@linux.ibm.com
Cc: pauld@redhat.com
Cc: quentin.perret@arm.com
Cc: riel@surriel.com
Cc: srikar@linux.vnet.ibm.com
Cc: valentin.schneider@arm.com
Link: https://lkml.kernel.org/r/1571405198-27570-10-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c63be7be
@@ -1474,7 +1474,12 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
 	       group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
 }
 
-static unsigned long cpu_runnable_load(struct rq *rq);
+static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
+
+static unsigned long cpu_runnable_load(struct rq *rq)
+{
+	return cfs_rq_runnable_load_avg(&rq->cfs);
+}
 
 /* Cached statistics for all CPUs within a node */
 struct numa_stats {
@@ -5370,11 +5375,6 @@ static int sched_idle_cpu(int cpu)
 			rq->nr_running);
 }
 
-static unsigned long cpu_runnable_load(struct rq *rq)
-{
-	return cfs_rq_runnable_load_avg(&rq->cfs);
-}
-
 static unsigned long cpu_load(struct rq *rq)
 {
 	return cfs_rq_load_avg(&rq->cfs);
@@ -5475,7 +5475,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
 	s64 this_eff_load, prev_eff_load;
 	unsigned long task_load;
 
-	this_eff_load = cpu_runnable_load(cpu_rq(this_cpu));
+	this_eff_load = cpu_load(cpu_rq(this_cpu));
 
 	if (sync) {
 		unsigned long current_load = task_h_load(current);
@@ -5493,7 +5493,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
 	this_eff_load *= 100;
 	this_eff_load *= capacity_of(prev_cpu);
 
-	prev_eff_load = cpu_runnable_load(cpu_rq(prev_cpu));
+	prev_eff_load = cpu_load(cpu_rq(prev_cpu));
 	prev_eff_load -= task_load;
 	if (sched_feat(WA_BIAS))
 		prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
@@ -5581,7 +5581,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 		max_spare_cap = 0;
 
 		for_each_cpu(i, sched_group_span(group)) {
-			load = cpu_runnable_load(cpu_rq(i));
+			load = cpu_load(cpu_rq(i));
 			runnable_load += load;
 
 			avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
@@ -5722,7 +5722,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
 				continue;
 			}
 
-			load = cpu_runnable_load(cpu_rq(i));
+			load = cpu_load(cpu_rq(i));
 			if (load < min_load) {
 				min_load = load;
 				least_loaded_cpu = i;
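
For context, here is a simplified userspace sketch of the capacity-scaled
comparison that wake_affine_weight() ends up doing with cpu_load() after this
change. The struct, helper name and the numbers in main() are made up for
illustration; only the shape of the arithmetic follows the hunks above
(sync handling and the sched_feat(WA_BIAS) checks are omitted):

    /*
     * Simplified sketch, not the kernel function: compare the load each CPU
     * would carry, each scaled by the other CPU's capacity, with prev's load
     * inflated by an imbalance percentage so the decision is biased toward
     * the waking CPU.  All identifiers and values are invented.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct toy_rq {
            unsigned long load;             /* stand-in for cpu_load(rq)     */
            unsigned long capacity;         /* stand-in for capacity_of(cpu) */
    };

    /* Return true if running the waking task on "this" CPU looks cheaper than
     * leaving it on "prev" once both sides are scaled by the other CPU's
     * capacity and prev's load is inflated by the imbalance percentage. */
    static bool toy_wake_affine_weight(const struct toy_rq *this_rq,
                                       const struct toy_rq *prev_rq,
                                       unsigned long task_load,
                                       unsigned int imbalance_pct)
    {
            unsigned long long this_eff, prev_eff;

            this_eff = this_rq->load + task_load;   /* task would run here */
            this_eff *= 100;
            this_eff *= prev_rq->capacity;

            prev_eff = prev_rq->load;               /* task's load leaves prev */
            prev_eff = prev_eff > task_load ? prev_eff - task_load : 0;
            prev_eff *= 100 + (imbalance_pct - 100) / 2;
            prev_eff *= this_rq->capacity;

            return this_eff < prev_eff;
    }

    int main(void)
    {
            struct toy_rq this_rq = { .load = 300, .capacity = 1024 };
            struct toy_rq prev_rq = { .load = 700, .capacity = 1024 };

            printf("pull to waking CPU: %s\n",
                   toy_wake_affine_weight(&this_rq, &prev_rq, 100, 117) ?
                   "yes" : "no");
            return 0;
    }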