Commit d153b153 authored by Peter Zijlstra, committed by Ingo Molnar

sched/core: Fix wake_affine() performance regression

Eric reported a sysbench regression against commit:

  3fed382b ("sched/numa: Implement NUMA node level wake_affine()")

Similarly, Rik was looking at the NAS-lu.C benchmark, which regressed
against his v3.10 enterprise kernel.

PRE (current tip/master):

 ivb-ep sysbench:

   2: [30 secs]     transactions:                        64110  (2136.94 per sec.)
   5: [30 secs]     transactions:                        143644 (4787.99 per sec.)
  10: [30 secs]     transactions:                        274298 (9142.93 per sec.)
  20: [30 secs]     transactions:                        418683 (13955.45 per sec.)
  40: [30 secs]     transactions:                        320731 (10690.15 per sec.)
  80: [30 secs]     transactions:                        355096 (11834.28 per sec.)

 hsw-ex NAS:

 OMP_PROC_BIND/lu.C.x_threads_144_run_1.log: Time in seconds =                    18.01
 OMP_PROC_BIND/lu.C.x_threads_144_run_2.log: Time in seconds =                    17.89
 OMP_PROC_BIND/lu.C.x_threads_144_run_3.log: Time in seconds =                    17.93
 lu.C.x_threads_144_run_1.log: Time in seconds =                   434.68
 lu.C.x_threads_144_run_2.log: Time in seconds =                   405.36
 lu.C.x_threads_144_run_3.log: Time in seconds =                   433.83

POST (+patch):

 ivb-ep sysbench:

   2: [30 secs]     transactions:                        64494  (2149.75 per sec.)
   5: [30 secs]     transactions:                        145114 (4836.99 per sec.)
  10: [30 secs]     transactions:                        278311 (9276.69 per sec.)
  20: [30 secs]     transactions:                        437169 (14571.60 per sec.)
  40: [30 secs]     transactions:                        669837 (22326.73 per sec.)
  80: [30 secs]     transactions:                        631739 (21055.88 per sec.)

 hsw-ex NAS:

 lu.C.x_threads_144_run_1.log: Time in seconds =                    23.36
 lu.C.x_threads_144_run_2.log: Time in seconds =                    22.96
 lu.C.x_threads_144_run_3.log: Time in seconds =                    22.52

This patch takes out all the shiny wake_affine() stuff and goes back to
utter basics. Between the two CPUs involved with the wakeup (the CPU
doing the wakeup and the CPU we ran on previously), pick the one we can
run on _now_.
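Distilled to its core, the decision looks like the sketch below (a
minimal illustration in kernel style, not the literal patch:
pick_wake_cpu() is a hypothetical name introduced for illustration,
while idle_cpu() and cpu_rq() are the actual kernel primitives the
patch uses):

  /*
   * Minimal sketch of the new heuristic: between the waking CPU and
   * the task's previous CPU, prefer the waking CPU only when the task
   * could start running there immediately.
   */
  static int pick_wake_cpu(int this_cpu, int prev_cpu, int sync)
  {
  	/* The waking CPU is idle: the task can run there right away. */
  	if (idle_cpu(this_cpu))
  		return this_cpu;

  	/*
  	 * A sync wakeup means the waker is about to sleep; if the waker
  	 * is the only runnable task, this_cpu is effectively idle too.
  	 */
  	if (sync && cpu_rq(this_cpu)->nr_running == 1)
  		return this_cpu;

  	/* Otherwise stay put: prev_cpu is cache-warm. */
  	return prev_cpu;
  }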

This recovers much of the regression against the older kernels, but
still leaves some ground in the overloaded case. The default-enabled
WA_WEIGHT feature (introduced in the next patch) is an attempt to
address that overloaded situation.
Reported-by: Eric Farman <farman@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matthew Rosato <mjrosato@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: jinpuwang@gmail.com
Cc: vcaputo@pengaru.com
Fixes: 3fed382b ("sched/numa: Implement NUMA node level wake_affine()")
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 529a86e0
include/linux/sched/topology.h
@@ -71,14 +71,6 @@ struct sched_domain_shared {
 	atomic_t	ref;
 	atomic_t	nr_busy_cpus;
 	int		has_idle_cores;
-
-	/*
-	 * Some variables from the most recent sd_lb_stats for this domain,
-	 * used by wake_affine().
-	 */
-	unsigned long	nr_running;
-	unsigned long	load;
-	unsigned long	capacity;
 };
 
 struct sched_domain {
kernel/sched/fair.c
@@ -5356,115 +5356,36 @@ static int wake_wide(struct task_struct *p)
 	return 1;
 }
 
-struct llc_stats {
-	unsigned long	nr_running;
-	unsigned long	load;
-	unsigned long	capacity;
-	int		has_capacity;
-};
-
-static bool get_llc_stats(struct llc_stats *stats, int cpu)
-{
-	struct sched_domain_shared *sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
-
-	if (!sds)
-		return false;
-
-	stats->nr_running	= READ_ONCE(sds->nr_running);
-	stats->load		= READ_ONCE(sds->load);
-	stats->capacity		= READ_ONCE(sds->capacity);
-	stats->has_capacity	= stats->nr_running < per_cpu(sd_llc_size, cpu);
-
-	return true;
-}
-
 /*
- * Can a task be moved from prev_cpu to this_cpu without causing a load
- * imbalance that would trigger the load balancer?
+ * The purpose of wake_affine() is to quickly determine on which CPU we can run
+ * soonest. For the purpose of speed we only consider the waking and previous
+ * CPU.
  *
- * Since we're running on 'stale' values, we might in fact create an imbalance
- * but recomputing these values is expensive, as that'd mean iteration 2 cache
- * domains worth of CPUs.
+ * wake_affine_idle() - only considers 'now', it check if the waking CPU is (or
+ *			will be) idle.
  */
 static bool
-wake_affine_llc(struct sched_domain *sd, struct task_struct *p,
-		int this_cpu, int prev_cpu, int sync)
+wake_affine_idle(struct sched_domain *sd, struct task_struct *p,
+		 int this_cpu, int prev_cpu, int sync)
 {
-	struct llc_stats prev_stats, this_stats;
-	s64 this_eff_load, prev_eff_load;
-	unsigned long task_load;
-
-	if (!get_llc_stats(&prev_stats, prev_cpu) ||
-	    !get_llc_stats(&this_stats, this_cpu))
-		return false;
-
-	/*
-	 * If sync wakeup then subtract the (maximum possible)
-	 * effect of the currently running task from the load
-	 * of the current LLC.
-	 */
-	if (sync) {
-		unsigned long current_load = task_h_load(current);
-
-		/* in this case load hits 0 and this LLC is considered 'idle' */
-		if (current_load > this_stats.load)
-			return true;
-
-		this_stats.load -= current_load;
-	}
-
-	/*
-	 * The has_capacity stuff is not SMT aware, but by trying to balance
-	 * the nr_running on both ends we try and fill the domain at equal
-	 * rates, thereby first consuming cores before siblings.
-	 */
-
-	/* if the old cache has capacity, stay there */
-	if (prev_stats.has_capacity && prev_stats.nr_running < this_stats.nr_running+1)
-		return false;
-
-	/* if this cache has capacity, come here */
-	if (this_stats.has_capacity && this_stats.nr_running+1 < prev_stats.nr_running)
+	if (idle_cpu(this_cpu))
 		return true;
 
-	/*
-	 * Check to see if we can move the load without causing too much
-	 * imbalance.
-	 */
-	task_load = task_h_load(p);
-
-	this_eff_load = 100;
-	this_eff_load *= prev_stats.capacity;
-
-	prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
-	prev_eff_load *= this_stats.capacity;
-
-	this_eff_load *= this_stats.load + task_load;
-	prev_eff_load *= prev_stats.load - task_load;
+	if (sync && cpu_rq(this_cpu)->nr_running == 1)
+		return true;
 
-	return this_eff_load <= prev_eff_load;
+	return false;
 }
 
 static int wake_affine(struct sched_domain *sd, struct task_struct *p,
 		       int prev_cpu, int sync)
 {
 	int this_cpu = smp_processor_id();
-	bool affine;
-
-	/*
-	 * Default to no affine wakeups; wake_affine() should not effect a task
-	 * placement the load-balancer feels inclined to undo. The conservative
-	 * option is therefore to not move tasks when they wake up.
-	 */
-	affine = false;
+	bool affine = false;
 
-	/*
-	 * If the wakeup is across cache domains, try to evaluate if movement
-	 * makes sense, otherwise rely on select_idle_siblings() to do
-	 * placement inside the cache domain.
-	 */
-	if (!cpus_share_cache(prev_cpu, this_cpu))
-		affine = wake_affine_llc(sd, p, this_cpu, prev_cpu, sync);
+	if (sched_feat(WA_IDLE) && !affine)
+		affine = wake_affine_idle(sd, p, this_cpu, prev_cpu, sync);
 
 	schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
 	if (affine) {
@@ -7600,7 +7521,6 @@ static inline enum fbq_type fbq_classify_rq(struct rq *rq)
  */
 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
 {
-	struct sched_domain_shared *shared = env->sd->shared;
 	struct sched_domain *child = env->sd->child;
 	struct sched_group *sg = env->sd->groups;
 	struct sg_lb_stats *local = &sds->local_stat;
@@ -7672,22 +7592,6 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 		if (env->dst_rq->rd->overload != overload)
 			env->dst_rq->rd->overload = overload;
 	}
-
-	if (!shared)
-		return;
-
-	/*
-	 * Since these are sums over groups they can contain some CPUs
-	 * multiple times for the NUMA domains.
-	 *
-	 * Currently only wake_affine_llc() and find_busiest_group()
-	 * uses these numbers, only the last is affected by this problem.
-	 *
-	 * XXX fix that.
-	 */
-	WRITE_ONCE(shared->nr_running,	sds->total_running);
-	WRITE_ONCE(shared->load,	sds->total_load);
-	WRITE_ONCE(shared->capacity,	sds->total_capacity);
 }
 
 /**
kernel/sched/features.h
@@ -81,3 +81,4 @@ SCHED_FEAT(RT_RUNTIME_SHARE, true)
 SCHED_FEAT(LB_MIN, false)
 SCHED_FEAT(ATTACH_AGE_LOAD, true)
 
+SCHED_FEAT(WA_IDLE, true)
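
Note: the SCHED_FEAT(WA_IDLE, true) entry makes the new heuristic
default-enabled. On kernels built with CONFIG_SCHED_DEBUG it can also
be toggled at runtime through the sched_features debugfs file
(assuming debugfs is mounted at the usual location), e.g.:

  echo NO_WA_IDLE > /sys/kernel/debug/sched_features   # disable for testing
  echo WA_IDLE > /sys/kernel/debug/sched_features      # re-enable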