Commit e022e0d3 authored by Peter Zijlstra, committed by Ingo Molnar

sched/fair: Update blocked load from NEWIDLE

Since we already iterate CPUs looking for work on NEWIDLE, use this
iteration to age the blocked load. If the domain for which this is
done completely spans the idle set, we can push the ILB-based aging
forward.
Suggested-by: Brendan Jackman <brendan.jackman@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent a4064fb6
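As a rough illustration of the idea before the diff (a minimal userspace sketch with made-up names, not kernel code): a newly idle CPU is already walking the other CPUs of its sched domain looking for work, so the same walk can refresh any blocked-load average whose timestamp is stale; and if the domain happens to cover every idle CPU, the idle load balancer's own stats pass can be pushed back, since nothing stale remains.

/* demo.c - hypothetical model of NEWIDLE blocked-load aging; cc demo.c */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS            8
#define LOAD_AVG_PERIOD_MS 32   /* PELT half-life, used as the defer window */

static unsigned long jiffies = 100;        /* monotonically increasing tick */
static unsigned long idle_mask;            /* bit i set => CPU i is nohz-idle */
static unsigned long last_update[NR_CPUS]; /* per-CPU blocked-load timestamp */
static unsigned long next_stats;           /* when the ILB would age stats */

/* wrap-safe "a is after b", same trick as the kernel's time_after() */
static bool after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

/* the newly idle CPU walks its domain; aging piggybacks on that walk */
static void newidle_balance(unsigned long domain_mask)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!(domain_mask & (1UL << cpu)))
			continue;               /* outside this domain */
		if (!(idle_mask & (1UL << cpu)))
			continue;               /* busy CPUs age their own load */
		if (!after(jiffies, last_update[cpu]))
			continue;               /* already fresh this jiffy */
		last_update[cpu] = jiffies;     /* ~ update_blocked_averages() */
		printf("aged blocked load of CPU %d\n", cpu);
	}
	/* domain spans every idle CPU => defer the ILB stats pass */
	if ((idle_mask & ~domain_mask) == 0)
		next_stats = jiffies + LOAD_AVG_PERIOD_MS;
}

int main(void)
{
	idle_mask = 0x36;           /* CPUs 1, 2, 4, 5 idle */
	newidle_balance(0xff);      /* a domain spanning all 8 CPUs */
	printf("ILB stats pass deferred until %lu\n", next_stats);
	return 0;
}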
kernel/sched/core.c
@@ -6074,6 +6074,7 @@ void __init sched_init(void)
 		rq_attach_root(rq, &def_root_domain);
 #ifdef CONFIG_NO_HZ_COMMON
 		rq->last_load_update_tick = jiffies;
+		rq->last_blocked_load_update_tick = jiffies;
 		atomic_set(&rq->nohz_flags, 0);
 #endif
 #endif /* CONFIG_SMP */
kernel/sched/fair.c
@@ -5376,6 +5376,14 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
 	}
 	return load;
 }
+
+static struct {
+	cpumask_var_t idle_cpus_mask;
+	atomic_t nr_cpus;
+	unsigned long next_balance;	/* in jiffy units */
+	unsigned long next_stats;
+} nohz ____cacheline_aligned;
+
 #endif /* CONFIG_NO_HZ_COMMON */

 /**
@@ -7022,6 +7030,7 @@ enum fbq_type { regular, remote, all };
 #define LBF_NEED_BREAK	0x02
 #define LBF_DST_PINNED	0x04
 #define LBF_SOME_PINNED	0x08
+#define LBF_NOHZ_STATS	0x10

 struct lb_env {
 	struct sched_domain	*sd;
@@ -7460,6 +7469,10 @@ static void update_blocked_averages(int cpu)
 		if (cfs_rq_is_decayed(cfs_rq))
 			list_del_leaf_cfs_rq(cfs_rq);
 	}
+
+#ifdef CONFIG_NO_HZ_COMMON
+	rq->last_blocked_load_update_tick = jiffies;
+#endif
 	rq_unlock_irqrestore(rq, &rf);
 }
@@ -7519,6 +7532,9 @@ static inline void update_blocked_averages(int cpu)
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
 	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+#ifdef CONFIG_NO_HZ_COMMON
+	rq->last_blocked_load_update_tick = jiffies;
+#endif
 	rq_unlock_irqrestore(rq, &rf);
 }
@@ -7853,6 +7869,21 @@ group_type group_classify(struct sched_group *group,
 	return group_other;
 }

+static void update_nohz_stats(struct rq *rq)
+{
+#ifdef CONFIG_NO_HZ_COMMON
+	unsigned int cpu = rq->cpu;
+
+	if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
+		return;
+
+	if (!time_after(jiffies, rq->last_blocked_load_update_tick))
+		return;
+
+	update_blocked_averages(cpu);
+#endif
+}
+
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
  * @env: The load balancing environment.
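A note on the staleness test in update_nohz_stats(): time_after() compares jiffy values wrap-safely, and since update_blocked_averages() stamps rq->last_blocked_load_update_tick with the current jiffies, a given rq is aged at most once per jiffy no matter how many NEWIDLE passes look at it. The comparison is essentially a signed subtraction (the real macro in <linux/jiffies.h> additionally type-checks its arguments); a standalone illustration:

/* wrap_demo.c - the wrap-safe comparison, standalone */
#include <assert.h>
#include <limits.h>

#define time_after(a, b)  ((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long t = ULONG_MAX - 1;  /* jiffies just before wraparound */
	assert(time_after(t + 10UL, t));  /* still "after" across the wrap */
	assert(!time_after(t, t));        /* equal is not after */
	return 0;
}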
@@ -7875,6 +7906,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
 		struct rq *rq = cpu_rq(i);

+		if (env->flags & LBF_NOHZ_STATS)
+			update_nohz_stats(rq);
+
 		/* Bias balancing toward CPUs of our domain: */
 		if (local_group)
 			load = target_load(i, load_idx);
@@ -8030,6 +8064,15 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 	if (child && child->flags & SD_PREFER_SIBLING)
 		prefer_sibling = 1;

+#ifdef CONFIG_NO_HZ_COMMON
+	if (env->idle == CPU_NEWLY_IDLE) {
+		env->flags |= LBF_NOHZ_STATS;
+
+		if (cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd)))
+			nohz.next_stats = jiffies + msecs_to_jiffies(LOAD_AVG_PERIOD);
+	}
+#endif
+
 	load_idx = get_sd_load_idx(env->sd, env->idle);

 	do {
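The cpumask_subset() test above encodes the commit message's second claim: when every CPU in nohz.idle_cpus_mask falls inside sched_domain_span(env->sd), the group walk performed by this NEWIDLE pass visits (and ages) every idle rq, so the ILB has nothing left to freshen and its next stats pass can wait another LOAD_AVG_PERIOD (32) milliseconds. With flat bitmasks standing in for struct cpumask (illustrative, not the kernel's implementation), the subset test is just:

#include <stdbool.h>
#include <stdio.h>

/* "a is a subset of b": no bit of a lies outside b */
static bool subset(unsigned long a, unsigned long b)
{
	return (a & ~b) == 0;
}

int main(void)
{
	printf("%d\n", subset(0x06, 0x0f));  /* 1: idle CPUs within the span */
	printf("%d\n", subset(0x16, 0x0f));  /* 0: an idle CPU outside it */
	return 0;
}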
@@ -9049,12 +9092,6 @@ static inline int on_null_domain(struct rq *rq)
  * needed, they will kick the idle load balancer, which then does idle
  * load balancing for all the idle CPUs.
  */
-static struct {
-	cpumask_var_t idle_cpus_mask;
-	atomic_t nr_cpus;
-	unsigned long next_balance;	/* in jiffy units */
-	unsigned long next_stats;
-} nohz ____cacheline_aligned;

 static inline int find_new_ilb(void)
 {
kernel/sched/sched.h
@@ -762,6 +762,7 @@ struct rq {
 #ifdef CONFIG_NO_HZ_COMMON
 #ifdef CONFIG_SMP
 	unsigned long last_load_update_tick;
+	unsigned long last_blocked_load_update_tick;
 #endif /* CONFIG_SMP */
 	atomic_t nohz_flags;
 #endif /* CONFIG_NO_HZ_COMMON */