Commit 63928384 authored by Peter Zijlstra, committed by Ingo Molnar

sched/nohz: Optimize nohz_idle_balance()

Avoid calling update_blocked_averages() on a runqueue that does not in fact
have any blocked load, by re-using/extending update_nohz_stats().
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 1936c53c
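For orientation, here is a sketch of what the re-used helper looks like after this patch, assembled from the hunks below; lines that fall outside the visible hunk context are elided or marked as inferred, so treat it as an illustration of the new force parameter rather than verbatim kernel code.

static bool update_nohz_stats(struct rq *rq, bool force)
{
#ifdef CONFIG_NO_HZ_COMMON
	unsigned int cpu = rq->cpu;

	/* ... checks outside the visible hunks elided ... */

	if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
		return false;

	/* Rate limit: skip the update unless forced or the interval has passed. */
	if (!force && !time_after(jiffies, rq->last_blocked_load_update_tick))
		return true;

	update_blocked_averages(cpu);

	/*
	 * Return value inferred from the nohz_idle_balance() hunk below,
	 * which accumulates it into has_blocked_load.
	 */
	return rq->has_blocked_load;
#else
	return false;	/* assumed stub for !CONFIG_NO_HZ_COMMON builds */
#endif
}

update_sg_lb_stats() keeps the rate limit by passing force=false, while nohz_idle_balance() passes force=true: the force flag only bypasses the jiffies check, so the helper can still return early when the rq has no blocked load or is no longer in the nohz mask, which is exactly the useless update_blocked_averages() call the changelog wants to avoid.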
kernel/sched/fair.c
@@ -7898,7 +7898,7 @@ group_type group_classify(struct sched_group *group,
 	return group_other;
 }
 
-static bool update_nohz_stats(struct rq *rq)
+static bool update_nohz_stats(struct rq *rq, bool force)
 {
 #ifdef CONFIG_NO_HZ_COMMON
 	unsigned int cpu = rq->cpu;
@@ -7909,7 +7909,7 @@ static bool update_nohz_stats(struct rq *rq)
 	if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
 		return false;
 
-	if (!time_after(jiffies, rq->last_blocked_load_update_tick))
+	if (!force && !time_after(jiffies, rq->last_blocked_load_update_tick))
 		return true;
 
 	update_blocked_averages(cpu);
@@ -7942,7 +7942,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
 		struct rq *rq = cpu_rq(i);
 
-		if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq))
+		if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false))
 			env->flags |= LBF_NOHZ_AGAIN;
 
 		/* Bias balancing toward CPUs of our domain: */
@@ -9552,8 +9552,7 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 		rq = cpu_rq(balance_cpu);
 
-		update_blocked_averages(rq->cpu);
-		has_blocked_load |= rq->has_blocked_load;
+		has_blocked_load |= update_nohz_stats(rq, true);
 
 		/*
 		 * If time for next balance is due,