Commit efd984c4 authored by Valentin Schneider, committed by Peter Zijlstra

sched/fair: Add NOHZ balancer flag for nohz.next_balance updates

A following patch will trigger NOHZ idle balances as a means to update
nohz.next_balance. Vincent noted that blocked load updates can have
non-negligible overhead, which should be avoided if the intent is to only
update nohz.next_balance.

Add a new NOHZ balance kick flag, NOHZ_NEXT_KICK. Gate the NOHZ blocked load
update on the presence of NOHZ_STATS_KICK - currently all NOHZ balance kicks
have the NOHZ_STATS_KICK flag set, so no change in behaviour is expected.
Suggested-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20210823111700.2842997-2-valentin.schneider@arm.com
parent 9e1ff307
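
For context, a minimal sketch of the intended usage (not part of this commit): the follow-up patch is expected to raise an ILB kick carrying only the new flag. kick_ilb() is the existing kick helper in kernel/sched/fair.c; the wrapper below is hypothetical.

	/*
	 * Hypothetical illustration: kick the idle load balancer purely to
	 * refresh nohz.next_balance. The kick carries neither NOHZ_STATS_KICK
	 * (so the blocked-load update gated in _nohz_idle_balance() is skipped)
	 * nor NOHZ_BALANCE_KICK (so no full rebalance is run).
	 */
	static void nohz_kick_next_balance_update(void)
	{
		kick_ilb(NOHZ_NEXT_KICK);
	}
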
kernel/sched/fair.c

@@ -10375,7 +10375,7 @@ static void nohz_balancer_kick(struct rq *rq)
 		goto out;
 
 	if (rq->nr_running >= 2) {
-		flags = NOHZ_KICK_MASK;
+		flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
 		goto out;
 	}
 
@@ -10389,7 +10389,7 @@ static void nohz_balancer_kick(struct rq *rq)
 		 * on.
 		 */
 		if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) {
-			flags = NOHZ_KICK_MASK;
+			flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
 			goto unlock;
 		}
 	}
@@ -10403,7 +10403,7 @@ static void nohz_balancer_kick(struct rq *rq)
 		 */
 		for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
 			if (sched_asym_prefer(i, cpu)) {
-				flags = NOHZ_KICK_MASK;
+				flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
 				goto unlock;
 			}
 		}
@@ -10416,7 +10416,7 @@ static void nohz_balancer_kick(struct rq *rq)
 		 * to run the misfit task on.
 		 */
 		if (check_misfit_status(rq, sd)) {
-			flags = NOHZ_KICK_MASK;
+			flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
 			goto unlock;
 		}
 
@@ -10443,7 +10443,7 @@ static void nohz_balancer_kick(struct rq *rq)
 		 */
 		nr_busy = atomic_read(&sds->nr_busy_cpus);
 		if (nr_busy > 1) {
-			flags = NOHZ_KICK_MASK;
+			flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
 			goto unlock;
 		}
 	}
@@ -10605,6 +10605,7 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
 	 * setting the flag, we are sure to not clear the state and not
 	 * check the load of an idle cpu.
 	 */
-	WRITE_ONCE(nohz.has_blocked, 0);
+	if (flags & NOHZ_STATS_KICK)
+		WRITE_ONCE(nohz.has_blocked, 0);
 
 	/*
@@ -10627,12 +10628,14 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
 		 * balancing owner will pick it up.
 		 */
 		if (need_resched()) {
-			has_blocked_load = true;
+			if (flags & NOHZ_STATS_KICK)
+				has_blocked_load = true;
 			goto abort;
 		}
 
 		rq = cpu_rq(balance_cpu);
 
-		has_blocked_load |= update_nohz_stats(rq);
+		if (flags & NOHZ_STATS_KICK)
+			has_blocked_load |= update_nohz_stats(rq);
 
 		/*
@@ -10664,6 +10667,7 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
 	if (likely(update_next_balance))
 		nohz.next_balance = next_balance;
 
-	WRITE_ONCE(nohz.next_blocked,
-		   now + msecs_to_jiffies(LOAD_AVG_PERIOD));
+	if (flags & NOHZ_STATS_KICK)
+		WRITE_ONCE(nohz.next_blocked,
+			   now + msecs_to_jiffies(LOAD_AVG_PERIOD));
kernel/sched/sched.h

@@ -2709,12 +2709,18 @@ extern void cfs_bandwidth_usage_dec(void);
 #define NOHZ_BALANCE_KICK_BIT	0
 #define NOHZ_STATS_KICK_BIT	1
 #define NOHZ_NEWILB_KICK_BIT	2
+#define NOHZ_NEXT_KICK_BIT	3
 
+/* Run rebalance_domains() */
 #define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
+/* Update blocked load */
 #define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)
+/* Update blocked load when entering idle */
 #define NOHZ_NEWILB_KICK	BIT(NOHZ_NEWILB_KICK_BIT)
+/* Update nohz.next_balance */
+#define NOHZ_NEXT_KICK		BIT(NOHZ_NEXT_KICK_BIT)
 
-#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
+#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK | NOHZ_NEXT_KICK)
 
 #define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
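
For reference, a self-contained sketch (illustration only, not kernel code) of how the updated flag set composes, assuming the kernel's BIT(n) expands to 1UL << (n): a kick carrying only NOHZ_NEXT_KICK still matches NOHZ_KICK_MASK, but requests neither a rebalance nor a blocked-load update.

	#include <stdio.h>

	#define BIT(n)	(1UL << (n))		/* mirrors the kernel's BIT() */

	#define NOHZ_BALANCE_KICK_BIT	0
	#define NOHZ_STATS_KICK_BIT	1
	#define NOHZ_NEWILB_KICK_BIT	2
	#define NOHZ_NEXT_KICK_BIT	3

	#define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)	/* 0x1 */
	#define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)	/* 0x2 */
	#define NOHZ_NEWILB_KICK	BIT(NOHZ_NEWILB_KICK_BIT)	/* 0x4 */
	#define NOHZ_NEXT_KICK		BIT(NOHZ_NEXT_KICK_BIT)		/* 0x8 */
	#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK | NOHZ_NEXT_KICK)

	int main(void)
	{
		unsigned long flags = NOHZ_NEXT_KICK;	/* a next_balance-only kick */

		/* Still within the kick mask, so the idle balancer is invoked... */
		printf("in kick mask: %d\n", !!(flags & NOHZ_KICK_MASK));	/* 1 */
		/* ...but the blocked-load update stays gated off... */
		printf("stats update: %d\n", !!(flags & NOHZ_STATS_KICK));	/* 0 */
		/* ...and no full rebalance is requested either. */
		printf("rebalance:    %d\n", !!(flags & NOHZ_BALANCE_KICK));	/* 0 */
		return 0;
	}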