Commit e669ac8a authored by Valentin Schneider's avatar Valentin Schneider Committed by Peter Zijlstra

sched: Remove checks against SD_LOAD_BALANCE

The SD_LOAD_BALANCE flag is set unconditionally for all domains in
sd_init(). By making the sched_domain->flags sysctl interface read-only, we
have removed the last piece of code that could clear that flag - as such,
it will now always be present. Rather than keep carrying it along, we
can work towards getting rid of it entirely.

cpusets don't need it because they can make CPUs be attached to the NULL
domain (e.g. cpuset with sched_load_balance=0), or to a partitioned
root_domain, i.e. a sched_domain hierarchy that doesn't span the entire
system (e.g. root cpuset with sched_load_balance=0 and sibling cpusets with
sched_load_balance=1).

isolcpus apply the same "trick": isolated CPUs are explicitly taken out of
the sched_domain rebuild (using housekeeping_cpumask()), so they get the
NULL domain treatment as well.

Remove the checks against SD_LOAD_BALANCE.
Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200415210512.805-4-valentin.schneider@arm.com
parent 9818427c
...@@ -6649,9 +6649,6 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f ...@@ -6649,9 +6649,6 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
rcu_read_lock(); rcu_read_lock();
for_each_domain(cpu, tmp) { for_each_domain(cpu, tmp) {
if (!(tmp->flags & SD_LOAD_BALANCE))
break;
/* /*
* If both 'cpu' and 'prev_cpu' are part of this domain, * If both 'cpu' and 'prev_cpu' are part of this domain,
* cpu is a valid SD_WAKE_AFFINE target. * cpu is a valid SD_WAKE_AFFINE target.
...@@ -9790,9 +9787,8 @@ static int active_load_balance_cpu_stop(void *data) ...@@ -9790,9 +9787,8 @@ static int active_load_balance_cpu_stop(void *data)
/* Search for an sd spanning us and the target CPU. */ /* Search for an sd spanning us and the target CPU. */
rcu_read_lock(); rcu_read_lock();
for_each_domain(target_cpu, sd) { for_each_domain(target_cpu, sd) {
if ((sd->flags & SD_LOAD_BALANCE) && if (cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) break;
break;
} }
if (likely(sd)) { if (likely(sd)) {
...@@ -9881,9 +9877,6 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle) ...@@ -9881,9 +9877,6 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
} }
max_cost += sd->max_newidle_lb_cost; max_cost += sd->max_newidle_lb_cost;
if (!(sd->flags & SD_LOAD_BALANCE))
continue;
/* /*
* Stop the load balance at this level. There is another * Stop the load balance at this level. There is another
* CPU in our sched group which is doing load balancing more * CPU in our sched group which is doing load balancing more
...@@ -10472,9 +10465,6 @@ int newidle_balance(struct rq *this_rq, struct rq_flags *rf) ...@@ -10472,9 +10465,6 @@ int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
int continue_balancing = 1; int continue_balancing = 1;
u64 t0, domain_cost; u64 t0, domain_cost;
if (!(sd->flags & SD_LOAD_BALANCE))
continue;
if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) { if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
update_next_balance(sd, &next_balance); update_next_balance(sd, &next_balance);
break; break;
......
...@@ -33,14 +33,6 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, ...@@ -33,14 +33,6 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
cpumask_clear(groupmask); cpumask_clear(groupmask);
printk(KERN_DEBUG "%*s domain-%d: ", level, "", level); printk(KERN_DEBUG "%*s domain-%d: ", level, "", level);
if (!(sd->flags & SD_LOAD_BALANCE)) {
printk("does not load-balance\n");
if (sd->parent)
printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain has parent");
return -1;
}
printk(KERN_CONT "span=%*pbl level=%s\n", printk(KERN_CONT "span=%*pbl level=%s\n",
cpumask_pr_args(sched_domain_span(sd)), sd->name); cpumask_pr_args(sched_domain_span(sd)), sd->name);
...@@ -151,8 +143,7 @@ static int sd_degenerate(struct sched_domain *sd) ...@@ -151,8 +143,7 @@ static int sd_degenerate(struct sched_domain *sd)
return 1; return 1;
/* Following flags need at least 2 groups */ /* Following flags need at least 2 groups */
if (sd->flags & (SD_LOAD_BALANCE | if (sd->flags & (SD_BALANCE_NEWIDLE |
SD_BALANCE_NEWIDLE |
SD_BALANCE_FORK | SD_BALANCE_FORK |
SD_BALANCE_EXEC | SD_BALANCE_EXEC |
SD_SHARE_CPUCAPACITY | SD_SHARE_CPUCAPACITY |
...@@ -183,15 +174,14 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) ...@@ -183,15 +174,14 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
/* Flags needing groups don't count if only 1 group in parent */ /* Flags needing groups don't count if only 1 group in parent */
if (parent->groups == parent->groups->next) { if (parent->groups == parent->groups->next) {
pflags &= ~(SD_LOAD_BALANCE | pflags &= ~(SD_BALANCE_NEWIDLE |
SD_BALANCE_NEWIDLE | SD_BALANCE_FORK |
SD_BALANCE_FORK | SD_BALANCE_EXEC |
SD_BALANCE_EXEC | SD_ASYM_CPUCAPACITY |
SD_ASYM_CPUCAPACITY | SD_SHARE_CPUCAPACITY |
SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES |
SD_SHARE_PKG_RESOURCES | SD_PREFER_SIBLING |
SD_PREFER_SIBLING | SD_SHARE_POWERDOMAIN);
SD_SHARE_POWERDOMAIN);
if (nr_node_ids == 1) if (nr_node_ids == 1)
pflags &= ~SD_SERIALIZE; pflags &= ~SD_SERIALIZE;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment