Commit 18bd1b4b authored by Brendan Jackman, committed by Ingo Molnar

sched/fair: Move select_task_rq_fair() slow-path into its own function

In preparation for changes that would otherwise require adding a new
level of indentation to the while(sd) loop, create a new function
find_idlest_cpu() which contains this loop, and rename the existing
find_idlest_cpu() to find_idlest_group_cpu().

Code inside the while(sd) loop is unchanged. @new_cpu is added as a
variable in the new function, with the same initial value as the
@new_cpu in select_task_rq_fair().
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Brendan Jackman <brendan.jackman@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Josef Bacik <jbacik@fb.com>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20171005114516.18617-2-brendan.jackman@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 583ffd99
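
The slow path being moved is easiest to follow in isolation: starting from the widest sched_domain that has sd_flag set, it looks for an idler CPU at each level, and whenever a level yields a better candidate it re-bases the remaining search around that CPU via for_each_domain(). Below is a minimal userspace sketch of that control flow only. toy_domain, toy_cpu_domain, toy_find_idlest_group_cpu() and TOY_BALANCE_FLAG are invented stand-ins for the kernel's sched_domain machinery (which tracks real load, idle states and groups), so this shows the shape of the walk, not the actual selection logic.

#include <stdio.h>

#define TOY_BALANCE_FLAG 0x1

struct toy_domain {
	struct toy_domain *parent;	/* wider level, or NULL at the top */
	struct toy_domain *child;	/* narrower level, or NULL at the bottom */
	int span_weight;		/* number of CPUs this level spans */
	int flags;
};

/* Lowest-level domain of each CPU; the kernel's for_each_domain() is
 * modelled here as a walk up the parent links from this point. */
static struct toy_domain *toy_cpu_domain[4];

/* Invented stand-in for find_idlest_group() + find_idlest_group_cpu():
 * wide levels steer the search toward CPU 2, narrow levels find
 * nothing better than the current cpu. */
static int toy_find_idlest_group_cpu(struct toy_domain *sd, int cpu)
{
	return sd->span_weight > 2 ? 2 : cpu;
}

static int toy_find_idlest_cpu(struct toy_domain *sd, int cpu,
			       int prev_cpu, int sd_flag)
{
	int new_cpu = prev_cpu;

	while (sd) {
		struct toy_domain *tmp;
		int weight, next;

		if (!(sd->flags & sd_flag)) {
			sd = sd->child;		/* level can't balance: go narrower */
			continue;
		}

		next = toy_find_idlest_group_cpu(sd, cpu);
		if (next == -1 || next == cpu) {
			sd = sd->child;		/* no better CPU here: go narrower */
			continue;
		}

		/* Found a better CPU: continue the search around it, from
		 * the widest matching level narrower than the current one. */
		new_cpu = cpu = next;
		weight = sd->span_weight;
		sd = NULL;
		for (tmp = toy_cpu_domain[cpu]; tmp; tmp = tmp->parent) {
			if (weight <= tmp->span_weight)
				break;
			if (tmp->flags & sd_flag)
				sd = tmp;
		}
		/* sd stays NULL if no narrower level matched: loop ends */
	}

	return new_cpu;
}

int main(void)
{
	/* Two 2-CPU cores under one 4-CPU top level. */
	static struct toy_domain top   = { NULL, NULL, 4, TOY_BALANCE_FLAG };
	static struct toy_domain core0 = { &top, NULL, 2, TOY_BALANCE_FLAG };
	static struct toy_domain core1 = { &top, NULL, 2, TOY_BALANCE_FLAG };

	top.child = &core0;
	toy_cpu_domain[0] = toy_cpu_domain[1] = &core0;
	toy_cpu_domain[2] = toy_cpu_domain[3] = &core1;

	/* A task waking on CPU 0 gets steered to CPU 2. */
	printf("picked CPU %d\n",
	       toy_find_idlest_cpu(&top, 0, 0, TOY_BALANCE_FLAG));
	return 0;
}

Note how new_cpu starts out as prev_cpu, so if no level of the hierarchy yields a better candidate the task simply stays where it was; this matches the initial value of @new_cpu in select_task_rq_fair() described in the commit message above.
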
@@ -5859,10 +5859,10 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 }
 
 /*
- * find_idlest_cpu - find the idlest cpu among the cpus in group.
+ * find_idlest_group_cpu - find the idlest cpu among the cpus in group.
  */
 static int
-find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
+find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 {
 	unsigned long load, min_load = ULONG_MAX;
 	unsigned int min_exit_latency = UINT_MAX;
@@ -5911,6 +5911,50 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 	return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
 }
 
+static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p,
+				  int cpu, int prev_cpu, int sd_flag)
+{
+	int new_cpu = prev_cpu;
+
+	while (sd) {
+		struct sched_group *group;
+		struct sched_domain *tmp;
+		int weight;
+
+		if (!(sd->flags & sd_flag)) {
+			sd = sd->child;
+			continue;
+		}
+
+		group = find_idlest_group(sd, p, cpu, sd_flag);
+		if (!group) {
+			sd = sd->child;
+			continue;
+		}
+
+		new_cpu = find_idlest_group_cpu(group, p, cpu);
+		if (new_cpu == -1 || new_cpu == cpu) {
+			/* Now try balancing at a lower domain level of cpu */
+			sd = sd->child;
+			continue;
+		}
+
+		/* Now try balancing at a lower domain level of new_cpu */
+		cpu = new_cpu;
+		weight = sd->span_weight;
+		sd = NULL;
+		for_each_domain(cpu, tmp) {
+			if (weight <= tmp->span_weight)
+				break;
+			if (tmp->flags & sd_flag)
+				sd = tmp;
+		}
+		/* while loop will break here if sd == NULL */
+	}
+
+	return new_cpu;
+}
+
 #ifdef CONFIG_SCHED_SMT
 
 static inline void set_idle_cores(int cpu, int val)
@@ -6277,39 +6321,8 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 		if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
 			new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
 
-	} else while (sd) {
-		struct sched_group *group;
-		int weight;
-
-		if (!(sd->flags & sd_flag)) {
-			sd = sd->child;
-			continue;
-		}
-
-		group = find_idlest_group(sd, p, cpu, sd_flag);
-		if (!group) {
-			sd = sd->child;
-			continue;
-		}
-
-		new_cpu = find_idlest_cpu(group, p, cpu);
-		if (new_cpu == -1 || new_cpu == cpu) {
-			/* Now try balancing at a lower domain level of cpu */
-			sd = sd->child;
-			continue;
-		}
-
-		/* Now try balancing at a lower domain level of new_cpu */
-		cpu = new_cpu;
-		weight = sd->span_weight;
-		sd = NULL;
-		for_each_domain(cpu, tmp) {
-			if (weight <= tmp->span_weight)
-				break;
-			if (tmp->flags & sd_flag)
-				sd = tmp;
-		}
-		/* while loop will break here if sd == NULL */
+	} else {
+		new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
 	}
 	rcu_read_unlock();
...