Commit 758b2cdc authored by Rusty Russell, committed by Ingo Molnar

sched: wrap sched_group and sched_domain cpumask accesses.

Impact: trivial wrap of member accesses

This eases the transition in the next patch.

We also get rid of a temporary cpumask in find_idlest_cpu() thanks to
for_each_cpu_and, and in sched_balance_self() by reading the weight
before setting sd to NULL.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 1e5ce4f4
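
The whole patch follows one pattern: instead of touching sg->cpumask and sd->span directly, callers go through inline accessors (sched_group_cpus() and sched_domain_span()) that return a struct cpumask *, so a later patch can change how the mask is stored without editing every call site. Below is a minimal user-space sketch of that pattern; "struct group" and group_cpus() are invented stand-ins for the kernel types, not the kernel code itself.

/*
 * Illustrative user-space sketch only -- not kernel code. "struct group"
 * and group_cpus() are made-up stand-ins for sched_group and
 * sched_group_cpus(); the point is that every caller goes through the
 * accessor, so the mask's representation can change in a later patch.
 */
#include <stdio.h>

struct mask { unsigned long bits; };

struct group {
	struct mask cpumask;		/* could become a pointer later */
};

static inline struct mask *group_cpus(struct group *g)
{
	return &g->cpumask;		/* callers never name ->cpumask directly */
}

int main(void)
{
	struct group g = { .cpumask = { .bits = 0x3 } };

	/* all uses go through the accessor, as the patch does for sched_group */
	printf("mask bits: %#lx\n", group_cpus(&g)->bits);
	return 0;
}
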
@@ -786,6 +786,11 @@ struct sched_group {
 	u32 reciprocal_cpu_power;
 };
 
+static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+{
+	return &sg->cpumask;
+}
+
 enum sched_domain_level {
 	SD_LV_NONE = 0,
 	SD_LV_SIBLING,
@@ -866,6 +871,11 @@ struct sched_domain {
 #endif
 };
 
+static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
+{
+	return &sd->span;
+}
+
 extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
 				    struct sched_domain_attr *dattr_new);
 extern int arch_reinit_sched_domains(void);
......
@@ -1501,7 +1501,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
 	struct sched_domain *sd = data;
 	int i;
 
-	for_each_cpu_mask(i, sd->span) {
+	for_each_cpu(i, sched_domain_span(sd)) {
 		/*
 		 * If there are currently no tasks on the cpu pretend there
 		 * is one of average load so that when a new task gets to
@@ -1522,7 +1522,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
 	if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
 		shares = tg->shares;
 
-	for_each_cpu_mask(i, sd->span)
+	for_each_cpu(i, sched_domain_span(sd))
 		update_group_shares_cpu(tg, i, shares, rq_weight);
 
 	return 0;
@@ -2053,15 +2053,17 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 		int i;
 
 		/* Skip over this group if it has no CPUs allowed */
-		if (!cpus_intersects(group->cpumask, p->cpus_allowed))
+		if (!cpumask_intersects(sched_group_cpus(group),
+					&p->cpus_allowed))
 			continue;
 
-		local_group = cpu_isset(this_cpu, group->cpumask);
+		local_group = cpumask_test_cpu(this_cpu,
+					       sched_group_cpus(group));
 
 		/* Tally up the load of all CPUs in the group */
 		avg_load = 0;
 
-		for_each_cpu(i, &group->cpumask) {
+		for_each_cpu(i, sched_group_cpus(group)) {
 			/* Bias balancing toward cpus of our domain */
 			if (local_group)
 				load = source_load(i, load_idx);
@@ -2093,17 +2095,14 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
  * find_idlest_cpu - find the idlest cpu among the cpus in group.
  */
 static int
-find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
-		cpumask_t *tmp)
+find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 {
 	unsigned long load, min_load = ULONG_MAX;
 	int idlest = -1;
 	int i;
 
 	/* Traverse only the allowed CPUs */
-	cpus_and(*tmp, group->cpumask, p->cpus_allowed);
-	for_each_cpu(i, tmp) {
+	for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
 		load = weighted_cpuload(i);
 
 		if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -2145,7 +2144,6 @@ static int sched_balance_self(int cpu, int flag)
 		update_shares(sd);
 
 	while (sd) {
-		cpumask_t span, tmpmask;
 		struct sched_group *group;
 		int new_cpu, weight;
@@ -2154,14 +2152,13 @@ static int sched_balance_self(int cpu, int flag)
 			continue;
 		}
 
-		span = sd->span;
 		group = find_idlest_group(sd, t, cpu);
 		if (!group) {
 			sd = sd->child;
 			continue;
 		}
 
-		new_cpu = find_idlest_cpu(group, t, cpu, &tmpmask);
+		new_cpu = find_idlest_cpu(group, t, cpu);
 		if (new_cpu == -1 || new_cpu == cpu) {
 			/* Now try balancing at a lower domain level of cpu */
 			sd = sd->child;
@@ -2170,10 +2167,10 @@ static int sched_balance_self(int cpu, int flag)
 		/* Now try balancing at a lower domain level of new_cpu */
 		cpu = new_cpu;
+		weight = cpumask_weight(sched_domain_span(sd));
 		sd = NULL;
-		weight = cpus_weight(span);
 		for_each_domain(cpu, tmp) {
-			if (weight <= cpus_weight(tmp->span))
+			if (weight <= cpumask_weight(sched_domain_span(tmp)))
 				break;
 			if (tmp->flags & flag)
 				sd = tmp;
@@ -2218,7 +2215,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 		cpu = task_cpu(p);
 
 		for_each_domain(this_cpu, sd) {
-			if (cpu_isset(cpu, sd->span)) {
+			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
 				update_shares(sd);
 				break;
 			}
@@ -2266,7 +2263,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 	else {
 		struct sched_domain *sd;
 		for_each_domain(this_cpu, sd) {
-			if (cpu_isset(cpu, sd->span)) {
+			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
 				schedstat_inc(sd, ttwu_wake_remote);
 				break;
 			}
@@ -3109,10 +3106,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		unsigned long sum_avg_load_per_task;
 		unsigned long avg_load_per_task;
 
-		local_group = cpu_isset(this_cpu, group->cpumask);
+		local_group = cpumask_test_cpu(this_cpu,
+					       sched_group_cpus(group));
 
 		if (local_group)
-			balance_cpu = first_cpu(group->cpumask);
+			balance_cpu = cpumask_first(sched_group_cpus(group));
 
 		/* Tally up the load of all CPUs in the group */
 		sum_weighted_load = sum_nr_running = avg_load = 0;
@@ -3121,13 +3119,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		max_cpu_load = 0;
 		min_cpu_load = ~0UL;
 
-		for_each_cpu(i, &group->cpumask) {
-			struct rq *rq;
-
-			if (!cpu_isset(i, *cpus))
-				continue;
-
-			rq = cpu_rq(i);
+		for_each_cpu_and(i, sched_group_cpus(group), cpus) {
+			struct rq *rq = cpu_rq(i);
 
 			if (*sd_idle && rq->nr_running)
 				*sd_idle = 0;
@@ -3238,8 +3231,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		 */
 		if ((sum_nr_running < min_nr_running) ||
 		    (sum_nr_running == min_nr_running &&
-		     first_cpu(group->cpumask) <
-		     first_cpu(group_min->cpumask))) {
+		     cpumask_first(sched_group_cpus(group)) <
+		     cpumask_first(sched_group_cpus(group_min)))) {
 			group_min = group;
 			min_nr_running = sum_nr_running;
 			min_load_per_task = sum_weighted_load /
@@ -3254,8 +3247,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		if (sum_nr_running <= group_capacity - 1) {
 			if (sum_nr_running > leader_nr_running ||
 			    (sum_nr_running == leader_nr_running &&
-			     first_cpu(group->cpumask) >
-			     first_cpu(group_leader->cpumask))) {
+			     cpumask_first(sched_group_cpus(group)) >
+			     cpumask_first(sched_group_cpus(group_leader)))) {
 				group_leader = group;
 				leader_nr_running = sum_nr_running;
 			}
@@ -3400,7 +3393,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
 	unsigned long max_load = 0;
 	int i;
 
-	for_each_cpu(i, &group->cpumask) {
+	for_each_cpu(i, sched_group_cpus(group)) {
 		unsigned long wl;
 
 		if (!cpu_isset(i, *cpus))
@@ -3746,7 +3739,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 	/* Search for an sd spanning us and the target CPU. */
 	for_each_domain(target_cpu, sd) {
 		if ((sd->flags & SD_LOAD_BALANCE) &&
-		    cpu_isset(busiest_cpu, sd->span))
+		    cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
 				break;
 	}
@@ -6618,7 +6611,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 	struct sched_group *group = sd->groups;
 	char str[256];
 
-	cpulist_scnprintf(str, sizeof(str), sd->span);
+	cpulist_scnprintf(str, sizeof(str), *sched_domain_span(sd));
 	cpus_clear(*groupmask);
 
 	printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
@@ -6633,11 +6626,11 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 	printk(KERN_CONT "span %s level %s\n", str, sd->name);
 
-	if (!cpu_isset(cpu, sd->span)) {
+	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
 		printk(KERN_ERR "ERROR: domain->span does not contain "
 				"CPU%d\n", cpu);
 	}
-	if (!cpu_isset(cpu, group->cpumask)) {
+	if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
 		printk(KERN_ERR "ERROR: domain->groups does not contain"
 				" CPU%d\n", cpu);
 	}
@@ -6657,31 +6650,32 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 			break;
 		}
 
-		if (!cpus_weight(group->cpumask)) {
+		if (!cpumask_weight(sched_group_cpus(group))) {
 			printk(KERN_CONT "\n");
 			printk(KERN_ERR "ERROR: empty group\n");
 			break;
 		}
 
-		if (cpus_intersects(*groupmask, group->cpumask)) {
+		if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
 			printk(KERN_CONT "\n");
 			printk(KERN_ERR "ERROR: repeated CPUs\n");
 			break;
 		}
 
-		cpus_or(*groupmask, *groupmask, group->cpumask);
+		cpumask_or(groupmask, groupmask, sched_group_cpus(group));
 
-		cpulist_scnprintf(str, sizeof(str), group->cpumask);
+		cpulist_scnprintf(str, sizeof(str), *sched_group_cpus(group));
 		printk(KERN_CONT " %s", str);
 
 		group = group->next;
 	} while (group != sd->groups);
 	printk(KERN_CONT "\n");
 
-	if (!cpus_equal(sd->span, *groupmask))
+	if (!cpumask_equal(sched_domain_span(sd), groupmask))
 		printk(KERN_ERR "ERROR: groups don't span domain->span\n");
 
-	if (sd->parent && !cpus_subset(*groupmask, sd->parent->span))
+	if (sd->parent &&
+	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
 		printk(KERN_ERR "ERROR: parent span is not a superset "
 			"of domain->span\n");
 	return 0;
@@ -6721,7 +6715,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
 
 static int sd_degenerate(struct sched_domain *sd)
 {
-	if (cpus_weight(sd->span) == 1)
+	if (cpumask_weight(sched_domain_span(sd)) == 1)
 		return 1;
 
 	/* Following flags need at least 2 groups */
@@ -6752,7 +6746,7 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
 	if (sd_degenerate(parent))
 		return 1;
 
-	if (!cpus_equal(sd->span, parent->span))
+	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
 		return 0;
 
 	/* Does parent contain flags not in child? */
@@ -6913,10 +6907,10 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 		int group = group_fn(i, cpu_map, &sg, tmpmask);
 		int j;
 
-		if (cpu_isset(i, *covered))
+		if (cpumask_test_cpu(i, covered))
 			continue;
 
-		cpus_clear(sg->cpumask);
+		cpumask_clear(sched_group_cpus(sg));
 		sg->__cpu_power = 0;
 
 		for_each_cpu(j, span) {
@@ -6924,7 +6918,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 				continue;
 
 			cpu_set(j, *covered);
-			cpu_set(j, sg->cpumask);
+			cpumask_set_cpu(j, sched_group_cpus(sg));
 		}
 		if (!first)
 			first = sg;
@@ -7119,11 +7113,11 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
 	if (!sg)
 		return;
 	do {
-		for_each_cpu(j, &sg->cpumask) {
+		for_each_cpu(j, sched_group_cpus(sg)) {
 			struct sched_domain *sd;
 
 			sd = &per_cpu(phys_domains, j);
-			if (j != first_cpu(sd->groups->cpumask)) {
+			if (j != cpumask_first(sched_group_cpus(sd->groups))) {
 				/*
 				 * Only add "power" once for each
 				 * physical package.
@@ -7200,7 +7194,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 	WARN_ON(!sd || !sd->groups);
 
-	if (cpu != first_cpu(sd->groups->cpumask))
+	if (cpu != cpumask_first(sched_group_cpus(sd->groups)))
 		return;
 
 	child = sd->child;
@@ -7372,7 +7366,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 			sd = &per_cpu(allnodes_domains, i);
 			SD_INIT(sd, ALLNODES);
 			set_domain_attribute(sd, attr);
-			sd->span = *cpu_map;
+			cpumask_copy(sched_domain_span(sd), cpu_map);
 			cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask);
 			p = sd;
 			sd_allnodes = 1;
@@ -7382,18 +7376,19 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		sd = &per_cpu(node_domains, i);
 		SD_INIT(sd, NODE);
 		set_domain_attribute(sd, attr);
-		sched_domain_node_span(cpu_to_node(i), &sd->span);
+		sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
 		sd->parent = p;
 		if (p)
 			p->child = sd;
-		cpus_and(sd->span, sd->span, *cpu_map);
+		cpumask_and(sched_domain_span(sd),
+			    sched_domain_span(sd), cpu_map);
 #endif
 
 		p = sd;
 		sd = &per_cpu(phys_domains, i);
 		SD_INIT(sd, CPU);
 		set_domain_attribute(sd, attr);
-		sd->span = *nodemask;
+		cpumask_copy(sched_domain_span(sd), nodemask);
 		sd->parent = p;
 		if (p)
 			p->child = sd;
@@ -7404,8 +7399,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		sd = &per_cpu(core_domains, i);
 		SD_INIT(sd, MC);
 		set_domain_attribute(sd, attr);
-		sd->span = cpu_coregroup_map(i);
-		cpus_and(sd->span, sd->span, *cpu_map);
+		*sched_domain_span(sd) = cpu_coregroup_map(i);
+		cpumask_and(sched_domain_span(sd),
+			    sched_domain_span(sd), cpu_map);
 		sd->parent = p;
 		p->child = sd;
 		cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask);
@@ -7416,8 +7412,8 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		sd = &per_cpu(cpu_domains, i);
 		SD_INIT(sd, SIBLING);
 		set_domain_attribute(sd, attr);
-		sd->span = per_cpu(cpu_sibling_map, i);
-		cpus_and(sd->span, sd->span, *cpu_map);
+		cpumask_and(sched_domain_span(sd),
+			    &per_cpu(cpu_sibling_map, i), cpu_map);
 		sd->parent = p;
 		p->child = sd;
 		cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
@@ -7503,7 +7499,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 			sd->groups = sg;
 		}
 		sg->__cpu_power = 0;
-		sg->cpumask = *nodemask;
+		cpumask_copy(sched_group_cpus(sg), nodemask);
 		sg->next = sg;
 		cpus_or(*covered, *covered, *nodemask);
 		prev = sg;
@@ -7530,7 +7526,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 				goto error;
 			}
 			sg->__cpu_power = 0;
-			sg->cpumask = *tmpmask;
+			cpumask_copy(sched_group_cpus(sg), tmpmask);
 			sg->next = prev->next;
 			cpus_or(*covered, *covered, *tmpmask);
 			prev->next = sg;
......
@@ -1024,7 +1024,6 @@ static void yield_task_fair(struct rq *rq)
 #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
 static int wake_idle(int cpu, struct task_struct *p)
 {
-	cpumask_t tmp;
 	struct sched_domain *sd;
 	int i;
@@ -1044,10 +1043,9 @@ static int wake_idle(int cpu, struct task_struct *p)
 		if ((sd->flags & SD_WAKE_IDLE)
 		    || ((sd->flags & SD_WAKE_IDLE_FAR)
 			&& !task_hot(p, task_rq(p)->clock, sd))) {
-			cpus_and(tmp, sd->span, p->cpus_allowed);
-			cpus_and(tmp, tmp, cpu_active_map);
-			for_each_cpu_mask_nr(i, tmp) {
-				if (idle_cpu(i)) {
+			for_each_cpu_and(i, sched_domain_span(sd),
+					 &p->cpus_allowed) {
+				if (cpu_active(i) && idle_cpu(i)) {
 					if (i != task_cpu(p)) {
 						schedstat_inc(p,
 						       se.nr_wakeups_idle);
@@ -1240,7 +1238,7 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 	 * this_cpu and prev_cpu are present in:
 	 */
 	for_each_domain(this_cpu, sd) {
-		if (cpu_isset(prev_cpu, sd->span)) {
+		if (cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) {
 			this_sd = sd;
 			break;
 		}
......
@@ -1017,7 +1017,8 @@ static int find_lowest_rq(struct task_struct *task)
 			cpumask_t domain_mask;
 			int       best_cpu;
 
-			cpus_and(domain_mask, sd->span, *lowest_mask);
+			cpumask_and(&domain_mask, sched_domain_span(sd),
+				    lowest_mask);
 
 			best_cpu = pick_optimal_cpu(this_cpu,
 						    &domain_mask);
......
@@ -42,7 +42,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
 	for_each_domain(cpu, sd) {
 		enum cpu_idle_type itype;
 
-		cpumask_scnprintf(mask_str, mask_len, sd->span);
+		cpumask_scnprintf(mask_str, mask_len,
+				  *sched_domain_span(sd));
 		seq_printf(seq, "domain%d %s", dcount++, mask_str);
 		for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
 				itype++) {
......
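
As the commit message notes, for_each_cpu_and() lets find_idlest_cpu() and wake_idle() walk the intersection of two masks without building a temporary cpumask first. Here is a rough user-space sketch of the same idea using a hand-rolled macro over plain unsigned long masks; it is illustrative only, not the kernel's implementation.

/*
 * Illustrative sketch: iterate the set bits of (a & b) without
 * materializing a temporary mask, the way for_each_cpu_and() lets
 * find_idlest_cpu() drop its on-stack cpumask_t. Not kernel code.
 */
#include <stdio.h>

#define for_each_set_bit_and(bit, a, b, nbits)			\
	for ((bit) = 0; (bit) < (nbits); (bit)++)		\
		if (((a) & (b)) & (1UL << (bit)))

int main(void)
{
	unsigned long group_mask   = 0xf;	/* CPUs 0-3 in the group */
	unsigned long allowed_mask = 0xa;	/* task allowed on CPUs 1 and 3 */
	int cpu;

	/* no "tmp = group & allowed" scratch variable is needed */
	for_each_set_bit_and(cpu, group_mask, allowed_mask, 8)
		printf("candidate cpu %d\n", cpu);
	return 0;
}
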