Commit 758b2cdc authored by Rusty Russell, committed by Ingo Molnar

sched: wrap sched_group and sched_domain cpumask accesses.

Impact: trivial wrap of member accesses

This eases the transition in the next patch.

We also get rid of a temporary cpumask in find_idlest_cpu() thanks to
for_each_cpu_and(), and in sched_balance_self() by taking the weight before
setting sd to NULL.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 1e5ce4f4
include/linux/sched.h

@@ -786,6 +786,11 @@ struct sched_group {
 	u32 reciprocal_cpu_power;
 };
 
+static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+{
+	return &sg->cpumask;
+}
+
 enum sched_domain_level {
 	SD_LV_NONE = 0,
 	SD_LV_SIBLING,
@@ -866,6 +871,11 @@ struct sched_domain {
 #endif
 };
 
+static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
+{
+	return &sd->span;
+}
+
 extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
 				    struct sched_domain_attr *dattr_new);
 extern int arch_reinit_sched_domains(void);
...
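As a minimal sketch (not part of this commit), call sites now go through these wrappers instead of touching sg->cpumask or sd->span directly, which is what lets a follow-up patch change how the masks are stored without editing every user. The helper below is hypothetical and only illustrates the intended usage, assuming it lives in code that already includes <linux/sched.h>:

/* Hypothetical helper, for illustration only. */
static inline bool domain_contains_cpu(struct sched_domain *sd, int cpu)
{
	/* New style: use the accessor rather than dereferencing sd->span. */
	return cpumask_test_cpu(cpu, sched_domain_span(sd));
}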
kernel/sched.c

This diff is collapsed.
kernel/sched_fair.c

@@ -1024,7 +1024,6 @@ static void yield_task_fair(struct rq *rq)
 #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
 static int wake_idle(int cpu, struct task_struct *p)
 {
-	cpumask_t tmp;
 	struct sched_domain *sd;
 	int i;
 
@@ -1044,10 +1043,9 @@ static int wake_idle(int cpu, struct task_struct *p)
 		if ((sd->flags & SD_WAKE_IDLE)
 		    || ((sd->flags & SD_WAKE_IDLE_FAR)
 			&& !task_hot(p, task_rq(p)->clock, sd))) {
-			cpus_and(tmp, sd->span, p->cpus_allowed);
-			cpus_and(tmp, tmp, cpu_active_map);
-			for_each_cpu_mask_nr(i, tmp) {
-				if (idle_cpu(i)) {
+			for_each_cpu_and(i, sched_domain_span(sd),
+					 &p->cpus_allowed) {
+				if (cpu_active(i) && idle_cpu(i)) {
 					if (i != task_cpu(p)) {
 						schedstat_inc(p,
 						       se.nr_wakeups_idle);
@@ -1240,7 +1238,7 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 	 * this_cpu and prev_cpu are present in:
 	 */
 	for_each_domain(this_cpu, sd) {
-		if (cpu_isset(prev_cpu, sd->span)) {
+		if (cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) {
 			this_sd = sd;
 			break;
 		}
...
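The commit message's point about dropping a temporary cpumask is visible in the wake_idle() hunk above. The sketch below restates the before/after pattern with hypothetical function names, assuming the 2.6.28-era cpumask API used in this patch:

/* Before: intersect into an on-stack cpumask_t, then iterate it. */
static int first_allowed_cpu_old(struct sched_domain *sd, struct task_struct *p)
{
	cpumask_t tmp;	/* can be large when NR_CPUS is big */
	int i;

	cpus_and(tmp, sd->span, p->cpus_allowed);
	for_each_cpu_mask_nr(i, tmp)
		return i;
	return -1;
}

/* After: for_each_cpu_and() walks the intersection directly, so no
 * temporary mask is needed, and the accessor hides how span is stored.
 */
static int first_allowed_cpu_new(struct sched_domain *sd, struct task_struct *p)
{
	int i;

	for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed)
		return i;
	return -1;
}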
kernel/sched_rt.c

@@ -1017,7 +1017,8 @@ static int find_lowest_rq(struct task_struct *task)
 			cpumask_t domain_mask;
 			int best_cpu;
 
-			cpus_and(domain_mask, sd->span, *lowest_mask);
+			cpumask_and(&domain_mask, sched_domain_span(sd),
+				    lowest_mask);
 
 			best_cpu = pick_optimal_cpu(this_cpu,
 						    &domain_mask);
...
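A similarly hedged sketch of the API shift in the find_lowest_rq() hunk: the old cpus_and() operates on cpumask_t values, while the new cpumask_and() takes struct cpumask pointers, so it composes directly with sched_domain_span(). The wrapper function here is hypothetical:

/* Hypothetical wrapper showing only the call-convention change. */
static void domain_and(struct sched_domain *sd,
		       const struct cpumask *lowest_mask,
		       cpumask_t *result)
{
	/* Old style: cpus_and(*result, sd->span, *lowest_mask); */
	cpumask_and(result, sched_domain_span(sd), lowest_mask);
}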
kernel/sched_stats.h

@@ -42,7 +42,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
 		for_each_domain(cpu, sd) {
 			enum cpu_idle_type itype;
 
-			cpumask_scnprintf(mask_str, mask_len, sd->span);
+			cpumask_scnprintf(mask_str, mask_len,
+					  *sched_domain_span(sd));
 			seq_printf(seq, "domain%d %s", dcount++, mask_str);
 			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
 			     itype++) {
...