Commit dc938520 authored by Gregory Haskins, committed by Ingo Molnar

sched: dynamically update the root-domain span/online maps

The baseline code statically builds the span maps when the domain is formed.
Previous attempts at dynamically updating the maps caused a suspend-to-ram
regression, which should now be fixed.
Signed-off-by: Gregory Haskins <ghaskins@novell.com>
CC: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent f85d6c71
...@@ -359,8 +359,6 @@ struct rt_rq { ...@@ -359,8 +359,6 @@ struct rt_rq {
* exclusive cpuset is created, we also create and attach a new root-domain * exclusive cpuset is created, we also create and attach a new root-domain
* object. * object.
* *
* By default the system creates a single root-domain with all cpus as
* members (mimicking the global state we have today).
*/ */
struct root_domain { struct root_domain {
atomic_t refcount; atomic_t refcount;
...@@ -375,6 +373,10 @@ struct root_domain { ...@@ -375,6 +373,10 @@ struct root_domain {
atomic_t rto_count; atomic_t rto_count;
}; };
/*
* By default the system creates a single root-domain with all cpus as
* members (mimicking the global state we have today).
*/
static struct root_domain def_root_domain; static struct root_domain def_root_domain;
#endif #endif
...@@ -5859,6 +5861,9 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) ...@@ -5859,6 +5861,9 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
class->leave_domain(rq); class->leave_domain(rq);
} }
cpu_clear(rq->cpu, old_rd->span);
cpu_clear(rq->cpu, old_rd->online);
if (atomic_dec_and_test(&old_rd->refcount)) if (atomic_dec_and_test(&old_rd->refcount))
kfree(old_rd); kfree(old_rd);
} }
...@@ -5866,6 +5871,10 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) ...@@ -5866,6 +5871,10 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
atomic_inc(&rd->refcount); atomic_inc(&rd->refcount);
rq->rd = rd; rq->rd = rd;
cpu_set(rq->cpu, rd->span);
if (cpu_isset(rq->cpu, cpu_online_map))
cpu_set(rq->cpu, rd->online);
for (class = sched_class_highest; class; class = class->next) { for (class = sched_class_highest; class; class = class->next) {
if (class->join_domain) if (class->join_domain)
class->join_domain(rq); class->join_domain(rq);
...@@ -5874,23 +5883,21 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) ...@@ -5874,23 +5883,21 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
spin_unlock_irqrestore(&rq->lock, flags); spin_unlock_irqrestore(&rq->lock, flags);
} }
static void init_rootdomain(struct root_domain *rd, const cpumask_t *map) static void init_rootdomain(struct root_domain *rd)
{ {
memset(rd, 0, sizeof(*rd)); memset(rd, 0, sizeof(*rd));
rd->span = *map; cpus_clear(rd->span);
cpus_and(rd->online, rd->span, cpu_online_map); cpus_clear(rd->online);
} }
static void init_defrootdomain(void) static void init_defrootdomain(void)
{ {
cpumask_t cpus = CPU_MASK_ALL; init_rootdomain(&def_root_domain);
init_rootdomain(&def_root_domain, &cpus);
atomic_set(&def_root_domain.refcount, 1); atomic_set(&def_root_domain.refcount, 1);
} }
static struct root_domain *alloc_rootdomain(const cpumask_t *map) static struct root_domain *alloc_rootdomain(void)
{ {
struct root_domain *rd; struct root_domain *rd;
...@@ -5898,7 +5905,7 @@ static struct root_domain *alloc_rootdomain(const cpumask_t *map) ...@@ -5898,7 +5905,7 @@ static struct root_domain *alloc_rootdomain(const cpumask_t *map)
if (!rd) if (!rd)
return NULL; return NULL;
init_rootdomain(rd, map); init_rootdomain(rd);
return rd; return rd;
} }
...@@ -6319,7 +6326,7 @@ static int build_sched_domains(const cpumask_t *cpu_map) ...@@ -6319,7 +6326,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes; sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
#endif #endif
rd = alloc_rootdomain(cpu_map); rd = alloc_rootdomain();
if (!rd) { if (!rd) {
printk(KERN_WARNING "Cannot alloc root domain\n"); printk(KERN_WARNING "Cannot alloc root domain\n");
return -ENOMEM; return -ENOMEM;
...@@ -6894,7 +6901,6 @@ void __init sched_init(void) ...@@ -6894,7 +6901,6 @@ void __init sched_init(void)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
rq->sd = NULL; rq->sd = NULL;
rq->rd = NULL; rq->rd = NULL;
rq_attach_root(rq, &def_root_domain);
rq->active_balance = 0; rq->active_balance = 0;
rq->next_balance = jiffies; rq->next_balance = jiffies;
rq->push_cpu = 0; rq->push_cpu = 0;
...@@ -6903,6 +6909,7 @@ void __init sched_init(void) ...@@ -6903,6 +6909,7 @@ void __init sched_init(void)
INIT_LIST_HEAD(&rq->migration_queue); INIT_LIST_HEAD(&rq->migration_queue);
rq->rt.highest_prio = MAX_RT_PRIO; rq->rt.highest_prio = MAX_RT_PRIO;
rq->rt.overloaded = 0; rq->rt.overloaded = 0;
rq_attach_root(rq, &def_root_domain);
#endif #endif
atomic_set(&rq->nr_iowait, 0); atomic_set(&rq->nr_iowait, 0);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment