Commit a5f39fd8 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] sched_domains: use cpu_possible_map

From: Nick Piggin <piggin@cyberone.com.au>

This changes sched domains to contain all possible CPUs, and to check for online
CPUs as needed.  This is needed so that sched domains play nicely with CPU hotplug.
parent 482b9933
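
To illustrate the idea: domain spans are now built from cpu_possible_map once at
init time, and callers narrow a span to cpu_online_map when they actually walk it.
A rough sketch of the init-time side, using the 2.6-era cpumask helpers that appear
in the diff below (the function name and body are illustrative only, not the exact
kernel code):

static void __init example_setup_domains(void)
{
        int i;

        /* Walk every possible CPU, not just the ones online at boot ... */
        for_each_cpu(i) {
                struct sched_domain *cpu_domain = cpu_sched_domain(i);

                *cpu_domain = SD_CPU_INIT;
                /* ... and span all possible CPUs; online-ness is checked at use time. */
                cpu_domain->span = cpu_possible_map;
        }
}
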
@@ -1138,7 +1138,7 @@ __init void arch_init_sched_domains(void)
         struct sched_group *first_cpu = NULL, *last_cpu = NULL;
 
         /* Set up domains */
-        for_each_cpu_mask(i, cpu_online_map) {
+        for_each_cpu(i) {
                 struct sched_domain *cpu_domain = cpu_sched_domain(i);
                 struct sched_domain *phys_domain = &per_cpu(phys_domains, i);
                 struct sched_domain *node_domain = &per_cpu(node_domains, i);
@@ -1152,11 +1152,11 @@ __init void arch_init_sched_domains(void)
                 phys_domain->span = nodemask;
 
                 *node_domain = SD_NODE_INIT;
-                node_domain->span = cpu_online_map;
+                node_domain->span = cpu_possible_map;
         }
 
         /* Set up CPU (sibling) groups */
-        for_each_cpu_mask(i, cpu_online_map) {
+        for_each_cpu(i) {
                 struct sched_domain *cpu_domain = cpu_sched_domain(i);
                 int j;
                 first_cpu = last_cpu = NULL;
@@ -1184,7 +1184,7 @@ __init void arch_init_sched_domains(void)
                 int j;
                 cpumask_t nodemask;
                 struct sched_group *node = &sched_group_nodes[i];
 
-                cpus_and(nodemask, node_to_cpumask(i), cpu_online_map);
+                cpus_and(nodemask, node_to_cpumask(i), cpu_possible_map);
                 if (cpus_empty(nodemask))
                         continue;
@@ -1220,7 +1220,7 @@ __init void arch_init_sched_domains(void)
         for (i = 0; i < MAX_NUMNODES; i++) {
                 struct sched_group *cpu = &sched_group_nodes[i];
                 cpumask_t nodemask;
 
-                cpus_and(nodemask, node_to_cpumask(i), cpu_online_map);
+                cpus_and(nodemask, node_to_cpumask(i), cpu_possible_map);
                 if (cpus_empty(nodemask))
                         continue;
@@ -1237,7 +1237,7 @@ __init void arch_init_sched_domains(void)
         last_cpu->next = first_cpu;
         mb();
 
-        for_each_cpu_mask(i, cpu_online_map) {
+        for_each_cpu(i) {
                 int node = cpu_to_node(i);
                 struct sched_domain *cpu_domain = cpu_sched_domain(i);
                 struct sched_domain *phys_domain = &per_cpu(phys_domains, i);
@@ -1264,7 +1264,7 @@ __init void arch_init_sched_domains(void)
         struct sched_group *first_cpu = NULL, *last_cpu = NULL;
 
         /* Set up domains */
-        for_each_cpu_mask(i, cpu_online_map) {
+        for_each_cpu(i) {
                 struct sched_domain *cpu_domain = cpu_sched_domain(i);
                 struct sched_domain *phys_domain = &per_cpu(phys_domains, i);
@@ -1272,11 +1272,11 @@ __init void arch_init_sched_domains(void)
                 cpu_domain->span = cpu_sibling_map[i];
 
                 *phys_domain = SD_CPU_INIT;
-                phys_domain->span = cpu_online_map;
+                phys_domain->span = cpu_possible_map;
         }
 
         /* Set up CPU (sibling) groups */
-        for_each_cpu_mask(i, cpu_online_map) {
+        for_each_cpu(i) {
                 struct sched_domain *cpu_domain = cpu_sched_domain(i);
                 int j;
                 first_cpu = last_cpu = NULL;
@@ -1302,7 +1302,7 @@ __init void arch_init_sched_domains(void)
         first_cpu = last_cpu = NULL;
 
         /* Set up physical groups */
-        for_each_cpu_mask(i, cpu_online_map) {
+        for_each_cpu(i) {
                 struct sched_domain *cpu_domain = cpu_sched_domain(i);
                 struct sched_group *cpu = &sched_group_phys[i];
@@ -1322,7 +1322,7 @@ __init void arch_init_sched_domains(void)
         last_cpu->next = first_cpu;
         mb();
 
-        for_each_cpu_mask(i, cpu_online_map) {
+        for_each_cpu(i) {
                 struct sched_domain *cpu_domain = cpu_sched_domain(i);
                 struct sched_domain *phys_domain = &per_cpu(phys_domains, i);
                 struct sched_group *cpu_group = &sched_group_cpus[i];
@@ -647,6 +647,7 @@ static inline unsigned long get_high_cpu_load(int cpu, int update)
 
 #if defined(CONFIG_SMP) && defined(ARCH_HAS_SCHED_WAKE_BALANCE)
 static int sched_balance_wake(int cpu, task_t *p)
 {
+        cpumask_t tmp;
         struct sched_domain *domain;
         int i;
@@ -657,10 +658,8 @@ static int sched_balance_wake(int cpu, task_t *p)
         if (!(domain->flags & SD_FLAG_WAKE))
                 return cpu;
 
-        for_each_cpu_mask(i, domain->span) {
-                if (!cpu_online(i))
-                        continue;
-
+        cpus_and(tmp, domain->span, cpu_online_map);
+        for_each_cpu_mask(i, tmp) {
                 if (!cpu_isset(i, p->cpus_allowed))
                         continue;
@@ -1149,16 +1148,15 @@ static void sched_migrate_task(task_t *p, int dest_cpu)
  */
 static int sched_best_cpu(struct task_struct *p, struct sched_domain *domain)
 {
+        cpumask_t tmp;
         int i, min_load, this_cpu, best_cpu;
 
         best_cpu = this_cpu = task_cpu(p);
         min_load = INT_MAX;
 
-        for_each_online_cpu(i) {
+        cpus_and(tmp, domain->span, cpu_online_map);
+        for_each_cpu_mask(i, tmp) {
                 unsigned long load;
-                if (!cpu_isset(i, domain->span))
-                        continue;
-
                 if (i == this_cpu)
                         load = get_low_cpu_load(i, 0);
                 else
@@ -1373,6 +1371,7 @@ find_busiest_group(struct sched_domain *domain, int this_cpu,
                 modify = 1;
 
         do {
+                cpumask_t tmp;
                 unsigned long load;
                 int local_group;
                 int i, nr_cpus = 0;
@@ -1381,10 +1380,8 @@ find_busiest_group(struct sched_domain *domain, int this_cpu,
 
                 /* Tally up the load of all CPUs in the group */
                 avg_load = 0;
-                for_each_cpu_mask(i, group->cpumask) {
-                        if (!cpu_online(i))
-                                continue;
-
+                cpus_and(tmp, group->cpumask, cpu_online_map);
+                for_each_cpu_mask(i, tmp) {
                         /* Bias balancing toward cpus of our domain */
                         if (local_group) {
                                 load = get_high_cpu_load(i, modify);
@@ -1496,16 +1493,15 @@ find_busiest_group(struct sched_domain *domain, int this_cpu,
  */
 static runqueue_t *find_busiest_queue(struct sched_group *group)
 {
+        cpumask_t tmp;
         int i;
         unsigned long max_load = 0;
         runqueue_t *busiest = NULL;
 
-        for_each_cpu_mask(i, group->cpumask) {
+        cpus_and(tmp, group->cpumask, cpu_online_map);
+        for_each_cpu_mask(i, tmp) {
                 unsigned long load;
-                if (!cpu_online(i))
-                        continue;
-
                 load = get_low_cpu_load(i, 0);
                 if (load > max_load) {
@@ -1694,16 +1690,15 @@ static void active_load_balance(runqueue_t *busiest, int busiest_cpu)
         group = sd->groups;
         do {
+                cpumask_t tmp;
                 runqueue_t *rq;
                 int push_cpu = 0, nr = 0;
 
                 if (group == busy_group)
                         goto next_group;
 
-                for_each_cpu_mask(i, group->cpumask) {
-                        if (!cpu_online(i))
-                                continue;
-
+                cpus_and(tmp, group->cpumask, cpu_online_map);
+                for_each_cpu_mask(i, tmp) {
                         if (!idle_cpu(i))
                                 goto next_group;
                         push_cpu = i;
@@ -3298,17 +3293,17 @@ static void __init arch_init_sched_domains(void)
         struct sched_group *first_node = NULL, *last_node = NULL;
 
         /* Set up domains */
-        for_each_cpu_mask(i, cpu_online_map) {
+        for_each_cpu(i) {
                 int node = cpu_to_node(i);
                 cpumask_t nodemask = node_to_cpumask(node);
                 struct sched_domain *node_domain = &per_cpu(node_domains, i);
                 struct sched_domain *cpu_domain = cpu_sched_domain(i);
 
                 *node_domain = SD_NODE_INIT;
-                node_domain->span = cpu_online_map;
+                node_domain->span = cpu_possible_map;
 
                 *cpu_domain = SD_CPU_INIT;
-                cpus_and(cpu_domain->span, nodemask, cpu_online_map);
+                cpus_and(cpu_domain->span, nodemask, cpu_possible_map);
                 cpu_domain->parent = node_domain;
         }
@@ -3320,7 +3315,7 @@ static void __init arch_init_sched_domains(void)
                 struct sched_group *node = &sched_group_nodes[i];
                 cpumask_t tmp = node_to_cpumask(i);
 
-                cpus_and(nodemask, tmp, cpu_online_map);
+                cpus_and(nodemask, tmp, cpu_possible_map);
                 if (cpus_empty(nodemask))
                         continue;
@@ -3352,7 +3347,7 @@ static void __init arch_init_sched_domains(void)
         last_node->next = first_node;
         mb();
 
-        for_each_cpu_mask(i, cpu_online_map) {
+        for_each_cpu(i) {
                 struct sched_domain *node_domain = &per_cpu(node_domains, i);
                 struct sched_domain *cpu_domain = cpu_sched_domain(i);
                 node_domain->groups = &sched_group_nodes[cpu_to_node(i)];
@@ -3367,15 +3362,15 @@ static void __init arch_init_sched_domains(void)
         struct sched_group *first_cpu = NULL, *last_cpu = NULL;
 
         /* Set up domains */
-        for_each_cpu_mask(i, cpu_online_map) {
+        for_each_cpu(i) {
                 struct sched_domain *cpu_domain = cpu_sched_domain(i);
 
                 *cpu_domain = SD_CPU_INIT;
-                cpu_domain->span = cpu_online_map;
+                cpu_domain->span = cpu_possible_map;
         }
 
         /* Set up CPU groups */
-        for_each_cpu_mask(i, cpu_online_map) {
+        for_each_cpu_mask(i, cpu_possible_map) {
                 struct sched_group *cpu = &sched_group_cpus[i];
                 cpus_clear(cpu->cpumask);
@@ -3391,7 +3386,7 @@ static void __init arch_init_sched_domains(void)
         last_cpu->next = first_cpu;
         mb();
 
-        for_each_cpu_mask(i, cpu_online_map) {
+        for_each_cpu(i) {
                 struct sched_domain *cpu_domain = cpu_sched_domain(i);
                 cpu_domain->groups = &sched_group_cpus[i];
         }
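
The run-time side of the same idea, as used in sched_balance_wake(), find_busiest_group()
and friends above: instead of testing cpu_online() on every iteration, the possible-CPU
span is narrowed to the online map once and then walked. A rough sketch (the function name
and selection logic are illustrative only; get_low_cpu_load() is the helper used in the diff):

static int example_find_least_loaded(struct sched_domain *domain)
{
        cpumask_t tmp;
        int i, best_cpu = -1;
        unsigned long min_load = ULONG_MAX;

        /* Narrow the possible-CPU span to the CPUs that are online right now. */
        cpus_and(tmp, domain->span, cpu_online_map);

        for_each_cpu_mask(i, tmp) {
                unsigned long load = get_low_cpu_load(i, 0);

                if (load < min_load) {
                        min_load = load;
                        best_cpu = i;
                }
        }
        return best_cpu;
}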