Commit dcc30a35 authored by Rusty Russell, committed by Ingo Molnar

sched: convert cpu_isolated_map to cpumask_var_t.

Impact: stack usage reduction, (future) size reduction, cleanup

Dynamically allocating cpumasks (when CONFIG_CPUMASK_OFFSTACK is set) saves
space when nr_cpu_ids is small but CONFIG_NR_CPUS is large.  cpumask_var_t
is just a struct cpumask for !CONFIG_CPUMASK_OFFSTACK.
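
For context, a simplified sketch of the two shapes cpumask_var_t takes
(illustrative, not a verbatim copy of include/linux/cpumask.h):

	#ifdef CONFIG_CPUMASK_OFFSTACK
	/* A pointer; alloc_cpumask_var() allocates storage sized for
	 * nr_cpu_ids bits rather than the full CONFIG_NR_CPUS. */
	typedef struct cpumask *cpumask_var_t;
	#else
	/* An array of one struct cpumask: it decays to a pointer, so the
	 * same calling convention works with no allocation at all. */
	typedef struct cpumask cpumask_var_t[1];
	#endif

Callers pair alloc_cpumask_var(&mask, GFP_KERNEL) with
free_cpumask_var(mask); both collapse to near no-ops for
!CONFIG_CPUMASK_OFFSTACK.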

We can also use cpulist_parse() instead of parsing the cpu list by hand
in isolated_cpu_setup().
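
As a sketch of what this buys (the "1,3-5" string is a hypothetical
example of the isolcpus= boot parameter; the __setup() registration is
from the surrounding sched.c):

	/* isolcpus=1,3-5 on the command line arrives here as str = "1,3-5" */
	static int __init isolated_cpu_setup(char *str)
	{
		/* cpulist_parse() understands comma-separated ranges directly,
		 * replacing the old get_options()/cpu_set() loop. */
		cpulist_parse(str, *cpu_isolated_map);
		return 1;
	}
	__setup("isolcpus=", isolated_cpu_setup);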
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent d5dd3db1
@@ -6917,19 +6917,12 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 }
 
 /* cpus with isolated domains */
-static cpumask_t cpu_isolated_map = CPU_MASK_NONE;
+static cpumask_var_t cpu_isolated_map;
 
 /* Setup the mask of cpus configured for isolated domains */
 static int __init isolated_cpu_setup(char *str)
 {
-	static int __initdata ints[NR_CPUS];
-	int i;
-
-	str = get_options(str, ARRAY_SIZE(ints), ints);
-	cpus_clear(cpu_isolated_map);
-	for (i = 1; i <= ints[0]; i++)
-		if (ints[i] < NR_CPUS)
-			cpu_set(ints[i], cpu_isolated_map);
+	cpulist_parse(str, *cpu_isolated_map);
 	return 1;
 }
@@ -7727,7 +7720,7 @@ static int arch_init_sched_domains(const cpumask_t *cpu_map)
 	doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
 	if (!doms_cur)
 		doms_cur = &fallback_doms;
-	cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map);
+	cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map);
 	dattr_cur = NULL;
 	err = build_sched_domains(doms_cur);
 	register_sched_domain_sysctl();
@@ -7826,7 +7819,7 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
 	if (doms_new == NULL) {
 		ndoms_cur = 0;
 		doms_new = &fallback_doms;
-		cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
+		cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map);
 		WARN_ON_ONCE(dattr_new);
 	}
@@ -7985,7 +7978,9 @@ static int update_runtime(struct notifier_block *nfb,
 void __init sched_init_smp(void)
 {
-	cpumask_t non_isolated_cpus;
+	cpumask_var_t non_isolated_cpus;
 
+	alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
 #if defined(CONFIG_NUMA)
 	sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
@@ -7994,10 +7989,10 @@ void __init sched_init_smp(void)
 #endif
 	get_online_cpus();
 	mutex_lock(&sched_domains_mutex);
-	arch_init_sched_domains(&cpu_online_map);
-	cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
-	if (cpus_empty(non_isolated_cpus))
-		cpu_set(smp_processor_id(), non_isolated_cpus);
+	arch_init_sched_domains(cpu_online_mask);
+	cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
+	if (cpumask_empty(non_isolated_cpus))
+		cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
 	mutex_unlock(&sched_domains_mutex);
 	put_online_cpus();
@@ -8012,9 +8007,10 @@ void __init sched_init_smp(void)
 	init_hrtick();
 
 	/* Move init over to a non-isolated CPU */
-	if (set_cpus_allowed_ptr(current, &non_isolated_cpus) < 0)
+	if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
 		BUG();
 	sched_init_granularity();
+	free_cpumask_var(non_isolated_cpus);
 }
 #else
 void __init sched_init_smp(void)
@@ -8334,6 +8330,7 @@ void __init sched_init(void)
 #ifdef CONFIG_NO_HZ
 	alloc_bootmem_cpumask_var(&nohz.cpu_mask);
 #endif
+	alloc_bootmem_cpumask_var(&cpu_isolated_map);
 
 	scheduler_running = 1;
 }