Commit f18f982a authored by Max Krasnyansky, committed by Ingo Molnar

sched: CPU hotplug events must not destroy scheduler domains created by the cpusets

The first issue is not related to cpusets: we're simply leaking doms_cur.
It is allocated in arch_init_sched_domains(), which is called for every
hotplug event, so we keep reallocating doms_cur without ever freeing the
previous allocation. I introduced a free_sched_domains() function that
cleans this up.
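
To make the leak concrete, here is a minimal userspace sketch of the pattern
described above. malloc()/free() stand in for kmalloc()/kfree(),
init_sched_domains() is a stripped-down stand-in for arch_init_sched_domains(),
and the cpumask_t typedef is a placeholder; only the free-before-reallocate
logic mirrors the actual patch:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef unsigned long cpumask_t;    /* placeholder for the kernel type */

    static cpumask_t fallback_doms;     /* static fallback, never freed */
    static cpumask_t *doms_cur;         /* current domain masks */
    static int ndoms_cur;

    /* Stand-in for arch_init_sched_domains(): a fresh doms_cur is
     * allocated on every call, i.e. on every hotplug event. */
    static int init_sched_domains(void)
    {
        ndoms_cur = 1;
        doms_cur = malloc(sizeof(cpumask_t));
        if (!doms_cur)
            doms_cur = &fallback_doms;
        memset(doms_cur, 0, sizeof(cpumask_t));
        return 0;
    }

    /* Stand-in for the new free_sched_domains(): drop the previous masks
     * (unless they are the static fallback) before the next allocation. */
    static void free_sched_domains(void)
    {
        ndoms_cur = 0;
        if (doms_cur != &fallback_doms)
            free(doms_cur);
        doms_cur = &fallback_doms;
    }

    int main(void)
    {
        doms_cur = &fallback_doms;
        for (int i = 0; i < 3; i++) {   /* three simulated hotplug events */
            free_sched_domains();       /* without this call, each loop leaks */
            init_sched_domains();
            printf("event %d: %d domain(s)\n", i, ndoms_cur);
        }
        free_sched_domains();
        return 0;
    }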

The second issue is that sched domains created by cpusets are completely
destroyed by CPU hotplug events: on every hotplug event the scheduler
attaches all CPUs to the NULL domain and then puts them all back into a
single default domain, thereby destroying the domains that cpusets had
created via partition_sched_domains().
The solution is simple: when cpusets are enabled, the scheduler should not
create the default domain and should instead let cpusets do it, which is
exactly what this patch does.
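
Condensed into a toy sketch, the division of labour after this patch looks
roughly like the code below. Every function body is a print stub, and
hotplug_teardown()/hotplug_rebuild() are illustrative names, not kernel
functions; the real code is in the hunks that follow:

    #include <stdio.h>

    /* Stubs named after the kernel functions touched by this patch. */
    static void detach_destroy_domains(void)  { puts("attach all CPUs to the NULL domain"); }
    static void free_sched_domains(void)      { puts("free the old doms_cur masks"); }
    static void arch_init_sched_domains(void) { puts("build the single default domain"); }
    static void rebuild_sched_domains(void)   { puts("cpusets repartition the domains"); }

    /* Scheduler side: runs on CPU_UP_PREPARE / CPU_DOWN_PREPARE. */
    static void hotplug_teardown(void)
    {
        detach_destroy_domains();
        free_sched_domains();           /* also fixes the doms_cur leak */
    }

    /* Rebuild side: who recreates the domains depends on CONFIG_CPUSETS. */
    static void hotplug_rebuild(int config_cpusets)
    {
        if (!config_cpusets)
            arch_init_sched_domains();  /* scheduler builds one big domain */
        else
            rebuild_sched_domains();    /* cpuset hotplug handler does it */
    }

    int main(void)
    {
        hotplug_teardown();
        hotplug_rebuild(1);             /* CONFIG_CPUSETS=y: cpusets own the partitioning */
        return 0;
    }
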
Signed-off-by: Max Krasnyansky <maxk@qualcomm.com>
Cc: pj@sgi.com
Cc: menage@google.com
Cc: rostedt@goodmis.org
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 15a8641e
@@ -1890,6 +1890,12 @@ static void common_cpu_mem_hotplug_unplug(void)
 	top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
 	scan_for_empty_cpusets(&top_cpuset);
 
+	/*
+	 * Scheduler destroys domains on hotplug events.
+	 * Rebuild them based on the current settings.
+	 */
+	rebuild_sched_domains();
+
 	cgroup_unlock();
 }
@@ -7237,6 +7237,18 @@ void __attribute__((weak)) arch_update_cpu_topology(void)
 {
 }
 
+/*
+ * Free current domain masks.
+ * Called after all cpus are attached to NULL domain.
+ */
+static void free_sched_domains(void)
+{
+	ndoms_cur = 0;
+	if (doms_cur != &fallback_doms)
+		kfree(doms_cur);
+	doms_cur = &fallback_doms;
+}
+
 /*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
@@ -7384,6 +7396,7 @@ int arch_reinit_sched_domains(void)
 	get_online_cpus();
 	mutex_lock(&sched_domains_mutex);
 	detach_destroy_domains(&cpu_online_map);
+	free_sched_domains();
 	err = arch_init_sched_domains(&cpu_online_map);
 	mutex_unlock(&sched_domains_mutex);
 	put_online_cpus();
@@ -7469,6 +7482,7 @@ static int update_sched_domains(struct notifier_block *nfb,
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		detach_destroy_domains(&cpu_online_map);
+		free_sched_domains();
 		return NOTIFY_OK;
 
 	case CPU_UP_CANCELED:
@@ -7487,8 +7501,16 @@ static int update_sched_domains(struct notifier_block *nfb,
 		return NOTIFY_DONE;
 	}
 
+#ifndef CONFIG_CPUSETS
+	/*
+	 * Create default domain partitioning if cpusets are disabled.
+	 * Otherwise we let cpusets rebuild the domains based on the
+	 * current setup.
+	 */
+
 	/* The hotplug lock is already held by cpu_up/cpu_down */
 	arch_init_sched_domains(&cpu_online_map);
+#endif
 
 	return NOTIFY_OK;
 }