Commit c62e7cdb authored by Ingo Molnar, committed by Linus Torvalds

[PATCH] sched: fork hotplug handling cleanup

- remove the hotplug lock from around much of fork(), and re-copy the
  cpus_allowed mask to solve the hotplug race cleanly.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Srivatsa Vaddagiri <vatsa@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent c15d3bea
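
For context on what the patch changes: previously fork() took the CPU hotplug lock across most of copy_process() so that the parent's cpus_allowed mask could not change between being copied into the child and the child being attached to the task list. The patch drops that lock and instead re-copies the mask (and sets the child's CPU to the parent's CPU) under tasklist_lock, the lock that actually publishes the child. Below is a minimal userspace sketch of that pattern, not kernel code: the names task, fork_task(), publish_lock and the simulated mask change are illustrative stand-ins, with publish_lock playing the role of tasklist_lock.

/*
 * Minimal userspace model of the "re-copy under the publishing lock"
 * pattern used by this patch.  All names here (task, fork_task,
 * publish_lock, the simulated mask change) are illustrative, not kernel
 * code.  Build with: cc -pthread model.c
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct task {
	uint64_t cpus_allowed;		/* stand-in for the kernel cpumask */
	struct task *next;		/* stand-in for the task list link */
};

static pthread_mutex_t publish_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ tasklist_lock */
static struct task init_task = { .cpus_allowed = 0xf };
static struct task *task_list = &init_task;

static struct task *fork_task(struct task *parent)
{
	static struct task child;	/* a single child keeps the model tiny */
	uint64_t early_copy;

	/* Early copy: in the kernel the mask comes along when the parent's
	 * task struct is duplicated, long before the child is visible. */
	early_copy = parent->cpus_allowed;

	/*
	 * The bulk of fork() runs here without any hotplug lock held.
	 * Simulate the parent's mask changing meanwhile, e.g. because a
	 * CPU went offline and the parent was migrated away:
	 */
	parent->cpus_allowed &= ~0x8ULL;

	pthread_mutex_lock(&publish_lock);	/* ~ write_lock_irq(&tasklist_lock) */
	child.cpus_allowed = parent->cpus_allowed; /* re-copy: now up to date */
	child.next = task_list;			/* attach the child to the list */
	task_list = &child;
	pthread_mutex_unlock(&publish_lock);

	printf("early copy: %#llx, published with: %#llx\n",
	       (unsigned long long)early_copy,
	       (unsigned long long)child.cpus_allowed);
	return &child;
}

int main(void)
{
	fork_task(&init_task);
	return 0;
}

Because the child is now published with the parent's up-to-date mask and on the parent's CPU, sched_fork() no longer needs its cpu_isset()/any_online_cpu() fallback, which is why that block is removed in the scheduler hunk below.
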
kernel/fork.c
@@ -893,16 +893,6 @@ struct task_struct *copy_process(unsigned long clone_flags,
 	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
 		return ERR_PTR(-EINVAL);
 
-	/*
-	 * The newly dup'ed task shares the same cpus_allowed mask as its
-	 * parent (ie. current), and it is not attached to the tasklist.
-	 * The end result is that this CPU might go down and the parent
-	 * be migrated away, leaving the task on a dead CPU. So take the
-	 * hotplug lock here and release it after the child has been attached
-	 * to the tasklist.
-	 */
-	lock_cpu_hotplug();
-
 	retval = security_task_create(clone_flags);
 	if (retval)
 		goto fork_out;
@@ -1043,6 +1033,17 @@ struct task_struct *copy_process(unsigned long clone_flags,
 	/* Need tasklist lock for parent etc handling! */
 	write_lock_irq(&tasklist_lock);
 
+	/*
+	 * The task hasn't been attached yet, so cpus_allowed mask cannot
+	 * have changed. The cpus_allowed mask of the parent may have
+	 * changed after it was copied first time, and it may then move to
+	 * another CPU - so we re-copy it here and set the child's CPU to
+	 * the parent's CPU. This avoids alot of nasty races.
+	 */
+	p->cpus_allowed = current->cpus_allowed;
+	set_task_cpu(p, smp_processor_id());
+
 	/*
 	 * Check for pending SIGKILL! The new thread should not be allowed
 	 * to slip out of an OOM kill. (or normal SIGKILL.)
@@ -1108,7 +1109,6 @@ struct task_struct *copy_process(unsigned long clone_flags,
 	retval = 0;
 
 fork_out:
-	unlock_cpu_hotplug();
 	if (retval)
 		return ERR_PTR(retval);
 	return p;
kernel/sched.c
@@ -872,22 +872,10 @@ static int find_idlest_cpu(struct task_struct *p, int this_cpu,
 
 /*
  * Perform scheduler related setup for a newly forked process p.
- * p is forked by current. The cpu hotplug lock is held.
+ * p is forked by current.
  */
 void fastcall sched_fork(task_t *p)
 {
-	int cpu = smp_processor_id();
-
-	/*
-	 * The task hasn't been attached yet, so cpus_allowed mask cannot
-	 * change. The cpus_allowed mask of the parent may have changed
-	 * after it is copied, and it may then move to a CPU that is not
-	 * allowed for the child.
-	 */
-	if (unlikely(!cpu_isset(cpu, p->cpus_allowed)))
-		cpu = any_online_cpu(p->cpus_allowed);
-	set_task_cpu(p, cpu);
-
 	/*
 	 * We mark the process as running here, but have not actually
 	 * inserted it onto the runqueue yet. This guarantees that