Commit 6db840fa authored by Oleg Nesterov, committed by Linus Torvalds

exec: RT sub-thread can livelock and monopolize CPU on exec

de_thread() yields waiting for ->group_leader to be a zombie. This deadlocks
if an rt-prio execer shares the same cpu with ->group_leader. Change the code
to use ->group_exit_task/notify_count mechanics.

This patch certainly uglifies the code, perhaps someone can suggest something
better.
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 356d6d50
...@@ -801,16 +801,15 @@ static int de_thread(struct task_struct *tsk) ...@@ -801,16 +801,15 @@ static int de_thread(struct task_struct *tsk)
hrtimer_restart(&sig->real_timer); hrtimer_restart(&sig->real_timer);
spin_lock_irq(lock); spin_lock_irq(lock);
} }
while (atomic_read(&sig->count) > count) {
sig->group_exit_task = tsk;
sig->notify_count = count; sig->notify_count = count;
sig->group_exit_task = tsk;
while (atomic_read(&sig->count) > count) {
__set_current_state(TASK_UNINTERRUPTIBLE); __set_current_state(TASK_UNINTERRUPTIBLE);
spin_unlock_irq(lock); spin_unlock_irq(lock);
schedule(); schedule();
spin_lock_irq(lock); spin_lock_irq(lock);
} }
sig->group_exit_task = NULL;
sig->notify_count = 0;
spin_unlock_irq(lock); spin_unlock_irq(lock);
/* /*
...@@ -819,14 +818,17 @@ static int de_thread(struct task_struct *tsk) ...@@ -819,14 +818,17 @@ static int de_thread(struct task_struct *tsk)
* and to assume its PID: * and to assume its PID:
*/ */
if (!thread_group_leader(tsk)) { if (!thread_group_leader(tsk)) {
/*
* Wait for the thread group leader to be a zombie.
* It should already be zombie at this point, most
* of the time.
*/
leader = tsk->group_leader; leader = tsk->group_leader;
while (leader->exit_state != EXIT_ZOMBIE)
yield(); sig->notify_count = -1;
for (;;) {
write_lock_irq(&tasklist_lock);
if (likely(leader->exit_state))
break;
__set_current_state(TASK_UNINTERRUPTIBLE);
write_unlock_irq(&tasklist_lock);
schedule();
}
/* /*
* The only record we have of the real-time age of a * The only record we have of the real-time age of a
...@@ -840,8 +842,6 @@ static int de_thread(struct task_struct *tsk) ...@@ -840,8 +842,6 @@ static int de_thread(struct task_struct *tsk)
*/ */
tsk->start_time = leader->start_time; tsk->start_time = leader->start_time;
write_lock_irq(&tasklist_lock);
BUG_ON(leader->tgid != tsk->tgid); BUG_ON(leader->tgid != tsk->tgid);
BUG_ON(tsk->pid == tsk->tgid); BUG_ON(tsk->pid == tsk->tgid);
/* /*
...@@ -874,6 +874,8 @@ static int de_thread(struct task_struct *tsk) ...@@ -874,6 +874,8 @@ static int de_thread(struct task_struct *tsk)
write_unlock_irq(&tasklist_lock); write_unlock_irq(&tasklist_lock);
} }
sig->group_exit_task = NULL;
sig->notify_count = 0;
/* /*
* There may be one thread left which is just exiting, * There may be one thread left which is just exiting,
* but it's safe to stop telling the group to kill themselves. * but it's safe to stop telling the group to kill themselves.
......
...@@ -92,10 +92,9 @@ static void __exit_signal(struct task_struct *tsk) ...@@ -92,10 +92,9 @@ static void __exit_signal(struct task_struct *tsk)
* If there is any task waiting for the group exit * If there is any task waiting for the group exit
* then notify it: * then notify it:
*/ */
if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) { if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count)
wake_up_process(sig->group_exit_task); wake_up_process(sig->group_exit_task);
sig->group_exit_task = NULL;
}
if (tsk == sig->curr_target) if (tsk == sig->curr_target)
sig->curr_target = next_thread(tsk); sig->curr_target = next_thread(tsk);
/* /*
...@@ -827,6 +826,11 @@ static void exit_notify(struct task_struct *tsk) ...@@ -827,6 +826,11 @@ static void exit_notify(struct task_struct *tsk)
state = EXIT_DEAD; state = EXIT_DEAD;
tsk->exit_state = state; tsk->exit_state = state;
if (thread_group_leader(tsk) &&
tsk->signal->notify_count < 0 &&
tsk->signal->group_exit_task)
wake_up_process(tsk->signal->group_exit_task);
write_unlock_irq(&tasklist_lock); write_unlock_irq(&tasklist_lock);
list_for_each_safe(_p, _n, &ptrace_dead) { list_for_each_safe(_p, _n, &ptrace_dead) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment