Commit 55a101f8 authored by Oleg Nesterov, committed by Linus Torvalds

[PATCH] kill PF_DEAD flag

After the previous change, (->flags & PF_DEAD) <=> (->state == EXIT_DEAD), so we
no longer need PF_DEAD.
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 29b88492
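
To make the shape of the change easier to follow outside the diff, here is a minimal user-space sketch of the lifetime rule involved: the exiting task now marks itself with tsk->state = EXIT_DEAD instead of setting PF_DEAD in tsk->flags, and finish_task_switch() samples prev->state before the runqueue lock is released, dropping the final task reference when that saved state is EXIT_DEAD. All demo_* names below are invented for illustration; this is not the kernel code itself.

/*
 * Minimal user-space model of the lifetime rule this patch relies on.
 * All demo_* names are invented for illustration; the real code lives
 * in kernel/exit.c (do_exit) and kernel/sched.c (finish_task_switch).
 */
#include <stdio.h>
#include <stdlib.h>

#define EXIT_DEAD 32	/* stand-in for the kernel's EXIT_DEAD state value */

struct demo_task {
	long state;	/* plays the role of task_struct->state */
	int usage;	/* plays the role of the task_struct reference count */
};

static void demo_put_task_struct(struct demo_task *t)
{
	if (--t->usage == 0) {
		printf("final reference dropped, task freed\n");
		free(t);
	}
}

/* Tail of do_exit() after the patch: no PF_DEAD, only the state. */
static void demo_do_exit(struct demo_task *tsk)
{
	/* causes the final put in demo_finish_task_switch() */
	tsk->state = EXIT_DEAD;
	/* the real code calls schedule() here and never returns */
}

/*
 * finish_task_switch() after the patch: sample prev->state while the
 * runqueue lock is (conceptually) still held, release the lock, then
 * drop the final reference only if the saved state says the task died.
 */
static void demo_finish_task_switch(struct demo_task *prev)
{
	long prev_state = prev->state;	/* was: prev->flags & PF_DEAD */

	/* ... finish_lock_switch() would drop the runqueue lock here ... */

	if (prev_state == EXIT_DEAD)
		demo_put_task_struct(prev);	/* drop the "current" reference */
}

int main(void)
{
	struct demo_task *p = calloc(1, sizeof(*p));

	if (!p)
		return 1;
	p->usage = 1;			/* reference held for being "current" */

	demo_do_exit(p);		/* dying task marks itself EXIT_DEAD ... */
	demo_finish_task_switch(p);	/* ... next task drops the last reference */
	return 0;
}

Running this prints the "final reference dropped" line exactly once, which is the point of the comment the patch updates: prev->state must be read before the runqueue lock is released, otherwise prev could be scheduled on another CPU, die there, and the reference would be dropped twice.
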
@@ -1061,7 +1061,6 @@ static inline void put_task_struct(struct task_struct *t)
/* Not implemented yet, only for 486*/
#define PF_STARTING 0x00000002 /* being created */
#define PF_EXITING 0x00000004 /* getting shut down */
-#define PF_DEAD 0x00000008 /* Dead */
#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
#define PF_DUMPCORE 0x00000200 /* dumped core */
@@ -953,10 +953,8 @@ fastcall NORET_TYPE void do_exit(long code)
if (tsk->splice_pipe)
__free_pipe_info(tsk->splice_pipe);
-/* PF_DEAD causes final put_task_struct after we schedule. */
preempt_disable();
-BUG_ON(tsk->flags & PF_DEAD);
-tsk->flags |= PF_DEAD;
+/* causes final put_task_struct in finish_task_switch(). */
tsk->state = EXIT_DEAD;
schedule();
@@ -972,7 +970,7 @@ NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
if (comp)
complete(comp);
do_exit(code);
}
@@ -1755,27 +1755,27 @@ static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
__releases(rq->lock)
{
struct mm_struct *mm = rq->prev_mm;
-unsigned long prev_task_flags;
+long prev_state;
rq->prev_mm = NULL;
/*
* A task struct has one reference for the use as "current".
-* If a task dies, then it sets EXIT_ZOMBIE in tsk->exit_state and
-* calls schedule one last time. The schedule call will never return,
-* and the scheduled task must drop that reference.
-* The test for EXIT_ZOMBIE must occur while the runqueue locks are
+* If a task dies, then it sets EXIT_DEAD in tsk->state and calls
+* schedule one last time. The schedule call will never return, and
+* the scheduled task must drop that reference.
+* The test for EXIT_DEAD must occur while the runqueue locks are
* still held, otherwise prev could be scheduled on another cpu, die
* there before we look at prev->state, and then the reference would
* be dropped twice.
* Manfred Spraul <manfred@colorfullife.com>
*/
-prev_task_flags = prev->flags;
+prev_state = prev->state;
finish_arch_switch(prev);
finish_lock_switch(rq, prev);
if (mm)
mmdrop(mm);
-if (unlikely(prev_task_flags & PF_DEAD)) {
+if (unlikely(prev_state == EXIT_DEAD)) {
/*
* Remove function-return probe instances associated with this
* task and put them back on the free list.
@@ -5153,7 +5153,7 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD);
/* Cannot have done final schedule yet: would have vanished. */
-BUG_ON(p->flags & PF_DEAD);
+BUG_ON(p->state == EXIT_DEAD);
get_task_struct(p);
@@ -226,8 +226,8 @@ static struct task_struct *select_bad_process(unsigned long *ppoints)
releasing = test_tsk_thread_flag(p, TIF_MEMDIE) ||
p->flags & PF_EXITING;
if (releasing) {
-/* PF_DEAD tasks have already released their mm */
-if (p->flags & PF_DEAD)
+/* TASK_DEAD tasks have already released their mm */
+if (p->state == EXIT_DEAD)
continue;
if (p->flags & PF_EXITING && p == current) {
chosen = p;