Commit 0017d735 authored by Peter Zijlstra, committed by Ingo Molnar

sched: Fix TASK_WAKING vs fork deadlock

Oleg noticed a few races with the TASK_WAKING usage on fork.

 - since TASK_WAKING is basically a spinlock (see the sketch below), it
   should be IRQ safe
 - since we set TASK_WAKING (*) without holding rq->lock, there could
   still be a concurrent rq->lock holder, so we do not actually get
   full serialization.

(*) in fact we clear PF_STARTING, which in effect enables TASK_WAKING.
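The "spinlock" nature comes from task_rq_lock(), which at this point
spins while the flag is set, roughly as in the abridged (not verbatim)
sketch below. If TASK_WAKING were left set with IRQs enabled and an
interrupt on the same CPU entered this loop, it would spin forever --
hence the IRQ-safety requirement above:

	static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
	{
		struct rq *rq;

		for (;;) {
			/* TASK_WAKING is effectively a lock bit: spin on it */
			while (task_is_waking(p))
				cpu_relax();
			local_irq_save(*flags);
			rq = task_rq(p);
			raw_spin_lock(&rq->lock);
			if (likely(rq == task_rq(p) && !task_is_waking(p)))
				return rq;
			raw_spin_unlock_irqrestore(&rq->lock, *flags);
		}
	}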

Cure the second issue by not setting TASK_WAKING in sched_fork() at all;
instead, set it only temporarily in wake_up_new_task(), around the
select_task_rq() call.
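Condensed, the resulting wake_up_new_task() flow (abridged from the
patch below) becomes:

	rq = task_rq_lock(p, &flags);
	p->state = TASK_WAKING;		/* held only across the call */
	cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
	set_task_cpu(p, cpu);
	p->state = TASK_RUNNING;	/* cleared before the unlock */
	task_rq_unlock(rq, &flags);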

Cure the first issue by holding rq->lock around the select_task_rq()
call; this keeps IRQs disabled for the duration. It does, however,
require pushing the rq->lock release down into select_task_rq_fair()'s
cgroup code.
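Concretely, the cgroup shares update needs to take other runqueues'
locks, so select_task_rq_fair() now drops and retakes the incoming
rq->lock just around it (abridged from the sched_fair.c hunk below):

	if (tmp) {
		raw_spin_unlock(&rq->lock);	/* update_shares() takes other locks */
		update_shares(tmp);
		raw_spin_lock(&rq->lock);
	}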

Because select_task_rq_fair() still needs to drop the rq->lock, we
cannot fully get rid of TASK_WAKING.
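That is, while rq->lock is dropped inside select_task_rq_fair(),
p->state == TASK_WAKING is the only thing keeping task_rq_lock()
callers away from the task; the ttwu() path (abridged from the patch
below) therefore still relies on it:

	/* p->state == TASK_WAKING was set earlier in ttwu() */
	cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
	if (cpu != orig_cpu)
		set_task_cpu(p, cpu);
	__task_rq_unlock(rq);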
Reported-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 9084bb82
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1046,7 +1046,8 @@ struct sched_class {
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
-	int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
+	int (*select_task_rq)(struct rq *rq, struct task_struct *p,
+			      int sd_flag, int flags);
 
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
...
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -916,14 +916,10 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 /*
  * Check whether the task is waking, we use this to synchronize against
  * ttwu() so that task_cpu() reports a stable number.
- *
- * We need to make an exception for PF_STARTING tasks because the fork
- * path might require task_rq_lock() to work, eg. it can call
- * set_cpus_allowed_ptr() from the cpuset clone_ns code.
  */
 static inline int task_is_waking(struct task_struct *p)
 {
-	return unlikely((p->state == TASK_WAKING) && !(p->flags & PF_STARTING));
+	return unlikely(p->state == TASK_WAKING);
 }
 
 /*
@@ -2320,9 +2316,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
  * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable.
  */
 static inline
-int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
+int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
 {
-	int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
+	int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags);
 
 	/*
 	 * In order not to call set_task_cpu() on a blocking task we need
@@ -2393,17 +2389,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	if (p->sched_class->task_waking)
 		p->sched_class->task_waking(rq, p);
 
-	__task_rq_unlock(rq);
-
-	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
-	if (cpu != orig_cpu) {
-		/*
-		 * Since we migrate the task without holding any rq->lock,
-		 * we need to be careful with task_rq_lock(), since that
-		 * might end up locking an invalid rq.
-		 */
+	cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
+	if (cpu != orig_cpu)
 		set_task_cpu(p, cpu);
-	}
+	__task_rq_unlock(rq);
 
 	rq = cpu_rq(cpu);
 	raw_spin_lock(&rq->lock);
@@ -2530,11 +2519,11 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	__sched_fork(p);
 
 	/*
-	 * We mark the process as waking here. This guarantees that
+	 * We mark the process as running here. This guarantees that
 	 * nobody will actually run it, and a signal or other external
 	 * event cannot wake it up and insert it on the runqueue either.
 	 */
-	p->state = TASK_WAKING;
+	p->state = TASK_RUNNING;
 
 	/*
 	 * Revert to default priority/policy on fork if requested.
...@@ -2601,28 +2590,25 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags) ...@@ -2601,28 +2590,25 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
int cpu __maybe_unused = get_cpu(); int cpu __maybe_unused = get_cpu();
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
rq = task_rq_lock(p, &flags);
p->state = TASK_WAKING;
/* /*
* Fork balancing, do it here and not earlier because: * Fork balancing, do it here and not earlier because:
* - cpus_allowed can change in the fork path * - cpus_allowed can change in the fork path
* - any previously selected cpu might disappear through hotplug * - any previously selected cpu might disappear through hotplug
* *
* We still have TASK_WAKING but PF_STARTING is gone now, meaning * We set TASK_WAKING so that select_task_rq() can drop rq->lock
* ->cpus_allowed is stable, we have preemption disabled, meaning * without people poking at ->cpus_allowed.
* cpu_online_mask is stable.
*/ */
cpu = select_task_rq(p, SD_BALANCE_FORK, 0); cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
set_task_cpu(p, cpu); set_task_cpu(p, cpu);
#endif
/*
* Since the task is not on the rq and we still have TASK_WAKING set
* nobody else will migrate this task.
*/
rq = cpu_rq(cpu);
raw_spin_lock_irqsave(&rq->lock, flags);
BUG_ON(p->state != TASK_WAKING);
p->state = TASK_RUNNING; p->state = TASK_RUNNING;
task_rq_unlock(rq, &flags);
#endif
rq = task_rq_lock(p, &flags);
activate_task(rq, p, 0); activate_task(rq, p, 0);
trace_sched_wakeup_new(rq, p, 1); trace_sched_wakeup_new(rq, p, 1);
check_preempt_curr(rq, p, WF_FORK); check_preempt_curr(rq, p, WF_FORK);
@@ -3068,19 +3054,15 @@ void sched_exec(void)
 {
 	struct task_struct *p = current;
 	struct migration_req req;
-	int dest_cpu, this_cpu;
 	unsigned long flags;
 	struct rq *rq;
-
-	this_cpu = get_cpu();
-	dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
-	if (dest_cpu == this_cpu) {
-		put_cpu();
-		return;
-	}
+	int dest_cpu;
 
 	rq = task_rq_lock(p, &flags);
-	put_cpu();
+	dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0);
+	if (dest_cpu == smp_processor_id())
+		goto unlock;
 
 	/*
 	 * select_task_rq() can race against ->cpus_allowed
 	 */
@@ -3098,6 +3080,7 @@ void sched_exec(void)
 		return;
 	}
+unlock:
 	task_rq_unlock(rq, &flags);
 }
...
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1423,7 +1423,8 @@ select_idle_sibling(struct task_struct *p, struct sched_domain *sd, int target)
  *
  * preempt must be disabled.
  */
-static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
+static int
+select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_flags)
 {
 	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
 	int cpu = smp_processor_id();
@@ -1521,8 +1522,11 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
 				  cpumask_weight(sched_domain_span(sd))))
 			tmp = affine_sd;
 
-		if (tmp)
+		if (tmp) {
+			raw_spin_unlock(&rq->lock);
 			update_shares(tmp);
+			raw_spin_lock(&rq->lock);
+		}
 	}
 #endif
...
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -6,7 +6,8 @@
  */
 #ifdef CONFIG_SMP
-static int select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
+static int
+select_task_rq_idle(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
 {
 	return task_cpu(p); /* IDLE tasks as never migrated */
 }
...
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -948,10 +948,9 @@ static void yield_task_rt(struct rq *rq)
 #ifdef CONFIG_SMP
 static int find_lowest_rq(struct task_struct *task);
 
-static int select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
+static int
+select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
 {
-	struct rq *rq = task_rq(p);
-
 	if (sd_flag != SD_BALANCE_WAKE)
 		return smp_processor_id();
...