Commit 475ea6c6 authored by Valentin Schneider, committed by Peter Zijlstra

sched: Don't defer CPU pick to migration_cpu_stop()

Will reported that the 'XXX __migrate_task() can fail' in migration_cpu_stop()
can happen, and it *is* sort of a big deal. Looking at it some more, one
will note there is a glaring hole in the deferred CPU selection:

  (w/ CONFIG_CPUSET=n, so that the affinity mask passed via taskset doesn't
  get AND'd with cpu_online_mask)

  $ taskset -pc 0-2 $PID
  # offline CPUs 3-4
  $ taskset -pc 3-5 $PID
    `\
      $PID may stay on 0-2 due to the cpumask_any_distribute() picking an
      offline CPU and __migrate_task() refusing to do anything due to
      is_cpu_allowed().
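
To make the hole concrete, here is a minimal userspace model of the pre-fix
flow (plain C; NR_CPUS, any_distribute(), migrate_task() and the arrays are
illustrative stand-ins, not the kernel's helpers): the fallback pick in the
stopper ignores the online state of the CPUs, and the migration step then
refuses the move, so the task silently keeps its stale placement.

  /*
   * Userspace model of the pre-fix hole; a sketch, not kernel code.
   * Helper names mirror the kernel ones but are plain array lookups.
   */
  #include <stdbool.h>
  #include <stdio.h>

  #define NR_CPUS 8

  /* CPUs 3-4 have been offlined; the new affinity mask is 3-5. */
  static const bool cpu_online[NR_CPUS] = {
          [0] = true, [1] = true, [2] = true, [5] = true,
  };
  static const bool cpus_mask[NR_CPUS] = {
          [3] = true, [4] = true, [5] = true,
  };

  /* Stand-in for cpumask_any_distribute(): a set bit, no online filter. */
  static int any_distribute(const bool *mask)
  {
          for (int cpu = 0; cpu < NR_CPUS; cpu++)
                  if (mask[cpu])
                          return cpu;
          return -1;
  }

  /* Stand-in for __migrate_task()/is_cpu_allowed(): refuse a bad target. */
  static bool migrate_task(int *task_cpu, int dest_cpu)
  {
          if (dest_cpu < 0 || !cpus_mask[dest_cpu] || !cpu_online[dest_cpu])
                  return false;   /* nothing moves, no error reported */
          *task_cpu = dest_cpu;
          return true;
  }

  int main(void)
  {
          int task_cpu = 1;       /* still on the old 0-2 affinity */
          int dest_cpu = any_distribute(cpus_mask);  /* can pick offline CPU 3 */

          if (!migrate_task(&task_cpu, dest_cpu))
                  printf("pick of CPU %d refused, task stays on CPU %d\n",
                         dest_cpu, task_cpu);
          return 0;
  }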

set_cpus_allowed_ptr() goes to some length to pick a dest_cpu that matches
the right constraints vs affinity and the online/active state of the
CPUs. Reuse that instead of discarding it in the affine_move_task() case.
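
Below is a minimal sketch of the fixed flow under the same sort of userspace
model (pick_dest_cpu() and struct migration_arg_model are illustrative
stand-ins, not kernel code): the setter already intersects the requested mask
with the active CPUs when it picks dest_cpu, roughly what the
cpumask_any_and_distribute() pick in __set_cpus_allowed_ptr() does, and with
this patch that value is carried in migration_arg.dest_cpu instead of being
re-derived in migration_cpu_stop().

  /*
   * Sketch of the fixed flow; a simplified userspace model, not the
   * kernel's code.
   */
  #include <stdbool.h>

  #define NR_CPUS 8

  struct migration_arg_model {
          int dest_cpu;   /* was -1 ("any") before this patch */
  };

  /* Stand-in for the online/active-aware pick done by the setter. */
  static int pick_dest_cpu(const bool *new_mask, const bool *cpu_active)
  {
          for (int cpu = 0; cpu < NR_CPUS; cpu++)
                  if (new_mask[cpu] && cpu_active[cpu])
                          return cpu;
          return -1;      /* the kernel returns -EINVAL in this case */
  }

  int main(void)
  {
          /* New affinity 3-5 while CPUs 3-4 are offline: pick resolves to 5. */
          const bool new_mask[NR_CPUS]   = { [3] = true, [4] = true, [5] = true };
          const bool cpu_active[NR_CPUS] = { [0] = true, [1] = true, [2] = true,
                                             [5] = true };
          struct migration_arg_model arg = {
                  /* Computed once by the setter, reused as-is by the stopper. */
                  .dest_cpu = pick_dest_cpu(new_mask, cpu_active),
          };

          return arg.dest_cpu == 5 ? 0 : 1;
  }

The comment added in affine_move_task() below covers the other half: when a
second affinity change piggybacks on an already-installed pending, the stored
dest_cpu is refreshed under p->pi_lock so migration_cpu_stop() never acts on a
stale target.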

Fixes: 6d337eab ("sched: Fix migrate_disable() vs set_cpus_allowed_ptr()")
Reported-by: Will Deacon <will@kernel.org>
Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20210526205751.842360-2-valentin.schneider@arm.com
parent 08f7c2f4
kernel/sched/core.c

@@ -2273,7 +2273,6 @@ static int migration_cpu_stop(void *data)
 	struct migration_arg *arg = data;
 	struct set_affinity_pending *pending = arg->pending;
 	struct task_struct *p = arg->task;
-	int dest_cpu = arg->dest_cpu;
 	struct rq *rq = this_rq();
 	bool complete = false;
 	struct rq_flags rf;
@@ -2311,19 +2310,15 @@ static int migration_cpu_stop(void *data)
 		if (pending) {
 			p->migration_pending = NULL;
 			complete = true;
-		}
 
-		if (dest_cpu < 0) {
 			if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))
 				goto out;
-
-			dest_cpu = cpumask_any_distribute(&p->cpus_mask);
 		}
 
 		if (task_on_rq_queued(p))
-			rq = __migrate_task(rq, &rf, p, dest_cpu);
+			rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
 		else
-			p->wake_cpu = dest_cpu;
+			p->wake_cpu = arg->dest_cpu;
 
 		/*
 		 * XXX __migrate_task() can fail, at which point we might end
@@ -2606,7 +2601,7 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
 			init_completion(&my_pending.done);
 			my_pending.arg = (struct migration_arg) {
 				.task = p,
-				.dest_cpu = -1,		/* any */
+				.dest_cpu = dest_cpu,
 				.pending = &my_pending,
 			};
 
@@ -2614,6 +2609,15 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
 		} else {
 			pending = p->migration_pending;
 			refcount_inc(&pending->refs);
+			/*
+			 * Affinity has changed, but we've already installed a
+			 * pending. migration_cpu_stop() *must* see this, else
+			 * we risk a completion of the pending despite having a
+			 * task on a disallowed CPU.
+			 *
+			 * Serialized by p->pi_lock, so this is safe.
+			 */
+			pending->arg.dest_cpu = dest_cpu;
 		}
 	}
 	pending = p->migration_pending;