Commit e76bd8d9 authored by Rusty Russell, committed by Ingo Molnar

sched: avoid stack var in move_task_off_dead_cpu

Impact: stack usage reduction

With some care, we can avoid needing a temporary cpumask (we can't
really allocate here, since we can't fail).
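
To illustrate, a condensed before/after sketch of the pattern (identifiers as in the patch below; this is a fragment for illustration, not the literal diff):

	/* Before: the node/affinity intersection is built in an on-stack cpumask_t. */
	cpumask_t mask;
	node_to_cpumask_ptr(pnodemask, cpu_to_node(dead_cpu));

	cpus_and(mask, *pnodemask, p->cpus_allowed);
	dest_cpu = cpumask_any_and(cpu_online_mask, &mask);

	/* After: walk the node/online intersection and test affinity per CPU,
	 * so no temporary mask is needed at all. */
	for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask)
		if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
			goto move;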

This version calls cpuset_cpus_allowed_locked() with the task_rq_lock
held.  I'm fairly sure this works, but there might be a deadlock
hiding.
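
The call pattern as it appears in the patch: the cpuset mask is written straight into p->cpus_allowed under the runqueue lock, which is what lets the old on-stack cpumask_t cpus_allowed go away (the removed comment notes that cpuset_cpus_allowed_locked() will not block and must be called within cpuset_lock()/cpuset_unlock()):

	rq = task_rq_lock(p, &flags);
	cpuset_cpus_allowed_locked(p, &p->cpus_allowed);	/* writes the mask in place */
	dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
	task_rq_unlock(rq, &flags);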

And of course, we can't get rid of the last cpumask on stack until we
can use cpumask_of_node instead of node_to_cpumask.
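
Concretely, the patch still has to make a stack copy because node_to_cpumask() returns a cpumask_t by value; once cpumask_of_node() can be used it would hand back a pointer to an existing mask instead. A sketch of the two forms, the second being the follow-up noted in the FIXME below:

	/* Now: one cpumask_t still lives on the stack. */
	cpumask_t _nodemask = node_to_cpumask(cpu_to_node(dead_cpu));
	const struct cpumask *nodemask = &_nodemask;

	/* Later: no stack copy at all. */
	const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));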
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent f17c8607
@@ -6112,38 +6112,28 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 {
 	unsigned long flags;
-	cpumask_t mask;
 	struct rq *rq;
 	int dest_cpu;
+	/* FIXME: Use cpumask_of_node here. */
+	cpumask_t _nodemask = node_to_cpumask(cpu_to_node(dead_cpu));
+	const struct cpumask *nodemask = &_nodemask;
 
-	do {
-		/* On same node? */
-		node_to_cpumask_ptr(pnodemask, cpu_to_node(dead_cpu));
-
-		cpus_and(mask, *pnodemask, p->cpus_allowed);
-		dest_cpu = cpumask_any_and(cpu_online_mask, &mask);
-
-		/* On any allowed CPU? */
-		if (dest_cpu >= nr_cpu_ids)
-			dest_cpu = cpumask_any_and(cpu_online_mask,
-						   &p->cpus_allowed);
+again:
+	/* Look for allowed, online CPU in same node. */
+	for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask)
+		if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+			goto move;
+
+	/* Any allowed, online CPU? */
+	dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
+	if (dest_cpu < nr_cpu_ids)
+		goto move;
 
 	/* No more Mr. Nice Guy. */
 	if (dest_cpu >= nr_cpu_ids) {
-		cpumask_t cpus_allowed;
-
-		cpuset_cpus_allowed_locked(p, &cpus_allowed);
-		/*
-		 * Try to stay on the same cpuset, where the
-		 * current cpuset may be a subset of all cpus.
-		 * The cpuset_cpus_allowed_locked() variant of
-		 * cpuset_cpus_allowed() will not block. It must be
-		 * called within calls to cpuset_lock/cpuset_unlock.
-		 */
 		rq = task_rq_lock(p, &flags);
-		p->cpus_allowed = cpus_allowed;
-		dest_cpu = cpumask_any_and(cpu_online_mask,
-					   &p->cpus_allowed);
+		cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
+		dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
 		task_rq_unlock(rq, &flags);
 
 		/*
@@ -6157,7 +6147,11 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 				task_pid_nr(p), p->comm, dead_cpu);
 		}
 	}
-	} while (!__migrate_task_irq(p, dead_cpu, dest_cpu));
+
+move:
+	/* It can have affinity changed while we were choosing. */
+	if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
+		goto again;
 }
 
 /*