Commit 5e16bbc2 authored by Peter Zijlstra, committed by Thomas Gleixner

sched: Streamline the task migration locking a little

The whole migrate_task{,s}() locking seems a little shaky; there's a
lot of dropping and reacquiring happening. Pull the locking up into the
callers as far as possible to streamline the lot.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: ktkhai@parallels.com
Cc: rostedt@goodmis.org
Cc: juri.lelli@gmail.com
Cc: pang.xunlei@linaro.org
Cc: oleg@redhat.com
Cc: wanpeng.li@linux.intel.com
Cc: umgwanakikbuti@gmail.com
Link: http://lkml.kernel.org/r/20150611124743.755256708@infradead.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 5cc389bc
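
The diff below is the whole change. For readers who want the locking pattern
in isolation: the old helpers took p->pi_lock and rq->lock themselves and
returned a status, while the new ones assume the caller already holds both
locks and return whichever rq is locked on exit. The following is a minimal,
self-contained userspace sketch of that discipline; it uses pthread mutexes
instead of the kernel's raw spinlocks, and the names (struct task, struct rq,
migrate_locked(), migrate()) are illustrative only, not kernel APIs.

/*
 * Minimal sketch of the locking discipline this patch moves to: the
 * caller takes both locks once and the helper only assumes they are
 * held.  Userspace analogy with pthreads; the names (struct task,
 * struct rq, migrate_locked, migrate) are illustrative, not kernel
 * APIs, and lock ordering between runqueues is ignored here.
 */
#include <pthread.h>
#include <stdio.h>

struct rq {
        pthread_mutex_t lock;
        int nr_running;
};

struct task {
        pthread_mutex_t pi_lock;
        struct rq *rq;
};

/*
 * Helper: assumes t->pi_lock and t->rq->lock are already held by the
 * caller (the role __migrate_task()/move_queued_task() play after
 * this patch).  Returns the rq that is locked on exit.
 */
static struct rq *migrate_locked(struct task *t, struct rq *dst)
{
        struct rq *src = t->rq;

        if (src == dst)                 /* nothing to do */
                return src;

        src->nr_running--;
        pthread_mutex_lock(&dst->lock); /* real code must order rq locks */
        t->rq = dst;
        dst->nr_running++;
        pthread_mutex_unlock(&src->lock);

        return dst;                     /* caller now holds dst->lock */
}

/* Caller: all lock/unlock bookkeeping lives in one place. */
static void migrate(struct task *t, struct rq *dst)
{
        struct rq *rq;

        pthread_mutex_lock(&t->pi_lock);
        pthread_mutex_lock(&t->rq->lock);
        rq = migrate_locked(t, dst);
        pthread_mutex_unlock(&rq->lock);
        pthread_mutex_unlock(&t->pi_lock);
}

int main(void)
{
        struct rq a = { PTHREAD_MUTEX_INITIALIZER, 1 };
        struct rq b = { PTHREAD_MUTEX_INITIALIZER, 0 };
        struct task t = { PTHREAD_MUTEX_INITIALIZER, &a };

        migrate(&t, &b);
        printf("a.nr_running=%d b.nr_running=%d\n", a.nr_running, b.nr_running);
        return 0;
}

Having the helper return the rq it leaves locked is the same trick
__migrate_task() uses below, which is what lets migrate_tasks() notice that it
is no longer holding the dead CPU's rq->lock and re-take it.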
@@ -1065,10 +1065,8 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
  *
  * Returns (locked) new rq. Old rq's lock is released.
  */
-static struct rq *move_queued_task(struct task_struct *p, int new_cpu)
+static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu)
 {
-        struct rq *rq = task_rq(p);
-
         lockdep_assert_held(&rq->lock);
 
         dequeue_task(rq, p, 0);
@@ -1100,41 +1098,19 @@ struct migration_arg {
  *
  * So we race with normal scheduler movements, but that's OK, as long
  * as the task is no longer on this CPU.
- *
- * Returns non-zero if task was successfully migrated.
  */
-static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
+static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)
 {
-        struct rq *rq;
-        int ret = 0;
-
         if (unlikely(!cpu_active(dest_cpu)))
-                return ret;
-
-        rq = cpu_rq(src_cpu);
-
-        raw_spin_lock(&p->pi_lock);
-        raw_spin_lock(&rq->lock);
-        /* Already moved. */
-        if (task_cpu(p) != src_cpu)
-                goto done;
+                return rq;
 
         /* Affinity changed (again). */
         if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
-                goto fail;
+                return rq;
 
-        /*
-         * If we're not on a rq, the next wake-up will ensure we're
-         * placed properly.
-         */
-        if (task_on_rq_queued(p))
-                rq = move_queued_task(p, dest_cpu);
-done:
-        ret = 1;
-fail:
-        raw_spin_unlock(&rq->lock);
-        raw_spin_unlock(&p->pi_lock);
-        return ret;
+        rq = move_queued_task(rq, p, dest_cpu);
+
+        return rq;
 }
 
 /*
@@ -1145,6 +1121,8 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 static int migration_cpu_stop(void *data)
 {
         struct migration_arg *arg = data;
+        struct task_struct *p = arg->task;
+        struct rq *rq = this_rq();
 
         /*
          * The original target cpu might have gone down and we might
@@ -1157,7 +1135,19 @@ static int migration_cpu_stop(void *data)
          * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
          */
         sched_ttwu_pending();
-        __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
+
+        raw_spin_lock(&p->pi_lock);
+        raw_spin_lock(&rq->lock);
+        /*
+         * If task_rq(p) != rq, it cannot be migrated here, because we're
+         * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
+         * we're holding p->pi_lock.
+         */
+        if (task_rq(p) == rq && task_on_rq_queued(p))
+                rq = __migrate_task(rq, p, arg->dest_cpu);
+        raw_spin_unlock(&rq->lock);
+        raw_spin_unlock(&p->pi_lock);
+
         local_irq_enable();
         return 0;
 }
@@ -1212,7 +1202,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
                 tlb_migrate_finish(p->mm);
                 return 0;
         } else if (task_on_rq_queued(p))
-                rq = move_queued_task(p, dest_cpu);
+                rq = move_queued_task(rq, p, dest_cpu);
 out:
         task_rq_unlock(rq, p, &flags);
 
@@ -5043,9 +5033,9 @@ static struct task_struct fake_task = {
  * there's no concurrency possible, we hold the required locks anyway
  * because of lock validation efforts.
  */
-static void migrate_tasks(unsigned int dead_cpu)
+static void migrate_tasks(struct rq *dead_rq)
 {
-        struct rq *rq = cpu_rq(dead_cpu);
+        struct rq *rq = dead_rq;
         struct task_struct *next, *stop = rq->stop;
         int dest_cpu;
 
@@ -5067,7 +5057,7 @@ static void migrate_tasks(unsigned int dead_cpu)
          */
         update_rq_clock(rq);
 
-        for ( ; ; ) {
+        for (;;) {
                 /*
                  * There's this thread running, bail when that's the only
                  * remaining thread.
@@ -5080,13 +5070,15 @@ static void migrate_tasks(unsigned int dead_cpu)
                 next->sched_class->put_prev_task(rq, next);
 
                 /* Find suitable destination for @next, with force if needed. */
-                dest_cpu = select_fallback_rq(dead_cpu, next);
-                raw_spin_unlock(&rq->lock);
-
-                __migrate_task(next, dead_cpu, dest_cpu);
+                dest_cpu = select_fallback_rq(dead_rq->cpu, next);
 
-                raw_spin_lock(&rq->lock);
+                rq = __migrate_task(rq, next, dest_cpu);
+                if (rq != dead_rq) {
+                        raw_spin_unlock(&rq->lock);
+                        rq = dead_rq;
+                        raw_spin_lock(&rq->lock);
+                }
         }
 
         rq->stop = stop;
 }
@@ -5337,7 +5329,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                         BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
                         set_rq_offline(rq);
                 }
-                migrate_tasks(cpu);
+                migrate_tasks(rq);
                 BUG_ON(rq->nr_running != 1); /* the migration thread */
                 raw_spin_unlock_irqrestore(&rq->lock, flags);
                 break;