Commit d8bc8535 authored by Oleg Nesterov, committed by Ingo Molnar

stop_machine: Change cpu_stop_queue_two_works() to rely on stopper->enabled

Change cpu_stop_queue_two_works() to ensure that both CPUs have
stopper->enabled == T, or fail otherwise.

This way stop_two_cpus() no longer needs to check cpu_active() to
avoid the deadlock. This patch doesn't remove those checks; we will
do that later.

Note: we need to take both stopper->locks at the same time, but this
will also help to remove lglock from stop_machine.c, so I hope this
is fine.
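
As an illustration of the rule the patch enforces (take both per-CPU stopper locks, check both ->enabled flags under the locks, and only queue the two works if both are enabled, otherwise fail with -ENOENT), here is a minimal userspace C sketch. The struct, field and function names are hypothetical stand-ins, and pthread mutexes replace the kernel's per-CPU spinlocks and lglock; this is not the kernel code itself.

/*
 * Minimal userspace sketch (not kernel code): two "stopper" structs,
 * each with its own lock and an enabled flag.  queue_two_works() only
 * succeeds if it can queue on both stoppers while holding both locks.
 * All names here are hypothetical stand-ins.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct stopper {
        pthread_mutex_t lock;
        bool enabled;
        int queued;            /* stands in for the kernel's work list */
};

static struct stopper stoppers[2] = {
        { PTHREAD_MUTEX_INITIALIZER, true,  0 },
        { PTHREAD_MUTEX_INITIALIZER, false, 0 },
};

static int queue_two_works(struct stopper *s1, struct stopper *s2)
{
        int err = -ENOENT;

        /*
         * Callers must pass s1/s2 in a fixed (e.g. index) order so that
         * two concurrent callers cannot deadlock on the pair of locks.
         */
        pthread_mutex_lock(&s1->lock);
        pthread_mutex_lock(&s2->lock);

        if (!s1->enabled || !s2->enabled)
                goto unlock;

        err = 0;
        s1->queued++;
        s2->queued++;
unlock:
        pthread_mutex_unlock(&s2->lock);
        pthread_mutex_unlock(&s1->lock);
        return err;
}

int main(void)
{
        /* Fails with -ENOENT because stoppers[1] is disabled. */
        printf("queue_two_works: %d\n", queue_two_works(&stoppers[0], &stoppers[1]));

        stoppers[1].enabled = true;
        /* Succeeds once both stoppers are enabled. */
        printf("queue_two_works: %d\n", queue_two_works(&stoppers[0], &stoppers[1]));
        return 0;
}

The fixed lock order plays the role that spin_lock_irq()/spin_lock_nested() play in the patch: as long as every caller takes the two locks in the same order, callers cannot deadlock against each other, and the enabled check made under both locks is what lets the caller fail cleanly instead of queueing work on a CPU whose stopper has gone away.
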
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: heiko.carstens@de.ibm.com
Link: http://lkml.kernel.org/r/20151008170141.GA25537@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 5caa1c08
@@ -219,12 +219,27 @@ static int multi_cpu_stop(void *data)
 static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
                                     int cpu2, struct cpu_stop_work *work2)
 {
+        struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
+        struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
+        int err;
+
         lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
-        cpu_stop_queue_work(cpu1, work1);
-        cpu_stop_queue_work(cpu2, work2);
+        spin_lock_irq(&stopper1->lock);
+        spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
+
+        err = -ENOENT;
+        if (!stopper1->enabled || !stopper2->enabled)
+                goto unlock;
+
+        err = 0;
+        __cpu_stop_queue_work(stopper1, work1);
+        __cpu_stop_queue_work(stopper2, work2);
+unlock:
+        spin_unlock(&stopper2->lock);
+        spin_unlock_irq(&stopper1->lock);
         lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);
-        return 0;
+        return err;
 }
 
 /**
  * stop_two_cpus - stops two cpus
@@ -261,12 +276,8 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
         set_state(&msdata, MULTI_STOP_PREPARE);
 
         /*
-         * If we observe both CPUs active we know _cpu_down() cannot yet have
-         * queued its stop_machine works and therefore ours will get executed
-         * first. Or its not either one of our CPUs that's getting unplugged,
-         * in which case we don't care.
-         *
-         * This relies on the stopper workqueues to be FIFO.
+         * We do not want to migrate to inactive CPU. FIXME: move this
+         * into migrate_swap_stop() callback.
          */
         if (!cpu_active(cpu1) || !cpu_active(cpu2)) {
                 preempt_enable();