Commit 02cb7aa9 authored by Oleg Nesterov's avatar Oleg Nesterov Committed by Ingo Molnar

stop_machine: Move 'cpu_stopper_task' and 'stop_cpus_work' into 'struct cpu_stopper'

Multiple DEFINE_PER_CPU's do not make sense, move all the per-cpu
variables into 'struct cpu_stopper'.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dave@stgolabs.net
Cc: der.herr@hofr.at
Cc: paulmck@linux.vnet.ibm.com
Cc: riel@redhat.com
Cc: viro@ZenIV.linux.org.uk
Link: http://lkml.kernel.org/r/20150630012944.GA23924@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent fe32d3cd
...@@ -35,13 +35,16 @@ struct cpu_stop_done { ...@@ -35,13 +35,16 @@ struct cpu_stop_done {
/* the actual stopper, one per every possible cpu, enabled on online cpus */ /* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper { struct cpu_stopper {
struct task_struct *thread;
spinlock_t lock; spinlock_t lock;
bool enabled; /* is this stopper enabled? */ bool enabled; /* is this stopper enabled? */
struct list_head works; /* list of pending works */ struct list_head works; /* list of pending works */
struct cpu_stop_work stop_work; /* for stop_cpus */
}; };
static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper); static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static DEFINE_PER_CPU(struct task_struct *, cpu_stopper_task);
static bool stop_machine_initialized = false; static bool stop_machine_initialized = false;
/* /*
...@@ -74,7 +77,6 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed) ...@@ -74,7 +77,6 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{ {
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
struct task_struct *p = per_cpu(cpu_stopper_task, cpu);
unsigned long flags; unsigned long flags;
...@@ -82,7 +84,7 @@ static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) ...@@ -82,7 +84,7 @@ static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
if (stopper->enabled) { if (stopper->enabled) {
list_add_tail(&work->list, &stopper->works); list_add_tail(&work->list, &stopper->works);
wake_up_process(p); wake_up_process(stopper->thread);
} else } else
cpu_stop_signal_done(work->done, false); cpu_stop_signal_done(work->done, false);
...@@ -293,7 +295,6 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg, ...@@ -293,7 +295,6 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
/* static data for stop_cpus */ /* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex); static DEFINE_MUTEX(stop_cpus_mutex);
static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
static void queue_stop_cpus_work(const struct cpumask *cpumask, static void queue_stop_cpus_work(const struct cpumask *cpumask,
cpu_stop_fn_t fn, void *arg, cpu_stop_fn_t fn, void *arg,
...@@ -304,7 +305,7 @@ static void queue_stop_cpus_work(const struct cpumask *cpumask, ...@@ -304,7 +305,7 @@ static void queue_stop_cpus_work(const struct cpumask *cpumask,
/* initialize works and done */ /* initialize works and done */
for_each_cpu(cpu, cpumask) { for_each_cpu(cpu, cpumask) {
work = &per_cpu(stop_cpus_work, cpu); work = &per_cpu(cpu_stopper.stop_work, cpu);
work->fn = fn; work->fn = fn;
work->arg = arg; work->arg = arg;
work->done = done; work->done = done;
...@@ -317,7 +318,7 @@ static void queue_stop_cpus_work(const struct cpumask *cpumask, ...@@ -317,7 +318,7 @@ static void queue_stop_cpus_work(const struct cpumask *cpumask,
*/ */
lg_global_lock(&stop_cpus_lock); lg_global_lock(&stop_cpus_lock);
for_each_cpu(cpu, cpumask) for_each_cpu(cpu, cpumask)
cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu)); cpu_stop_queue_work(cpu, &per_cpu(cpu_stopper.stop_work, cpu));
lg_global_unlock(&stop_cpus_lock); lg_global_unlock(&stop_cpus_lock);
} }
...@@ -458,7 +459,7 @@ extern void sched_set_stop_task(int cpu, struct task_struct *stop); ...@@ -458,7 +459,7 @@ extern void sched_set_stop_task(int cpu, struct task_struct *stop);
static void cpu_stop_create(unsigned int cpu) static void cpu_stop_create(unsigned int cpu)
{ {
sched_set_stop_task(cpu, per_cpu(cpu_stopper_task, cpu)); sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
} }
static void cpu_stop_park(unsigned int cpu) static void cpu_stop_park(unsigned int cpu)
...@@ -485,7 +486,7 @@ static void cpu_stop_unpark(unsigned int cpu) ...@@ -485,7 +486,7 @@ static void cpu_stop_unpark(unsigned int cpu)
} }
static struct smp_hotplug_thread cpu_stop_threads = { static struct smp_hotplug_thread cpu_stop_threads = {
.store = &cpu_stopper_task, .store = &cpu_stopper.thread,
.thread_should_run = cpu_stop_should_run, .thread_should_run = cpu_stop_should_run,
.thread_fn = cpu_stopper_thread, .thread_fn = cpu_stopper_thread,
.thread_comm = "migration/%u", .thread_comm = "migration/%u",
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment