Commit 090e77c3 authored by Thomas Gleixner

cpu/hotplug: Restructure FROZEN state handling

There are only a few callbacks which really care about FROZEN
vs. !FROZEN. No need to have extra states for this.

Publish the frozen state in an extra variable which is updated under
the hotplug lock and let the users interested deal with it w/o
imposing that extra state checks on everyone.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: Rik van Riel <riel@redhat.com>
Cc: Rafael Wysocki <rafael.j.wysocki@intel.com>
Cc: "Srivatsa S. Bhat" <srivatsa@mit.edu>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul Turner <pjt@google.com>
Link: http://lkml.kernel.org/r/20160226182340.334912357@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent fc77dbd3
...@@ -118,6 +118,7 @@ enum { ...@@ -118,6 +118,7 @@ enum {
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
extern bool cpuhp_tasks_frozen;
/* Need to know about CPUs going up/down? */ /* Need to know about CPUs going up/down? */
#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
#define cpu_notifier(fn, pri) { \ #define cpu_notifier(fn, pri) { \
...@@ -177,6 +178,7 @@ extern void cpu_maps_update_done(void); ...@@ -177,6 +178,7 @@ extern void cpu_maps_update_done(void);
#define cpu_notifier_register_done cpu_maps_update_done #define cpu_notifier_register_done cpu_maps_update_done
#else /* CONFIG_SMP */ #else /* CONFIG_SMP */
#define cpuhp_tasks_frozen 0
#define cpu_notifier(fn, pri) do { (void)(fn); } while (0) #define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0) #define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
......
...@@ -29,6 +29,8 @@ ...@@ -29,6 +29,8 @@
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */ /* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock); static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
/* /*
* The following two APIs (cpu_maps_update_begin/done) must be used when * The following two APIs (cpu_maps_update_begin/done) must be used when
...@@ -207,27 +209,30 @@ int __register_cpu_notifier(struct notifier_block *nb) ...@@ -207,27 +209,30 @@ int __register_cpu_notifier(struct notifier_block *nb)
return raw_notifier_chain_register(&cpu_chain, nb); return raw_notifier_chain_register(&cpu_chain, nb);
} }
static int __cpu_notify(unsigned long val, void *v, int nr_to_call, static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
int *nr_calls) int *nr_calls)
{ {
unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
void *hcpu = (void *)(long)cpu;
int ret; int ret;
ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call, ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
nr_calls); nr_calls);
return notifier_to_errno(ret); return notifier_to_errno(ret);
} }
static int cpu_notify(unsigned long val, void *v) static int cpu_notify(unsigned long val, unsigned int cpu)
{ {
return __cpu_notify(val, v, -1, NULL); return __cpu_notify(val, cpu, -1, NULL);
} }
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
static void cpu_notify_nofail(unsigned long val, void *v) static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
{ {
BUG_ON(cpu_notify(val, v)); BUG_ON(cpu_notify(val, cpu));
} }
EXPORT_SYMBOL(register_cpu_notifier); EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier); EXPORT_SYMBOL(__register_cpu_notifier);
...@@ -311,27 +316,21 @@ static inline void check_for_tasks(int dead_cpu) ...@@ -311,27 +316,21 @@ static inline void check_for_tasks(int dead_cpu)
read_unlock(&tasklist_lock); read_unlock(&tasklist_lock);
} }
struct take_cpu_down_param {
unsigned long mod;
void *hcpu;
};
/* Take this CPU down. */ /* Take this CPU down. */
static int take_cpu_down(void *_param) static int take_cpu_down(void *_param)
{ {
struct take_cpu_down_param *param = _param; int err, cpu = smp_processor_id();
int err;
/* Ensure this CPU doesn't handle any more interrupts. */ /* Ensure this CPU doesn't handle any more interrupts. */
err = __cpu_disable(); err = __cpu_disable();
if (err < 0) if (err < 0)
return err; return err;
cpu_notify(CPU_DYING | param->mod, param->hcpu); cpu_notify(CPU_DYING, cpu);
/* Give up timekeeping duties */ /* Give up timekeeping duties */
tick_handover_do_timer(); tick_handover_do_timer();
/* Park the stopper thread */ /* Park the stopper thread */
stop_machine_park((long)param->hcpu); stop_machine_park(cpu);
return 0; return 0;
} }
...@@ -339,12 +338,6 @@ static int take_cpu_down(void *_param) ...@@ -339,12 +338,6 @@ static int take_cpu_down(void *_param)
static int _cpu_down(unsigned int cpu, int tasks_frozen) static int _cpu_down(unsigned int cpu, int tasks_frozen)
{ {
int err, nr_calls = 0; int err, nr_calls = 0;
void *hcpu = (void *)(long)cpu;
unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
struct take_cpu_down_param tcd_param = {
.mod = mod,
.hcpu = hcpu,
};
if (num_online_cpus() == 1) if (num_online_cpus() == 1)
return -EBUSY; return -EBUSY;
...@@ -354,10 +347,12 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen) ...@@ -354,10 +347,12 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
cpu_hotplug_begin(); cpu_hotplug_begin();
err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); cpuhp_tasks_frozen = tasks_frozen;
err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
if (err) { if (err) {
nr_calls--; nr_calls--;
__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL); __cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
pr_warn("%s: attempt to take down CPU %u failed\n", pr_warn("%s: attempt to take down CPU %u failed\n",
__func__, cpu); __func__, cpu);
goto out_release; goto out_release;
...@@ -389,10 +384,10 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen) ...@@ -389,10 +384,10 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
/* /*
* So now all preempt/rcu users must observe !cpu_active(). * So now all preempt/rcu users must observe !cpu_active().
*/ */
err = stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
if (err) { if (err) {
/* CPU didn't die: tell everyone. Can't complain. */ /* CPU didn't die: tell everyone. Can't complain. */
cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu); cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
irq_unlock_sparse(); irq_unlock_sparse();
goto out_release; goto out_release;
} }
...@@ -419,14 +414,14 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen) ...@@ -419,14 +414,14 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
/* CPU is completely dead: tell everyone. Too late to complain. */ /* CPU is completely dead: tell everyone. Too late to complain. */
tick_cleanup_dead_cpu(cpu); tick_cleanup_dead_cpu(cpu);
cpu_notify_nofail(CPU_DEAD | mod, hcpu); cpu_notify_nofail(CPU_DEAD, cpu);
check_for_tasks(cpu); check_for_tasks(cpu);
out_release: out_release:
cpu_hotplug_done(); cpu_hotplug_done();
if (!err) if (!err)
cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu); cpu_notify_nofail(CPU_POST_DEAD, cpu);
return err; return err;
} }
...@@ -485,10 +480,8 @@ void smpboot_thread_init(void) ...@@ -485,10 +480,8 @@ void smpboot_thread_init(void)
/* Requires cpu_add_remove_lock to be held */ /* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen) static int _cpu_up(unsigned int cpu, int tasks_frozen)
{ {
int ret, nr_calls = 0;
void *hcpu = (void *)(long)cpu;
unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
struct task_struct *idle; struct task_struct *idle;
int ret, nr_calls = 0;
cpu_hotplug_begin(); cpu_hotplug_begin();
...@@ -507,7 +500,9 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen) ...@@ -507,7 +500,9 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen)
if (ret) if (ret)
goto out; goto out;
ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls); cpuhp_tasks_frozen = tasks_frozen;
ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
if (ret) { if (ret) {
nr_calls--; nr_calls--;
pr_warn("%s: attempt to bring up CPU %u failed\n", pr_warn("%s: attempt to bring up CPU %u failed\n",
...@@ -523,11 +518,11 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen) ...@@ -523,11 +518,11 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen)
BUG_ON(!cpu_online(cpu)); BUG_ON(!cpu_online(cpu));
/* Now call notifier in preparation. */ /* Now call notifier in preparation. */
cpu_notify(CPU_ONLINE | mod, hcpu); cpu_notify(CPU_ONLINE, cpu);
out_notify: out_notify:
if (ret != 0) if (ret != 0)
__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL); __cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
out: out:
cpu_hotplug_done(); cpu_hotplug_done();
...@@ -719,13 +714,7 @@ core_initcall(cpu_hotplug_pm_sync_init); ...@@ -719,13 +714,7 @@ core_initcall(cpu_hotplug_pm_sync_init);
*/ */
void notify_cpu_starting(unsigned int cpu) void notify_cpu_starting(unsigned int cpu)
{ {
unsigned long val = CPU_STARTING; cpu_notify(CPU_STARTING, cpu);
#ifdef CONFIG_PM_SLEEP_SMP
if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
cpu_notify(val, (void *)(long)cpu);
} }
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment