Commit 1b2bc75c authored by Ralf Baechle

MIPS: Add arch generic CPU hotplug

Each platform has to add support for CPU hotplugging itself by providing
suitable definitions for the cpu_disable and cpu_die of the smp_ops
methods and setting SYS_SUPPORTS_HOTPLUG_CPU.  A platform should only set
SYS_SUPPORTS_HOTPLUG_CPU once all its smp_ops definitions have the
necessary changes.  This patch contains the changes to the dummy smp_ops
definition for uni-processor systems.

Parts of the code contributed by Cavium Inc.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 4ac4aa5c
...@@ -784,8 +784,17 @@ config SYS_HAS_EARLY_PRINTK ...@@ -784,8 +784,17 @@ config SYS_HAS_EARLY_PRINTK
bool bool
config HOTPLUG_CPU config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
depends on SMP && HOTPLUG && SYS_SUPPORTS_HOTPLUG_CPU
help
Say Y here to allow turning CPUs off and on. CPUs can be
controlled through /sys/devices/system/cpu.
(Note: power management support will enable this option
automatically on SMP systems.)
Say N if you want to disable CPU hotplug.
config SYS_SUPPORTS_HOTPLUG_CPU
bool bool
default n
config I8259 config I8259
bool bool
......
...@@ -26,6 +26,10 @@ struct plat_smp_ops { ...@@ -26,6 +26,10 @@ struct plat_smp_ops {
void (*boot_secondary)(int cpu, struct task_struct *idle); void (*boot_secondary)(int cpu, struct task_struct *idle);
void (*smp_setup)(void); void (*smp_setup)(void);
void (*prepare_cpus)(unsigned int max_cpus); void (*prepare_cpus)(unsigned int max_cpus);
#ifdef CONFIG_HOTPLUG_CPU
int (*cpu_disable)(void);
void (*cpu_die)(unsigned int cpu);
#endif
}; };
extern void register_smp_ops(struct plat_smp_ops *ops); extern void register_smp_ops(struct plat_smp_ops *ops);
......
...@@ -41,6 +41,7 @@ extern int __cpu_logical_map[NR_CPUS]; ...@@ -41,6 +41,7 @@ extern int __cpu_logical_map[NR_CPUS];
/* Octeon - Tell another core to flush its icache */ /* Octeon - Tell another core to flush its icache */
#define SMP_ICACHE_FLUSH 0x4 #define SMP_ICACHE_FLUSH 0x4
extern cpumask_t cpu_callin_map;
extern void asmlinkage smp_bootstrap(void); extern void asmlinkage smp_bootstrap(void);
...@@ -56,6 +57,24 @@ static inline void smp_send_reschedule(int cpu) ...@@ -56,6 +57,24 @@ static inline void smp_send_reschedule(int cpu)
mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF); mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF);
} }
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Arch hook for generic CPU hotplug: take the calling CPU offline.
 * Delegates to the platform's smp_ops cpu_disable method and returns
 * its result (e.g. -ENOSYS from the UP dummy implementation).
 * mp_ops is declared here rather than in a header because it is
 * private to the MIPS SMP core.
 */
static inline int __cpu_disable(void)
{
extern struct plat_smp_ops *mp_ops; /* private */
return mp_ops->cpu_disable();
}
/*
 * Arch hook for generic CPU hotplug: finish tearing down @cpu after it
 * has been disabled.  Delegates to the platform's smp_ops cpu_die
 * method (the UP dummy BUG()s here, since a uniprocessor system can
 * never unplug a CPU).  NOTE(review): generic hotplug invokes this
 * from a surviving CPU, not from @cpu itself — confirm against caller.
 */
static inline void __cpu_die(unsigned int cpu)
{
extern struct plat_smp_ops *mp_ops; /* private */
mp_ops->cpu_die(cpu);
}
extern void play_dead(void);
#endif
extern asmlinkage void smp_call_function_interrupt(void); extern asmlinkage void smp_call_function_interrupt(void);
extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_single_ipi(int cpu);
......
...@@ -50,10 +50,15 @@ ...@@ -50,10 +50,15 @@
*/ */
void __noreturn cpu_idle(void) void __noreturn cpu_idle(void)
{ {
int cpu;
/* CPU is going idle. */
cpu = smp_processor_id();
/* endless idle loop with no priority at all */ /* endless idle loop with no priority at all */
while (1) { while (1) {
tick_nohz_stop_sched_tick(1); tick_nohz_stop_sched_tick(1);
while (!need_resched()) { while (!need_resched() && cpu_online(cpu)) {
#ifdef CONFIG_MIPS_MT_SMTC #ifdef CONFIG_MIPS_MT_SMTC
extern void smtc_idle_loop_hook(void); extern void smtc_idle_loop_hook(void);
...@@ -62,6 +67,12 @@ void __noreturn cpu_idle(void) ...@@ -62,6 +67,12 @@ void __noreturn cpu_idle(void)
if (cpu_wait) if (cpu_wait)
(*cpu_wait)(); (*cpu_wait)();
} }
#ifdef CONFIG_HOTPLUG_CPU
if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) &&
(system_state == SYSTEM_RUNNING ||
system_state == SYSTEM_BOOTING))
play_dead();
#endif
tick_nohz_restart_sched_tick(); tick_nohz_restart_sched_tick();
preempt_enable_no_resched(); preempt_enable_no_resched();
schedule(); schedule();
......
...@@ -55,6 +55,18 @@ static void __init up_prepare_cpus(unsigned int max_cpus) ...@@ -55,6 +55,18 @@ static void __init up_prepare_cpus(unsigned int max_cpus)
{ {
} }
#ifdef CONFIG_HOTPLUG_CPU
/*
 * cpu_disable for the uniprocessor dummy smp_ops: a UP system has no
 * CPU that could be unplugged, so the operation is never supported.
 */
static int up_cpu_disable(void)
{
	const int err = -ENOSYS;	/* hotplug not implemented on UP */

	return err;
}
/*
 * cpu_die for the uniprocessor dummy smp_ops.  up_cpu_disable() always
 * fails with -ENOSYS, so no CPU can ever reach the dying state on UP;
 * getting here is a kernel bug.
 */
static void up_cpu_die(unsigned int cpu)
{
BUG();
}
#endif
struct plat_smp_ops up_smp_ops = { struct plat_smp_ops up_smp_ops = {
.send_ipi_single = up_send_ipi_single, .send_ipi_single = up_send_ipi_single,
.send_ipi_mask = up_send_ipi_mask, .send_ipi_mask = up_send_ipi_mask,
...@@ -64,4 +76,8 @@ struct plat_smp_ops up_smp_ops = { ...@@ -64,4 +76,8 @@ struct plat_smp_ops up_smp_ops = {
.boot_secondary = up_boot_secondary, .boot_secondary = up_boot_secondary,
.smp_setup = up_smp_setup, .smp_setup = up_smp_setup,
.prepare_cpus = up_prepare_cpus, .prepare_cpus = up_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = up_cpu_disable,
.cpu_die = up_cpu_die,
#endif
}; };
...@@ -45,7 +45,7 @@ ...@@ -45,7 +45,7 @@
#include <asm/mipsmtregs.h> #include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */ #endif /* CONFIG_MIPS_MT_SMTC */
static volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
...@@ -201,6 +201,8 @@ void __devinit smp_prepare_boot_cpu(void) ...@@ -201,6 +201,8 @@ void __devinit smp_prepare_boot_cpu(void)
* and keep control until "cpu_online(cpu)" is set. Note: cpu is * and keep control until "cpu_online(cpu)" is set. Note: cpu is
* physical, not logical. * physical, not logical.
*/ */
static struct task_struct *cpu_idle_thread[NR_CPUS];
int __cpuinit __cpu_up(unsigned int cpu) int __cpuinit __cpu_up(unsigned int cpu)
{ {
struct task_struct *idle; struct task_struct *idle;
...@@ -210,9 +212,16 @@ int __cpuinit __cpu_up(unsigned int cpu) ...@@ -210,9 +212,16 @@ int __cpuinit __cpu_up(unsigned int cpu)
* The following code is purely to make sure * The following code is purely to make sure
* Linux can schedule processes on this slave. * Linux can schedule processes on this slave.
*/ */
if (!cpu_idle_thread[cpu]) {
idle = fork_idle(cpu); idle = fork_idle(cpu);
cpu_idle_thread[cpu] = idle;
if (IS_ERR(idle)) if (IS_ERR(idle))
panic(KERN_ERR "Fork failed for CPU %d", cpu); panic(KERN_ERR "Fork failed for CPU %d", cpu);
} else {
idle = cpu_idle_thread[cpu];
init_idle(idle, cpu);
}
mp_ops->boot_secondary(cpu, idle); mp_ops->boot_secondary(cpu, idle);
......
...@@ -17,7 +17,10 @@ static int __init topology_init(void) ...@@ -17,7 +17,10 @@ static int __init topology_init(void)
#endif /* CONFIG_NUMA */ #endif /* CONFIG_NUMA */
for_each_present_cpu(i) { for_each_present_cpu(i) {
ret = register_cpu(&per_cpu(cpu_devices, i), i); struct cpu *c = &per_cpu(cpu_devices, i);
c->hotpluggable = 1;
ret = register_cpu(c, i);
if (ret) if (ret)
printk(KERN_WARNING "topology_init: register_cpu %d " printk(KERN_WARNING "topology_init: register_cpu %d "
"failed (%d)\n", i, ret); "failed (%d)\n", i, ret);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment