Commit 6de92920 authored by Russell King

Merge commit 'smp-hotplug^{/omap2}' into for-linus

parents 97b6f89f 07a496de
@@ -69,8 +69,6 @@ static const struct omap_smp_config omap5_cfg __initconst = {
 	.startup_addr = omap5_secondary_startup,
 };
 
-static DEFINE_SPINLOCK(boot_lock);
-
 void __iomem *omap4_get_scu_base(void)
 {
 	return cfg.scu_base;
@@ -173,12 +171,6 @@ static void omap4_secondary_init(unsigned int cpu)
 		/* Enable ACR to allow for ICUALLU workaround */
 		omap5_secondary_harden_predictor();
 	}
-
-	/*
-	 * Synchronise with the boot thread.
-	 */
-	spin_lock(&boot_lock);
-	spin_unlock(&boot_lock);
 }
 
 static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -187,12 +179,6 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	static bool booted;
 	static struct powerdomain *cpu1_pwrdm;
 
-	/*
-	 * Set synchronisation state between this boot processor
-	 * and the secondary one
-	 */
-	spin_lock(&boot_lock);
-
 	/*
 	 * Update the AuxCoreBoot0 with boot state for secondary core.
 	 * omap4_secondary_startup() routine will hold the secondary core till
@@ -266,12 +252,6 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
 
 	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
 
-	/*
-	 * Now the secondary core is starting up let it run its
-	 * calibrations, then wait for it to finish
-	 */
-	spin_unlock(&boot_lock);
-
 	return 0;
 }
...
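The hunks above delete a boot_lock whose only job was a handshake: the secondary CPU takes and immediately drops a lock the boot CPU holds for the duration of its half of the bring-up. The plat-versatile changes further down keep the same construct, and their new comment explains why it exists there (serialising the secondary's delay-loop calibration against bus traffic from the requesting CPU). Below is a hedged userspace model of that handshake, not part of the commit; every name in it is invented for illustration. Build with cc -pthread.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static pthread_mutex_t boot_lock = PTHREAD_MUTEX_INITIALIZER;
	static atomic_int release = -1;		/* models the holding-pen word */

	static void *secondary(void *arg)
	{
		while (atomic_load(&release) != 1)	/* sit in the pen */
			;
		atomic_store(&release, -1);		/* tell the boot CPU we are out */
		/* "Synchronise with the boot thread": an empty lock/unlock pair
		 * blocks us until the boot CPU has finished and dropped the lock. */
		pthread_mutex_lock(&boot_lock);
		pthread_mutex_unlock(&boot_lock);
		puts("secondary: running");
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_mutex_lock(&boot_lock);		/* "set synchronisation state" */
		pthread_create(&t, NULL, secondary, NULL);
		atomic_store(&release, 1);		/* release the secondary from the pen */
		while (atomic_load(&release) != -1)	/* wait for its acknowledgement */
			;
		pthread_mutex_unlock(&boot_lock);	/* now let it proceed */
		pthread_join(t, NULL);
		return 0;
	}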
@@ -5,4 +5,3 @@ ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/arch/arm/plat-versatile/inc
 obj-y					+= realview-dt.o
 obj-$(CONFIG_SMP)			+= platsmp-dt.o
-obj-$(CONFIG_HOTPLUG_CPU)		+= hotplug.o
...
deleted file: arch/arm/mach-realview/hotplug.c
-/*
- * linux/arch/arm/mach-realview/hotplug.c
- *
- * Copyright (C) 2002 ARM Ltd.
- * All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/smp.h>
-
-#include <asm/cp15.h>
-#include <asm/smp_plat.h>
-
-static inline void cpu_enter_lowpower(void)
-{
-	unsigned int v;
-
-	asm volatile(
-	"	mcr	p15, 0, %1, c7, c5, 0\n"
-	"	mcr	p15, 0, %1, c7, c10, 4\n"
-	/*
-	 * Turn off coherency
-	 */
-	"	mrc	p15, 0, %0, c1, c0, 1\n"
-	"	bic	%0, %0, #0x20\n"
-	"	mcr	p15, 0, %0, c1, c0, 1\n"
-	"	mrc	p15, 0, %0, c1, c0, 0\n"
-	"	bic	%0, %0, %2\n"
-	"	mcr	p15, 0, %0, c1, c0, 0\n"
-	  : "=&r" (v)
-	  : "r" (0), "Ir" (CR_C)
-	  : "cc");
-}
-
-static inline void cpu_leave_lowpower(void)
-{
-	unsigned int v;
-
-	asm volatile(
-	"	mrc	p15, 0, %0, c1, c0, 0\n"
-	"	orr	%0, %0, %1\n"
-	"	mcr	p15, 0, %0, c1, c0, 0\n"
-	"	mrc	p15, 0, %0, c1, c0, 1\n"
-	"	orr	%0, %0, #0x20\n"
-	"	mcr	p15, 0, %0, c1, c0, 1\n"
-	  : "=&r" (v)
-	  : "Ir" (CR_C)
-	  : "cc");
-}
-
-static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
-{
-	/*
-	 * there is no power-control hardware on this platform, so all
-	 * we can do is put the core into WFI; this is safe as the calling
-	 * code will have already disabled interrupts
-	 */
-	for (;;) {
-		/*
-		 * here's the WFI
-		 */
-		asm(".word	0xe320f003\n"
-		    :
-		    :
-		    : "memory", "cc");
-
-		if (pen_release == cpu_logical_map(cpu)) {
-			/*
-			 * OK, proper wakeup, we're done
-			 */
-			break;
-		}
-
-		/*
-		 * Getting here, means that we have come out of WFI without
-		 * having been woken up - this shouldn't happen
-		 *
-		 * Just note it happening - when we're woken, we can report
-		 * its occurrence.
-		 */
-		(*spurious)++;
-	}
-}
-
-/*
- * platform-specific code to shutdown a CPU
- *
- * Called with IRQs disabled
- */
-void realview_cpu_die(unsigned int cpu)
-{
-	int spurious = 0;
-
-	/*
-	 * we're ready for shutdown now, so do it
-	 */
-	cpu_enter_lowpower();
-	platform_do_lowpower(cpu, &spurious);
-
-	/*
-	 * bring this CPU back into the world of cache
-	 * coherency, and then restore interrupts
-	 */
-	cpu_leave_lowpower();
-
-	if (spurious)
-		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
-}
...
deleted file: arch/arm/mach-realview/hotplug.h
-void realview_cpu_die(unsigned int cpu);
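A side note on the file deleted above, not part of the commit: the hand-assembled ".word 0xe320f003" is, to my reading, the A32 encoding of WFI, kept in that form because the file predates assemblers that accept the mnemonic. The consolidated plat-versatile copy further down simply calls the kernel's wfi() helper. The two forms below should be equivalent on an ARM target; the claimed equivalence is my assumption, not stated in the diff:

	asm volatile(".word 0xe320f003" : : : "memory", "cc");	/* hand-encoded WFI */
	asm volatile("wfi" : : : "memory");			/* mnemonic form */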
...
@@ -17,7 +17,6 @@
 #include <asm/smp_scu.h>
 
 #include <plat/platsmp.h>
-#include "hotplug.h"
 
 #define REALVIEW_SYS_FLAGSSET_OFFSET	0x30
@@ -79,6 +78,13 @@ static void __init realview_smp_prepare_cpus(unsigned int max_cpus)
 			__pa_symbol(versatile_secondary_startup));
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+static void realview_cpu_die(unsigned int cpu)
+{
+	return versatile_immitation_cpu_die(cpu, 0x20);
+}
+#endif
+
 static const struct smp_operations realview_dt_smp_ops __initconst = {
 	.smp_prepare_cpus	= realview_smp_prepare_cpus,
 	.smp_secondary_init	= versatile_secondary_init,
...
@@ -15,6 +15,5 @@ obj-$(CONFIG_ARCH_VEXPRESS_TC2_PM)	+= tc2_pm.o
 CFLAGS_tc2_pm.o			+= -march=armv7-a
 CFLAGS_REMOVE_tc2_pm.o		= -pg
 obj-$(CONFIG_SMP)		+= platsmp.o
-obj-$(CONFIG_HOTPLUG_CPU)	+= hotplug.o
 obj-$(CONFIG_ARCH_MPS2)		+= v2m-mps2.o
...
 bool vexpress_smp_init_ops(void);
 extern const struct smp_operations vexpress_smp_dt_ops;
-extern void vexpress_cpu_die(unsigned int cpu);
...
@@ -82,6 +82,13 @@ static void __init vexpress_smp_dt_prepare_cpus(unsigned int max_cpus)
 	vexpress_flags_set(__pa_symbol(versatile_secondary_startup));
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+static void vexpress_cpu_die(unsigned int cpu)
+{
+	versatile_immitation_cpu_die(cpu, 0x40);
+}
+#endif
+
 const struct smp_operations vexpress_smp_dt_ops __initconst = {
 	.smp_prepare_cpus	= vexpress_smp_dt_prepare_cpus,
 	.smp_secondary_init	= versatile_secondary_init,
...
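Both platforms now implement their .cpu_die hook as a one-line wrapper passing a platform-specific Auxiliary Control Register mask to the shared helper. A hedged reading, not stated anywhere in the diff: 0x20 (bit 5) and 0x40 (bit 6) look like the per-core "SMP" coherency bits of ARM11 MPCore and Cortex-A9 respectively, which the shared code clears on the way into WFI. Expressed as a sketch with invented macro names; only the two numeric values come from the hunks above:

	#define ACTLR_SMP_ARM11MPCORE	0x20	/* what realview passes (assumed: ARM11 MPCore bit 5) */
	#define ACTLR_SMP_CORTEX_A9	0x40	/* what vexpress passes (assumed: Cortex-A9 bit 6) */

	static void my_cpu_die(unsigned int cpu)	/* hypothetical further user */
	{
		versatile_immitation_cpu_die(cpu, ACTLR_SMP_CORTEX_A9);
	}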
@@ -2,3 +2,4 @@ ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include
 obj-$(CONFIG_PLAT_VERSATILE_SCHED_CLOCK) += sched-clock.o
 obj-$(CONFIG_SMP) += headsmp.o platsmp.o
+obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
...
@@ -37,5 +37,5 @@ pen:	ldr	r7, [r6]
 	.align
 1:	.long	.
-	.long	pen_release
+	.long	versatile_cpu_release
 ENDPROC(versatile_secondary_startup)
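The literal pool entry changed above is the address the parked secondary polls. Based on the "pen: ldr r7, [r6]" context line in the hunk header, the holding pen is roughly the loop below; the cmp/bne lines and the register roles are my reconstruction, not shown in this diff:

	pen:	ldr	r7, [r6]	@ r6 = &versatile_cpu_release (from the literal above)
		cmp	r7, r0		@ r0 = this CPU's cpu_logical_map() id
		bne	pen		@ spin until the boot CPU writes our id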
...
@@ -1,12 +1,15 @@
 /*
- * linux/arch/arm/mach-realview/hotplug.c
- *
  * Copyright (C) 2002 ARM Ltd.
  * All Rights Reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
+ *
+ * This hotplug implementation is _specific_ to the situation found on
+ * ARM development platforms where there is _no_ possibility of actually
+ * taking a CPU offline, resetting it, or otherwise. Real platforms must
+ * NOT copy this code.
  */
 #include <linux/kernel.h>
 #include <linux/errno.h>
@@ -15,9 +18,9 @@
 #include <asm/smp_plat.h>
 #include <asm/cp15.h>
 
-#include "core.h"
+#include <plat/platsmp.h>
 
-static inline void cpu_enter_lowpower(void)
+static inline void versatile_immitation_enter_lowpower(unsigned int actrl_mask)
 {
 	unsigned int v;
@@ -34,11 +37,11 @@ static inline void cpu_enter_lowpower(void)
 	"	bic	%0, %0, %2\n"
 	"	mcr	p15, 0, %0, c1, c0, 0\n"
 	  : "=&r" (v)
-	  : "r" (0), "Ir" (CR_C), "Ir" (0x40)
+	  : "r" (0), "Ir" (CR_C), "Ir" (actrl_mask)
 	  : "cc");
 }
 
-static inline void cpu_leave_lowpower(void)
+static inline void versatile_immitation_leave_lowpower(unsigned int actrl_mask)
 {
 	unsigned int v;
@@ -50,21 +53,23 @@ static inline void cpu_leave_lowpower(void)
 	"	orr	%0, %0, %2\n"
 	"	mcr	p15, 0, %0, c1, c0, 1\n"
 	  : "=&r" (v)
-	  : "Ir" (CR_C), "Ir" (0x40)
+	  : "Ir" (CR_C), "Ir" (actrl_mask)
 	  : "cc");
 }
 
-static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
+static inline void versatile_immitation_do_lowpower(unsigned int cpu, int *spurious)
 {
 	/*
 	 * there is no power-control hardware on this platform, so all
 	 * we can do is put the core into WFI; this is safe as the calling
-	 * code will have already disabled interrupts
+	 * code will have already disabled interrupts.
+	 *
+	 * This code should not be used outside Versatile platforms.
 	 */
 	for (;;) {
 		wfi();
 
-		if (pen_release == cpu_logical_map(cpu)) {
+		if (versatile_cpu_release == cpu_logical_map(cpu)) {
 			/*
 			 * OK, proper wakeup, we're done
 			 */
@@ -83,25 +88,17 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
 }
 
 /*
- * platform-specific code to shutdown a CPU
- *
- * Called with IRQs disabled
+ * platform-specific code to shutdown a CPU.
+ * This code supports immitation-style CPU hotplug for Versatile/Realview/
+ * Versatile Express platforms that are unable to do real CPU hotplug.
  */
-void vexpress_cpu_die(unsigned int cpu)
+void versatile_immitation_cpu_die(unsigned int cpu, unsigned int actrl_mask)
 {
 	int spurious = 0;
 
-	/*
-	 * we're ready for shutdown now, so do it
-	 */
-	cpu_enter_lowpower();
-	platform_do_lowpower(cpu, &spurious);
-
-	/*
-	 * bring this CPU back into the world of cache
-	 * coherency, and then restore interrupts
-	 */
-	cpu_leave_lowpower();
+	versatile_immitation_enter_lowpower(actrl_mask);
+	versatile_immitation_do_lowpower(cpu, &spurious);
+	versatile_immitation_leave_lowpower(actrl_mask);
 
 	if (spurious)
 		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
...
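The consolidated cpu_die path above runs three steps whose order matters. The sequencing is visible in the hunks; the "why" below is my annotation, not part of the commit:

	/*
	 * versatile_immitation_enter_lowpower(mask);  clean+disable the D-cache
	 *					       (CR_C) first, only then clear
	 *					       the ACTLR coherency bit, so no
	 *					       dirty lines are stranded in a
	 *					       cache other CPUs can no longer
	 *					       snoop
	 * versatile_immitation_do_lowpower(cpu, &n);  WFI loop until released,
	 *					       counting spurious wakeups
	 * versatile_immitation_leave_lowpower(mask);  reverse order: rejoin
	 *					       coherency, re-enable the cache
	 */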
@@ -8,7 +8,9 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+extern volatile int versatile_cpu_release;
 extern void versatile_secondary_startup(void);
 extern void versatile_secondary_init(unsigned int cpu);
 extern int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle);
+void versatile_immitation_cpu_die(unsigned int cpu, unsigned int actrl_mask);
...
@@ -7,6 +7,11 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
+ *
+ * This code is specific to the hardware found on ARM Realview and
+ * Versatile Express platforms where the CPUs are unable to be individually
+ * woken, and where there is no way to hot-unplug CPUs. Real platforms
+ * should not copy this code.
  */
 #include <linux/init.h>
 #include <linux/errno.h>
@@ -21,18 +26,32 @@
 #include <plat/platsmp.h>
 
 /*
- * Write pen_release in a way that is guaranteed to be visible to all
- * observers, irrespective of whether they're taking part in coherency
+ * versatile_cpu_release controls the release of CPUs from the holding
+ * pen in headsmp.S, which exists because we are not always able to
+ * control the release of individual CPUs from the board firmware.
+ * Production platforms do not need this.
+ */
+volatile int versatile_cpu_release = -1;
+
+/*
+ * Write versatile_cpu_release in a way that is guaranteed to be visible to
+ * all observers, irrespective of whether they're taking part in coherency
  * or not. This is necessary for the hotplug code to work reliably.
  */
-static void write_pen_release(int val)
+static void versatile_write_cpu_release(int val)
 {
-	pen_release = val;
+	versatile_cpu_release = val;
 	smp_wmb();
-	sync_cache_w(&pen_release);
+	sync_cache_w(&versatile_cpu_release);
 }
 
-static DEFINE_SPINLOCK(boot_lock);
+/*
+ * versatile_lock exists to avoid running the loops_per_jiffy delay loop
+ * calibrations on the secondary CPU while the requesting CPU is using
+ * the limited-bandwidth bus - which affects the calibration value.
+ * Production platforms do not need this.
+ */
+static DEFINE_RAW_SPINLOCK(versatile_lock);
 
 void versatile_secondary_init(unsigned int cpu)
 {
@@ -40,13 +59,13 @@ void versatile_secondary_init(unsigned int cpu)
 	 * let the primary processor know we're out of the
 	 * pen, then head off into the C entry point
 	 */
-	write_pen_release(-1);
+	versatile_write_cpu_release(-1);
 
 	/*
 	 * Synchronise with the boot thread.
 	 */
-	spin_lock(&boot_lock);
-	spin_unlock(&boot_lock);
+	raw_spin_lock(&versatile_lock);
+	raw_spin_unlock(&versatile_lock);
 }
 
 int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -57,7 +76,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	 * Set synchronisation state between this boot processor
 	 * and the secondary one
 	 */
-	spin_lock(&boot_lock);
+	raw_spin_lock(&versatile_lock);
 
 	/*
 	 * This is really belt and braces; we hold unintended secondary
@@ -65,7 +84,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	 * since we haven't sent them a soft interrupt, they shouldn't
 	 * be there.
 	 */
-	write_pen_release(cpu_logical_map(cpu));
+	versatile_write_cpu_release(cpu_logical_map(cpu));
 
 	/*
 	 * Send the secondary CPU a soft interrupt, thereby causing
@@ -77,7 +96,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	timeout = jiffies + (1 * HZ);
 	while (time_before(jiffies, timeout)) {
 		smp_rmb();
-		if (pen_release == -1)
+		if (versatile_cpu_release == -1)
 			break;
 
 		udelay(10);
@@ -87,7 +106,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	 * now the secondary core is starting up let it run its
 	 * calibrations, then wait for it to finish
 	 */
-	spin_unlock(&boot_lock);
+	raw_spin_unlock(&versatile_lock);
 
-	return pen_release != -1 ? -ENOSYS : 0;
+	return versatile_cpu_release != -1 ? -ENOSYS : 0;
 }
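One detail worth pulling out of the hunks above: the release word is written three ways at once - a plain store, an smp_wmb(), and sync_cache_w(). A hedged explanation, not spelled out in the diff: the parked secondary may still be executing headsmp.S with its MMU and caches off, reading memory directly, so an ordinary store could sit in the boot CPU's cache where the secondary never sees it. sync_cache_w() (the kernel helper the hunk itself uses) cleans the line out to a point the non-coherent observer can read, while smp_wmb() orders the store for coherent observers; the read side pairs with the smp_rmb() before the poll. The pattern, restated with the diff's own identifiers:

	/* boot CPU (coherent, caches on) */
	versatile_cpu_release = cpu_logical_map(cpu);	/* plain store */
	smp_wmb();					/* order vs. coherent readers */
	sync_cache_w(&versatile_cpu_release);		/* make visible to the non-coherent pen */

	/* secondary (in the pen, possibly caches/MMU off): polls the word
	 * directly, compares it with its own cpu_logical_map() id, and leaves
	 * the pen on a match. */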