Commit 04092204 authored by Paolo Bonzini


Merge tag 'kvm-arm-for-3.16' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-next

Changes for the 3.16 merge window.

This includes KVM support for PSCI v0.2 and also includes generic Linux
support for PSCI v0.2 (on hosts that advertise that feature via their
DT), since the latter depends on headers introduced by the former.

Finally there's a small patch from Marc that enables Cortex-A53 support.
parents 9b88ae99 1252b331
...@@ -21,7 +21,15 @@ to #0.
Main node required properties:
- compatible : should contain at least one of:
* "arm,psci" : for implementations complying to PSCI versions prior to
0.2. For these cases function IDs must be provided.
* "arm,psci-0.2" : for implementations complying to PSCI 0.2. Function
IDs are not required and should be ignored by an OS with PSCI 0.2
support, but are permitted to be present for compatibility with
existing software when "arm,psci" is later in the compatible list.
- method : The method of calling the PSCI firmware. Permitted
values are:
...@@ -45,6 +53,8 @@ Main node optional properties:
Example:
Case 1: PSCI v0.1 only.
psci {
compatible = "arm,psci";
method = "smc";
...@@ -53,3 +63,28 @@ Example:
cpu_on = <0x95c10002>;
migrate = <0x95c10003>;
};
Case 2: PSCI v0.2 only
psci {
compatible = "arm,psci-0.2";
method = "smc";
};
Case 3: PSCI v0.2 and PSCI v0.1.
A DTB may provide IDs for use by kernels without PSCI 0.2 support,
enabling firmware and hypervisors to support existing and new kernels.
These IDs will be ignored by kernels with PSCI 0.2 support, which will
use the standard PSCI 0.2 IDs exclusively.
psci {
compatible = "arm,psci-0.2", "arm,psci";
method = "hvc";
cpu_on = < arbitrary value >;
cpu_off = < arbitrary value >;
...
};
...@@ -2378,6 +2378,8 @@ Possible features:
Depends on KVM_CAP_ARM_PSCI.
- KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
- KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 for the CPU.
Depends on KVM_CAP_ARM_PSCI_0_2.
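
As an illustrative aside (not part of this patch), the sketch below shows how userspace might request this feature when initialising a vcpu. It assumes vm_fd and vcpu_fd have already been created, and vcpu_enable_psci_0_2() is a hypothetical helper name; error handling is reduced to returning -1.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: enable PSCI v0.2 emulation for one vcpu. */
static int vcpu_enable_psci_0_2(int vm_fd, int vcpu_fd)
{
	struct kvm_vcpu_init init;

	memset(&init, 0, sizeof(init));

	/* Ask KVM for the preferred target (see 4.83 KVM_ARM_PREFERRED_TARGET). */
	if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
		return -1;

	/* Request PSCI v0.2; only do this when KVM_CAP_ARM_PSCI_0_2 is reported. */
	init.features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;

	return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}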
4.83 KVM_ARM_PREFERRED_TARGET
...@@ -2740,6 +2742,21 @@ It gets triggered whenever both KVM_CAP_PPC_EPR are enabled and an
external interrupt has just been delivered into the guest. User space
should put the acknowledged interrupt vector into the 'epr' field.
/* KVM_EXIT_SYSTEM_EVENT */
struct {
#define KVM_SYSTEM_EVENT_SHUTDOWN 1
#define KVM_SYSTEM_EVENT_RESET 2
__u32 type;
__u64 flags;
} system_event;
If exit_reason is KVM_EXIT_SYSTEM_EVENT then the vcpu has triggered
a system-level event using some architecture specific mechanism (hypercall
or some special instruction). In case of ARM/ARM64, this is triggered using
HVC instruction based PSCI call from the vcpu. The 'type' field describes
the system-level event type. The 'flags' field describes architecture
specific flags for the system-level event.
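
As an illustrative aside (not part of this patch), a minimal sketch of how a userspace run loop might react to this exit; simply stopping the vcpu is a placeholder for VMM-specific shutdown/reset behaviour.

#include <stdbool.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Run the vcpu once; return false when the guest asked to stop running. */
static bool run_vcpu_once(int vcpu_fd, struct kvm_run *run)
{
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
		return false;

	if (run->exit_reason != KVM_EXIT_SYSTEM_EVENT)
		return true; /* other exit reasons handled elsewhere */

	switch (run->system_event.type) {
	case KVM_SYSTEM_EVENT_SHUTDOWN:
		/* PSCI SYSTEM_OFF: tear the VM down. */
		return false;
	case KVM_SYSTEM_EVENT_RESET:
		/* PSCI SYSTEM_RESET: reinitialise vcpus and restart the guest. */
		return false;
	default:
		return false;
	}
}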
/* Fix the size of the union. */
char padding[256];
};
......
...@@ -36,7 +36,7 @@
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_HAVE_ONE_REG
#define KVM_VCPU_MAX_FEATURES 2
#include <kvm/arm_vgic.h>
......
...@@ -18,6 +18,10 @@
#ifndef __ARM_KVM_PSCI_H__
#define __ARM_KVM_PSCI_H__
#define KVM_ARM_PSCI_0_1 1
#define KVM_ARM_PSCI_0_2 2
int kvm_psci_version(struct kvm_vcpu *vcpu);
int kvm_psci_call(struct kvm_vcpu *vcpu);
#endif /* __ARM_KVM_PSCI_H__ */
...@@ -29,16 +29,19 @@ struct psci_operations {
int (*cpu_off)(struct psci_power_state state);
int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
int (*migrate)(unsigned long cpuid);
int (*affinity_info)(unsigned long target_affinity,
unsigned long lowest_affinity_level);
int (*migrate_info_type)(void);
};
extern struct psci_operations psci_ops;
extern struct smp_operations psci_smp_ops;
#ifdef CONFIG_ARM_PSCI
int psci_init(void);
bool psci_smp_available(void);
#else
static inline int psci_init(void) { return 0; }
static inline bool psci_smp_available(void) { return false; }
#endif
......
...@@ -20,6 +20,7 @@
#define __ARM_KVM_H__
#include <linux/types.h>
#include <linux/psci.h>
#include <asm/ptrace.h>
#define __KVM_HAVE_GUEST_DEBUG
...@@ -83,6 +84,7 @@ struct kvm_regs {
#define KVM_VGIC_V2_CPU_SIZE 0x2000
#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
#define KVM_ARM_VCPU_PSCI_0_2 1 /* CPU uses PSCI v0.2 */
struct kvm_vcpu_init {
__u32 target;
...@@ -201,9 +203,9 @@ struct kvm_arch_memory_slot {
#define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2)
#define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3)
#define KVM_PSCI_RET_SUCCESS PSCI_RET_SUCCESS
#define KVM_PSCI_RET_NI PSCI_RET_NOT_SUPPORTED
#define KVM_PSCI_RET_INVAL PSCI_RET_INVALID_PARAMS
#define KVM_PSCI_RET_DENIED PSCI_RET_DENIED
#endif /* __ARM_KVM_H__ */
...@@ -17,63 +17,58 @@
#include <linux/init.h>
#include <linux/of.h>
#include <linux/reboot.h>
#include <linux/pm.h>
#include <uapi/linux/psci.h>
#include <asm/compiler.h>
#include <asm/errno.h>
#include <asm/opcodes-sec.h>
#include <asm/opcodes-virt.h>
#include <asm/psci.h>
#include <asm/system_misc.h>
struct psci_operations psci_ops;
static int (*invoke_psci_fn)(u32, u32, u32, u32);
typedef int (*psci_initcall_t)(const struct device_node *);
enum psci_function {
PSCI_FN_CPU_SUSPEND,
PSCI_FN_CPU_ON,
PSCI_FN_CPU_OFF,
PSCI_FN_MIGRATE,
PSCI_FN_AFFINITY_INFO,
PSCI_FN_MIGRATE_INFO_TYPE,
PSCI_FN_MAX,
};
static u32 psci_function_id[PSCI_FN_MAX];
#define PSCI_RET_SUCCESS 0
#define PSCI_RET_EOPNOTSUPP -1
#define PSCI_RET_EINVAL -2
#define PSCI_RET_EPERM -3
static int psci_to_linux_errno(int errno)
{
switch (errno) {
case PSCI_RET_SUCCESS:
return 0;
case PSCI_RET_NOT_SUPPORTED:
return -EOPNOTSUPP;
case PSCI_RET_INVALID_PARAMS:
return -EINVAL;
case PSCI_RET_DENIED:
return -EPERM;
};
return -EINVAL;
}
#define PSCI_POWER_STATE_ID_MASK 0xffff
#define PSCI_POWER_STATE_ID_SHIFT 0
#define PSCI_POWER_STATE_TYPE_MASK 0x1
#define PSCI_POWER_STATE_TYPE_SHIFT 16
#define PSCI_POWER_STATE_AFFL_MASK 0x3
#define PSCI_POWER_STATE_AFFL_SHIFT 24
static u32 psci_power_state_pack(struct psci_power_state state)
{
return ((state.id << PSCI_0_2_POWER_STATE_ID_SHIFT)
& PSCI_0_2_POWER_STATE_ID_MASK) |
((state.type << PSCI_0_2_POWER_STATE_TYPE_SHIFT)
& PSCI_0_2_POWER_STATE_TYPE_MASK) |
((state.affinity_level << PSCI_0_2_POWER_STATE_AFFL_SHIFT)
& PSCI_0_2_POWER_STATE_AFFL_MASK);
}
/*
...@@ -110,6 +105,14 @@ static noinline int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1,
return function_id;
}
static int psci_get_version(void)
{
int err;
err = invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
return err;
}
static int psci_cpu_suspend(struct psci_power_state state,
unsigned long entry_point)
{
...@@ -153,26 +156,36 @@ static int psci_migrate(unsigned long cpuid)
return psci_to_linux_errno(err);
}
static int psci_affinity_info(unsigned long target_affinity,
unsigned long lowest_affinity_level)
{
int err;
u32 fn;
fn = psci_function_id[PSCI_FN_AFFINITY_INFO];
err = invoke_psci_fn(fn, target_affinity, lowest_affinity_level, 0);
return err;
}
static int psci_migrate_info_type(void)
{
int err;
u32 fn;
fn = psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE];
err = invoke_psci_fn(fn, 0, 0, 0);
return err;
}
static int get_set_conduit_method(struct device_node *np)
{
const char *method;
pr_info("probing for conduit method from DT.\n");
if (of_property_read_string(np, "method", &method)) {
pr_warn("missing \"method\" property\n");
return -ENXIO;
}
if (!strcmp("hvc", method)) {
...@@ -180,10 +193,99 @@ void __init psci_init(void)
} else if (!strcmp("smc", method)) {
invoke_psci_fn = __invoke_psci_fn_smc;
} else {
pr_warn("invalid \"method\" property: %s\n", method);
return -EINVAL;
}
return 0;
}
static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
{
invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
}
static void psci_sys_poweroff(void)
{
invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
}
/*
* PSCI Function IDs for v0.2+ are well defined so use
* standard values.
*/
static int psci_0_2_init(struct device_node *np)
{
int err, ver;
err = get_set_conduit_method(np);
if (err)
goto out_put_node;
ver = psci_get_version();
if (ver == PSCI_RET_NOT_SUPPORTED) {
/* PSCI v0.2 mandates implementation of PSCI_ID_VERSION. */
pr_err("PSCI firmware does not comply with the v0.2 spec.\n");
err = -EOPNOTSUPP;
goto out_put_node;
} else {
pr_info("PSCIv%d.%d detected in firmware.\n",
PSCI_VERSION_MAJOR(ver),
PSCI_VERSION_MINOR(ver));
if (PSCI_VERSION_MAJOR(ver) == 0 &&
PSCI_VERSION_MINOR(ver) < 2) {
err = -EINVAL;
pr_err("Conflicting PSCI version detected.\n");
goto out_put_node;
}
}
pr_info("Using standard PSCI v0.2 function IDs\n");
psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN_CPU_SUSPEND;
psci_ops.cpu_suspend = psci_cpu_suspend;
psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
psci_ops.cpu_off = psci_cpu_off;
psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN_CPU_ON;
psci_ops.cpu_on = psci_cpu_on;
psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN_MIGRATE;
psci_ops.migrate = psci_migrate;
psci_function_id[PSCI_FN_AFFINITY_INFO] = PSCI_0_2_FN_AFFINITY_INFO;
psci_ops.affinity_info = psci_affinity_info;
psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE] =
PSCI_0_2_FN_MIGRATE_INFO_TYPE;
psci_ops.migrate_info_type = psci_migrate_info_type;
arm_pm_restart = psci_sys_reset;
pm_power_off = psci_sys_poweroff;
out_put_node:
of_node_put(np);
return err;
}
/*
* PSCI < v0.2 get PSCI Function IDs via DT.
*/
static int psci_0_1_init(struct device_node *np)
{
u32 id;
int err;
err = get_set_conduit_method(np);
if (err)
goto out_put_node;
pr_info("Using PSCI v0.1 Function IDs from DT\n");
if (!of_property_read_u32(np, "cpu_suspend", &id)) {
psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
psci_ops.cpu_suspend = psci_cpu_suspend;
...@@ -206,5 +308,25 @@ void __init psci_init(void)
out_put_node:
of_node_put(np);
return err;
}
static const struct of_device_id psci_of_match[] __initconst = {
{ .compatible = "arm,psci", .data = psci_0_1_init},
{ .compatible = "arm,psci-0.2", .data = psci_0_2_init},
{},
};
int __init psci_init(void)
{
struct device_node *np;
const struct of_device_id *matched_np;
psci_initcall_t init_fn;
np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
if (!np)
return -ENODEV;
init_fn = (psci_initcall_t)matched_np->data;
return init_fn(np);
}
...@@ -16,6 +16,8 @@
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <uapi/linux/psci.h>
#include <asm/psci.h>
#include <asm/smp_plat.h>
...@@ -66,6 +68,36 @@ void __ref psci_cpu_die(unsigned int cpu)
/* We should never return */
panic("psci: cpu %d failed to shutdown\n", cpu);
}
int __ref psci_cpu_kill(unsigned int cpu)
{
int err, i;
if (!psci_ops.affinity_info)
return 1;
/*
* cpu_kill could race with cpu_die and we can
* potentially end up declaring this cpu undead
* while it is dying. So, try again a few times.
*/
for (i = 0; i < 10; i++) {
err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
pr_info("CPU%d killed.\n", cpu);
return 1;
}
msleep(10);
pr_info("Retrying again to check for CPU kill\n");
}
pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n",
cpu, err);
/* Make platform_cpu_kill() fail. */
return 0;
}
#endif
bool __init psci_smp_available(void)
...@@ -78,5 +110,6 @@ struct smp_operations __initdata psci_smp_ops = {
.smp_boot_secondary = psci_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_die = psci_cpu_die,
.cpu_kill = psci_cpu_kill,
#endif
};
...@@ -197,6 +197,7 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
case KVM_CAP_ONE_REG:
case KVM_CAP_ARM_PSCI:
case KVM_CAP_ARM_PSCI_0_2:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
......
...@@ -38,14 +38,18 @@ static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
int ret;
trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
kvm_vcpu_hvc_get_imm(vcpu));
ret = kvm_psci_call(vcpu);
if (ret < 0) {
kvm_inject_undefined(vcpu);
return 1;
}
return ret;
}
static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
......
...@@ -27,6 +27,36 @@
* as described in ARM document number ARM DEN 0022A.
*/
#define AFFINITY_MASK(level) ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)
static unsigned long psci_affinity_mask(unsigned long affinity_level)
{
if (affinity_level <= 3)
return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);
return 0;
}
static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
{
/*
* NOTE: For simplicity, we treat VCPU suspend emulation the
* same as WFI (Wait-for-interrupt) emulation.
*
* This means for KVM the wakeup events are interrupts and
* this is consistent with intended use of StateID as described
* in section 5.4.1 of PSCI v0.2 specification (ARM DEN 0022A).
*
* Further, we also treat a power-down request the same as a
* stand-by request, as per section 5.4.2 clause 3 of the PSCI v0.2
* specification (ARM DEN 0022A). This means all suspend states
* for KVM will preserve the register state.
*/
kvm_vcpu_block(vcpu);
return PSCI_RET_SUCCESS;
}
static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{
vcpu->arch.pause = true;
...@@ -38,6 +68,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
struct kvm_vcpu *vcpu = NULL, *tmp;
wait_queue_head_t *wq;
unsigned long cpu_id;
unsigned long context_id;
unsigned long mpidr;
phys_addr_t target_pc;
int i;
...@@ -58,10 +89,17 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
* Make sure the caller requested a valid CPU and that the CPU is
* turned off.
*/
if (!vcpu)
return PSCI_RET_INVALID_PARAMS;
if (!vcpu->arch.pause) {
if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
return PSCI_RET_ALREADY_ON;
else
return PSCI_RET_INVALID_PARAMS;
}
target_pc = *vcpu_reg(source_vcpu, 2);
context_id = *vcpu_reg(source_vcpu, 3);
kvm_reset_vcpu(vcpu);
...@@ -76,26 +114,160 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
kvm_vcpu_set_be(vcpu);
*vcpu_pc(vcpu) = target_pc;
/*
* NOTE: We always update r0 (or x0) because for PSCI v0.1
* the general purpose registers are undefined upon CPU_ON.
*/
*vcpu_reg(vcpu, 0) = context_id;
vcpu->arch.pause = false;
smp_mb(); /* Make sure the above is visible */
wq = kvm_arch_vcpu_wq(vcpu);
wake_up_interruptible(wq);
return PSCI_RET_SUCCESS;
}
static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
{
int i;
unsigned long mpidr;
unsigned long target_affinity;
unsigned long target_affinity_mask;
unsigned long lowest_affinity_level;
struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu *tmp;
target_affinity = *vcpu_reg(vcpu, 1);
lowest_affinity_level = *vcpu_reg(vcpu, 2);
/* Determine target affinity mask */
target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
if (!target_affinity_mask)
return PSCI_RET_INVALID_PARAMS;
/* Ignore other bits of target affinity */
target_affinity &= target_affinity_mask;
/*
* If one or more VCPU matching target affinity are running
* then ON else OFF
*/
kvm_for_each_vcpu(i, tmp, kvm) {
mpidr = kvm_vcpu_get_mpidr(tmp);
if (((mpidr & target_affinity_mask) == target_affinity) &&
!tmp->arch.pause) {
return PSCI_0_2_AFFINITY_LEVEL_ON;
}
}
return PSCI_0_2_AFFINITY_LEVEL_OFF;
}
static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
{
memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
vcpu->run->system_event.type = type;
vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}
static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
{
kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
}
static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
{
kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
}
int kvm_psci_version(struct kvm_vcpu *vcpu)
{
if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
return KVM_ARM_PSCI_0_2;
return KVM_ARM_PSCI_0_1;
}
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
int ret = 1;
unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
unsigned long val;
switch (psci_fn) {
case PSCI_0_2_FN_PSCI_VERSION:
/*
* Bits[31:16] = Major Version = 0
* Bits[15:0] = Minor Version = 2
*/
val = 2;
break;
case PSCI_0_2_FN_CPU_SUSPEND:
case PSCI_0_2_FN64_CPU_SUSPEND:
val = kvm_psci_vcpu_suspend(vcpu);
break;
case PSCI_0_2_FN_CPU_OFF:
kvm_psci_vcpu_off(vcpu);
val = PSCI_RET_SUCCESS;
break;
case PSCI_0_2_FN_CPU_ON:
case PSCI_0_2_FN64_CPU_ON:
val = kvm_psci_vcpu_on(vcpu);
break;
case PSCI_0_2_FN_AFFINITY_INFO:
case PSCI_0_2_FN64_AFFINITY_INFO:
val = kvm_psci_vcpu_affinity_info(vcpu);
break;
case PSCI_0_2_FN_MIGRATE:
case PSCI_0_2_FN64_MIGRATE:
val = PSCI_RET_NOT_SUPPORTED;
break;
case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
/*
* Trusted OS is MP hence does not require migration
* or
* Trusted OS is not present
*/
val = PSCI_0_2_TOS_MP;
break;
case PSCI_0_2_FN_MIGRATE_INFO_UP_CPU:
case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
val = PSCI_RET_NOT_SUPPORTED;
break;
case PSCI_0_2_FN_SYSTEM_OFF:
kvm_psci_system_off(vcpu);
/*
* We shouldn't be going back to the guest VCPU after
* receiving a SYSTEM_OFF request.
*
* If user space accidentally/deliberately resumes
* the guest VCPU after a SYSTEM_OFF request, the guest
* VCPU should see an internal failure from the PSCI return
* value. To achieve this, we preload r0 (or x0) with
* PSCI return value INTERNAL_FAILURE.
*/
val = PSCI_RET_INTERNAL_FAILURE;
ret = 0;
break;
case PSCI_0_2_FN_SYSTEM_RESET:
kvm_psci_system_reset(vcpu);
/*
* Same reason as SYSTEM_OFF for preloading r0 (or x0)
* with PSCI return value INTERNAL_FAILURE.
*/
val = PSCI_RET_INTERNAL_FAILURE;
ret = 0;
break;
default:
return -EINVAL;
}
*vcpu_reg(vcpu, 0) = val;
return ret;
}
static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
unsigned long val;
...@@ -103,20 +275,45 @@ bool kvm_psci_call(struct kvm_vcpu *vcpu)
switch (psci_fn) {
case KVM_PSCI_FN_CPU_OFF:
kvm_psci_vcpu_off(vcpu);
val = PSCI_RET_SUCCESS;
break;
case KVM_PSCI_FN_CPU_ON:
val = kvm_psci_vcpu_on(vcpu);
break;
case KVM_PSCI_FN_CPU_SUSPEND:
case KVM_PSCI_FN_MIGRATE:
val = PSCI_RET_NOT_SUPPORTED;
break;
default:
return -EINVAL;
}
*vcpu_reg(vcpu, 0) = val;
return 1;
}
/**
* kvm_psci_call - handle PSCI call if r0 value is in range
* @vcpu: Pointer to the VCPU struct
*
* Handle PSCI calls from guests through traps from HVC instructions.
* The calling convention is similar to SMC calls to the secure world
* where the function number is placed in r0.
*
* This function returns: > 0 (success), 0 (success but exit to user
* space), and < 0 (errors)
*
* Errors:
* -EINVAL: Unrecognized PSCI function
*/
int kvm_psci_call(struct kvm_vcpu *vcpu)
{
switch (kvm_psci_version(vcpu)) {
case KVM_ARM_PSCI_0_2:
return kvm_psci_0_2_call(vcpu);
case KVM_ARM_PSCI_0_1:
return kvm_psci_0_1_call(vcpu);
default:
return -EINVAL;
};
}
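
As an illustrative aside (not part of this patch), this is roughly what the guest side of such a call looks like when the conduit is HVC: the function ID goes in r0/x0, arguments in the next registers, and the result comes back in r0/x0. The sketch below uses AArch64 register names and mirrors the kernel's own __invoke_psci_fn_hvc() pattern; guest_invoke_psci_hvc() is a hypothetical name.

/* Hypothetical guest-side helper issuing a PSCI call over HVC (AArch64). */
static int guest_invoke_psci_hvc(unsigned long function_id, unsigned long arg0,
				 unsigned long arg1, unsigned long arg2)
{
	register unsigned long x0 asm("x0") = function_id;
	register unsigned long x1 asm("x1") = arg0;
	register unsigned long x2 asm("x2") = arg1;
	register unsigned long x3 asm("x3") = arg2;

	asm volatile("hvc #0"
		     : "+r" (x0)
		     : "r" (x1), "r" (x2), "r" (x3)
		     : "memory");

	return x0;	/* e.g. PSCI_RET_SUCCESS or a PSCI error code */
}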
...@@ -39,6 +39,7 @@ struct device_node;
* from the cpu to be killed.
* @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the
* cpu being killed.
* @cpu_kill: Ensures a cpu has left the kernel. Called from another cpu.
* @cpu_suspend: Suspends a cpu and saves the required context. May fail owing
* to wrong parameters or error conditions. Called from the
* CPU being suspended. Must be called with IRQs disabled.
...@@ -52,6 +53,7 @@ struct cpu_operations {
#ifdef CONFIG_HOTPLUG_CPU
int (*cpu_disable)(unsigned int cpu);
void (*cpu_die)(unsigned int cpu);
int (*cpu_kill)(unsigned int cpu);
#endif
#ifdef CONFIG_ARM64_CPU_SUSPEND
int (*cpu_suspend)(unsigned long);
......
...@@ -41,6 +41,7 @@
#define ARM_CPU_PART_AEM_V8 0xD0F0
#define ARM_CPU_PART_FOUNDATION 0xD000
#define ARM_CPU_PART_CORTEX_A53 0xD030
#define ARM_CPU_PART_CORTEX_A57 0xD070
#define APM_CPU_PART_POTENZA 0x0000
......
...@@ -39,7 +39,7 @@
#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#define KVM_VCPU_MAX_FEATURES 3
struct kvm_vcpu;
int kvm_target_cpu(void);
......
...@@ -18,6 +18,10 @@
#ifndef __ARM64_KVM_PSCI_H__
#define __ARM64_KVM_PSCI_H__
#define KVM_ARM_PSCI_0_1 1
#define KVM_ARM_PSCI_0_2 2
int kvm_psci_version(struct kvm_vcpu *vcpu);
int kvm_psci_call(struct kvm_vcpu *vcpu);
#endif /* __ARM64_KVM_PSCI_H__ */
...@@ -14,6 +14,6 @@
#ifndef __ASM_PSCI_H
#define __ASM_PSCI_H
int psci_init(void);
#endif /* __ASM_PSCI_H */
...@@ -31,6 +31,7 @@
#define KVM_NR_SPSR 5
#ifndef __ASSEMBLY__
#include <linux/psci.h>
#include <asm/types.h>
#include <asm/ptrace.h>
...@@ -56,8 +57,9 @@ struct kvm_regs {
#define KVM_ARM_TARGET_FOUNDATION_V8 1
#define KVM_ARM_TARGET_CORTEX_A57 2
#define KVM_ARM_TARGET_XGENE_POTENZA 3
#define KVM_ARM_TARGET_CORTEX_A53 4
#define KVM_ARM_NUM_TARGETS 5
/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
#define KVM_ARM_DEVICE_TYPE_SHIFT 0
...@@ -77,6 +79,7 @@ struct kvm_regs {
#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
#define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */
#define KVM_ARM_VCPU_PSCI_0_2 2 /* CPU uses PSCI v0.2 */
struct kvm_vcpu_init {
__u32 target;
...@@ -186,10 +189,10 @@ struct kvm_arch_memory_slot {
#define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2)
#define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3)
#define KVM_PSCI_RET_SUCCESS PSCI_RET_SUCCESS
#define KVM_PSCI_RET_NI PSCI_RET_NOT_SUPPORTED
#define KVM_PSCI_RET_INVAL PSCI_RET_INVALID_PARAMS
#define KVM_PSCI_RET_DENIED PSCI_RET_DENIED
#endif
......
...@@ -18,12 +18,17 @@
#include <linux/init.h>
#include <linux/of.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/pm.h>
#include <linux/delay.h>
#include <uapi/linux/psci.h>
#include <asm/compiler.h>
#include <asm/cpu_ops.h>
#include <asm/errno.h>
#include <asm/psci.h>
#include <asm/smp_plat.h>
#include <asm/system_misc.h>
#define PSCI_POWER_STATE_TYPE_STANDBY 0
#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
...@@ -40,58 +45,52 @@ struct psci_operations {
int (*cpu_off)(struct psci_power_state state);
int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
int (*migrate)(unsigned long cpuid);
int (*affinity_info)(unsigned long target_affinity,
unsigned long lowest_affinity_level);
int (*migrate_info_type)(void);
};
static struct psci_operations psci_ops;
static int (*invoke_psci_fn)(u64, u64, u64, u64);
typedef int (*psci_initcall_t)(const struct device_node *);
enum psci_function {
PSCI_FN_CPU_SUSPEND,
PSCI_FN_CPU_ON,
PSCI_FN_CPU_OFF,
PSCI_FN_MIGRATE,
PSCI_FN_AFFINITY_INFO,
PSCI_FN_MIGRATE_INFO_TYPE,
PSCI_FN_MAX,
};
static u32 psci_function_id[PSCI_FN_MAX];
#define PSCI_RET_SUCCESS 0
#define PSCI_RET_EOPNOTSUPP -1
#define PSCI_RET_EINVAL -2
#define PSCI_RET_EPERM -3
static int psci_to_linux_errno(int errno)
{
switch (errno) {
case PSCI_RET_SUCCESS:
return 0;
case PSCI_RET_NOT_SUPPORTED:
return -EOPNOTSUPP;
case PSCI_RET_INVALID_PARAMS:
return -EINVAL;
case PSCI_RET_DENIED:
return -EPERM;
};
return -EINVAL;
}
#define PSCI_POWER_STATE_ID_MASK 0xffff
#define PSCI_POWER_STATE_ID_SHIFT 0
#define PSCI_POWER_STATE_TYPE_MASK 0x1
#define PSCI_POWER_STATE_TYPE_SHIFT 16
#define PSCI_POWER_STATE_AFFL_MASK 0x3
#define PSCI_POWER_STATE_AFFL_SHIFT 24
static u32 psci_power_state_pack(struct psci_power_state state)
{
return ((state.id << PSCI_0_2_POWER_STATE_ID_SHIFT)
& PSCI_0_2_POWER_STATE_ID_MASK) |
((state.type << PSCI_0_2_POWER_STATE_TYPE_SHIFT)
& PSCI_0_2_POWER_STATE_TYPE_MASK) |
((state.affinity_level << PSCI_0_2_POWER_STATE_AFFL_SHIFT)
& PSCI_0_2_POWER_STATE_AFFL_MASK);
}
/*
...@@ -128,6 +127,14 @@ static noinline int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1,
return function_id;
}
static int psci_get_version(void)
{
int err;
err = invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
return err;
}
static int psci_cpu_suspend(struct psci_power_state state,
unsigned long entry_point)
{
...@@ -171,26 +178,36 @@ static int psci_migrate(unsigned long cpuid)
return psci_to_linux_errno(err);
}
static int psci_affinity_info(unsigned long target_affinity,
unsigned long lowest_affinity_level)
{
int err;
u32 fn;
fn = psci_function_id[PSCI_FN_AFFINITY_INFO];
err = invoke_psci_fn(fn, target_affinity, lowest_affinity_level, 0);
return err;
}
static int psci_migrate_info_type(void)
{
int err;
u32 fn;
fn = psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE];
err = invoke_psci_fn(fn, 0, 0, 0);
return err;
}
static int get_set_conduit_method(struct device_node *np)
{
const char *method;
pr_info("probing for conduit method from DT.\n");
if (of_property_read_string(np, "method", &method)) {
pr_warn("missing \"method\" property\n");
return -ENXIO;
}
if (!strcmp("hvc", method)) { if (!strcmp("hvc", method)) {
...@@ -198,10 +215,99 @@ void __init psci_init(void) ...@@ -198,10 +215,99 @@ void __init psci_init(void)
} else if (!strcmp("smc", method)) { } else if (!strcmp("smc", method)) {
invoke_psci_fn = __invoke_psci_fn_smc; invoke_psci_fn = __invoke_psci_fn_smc;
} else { } else {
pr_warning("invalid \"method\" property: %s\n", method); pr_warn("invalid \"method\" property: %s\n", method);
return -EINVAL;
}
return 0;
}
static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
{
invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
}
static void psci_sys_poweroff(void)
{
invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
}
/*
* PSCI Function IDs for v0.2+ are well defined so use
* standard values.
*/
static int psci_0_2_init(struct device_node *np)
{
int err, ver;
err = get_set_conduit_method(np);
if (err)
goto out_put_node;
ver = psci_get_version();
if (ver == PSCI_RET_NOT_SUPPORTED) {
/* PSCI v0.2 mandates implementation of PSCI_ID_VERSION. */
pr_err("PSCI firmware does not comply with the v0.2 spec.\n");
err = -EOPNOTSUPP;
goto out_put_node;
} else {
pr_info("PSCIv%d.%d detected in firmware.\n",
PSCI_VERSION_MAJOR(ver),
PSCI_VERSION_MINOR(ver));
if (PSCI_VERSION_MAJOR(ver) == 0 &&
PSCI_VERSION_MINOR(ver) < 2) {
err = -EINVAL;
pr_err("Conflicting PSCI version detected.\n");
goto out_put_node;
}
}
pr_info("Using standard PSCI v0.2 function IDs\n");
psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN64_CPU_SUSPEND;
psci_ops.cpu_suspend = psci_cpu_suspend;
psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
psci_ops.cpu_off = psci_cpu_off;
psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN64_CPU_ON;
psci_ops.cpu_on = psci_cpu_on;
psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN64_MIGRATE;
psci_ops.migrate = psci_migrate;
psci_function_id[PSCI_FN_AFFINITY_INFO] = PSCI_0_2_FN64_AFFINITY_INFO;
psci_ops.affinity_info = psci_affinity_info;
psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE] =
PSCI_0_2_FN_MIGRATE_INFO_TYPE;
psci_ops.migrate_info_type = psci_migrate_info_type;
arm_pm_restart = psci_sys_reset;
pm_power_off = psci_sys_poweroff;
out_put_node:
of_node_put(np);
return err;
}
/*
* PSCI < v0.2 get PSCI Function IDs via DT.
*/
static int psci_0_1_init(struct device_node *np)
{
u32 id;
int err;
err = get_set_conduit_method(np);
if (err)
goto out_put_node;
pr_info("Using PSCI v0.1 Function IDs from DT\n");
if (!of_property_read_u32(np, "cpu_suspend", &id)) {
psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
psci_ops.cpu_suspend = psci_cpu_suspend;
...@@ -224,7 +330,28 @@ void __init psci_init(void)
out_put_node:
of_node_put(np);
return err;
}
static const struct of_device_id psci_of_match[] __initconst = {
{ .compatible = "arm,psci", .data = psci_0_1_init},
{ .compatible = "arm,psci-0.2", .data = psci_0_2_init},
{},
};
int __init psci_init(void)
{
struct device_node *np;
const struct of_device_id *matched_np;
psci_initcall_t init_fn;
np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
if (!np)
return -ENODEV;
init_fn = (psci_initcall_t)matched_np->data;
return init_fn(np);
}
#ifdef CONFIG_SMP
...@@ -277,6 +404,35 @@ static void cpu_psci_cpu_die(unsigned int cpu)
pr_crit("unable to power off CPU%u (%d)\n", cpu, ret);
}
static int cpu_psci_cpu_kill(unsigned int cpu)
{
int err, i;
if (!psci_ops.affinity_info)
return 1;
/*
* cpu_kill could race with cpu_die and we can
* potentially end up declaring this cpu undead
* while it is dying. So, try again a few times.
*/
for (i = 0; i < 10; i++) {
err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
pr_info("CPU%d killed.\n", cpu);
return 1;
}
msleep(10);
pr_info("Retrying again to check for CPU kill\n");
}
pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n",
cpu, err);
/* Make op_cpu_kill() fail. */
return 0;
}
#endif
const struct cpu_operations cpu_psci_ops = {
...@@ -287,6 +443,7 @@ const struct cpu_operations cpu_psci_ops = {
#ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = cpu_psci_cpu_disable,
.cpu_die = cpu_psci_cpu_die,
.cpu_kill = cpu_psci_cpu_kill,
#endif
};
......
...@@ -228,6 +228,19 @@ int __cpu_disable(void)
return 0;
}
static int op_cpu_kill(unsigned int cpu)
{
/*
* If we have no means of synchronising with the dying CPU, then assume
* that it is really dead. We can only wait for an arbitrary length of
* time and hope that it's dead, so let's skip the wait and just hope.
*/
if (!cpu_ops[cpu]->cpu_kill)
return 1;
return cpu_ops[cpu]->cpu_kill(cpu);
}
static DECLARE_COMPLETION(cpu_died);
/*
...@@ -241,6 +254,15 @@ void __cpu_die(unsigned int cpu)
return;
}
pr_notice("CPU%u: shutdown\n", cpu);
/*
* Now that the dying CPU is beyond the point of no return w.r.t.
* in-kernel synchronisation, try to get the firmware to help us to
* verify that it has really left the kernel before we consider
* clobbering anything it might still be using.
*/
if (!op_cpu_kill(cpu))
pr_warn("CPU%d may not have shut down cleanly\n", cpu);
}
/*
......
...@@ -214,6 +214,8 @@ int __attribute_const__ kvm_target_cpu(void)
return KVM_ARM_TARGET_AEM_V8;
case ARM_CPU_PART_FOUNDATION:
return KVM_ARM_TARGET_FOUNDATION_V8;
case ARM_CPU_PART_CORTEX_A53:
return KVM_ARM_TARGET_CORTEX_A53;
case ARM_CPU_PART_CORTEX_A57:
return KVM_ARM_TARGET_CORTEX_A57;
};
......
...@@ -30,11 +30,15 @@ typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
int ret;
ret = kvm_psci_call(vcpu);
if (ret < 0) {
kvm_inject_undefined(vcpu);
return 1;
}
return ret;
}
static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
......
...@@ -88,6 +88,8 @@ static int __init sys_reg_genericv8_init(void)
&genericv8_target_table);
kvm_register_target_sys_reg_table(KVM_ARM_TARGET_FOUNDATION_V8,
&genericv8_target_table);
kvm_register_target_sys_reg_table(KVM_ARM_TARGET_CORTEX_A53,
&genericv8_target_table);
kvm_register_target_sys_reg_table(KVM_ARM_TARGET_CORTEX_A57,
&genericv8_target_table);
kvm_register_target_sys_reg_table(KVM_ARM_TARGET_XGENE_POTENZA,
......
...@@ -317,6 +317,7 @@ header-y += ppp-ioctl.h
header-y += ppp_defs.h
header-y += pps.h
header-y += prctl.h
header-y += psci.h
header-y += ptp_clock.h
header-y += ptrace.h
header-y += qnx4_fs.h
......
...@@ -171,6 +171,7 @@ struct kvm_pit_config {
#define KVM_EXIT_WATCHDOG 21
#define KVM_EXIT_S390_TSCH 22
#define KVM_EXIT_EPR 23
#define KVM_EXIT_SYSTEM_EVENT 24
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
...@@ -301,6 +302,13 @@ struct kvm_run {
struct {
__u32 epr;
} epr;
/* KVM_EXIT_SYSTEM_EVENT */
struct {
#define KVM_SYSTEM_EVENT_SHUTDOWN 1
#define KVM_SYSTEM_EVENT_RESET 2
__u32 type;
__u64 flags;
} system_event;
/* Fix the size of the union. */
char padding[256];
};
...@@ -748,6 +756,7 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_S390_IRQCHIP 99
#define KVM_CAP_IOEVENTFD_NO_LENGTH 100
#define KVM_CAP_VM_ATTRIBUTES 101
#define KVM_CAP_ARM_PSCI_0_2 102
#ifdef KVM_CAP_IRQ_ROUTING
......
/*
* ARM Power State and Coordination Interface (PSCI) header
*
* This header holds common PSCI defines and macros shared
* by: ARM kernel, ARM64 kernel, KVM ARM/ARM64 and user space.
*
* Copyright (C) 2014 Linaro Ltd.
* Author: Anup Patel <anup.patel@linaro.org>
*/
#ifndef _UAPI_LINUX_PSCI_H
#define _UAPI_LINUX_PSCI_H
/*
* PSCI v0.1 interface
*
* The PSCI v0.1 function numbers are implementation defined.
*
* Only PSCI return values such as: SUCCESS, NOT_SUPPORTED,
* INVALID_PARAMS, and DENIED defined below are applicable
* to PSCI v0.1.
*/
/* PSCI v0.2 interface */
#define PSCI_0_2_FN_BASE 0x84000000
#define PSCI_0_2_FN(n) (PSCI_0_2_FN_BASE + (n))
#define PSCI_0_2_64BIT 0x40000000
#define PSCI_0_2_FN64_BASE \
(PSCI_0_2_FN_BASE + PSCI_0_2_64BIT)
#define PSCI_0_2_FN64(n) (PSCI_0_2_FN64_BASE + (n))
#define PSCI_0_2_FN_PSCI_VERSION PSCI_0_2_FN(0)
#define PSCI_0_2_FN_CPU_SUSPEND PSCI_0_2_FN(1)
#define PSCI_0_2_FN_CPU_OFF PSCI_0_2_FN(2)
#define PSCI_0_2_FN_CPU_ON PSCI_0_2_FN(3)
#define PSCI_0_2_FN_AFFINITY_INFO PSCI_0_2_FN(4)
#define PSCI_0_2_FN_MIGRATE PSCI_0_2_FN(5)
#define PSCI_0_2_FN_MIGRATE_INFO_TYPE PSCI_0_2_FN(6)
#define PSCI_0_2_FN_MIGRATE_INFO_UP_CPU PSCI_0_2_FN(7)
#define PSCI_0_2_FN_SYSTEM_OFF PSCI_0_2_FN(8)
#define PSCI_0_2_FN_SYSTEM_RESET PSCI_0_2_FN(9)
#define PSCI_0_2_FN64_CPU_SUSPEND PSCI_0_2_FN64(1)
#define PSCI_0_2_FN64_CPU_ON PSCI_0_2_FN64(3)
#define PSCI_0_2_FN64_AFFINITY_INFO PSCI_0_2_FN64(4)
#define PSCI_0_2_FN64_MIGRATE PSCI_0_2_FN64(5)
#define PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU PSCI_0_2_FN64(7)
/* PSCI v0.2 power state encoding for CPU_SUSPEND function */
#define PSCI_0_2_POWER_STATE_ID_MASK 0xffff
#define PSCI_0_2_POWER_STATE_ID_SHIFT 0
#define PSCI_0_2_POWER_STATE_TYPE_SHIFT 16
#define PSCI_0_2_POWER_STATE_TYPE_MASK \
(0x1 << PSCI_0_2_POWER_STATE_TYPE_SHIFT)
#define PSCI_0_2_POWER_STATE_AFFL_SHIFT 24
#define PSCI_0_2_POWER_STATE_AFFL_MASK \
(0x3 << PSCI_0_2_POWER_STATE_AFFL_SHIFT)
/* PSCI v0.2 affinity level state returned by AFFINITY_INFO */
#define PSCI_0_2_AFFINITY_LEVEL_ON 0
#define PSCI_0_2_AFFINITY_LEVEL_OFF 1
#define PSCI_0_2_AFFINITY_LEVEL_ON_PENDING 2
/* PSCI v0.2 multicore support in Trusted OS returned by MIGRATE_INFO_TYPE */
#define PSCI_0_2_TOS_UP_MIGRATE 0
#define PSCI_0_2_TOS_UP_NO_MIGRATE 1
#define PSCI_0_2_TOS_MP 2
/* PSCI version decoding (independent of PSCI version) */
#define PSCI_VERSION_MAJOR_SHIFT 16
#define PSCI_VERSION_MINOR_MASK \
((1U << PSCI_VERSION_MAJOR_SHIFT) - 1)
#define PSCI_VERSION_MAJOR_MASK ~PSCI_VERSION_MINOR_MASK
#define PSCI_VERSION_MAJOR(ver) \
(((ver) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT)
#define PSCI_VERSION_MINOR(ver) \
((ver) & PSCI_VERSION_MINOR_MASK)
/* PSCI return values (inclusive of all PSCI versions) */
#define PSCI_RET_SUCCESS 0
#define PSCI_RET_NOT_SUPPORTED -1
#define PSCI_RET_INVALID_PARAMS -2
#define PSCI_RET_DENIED -3
#define PSCI_RET_ALREADY_ON -4
#define PSCI_RET_ON_PENDING -5
#define PSCI_RET_INTERNAL_FAILURE -6
#define PSCI_RET_NOT_PRESENT -7
#define PSCI_RET_DISABLED -8
#endif /* _UAPI_LINUX_PSCI_H */
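
As an illustrative aside (not part of this patch), a small userspace sketch of how the macros above might be used once the header is exported (the Kbuild hunk earlier adds it to header-y); the chosen power-state and version values are arbitrary examples.

#include <stdio.h>
#include <linux/psci.h>

int main(void)
{
	/* Pack an example CPU_SUSPEND power_state: state id 1, power-down type. */
	unsigned int state =
		((1 << PSCI_0_2_POWER_STATE_ID_SHIFT) & PSCI_0_2_POWER_STATE_ID_MASK) |
		((1 << PSCI_0_2_POWER_STATE_TYPE_SHIFT) & PSCI_0_2_POWER_STATE_TYPE_MASK);

	/* Decode a value as returned by PSCI_0_2_FN_PSCI_VERSION (0.2 here). */
	unsigned int ver = 2;

	printf("power_state = 0x%x\n", state);
	printf("PSCIv%u.%u\n", PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
	return 0;
}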