Commit 06e22bb6 authored by Radim Krčmář

Merge tag 'kvmarm-fixes-for-4.17-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm

KVM/arm fixes for 4.17, take #1

- PSCI selection API, a leftover from 4.16
- Kick vcpu on active interrupt affinity change
- Plug a VMID allocation race on oversubscribed systems
- Silence debug messages
- Update Christoffer's email address
parents 6d08b06e 85bd0ba1
@@ -1960,6 +1960,9 @@ ARM 32-bit VFP control registers have the following id bit patterns:
ARM 64-bit FP registers have the following id bit patterns:
0x4030 0000 0012 0 <regno:12>
ARM firmware pseudo-registers have the following bit pattern:
0x4030 0000 0014 <regno:16>
arm64 registers are mapped using the lower 32 bits. The upper 16 of
that is the register group type, or coprocessor number:
@@ -1976,6 +1979,9 @@ arm64 CCSIDR registers are demultiplexed by CSSELR value:
arm64 system registers have the following id bit patterns:
0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3>
arm64 firmware pseudo-registers have the following bit pattern:
0x6030 0000 0014 <regno:16>
MIPS registers are mapped using the lower 32 bits. The upper 16 of that is
the register group type:
@@ -2510,7 +2516,8 @@ Possible features:
and execute guest code when KVM_RUN is called.
- KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
- KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 (or a future revision
backward compatible with v0.2) for the CPU.
Depends on KVM_CAP_ARM_PSCI_0_2.
- KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU.
Depends on KVM_CAP_ARM_PMU_V3.
......
KVM implements the PSCI (Power State Coordination Interface)
specification in order to provide services such as CPU on/off, reset
and power-off to the guest.
The PSCI specification is regularly updated to provide new features,
and KVM implements these updates if they make sense from a virtualization
point of view.
This means that a guest booted on two different versions of KVM can
observe two different "firmware" revisions. This could cause issues if
a given guest is tied to a particular PSCI revision (unlikely), or if
a migration causes a different PSCI version to be exposed out of the
blue to an unsuspecting guest.
In order to remedy this situation, KVM exposes a set of "firmware
pseudo-registers" that can be manipulated using the GET/SET_ONE_REG
interface. These registers can be saved/restored by userspace, and set
to a convenient value if required.
The following register is defined:
* KVM_REG_ARM_PSCI_VERSION:
- Only valid if the vcpu has the KVM_ARM_VCPU_PSCI_0_2 feature set
(and thus has already been initialized)
- Returns the current PSCI version on GET_ONE_REG (defaulting to the
highest PSCI version implemented by KVM and compatible with v0.2)
- Allows any PSCI version implemented by KVM and compatible with
v0.2 to be set with SET_ONE_REG
- Affects the whole VM (even if the register view is per-vcpu)
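For illustration only, and not part of this merge: userspace can save and
restore this register with the standard KVM_GET_ONE_REG / KVM_SET_ONE_REG
ioctls. In the sketch below, the vcpu_fd descriptor is assumed to be an
already-initialized vcpu file descriptor, the REG_PSCI_VERSION name is made
up for the example, and error handling is elided; the ID value follows the
arm64 "0x6030 0000 0014 <regno:16>" pattern documented above, with regno = 0.

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* KVM_REG_ARM64 | KVM_REG_SIZE_U64 | (0x0014 << 16) | regno 0 */
#define REG_PSCI_VERSION 0x6030000000140000ULL

/* Read the PSCI version currently exposed to the guest. */
static uint64_t save_psci_version(int vcpu_fd)
{
	uint64_t ver = 0;
	struct kvm_one_reg reg = {
		.id   = REG_PSCI_VERSION,
		.addr = (uintptr_t)&ver,
	};

	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	/* error handling elided */
	return ver;
}

/* Restore a previously saved version, e.g. on the migration target. */
static void restore_psci_version(int vcpu_fd, uint64_t ver)
{
	struct kvm_one_reg reg = {
		.id   = REG_PSCI_VERSION,
		.addr = (uintptr_t)&ver,
	};

	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);	/* rejects unsupported versions */
}

Restoring the saved value before the first KVM_RUN on the destination keeps a
migrated guest from observing a firmware revision different from the one it
booted with, which is exactly the scenario this register exists to prevent.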
@@ -7753,7 +7753,7 @@ F: arch/x86/include/asm/svm.h
F: arch/x86/kvm/svm.c
KERNEL VIRTUAL MACHINE FOR ARM (KVM/arm)
M: Christoffer Dall <christoffer.dall@arm.com>
M: Marc Zyngier <marc.zyngier@arm.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: kvmarm@lists.cs.columbia.edu
@@ -7767,7 +7767,7 @@ F: virt/kvm/arm/
F: include/kvm/arm_*
KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
M: Christoffer Dall <christoffer.dall@arm.com>
M: Marc Zyngier <marc.zyngier@arm.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: kvmarm@lists.cs.columbia.edu
......
@@ -77,6 +77,9 @@ struct kvm_arch {
/* Interrupt controller */
struct vgic_dist vgic;
int max_vcpus;
/* Mandated version of PSCI */
u32 psci_version;
};
#define KVM_NR_MEM_OBJS 40
......
@@ -195,6 +195,12 @@ struct kvm_arch_memory_slot {
#define KVM_REG_ARM_VFP_FPINST 0x1009
#define KVM_REG_ARM_VFP_FPINST2 0x100A
/* KVM-as-firmware specific pseudo-registers */
#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \
KVM_REG_ARM_FW | ((r) & 0xffff))
#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
/* Device Control API: ARM VGIC */
#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
......
@@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/kvm.h>
@@ -176,6 +177,7 @@ static unsigned long num_core_regs(void)
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
return num_core_regs() + kvm_arm_num_coproc_regs(vcpu)
+ kvm_arm_get_fw_num_regs(vcpu)
+ NUM_TIMER_REGS;
}
@@ -196,6 +198,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
uindices++;
}
ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
if (ret)
return ret;
uindices += kvm_arm_get_fw_num_regs(vcpu);
ret = copy_timer_indices(vcpu, uindices);
if (ret)
return ret;
@@ -214,6 +221,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return get_core_reg(vcpu, reg);
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
return kvm_arm_get_fw_reg(vcpu, reg);
if (is_timer_reg(reg->id))
return get_timer_reg(vcpu, reg);
@@ -230,6 +240,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return set_core_reg(vcpu, reg);
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
return kvm_arm_set_fw_reg(vcpu, reg);
if (is_timer_reg(reg->id))
return set_timer_reg(vcpu, reg);
......
@@ -75,6 +75,9 @@ struct kvm_arch {
/* Interrupt controller */
struct vgic_dist vgic;
/* Mandated version of PSCI */
u32 psci_version;
};
#define KVM_NR_MEM_OBJS 40
......
@@ -206,6 +206,12 @@ struct kvm_arch_memory_slot {
#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
#define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2)
/* KVM-as-firmware specific pseudo-registers */
#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
KVM_REG_ARM_FW | ((r) & 0xffff))
#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
/* Device Control API: ARM VGIC */
#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
......
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/kvm.h>
@@ -205,7 +206,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
+ kvm_arm_get_fw_num_regs(vcpu) + NUM_TIMER_REGS;
}
/**
@@ -225,6 +226,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
uindices++;
}
ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
if (ret)
return ret;
uindices += kvm_arm_get_fw_num_regs(vcpu);
ret = copy_timer_indices(vcpu, uindices);
if (ret)
return ret;
@@ -243,6 +249,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return get_core_reg(vcpu, reg);
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
return kvm_arm_get_fw_reg(vcpu, reg);
if (is_timer_reg(reg->id))
return get_timer_reg(vcpu, reg);
@@ -259,6 +268,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return set_core_reg(vcpu, reg);
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
return kvm_arm_set_fw_reg(vcpu, reg);
if (is_timer_reg(reg->id))
return set_timer_reg(vcpu, reg);
......
@@ -996,14 +996,12 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
if (id == SYS_ID_AA64PFR0_EL1) {
if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT))
kvm_debug("SVE unsupported for guests, suppressing\n");
val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
} else if (id == SYS_ID_AA64MMFR1_EL1) {
if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))
kvm_debug("LORegions unsupported for guests, suppressing\n");
val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT);
}
......
@@ -37,10 +37,15 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
* Our PSCI implementation stays the same across versions from
* v0.2 onward, only adding the few mandatory functions (such
* as FEATURES with 1.0) that are required by newer
* revisions. It is thus safe to return the latest, unless
* userspace has instructed us otherwise.
*/
if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) {
if (vcpu->kvm->arch.psci_version)
return vcpu->kvm->arch.psci_version;
return KVM_ARM_PSCI_LATEST;
}
return KVM_ARM_PSCI_0_1;
}
@@ -48,4 +53,11 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
struct kvm_one_reg;
int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
#endif /* __KVM_ARM_PSCI_H__ */
@@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u32 kvm_next_vmid;
static unsigned int kvm_vmid_bits __read_mostly;
static DEFINE_RWLOCK(kvm_vmid_lock);
static bool vgic_present;
@@ -473,11 +473,16 @@ static void update_vttbr(struct kvm *kvm)
{
phys_addr_t pgd_phys;
u64 vmid;
bool new_gen;
read_lock(&kvm_vmid_lock);
new_gen = need_new_vmid_gen(kvm);
read_unlock(&kvm_vmid_lock);
if (!new_gen)
return;
write_lock(&kvm_vmid_lock);
/*
* We need to re-check the vmid_gen here to ensure that if another vcpu
@@ -485,7 +490,7 @@ static void update_vttbr(struct kvm *kvm)
* use the same vmid.
*/
if (!need_new_vmid_gen(kvm)) {
write_unlock(&kvm_vmid_lock);
return;
}
@@ -519,7 +524,7 @@ static void update_vttbr(struct kvm *kvm)
vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
write_unlock(&kvm_vmid_lock);
}
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
......
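(A note on the update_vttbr() change above: the initial need_new_vmid_gen()
check used to run with no lock held at all, so a vcpu could observe the VMID
generation as current while another vcpu was halfway through rolling it over,
and then enter the guest with a VMID that was about to be recycled; on an
oversubscribed system this could leave two VMs running with the same VMID.
Performing the check under the read side of the new rwlock closes that window
while keeping the common no-rollover path cheap, since readers only contend
with an actual generation bump.)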
@@ -18,6 +18,7 @@
#include <linux/arm-smccc.h>
#include <linux/preempt.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <asm/cputype.h>
@@ -427,3 +428,62 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
smccc_set_retval(vcpu, val, 0, 0, 0);
return 1;
}
int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
{
return 1; /* PSCI version */
}
int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices))
return -EFAULT;
return 0;
}
int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
void __user *uaddr = (void __user *)(long)reg->addr;
u64 val;
val = kvm_psci_version(vcpu, vcpu->kvm);
if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
return -EFAULT;
return 0;
}
return -EINVAL;
}
int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
void __user *uaddr = (void __user *)(long)reg->addr;
bool wants_02;
u64 val;
if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);
switch (val) {
case KVM_ARM_PSCI_0_1:
if (wants_02)
return -EINVAL;
vcpu->kvm->arch.psci_version = val;
return 0;
case KVM_ARM_PSCI_0_2:
case KVM_ARM_PSCI_1_0:
if (!wants_02)
return -EINVAL;
vcpu->kvm->arch.psci_version = val;
return 0;
}
}
return -EINVAL;
}
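(Observe the validation in kvm_arm_set_fw_reg(): userspace may only select
KVM_ARM_PSCI_0_1 when the vcpu was created without the KVM_ARM_VCPU_PSCI_0_2
feature, and may only select v0.2 or v1.0 when it was created with it, so the
stored psci_version can never contradict the vcpu's feature set. Any other
value is rejected with -EINVAL.)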
@@ -594,6 +594,7 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
bool target_vcpu_needs_kick = false;
spin_lock(&irq->irq_lock);
@@ -664,11 +665,18 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
list_del(&irq->ap_list);
irq->vcpu = target_vcpu;
list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
target_vcpu_needs_kick = true;
}
spin_unlock(&irq->irq_lock);
spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
if (target_vcpu_needs_kick) {
kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
kvm_vcpu_kick(target_vcpu);
}
goto retry;
}
......
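(A note on the vgic change above: when vgic_prune_ap_list() migrates an
interrupt to another vcpu's ap_list, typically because the guest changed the
interrupt's affinity while it was active, the new target vcpu may already be
running in guest mode and would not otherwise notice the freshly queued
interrupt. Raising KVM_REQ_IRQ_PENDING and kicking the target forces an exit
so its ap_list is re-evaluated, hence the new target_vcpu_needs_kick flag.)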