Commit 6e1d72f1 authored by Paolo Bonzini

Merge tag 'kvmarm-fixes-5.8-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-master

KVM/arm fixes for 5.8, take #2

- Make sure a vcpu becoming non-resident doesn't race against the doorbell delivery
- Only advertise pvtime if accounting is enabled (see the probe sketch below)
- Return the correct error code if reset fails with SVE
- Make sure that pseudo-NMI functions are annotated as __always_inline
parents 5ecad245 a3f574cd
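
(Not part of the series: a minimal userspace sketch of how the pvtime change is observable. With these patches, KVM_HAS_DEVICE_ATTR on a vCPU fd reports the KVM_ARM_VCPU_PVTIME_IPA attribute only when scheduler accounting is available; the vCPU fd setup is assumed to already exist.)

    #include <errno.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Probe whether this vCPU can have stolen time enabled. */
    static int pvtime_supported(int vcpu_fd)
    {
            struct kvm_device_attr attr = {
                    .group = KVM_ARM_VCPU_PVTIME_CTRL,
                    .attr  = KVM_ARM_VCPU_PVTIME_IPA,
            };

            if (ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0)
                    return 1;       /* advertised: accounting is on */
            if (errno == ENXIO)
                    return 0;       /* not advertised (accounting disabled) */
            perror("KVM_HAS_DEVICE_ATTR");
            return -1;              /* unexpected error */
    }
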
@@ -109,7 +109,7 @@ static inline u32 gic_read_pmr(void)
 	return read_sysreg_s(SYS_ICC_PMR_EL1);
 }
 
-static inline void gic_write_pmr(u32 val)
+static __always_inline void gic_write_pmr(u32 val)
 {
 	write_sysreg_s(val, SYS_ICC_PMR_EL1);
 }
@@ -675,7 +675,7 @@ static inline bool system_supports_generic_auth(void)
 	       cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
 }
 
-static inline bool system_uses_irq_prio_masking(void)
+static __always_inline bool system_uses_irq_prio_masking(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
 	       cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
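
(Aside, not from the patch: plain "inline" is only a hint, so the compiler may still emit an out-of-line copy of these helpers, whereas __always_inline forbids that. That presumably matters here because the helpers can be used from code, such as the non-VHE hyp text at EL2, that cannot safely branch into an out-of-line copy living in regular kernel text. A stand-alone illustration of the difference, with hypothetical helper names:)

    /* Plain "inline" is a hint; the attribute removes the compiler's choice. */
    #define always_inline_attr inline __attribute__((__always_inline__))

    static inline int hinted_helper(int x)              /* may be emitted out of line */
    {
            return x + 1;
    }

    static always_inline_attr int forced_helper(int x)  /* always inlined */
    {
            return x + 1;
    }

    int caller(int x)
    {
            return hinted_helper(x) + forced_helper(x);
    }
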
@@ -3,6 +3,7 @@
 
 #include <linux/arm-smccc.h>
 #include <linux/kvm_host.h>
+#include <linux/sched/stat.h>
 
 #include <asm/kvm_mmu.h>
 #include <asm/pvclock-abi.h>
@@ -73,6 +74,11 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
 	return base;
 }
 
+static bool kvm_arm_pvtime_supported(void)
+{
+	return !!sched_info_on();
+}
+
 int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
 			    struct kvm_device_attr *attr)
 {
@@ -82,7 +88,8 @@ int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
 	int ret = 0;
 	int idx;
 
-	if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
+	if (!kvm_arm_pvtime_supported() ||
+	    attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
 		return -ENXIO;
 
 	if (get_user(ipa, user))
@@ -110,7 +117,8 @@ int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
 	u64 __user *user = (u64 __user *)attr->addr;
 	u64 ipa;
 
-	if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
+	if (!kvm_arm_pvtime_supported() ||
+	    attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
 		return -ENXIO;
 
 	ipa = vcpu->arch.steal.base;
@@ -125,7 +133,8 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
 {
 	switch (attr->attr) {
 	case KVM_ARM_VCPU_PVTIME_IPA:
-		return 0;
+		if (kvm_arm_pvtime_supported())
+			return 0;
 	}
 	return -ENXIO;
 }
@@ -245,7 +245,7 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
  */
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 {
-	int ret = -EINVAL;
+	int ret;
 	bool loaded;
 	u32 pstate;
@@ -269,15 +269,19 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 
 	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
 	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
-		if (kvm_vcpu_enable_ptrauth(vcpu))
+		if (kvm_vcpu_enable_ptrauth(vcpu)) {
+			ret = -EINVAL;
 			goto out;
+		}
 	}
 
 	switch (vcpu->arch.target) {
 	default:
 		if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
-			if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1))
+			if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) {
+				ret = -EINVAL;
 				goto out;
+			}
 			pstate = VCPU_RESET_PSTATE_SVC;
 		} else {
 			pstate = VCPU_RESET_PSTATE_EL1;
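
(My reading of the reset.c change, for context: failure paths that jumped to "out" without setting ret used to rely on the -EINVAL initialiser, so once an earlier step had overwritten ret with 0, those paths reported success; the hunk above instead assigns the error at each failure site. A generic, self-contained illustration of the pattern, with hypothetical helpers:)

    #include <errno.h>

    /* Hypothetical helpers, for illustration only. */
    static int step_one(void) { return 0; }     /* succeeds, returns 0 */
    static int step_two(void) { return -1; }    /* fails */

    static int do_reset(void)
    {
            int ret;

            ret = step_one();       /* success leaves ret == 0 */
            if (ret)
                    goto out;

            if (step_two()) {
                    ret = -EINVAL;  /* set the code at the failure site... */
                    goto out;       /* ...so "out" never returns a stale 0 */
            }

            ret = 0;
    out:
            return ret;
    }
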
@@ -90,7 +90,15 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
 	    !irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
 		disable_irq_nosync(irq);
 
+	/*
+	 * The v4.1 doorbell can fire concurrently with the vPE being
+	 * made non-resident. Ensure we only update pending_last
+	 * *after* the non-residency sequence has completed.
+	 */
+	raw_spin_lock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
 	vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
+	raw_spin_unlock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
 	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 	kvm_vcpu_kick(vcpu);
@@ -4054,16 +4054,24 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
 	u64 val;
 
 	if (info->req_db) {
+		unsigned long flags;
+
 		/*
 		 * vPE is going to block: make the vPE non-resident with
 		 * PendingLast clear and DB set. The GIC guarantees that if
 		 * we read-back PendingLast clear, then a doorbell will be
 		 * delivered when an interrupt comes.
+		 *
+		 * Note the locking to deal with the concurrent update of
+		 * pending_last from the doorbell interrupt handler that can
+		 * run concurrently.
 		 */
+		raw_spin_lock_irqsave(&vpe->vpe_lock, flags);
 		val = its_clear_vpend_valid(vlpi_base,
 					    GICR_VPENDBASER_PendingLast,
 					    GICR_VPENDBASER_4_1_DB);
 		vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
+		raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
 	} else {
 		/*
 		 * We're not blocking, so just make the vPE non-resident