Commit 26bf74bd authored by Reiji Watanabe, committed by Marc Zyngier

KVM: arm64: mixed-width check should be skipped for uninitialized vCPUs

KVM allows userspace to configure a guest with either all 32bit or
all 64bit EL1 vCPUs.  At vCPU reset, vcpu_allowed_register_width()
checks whether the vCPU's register width is consistent with that of
all other vCPUs.  Since the check is run even against vCPUs that have
not been initialized yet (KVM_ARM_VCPU_INIT has not been done for
them), those uninitialized vCPUs are erroneously treated as 64bit
vCPUs, which causes the function to incorrectly detect a mixed-width
VM.
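
For illustration only (not part of this patch): a minimal userspace
sketch of the failing scenario, assuming an arm64 host with 32bit EL1
support; names are pared down and error handling is omitted.  A second
vCPU exists but has never been through KVM_ARM_VCPU_INIT, so the 32bit
init of the first vCPU used to be rejected.

  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  int main(void)
  {
  	struct kvm_vcpu_init init;
  	int kvm = open("/dev/kvm", O_RDWR);
  	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
  	int vcpu0 = ioctl(vm, KVM_CREATE_VCPU, 0);
  	int vcpu1 = ioctl(vm, KVM_CREATE_VCPU, 1); /* never initialized */

  	memset(&init, 0, sizeof(init));
  	ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
  	init.features[0] |= 1U << KVM_ARM_VCPU_EL1_32BIT;

  	/*
  	 * Before this fix, the uninitialized vcpu1 was treated as a
  	 * 64bit vCPU, so this 32bit init could fail with EINVAL.
  	 */
  	if (ioctl(vcpu0, KVM_ARM_VCPU_INIT, &init) < 0)
  		perror("KVM_ARM_VCPU_INIT");

  	(void)vcpu1;
  	return 0;
  }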

Introduce KVM_ARCH_FLAG_EL1_32BIT and KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED
bits for kvm->arch.flags.  The EL1_32BIT bit indicates whether the guest
must be configured with all 32bit or all 64bit vCPUs, and the
REG_WIDTH_CONFIGURED bit indicates whether the EL1_32BIT bit is valid
(i.e. has already been set up).  Both bits are set at the guest's first
KVM_ARM_VCPU_INIT, based on that vCPU's KVM_ARM_VCPU_EL1_32BIT
configuration.

Check the vCPU's register width against those new bits at the vCPU's
KVM_ARM_VCPU_INIT (instead of against other vCPUs' register widths).
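
As a sketch of the intended two-bit semantics only (the helper below is
hypothetical; the real logic lives in kvm_set_vm_width() and
vcpu_el1_is_32bit() in the hunks that follow):

  /* Hypothetical helper summarizing the two-bit scheme described above. */
  static inline bool kvm_vm_is_32bit(struct kvm *kvm)
  {
  	/* EL1_32BIT is only meaningful once REG_WIDTH_CONFIGURED is set. */
  	if (!test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags))
  		return false;	/* register width not determined yet */

  	return test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
  }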

Fixes: 66e94d5c ("KVM: arm64: Prevent mixed-width VM creation")
Signed-off-by: Reiji Watanabe <reijiw@google.com>
Reviewed-by: Oliver Upton <oupton@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220329031924.619453-2-reijiw@google.com
parent c707663e
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -43,10 +43,22 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 
 void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);
 
+#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
 static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 {
 	return !(vcpu->arch.hcr_el2 & HCR_RW);
 }
+#else
+static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = vcpu->kvm;
+
+	WARN_ON_ONCE(!test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED,
+			       &kvm->arch.flags));
+
+	return test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
+}
+#endif
 
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
@@ -72,15 +84,14 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 		vcpu->arch.hcr_el2 |= HCR_TVM;
 	}
 
-	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
+	if (vcpu_el1_is_32bit(vcpu))
 		vcpu->arch.hcr_el2 &= ~HCR_RW;
-
-	/*
-	 * TID3: trap feature register accesses that we virtualise.
-	 * For now this is conditional, since no AArch32 feature regs
-	 * are currently virtualised.
-	 */
-	if (!vcpu_el1_is_32bit(vcpu))
+	else
+		/*
+		 * TID3: trap feature register accesses that we virtualise.
+		 * For now this is conditional, since no AArch32 feature regs
+		 * are currently virtualised.
+		 */
 		vcpu->arch.hcr_el2 |= HCR_TID3;
 
 	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -127,6 +127,16 @@ struct kvm_arch {
 #define KVM_ARCH_FLAG_MTE_ENABLED			1
 	/* At least one vCPU has ran in the VM */
 #define KVM_ARCH_FLAG_HAS_RAN_ONCE			2
+	/*
+	 * The following two bits are used to indicate the guest's EL1
+	 * register width configuration. A value of KVM_ARCH_FLAG_EL1_32BIT
+	 * bit is valid only when KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED is set.
+	 * Otherwise, the guest's EL1 register width has not yet been
+	 * determined yet.
+	 */
+#define KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED	3
+#define KVM_ARCH_FLAG_EL1_32BIT			4
+
 	unsigned long flags;
 
 	/*
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -181,27 +181,51 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
+/**
+ * kvm_set_vm_width() - set the register width for the guest
+ * @vcpu: Pointer to the vcpu being configured
+ *
+ * Set both KVM_ARCH_FLAG_EL1_32BIT and KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED
+ * in the VM flags based on the vcpu's requested register width, the HW
+ * capabilities and other options (such as MTE).
+ * When REG_WIDTH_CONFIGURED is already set, the vcpu settings must be
+ * consistent with the value of the FLAG_EL1_32BIT bit in the flags.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int kvm_set_vm_width(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vcpu *tmp;
+	struct kvm *kvm = vcpu->kvm;
 	bool is32bit;
-	unsigned long i;
 
 	is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
+
+	lockdep_assert_held(&kvm->lock);
+
+	if (test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags)) {
+		/*
+		 * The guest's register width is already configured.
+		 * Make sure that the vcpu is consistent with it.
+		 */
+		if (is32bit == test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags))
+			return 0;
+
+		return -EINVAL;
+	}
+
 	if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
-		return false;
+		return -EINVAL;
 
 	/* MTE is incompatible with AArch32 */
-	if (kvm_has_mte(vcpu->kvm) && is32bit)
-		return false;
+	if (kvm_has_mte(kvm) && is32bit)
+		return -EINVAL;
 
-	/* Check that the vcpus are either all 32bit or all 64bit */
-	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
-		if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit)
-			return false;
-	}
+	if (is32bit)
+		set_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
 
-	return true;
+	set_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags);
+
+	return 0;
 }
 
 /**
@@ -230,10 +254,16 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	u32 pstate;
 
 	mutex_lock(&vcpu->kvm->lock);
-	reset_state = vcpu->arch.reset_state;
-	WRITE_ONCE(vcpu->arch.reset_state.reset, false);
+	ret = kvm_set_vm_width(vcpu);
+	if (!ret) {
+		reset_state = vcpu->arch.reset_state;
+		WRITE_ONCE(vcpu->arch.reset_state.reset, false);
+	}
 	mutex_unlock(&vcpu->kvm->lock);
 
+	if (ret)
+		return ret;
+
 	/* Reset PMU outside of the non-preemptible section */
 	kvm_pmu_vcpu_reset(vcpu);
 
@@ -260,14 +290,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 		}
 	}
 
-	if (!vcpu_allowed_register_width(vcpu)) {
-		ret = -EINVAL;
-		goto out;
-	}
-
 	switch (vcpu->arch.target) {
 	default:
-		if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
+		if (vcpu_el1_is_32bit(vcpu)) {
 			pstate = VCPU_RESET_PSTATE_SVC;
 		} else {
 			pstate = VCPU_RESET_PSTATE_EL1;