Commit 9fdb86c8 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "x86 bugfix patches and one compilation fix for ARM"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: arm64/sve: Fix vq_present() macro to yield a bool
  KVM: LAPIC: Fix pending interrupt in IRR blocked by software disable LAPIC
  KVM: nVMX: Change KVM_STATE_NESTED_EVMCS to signal vmcs12 is copied from eVMCS
  KVM: nVMX: Allow restore nested-state to enable eVMCS when vCPU in SMM
  KVM: x86: degrade WARN to pr_warn_ratelimited
parents 0e63665a e644fa18
@@ -208,7 +208,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 
 #define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
 #define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
-#define vq_present(vqs, vq) ((vqs)[vq_word(vq)] & vq_mask(vq))
+#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))
 
 static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
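Why the !! matters: the old vq_present() evaluates to the raw mask bit (0x8 for the fourth vector length, for instance), and that value ends up compared against a bool-valued availability helper in the SVE register code, so every set bit other than bit 0 looks like a mismatch. A minimal, self-contained C sketch of that failure mode; the names below are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VQ_MIN 1
#define vq_word(vq)  (((vq) - VQ_MIN) / 64)
#define vq_mask(vq)  ((uint64_t)1 << ((vq) - VQ_MIN) % 64)
/* Old form: evaluates to the raw mask bit (e.g. 0x8), not 0 or 1. */
#define vq_present_old(vqs, vq) ((vqs)[vq_word(vq)] & vq_mask(vq))
/* Fixed form: !! collapses any set bit to exactly 1.              */
#define vq_present_new(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))

int main(void)
{
	uint64_t vqs[1] = { vq_mask(4) };  /* vector length 4 is requested */
	bool available = true;             /* what the host reports        */

	/* Old: 0x8 != 1, so a perfectly valid request looks inconsistent.
	 * New: 1 == 1, the comparison behaves as intended.                */
	printf("old mismatch: %d, new mismatch: %d\n",
	       vq_present_old(vqs, 4) != available,
	       vq_present_new(vqs, 4) != available);
	return 0;
}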
@@ -2339,7 +2339,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
 	struct kvm_lapic *apic = vcpu->arch.apic;
 	u32 ppr;
 
-	if (!apic_enabled(apic))
+	if (!kvm_apic_hw_enabled(apic))
 		return -1;
 
 	__apic_update_ppr(apic, &ppr);
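The LAPIC change narrows the early bail-out: apic_enabled() requires both the hardware enable in IA32_APIC_BASE and the software enable (bit 8 of the Spurious Interrupt Vector Register), so an interrupt already latched in the IRR was never reported while the guest had its LAPIC merely software-disabled. A toy model of the two predicates, not kernel code, just to make the distinction concrete:

#include <stdbool.h>
#include <stdio.h>

struct lapic_model {
	bool hw_enabled;   /* IA32_APIC_BASE APIC global enable        */
	bool sw_enabled;   /* SVR bit 8, APIC software enable          */
	bool irr_pending;  /* some fixed interrupt latched in the IRR  */
};

/* Old check: both enables required, so a software-disabled LAPIC
 * hides whatever is already pending in the IRR.                    */
static bool has_interrupt_old(const struct lapic_model *a)
{
	return a->hw_enabled && a->sw_enabled && a->irr_pending;
}

/* New check: only a hardware-disabled LAPIC has nothing to report. */
static bool has_interrupt_new(const struct lapic_model *a)
{
	return a->hw_enabled && a->irr_pending;
}

int main(void)
{
	struct lapic_model a = {
		.hw_enabled = true, .sw_enabled = false, .irr_pending = true,
	};

	printf("old: %d, new: %d\n", has_interrupt_old(&a), has_interrupt_new(&a));
	return 0;
}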
@@ -5240,9 +5240,6 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
 	vmx = to_vmx(vcpu);
 	vmcs12 = get_vmcs12(vcpu);
 
-	if (nested_vmx_allowed(vcpu) && vmx->nested.enlightened_vmcs_enabled)
-		kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
-
 	if (nested_vmx_allowed(vcpu) &&
 	    (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
 		kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
@@ -5251,6 +5248,9 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
 		if (vmx_has_valid_vmcs12(vcpu)) {
 			kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);
 
+			if (vmx->nested.hv_evmcs)
+				kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
+
 			if (is_guest_mode(vcpu) &&
 			    nested_cpu_has_shadow_vmcs(vmcs12) &&
 			    vmcs12->vmcs_link_pointer != -1ull)
@@ -5350,6 +5350,15 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 		if (kvm_state->hdr.vmx.vmcs12_pa != -1ull)
 			return -EINVAL;
 
+		/*
+		 * KVM_STATE_NESTED_EVMCS used to signal that KVM should
+		 * enable eVMCS capability on vCPU. However, since then
+		 * code was changed such that flag signals vmcs12 should
+		 * be copied into eVMCS in guest memory.
+		 *
+		 * To preserve backwards compatability, allow user
+		 * to set this flag even when there is no VMXON region.
+		 */
 		if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
 			return -EINVAL;
 	} else {
@@ -5358,7 +5367,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 		if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
 			return -EINVAL;
 	}
 
 	if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
 	    (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
@@ -5373,20 +5382,21 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 	 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags
 	 * must be zero.
 	 */
-	if (is_smm(vcpu) ? kvm_state->flags : kvm_state->hdr.vmx.smm.flags)
+	if (is_smm(vcpu) ?
+		(kvm_state->flags &
+		 (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING))
+		: kvm_state->hdr.vmx.smm.flags)
 		return -EINVAL;
 
 	if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
 	    !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
 		return -EINVAL;
 
-	vmx_leave_nested(vcpu);
-	if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
-		if (!nested_vmx_allowed(vcpu))
-			return -EINVAL;
-
-		nested_enable_evmcs(vcpu, NULL);
-	}
+	if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) &&
+	    (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled))
+		return -EINVAL;
+
+	vmx_leave_nested(vcpu);
 
 	if (kvm_state->hdr.vmx.vmxon_pa == -1ull)
 		return 0;
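Taken together, the two nVMX hunks above mean that KVM_SET_NESTED_STATE no longer enables the eVMCS capability as a side effect: if the incoming state carries KVM_STATE_NESTED_EVMCS, userspace must already have enabled KVM_CAP_HYPERV_ENLIGHTENED_VMCS on the vCPU, otherwise the restore fails with -EINVAL. The selftest hunk at the end of this diff does exactly that through its vcpu_ioctl() wrapper. A rough sketch of the equivalent raw ioctl sequence; vcpu_fd and state are assumed to exist in the caller, and treating args[0] as a pointer that receives the supported eVMCS version is my reading of the KVM_ENABLE_CAP handling for this capability, so check the KVM API documentation before relying on it:

#include <linux/kvm.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

/* Sketch only: enable the eVMCS capability on the vCPU *before*
 * restoring nested state that carries KVM_STATE_NESTED_EVMCS.     */
static int restore_nested_with_evmcs(int vcpu_fd, struct kvm_nested_state *state)
{
	uint16_t evmcs_version = 0;
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS;
	/* Assumption: KVM fills the user pointer in args[0] with the
	 * supported eVMCS version when enabling this capability.      */
	cap.args[0] = (uint64_t)(uintptr_t)&evmcs_version;

	if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
		return -1;

	/* Only now can the eVMCS-flavoured nested state be restored.   */
	return ioctl(vcpu_fd, KVM_SET_NESTED_STATE, state);
}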
@@ -1554,7 +1554,7 @@ static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
 			vcpu->arch.tsc_always_catchup = 1;
 			return 0;
 		} else {
-			WARN(1, "user requested TSC rate below hardware speed\n");
+			pr_warn_ratelimited("user requested TSC rate below hardware speed\n");
 			return -1;
 		}
 	}
@@ -1564,8 +1564,8 @@ static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
 				user_tsc_khz, tsc_khz);
 
 	if (ratio == 0 || ratio >= kvm_max_tsc_scaling_ratio) {
-		WARN_ONCE(1, "Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
-			  user_tsc_khz);
+		pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
+				    user_tsc_khz);
 		return -1;
 	}
 
@@ -146,6 +146,7 @@ int main(int argc, char *argv[])
 		kvm_vm_restart(vm, O_RDWR);
 		vm_vcpu_add(vm, VCPU_ID, 0, 0);
 		vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+		vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
 		vcpu_load_state(vm, VCPU_ID, state);
 		run = vcpu_state(vm, VCPU_ID);
 		free(state);