Commit c30e9bc8 authored by Vitaly Kuznetsov, committed by Paolo Bonzini

KVM: nVMX: hyper-v: Enable L2 TLB flush

Enable L2 TLB flush feature on nVMX when:
- Enlightened VMCS is in use.
- The feature flag is enabled in eVMCS.
- The feature flag is enabled in partition assist page.

Perform synthetic vmexit to L1 after processing TLB flush call upon
request (HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH).

Note: nested_evmcs_l2_tlb_flush_enabled() uses cached VP assist page copy
which gets updated from nested_vmx_handle_enlightened_vmptrld(). This is
also guaranteed to happen post migration with eVMCS backed L2 running.
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20221101145426.251680-27-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 046f5756
@@ -600,6 +600,15 @@ struct hv_enlightened_vmcs {
 
 #define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL 0xFFFF
 
+/*
+ * Note, Hyper-V isn't actually stealing bit 28 from Intel, just abusing it by
+ * pairing it with architecturally impossible exit reasons.  Bit 28 is set only
+ * on SMI exits to a SMI transfer monitor (STM) and if and only if a MTF VM-Exit
+ * is pending.  I.e. it will never be set by hardware for non-SMI exits (there
+ * are only three), nor will it ever be set unless the VMM is an STM.
+ */
+#define HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH 0x10000031
+
 /*
  * Hyper-V uses the software reserved 32 bytes in VMCB control area to expose
  * SVM enlightenments to guests.
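For illustration (not part of the patch), the synthetic value decodes as bit 28 plus a basic exit reason of 0x31 in bits 15:0, a combination real hardware can never produce outside an STM. A minimal standalone sketch, macro names invented:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical decode helpers mirroring the VMX exit reason layout. */
#define EXIT_REASON_BASIC(r)	((r) & 0xffffu)	/* bits 15:0 */
#define EXIT_REASON_BIT28	(1u << 28)	/* set only on SMI exits to an STM */

int main(void)
{
	uint32_t reason = 0x10000031; /* HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH */

	/* Prints "basic=0x31 bit28=1". */
	printf("basic=0x%x bit28=%d\n", EXIT_REASON_BASIC(reason),
	       !!(reason & EXIT_REASON_BIT28));
	return 0;
}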
@@ -5,6 +5,7 @@
 
 #include "../cpuid.h"
 #include "hyperv.h"
+#include "nested.h"
 #include "vmcs.h"
 #include "vmx.h"
 #include "trace.h"
@@ -500,6 +501,22 @@ int nested_enable_evmcs(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+bool nested_evmcs_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
+
+	if (!hv_vcpu || !evmcs)
+		return false;
+
+	if (!evmcs->hv_enlightenments_control.nested_flush_hypercall)
+		return false;
+
+	return hv_vcpu->vp_assist_page.nested_control.features.directhypercall;
+}
+
 void vmx_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu)
 {
+	nested_vmx_vmexit(vcpu, HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH, 0, 0);
 }
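For context on what the synthetic exit buys: once both enlightenments are on, L0 processes L2's flush hypercall itself and, when requested, reflects this trap-after-flush exit to L1. A hypothetical L1-side handler fragment (not KVM code; names invented apart from the constant):

#include <stdint.h>

#define HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH 0x10000031

/* Hypothetical L1 exit dispatch: by the time this reason is observed,
 * L0 has already performed the TLB flush on L1's behalf. */
static void l1_handle_vmexit(uint32_t exit_reason)
{
	switch (exit_reason) {
	case HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH:
		/* Nothing to emulate; the flush is already done. */
		break;
	default:
		/* Architectural exit reasons are handled as usual. */
		break;
	}
}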
@@ -243,6 +243,7 @@ int nested_enable_evmcs(struct kvm_vcpu *vcpu,
 			uint16_t *vmcs_version);
 void nested_evmcs_filter_control_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
 int nested_evmcs_check_controls(struct vmcs12 *vmcs12);
+bool nested_evmcs_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu);
 void vmx_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu);
 
 #endif /* __KVM_X86_VMX_HYPERV_H */
@@ -1132,6 +1132,15 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
+	/*
+	 * KVM_REQ_HV_TLB_FLUSH flushes entries from either L1's VP_ID or
+	 * L2's VP_ID upon request from the guest. Make sure we check for
+	 * pending entries in the right FIFO upon L1/L2 transition as these
+	 * requests are put by other vCPUs asynchronously.
+	 */
+	if (to_hv_vcpu(vcpu) && enable_ept)
+		kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu);
+
 	/*
 	 * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
 	 * for *all* contexts to be flushed on VM-Enter/VM-Exit, i.e. it's a
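The new comment captures the subtlety: flush requests are queued against a specific VP_ID by other vCPUs, asynchronously, so the vCPU must re-check the FIFO of the context it is entering on every L1/L2 transition. A standalone toy model of that situation, all names invented:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for the per-VP_ID flush FIFOs: one pending flag per context. */
enum toy_ctx { TOY_L1 = 0, TOY_L2 = 1 };

struct toy_vcpu {
	bool flush_pending[2];	/* set asynchronously by other vCPUs */
	enum toy_ctx cur;
};

/* Without this re-check, a flush queued against the context being
 * entered while the vCPU ran in the other context would sit unserviced
 * until some unrelated event happened to process it. */
static void toy_transition(struct toy_vcpu *v, enum toy_ctx next)
{
	v->cur = next;
	if (v->flush_pending[next]) {
		v->flush_pending[next] = false;
		printf("drained %s flush queue on transition\n",
		       next == TOY_L1 ? "L1" : "L2");
	}
}

int main(void)
{
	struct toy_vcpu v = { .cur = TOY_L1 };

	v.flush_pending[TOY_L2] = true;	/* another vCPU targets L2's VP_ID */
	toy_transition(&v, TOY_L2);	/* entering L2 drains the right queue */
	return 0;
}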
@@ -3267,6 +3276,12 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 
 static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
 {
+	/*
+	 * Note: nested_get_evmcs_page() also updates 'vp_assist_page' copy
+	 * in 'struct kvm_vcpu_hv' in case eVMCS is in use, this is mandatory
+	 * to make nested_evmcs_l2_tlb_flush_enabled() work correctly post
+	 * migration.
+	 */
 	if (!nested_get_evmcs_page(vcpu)) {
 		pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
 				     __func__);
@@ -6144,6 +6159,11 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
 		 * Handle L2's bus locks in L0 directly.
 		 */
 		return true;
+	case EXIT_REASON_VMCALL:
+		/* Hyper-V L2 TLB flush hypercall is handled by L0 */
+		return guest_hv_cpuid_has_l2_tlb_flush(vcpu) &&
+		       nested_evmcs_l2_tlb_flush_enabled(vcpu) &&
+		       kvm_hv_is_tlb_flush_hcall(vcpu);
 	default:
 		break;
 	}
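Put differently, L0 intercepts the VMCALL only when all three legs hold: the L2 TLB flush feature is exposed to the guest via Hyper-V CPUID, L1 has opted in through both the eVMCS control and the VP assist page (the nested_evmcs_l2_tlb_flush_enabled() check above), and the pending hypercall is actually a TLB flush. Any other VMCALL from L2 is reflected to L1 exactly as before.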