Commit 50b265a4 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: nVMX: Add helper to handle TLB flushes on nested VM-Enter/VM-Exit

Add a helper to determine whether or not a full TLB flush needs to be
performed on nested VM-Enter/VM-Exit, as the logic is identical for both
flows and needs a fairly beefy comment to boot.  This also provides a
common point to make future adjustments to the logic.

Handle vpid12 changes in the new helper as well, even though that logic
is specific to VM-Enter.  The vpid12 handling is an extension of the
flushing logic, and keeping everything in a single location is worth
the extra bool parameter.  A sketch of the request ordering the deferred
flush relies on follows below.
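
The deferred flush leans on the order in which vcpu_enter_guest()
services requests: KVM_REQ_LOAD_MMU_PGD is handled before
KVM_REQ_TLB_FLUSH, so the flush lands after vmcs02.EPTP is in place.
A minimal sketch of that ordering (illustrative only; the real
request-servicing code in x86.c is far larger, and the flush helper's
name/signature here is assumed):

	/* Illustrative sketch, not the real vcpu_enter_guest(). */
	static void request_ordering_sketch(struct kvm_vcpu *vcpu)
	{
		/* Loads the new root, i.e. sets vmcs02.EPTP if nested EPT is on. */
		if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu))
			kvm_mmu_load_pgd(vcpu);

		/* Serviced only after the PGD load, so the flush hits the new EPTP. */
		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
			kvm_vcpu_flush_tlb(vcpu);	/* assumed helper name */
	}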

Cc: Liran Alon <liran.alon@oracle.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200320212833.3507-24-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 7780938c
@@ -1132,6 +1132,48 @@ static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
 	       (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
 }
 
+static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
+					    struct vmcs12 *vmcs12,
+					    bool is_vmenter)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	/*
+	 * If VPID is disabled, linear and combined mappings are flushed on
+	 * VM-Enter/VM-Exit, and guest-physical mappings are valid only for
+	 * their associated EPTP.
+	 */
+	if (!enable_vpid)
+		return;
+
+	/*
+	 * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
+	 * for *all* contexts to be flushed on VM-Enter/VM-Exit.
+	 *
+	 * If VPID is enabled and used by vmcs12, but L2 does not have a
+	 * unique TLB tag (ASID), i.e. EPT is disabled and KVM was unable to
+	 * allocate a VPID for L2, flush the TLB as the effective ASID is
+	 * common to both L1 and L2.
+	 *
+	 * Defer the flush so that it runs after vmcs02.EPTP has been set by
+	 * KVM_REQ_LOAD_MMU_PGD (if nested EPT is enabled) and to avoid
+	 * redundant flushes further down the nested pipeline.
+	 *
+	 * If a TLB flush isn't required due to any of the above, and vpid12
+	 * is changing, then the new "virtual" VPID (vpid12) will reuse the
+	 * same "real" VPID (vpid02), and so needs to be sync'd.  There is no
+	 * direct mapping between vpid02 and vpid12; vpid02 is per-vCPU and
+	 * reused for all nested vCPUs.
+	 */
+	if (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu)) {
+		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+	} else if (is_vmenter &&
+		   vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
+		vmx->nested.last_vpid = vmcs12->virtual_processor_id;
+		vpid_sync_context(nested_get_vpid02(vcpu));
+	}
+}
+
 static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
 {
 	superset &= mask;
@@ -2440,32 +2482,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	if (kvm_has_tsc_control)
 		decache_tsc_multiplier(vmx);
 
-	if (enable_vpid) {
-		/*
-		 * There is no direct mapping between vpid02 and vpid12, the
-		 * vpid02 is per-vCPU for L0 and reused while the value of
-		 * vpid12 is changed w/ one invvpid during nested vmentry.
-		 * The vpid12 is allocated by L1 for L2, so it will not
-		 * influence global bitmap(for vpid01 and vpid02 allocation)
-		 * even if spawn a lot of nested vCPUs.
-		 */
-		if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) {
-			if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
-				vmx->nested.last_vpid = vmcs12->virtual_processor_id;
-				vpid_sync_context(nested_get_vpid02(vcpu));
-			}
-		} else {
-			/*
-			 * If L1 use EPT, then L0 needs to execute INVEPT on
-			 * EPTP02 instead of EPTP01. Therefore, delay TLB
-			 * flush until vmcs02->eptp is fully updated by
-			 * KVM_REQ_LOAD_MMU_PGD. Note that this assumes
-			 * KVM_REQ_TLB_FLUSH is evaluated after
-			 * KVM_REQ_LOAD_MMU_PGD in vcpu_enter_guest().
-			 */
-			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
-		}
-	}
+	nested_vmx_transition_tlb_flush(vcpu, vmcs12, true);
 
 	if (nested_cpu_has_ept(vmcs12))
 		nested_ept_init_mmu_context(vcpu);
@@ -4033,24 +4050,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 	if (!enable_ept)
 		vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
 
-	/*
-	 * If vmcs01 doesn't use VPID, CPU flushes TLB on every
-	 * VMEntry/VMExit. Thus, no need to flush TLB.
-	 *
-	 * If vmcs12 doesn't use VPID, L1 expects TLB to be
-	 * flushed on every VMEntry/VMExit.
-	 *
-	 * Otherwise, we can preserve TLB entries as long as we are
-	 * able to tag L1 TLB entries differently than L2 TLB entries.
-	 *
-	 * If vmcs12 uses EPT, we need to execute this flush on EPTP01
-	 * and therefore we request the TLB flush to happen only after VMCS EPTP
-	 * has been set by KVM_REQ_LOAD_MMU_PGD.
-	 */
-	if (enable_vpid &&
-	    (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) {
-		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
-	}
+	nested_vmx_transition_tlb_flush(vcpu, vmcs12, false);
 
 	vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
 	vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
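
For reference (not part of this diff), nested_has_guest_tlb_tag(),
whose tail is visible in the first hunk, decides whether L2's TLB
entries can be tagged distinctly from L1's. Its body in this series is
roughly the following sketch:

	/* Rough sketch of the helper used above: L2 has a unique TLB tag if
	 * EPT is enabled (mappings are associated with an EPTP) or if a
	 * dedicated vpid02 was successfully allocated for L2. */
	static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
	{
		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

		return enable_ept ||
		       (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
	}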