Commit 956bf353 authored by Junaid Shahid, committed by Paolo Bonzini

kvm: x86: Skip shadow page resync on CR3 switch when indicated by guest

When the guest indicates that the TLB doesn't need to be flushed in a
CR3 switch, we can also skip resyncing the shadow page tables since an
out-of-sync shadow page table is equivalent to an out-of-sync TLB.
Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 08fb59d8
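
[Editor's note: for context, the "guest indicates" part of the message refers to the PCID no-flush hint: with CR4.PCIDE = 1, a guest may set bit 63 of the value written to CR3 to ask that TLB entries tagged with the target PCID be preserved. A minimal guest-side illustration (not part of this patch; the constant and function names here are ours):

#define CR3_NOFLUSH	(1ULL << 63)	/* valid only when CR4.PCIDE = 1 */

/* Illustrative guest context switch that keeps TLB entries tagged
 * with the target PCID instead of flushing them. */
static void switch_cr3_noflush(unsigned long pgd_pa, unsigned long pcid)
{
	unsigned long cr3 = pgd_pa | pcid | CR3_NOFLUSH;

	asm volatile("mov %0, %%cr3" : : "r" (cr3) : "memory");
}
]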
arch/x86/kvm/mmu.c:
@@ -4098,9 +4098,19 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 		 */
 		kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
-		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
-		if (!skip_tlb_flush)
+		if (!skip_tlb_flush) {
+			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
 			kvm_x86_ops->tlb_flush(vcpu, true);
+		}
+
+		/*
+		 * The last MMIO access's GVA and GPA are cached in the
+		 * VCPU. When switching to a new CR3, that GVA->GPA
+		 * mapping may no longer be valid. So clear any cached
+		 * MMIO info even when we don't need to sync the shadow
+		 * page tables.
+		 */
+		vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);

 		__clear_sp_write_flooding_count(
 				page_header(mmu->root_hpa));
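
[Editor's note: the requests queued in fast_cr3_switch() are deferred; they are consumed on the next entry into the guest. A sketch of the standard KVM request-handling pattern, using the names from this series (not the exact vcpu_enter_guest() code):

	if (kvm_check_request(KVM_REQ_LOAD_CR3, vcpu))
		kvm_mmu_load_cr3(vcpu);		/* install the new shadow root */
	if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
		kvm_mmu_sync_roots(vcpu);	/* resync shadow page tables */

Because MMU_SYNC is now queued only when a flush is needed, a no-flush CR3 switch leaves the shadow pages out of sync, which is exactly the state the guest asked for.]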
@@ -5217,6 +5227,21 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 	struct kvm_mmu *mmu = &vcpu->arch.mmu;

 	mmu->invlpg(vcpu, gva, mmu->root_hpa);
+
+	/*
+	 * INVLPG is required to invalidate any global mappings for the VA,
+	 * irrespective of PCID. Since it would take us roughly the same
+	 * amount of work to determine whether the prev_root mapping of the
+	 * VA is marked global as to just sync it blindly, we might as well
+	 * just always sync it.
+	 *
+	 * Mappings not reachable via the current cr3 or the prev_root.cr3 will
+	 * be synced when switching to that cr3, so nothing needs to be done
+	 * here for them.
+	 */
+	if (VALID_PAGE(mmu->prev_root.hpa))
+		mmu->invlpg(vcpu, gva, mmu->prev_root.hpa);
+
 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 	++vcpu->stat.invlpg;
 }
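
[Editor's note: prev_root above is the previous-root cache added earlier in this series: when the guest switches CR3, the old shadow root is parked instead of freed so that switching back is cheap. Based on the fields referenced in these hunks, its shape is roughly:

struct kvm_mmu_root_info {
	gpa_t	cr3;	/* guest CR3 value this root shadows */
	hpa_t	hpa;	/* shadow root page, or INVALID_PAGE if unused */
};
]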
@@ -5232,8 +5257,10 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
 	}

 	if (VALID_PAGE(mmu->prev_root.hpa) &&
-	    pcid == kvm_get_pcid(vcpu, mmu->prev_root.cr3))
+	    pcid == kvm_get_pcid(vcpu, mmu->prev_root.cr3)) {
+		mmu->invlpg(vcpu, gva, mmu->prev_root.hpa);
 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+	}

 	++vcpu->stat.invlpg;
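
[Editor's note: the guest-side operation that lands in kvm_mmu_invpcid_gva() is INVPCID type 0 (individual address). For reference, a guest issues it along these lines, modeled on the kernel's own __invpcid() helper; this is a sketch, not the emulation path itself:

static inline void __invpcid(unsigned long pcid, unsigned long addr,
			     unsigned long type)
{
	struct { u64 d[2]; } desc = { { pcid, addr } };

	/* type 0: invalidate the mapping for (pcid, addr) only */
	asm volatile("invpcid %[desc], %[type]"
		     : : [desc] "m" (desc), [type] "r" (type) : "memory");
}
]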
arch/x86/kvm/vmx.c:
@@ -8821,7 +8821,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)

 		if (kvm_get_pcid(vcpu, vcpu->arch.mmu.prev_root.cr3)
 		    == operand.pcid)
-			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+			kvm_mmu_free_roots(vcpu, KVM_MMU_ROOT_PREVIOUS);

 		/*
 		 * If neither the current cr3 nor the prev_root.cr3 use the
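
[Editor's note: kvm_mmu_free_roots() takes a bitmask naming which cached roots to drop, so this hunk replaces a blanket TLB-flush request with dropping only the cached previous root whose PCID was invalidated. The flag scheme is roughly as below; the names match the call above, but the exact values are an assumption:

#define KVM_MMU_ROOT_CURRENT	BIT(0)	/* mmu->root_hpa */
#define KVM_MMU_ROOT_PREVIOUS	BIT(1)	/* mmu->prev_root.hpa */
#define KVM_MMU_ROOTS_ALL	(KVM_MMU_ROOT_CURRENT | KVM_MMU_ROOT_PREVIOUS)
]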
arch/x86/kvm/x86.c:
@@ -858,10 +858,10 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 #endif

 	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
-		kvm_mmu_sync_roots(vcpu);
-
-		if (!skip_tlb_flush)
+		if (!skip_tlb_flush) {
+			kvm_mmu_sync_roots(vcpu);
 			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+		}

 		return 0;
 	}
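
[Editor's note: skip_tlb_flush in kvm_set_cr3() is extracted from the guest-written CR3 value before the code above runs, roughly as follows; this is a sketch, and the NOFLUSH constant name varies across kernel versions:

	bool skip_tlb_flush = false;

#ifdef CONFIG_X86_64
	if (kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) {
		/* Bit 63 is the guest's "no flush needed" hint. */
		skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH;
		cr3 &= ~X86_CR3_PCID_NOFLUSH;
	}
#endif
]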