Commit 4771450c authored by Sean Christopherson, committed by Paolo Bonzini

Revert "KVM: MMU: drop kvm_mmu_zap_mmio_sptes"

Revert back to a dedicated (and slower) mechanism for handling the
scenario where all MMIO shadow PTEs need to be zapped due to overflowing
the MMIO generation number.  Such an overflow is almost literally a
one-in-a-million occurrence, i.e. it is not a performance-sensitive
path.

Restoring kvm_mmu_zap_mmio_sptes() leaves VM teardown as the only user
of kvm_mmu_invalidate_zap_all_pages() and paves the way for removing
the fast invalidate mechanism altogether.
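
As a back-of-the-envelope illustration of the "one-in-a-million" claim, here
is a minimal userspace sketch of the generation scheme the message alludes
to; the GEN_BITS width and all helper names are assumptions for
illustration, not the kernel's actual layout:

/*
 * Only the low GEN_BITS of the memslot update counter fit into the
 * reserved bits of an MMIO shadow PTE.  When the truncated value wraps
 * to zero, every cached MMIO SPTE must be assumed stale and zapped.
 */
#define GEN_BITS 19                         /* assumed width, for illustration */
#define GEN_MASK ((1u << GEN_BITS) - 1)     /* wraps every ~524k updates */

static unsigned int memslot_updates;        /* bumped on every memslot change */

static unsigned int mmio_gen(void)
{
        return memslot_updates & GEN_MASK;
}

static void on_memslot_update(void)
{
        memslot_updates++;
        if (mmio_gen() == 0)                /* the rare wraparound case */
                ;                           /* zap all MMIO SPTEs here */
}

Under these assumptions a guest would need roughly half a million memslot
updates between wraps, which is why a slow dedicated zap path is an
acceptable trade.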

This reverts commit a8eca9dc.

Cc: Xiao Guangrong <guangrong.xiao@gmail.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent a592a3b8
@@ -319,6 +319,7 @@ struct kvm_mmu_page {
 	struct list_head link;
 	struct hlist_node hash_link;
 	bool unsync;
+	bool mmio_cached;

 	/*
 	 * The following two entries are used to key the shadow page in the
...
@@ -391,6 +391,8 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
 	mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
 		<< shadow_nonpresent_or_rsvd_mask_len;

+	page_header(__pa(sptep))->mmio_cached = true;
+
 	trace_mark_mmio_spte(sptep, gfn, access, gen);
 	mmu_spte_set(sptep, mask);
 }
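
The mmio_cached flag restored above is what keeps the dedicated zap path
from having to inspect every SPTE: it is set once when any MMIO SPTE lands
in a shadow page and is not cleared until the page itself goes away, so it
can over-approximate but never miss. A simplified sketch of that sticky-tag
design, with hypothetical types and names:

/*
 * Sticky per-shadow-page tag: it may stay set after the MMIO SPTEs it
 * recorded are gone, which only costs a harmless extra visit on the
 * rare zap path; it can never be falsely clear.
 */
struct shadow_page {
        bool mmio_cached;
};

static void note_mmio_spte(struct shadow_page *sp)
{
        sp->mmio_cached = true;     /* set once, cleared only by zap */
}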
@@ -5942,6 +5944,24 @@ static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
 	return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
 }

+static void kvm_mmu_zap_mmio_sptes(struct kvm *kvm)
+{
+	struct kvm_mmu_page *sp, *node;
+	LIST_HEAD(invalid_list);
+
+	spin_lock(&kvm->mmu_lock);
+restart:
+	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
+		if (!sp->mmio_cached)
+			continue;
+		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
+			goto restart;
+	}
+
+	kvm_mmu_commit_zap_page(kvm, &invalid_list);
+	spin_unlock(&kvm->mmu_lock);
+}
+
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
 {
 	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
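
The goto restart idiom in the restored function is the usual KVM pattern for
destructive walks of active_mmu_pages: kvm_mmu_prepare_zap_page() may unlink
pages other than sp (its return value reports whether anything was zapped),
which can invalidate even the next pointer cached by
list_for_each_entry_safe, so the walk restarts from the head after every
zap. A self-contained userspace sketch of the same pattern, with zap() as an
assumed stand-in:

/*
 * Userspace sketch of the restart-after-zap walk.  zap() stands in for
 * kvm_mmu_prepare_zap_page() and may unlink nodes *other* than @n, so
 * even an iterator that caches the next pointer can be left dangling;
 * restarting from the head is the simple, always-correct answer for a
 * rarely-taken slow path.
 */
#include <stdbool.h>

struct node {
        struct node *next;
        bool tagged;                /* stands in for sp->mmio_cached */
};

/* Assumed helper: may unlink @n and other nodes; true if it unlinked any. */
extern bool zap(struct node **head, struct node *n);

static void zap_tagged(struct node **head)
{
        struct node *n;
restart:
        for (n = *head; n; n = n->next) {
                if (!n->tagged)
                        continue;
                if (zap(head, n))
                        goto restart;   /* list mutated; iterator is stale */
        }
}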
@@ -5963,7 +5983,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
 	 */
 	if (unlikely(gen == 0)) {
 		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
-		kvm_mmu_invalidate_zap_all_pages(kvm);
+		kvm_mmu_zap_mmio_sptes(kvm);
 	}
 }
...
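
For completeness, here is why a wrap forces an up-front zap rather than lazy
revalidation: a fault on a cached MMIO SPTE is trusted only if the
generation stored in the SPTE equals the current one, so after a wrap,
leftovers from the previous pass through the same numeric range would
compare equal and be wrongly trusted. A one-line sketch of that check, with
assumed names:

/*
 * Assumed-name sketch: a stale MMIO SPTE is detected by generation
 * mismatch; after a wrap, old SPTEs would no longer mismatch, hence
 * the up-front zap of everything tagged mmio_cached.
 */
static bool mmio_spte_is_stale(unsigned int spte_gen, unsigned int cur_gen)
{
        return spte_gen != cur_gen;
}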