Commit 4b85c921 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Remove spurious TLB flushes in TDP MMU zap collapsible path

Drop the "flush" param and return values to/from the TDP MMU's helper for
zapping collapsible SPTEs.  Because the helper runs with mmu_lock held
for read, not write, it uses tdp_mmu_zap_spte_atomic(), and the atomic
zap handles the necessary remote TLB flush.

Similarly, because mmu_lock is dropped and re-acquired between zapping
legacy MMUs and zapping TDP MMUs, kvm_mmu_zap_collapsible_sptes() must
handle remote TLB flushes from the legacy MMU before calling into the TDP
MMU.

Fixes: e2209710 ("KVM: x86/mmu: Skip rmap operations if rmaps not allocated")
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20211120045046.3940942-4-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 75333772
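For reference, this is the post-patch shape of kvm_mmu_zap_collapsible_sptes() as established by the diff that follows. The inline comments are annotations added here to mark where each MMU's remote TLB flush happens; they are not part of the kernel source (the real function carries a different comment about dirty-logging granularity).

void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
				   const struct kvm_memory_slot *slot)
{
	if (kvm_memslots_have_rmaps(kvm)) {
		write_lock(&kvm->mmu_lock);
		/* Legacy/rmap MMU: zap, then flush before mmu_lock is dropped. */
		if (slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true))
			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
		write_unlock(&kvm->mmu_lock);
	}

	if (is_tdp_mmu_enabled(kvm)) {
		read_lock(&kvm->mmu_lock);
		/*
		 * TDP MMU: runs with mmu_lock held for read; each successful
		 * tdp_mmu_zap_spte_atomic() already performs the remote TLB
		 * flush, so no extra memslot-wide flush is needed here.
		 */
		kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
		read_unlock(&kvm->mmu_lock);
	}
}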
arch/x86/kvm/mmu/mmu.c

@@ -5848,8 +5848,6 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
 				   const struct kvm_memory_slot *slot)
 {
-	bool flush;
-
 	if (kvm_memslots_have_rmaps(kvm)) {
 		write_lock(&kvm->mmu_lock);
 		/*
@@ -5857,17 +5855,14 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
 		 * logging at a 4k granularity and never creates collapsible
 		 * 2m SPTEs during dirty logging.
 		 */
-		flush = slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
-		if (flush)
+		if (slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true))
 			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
 		write_unlock(&kvm->mmu_lock);
 	}
 
 	if (is_tdp_mmu_enabled(kvm)) {
 		read_lock(&kvm->mmu_lock);
-		flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, false);
-		if (flush)
-			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
+		kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
 		read_unlock(&kvm->mmu_lock);
 	}
 }
arch/x86/kvm/mmu/tdp_mmu.c

@@ -1362,10 +1362,9 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
  * Clear leaf entries which could be replaced by large mappings, for
  * GFNs within the slot.
  */
-static bool zap_collapsible_spte_range(struct kvm *kvm,
+static void zap_collapsible_spte_range(struct kvm *kvm,
 				       struct kvm_mmu_page *root,
-				       const struct kvm_memory_slot *slot,
-				       bool flush)
+				       const struct kvm_memory_slot *slot)
 {
 	gfn_t start = slot->base_gfn;
 	gfn_t end = start + slot->npages;
@@ -1376,10 +1375,8 @@ static bool zap_collapsible_spte_range(struct kvm *kvm,
 
 	tdp_root_for_each_pte(iter, root, start, end) {
 retry:
-		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
-			flush = false;
+		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
 			continue;
-		}
 
 		if (!is_shadow_present_pte(iter.old_spte) ||
 		    !is_last_spte(iter.old_spte, iter.level))
@@ -1391,6 +1388,7 @@ static bool zap_collapsible_spte_range(struct kvm *kvm,
 							    pfn, PG_LEVEL_NUM))
 			continue;
 
+		/* Note, a successful atomic zap also does a remote TLB flush. */
 		if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
 			/*
 			 * The iter must explicitly re-read the SPTE because
@@ -1399,30 +1397,24 @@ static bool zap_collapsible_spte_range(struct kvm *kvm,
 			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
 			goto retry;
 		}
-		flush = true;
 	}
 
 	rcu_read_unlock();
-
-	return flush;
 }
 
 /*
  * Clear non-leaf entries (and free associated page tables) which could
  * be replaced by large mappings, for GFNs within the slot.
  */
-bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
-				       const struct kvm_memory_slot *slot,
-				       bool flush)
+void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+				       const struct kvm_memory_slot *slot)
 {
 	struct kvm_mmu_page *root;
 
 	lockdep_assert_held_read(&kvm->mmu_lock);
 
 	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
-		flush = zap_collapsible_spte_range(kvm, root, slot, flush);
-
-	return flush;
+		zap_collapsible_spte_range(kvm, root, slot);
 }
 
 /*
arch/x86/kvm/mmu/tdp_mmu.h

@@ -64,9 +64,8 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 				       struct kvm_memory_slot *slot,
 				       gfn_t gfn, unsigned long mask,
 				       bool wrprot);
-bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
-				       const struct kvm_memory_slot *slot,
-				       bool flush);
+void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+				       const struct kvm_memory_slot *slot);
 
 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
 				   struct kvm_memory_slot *slot, gfn_t gfn,