Commit 85f44f8c authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Don't bottom out on leafs when zapping collapsible SPTEs

When zapping collapsible SPTEs in the TDP MMU, don't bottom out on a leaf
SPTE now that KVM doesn't require a PFN to compute the host mapping level,
i.e. now that there's no need to first find a leaf SPTE and then step
back up.

Drop the now unused tdp_iter_step_up(), as it is not the safest of
helpers (using any of the low level iterators requires some understanding
of the various side effects).
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220715232107.3775620-4-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 65e3b446
...@@ -145,15 +145,6 @@ static bool try_step_up(struct tdp_iter *iter) ...@@ -145,15 +145,6 @@ static bool try_step_up(struct tdp_iter *iter)
return true; return true;
} }
/*
* Step the iterator back up a level in the paging structure. Should only be
* used when the iterator is below the root level.
*/
void tdp_iter_step_up(struct tdp_iter *iter)
{
WARN_ON(!try_step_up(iter));
}
/* /*
* Step to the next SPTE in a pre-order traversal of the paging structure. * Step to the next SPTE in a pre-order traversal of the paging structure.
* To get to the next SPTE, the iterator either steps down towards the goal * To get to the next SPTE, the iterator either steps down towards the goal
......
...@@ -114,6 +114,5 @@ void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root, ...@@ -114,6 +114,5 @@ void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root,
int min_level, gfn_t next_last_level_gfn); int min_level, gfn_t next_last_level_gfn);
void tdp_iter_next(struct tdp_iter *iter); void tdp_iter_next(struct tdp_iter *iter);
void tdp_iter_restart(struct tdp_iter *iter); void tdp_iter_restart(struct tdp_iter *iter);
void tdp_iter_step_up(struct tdp_iter *iter);
#endif /* __KVM_X86_MMU_TDP_ITER_H */ #endif /* __KVM_X86_MMU_TDP_ITER_H */
...@@ -1721,10 +1721,6 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, ...@@ -1721,10 +1721,6 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot); clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
} }
/*
* Clear leaf entries which could be replaced by large mappings, for
* GFNs within the slot.
*/
static void zap_collapsible_spte_range(struct kvm *kvm, static void zap_collapsible_spte_range(struct kvm *kvm,
struct kvm_mmu_page *root, struct kvm_mmu_page *root,
const struct kvm_memory_slot *slot) const struct kvm_memory_slot *slot)
...@@ -1736,48 +1732,49 @@ static void zap_collapsible_spte_range(struct kvm *kvm, ...@@ -1736,48 +1732,49 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
rcu_read_lock(); rcu_read_lock();
tdp_root_for_each_pte(iter, root, start, end) { for_each_tdp_pte_min_level(iter, root, PG_LEVEL_2M, start, end) {
retry:
if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
continue; continue;
if (!is_shadow_present_pte(iter.old_spte) || if (iter.level > KVM_MAX_HUGEPAGE_LEVEL ||
!is_last_spte(iter.old_spte, iter.level)) !is_shadow_present_pte(iter.old_spte))
continue; continue;
max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot,
iter.gfn, PG_LEVEL_NUM);
WARN_ON(max_mapping_level < iter.level);
/* /*
* If this page is already mapped at the highest * Don't zap leaf SPTEs, if a leaf SPTE could be replaced with
* viable level, there's nothing more to do. * a large page size, then its parent would have been zapped
* instead of stepping down.
*/ */
if (max_mapping_level == iter.level) if (is_last_spte(iter.old_spte, iter.level))
continue; continue;
/* /*
* The page can be remapped at a higher level, so step * If iter.gfn resides outside of the slot, i.e. the page for
* up to zap the parent SPTE. * the current level overlaps but is not contained by the slot,
* then the SPTE can't be made huge. More importantly, trying
* to query that info from slot->arch.lpage_info will cause an
* out-of-bounds access.
*/ */
while (max_mapping_level > iter.level) if (iter.gfn < start || iter.gfn >= end)
tdp_iter_step_up(&iter); continue;
/* Note, a successful atomic zap also does a remote TLB flush. */ max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot,
tdp_mmu_zap_spte_atomic(kvm, &iter); iter.gfn, PG_LEVEL_NUM);
if (max_mapping_level < iter.level)
continue;
/* /* Note, a successful atomic zap also does a remote TLB flush. */
* If the atomic zap fails, the iter will recurse back into if (tdp_mmu_zap_spte_atomic(kvm, &iter))
* the same subtree to retry. goto retry;
*/
} }
rcu_read_unlock(); rcu_read_unlock();
} }
/* /*
* Clear non-leaf entries (and free associated page tables) which could * Zap non-leaf SPTEs (and free their associated page tables) which could
* be replaced by large mappings, for GFNs within the slot. * be replaced by huge pages, for GFNs within the slot.
*/ */
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
const struct kvm_memory_slot *slot) const struct kvm_memory_slot *slot)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment