Commit 29cf0f50 authored by Ben Gardon, committed by Paolo Bonzini

kvm: x86/mmu: NX largepage recovery for TDP MMU

When KVM maps a largepage backed region at a lower level in order to
make it executable (i.e. NX large page shattering), it reduces the TLB
performance of that region. In order to avoid making this degradation
permanent, KVM must periodically reclaim shattered NX largepages by
zapping them and allowing them to be rebuilt in the page fault handler.
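
To make the recovery flow concrete, the following is a minimal userspace sketch of the idea; the struct, list, and helper names are illustrative stand-ins and do not match KVM's actual data structures:

/*
 * Illustrative model of NX largepage recovery, not kernel code: shattered
 * largepages sit on a list; a recovery worker periodically pops a bounded
 * number of entries and "zaps" them so the page fault handler can rebuild
 * the mappings, ideally as largepages again.
 */
#include <stdio.h>
#include <stdlib.h>

struct shadow_page {
	unsigned long gfn;		/* base guest frame of the region */
	struct shadow_page *next;	/* lpage_disallowed list linkage */
};

static struct shadow_page *lpage_disallowed_list;

static void zap_page(struct shadow_page *sp)
{
	/* In KVM this would tear down the small mappings under sp. */
	printf("zapping shattered largepage at gfn %#lx\n", sp->gfn);
}

static void recover_nx_lpages(unsigned long to_zap)
{
	while (to_zap-- && lpage_disallowed_list) {
		struct shadow_page *sp = lpage_disallowed_list;

		lpage_disallowed_list = sp->next;
		zap_page(sp);
		free(sp);
	}
}

int main(void)
{
	for (unsigned long gfn = 0; gfn < 3; gfn++) {
		struct shadow_page *sp = malloc(sizeof(*sp));

		sp->gfn = gfn * 512;
		sp->next = lpage_disallowed_list;
		lpage_disallowed_list = sp;
	}
	recover_nx_lpages(2);	/* bounded batch, like the rate-limited worker */
	return 0;
}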

With this patch, the TDP MMU does not respect KVM's rate limiting on
reclaim. It traverses the entire TDP structure every time. This will be
addressed in a future patch.

Tested by running kvm-unit-tests and KVM selftests on an Intel Haswell
machine. This series introduced no new failures.

This series can be viewed in Gerrit at:
	https://linux-review.googlesource.com/c/virt/kvm/kvm/+/2538

Signed-off-by: Ben Gardon <bgardon@google.com>
Message-Id: <20201014182700.2888246-21-bgardon@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent daa5b6c1
@@ -776,7 +776,7 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 	kvm_mmu_gfn_disallow_lpage(slot, gfn);
 }
 
-static void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	if (sp->lpage_disallowed)
 		return;
@@ -804,7 +804,7 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 	kvm_mmu_gfn_allow_lpage(slot, gfn);
 }
 
-static void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	--kvm->stat.nx_lpage_splits;
 	sp->lpage_disallowed = false;
@@ -5988,8 +5988,13 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
 				      struct kvm_mmu_page,
 				      lpage_disallowed_link);
 		WARN_ON_ONCE(!sp->lpage_disallowed);
-		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
-		WARN_ON_ONCE(sp->lpage_disallowed);
+		if (sp->tdp_mmu_page)
+			kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn,
+				sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level));
+		else {
+			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
+			WARN_ON_ONCE(sp->lpage_disallowed);
+		}
 
 		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
 			kvm_mmu_commit_zap_page(kvm, &invalid_list);
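
A note on the range passed to kvm_tdp_mmu_zap_gfn_range() above: the end GFN is one huge page worth of frames past sp->gfn at the page's level. A standalone sketch of that arithmetic, with local macro definitions standing in for the kernel headers (assuming x86's 4K base pages and 9 bits of table index per level):

#include <stdio.h>

/* Local stand-ins for the kernel's macros, for illustration only. */
#define PT64_LEVEL_BITS			9
#define KVM_PAGES_PER_HPAGE(level)	(1UL << (((level) - 1) * PT64_LEVEL_BITS))

int main(void)
{
	unsigned long gfn = 0x200000UL >> 12;	/* 2M-aligned guest frame */

	for (int level = 1; level <= 3; level++)
		printf("level %d: zap GFNs [%#lx, %#lx)\n", level, gfn,
		       gfn + KVM_PAGES_PER_HPAGE(level));
	return 0;
}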
@@ -143,4 +143,7 @@ bool is_nx_huge_page_enabled(void);
 
 void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
 
+void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);
+void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);
+
 #endif /* __KVM_X86_MMU_INTERNAL_H */
@@ -273,6 +273,9 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
 
 		list_del(&sp->link);
 
+		if (sp->lpage_disallowed)
+			unaccount_huge_nx_page(kvm, sp);
+
 		for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
 			old_child_spte = READ_ONCE(*(pt + i));
 			WRITE_ONCE(*(pt + i), 0);
@@ -571,6 +574,9 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 					     !shadow_accessed_mask);
 
 			trace_kvm_mmu_get_page(sp, true);
+			if (huge_page_disallowed && req_level >= iter.level)
+				account_huge_nx_page(vcpu->kvm, sp);
+
 			tdp_mmu_set_spte(vcpu->kvm, &iter, new_spte);
 		}
 	}
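
For clarity, the condition guarding account_huge_nx_page() above can be read as "this non-leaf page exists only because NX huge pages forced a smaller mapping". A standalone toy model of that predicate, with made-up helper and parameter names (not the kernel's code):

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model: a newly installed non-leaf page only counts as an NX-shattered
 * largepage when NX huge pages forced the mapping below the level the
 * backing memory could otherwise have used (req_level).
 */
static bool counts_as_nx_split(bool huge_page_disallowed, int req_level,
			       int cur_level)
{
	return huge_page_disallowed && req_level >= cur_level;
}

int main(void)
{
	/* Executable fault on 2M-backed memory, split down to 4K mappings. */
	printf("%d\n", counts_as_nx_split(true, 2, 2));	/* 1: account it */
	/* Memory only backed by 4K pages: nothing was shattered. */
	printf("%d\n", counts_as_nx_split(true, 1, 2));	/* 0 */
	return 0;
}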