Commit 89c0fd49 authored by Ben Gardon, committed by Paolo Bonzini

kvm: x86/mmu: Allocate struct kvm_mmu_pages for all pages in TDP MMU

Attach struct kvm_mmu_pages to every page in the TDP MMU to track
metadata, facilitate NX reclaim, and enable improved parallelism of MMU
operations in future patches.
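
As an illustration only (not kernel code), the bookkeeping this patch
introduces can be pictured with the small user-space sketch below: every
page-table page gets a metadata struct that is linked onto a per-VM list
when the page is allocated and unlinked before the page is freed. The
list helpers, the mmu_page type, and the alloc/free names are simplified
stand-ins chosen for this sketch.

	/*
	 * Illustrative user-space sketch only; names mirror the patch but
	 * the types and allocators are simplified stand-ins.
	 */
	#include <stdlib.h>

	struct list_head { struct list_head *next, *prev; };

	static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

	static void list_add(struct list_head *n, struct list_head *h)
	{
		n->next = h->next;
		n->prev = h;
		h->next->prev = n;
		h->next = n;
	}

	static void list_del(struct list_head *n)
	{
		n->prev->next = n->next;
		n->next->prev = n->prev;
	}

	/* Stand-in for struct kvm_mmu_page: metadata for one page-table page. */
	struct mmu_page {
		struct list_head link;	/* lives on the per-VM list while in use */
		void *spt;		/* the page-table page this struct describes */
	};

	static struct list_head tdp_mmu_pages;	/* kvm->arch.tdp_mmu_pages analog */

	/* Fault-path analog: allocate metadata plus page, link it into the list. */
	static struct mmu_page *alloc_page_and_meta(void)
	{
		struct mmu_page *sp = malloc(sizeof(*sp));

		sp->spt = calloc(1, 4096);	/* clear_page() analog */
		list_add(&sp->link, &tdp_mmu_pages);
		return sp;
	}

	/* Teardown analog: unlink first, then free the page and its metadata. */
	static void free_page_and_meta(struct mmu_page *sp)
	{
		list_del(&sp->link);
		free(sp->spt);
		free(sp);
	}

	int main(void)
	{
		INIT_LIST_HEAD(&tdp_mmu_pages);
		free_page_and_meta(alloc_page_and_meta());
		return 0;
	}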

Tested by running kvm-unit-tests and KVM selftests on an Intel Haswell
machine. This series introduced no new failures.

This series can be viewed in Gerrit at:
	https://linux-review.googlesource.com/c/virt/kvm/kvm/+/2538

Signed-off-by: Ben Gardon <bgardon@google.com>
Message-Id: <20201014182700.2888246-12-bgardon@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent bb18842e
arch/x86/include/asm/kvm_host.h
@@ -1004,7 +1004,11 @@ struct kvm_arch {
 	 * operations.
 	 */
 	bool tdp_mmu_enabled;
+
+	/* List of struct tdp_mmu_pages being used as roots */
 	struct list_head tdp_mmu_roots;
+	/* List of struct tdp_mmu_pages not being used as roots */
+	struct list_head tdp_mmu_pages;
 };
 
 struct kvm_vm_stat {
arch/x86/kvm/mmu/tdp_mmu.c
@@ -28,6 +28,7 @@ void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
 	kvm->arch.tdp_mmu_enabled = true;
 
 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
+	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
 }
 
 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
@@ -169,6 +170,7 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
 	bool is_leaf = is_present && is_last_spte(new_spte, level);
 	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
 	u64 *pt;
+	struct kvm_mmu_page *sp;
 	u64 old_child_spte;
 	int i;
@@ -234,6 +236,9 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
 	 */
 	if (was_present && !was_leaf && (pfn_changed || !is_present)) {
 		pt = spte_to_child_pt(old_spte, level);
+		sp = sptep_to_sp(pt);
+
+		list_del(&sp->link);
 
 		for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
 			old_child_spte = READ_ONCE(*(pt + i));
@@ -247,6 +252,7 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
 						   KVM_PAGES_PER_HPAGE(level));
 
 		free_page((unsigned long)pt);
+		kmem_cache_free(mmu_page_header_cache, sp);
 	}
 }
@@ -424,8 +430,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
 	struct kvm_mmu *mmu = vcpu->arch.mmu;
 	struct tdp_iter iter;
-	struct kvm_mmu_memory_cache *pf_pt_cache =
-			&vcpu->arch.mmu_shadow_page_cache;
+	struct kvm_mmu_page *sp;
 	u64 *child_pt;
 	u64 new_spte;
 	int ret;
@@ -471,7 +476,9 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 		}
 
 		if (!is_shadow_present_pte(iter.old_spte)) {
-			child_pt = kvm_mmu_memory_cache_alloc(pf_pt_cache);
+			sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
+			list_add(&sp->link, &vcpu->kvm->arch.tdp_mmu_pages);
+			child_pt = sp->spt;
 			clear_page(child_pt);
 
 			new_spte = make_nonleaf_spte(child_pt,
 						     !shadow_accessed_mask);
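
For reference when reading the fault-path hunk above: alloc_tdp_mmu_page()
was added earlier in this series and its body is not part of this diff.
Based on the calls visible here (sp->spt backing the child page table,
sptep_to_sp() recovering sp from the page, and the teardown pairing of
free_page() with kmem_cache_free(mmu_page_header_cache, sp)), it plausibly
looks like the sketch below; treat the exact body as an assumption, not as
part of this patch.

	/*
	 * Not part of this diff: an assumed sketch of the helper from earlier
	 * in the series, reconstructed from the calls this patch makes.
	 */
	static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu,
						       gfn_t gfn, int level)
	{
		struct kvm_mmu_page *sp;

		/* Header from the cache that kmem_cache_free() pairs with. */
		sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
		/* The page-table page itself; released with free_page(). */
		sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
		/* Lets sptep_to_sp() map from the page back to its metadata. */
		set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

		sp->gfn = gfn;
		sp->tdp_mmu_page = true;

		return sp;
	}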