Commit 76eb54e7 authored by Ben Gardon, committed by Paolo Bonzini

KVM: x86/mmu: Move kvm_mmu_(get|put)_root to TDP MMU

The TDP MMU is almost the only user of kvm_mmu_get_root and
kvm_mmu_put_root. There is only one use of put_root in mmu.c for the
legacy / shadow MMU. Open code that one use and move the get / put
functions to the TDP MMU so they can be extended in future commits.

No functional change intended.
Signed-off-by: Ben Gardon <bgardon@google.com>
Message-Id: <20210401233736.638171-3-bgardon@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 8ca6f063
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3120,12 +3120,10 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
 
 	sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
 
-	if (kvm_mmu_put_root(kvm, sp)) {
-		if (is_tdp_mmu_page(sp))
-			kvm_tdp_mmu_free_root(kvm, sp);
-		else if (sp->role.invalid)
-			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
-	}
+	if (is_tdp_mmu_page(sp) && kvm_tdp_mmu_put_root(kvm, sp))
+		kvm_tdp_mmu_free_root(kvm, sp);
+	else if (!--sp->root_count && sp->role.invalid)
+		kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
 
 	*root_hpa = INVALID_PAGE;
 }
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -123,22 +123,6 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
 void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
 					u64 start_gfn, u64 pages);
 
-static inline void kvm_mmu_get_root(struct kvm *kvm, struct kvm_mmu_page *sp)
-{
-	BUG_ON(!sp->root_count);
-	lockdep_assert_held(&kvm->mmu_lock);
-
-	++sp->root_count;
-}
-
-static inline bool kvm_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *sp)
-{
-	lockdep_assert_held(&kvm->mmu_lock);
-	--sp->root_count;
-
-	return !sp->root_count;
-}
-
 /*
  * Return values of handle_mmio_page_fault, mmu.page_fault, and fast_page_fault().
  *
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -43,7 +43,7 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
 
 static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
 {
-	if (kvm_mmu_put_root(kvm, root))
+	if (kvm_tdp_mmu_put_root(kvm, root))
 		kvm_tdp_mmu_free_root(kvm, root);
 }
 
@@ -55,7 +55,7 @@ static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
 	if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
 		return false;
 
-	kvm_mmu_get_root(kvm, root);
+	kvm_tdp_mmu_get_root(kvm, root);
 
 	return true;
 }
@@ -154,7 +154,7 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
 	/* Check for an existing root before allocating a new one. */
 	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
 		if (root->role.word == role.word) {
-			kvm_mmu_get_root(kvm, root);
+			kvm_tdp_mmu_get_root(kvm, root);
 			goto out;
 		}
 	}
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -8,6 +8,24 @@
 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);
 
+static inline void kvm_tdp_mmu_get_root(struct kvm *kvm,
+					struct kvm_mmu_page *root)
+{
+	BUG_ON(!root->root_count);
+	lockdep_assert_held(&kvm->mmu_lock);
+
+	++root->root_count;
+}
+
+static inline bool kvm_tdp_mmu_put_root(struct kvm *kvm,
+					struct kvm_mmu_page *root)
+{
+	lockdep_assert_held(&kvm->mmu_lock);
+	--root->root_count;
+
+	return !root->root_count;
+}
+
 bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
 				 gfn_t end, bool can_yield, bool flush);
 static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
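A minimal userspace C sketch of the refcount contract the moved helpers implement, for readers unfamiliar with the pattern. Here struct mmu_root, root_get(), and root_put() are stand-ins for struct kvm_mmu_page, kvm_tdp_mmu_get_root(), and kvm_tdp_mmu_put_root(); assert() stands in for BUG_ON(), and the mmu_lock lockdep assertions are elided. The contract: get takes an extra reference on an already-referenced root, and put drops one reference and returns true only when the caller released the last one and therefore owns the cleanup (kvm_tdp_mmu_free_root() in the kernel).

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct mmu_root {
	int root_count;	/* references held on this root */
};

static inline void root_get(struct mmu_root *root)
{
	/* Callers may only take a reference on a root they can already see,
	 * i.e. one that is still referenced. */
	assert(root->root_count);
	++root->root_count;
}

static inline bool root_put(struct mmu_root *root)
{
	--root->root_count;
	/* True means the caller dropped the last reference and must free
	 * the root. */
	return !root->root_count;
}

int main(void)
{
	struct mmu_root root = { .root_count = 1 };
	bool last;

	root_get(&root);	/* second reference, e.g. a vCPU reusing an existing root */

	last = root_put(&root);	/* drops to 1: not the last reference */
	printf("first put: last=%d\n", last);

	last = root_put(&root);	/* drops to 0: caller frees the root */
	printf("second put: last=%d\n", last);
	return 0;
}

This also illustrates why the open-coded legacy path in mmu_free_root_page() reads "!--sp->root_count && sp->role.invalid": decrement first, and only act when the count just hit zero.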