Commit 87654643 authored by David Matlack, committed by Paolo Bonzini

KVM: x86/mmu: Rename shadow MMU functions that deal with shadow pages

Rename 2 functions:

  kvm_mmu_get_page() -> kvm_mmu_get_shadow_page()
  kvm_mmu_free_page() -> kvm_mmu_free_shadow_page()

This change makes it clear that these functions deal with shadow pages
rather than struct pages. It also aligns these functions with the naming
scheme for kvm_mmu_find_shadow_page() and kvm_mmu_alloc_shadow_page().

Prefer "shadow_page" over the shorter "sp" since these are core
functions and the line lengths aren't terrible.
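
As a rough illustration of the distinction the new names draw, here is a
minimal sketch of the metadata involved. The fields are reduced to those
visible in this diff, not the kernel's actual definition (see struct
kvm_mmu_page in arch/x86/kvm/mmu/mmu_internal.h):

  /*
   * Sketch only: a "shadow page" is KVM's metadata for one page's worth
   * of shadow page-table entries.  The entries themselves live in the
   * page pointed to by @spt; that backing memory is what a struct page
   * describes.  These functions operate on the metadata, never on a
   * struct page.
   */
  struct kvm_mmu_page {
          struct hlist_node hash_link;   /* gfn/role hash-table linkage */
          union kvm_mmu_page_role role;  /* level, access bits, etc. */
          gfn_t gfn;                     /* guest frame being shadowed */
          u64 *spt;                      /* the shadow PTEs themselves */
          int root_count;                /* pinned while in use as a root */
  };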

No functional change intended.
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20220516232138.1783324-9-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c306aec8
@@ -1626,7 +1626,7 @@ static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
 	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
 }
 
-static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
+static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp)
 {
 	MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
 	hlist_del(&sp->hash_link);
@@ -2081,7 +2081,8 @@ static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm_vcpu *vcpu,
 	return sp;
 }
 
-static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, gfn_t gfn,
-					     union kvm_mmu_page_role role)
+static struct kvm_mmu_page *kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
+						    gfn_t gfn,
+						    union kvm_mmu_page_role role)
 {
 	struct hlist_head *sp_list;
@@ -2146,7 +2147,7 @@ static struct kvm_mmu_page *kvm_mmu_get_child_sp(struct kvm_vcpu *vcpu,
 	union kvm_mmu_page_role role;
 
 	role = kvm_mmu_child_role(sptep, direct, access);
-	return kvm_mmu_get_page(vcpu, gfn, role);
+	return kvm_mmu_get_shadow_page(vcpu, gfn, role);
 }
 
 static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
@@ -2422,7 +2423,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 
 	list_for_each_entry_safe(sp, nsp, invalid_list, link) {
 		WARN_ON(!sp->role.invalid || sp->root_count);
-		kvm_mmu_free_page(sp);
+		kvm_mmu_free_shadow_page(sp);
 	}
 }
 
@@ -3415,7 +3416,7 @@ static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant,
 	WARN_ON_ONCE(quadrant && !role.has_4_byte_gpte);
 	WARN_ON_ONCE(role.direct && role.has_4_byte_gpte);
 
-	sp = kvm_mmu_get_page(vcpu, gfn, role);
+	sp = kvm_mmu_get_shadow_page(vcpu, gfn, role);
 	++sp->root_count;
 
 	return __pa(sp->spt);