Commit afd28fe1 authored by Takuya Yoshikawa's avatar Takuya Yoshikawa Committed by Paolo Bonzini

KVM: x86: MMU: Remove is_rmap_spte() and use is_shadow_present_pte()

is_rmap_spte(), originally named is_rmap_pte(), was introduced when the
simple reverse mapping was implemented by commit cd4a4e53
("[PATCH] KVM: MMU: Implement simple reverse mapping").  At that point,
its role was clear and only rmap_add() and rmap_remove() were using it
to select sptes that need to be reverse-mapped.

Independently of that, is_shadow_present_pte() was first introduced by
commit c7addb90 ("KVM: Allow not-present guest page faults to
bypass kvm") to do bypass_guest_pf optimization, which does not exist
any more.

These two seem to have changed their roles somewhat, and is_rmap_spte()
just calls is_shadow_present_pte() now.

Since using both of them without clear distinction just makes the code
confusing, remove is_rmap_spte().
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 029499b4
@@ -311,11 +311,6 @@ static int is_large_pte(u64 pte)
 	return pte & PT_PAGE_SIZE_MASK;
 }
 
-static int is_rmap_spte(u64 pte)
-{
-	return is_shadow_present_pte(pte);
-}
-
 static int is_last_spte(u64 pte, int level)
 {
 	if (level == PT_PAGE_TABLE_LEVEL)
@@ -540,7 +535,7 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
 	u64 old_spte = *sptep;
 	bool ret = false;
 
-	WARN_ON(!is_rmap_spte(new_spte));
+	WARN_ON(!is_shadow_present_pte(new_spte));
 
 	if (!is_shadow_present_pte(old_spte)) {
 		mmu_spte_set(sptep, new_spte);
@@ -595,7 +590,7 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
 	else
 		old_spte = __update_clear_spte_slow(sptep, 0ull);
 
-	if (!is_rmap_spte(old_spte))
+	if (!is_shadow_present_pte(old_spte))
 		return 0;
 
 	pfn = spte_to_pfn(old_spte);
@@ -2575,7 +2570,7 @@ static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
 	pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
 		 *sptep, write_fault, gfn);
 
-	if (is_rmap_spte(*sptep)) {
+	if (is_shadow_present_pte(*sptep)) {
 		/*
 		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
 		 * the parent of the now unreachable PTE.
@@ -2919,7 +2914,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 	 * If the mapping has been changed, let the vcpu fault on the
 	 * same address again.
 	 */
-	if (!is_rmap_spte(spte)) {
+	if (!is_shadow_present_pte(spte)) {
 		ret = true;
 		goto exit;
 	}
......
@@ -183,7 +183,7 @@ static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
 		return;
 
 	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
-		if (!is_rmap_spte(sp->spt[i]))
+		if (!is_shadow_present_pte(sp->spt[i]))
 			continue;
 
 		inspect_spte_has_rmap(kvm, sp->spt + i);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment