Commit ace569e0 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Move flush logic from mmu_page_zap_pte() to FNAME(invlpg)

Move the flush-or-not logic out of mmu_page_zap_pte() and fully into
FNAME(invlpg), so that mmu_page_zap_pte() doesn't need to return a
value.  This allows a future patch to redefine the return semantics of
mmu_page_zap_pte() so that it can recursively zap orphaned child shadow
pages for nested TDP MMUs.

No functional change intended.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200923221406.16297-2-sean.j.christopherson@intel.com>
Reviewed-by: Ben Gardon <bgardon@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent dbcf3f96
arch/x86/kvm/mmu/mmu.c

@@ -2615,7 +2615,7 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	}
 }
 
-static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
+static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
 			     u64 *spte)
 {
 	u64 pte;
@@ -2631,13 +2631,9 @@ static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
 			child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
 			drop_parent_pte(child, spte);
 		}
-		return true;
-	}
-
-	if (is_mmio_spte(pte))
+	} else if (is_mmio_spte(pte)) {
 		mmu_spte_clear_no_track(spte);
-
-	return false;
+	}
 }
 
 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
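For readability, here is how mmu_page_zap_pte() reads once both hunks above
are applied.  The lines between the two hunks (handling a present SPTE) are
elided in the diff and are filled in from the kernel source at this commit,
so treat this as a reconstruction rather than part of the patch:

	static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
				     u64 *spte)
	{
		u64 pte;
		struct kvm_mmu_page *child;

		pte = *spte;
		if (is_shadow_present_pte(pte)) {
			if (is_last_spte(pte, sp->role.level)) {
				/* Zap a leaf SPTE; keep large-page stats honest. */
				drop_spte(kvm, spte);
				if (is_large_pte(pte))
					--kvm->stat.lpages;
			} else {
				/* Non-leaf: unlink the child shadow page. */
				child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
				drop_parent_pte(child, spte);
			}
		} else if (is_mmio_spte(pte)) {
			mmu_spte_clear_no_track(spte);
		}
	}

Note the function no longer reports whether it zapped a present SPTE; the one
caller that cared, FNAME(invlpg), now makes that determination itself (see the
second file below).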
arch/x86/kvm/mmu/paging_tmpl.h

@@ -895,6 +895,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
 {
 	struct kvm_shadow_walk_iterator iterator;
 	struct kvm_mmu_page *sp;
+	u64 old_spte;
 	int level;
 	u64 *sptep;
 
@@ -917,7 +918,8 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
 		sptep = iterator.sptep;
 
 		sp = sptep_to_sp(sptep);
-		if (is_last_spte(*sptep, level)) {
+		old_spte = *sptep;
+		if (is_last_spte(old_spte, level)) {
 			pt_element_t gpte;
 			gpa_t pte_gpa;
 
@@ -927,7 +929,8 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
 			pte_gpa = FNAME(get_level1_sp_gpa)(sp);
 			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
-			if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
+			mmu_page_zap_pte(vcpu->kvm, sp, sptep);
+			if (is_shadow_present_pte(old_spte))
 				kvm_flush_remote_tlbs_with_address(vcpu->kvm,
 					sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
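The old boolean return of mmu_page_zap_pte() was true if and only if the
zapped SPTE was shadow-present, so snapshotting *sptep into old_spte before
the zap and testing is_shadow_present_pte(old_spte) afterwards preserves the
flush condition exactly.  Stitched together from the hunks above, the
resulting sequence in FNAME(invlpg) is:

		mmu_page_zap_pte(vcpu->kvm, sp, sptep);

		/*
		 * Flush remote TLBs only if a present SPTE was zapped; the
		 * old_spte snapshot stands in for the removed return value.
		 */
		if (is_shadow_present_pte(old_spte))
			kvm_flush_remote_tlbs_with_address(vcpu->kvm,
				sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));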