Commit bcc4f2bc authored by Paolo Bonzini

KVM: MMU: mark page dirty in make_spte

This simplifies set_spte, which we want to remove, and unifies code
between the shadow MMU and the TDP MMU.  The warning will be added
back later to make_spte as well.

There is a small disadvantage in the TDP MMU; it may unnecessarily mark
a page as dirty twice if two vCPUs end up mapping the same page twice.
However, this is a very small cost for a case that is already rare.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 68be1306
@@ -2688,9 +2688,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
 			can_unsync, host_writable, sp_ad_disabled(sp), &spte);
 
-	if (spte & PT_WRITABLE_MASK)
-		kvm_vcpu_mark_page_dirty(vcpu, gfn);
-
 	if (*sptep == spte)
 		ret |= SET_SPTE_SPURIOUS;
 	else if (mmu_spte_update(sptep, spte))
...
@@ -179,6 +179,9 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
 		  "spte = 0x%llx, level = %d, rsvd bits = 0x%llx", spte, level,
 		  get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check, spte, level));
 
+	if (spte & PT_WRITABLE_MASK)
+		kvm_vcpu_mark_page_dirty(vcpu, gfn);
+
 	*new_spte = spte;
 	return ret;
 }
...
@@ -542,26 +542,7 @@ static inline bool tdp_mmu_map_set_spte_atomic(struct kvm_vcpu *vcpu,
 					       struct tdp_iter *iter,
 					       u64 new_spte)
 {
-	struct kvm *kvm = vcpu->kvm;
-
-	if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, iter, new_spte))
-		return false;
-
-	/*
-	 * Use kvm_vcpu_gfn_to_memslot() instead of going through
-	 * handle_changed_spte_dirty_log() to leverage vcpu->last_used_slot.
-	 */
-	if (is_writable_pte(new_spte)) {
-		struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, iter->gfn);
-
-		if (slot && kvm_slot_dirty_track_enabled(slot)) {
-			/* Enforced by kvm_mmu_hugepage_adjust. */
-			WARN_ON_ONCE(iter->level > PG_LEVEL_4K);
-			mark_page_dirty_in_slot(kvm, slot, iter->gfn);
-		}
-	}
-
-	return true;
+	return tdp_mmu_set_spte_atomic_no_dirty_log(vcpu->kvm, iter, new_spte);
 }
 
 static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
...
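The double-marking scenario called out in the commit message (two vCPUs mapping the same page, each going through make_spte) can be pictured with the toy user-space simulation below. It is only an illustrative sketch under simplified assumptions: the dirty bitmap, the mark_page_dirty() and toy_make_spte() helpers, and the vCPU IDs are stand-ins invented for this example, not KVM's real data structures or API.

/* Toy model of the case described above: two vCPUs each install a writable
 * SPTE for the same gfn, so the dirty bit is set twice, the second time
 * redundantly but harmlessly.  All names here are simplified stand-ins. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PT_WRITABLE_MASK	(1ULL << 1)
#define NR_GFNS			8

static uint8_t dirty_bitmap[NR_GFNS];	/* one byte per gfn, for simplicity */

/* Stand-in for kvm_vcpu_mark_page_dirty(): set the gfn's dirty bit. */
static void mark_page_dirty(int vcpu_id, uint64_t gfn)
{
	if (dirty_bitmap[gfn])
		printf("vCPU%d: gfn %llu already dirty (redundant mark)\n",
		       vcpu_id, (unsigned long long)gfn);
	dirty_bitmap[gfn] = 1;
}

/* Stand-in for make_spte() after this commit: build the SPTE and, if it
 * turned out writable, mark the page dirty as a side effect. */
static uint64_t toy_make_spte(int vcpu_id, uint64_t gfn, bool writable)
{
	uint64_t spte = gfn << 12;

	if (writable)
		spte |= PT_WRITABLE_MASK;
	if (spte & PT_WRITABLE_MASK)
		mark_page_dirty(vcpu_id, gfn);
	return spte;
}

int main(void)
{
	/* Two vCPUs fault on the same gfn and each install a writable SPTE;
	 * the second call marks the page dirty again. */
	toy_make_spte(0, 5, true);
	toy_make_spte(1, 5, true);
	return 0;
}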