Commit d786c778 authored by Paolo Bonzini's avatar Paolo Bonzini

KVM: MMU: inline set_spte in mmu_set_spte

Since the two callers of set_spte do different things with the results,
inlining it actually makes the code simpler to reason about.  For example,
mmu_set_spte looks quite like tdp_mmu_map_handle_target_level, but the
similarity is hidden by set_spte.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 88810413
...@@ -2700,10 +2700,12 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, ...@@ -2700,10 +2700,12 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
gfn_t gfn, kvm_pfn_t pfn, bool speculative, gfn_t gfn, kvm_pfn_t pfn, bool speculative,
bool host_writable) bool host_writable)
{ {
struct kvm_mmu_page *sp = sptep_to_sp(sptep);
int was_rmapped = 0; int was_rmapped = 0;
int set_spte_ret;
int ret = RET_PF_FIXED; int ret = RET_PF_FIXED;
bool flush = false; bool flush = false;
int make_spte_ret;
u64 spte;
pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__, pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
*sptep, write_fault, gfn); *sptep, write_fault, gfn);
...@@ -2734,30 +2736,29 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, ...@@ -2734,30 +2736,29 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
was_rmapped = 1; was_rmapped = 1;
} }
set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn, make_spte_ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
speculative, true, host_writable); true, host_writable, sp_ad_disabled(sp), &spte);
if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
if (*sptep == spte) {
ret = RET_PF_SPURIOUS;
} else {
trace_kvm_mmu_set_spte(level, gfn, sptep);
flush |= mmu_spte_update(sptep, spte);
}
if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
if (write_fault) if (write_fault)
ret = RET_PF_EMULATE; ret = RET_PF_EMULATE;
} }
if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush) if (flush)
kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
KVM_PAGES_PER_HPAGE(level)); KVM_PAGES_PER_HPAGE(level));
/*
* The fault is fully spurious if and only if the new SPTE and old SPTE
* are identical, and emulation is not required.
*/
if ((set_spte_ret & SET_SPTE_SPURIOUS) && ret == RET_PF_FIXED) {
WARN_ON_ONCE(!was_rmapped);
return RET_PF_SPURIOUS;
}
pgprintk("%s: setting spte %llx\n", __func__, *sptep); pgprintk("%s: setting spte %llx\n", __func__, *sptep);
trace_kvm_mmu_set_spte(level, gfn, sptep);
if (!was_rmapped) { if (!was_rmapped) {
WARN_ON_ONCE(ret == RET_PF_SPURIOUS);
kvm_update_page_stats(vcpu->kvm, level, 1); kvm_update_page_stats(vcpu->kvm, level, 1);
rmap_add(vcpu, sptep, gfn); rmap_add(vcpu, sptep, gfn);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment