Commit 4758d47e authored by Paolo Bonzini

KVM: MMU: inline set_spte in FNAME(sync_page)

Since the two callers of set_spte do different things with the results,
inlining it actually makes the code simpler to reason about.  For example,
FNAME(sync_page) already has a struct kvm_mmu_page *, but set_spte had to
fish it back out of sptep's private page data.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent d786c778
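
The "fish it back out" point can be seen in miniature below. This is a standalone, illustrative C sketch, not kernel code: kvm_mmu_page, sptep_to_sp, set_spte_old, and sync_one_spte are simplified stand-ins for the real KVM definitions (the real sptep_to_sp() recovers the shadow page through the backing struct page's private field; here spt[] is page-aligned so the entry pointer can simply be masked down to the struct start).

/*
 * Sketch of the pattern this commit removes: the standalone helper must
 * re-derive the owning shadow page from sptep on every call, while the
 * inlined sync loop already holds sp.  All names are hypothetical
 * stand-ins, not the kernel's definitions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#define SPT_ENTRIES 512                     /* 512 * 8 bytes = one 4K page */

struct kvm_mmu_page {
	uint64_t spt[SPT_ENTRIES];          /* must stay the first member */
	bool ad_disabled;
};

/* Mask a pointer into spt[] down to the page-aligned struct start. */
static struct kvm_mmu_page *sptep_to_sp(uint64_t *sptep)
{
	return (struct kvm_mmu_page *)((uintptr_t)sptep & ~(uintptr_t)4095);
}

/* Old shape: only sptep is passed, so sp has to be fished back out. */
static bool set_spte_old(uint64_t *sptep, uint64_t new_spte)
{
	struct kvm_mmu_page *sp = sptep_to_sp(sptep);   /* the extra lookup */

	if (sp->ad_disabled)                /* stands in for sp_ad_disabled() */
		new_spte |= 1ull << 61;
	if (*sptep == new_spte)
		return false;               /* spurious update, nothing to flush */
	*sptep = new_spte;
	return true;                        /* caller should flush TLBs */
}

/* Inlined shape: the sync loop already holds sp, so no lookup happens. */
static bool sync_one_spte(struct kvm_mmu_page *sp, int i, uint64_t new_spte)
{
	uint64_t *sptep = &sp->spt[i];

	if (sp->ad_disabled)
		new_spte |= 1ull << 61;
	if (*sptep == new_spte)
		return false;
	*sptep = new_spte;
	return true;
}

int main(void)
{
	/* 4K alignment makes the pointer-masking sptep_to_sp() above valid. */
	struct kvm_mmu_page *sp = aligned_alloc(4096, 2 * 4096);
	bool flush = false;
	int i;

	for (i = 0; i < SPT_ENTRIES; i++)
		sp->spt[i] = 0;
	sp->ad_disabled = false;

	flush |= set_spte_old(&sp->spt[3], 0x42);   /* old: re-derives sp */
	flush |= sync_one_spte(sp, 3, 0x42);        /* new: spurious, no flush */
	printf("flush = %d\n", flush);              /* prints: flush = 1 */
	free(sp);
	return 0;
}
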
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2674,27 +2674,6 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync,
 	return 0;
 }
 
-static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
-		    unsigned int pte_access, int level,
-		    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
-		    bool can_unsync, bool host_writable)
-{
-	u64 spte;
-	struct kvm_mmu_page *sp;
-	int ret;
-
-	sp = sptep_to_sp(sptep);
-
-	ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
-			can_unsync, host_writable, sp_ad_disabled(sp), &spte);
-
-	if (*sptep == spte)
-		ret |= SET_SPTE_SPURIOUS;
-	else if (mmu_spte_update(sptep, spte))
-		ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
-
-	return ret;
-}
-
 static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 			unsigned int pte_access, bool write_fault, int level,
 			gfn_t gfn, kvm_pfn_t pfn, bool speculative,
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -1061,7 +1061,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	int i;
 	bool host_writable;
 	gpa_t first_pte_gpa;
-	int set_spte_ret = 0;
+	bool flush = false;
 
 	/*
 	 * Ignore various flags when verifying that it's safe to sync a shadow
@@ -1091,6 +1091,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
 
 	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
+		u64 *sptep, spte;
 		unsigned pte_access;
 		pt_element_t gpte;
 		gpa_t pte_gpa;
@@ -1106,7 +1107,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 			return -1;
 
 		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
-			set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
+			flush = true;
 			continue;
 		}
 
@@ -1120,19 +1121,21 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		if (gfn != sp->gfns[i]) {
 			drop_spte(vcpu->kvm, &sp->spt[i]);
-			set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
+			flush = true;
 			continue;
 		}
 
-		host_writable = sp->spt[i] & shadow_host_writable_mask;
-
-		set_spte_ret |= set_spte(vcpu, &sp->spt[i],
-					 pte_access, PG_LEVEL_4K,
-					 gfn, spte_to_pfn(sp->spt[i]),
-					 true, false, host_writable);
+		sptep = &sp->spt[i];
+		spte = *sptep;
+		host_writable = spte & shadow_host_writable_mask;
+		make_spte(vcpu, pte_access, PG_LEVEL_4K, gfn,
+			  spte_to_pfn(spte), spte, true, false,
+			  host_writable, sp_ad_disabled(sp), &spte);
+
+		flush |= mmu_spte_update(sptep, spte);
 	}
 
-	return set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH;
+	return flush;
 }
 
 #undef pt_element_t