Commit 90c54c19 authored by David Matlack, committed by Paolo Bonzini

KVM: x86/mmu: Grab mmu_invalidate_seq in kvm_faultin_pfn()

Grab mmu_invalidate_seq in kvm_faultin_pfn() and stash it in struct
kvm_page_fault. This eliminates duplicate code and reduces the number
of parameters needed for is_page_fault_stale().

Preemptively split out __kvm_faultin_pfn() to a separate function for
use in subsequent commits.

No functional change intended.
Signed-off-by: David Matlack <dmatlack@google.com>
Reviewed-by: Isaku Yamahata <isaku.yamahata@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220921173546.2674386-4-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 991c8047
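For orientation, below is a minimal user-space C sketch of the invalidation-sequence retry pattern this commit consolidates. All types, fields, and values are simplified stand-ins, not the real KVM structures: the fault handler snapshots a sequence counter before resolving the pfn, and later (under the MMU lock in the kernel) treats the fault as stale if the counter has moved.

    /*
     * Hypothetical sketch of the mmu_invalidate_seq retry pattern;
     * the kvm_* names are simplified stand-ins for the kernel's.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct kvm {
            unsigned long mmu_invalidate_seq; /* bumped on each invalidation */
    };

    struct kvm_page_fault {
            unsigned long mmu_seq; /* snapshot taken in kvm_faultin_pfn() */
            unsigned long pfn;
    };

    /* Stand-in for __kvm_faultin_pfn(): resolve the pfn (may block). */
    static int __kvm_faultin_pfn(struct kvm *kvm, struct kvm_page_fault *fault)
    {
            (void)kvm;
            fault->pfn = 42; /* pretend we resolved a pfn */
            return 0;
    }

    /* Snapshot the counter, then resolve the pfn; in the kernel an
     * smp_rmb() orders the counter read against the pfn lookup. */
    static int kvm_faultin_pfn(struct kvm *kvm, struct kvm_page_fault *fault)
    {
            fault->mmu_seq = kvm->mmu_invalidate_seq;
            return __kvm_faultin_pfn(kvm, fault);
    }

    /* Stale if an invalidation ran between the snapshot and now. */
    static bool is_page_fault_stale(struct kvm *kvm, struct kvm_page_fault *fault)
    {
            return fault->mmu_seq != kvm->mmu_invalidate_seq;
    }

    int main(void)
    {
            struct kvm kvm = { .mmu_invalidate_seq = 0 };
            struct kvm_page_fault fault = { 0 };

            kvm_faultin_pfn(&kvm, &fault);
            kvm.mmu_invalidate_seq++; /* simulate a concurrent invalidation */

            printf("stale: %s\n",
                   is_page_fault_stale(&kvm, &fault) ? "yes (retry)" : "no");
            return 0;
    }

Stashing the snapshot in struct kvm_page_fault, as the diff below does, means the staleness check only needs the fault itself rather than an extra mmu_seq parameter threaded through every caller.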
arch/x86/kvm/mmu/mmu.c
@@ -4189,7 +4189,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 	kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
 }
 
-static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
 	struct kvm_memory_slot *slot = fault->slot;
 	bool async;
@@ -4250,12 +4250,20 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 	return RET_PF_CONTINUE;
 }
 
+static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+{
+	fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq;
+	smp_rmb();
+
+	return __kvm_faultin_pfn(vcpu, fault);
+}
+
 /*
  * Returns true if the page fault is stale and needs to be retried, i.e. if the
  * root was invalidated by a memslot update or a relevant mmu_notifier fired.
  */
 static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
-				struct kvm_page_fault *fault, int mmu_seq)
+				struct kvm_page_fault *fault)
 {
 	struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root.hpa);
@@ -4275,14 +4283,12 @@ static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
 		return true;
 
 	return fault->slot &&
-	       mmu_invalidate_retry_hva(vcpu->kvm, mmu_seq, fault->hva);
+	       mmu_invalidate_retry_hva(vcpu->kvm, fault->mmu_seq, fault->hva);
 }
 
 static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
 	bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
-	unsigned long mmu_seq;
 	int r;
 
 	fault->gfn = fault->addr >> PAGE_SHIFT;
@@ -4299,9 +4305,6 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 	if (r)
 		return r;
 
-	mmu_seq = vcpu->kvm->mmu_invalidate_seq;
-	smp_rmb();
-
 	r = kvm_faultin_pfn(vcpu, fault);
 	if (r != RET_PF_CONTINUE)
 		return r;
@@ -4317,7 +4320,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 	else
 		write_lock(&vcpu->kvm->mmu_lock);
 
-	if (is_page_fault_stale(vcpu, fault, mmu_seq))
+	if (is_page_fault_stale(vcpu, fault))
 		goto out_unlock;
 
 	r = make_mmu_pages_available(vcpu);

arch/x86/kvm/mmu/mmu_internal.h
@@ -222,6 +222,7 @@ struct kvm_page_fault {
 	struct kvm_memory_slot *slot;
 
 	/* Outputs of kvm_faultin_pfn. */
+	unsigned long mmu_seq;
 	kvm_pfn_t pfn;
 	hva_t hva;
 	bool map_writable;

arch/x86/kvm/mmu/paging_tmpl.h
@@ -791,7 +791,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
 	struct guest_walker walker;
 	int r;
-	unsigned long mmu_seq;
 	bool is_self_change_mapping;
 
 	pgprintk("%s: addr %lx err %x\n", __func__, fault->addr, fault->error_code);
@@ -838,9 +837,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 	else
 		fault->max_level = walker.level;
 
-	mmu_seq = vcpu->kvm->mmu_invalidate_seq;
-	smp_rmb();
-
 	r = kvm_faultin_pfn(vcpu, fault);
 	if (r != RET_PF_CONTINUE)
 		return r;
@@ -871,7 +867,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 	r = RET_PF_RETRY;
 	write_lock(&vcpu->kvm->mmu_lock);
 
-	if (is_page_fault_stale(vcpu, fault, mmu_seq))
+	if (is_page_fault_stale(vcpu, fault))
 		goto out_unlock;
 
 	r = make_mmu_pages_available(vcpu);