Commit a158127f authored by David Matlack, committed by Paolo Bonzini

KVM: x86/mmu: Split out TDP MMU page fault handling

Split out the page fault handling for the TDP MMU to a separate
function.  This creates some duplicate code, but makes the TDP MMU fault
handler simpler to read by eliminating branches and will enable future
cleanups by allowing the TDP MMU and non-TDP MMU fault paths to diverge.
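
As a sketch of the refactoring pattern only (a hypothetical userspace
analogue: the names are invented and a pthread rwlock stands in for
mmu_lock; this is not KVM code), compare a combined handler that must
re-test the MMU type at every locking and mapping site with the split
version, where each handler is straight-line and the caller dispatches
once:

    /* Hypothetical userspace analogue; not KVM code. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t mmu_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Before the split: one handler, re-testing the MMU type at each site. */
    static int handle_fault_combined(int is_tdp)
    {
            if (is_tdp)
                    pthread_rwlock_rdlock(&mmu_lock);  /* TDP MMU: shared lock */
            else
                    pthread_rwlock_wrlock(&mmu_lock);  /* shadow MMU: exclusive */

            /* ... fault handling, with more is_tdp branches ... */

            pthread_rwlock_unlock(&mmu_lock);
            return 0;
    }

    /* After the split: each handler is straight-line code. */
    static int handle_fault_tdp(void)
    {
            pthread_rwlock_rdlock(&mmu_lock);
            /* ... TDP MMU fault handling ... */
            pthread_rwlock_unlock(&mmu_lock);
            return 0;
    }

    static int handle_fault_direct(void)
    {
            pthread_rwlock_wrlock(&mmu_lock);
            /* ... shadow MMU fault handling ... */
            pthread_rwlock_unlock(&mmu_lock);
            return 0;
    }

    /* The caller dispatches once, up front. */
    static int handle_fault(int is_tdp)
    {
            return is_tdp ? handle_fault_tdp() : handle_fault_direct();
    }

    int main(void)
    {
            printf("%d %d %d\n", handle_fault_combined(1),
                   handle_fault(1), handle_fault(0));
            return 0;
    }

Eliminating that repeated per-site test is what the hunks below do to
direct_page_fault().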

Only compile in the TDP MMU fault handler for 64-bit builds since
kvm_tdp_mmu_map() does not exist in 32-bit builds.
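
Condensed from the hunks below (function bodies elided, so this is a
summary rather than a compilable excerpt), the CONFIG_X86_64 guard keeps
32-bit builds from referencing the 64-bit-only symbols at all:

    #ifdef CONFIG_X86_64
    static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
                                      struct kvm_page_fault *fault)
    {
            /* ... takes mmu_lock for read and calls kvm_tdp_mmu_map() ... */
    }
    #endif

    int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
    {
            /* ... */
    #ifdef CONFIG_X86_64
            if (tdp_mmu_enabled)
                    return kvm_tdp_mmu_page_fault(vcpu, fault);
    #endif
            return direct_page_fault(vcpu, fault);
    }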

No functional change intended.
Signed-off-by: David Matlack <dmatlack@google.com>
Reviewed-by: Isaku Yamahata <isaku.yamahata@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220921173546.2674386-9-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 2d75ce03
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4303,7 +4303,6 @@ static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
 
 static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
-	bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
 	int r;
 
 	if (page_fault_handle_page_track(vcpu, fault))
@@ -4322,10 +4321,6 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 		return r;
 
 	r = RET_PF_RETRY;
-
-	if (is_tdp_mmu_fault)
-		read_lock(&vcpu->kvm->mmu_lock);
-	else
-		write_lock(&vcpu->kvm->mmu_lock);
+	write_lock(&vcpu->kvm->mmu_lock);
 
 	if (is_page_fault_stale(vcpu, fault))
@@ -4335,15 +4330,9 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	if (r)
 		goto out_unlock;
 
-	if (is_tdp_mmu_fault)
-		r = kvm_tdp_mmu_map(vcpu, fault);
-	else
-		r = __direct_map(vcpu, fault);
+	r = __direct_map(vcpu, fault);
 
 out_unlock:
-	if (is_tdp_mmu_fault)
-		read_unlock(&vcpu->kvm->mmu_lock);
-	else
-		write_unlock(&vcpu->kvm->mmu_lock);
+	write_unlock(&vcpu->kvm->mmu_lock);
 	kvm_release_pfn_clean(fault->pfn);
 	return r;
@@ -4392,6 +4381,46 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
 }
 EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
 
+#ifdef CONFIG_X86_64
+static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
+				  struct kvm_page_fault *fault)
+{
+	int r;
+
+	if (page_fault_handle_page_track(vcpu, fault))
+		return RET_PF_EMULATE;
+
+	r = fast_page_fault(vcpu, fault);
+	if (r != RET_PF_INVALID)
+		return r;
+
+	r = mmu_topup_memory_caches(vcpu, false);
+	if (r)
+		return r;
+
+	r = kvm_faultin_pfn(vcpu, fault, ACC_ALL);
+	if (r != RET_PF_CONTINUE)
+		return r;
+
+	r = RET_PF_RETRY;
+	read_lock(&vcpu->kvm->mmu_lock);
+
+	if (is_page_fault_stale(vcpu, fault))
+		goto out_unlock;
+
+	r = make_mmu_pages_available(vcpu);
+	if (r)
+		goto out_unlock;
+
+	r = kvm_tdp_mmu_map(vcpu, fault);
+
+out_unlock:
+	read_unlock(&vcpu->kvm->mmu_lock);
+	kvm_release_pfn_clean(fault->pfn);
+	return r;
+}
+#endif
+
 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
 	/*
@@ -4416,6 +4445,11 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 		}
 	}
 
+#ifdef CONFIG_X86_64
+	if (tdp_mmu_enabled)
+		return kvm_tdp_mmu_page_fault(vcpu, fault);
+#endif
+
 	return direct_page_fault(vcpu, fault);
 }