Commit 3c8ad5a6 authored by Paolo Bonzini

KVM: MMU: change fast_page_fault() arguments to kvm_page_fault

Pass struct kvm_page_fault to fast_page_fault() instead of
extracting the arguments from the struct.
Suggested-by: Isaku Yamahata <isaku.yamahata@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent cdc47767
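
For context, below is an abridged, illustrative sketch of the struct kvm_page_fault that this series introduces in arch/x86/kvm/mmu/mmu_internal.h. Only the fields read by the hunks in this commit are listed, and the layout and comments are paraphrased for illustration rather than copied from the header.

/*
 * Abridged sketch of struct kvm_page_fault (illustrative, not the full
 * definition): only the fields used by the hunks in this commit.
 */
struct kvm_page_fault {
        /* Arguments to kvm_mmu_do_page_fault(). */
        const gpa_t addr;
        const u32 error_code;

        /* Booleans decoded once from error_code. */
        const bool exec;        /* PFERR_FETCH_MASK */
        const bool write;       /* PFERR_WRITE_MASK */
        const bool present;     /* PFERR_PRESENT_MASK */
        const bool rsvd;        /* PFERR_RSVD_MASK */

        /* Derived later, e.g. in direct_page_fault(). */
        gfn_t gfn;
};

Because these bits are decoded once when the fault structure is filled in, fast_page_fault() and its helpers can test fault->write or fault->exec directly instead of masking error_code at every call site, which is what the hunks below switch to.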
@@ -3083,18 +3083,17 @@ static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fa
 	return false;
 }
 
-static bool page_fault_can_be_fast(u32 error_code)
+static bool page_fault_can_be_fast(struct kvm_page_fault *fault)
 {
 	/*
 	 * Do not fix the mmio spte with invalid generation number which
 	 * need to be updated by slow page fault path.
 	 */
-	if (unlikely(error_code & PFERR_RSVD_MASK))
+	if (unlikely(fault->rsvd))
 		return false;
 
 	/* See if the page fault is due to an NX violation */
-	if (unlikely(((error_code & (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))
-		      == (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))))
+	if (unlikely(fault->exec && fault->present))
 		return false;
 
 	/*
@@ -3111,9 +3110,7 @@ static bool page_fault_can_be_fast(u32 error_code)
 	 * accesses to a present page.
 	 */
 
-	return shadow_acc_track_mask != 0 ||
-	       ((error_code & (PFERR_WRITE_MASK | PFERR_PRESENT_MASK))
-		== (PFERR_WRITE_MASK | PFERR_PRESENT_MASK));
+	return shadow_acc_track_mask != 0 || (fault->write && fault->present);
 }
 
 /*
@@ -3155,12 +3152,12 @@ fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	return true;
 }
 
-static bool is_access_allowed(u32 fault_err_code, u64 spte)
+static bool is_access_allowed(struct kvm_page_fault *fault, u64 spte)
 {
-	if (fault_err_code & PFERR_FETCH_MASK)
+	if (fault->exec)
 		return is_executable_pte(spte);
 
-	if (fault_err_code & PFERR_WRITE_MASK)
+	if (fault->write)
 		return is_writable_pte(spte);
 
 	/* Fault was on Read access */
@@ -3193,7 +3190,7 @@ static u64 *fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte)
 /*
  * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
  */
-static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code)
+static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
 	struct kvm_mmu_page *sp;
 	int ret = RET_PF_INVALID;
@@ -3201,7 +3198,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code)
 	u64 *sptep = NULL;
 	uint retry_count = 0;
 
-	if (!page_fault_can_be_fast(error_code))
+	if (!page_fault_can_be_fast(fault))
 		return ret;
 
 	walk_shadow_page_lockless_begin(vcpu);
@@ -3210,9 +3207,9 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code)
 		u64 new_spte;
 
 		if (is_tdp_mmu(vcpu->arch.mmu))
-			sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, gpa, &spte);
+			sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, fault->addr, &spte);
 		else
-			sptep = fast_pf_get_last_sptep(vcpu, gpa, &spte);
+			sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte);
 
 		if (!is_shadow_present_pte(spte))
 			break;
@@ -3231,7 +3228,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code)
 		 * Need not check the access of upper level table entries since
 		 * they are always ACC_ALL.
 		 */
-		if (is_access_allowed(error_code, spte)) {
+		if (is_access_allowed(fault, spte)) {
 			ret = RET_PF_SPURIOUS;
 			break;
 		}
@@ -3246,7 +3243,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code)
 		 * be removed in the fast path only if the SPTE was
 		 * write-protected for dirty-logging or access tracking.
 		 */
-		if ((error_code & PFERR_WRITE_MASK) &&
+		if (fault->write &&
 		    spte_can_locklessly_be_made_writable(spte)) {
 			new_spte |= PT_WRITABLE_MASK;
 
@@ -3267,7 +3264,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code)
 
 		/* Verify that the fault can be handled in the fast path */
 		if (new_spte == spte ||
-		    !is_access_allowed(error_code, new_spte))
+		    !is_access_allowed(fault, new_spte))
 			break;
 
 		/*
@@ -3288,7 +3285,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code)
 
 	} while (true);
 
-	trace_fast_page_fault(vcpu, gpa, error_code, sptep, spte, ret);
+	trace_fast_page_fault(vcpu, fault->addr, fault->error_code, sptep, spte, ret);
 	walk_shadow_page_lockless_end(vcpu);
 
 	return ret;
@@ -3946,18 +3943,16 @@ static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 
 static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
-	gpa_t gpa = fault->addr;
-	u32 error_code = fault->error_code;
 	bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
 
 	unsigned long mmu_seq;
 	int r;
 
-	fault->gfn = gpa >> PAGE_SHIFT;
+	fault->gfn = fault->addr >> PAGE_SHIFT;
 	if (page_fault_handle_page_track(vcpu, fault))
 		return RET_PF_EMULATE;
 
-	r = fast_page_fault(vcpu, gpa, error_code);
+	r = fast_page_fault(vcpu, fault);
 	if (r != RET_PF_INVALID)
 		return r;