Commit c501040a authored by Paolo Bonzini

KVM: MMU: change mmu->page_fault() arguments to kvm_page_fault

Pass struct kvm_page_fault to mmu->page_fault() instead of
extracting the arguments from the struct.  FNAME(page_fault) can use
the precomputed bools from the error code.
Suggested-by: Isaku Yamahata <isaku.yamahata@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 6defd9bb
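
To make the new calling convention concrete, here is a rough standalone sketch of the pattern the patch moves to. It is illustrative only, not the kernel code: the struct fields and PFERR_* names follow the diff below (the bit values mirror the x86 #PF error-code bits), while handle_page_fault() and main() are invented for the example.

/*
 * Standalone illustration, not kernel code: the caller decodes the #PF
 * error code once into a kvm_page_fault-style struct, and the handler
 * takes the struct instead of (gpa, error_code, prefault).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same names as in the diff; values mirror the x86 #PF error-code bits. */
#define PFERR_PRESENT_MASK (1u << 0)
#define PFERR_WRITE_MASK   (1u << 1)
#define PFERR_USER_MASK    (1u << 2)

struct kvm_page_fault {
	uint64_t addr;		/* cr2_or_gpa */
	uint32_t error_code;
	bool prefault;
	/* bools precomputed from error_code, so handlers need not re-derive them */
	bool write;
	bool user;
	bool present;
};

/* Stand-in for mmu->page_fault(): takes the struct, reads the precomputed bools. */
static int handle_page_fault(const struct kvm_page_fault *fault)
{
	printf("addr=%#llx write=%d user=%d prefault=%d\n",
	       (unsigned long long)fault->addr,
	       fault->write, fault->user, fault->prefault);
	return 0;
}

int main(void)
{
	uint32_t err = PFERR_PRESENT_MASK | PFERR_WRITE_MASK;
	struct kvm_page_fault fault = {
		.addr = 0x1000,
		.error_code = err,
		.prefault = false,
		.write = err & PFERR_WRITE_MASK,
		.user = err & PFERR_USER_MASK,
		.present = err & PFERR_PRESENT_MASK,
	};

	return handle_page_fault(&fault);
}

The payoff shows up in FNAME(page_fault) below, where checks such as error_code & PFERR_WRITE_MASK give way to the precomputed fault->write and fault->user.
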
@@ -407,6 +407,7 @@ struct kvm_mmu_root_info {
 #define KVM_HAVE_MMU_RWLOCK
 
 struct kvm_mmu_page;
+struct kvm_page_fault;
 
 /*
  * x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit,
@@ -416,8 +417,7 @@ struct kvm_mmu_page;
 struct kvm_mmu {
 	unsigned long (*get_guest_pgd)(struct kvm_vcpu *vcpu);
 	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
-	int (*page_fault)(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 err,
-			  bool prefault);
+	int (*page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
 	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
 				  struct x86_exception *fault);
 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t gva_or_gpa,
...
@@ -131,8 +131,7 @@ struct kvm_page_fault {
 	const bool is_tdp;
 };
 
-int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
-		       bool prefault);
+int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
 
 static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 					u32 err, bool prefault)
@@ -150,9 +149,9 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	};
 #ifdef CONFIG_RETPOLINE
 	if (fault.is_tdp)
-		return kvm_tdp_page_fault(vcpu, fault.addr, fault.error_code, fault.prefault);
+		return kvm_tdp_page_fault(vcpu, &fault);
 #endif
-	return vcpu->arch.mmu->page_fault(vcpu, fault.addr, fault.error_code, fault.prefault);
+	return vcpu->arch.mmu->page_fault(vcpu, &fault);
 }
 
 /*
...
@@ -4012,13 +4012,14 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 	return r;
 }
 
-static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
-				u32 error_code, bool prefault)
+static int nonpaging_page_fault(struct kvm_vcpu *vcpu,
+				struct kvm_page_fault *fault)
 {
 	pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code);
 
 	/* This path builds a PAE pagetable, we can map 2mb pages at maximum. */
-	return direct_page_fault(vcpu, gpa, error_code, prefault,
+	return direct_page_fault(vcpu, fault->addr,
+				 fault->error_code, fault->prefault,
 				 PG_LEVEL_2M, false);
 }
 
@@ -4055,10 +4056,10 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
 }
 EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
 
-int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
-		       bool prefault)
+int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
 	int max_level;
+	gpa_t gpa = fault->addr;
 
 	for (max_level = KVM_MAX_HUGEPAGE_LEVEL;
 	     max_level > PG_LEVEL_4K;
@@ -4070,8 +4071,8 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 		break;
 	}
 
-	return direct_page_fault(vcpu, gpa, error_code, prefault,
-				 max_level, true);
+	return direct_page_fault(vcpu, gpa, fault->error_code,
+				 fault->prefault, max_level, true);
 }
 
 static void nonpaging_init_context(struct kvm_mmu *context)
...
@@ -833,11 +833,10 @@ FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
  * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
  *	    a negative value on error.
  */
-static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
-			     bool prefault)
+static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
-	bool write_fault = error_code & PFERR_WRITE_MASK;
-	bool user_fault = error_code & PFERR_USER_MASK;
+	gpa_t addr = fault->addr;
+	u32 error_code = fault->error_code;
 	struct guest_walker walker;
 	int r;
 	kvm_pfn_t pfn;
@@ -847,6 +846,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	int max_level;
 
 	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
+	WARN_ON_ONCE(fault->is_tdp);
 
 	/*
 	 * If PFEC.RSVD is set, this is a shadow page fault.
@@ -864,7 +864,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	 */
 	if (!r) {
 		pgprintk("%s: guest page fault\n", __func__);
-		if (!prefault)
+		if (!fault->prefault)
 			kvm_inject_emulated_page_fault(vcpu, &walker.fault);
 
 		return RET_PF_RETRY;
@@ -882,7 +882,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	vcpu->arch.write_fault_to_shadow_pgtable = false;
 
 	is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
-	      &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);
+	      &walker, fault->user, &vcpu->arch.write_fault_to_shadow_pgtable);
 
 	if (is_self_change_mapping)
 		max_level = PG_LEVEL_4K;
@@ -892,8 +892,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
 
-	if (kvm_faultin_pfn(vcpu, prefault, walker.gfn, addr, &pfn, &hva,
-			    write_fault, &map_writable, &r))
+	if (kvm_faultin_pfn(vcpu, fault->prefault, walker.gfn, addr, &pfn, &hva,
+			    fault->write, &map_writable, &r))
 		return r;
 
 	if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r))
@@ -903,8 +903,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	 * Do not change pte_access if the pfn is a mmio page, otherwise
 	 * we will cache the incorrect access into mmio spte.
 	 */
-	if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) &&
-	    !is_cr0_wp(vcpu->arch.mmu) && !user_fault && !is_noslot_pfn(pfn)) {
+	if (fault->write && !(walker.pte_access & ACC_WRITE_MASK) &&
+	    !is_cr0_wp(vcpu->arch.mmu) && !fault->user && !is_noslot_pfn(pfn)) {
 		walker.pte_access |= ACC_WRITE_MASK;
 		walker.pte_access &= ~ACC_USER_MASK;
 
@@ -928,7 +928,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	if (r)
 		goto out_unlock;
 	r = FNAME(fetch)(vcpu, addr, &walker, error_code, max_level, pfn,
-			 map_writable, prefault);
+			 map_writable, fault->prefault);
 	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
 
 out_unlock:
...