Commit 7a02674d authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Avoid retpoline on ->page_fault() with TDP

Wrap calls to ->page_fault() with a small shim to directly invoke the
TDP fault handler when the kernel is using retpolines and TDP is being
used.  Single out the TDP fault handler and annotate the TDP path as
likely, in order to coerce the compiler into preferring it over the
indirect function call.

Rename tdp_page_fault() to kvm_tdp_page_fault(), as it's exposed outside
of mmu.c to allow inlining the shim.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 331ca0f8
@@ -102,6 +102,19 @@ static inline void kvm_mmu_load_cr3(struct kvm_vcpu *vcpu)
 					      kvm_get_active_pcid(vcpu));
 }
 
+int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
+		       bool prefault);
+
+static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+					u32 err, bool prefault)
+{
+#ifdef CONFIG_RETPOLINE
+	if (likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault))
+		return kvm_tdp_page_fault(vcpu, cr2_or_gpa, err, prefault);
+#endif
+	return vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa, err, prefault);
+}
+
 /*
  * Currently, we have two sorts of write-protection, a) the first one
  * write-protects guest page to sync the guest modification, b) another one is
......
@@ -4219,8 +4219,8 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
 }
 EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
 
-static int tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
-			  bool prefault)
+int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
+		       bool prefault)
 {
 	int max_level;
@@ -4925,7 +4925,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 		return;
 
 	context->mmu_role.as_u64 = new_role.as_u64;
-	context->page_fault = tdp_page_fault;
+	context->page_fault = kvm_tdp_page_fault;
 	context->sync_page = nonpaging_sync_page;
 	context->invlpg = nonpaging_invlpg;
 	context->update_pte = nonpaging_update_pte;
@@ -5436,9 +5436,8 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
 	}
 
 	if (r == RET_PF_INVALID) {
-		r = vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa,
-					       lower_32_bits(error_code),
-					       false);
+		r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
+					  lower_32_bits(error_code), false);
 		WARN_ON(r == RET_PF_INVALID);
 	}
......
@@ -10192,7 +10192,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 	    work->arch.cr3 != vcpu->arch.mmu->get_cr3(vcpu))
 		return;
 
-	vcpu->arch.mmu->page_fault(vcpu, work->cr2_or_gpa, 0, true);
+	kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
 }
 
 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
......
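For readers outside the kernel tree, the stand-alone C sketch below only illustrates the general devirtualization pattern the shim relies on: compare the function pointer against the one handler that dominates at runtime and call it directly, falling back to the indirect call otherwise. All names here (fault_ops, fast_fault, generic_fault, do_fault, USE_DIRECT_CALL_SHIM) are invented for illustration and are not kernel APIs.

#include <stdio.h>

struct fault_ops {
	int (*page_fault)(unsigned long gpa, unsigned int err);
};

/* Hot, common handler; plays the role of the TDP fault path. */
static int fast_fault(unsigned long gpa, unsigned int err)
{
	(void)gpa;
	(void)err;
	return 0;
}

/* Rare, slow handler that is still reached through the indirect call. */
static int generic_fault(unsigned long gpa, unsigned int err)
{
	(void)gpa;
	(void)err;
	return 1;
}

#define USE_DIRECT_CALL_SHIM 1	/* stand-in for CONFIG_RETPOLINE */

static inline int do_fault(const struct fault_ops *ops, unsigned long gpa,
			   unsigned int err)
{
#if USE_DIRECT_CALL_SHIM
	/* __builtin_expect() mirrors the kernel's likely() annotation. */
	if (__builtin_expect(ops->page_fault == fast_fault, 1))
		return fast_fault(gpa, err);
#endif
	return ops->page_fault(gpa, err);
}

int main(void)
{
	struct fault_ops tdp = { .page_fault = fast_fault };
	struct fault_ops shadow = { .page_fault = generic_fault };

	printf("direct: %d, indirect: %d\n",
	       do_fault(&tdp, 0x1000, 0), do_fault(&shadow, 0x1000, 0));
	return 0;
}

Comparing against a single known handler keeps the shim cheap: the common case becomes a compare plus a direct call, while every other ->page_fault implementation still goes through the (retpolined) indirect call.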