Commit 618232e2 authored by Brijesh Singh, committed by Radim Krčmář

KVM: x86: Avoid guest page table walk when gpa_available is set

When a guest causes a page fault which requires emulation, the
vcpu->arch.gpa_available flag is set to indicate that cr2 contains a
valid GPA.

Currently, emulator_read_write_onepage() makes use of the gpa_available
flag to avoid a guest page walk for known MMIO regions. Let's not limit
the gpa_available optimization to just MMIO regions. This patch extends
the check to avoid the page walk whenever the gpa_available flag is set.
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
[Fix EPT=0 according to Wanpeng Li's fix, plus ensure VMX also uses the
 new code. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
[Moved "ret < 0" to the else branch, as per David's review. - Radim]
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
parent e08d26f0
@@ -685,8 +685,9 @@ struct kvm_vcpu_arch {
 	int pending_ioapic_eoi;
 	int pending_external_vector;

-	/* GPA available (AMD only) */
+	/* GPA available */
 	bool gpa_available;
+	gpa_t gpa_val;

 	/* be preempted when it's in kernel-mode(cpl=0) */
 	bool preempted_in_kernel;
......
@@ -4843,6 +4843,12 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
 	enum emulation_result er;
 	bool direct = vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu);

+	/* With shadow page tables, fault_address contains a GVA or nGPA. */
+	if (vcpu->arch.mmu.direct_map) {
+		vcpu->arch.gpa_available = true;
+		vcpu->arch.gpa_val = cr2;
+	}
+
 	if (unlikely(error_code & PFERR_RSVD_MASK)) {
 		r = handle_mmio_page_fault(vcpu, cr2, direct);
 		if (r == RET_MMIO_PF_EMULATE) {
......
@@ -4236,8 +4236,6 @@ static int handle_exit(struct kvm_vcpu *vcpu)

 	trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);

-	vcpu->arch.gpa_available = (exit_code == SVM_EXIT_NPF);
-
 	if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
 		vcpu->arch.cr0 = svm->vmcb->save.cr0;
 	if (npt_enabled)
......
@@ -6393,9 +6393,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
 	error_code |= (exit_qualification & 0x100) != 0 ?
 	       PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;

-	vcpu->arch.gpa_available = true;
 	vcpu->arch.exit_qualification = exit_qualification;
-
 	return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
 }
@@ -6410,7 +6408,6 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 			return kvm_skip_emulated_instruction(vcpu);
 	}

-	vcpu->arch.gpa_available = true;
 	ret = kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
 	if (ret >= 0)
 		return ret;
@@ -8644,7 +8641,6 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 	u32 vectoring_info = vmx->idt_vectoring_info;

 	trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
-	vcpu->arch.gpa_available = false;

 	/*
 	 * Flush logged GPAs PML buffer, this will make dirty_bitmap more
......
@@ -4657,25 +4657,18 @@ static int emulator_read_write_onepage(unsigned long addr, void *val,
 	 */
 	if (vcpu->arch.gpa_available &&
 	    emulator_can_use_gpa(ctxt) &&
-	    vcpu_is_mmio_gpa(vcpu, addr, exception->address, write) &&
-	    (addr & ~PAGE_MASK) == (exception->address & ~PAGE_MASK)) {
-		gpa = exception->address;
-		goto mmio;
+	    (addr & ~PAGE_MASK) == (vcpu->arch.gpa_val & ~PAGE_MASK)) {
+		gpa = vcpu->arch.gpa_val;
+		ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write);
+	} else {
+		ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
+		if (ret < 0)
+			return X86EMUL_PROPAGATE_FAULT;
 	}

-	ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
-
-	if (ret < 0)
-		return X86EMUL_PROPAGATE_FAULT;
-
-	/* For APIC access vmexit */
-	if (ret)
-		goto mmio;
-
-	if (ops->read_write_emulate(vcpu, gpa, val, bytes))
+	if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes))
 		return X86EMUL_CONTINUE;

-mmio:
 	/*
 	 * Is this MMIO handled locally?
 	 */
@@ -7002,6 +6995,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.apic_attention)
 		kvm_lapic_sync_from_vapic(vcpu);

+	vcpu->arch.gpa_available = false;
 	r = kvm_x86_ops->handle_exit(vcpu);
 	return r;

......