Commit eebed243 authored by Paolo Bonzini

kvm: nVMX: Add support for fast unprotection of nested guest page tables

This is the same as commit 14727754 ("kvm: svm: Add support for
additional SVM NPF error codes", 2016-11-23), but for Intel processors.
In this case, bit 8 of the exit qualification field indicates whether
the EPT violation occurred while translating the guest's final physical
address or while translating the guest's page tables.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 64531a3b
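
Before the hunks, a minimal userspace sketch of what the vmx.c change computes. The bit positions mirror arch/x86/include/asm/kvm_host.h and the Intel SDM's EPT-violation exit-qualification layout, but decode_ept_violation() is a hypothetical stand-in for a few lines of handle_ept_violation(), not the kernel function itself:

	#include <stdint.h>

	/* Page-fault error-code bits, mirroring arch/x86/include/asm/kvm_host.h. */
	#define PFERR_PRESENT_MASK	(1ULL << 0)
	#define PFERR_WRITE_MASK	(1ULL << 1)
	#define PFERR_USER_MASK		(1ULL << 2)
	#define PFERR_GUEST_FINAL_MASK	(1ULL << 32)	/* fault on the final GPA */
	#define PFERR_GUEST_PAGE_MASK	(1ULL << 33)	/* fault during guest page walk */

	/* The combined mask as defined after this patch (no PFERR_USER_MASK). */
	#define PFERR_NESTED_GUEST_PAGE	(PFERR_GUEST_PAGE_MASK | \
					 PFERR_WRITE_MASK |	 \
					 PFERR_PRESENT_MASK)

	/* Hypothetical stand-in for the decoding done in handle_ept_violation(). */
	static uint64_t decode_ept_violation(uint64_t exit_qualification)
	{
		uint64_t error_code = 0;

		/* Bit 1: the faulting access was a write. */
		if (exit_qualification & (1ULL << 1))
			error_code |= PFERR_WRITE_MASK;

		/* Bits 3-5: the GPA was readable/writable/executable, i.e. the
		 * translation was present. */
		if (exit_qualification & (0x7ULL << 3))
			error_code |= PFERR_PRESENT_MASK;

		/* Bit 8, the new part: set means the violation occurred while
		 * translating the final guest-physical address, clear means it
		 * occurred while the CPU was walking the guest's page tables. */
		error_code |= (exit_qualification & 0x100) != 0 ?
			      PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;

		return error_code;
	}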
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -204,7 +204,6 @@ enum {
 #define PFERR_GUEST_PAGE_MASK	(1ULL << PFERR_GUEST_PAGE_BIT)
 
 #define PFERR_NESTED_GUEST_PAGE	(PFERR_GUEST_PAGE_MASK |	\
-				 PFERR_USER_MASK |		\
				 PFERR_WRITE_MASK |		\
				 PFERR_PRESENT_MASK)
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4836,12 +4836,9 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
 	 * This can occur when using nested virtualization with nested
 	 * paging in both guests. If true, we simply unprotect the page
 	 * and resume the guest.
-	 *
-	 * Note: AMD only (since it supports the PFERR_GUEST_PAGE_MASK used
-	 * in PFERR_NEXT_GUEST_PAGE)
 	 */
 	if (vcpu->arch.mmu.direct_map &&
-	    error_code == PFERR_NESTED_GUEST_PAGE) {
+	    (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
 		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2));
 		return 1;
 	}
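
The subtle part of this hunk is the comparison: once PFERR_USER_MASK is dropped from PFERR_NESTED_GUEST_PAGE, a fault's error code can carry bits beyond the required three, so exact equality would miss it. The masked test instead asks whether all required bits are set. Continuing the sketch above:

	#include <assert.h>

	static void nested_guest_page_checks(void)
	{
		/* An error code with one extra bit (USER) beyond the required set. */
		uint64_t error_code = PFERR_NESTED_GUEST_PAGE | PFERR_USER_MASK;

		/* Old test: exact equality fails once any extra bit is set. */
		assert(error_code != PFERR_NESTED_GUEST_PAGE);

		/* New test: "all required bits set" still matches. */
		assert((error_code & PFERR_NESTED_GUEST_PAGE) ==
		       PFERR_NESTED_GUEST_PAGE);
	}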
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6358,7 +6358,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
 {
 	unsigned long exit_qualification;
 	gpa_t gpa;
-	u32 error_code;
+	u64 error_code;
 
 	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
@@ -6390,6 +6390,9 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
 		      EPT_VIOLATION_EXECUTABLE))
 		      ? PFERR_PRESENT_MASK : 0;
 
+	error_code |= (exit_qualification & 0x100) != 0 ?
+	       PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
+
 	vcpu->arch.gpa_available = true;
 	vcpu->arch.exit_qualification = exit_qualification;
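
Putting the two halves together with the sketch above (illustrative only, not the kernel code): a write EPT violation taken while the CPU walks the guest's own page tables now decodes to exactly the pattern that kvm_mmu_page_fault() fast-unprotects, on Intel just as on AMD:

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical exit qualification: write access (bit 1), writable
		 * translation (bit 4), bit 8 clear, so the fault hit during the
		 * guest's page-table walk rather than on the final GPA. */
		uint64_t exit_qualification = (1ULL << 1) | (1ULL << 4);
		uint64_t error_code = decode_ept_violation(exit_qualification);

		if ((error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE)
			printf("fast path: unprotect the page and resume the guest\n");
		return 0;
	}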