Commit d95c5568 authored by Bandan Das, committed by Paolo Bonzini

kvm: mmu: track read permission explicitly for shadow EPT page tables

To support execute only mappings on behalf of L1 hypervisors,
reuse ACC_USER_MASK to signify if the L1 hypervisor has the R bit
set.

For the nested EPT case, we assumed that the U bit was always set
since there was no equivalent in EPT page tables.  Strictly
speaking, this was not necessary because handle_ept_violation
never set PFERR_USER_MASK in the error code (uf=0 in the
parlance of update_permission_bitmask).  We now have to set
both U and UF correctly, respectively in FNAME(gpte_access)
and in handle_ept_violation.

Also, in handle_ept_violation, bit 3 of the exit qualification is
not enough to detect a present PTE; all three bits 3-5 have to
be checked.
Signed-off-by: Bandan Das <bsd@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent ffb128c8
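Before the diff itself, here is a rough standalone sketch of the exit-qualification-to-error-code translation described above. It is not part of the patch: the PFERR_* values mirror KVM's x86 page-fault error-code masks, the exit-qualification bit layout is the Intel SDM's, and ept_error_code is a hypothetical helper used only for illustration.

#include <stdint.h>
#include <stdio.h>

#define PFERR_WRITE_MASK  (1U << 1)  /* fault was a write */
#define PFERR_USER_MASK   (1U << 2)  /* user-mode access; for EPT, carries "read access" */
#define PFERR_FETCH_MASK  (1U << 4)  /* fault was an instruction fetch */

/* EPT violation exit qualification (Intel SDM): bit 0 = read access,
 * bit 1 = write access, bit 2 = instruction fetch, bits 3-5 = the
 * R/W/X permissions of the translation that faulted. */
static uint32_t ept_error_code(uint64_t exit_qualification)
{
        uint32_t error_code;

        /* it is a read fault? bit 0 shifts into PFERR_USER_MASK (bit 2) */
        error_code = (exit_qualification << 2) & PFERR_USER_MASK;
        /* it is a write fault? bit 1 already lines up with PFERR_WRITE_MASK */
        error_code |= exit_qualification & PFERR_WRITE_MASK;
        /* it is a fetch fault? bit 2 shifts into PFERR_FETCH_MASK (bit 4) */
        error_code |= (exit_qualification << 2) & PFERR_FETCH_MASK;
        /* ept page table is present? any of R/W/X (bits 3-5) set, so an
         * execute-only entry (X set, R clear) still counts as present */
        error_code |= (exit_qualification & 0x38) != 0;
        return error_code;
}

int main(void)
{
        /* a read of an execute-only mapping: bit 0 (read access) plus
         * bit 5 (translation was executable) */
        printf("error_code = %#x\n",
               (unsigned)ept_error_code((1ULL << 0) | (1ULL << 5)));
        return 0;
}

Testing bit 3 alone, as the old code effectively did, would misreport an execute-only translation as not-present, which is why all of bits 3-5 are checked.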
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2522,6 +2522,12 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
 		return 0;
 
+	/*
+	 * For the EPT case, shadow_present_mask is 0 if hardware
+	 * supports exec-only page table entries.  In that case,
+	 * ACC_USER_MASK and shadow_user_mask are used to represent
+	 * read access.  See FNAME(gpte_access) in paging_tmpl.h.
+	 */
 	spte |= shadow_present_mask;
 	if (!speculative)
 		spte |= shadow_accessed_mask;
@@ -3915,9 +3921,7 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu,
 				 *   clearer.
 				 */
 				smap = cr4_smap && u && !uf && !ff;
-			} else
-				/* Not really needed: no U/S accesses on ept  */
-				u = 1;
+			}
 
 			fault = (ff && !x) || (uf && !u) || (wf && !w) ||
 				(smapf && smap);
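As an aside, the effect of dropping the unconditional u = 1 can be checked with a small standalone model of the fault expression above. ept_fault is a hypothetical helper, the SMAP term is omitted because it does not apply to EPT, and for EPT uf now means "read access" while u means "readable".

#include <stdbool.h>
#include <stdio.h>

/* mirrors fault = (ff && !x) || (uf && !u) || (wf && !w) with the
 * EPT meaning of the flags: u = gpte readable, uf = read access */
static bool ept_fault(bool ff, bool uf, bool wf, bool x, bool u, bool w)
{
        return (ff && !x) || (uf && !u) || (wf && !w);
}

int main(void)
{
        /* execute-only L1 mapping: x = 1, u (readable) = 0, w = 0 */
        printf("read faults: %d\n",  ept_fault(false, true,  false, true, false, false));
        printf("fetch faults: %d\n", ept_fault(true,  false, false, true, false, false));
        return 0;
}

With the old unconditional u = 1, the term (uf && !u) could never fire, so the read would incorrectly be allowed.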
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -181,13 +181,19 @@ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
 	return true;
 }
 
+/*
+ * For PTTYPE_EPT, a page table can be executable but not readable
+ * on supported processors. Therefore, set_spte does not automatically
+ * set bit 0 if execute only is supported. Here, we repurpose ACC_USER_MASK
+ * to signify readability since it isn't used in the EPT case
+ */
 static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte)
 {
 	unsigned access;
 #if PTTYPE == PTTYPE_EPT
 	access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
 		((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
-		ACC_USER_MASK;
+		((gpte & VMX_EPT_READABLE_MASK) ? ACC_USER_MASK : 0);
 #else
 	BUILD_BUG_ON(ACC_EXEC_MASK != PT_PRESENT_MASK);
 	BUILD_BUG_ON(ACC_EXEC_MASK != 1);
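The EPT branch of FNAME(gpte_access) after this change can be exercised in isolation. In the sketch below the mask values mirror KVM's definitions, and ept_gpte_access is a hypothetical stand-in for the real template function.

#include <stdint.h>
#include <stdio.h>

#define ACC_EXEC_MASK   1
#define ACC_WRITE_MASK  2
#define ACC_USER_MASK   4  /* repurposed: gpte is readable */

#define VMX_EPT_READABLE_MASK    0x1ULL
#define VMX_EPT_WRITABLE_MASK    0x2ULL
#define VMX_EPT_EXECUTABLE_MASK  0x4ULL

static unsigned ept_gpte_access(uint64_t gpte)
{
        /* ACC_USER_MASK now follows the guest R bit instead of being
         * hard-coded, so an exec-only gpte yields ACC_EXEC_MASK alone */
        return ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
               ((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
               ((gpte & VMX_EPT_READABLE_MASK) ? ACC_USER_MASK : 0);
}

int main(void)
{
        /* execute-only guest EPT entry: X set, R and W clear */
        printf("access = %u\n", ept_gpte_access(VMX_EPT_EXECUTABLE_MASK));
        return 0;
}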
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6117,12 +6117,14 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
 	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
 	trace_kvm_page_fault(gpa, exit_qualification);
 
-	/* It is a write fault? */
-	error_code = exit_qualification & PFERR_WRITE_MASK;
+	/* it is a read fault? */
+	error_code = (exit_qualification << 2) & PFERR_USER_MASK;
+	/* it is a write fault? */
+	error_code |= exit_qualification & PFERR_WRITE_MASK;
 	/* It is a fetch fault? */
 	error_code |= (exit_qualification << 2) & PFERR_FETCH_MASK;
 	/* ept page table is present? */
-	error_code |= (exit_qualification >> 3) & PFERR_PRESENT_MASK;
+	error_code |= (exit_qualification & 0x38) != 0;
 
 	vcpu->arch.exit_qualification = exit_qualification;
@@ -6469,11 +6471,12 @@ static __init int hardware_setup(void)
 	vmx_disable_intercept_msr_write_x2apic(0x83f);
 
 	if (enable_ept) {
-		kvm_mmu_set_mask_ptes(0ull,
+		kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK,
 			(enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
 			(enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
-			0ull, VMX_EPT_EXECUTABLE_MASK,
-			VMX_EPT_READABLE_MASK);
+			0ull, VMX_EPT_EXECUTABLE_MASK,
+			cpu_has_vmx_ept_execute_only() ?
+				0ull : VMX_EPT_READABLE_MASK);
 		ept_set_mmio_spte_mask();
 		kvm_enable_tdp();
 	} else
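Finally, a hedged sketch of how the masks installed by hardware_setup() interact with set_spte(). shadow_present_mask and shadow_user_mask are modeled as locals rather than KVM's globals, and make_spte is a hypothetical reduction of set_spte to the two bits this commit is about.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ACC_USER_MASK          4  /* "gpte is readable" */
#define VMX_EPT_READABLE_MASK  0x1ULL

static uint64_t make_spte(unsigned pte_access, bool exec_only_supported)
{
        /* as in hardware_setup(): with exec-only support, "present" no
         * longer implies the R bit... */
        uint64_t shadow_present_mask = exec_only_supported ? 0 : VMX_EPT_READABLE_MASK;
        /* ...and the R bit is instead driven by ACC_USER_MASK */
        uint64_t shadow_user_mask = VMX_EPT_READABLE_MASK;

        uint64_t spte = shadow_present_mask;
        if (pte_access & ACC_USER_MASK)
                spte |= shadow_user_mask;
        return spte;
}

int main(void)
{
        /* exec-only L1 mapping (ACC_USER_MASK clear): R stays clear only
         * when the hardware supports execute-only entries */
        printf("exec-only hw: spte = %#llx\n",
               (unsigned long long)make_spte(0, true));
        printf("legacy hw:    spte = %#llx\n",
               (unsigned long long)make_spte(0, false));
        return 0;
}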