Commit ceee7df7 authored by Xiao Guangrong, committed by Paolo Bonzini

KVM: MMU: fix smap permission check

The current permission check assumes that the RSVD bit in PFEC is always zero;
however, this is not true, since the MMIO #PF path uses it to quickly identify
MMIO accesses.

Fix it by clearing the bit before walking the guest page table.
Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent a4cca3b4
@@ -166,6 +166,8 @@ static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 	int index = (pfec >> 1) +
 		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
 
+	WARN_ON(pfec & PFERR_RSVD_MASK);
+
 	return (mmu->permissions[index] >> pte_access) & 1;
 }
...
@@ -718,6 +718,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 					      mmu_is_nested(vcpu));
 		if (likely(r != RET_MMIO_PF_INVALID))
 			return r;
+
+		/*
+		 * page fault with PFEC.RSVD = 1 is caused by shadow
+		 * page fault, should not be used to walk guest page
+		 * table.
+		 */
+		error_code &= ~PFERR_RSVD_MASK;
 	};
 
 	r = mmu_topup_memory_caches(vcpu);
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment