Commit f13577e8 authored by Paolo Bonzini

KVM: MMU: return page fault error code from permission_fault

This will help in the implementation of PKRU, where the PK bit of the page
fault error code cannot be computed in advance (unlike I/D, R/W and U/S).
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e9ad4ec8
@@ -141,11 +141,15 @@ static inline bool is_write_protection(struct kvm_vcpu *vcpu)
 }
 
 /*
- * Will a fault with a given page-fault error code (pfec) cause a permission
- * fault with the given access (in ACC_* format)?
+ * Check if a given access (described through the I/D, W/R and U/S bits of a
+ * page fault error code pfec) causes a permission fault with the given PTE
+ * access rights (in ACC_* format).
+ *
+ * Return zero if the access does not fault; return the page fault error code
+ * if the access faults.
  */
-static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-				    unsigned pte_access, unsigned pfec)
+static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+				  unsigned pte_access, unsigned pfec)
 {
 	int cpl = kvm_x86_ops->get_cpl(vcpu);
 	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
@@ -169,7 +173,8 @@ static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 
 	WARN_ON(pfec & PFERR_RSVD_MASK);
 
-	return (mmu->permissions[index] >> pte_access) & 1;
+	pfec |= PFERR_PRESENT_MASK;
+	return -((mmu->permissions[index] >> pte_access) & 1) & pfec;
 }
 
 void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
@@ -359,10 +359,9 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 		walker->ptes[walker->level - 1] = pte;
 	} while (!is_last_gpte(mmu, walker->level, pte));
 
-	if (unlikely(permission_fault(vcpu, mmu, pte_access, access))) {
-		errcode |= PFERR_PRESENT_MASK;
+	errcode = permission_fault(vcpu, mmu, pte_access, access);
+	if (unlikely(errcode))
 		goto error;
-	}
 
 	gfn = gpte_to_gfn_lvl(pte, walker->level);
 	gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;
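
For readers unfamiliar with the idiom in the new return statement, here is a minimal standalone sketch of the branchless selection it relies on; select_errcode and the sample values are illustrative only, not part of the kernel patch.

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the trick used in the new permission_fault() return:
 * when the permission bit is 1, negating it yields an all-ones mask,
 * so ANDing with pfec returns the full page fault error code; when
 * the bit is 0, the result is 0 (no fault). No conditional branch.
 */
static uint8_t select_errcode(unsigned int permissions,
			      unsigned int pte_access, unsigned int pfec)
{
	unsigned int fault = (permissions >> pte_access) & 1;

	return -fault & pfec;
}

int main(void)
{
	/* Hypothetical permission bitmap and error code, for illustration. */
	printf("%u\n", select_errcode(0x1, 0, 0x5)); /* bit set: prints 5 */
	printf("%u\n", select_errcode(0x0, 0, 0x5)); /* bit clear: prints 0 */
	return 0;
}

Because the callee now returns either zero or the complete error code (with PFERR_PRESENT_MASK already ORed in), the caller in walk_addr_generic no longer has to reconstruct the error code itself, which is what allows a bit such as PK, not known in advance, to be folded in by permission_fault later.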