Commit 15cabbc2 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Subsume nested GPA read helper into load_pdptrs()

Open code the call to mmu->translate_gpa() when loading nested PDPTRs and
kill off the existing helper, kvm_read_guest_page_mmu(), to discourage
incorrect use.  Reading guest memory straight from an L2 GPA is extremely
rare (as evidenced by the lack of users), as very few constructs in x86
specify physical addresses, even fewer are virtualized by KVM, and even
fewer yet require emulation of L2 by L0 KVM.

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20210831164224.1119728-3-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent a1c42dde
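
For readers without the surrounding tree: kvm_read_guest_page_mmu() bundled two steps, translating a (possibly nested) GPA through the MMU's translate_gpa() callback and then reading the resulting L1 GPA. The patch open codes exactly that sequence at the helper's only call site, load_pdptrs(). A minimal sketch of the pattern, using the identifiers that appear in the diff below (error handling abbreviated):

	gpa_t real_gpa;

	/* Translate the L2 GPA to an L1 GPA; NULL => caller doesn't want fault info. */
	real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, NULL);
	if (real_gpa == UNMAPPED_GVA)
		return 0;	/* translation through L1's page tables failed */

	/* Read through the ordinary L1 GPA path. */
	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(real_gpa), data, offset, len);
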
arch/x86/include/asm/kvm_host.h
@@ -1714,9 +1714,6 @@ void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
 bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
 				    struct x86_exception *fault);
-int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-			    gfn_t gfn, void *data, int offset, int len,
-			    u32 access);
 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);
arch/x86/kvm/x86.c
@@ -790,30 +790,6 @@ bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
 }
 EXPORT_SYMBOL_GPL(kvm_require_dr);
 
-/*
- * This function will be used to read from the physical memory of the currently
- * running guest. The difference to kvm_vcpu_read_guest_page is that this function
- * can read from guest physical or from the guest's guest physical memory.
- */
-int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-			    gfn_t ngfn, void *data, int offset, int len,
-			    u32 access)
-{
-	struct x86_exception exception;
-	gfn_t real_gfn;
-	gpa_t ngpa;
-
-	ngpa     = gfn_to_gpa(ngfn);
-	real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception);
-	if (real_gfn == UNMAPPED_GVA)
-		return -EFAULT;
-
-	real_gfn = gpa_to_gfn(real_gfn);
-
-	return kvm_vcpu_read_guest_page(vcpu, real_gfn, data, offset, len);
-}
-EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
-
 static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2);
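
Dropping the helper does not regress non-nested callers of load_pdptrs(): when the MMU is not nested, the translate_gpa() callback is the identity translation, so the open-coded call in the next hunk degenerates to a plain read of the L1 GPA. At this point in the tree the non-nested callback is, in essence:

	/* Non-nested default: an L1 GPA needs no further translation. */
	static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
				   struct x86_exception *exception)
	{
		return gpa;
	}
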
@@ -825,34 +801,38 @@ static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
 {
 	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
-	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
+	unsigned offset = (((cr3 & (PAGE_SIZE-1)) >> 5) << 2) * sizeof(u64);
+	gpa_t real_gpa;
 	int i;
 	int ret;
 	u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
 
-	ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
-				      offset * sizeof(u64), sizeof(pdpte),
-				      PFERR_USER_MASK|PFERR_WRITE_MASK);
-	if (ret < 0) {
-		ret = 0;
-		goto out;
-	}
+	/*
+	 * If the MMU is nested, CR3 holds an L2 GPA and needs to be translated
+	 * to an L1 GPA.
+	 */
+	real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(pdpt_gfn),
+				      PFERR_USER_MASK | PFERR_WRITE_MASK, NULL);
+	if (real_gpa == UNMAPPED_GVA)
+		return 0;
+
+	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(real_gpa), pdpte,
+				       offset, sizeof(pdpte));
+	if (ret < 0)
+		return 0;
 
 	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
 		if ((pdpte[i] & PT_PRESENT_MASK) &&
 		    (pdpte[i] & pdptr_rsvd_bits(vcpu))) {
-			ret = 0;
-			goto out;
+			return 0;
 		}
 	}
-	ret = 1;
 
 	memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
 	kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
 	vcpu->arch.pdptrs_from_userspace = false;
 
-out:
-	return ret;
+	return 1;
 }
 EXPORT_SYMBOL_GPL(load_pdptrs);
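
One detail worth spelling out in the hunk above: the old code kept offset as a u64 index and scaled it by sizeof(u64) at the read call; the new code folds that scaling into the initializer, so offset is now a byte offset passed through unchanged, which is part of why this is not a functional change. For any cr3, (((cr3 & (PAGE_SIZE-1)) >> 5) << 2) * sizeof(u64) reduces to cr3 & 0xFE0, i.e. CR3 bits 11:5, the 32-byte-aligned location of the four PDPTEs within the page. A standalone sanity check of that identity (hypothetical userspace harness, not part of the patch):

	#include <assert.h>
	#include <stdint.h>

	#define PAGE_SIZE 4096UL

	int main(void)
	{
		uint64_t cr3;

		for (cr3 = 0; cr3 < 2 * PAGE_SIZE; cr3++) {
			/* Old: index computed here, scaled by 8 at the call site.
			 * New: byte offset computed up front. Same value either way. */
			unsigned offset = (((cr3 & (PAGE_SIZE - 1)) >> 5) << 2)
					  * sizeof(uint64_t);
			assert(offset == (cr3 & 0xFE0));	/* CR3 bits 11:5 */
		}
		return 0;
	}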