Commit a9db9609 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: selftests: Add helpers to allocate N pages of virtual memory

Add wrappers to allocate 1 and N pages of memory using de facto standard
values as the defaults for minimum virtual address, data memslot, and
page table memslot.  Convert all compatible users.

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210622200529.3650424-7-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 95be3709
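
To illustrate the intent of the change, a minimal usage sketch follows (the variable names gva and hcall_gva and the surrounding test are hypothetical, not part of this commit; the helpers themselves are the ones introduced below):

	/* Before: each caller spelled out the de facto defaults by hand. */
	gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);

	/* After: allocate a single page using the standard defaults. */
	gva = vm_vaddr_alloc_page(vm);

	/* After: allocate N pages, e.g. a two-page hypercall buffer. */
	hcall_gva = vm_vaddr_alloc_pages(vm, 2);
	memset(addr_gva2hva(vm, hcall_gva), 0, 2 * getpagesize());
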
@@ -142,6 +142,9 @@ void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
 void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid);
 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
			   uint32_t data_memslot, uint32_t pgd_memslot);
+vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
+vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
+
 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	       unsigned int npages, uint32_t pgd_memslot);
 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
...
@@ -1276,6 +1276,44 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
 	return vaddr_start;
 }
 
+/*
+ * VM Virtual Address Allocate Pages
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *
+ * Output Args: None
+ *
+ * Return:
+ *   Starting guest virtual address
+ *
+ * Allocates at least N system pages worth of bytes within the virtual address
+ * space of the vm.
+ */
+vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages)
+{
+	return vm_vaddr_alloc(vm, nr_pages * getpagesize(), 0x10000, 0, 0);
+}
+
+/*
+ * VM Virtual Address Allocate Page
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *
+ * Output Args: None
+ *
+ * Return:
+ *   Starting guest virtual address
+ *
+ * Allocates at least one system page worth of bytes within the virtual address
+ * space of the vm.
+ */
+vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm)
+{
+	return vm_vaddr_alloc_pages(vm, 1);
+}
+
 /*
  * Map a range of VM virtual address to the VM's physical address
  *
...
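
Both helpers are thin wrappers around vm_vaddr_alloc(), hard-coding the de facto defaults (minimum guest virtual address 0x10000, data memslot 0, page table memslot 0). A hypothetical side-by-side, for illustration only, assuming an already created vm:

	/* These two calls request the same allocation; the wrapper merely
	 * fills in the defaults that callers previously passed by hand.
	 */
	vm_vaddr_t a = vm_vaddr_alloc_pages(vm, 4);
	vm_vaddr_t b = vm_vaddr_alloc(vm, 4 * getpagesize(), 0x10000, 0, 0);
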
@@ -30,17 +30,14 @@ u64 rflags;
 struct svm_test_data *
 vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva)
 {
-	vm_vaddr_t svm_gva = vm_vaddr_alloc(vm, getpagesize(),
-					    0x10000, 0, 0);
+	vm_vaddr_t svm_gva = vm_vaddr_alloc_page(vm);
 	struct svm_test_data *svm = addr_gva2hva(vm, svm_gva);
 
-	svm->vmcb = (void *)vm_vaddr_alloc(vm, getpagesize(),
-					   0x10000, 0, 0);
+	svm->vmcb = (void *)vm_vaddr_alloc_page(vm);
 	svm->vmcb_hva = addr_gva2hva(vm, (uintptr_t)svm->vmcb);
 	svm->vmcb_gpa = addr_gva2gpa(vm, (uintptr_t)svm->vmcb);
 
-	svm->save_area = (void *)vm_vaddr_alloc(vm, getpagesize(),
-						0x10000, 0, 0);
+	svm->save_area = (void *)vm_vaddr_alloc_page(vm);
 	svm->save_area_hva = addr_gva2hva(vm, (uintptr_t)svm->save_area);
 	svm->save_area_gpa = addr_gva2gpa(vm, (uintptr_t)svm->save_area);
...
@@ -77,50 +77,48 @@ int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id)
 struct vmx_pages *
 vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
 {
-	vm_vaddr_t vmx_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+	vm_vaddr_t vmx_gva = vm_vaddr_alloc_page(vm);
 	struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva);
 
 	/* Setup of a region of guest memory for the vmxon region. */
-	vmx->vmxon = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+	vmx->vmxon = (void *)vm_vaddr_alloc_page(vm);
 	vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon);
 	vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon);
 
 	/* Setup of a region of guest memory for a vmcs. */
-	vmx->vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+	vmx->vmcs = (void *)vm_vaddr_alloc_page(vm);
 	vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs);
 	vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmcs);
 
 	/* Setup of a region of guest memory for the MSR bitmap. */
-	vmx->msr = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+	vmx->msr = (void *)vm_vaddr_alloc_page(vm);
 	vmx->msr_hva = addr_gva2hva(vm, (uintptr_t)vmx->msr);
 	vmx->msr_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->msr);
 	memset(vmx->msr_hva, 0, getpagesize());
 
 	/* Setup of a region of guest memory for the shadow VMCS. */
-	vmx->shadow_vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+	vmx->shadow_vmcs = (void *)vm_vaddr_alloc_page(vm);
 	vmx->shadow_vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->shadow_vmcs);
 	vmx->shadow_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->shadow_vmcs);
 
 	/* Setup of a region of guest memory for the VMREAD and VMWRITE bitmaps. */
-	vmx->vmread = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+	vmx->vmread = (void *)vm_vaddr_alloc_page(vm);
 	vmx->vmread_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmread);
 	vmx->vmread_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmread);
 	memset(vmx->vmread_hva, 0, getpagesize());
 
-	vmx->vmwrite = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+	vmx->vmwrite = (void *)vm_vaddr_alloc_page(vm);
 	vmx->vmwrite_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmwrite);
 	vmx->vmwrite_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmwrite);
 	memset(vmx->vmwrite_hva, 0, getpagesize());
 
 	/* Setup of a region of guest memory for the VP Assist page. */
-	vmx->vp_assist = (void *)vm_vaddr_alloc(vm, getpagesize(),
-						0x10000, 0, 0);
+	vmx->vp_assist = (void *)vm_vaddr_alloc_page(vm);
 	vmx->vp_assist_hva = addr_gva2hva(vm, (uintptr_t)vmx->vp_assist);
 	vmx->vp_assist_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vp_assist);
 
 	/* Setup of a region of guest memory for the enlightened VMCS. */
-	vmx->enlightened_vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(),
-						       0x10000, 0, 0);
+	vmx->enlightened_vmcs = (void *)vm_vaddr_alloc_page(vm);
 	vmx->enlightened_vmcs_hva =
 		addr_gva2hva(vm, (uintptr_t)vmx->enlightened_vmcs);
 	vmx->enlightened_vmcs_gpa =
@@ -538,7 +536,7 @@ void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
 void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
		   uint32_t eptp_memslot)
 {
-	vmx->eptp = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+	vmx->eptp = (void *)vm_vaddr_alloc_page(vm);
 	vmx->eptp_hva = addr_gva2hva(vm, (uintptr_t)vmx->eptp);
 	vmx->eptp_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->eptp);
 }
@@ -546,8 +544,7 @@ void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
 void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm,
				       uint32_t eptp_memslot)
 {
-	vmx->apic_access = (void *)vm_vaddr_alloc(vm, getpagesize(),
-						  0x10000, 0, 0);
+	vmx->apic_access = (void *)vm_vaddr_alloc_page(vm);
 	vmx->apic_access_hva = addr_gva2hva(vm, (uintptr_t)vmx->apic_access);
 	vmx->apic_access_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->apic_access);
 }
@@ -214,7 +214,7 @@ int main(void)
 
 	vcpu_set_hv_cpuid(vm, VCPU_ID);
 
-	tsc_page_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+	tsc_page_gva = vm_vaddr_alloc_page(vm);
 	memset(addr_gpa2hva(vm, tsc_page_gva), 0x0, getpagesize());
 	TEST_ASSERT((addr_gva2gpa(vm, tsc_page_gva) & (getpagesize() - 1)) == 0,
		    "TSC page has to be page aligned\n");
...
@@ -604,7 +604,7 @@ int main(void)
 	/* Test MSRs */
 	vm = vm_create_default(VCPU_ID, 0, guest_msr);
 
-	msr_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+	msr_gva = vm_vaddr_alloc_page(vm);
 	memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
 	vcpu_args_set(vm, VCPU_ID, 1, msr_gva);
 	vcpu_enable_cap(vm, VCPU_ID, &cap);
@@ -626,10 +626,10 @@ int main(void)
 	vm = vm_create_default(VCPU_ID, 0, guest_hcall);
 
 	/* Hypercall input/output */
-	hcall_page = vm_vaddr_alloc(vm, 2 * getpagesize(), 0x10000, 0, 0);
+	hcall_page = vm_vaddr_alloc_pages(vm, 2);
 	memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());
 
-	hcall_params = vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+	hcall_params = vm_vaddr_alloc_page(vm);
 	memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
 
 	vcpu_args_set(vm, VCPU_ID, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
...