Commit a75a895e authored by Sean Christopherson, committed by Paolo Bonzini

KVM: selftests: Unconditionally use memslot 0 for vaddr allocations

Drop the memslot param(s) from vm_vaddr_alloc() now that all callers
directly specify '0' as the memslot.  Drop the memslot param from
virt_pgd_alloc() as well since vm_vaddr_alloc() is its only user.
I.e. shove the hardcoded '0' down to the vm_phy_pages_alloc() calls.

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 408633c3
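
At each call site the conversion is mechanical: the two trailing memslot arguments ("0, 0") go away, and vm_vaddr_alloc() passes memslot 0 down to vm_phy_pages_alloc() itself. A minimal before/after sketch of one call site (the vm/size/gva names are taken from the vcpu_alloc_cpuid hunk below):

	/* Before: data and page-table memslots were passed explicitly by every caller. */
	vm_vaddr_t gva = vm_vaddr_alloc(vm, size, KVM_UTIL_MIN_VADDR, 0, 0);

	/* After: memslot 0 is hardcoded inside vm_vaddr_alloc()/virt_pgd_alloc(). */
	vm_vaddr_t gva = vm_vaddr_alloc(vm, size, KVM_UTIL_MIN_VADDR);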
@@ -140,8 +140,7 @@ void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
 void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid);
-vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
-			  uint32_t data_memslot, uint32_t pgd_memslot);
+vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
 vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
@@ -239,7 +238,7 @@ int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
 const char *exit_reason_str(unsigned int exit_reason);
 
-void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot);
+void virt_pgd_alloc(struct kvm_vm *vm);
 
 /*
  * VM Virtual Page Map
@@ -72,12 +72,12 @@ static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
 	return 1 << (vm->page_shift - 3);
 }
 
-void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
+void virt_pgd_alloc(struct kvm_vm *vm)
 {
 	if (!vm->pgd_created) {
 		vm_paddr_t paddr = vm_phy_pages_alloc(vm,
 			page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size,
-			KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
+			KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
 		vm->pgd = paddr;
 		vm->pgd_created = true;
 	}
@@ -302,7 +302,7 @@ void aarch64_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid,
 					DEFAULT_STACK_PGS * vm->page_size :
 					vm->page_size;
 	uint64_t stack_vaddr = vm_vaddr_alloc(vm, stack_size,
-					      DEFAULT_ARM64_GUEST_STACK_VADDR_MIN, 0, 0);
+					      DEFAULT_ARM64_GUEST_STACK_VADDR_MIN);
 
 	vm_vcpu_add(vm, vcpuid);
 	aarch64_vcpu_setup(vm, vcpuid, init);
@@ -163,7 +163,7 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename)
 		seg_vend |= vm->page_size - 1;
 		size_t seg_size = seg_vend - seg_vstart + 1;
 
-		vm_vaddr_t vaddr = vm_vaddr_alloc(vm, seg_size, seg_vstart, 0, 0);
+		vm_vaddr_t vaddr = vm_vaddr_alloc(vm, seg_size, seg_vstart);
 		TEST_ASSERT(vaddr == seg_vstart, "Unable to allocate "
 			"virtual memory for segment at requested min addr,\n"
 			"  segment idx: %u\n"
@@ -1247,15 +1247,13 @@ static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
  * a unique set of pages, with the minimum real allocation being at least
  * a page.
  */
-vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
-			  uint32_t data_memslot, uint32_t pgd_memslot)
+vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
 {
 	uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
 
-	virt_pgd_alloc(vm, pgd_memslot);
+	virt_pgd_alloc(vm);
 	vm_paddr_t paddr = vm_phy_pages_alloc(vm, pages,
-					      KVM_UTIL_MIN_PFN * vm->page_size,
-					      data_memslot);
+					      KVM_UTIL_MIN_PFN * vm->page_size, 0);
 
 	/*
 	 * Find an unused range of virtual page addresses of at least
@@ -1267,7 +1265,7 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
 	for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
 	     pages--, vaddr += vm->page_size, paddr += vm->page_size) {
 
-		virt_pg_map(vm, vaddr, paddr, pgd_memslot);
+		virt_pg_map(vm, vaddr, paddr, 0);
 
 		sparsebit_set(vm->vpages_mapped,
 			      vaddr >> vm->page_shift);
@@ -1292,7 +1290,7 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
  */
 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages)
 {
-	return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR, 0, 0);
+	return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR);
 }
 
 /*
@@ -13,7 +13,7 @@
 
 #define PAGES_PER_REGION 4
 
-void virt_pgd_alloc(struct kvm_vm *vm, uint32_t memslot)
+void virt_pgd_alloc(struct kvm_vm *vm)
 {
 	vm_paddr_t paddr;
@@ -24,7 +24,7 @@ void virt_pgd_alloc(struct kvm_vm *vm, uint32_t memslot)
 		return;
 
 	paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
-				   KVM_GUEST_PAGE_TABLE_MIN_PADDR, memslot);
+				   KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
 	memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);
 
 	vm->pgd = paddr;
@@ -170,7 +170,7 @@ void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
 		    vm->page_size);
 
 	stack_vaddr = vm_vaddr_alloc(vm, stack_size,
-				     DEFAULT_GUEST_STACK_VADDR_MIN, 0, 0);
+				     DEFAULT_GUEST_STACK_VADDR_MIN);
 
 	vm_vcpu_add(vm, vcpuid);
@@ -207,7 +207,7 @@ void sregs_dump(FILE *stream, struct kvm_sregs *sregs,
 	}
 }
 
-void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
+void virt_pgd_alloc(struct kvm_vm *vm)
 {
 	TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
 		"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
@@ -215,7 +215,7 @@ void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
 	/* If needed, create page map l4 table. */
 	if (!vm->pgd_created) {
 		vm_paddr_t paddr = vm_phy_page_alloc(vm,
-			KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
+			KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
 		vm->pgd = paddr;
 		vm->pgd_created = true;
 	}
@@ -580,7 +580,7 @@ void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
 	struct kvm_regs regs;
 	vm_vaddr_t stack_vaddr;
 	stack_vaddr = vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
-				     DEFAULT_GUEST_STACK_VADDR_MIN, 0, 0);
+				     DEFAULT_GUEST_STACK_VADDR_MIN);
 
 	/* Create VCPU */
 	vm_vcpu_add(vm, vcpuid);
@@ -145,7 +145,7 @@ static void run_vcpu(struct kvm_vm *vm, uint32_t vcpuid, int stage)
 struct kvm_cpuid2 *vcpu_alloc_cpuid(struct kvm_vm *vm, vm_vaddr_t *p_gva, struct kvm_cpuid2 *cpuid)
 {
 	int size = sizeof(*cpuid) + cpuid->nent * sizeof(cpuid->entries[0]);
-	vm_vaddr_t gva = vm_vaddr_alloc(vm, size, KVM_UTIL_MIN_VADDR, 0, 0);
+	vm_vaddr_t gva = vm_vaddr_alloc(vm, size, KVM_UTIL_MIN_VADDR);
 	struct kvm_cpuid2 *guest_cpuids = addr_gva2hva(vm, gva);
 
 	memcpy(guest_cpuids, cpuid, size);