Commit 87a802d9 authored by Andrew Jones, committed by Paolo Bonzini

KVM: selftests: Introduce num-pages conversion utilities

Guests and hosts don't necessarily use the same page size. This means
calculations are necessary when selecting the number of guest pages
to allocate in order to ensure the number is compatible with the
host. Provide utilities to help with those calculations and apply
them where appropriate.

We also revert commit bffed38d ("kvm: selftests: aarch64:
dirty_log_test: fix unaligned memslot size") and then use
vm_adjust_num_guest_pages() there instead.
Signed-off-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 377a41c9
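To make the arithmetic concrete before reading the diff: with 64K guest pages on a 4K host, each guest page spans 16 host pages, so guest counts convert to host counts by multiplication; in the opposite direction the guest count must be rounded up so the region covers whole host pages. Below is a minimal standalone sketch of that conversion; the shift values (16 for 64K pages, 12 for 4K pages) and the helper name are illustrative, not taken from the selftests code.

#include <stdio.h>

/*
 * Illustrative sketch of the guest<->host page-count conversion the
 * new helpers perform; the real code reads the page shift from
 * vm_guest_mode_params[mode] and getpagesize().
 */
static unsigned int calc_num_pages(unsigned int num_pages,
				   unsigned int page_shift,
				   unsigned int new_page_shift,
				   int ceil)
{
	if (page_shift >= new_page_shift)	/* source pages are larger */
		return num_pages << (page_shift - new_page_shift);

	unsigned int n = 1u << (new_page_shift - page_shift);

	return num_pages / n + !!(ceil && num_pages % n);
}

int main(void)
{
	/* 64K guest pages on a 4K host: 3 guest pages -> 48 host pages */
	printf("%u\n", calc_num_pages(3, 16, 12, 1));
	/* 4K guest pages on a 64K host: 3 guest pages -> 1 host page (ceil) */
	printf("%u\n", calc_num_pages(3, 12, 16, 1));
	return 0;
}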
@@ -178,12 +178,11 @@ static void *vcpu_worker(void *data)
 	return NULL;
 }
 
-static void vm_dirty_log_verify(unsigned long *bmap)
+static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long *bmap)
 {
+	uint64_t step = vm_num_host_pages(mode, 1);
 	uint64_t page;
 	uint64_t *value_ptr;
-	uint64_t step = host_page_size >= guest_page_size ? 1 :
-			guest_page_size / host_page_size;
 
 	for (page = 0; page < host_num_pages; page += step) {
 		value_ptr = host_test_mem + page * host_page_size;
@@ -289,14 +288,14 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 	 * case where the size is not aligned to 64 pages.
 	 */
 	guest_num_pages = (1ul << (DIRTY_MEM_BITS -
-				   vm_get_page_shift(vm))) + 16;
+				   vm_get_page_shift(vm))) + 3;
+	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
 #ifdef __s390x__
 	/* Round up to multiple of 1M (segment size) */
 	guest_num_pages = (guest_num_pages + 0xff) & ~0xffUL;
 #endif
 	host_page_size = getpagesize();
-	host_num_pages = (guest_num_pages * guest_page_size) / host_page_size +
-			 !!((guest_num_pages * guest_page_size) % host_page_size);
+	host_num_pages = vm_num_host_pages(mode, guest_num_pages);
 
 	if (!phys_offset) {
 		guest_test_phys_mem = (vm_get_max_gfn(vm) -
@@ -367,7 +366,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 		kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap, 0,
 				       host_num_pages);
 #endif
-		vm_dirty_log_verify(bmap);
+		vm_dirty_log_verify(mode, bmap);
 		iteration++;
 		sync_global_to_guest(vm, iteration);
 	}
...
@@ -164,6 +164,14 @@ unsigned int vm_get_page_size(struct kvm_vm *vm);
 unsigned int vm_get_page_shift(struct kvm_vm *vm);
 unsigned int vm_get_max_gfn(struct kvm_vm *vm);
 
+unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
+unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);
+
+static inline unsigned int
+vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
+{
+	return vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
+}
+
 struct kvm_userspace_memory_region *
 kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
 				 uint64_t end);
...
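The vm_adjust_num_guest_pages() helper added above is a round trip: convert guest pages to host pages rounding up, then back rounding down, which yields the smallest compatible count that is not below the input. A small self-contained check of that property, using hypothetical shifts (4K guest pages on a 64K host, a ratio of 16):

#include <assert.h>

int main(void)
{
	/* hypothetical shifts: 4K guest page (12) on a 64K host page (16) */
	unsigned int ratio = 1u << (16 - 12);	/* 16 guest pages per host page */
	unsigned int npages = 3;
	unsigned int host = npages / ratio + !!(npages % ratio);	/* ceil: 3 -> 1 */
	unsigned int adjusted = host * ratio;	/* back down: 1 -> 16 */

	/* adjusted is the smallest multiple of ratio that is >= npages */
	assert(adjusted >= npages && adjusted % ratio == 0);
	return 0;
}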
@@ -580,6 +580,10 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
 	size_t huge_page_size = KVM_UTIL_PGS_PER_HUGEPG * vm->page_size;
 	size_t alignment;
 
+	TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages,
+		"Number of guest pages is not compatible with the host. "
+		"Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages));
+
 	TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
 		"address not on a page boundary.\n"
 		"  guest_paddr: 0x%lx vm->page_size: 0x%x",
@@ -1701,3 +1705,36 @@ unsigned int vm_get_max_gfn(struct kvm_vm *vm)
 {
 	return vm->max_gfn;
 }
+
+static unsigned int vm_calc_num_pages(unsigned int num_pages,
+				      unsigned int page_shift,
+				      unsigned int new_page_shift,
+				      bool ceil)
+{
+	unsigned int n = 1 << (new_page_shift - page_shift);
+
+	if (page_shift >= new_page_shift)
+		return num_pages * (1 << (page_shift - new_page_shift));
+
+	return num_pages / n + !!(ceil && num_pages % n);
+}
+
+static inline int getpageshift(void)
+{
+	return __builtin_ffs(getpagesize()) - 1;
+}
+
+unsigned int
+vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
+{
+	return vm_calc_num_pages(num_guest_pages,
+				 vm_guest_mode_params[mode].page_shift,
+				 getpageshift(), true);
+}
+
+unsigned int
+vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages)
+{
+	return vm_calc_num_pages(num_host_pages, getpageshift(),
+				 vm_guest_mode_params[mode].page_shift, false);
+}
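One caveat in vm_calc_num_pages() as committed: `n` is initialized with `1 << (new_page_shift - page_shift)` before the branch, so when page_shift is the larger of the two the shift count is negative, which is undefined behavior in C even though the value is never used on that path. A defensive variant (my sketch, not the committed code) would delay the computation:

#include <stdbool.h>

/* Sketch only: same results as vm_calc_num_pages(), but the divisor is
 * computed only on the path that needs it, so no negative shift occurs. */
static unsigned int calc_num_pages_safe(unsigned int num_pages,
					unsigned int page_shift,
					unsigned int new_page_shift,
					bool ceil)
{
	if (page_shift >= new_page_shift)
		return num_pages << (page_shift - new_page_shift);

	unsigned int n = 1u << (new_page_shift - page_shift);

	return num_pages / n + !!(ceil && num_pages % n);
}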