Commit 52200d0d authored by Peter Xu, committed by Paolo Bonzini

KVM: selftests: Remove duplicate guest mode handling

Remove the duplicated guest mode handling in run_test() of dirty_log_test:
after some reordering of functions, we can now directly use the outcome
of vm_create().

Meanwhile, with the new VM_MODE_PXXV48_4K, we can safely revert b442324b
too, which pinned the x86_64 PA width to 39 bits for dirty_log_test.
Reviewed-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 567a9f1e
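
For illustration, a minimal sketch of the pattern the test moves to, using the three new accessors. The helper name setup_geometry is hypothetical and the formulas simply mirror the diff below; DIRTY_MEM_BITS is the test's own constant (30 in dirty_log_test.c):

#include <stdint.h>
#include "kvm_util.h"

#define DIRTY_MEM_BITS	30	/* as in dirty_log_test.c: ~1G test region */

/* Hypothetical helper: derive test geometry from the VM, not from 'mode'. */
static void setup_geometry(struct kvm_vm *vm, uint64_t *page_size,
			   uint64_t *num_pages, uint64_t *phys_mem)
{
	/* Page size/shift are whatever vm_create() chose for this mode. */
	*page_size = vm_get_page_size(vm);
	*num_pages = (1ul << (DIRTY_MEM_BITS - vm_get_page_shift(vm))) + 16;

	/* Place the test region just below the top of guest physical memory. */
	*phys_mem = (vm_get_max_gfn(vm) - *num_pages) * *page_size;
}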
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -267,10 +267,8 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
 static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 		     unsigned long interval, uint64_t phys_offset)
 {
-	unsigned int guest_pa_bits, guest_page_shift;
 	pthread_t vcpu_thread;
 	struct kvm_vm *vm;
-	uint64_t max_gfn;
 	unsigned long *bmap;
 
 	/*
@@ -285,54 +283,13 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 			 2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K),
 			 guest_code);
 
-	switch (mode) {
-	case VM_MODE_P52V48_4K:
-	case VM_MODE_PXXV48_4K:
-		guest_pa_bits = 52;
-		guest_page_shift = 12;
-		break;
-	case VM_MODE_P52V48_64K:
-		guest_pa_bits = 52;
-		guest_page_shift = 16;
-		break;
-	case VM_MODE_P48V48_4K:
-		guest_pa_bits = 48;
-		guest_page_shift = 12;
-		break;
-	case VM_MODE_P48V48_64K:
-		guest_pa_bits = 48;
-		guest_page_shift = 16;
-		break;
-	case VM_MODE_P40V48_4K:
-		guest_pa_bits = 40;
-		guest_page_shift = 12;
-		break;
-	case VM_MODE_P40V48_64K:
-		guest_pa_bits = 40;
-		guest_page_shift = 16;
-		break;
-	default:
-		TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", mode);
-	}
-
-	DEBUG("Testing guest mode: %s\n", vm_guest_mode_string(mode));
-
-#ifdef __x86_64__
-	/*
-	 * FIXME
-	 * The x86_64 kvm selftests framework currently only supports a
-	 * single PML4 which restricts the number of physical address
-	 * bits we can change to 39.
-	 */
-	guest_pa_bits = 39;
-#endif
-	max_gfn = (1ul << (guest_pa_bits - guest_page_shift)) - 1;
-	guest_page_size = (1ul << guest_page_shift);
+	guest_page_size = vm_get_page_size(vm);
 	/*
 	 * A little more than 1G of guest page sized pages.  Cover the
 	 * case where the size is not aligned to 64 pages.
 	 */
-	guest_num_pages = (1ul << (DIRTY_MEM_BITS - guest_page_shift)) + 16;
+	guest_num_pages = (1ul << (DIRTY_MEM_BITS -
+				   vm_get_page_shift(vm))) + 16;
 #ifdef __s390x__
 	/* Round up to multiple of 1M (segment size) */
 	guest_num_pages = (guest_num_pages + 0xff) & ~0xffUL;
@@ -342,7 +299,8 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 		!!((guest_num_pages * guest_page_size) % host_page_size);
 
 	if (!phys_offset) {
-		guest_test_phys_mem = (max_gfn - guest_num_pages) * guest_page_size;
+		guest_test_phys_mem = (vm_get_max_gfn(vm) -
+				       guest_num_pages) * guest_page_size;
 		guest_test_phys_mem &= ~(host_page_size - 1);
 	} else {
 		guest_test_phys_mem = phys_offset;
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -154,6 +154,10 @@ void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code);
 
 bool vm_is_unrestricted_guest(struct kvm_vm *vm);
 
+unsigned int vm_get_page_size(struct kvm_vm *vm);
+unsigned int vm_get_page_shift(struct kvm_vm *vm);
+unsigned int vm_get_max_gfn(struct kvm_vm *vm);
+
 struct kvm_userspace_memory_region *
 kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
 				 uint64_t end);
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -136,6 +136,8 @@ struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
 {
 	struct kvm_vm *vm;
 
+	DEBUG("Testing guest mode: %s\n", vm_guest_mode_string(mode));
+
 	vm = calloc(1, sizeof(*vm));
 	TEST_ASSERT(vm != NULL, "Insufficient Memory");
 
@@ -1650,3 +1652,18 @@ bool vm_is_unrestricted_guest(struct kvm_vm *vm)
 
 	return val == 'Y';
 }
+
+unsigned int vm_get_page_size(struct kvm_vm *vm)
+{
+	return vm->page_size;
+}
+
+unsigned int vm_get_page_shift(struct kvm_vm *vm)
+{
+	return vm->page_shift;
+}
+
+unsigned int vm_get_max_gfn(struct kvm_vm *vm)
+{
+	return vm->max_gfn;
+}
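
These accessors are one-liners, but they earn their keep because struct kvm_vm is private to the selftests library; tests only see an opaque forward declaration in kvm_util.h, so they cannot read vm->page_size directly. A hypothetical caller, assuming a test-defined VCPU_ID and guest_code:

	/* Sketch: vm_create_default() sets up a VM with one vCPU. */
	struct kvm_vm *vm = vm_create_default(VCPU_ID, 0, guest_code);

	printf("page size: %u  page shift: %u  max gfn: %u\n",
	       vm_get_page_size(vm), vm_get_page_shift(vm),
	       vm_get_max_gfn(vm));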