Commit 2f8ebe43 authored by Paolo Bonzini

Merge tag 'kvm-x86-selftests-6.8-rcN' of https://github.com/kvm-x86/linux into HEAD

KVM selftests fixes/cleanups (and one KVM x86 cleanup) for 6.8:

 - Remove redundant newlines from error messages.

 - Delete an unused variable in the AMX test (which causes build failures when
   compiling with -Werror).

 - Fail instead of skipping tests if open(), e.g. of /dev/kvm, fails with an
   error code other than ENOENT (a Hyper-V selftest bug resulted in an EMFILE,
   and the test eventually got skipped).

 - Fix TSC related bugs in several Hyper-V selftests.

 - Fix a bug in the dirty ring logging test where a sem_post() could be left
   pending across multiple runs, resulting in incorrect synchronization between
   the main thread and the vCPU worker thread (a standalone sketch of the
   failure mode follows the commit message).

 - Relax the dirty log split test's assertions on 4KiB mappings to fix false
   positives due to the number of mappings for memslot 0 (used for code and
   data that is NOT being dirty logged) changing, e.g. due to NUMA balancing.

 - Have KVM's gtod_is_based_on_tsc() return "bool" instead of an "int" (the
   function generates boolean values, and all callers treat the return value as
   a bool).
parents 22d0bc07 6fd78bee
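
For background on the dirty ring semaphore fix, here is a minimal standalone sketch (not part of the kernel tree) of the failure mode the new assertions guard against: sem_getvalue() directly exposes a sem_post() left dangling by a previous run, which would otherwise make the next sem_wait() return immediately instead of blocking and silently desynchronize the main thread from the vCPU worker. The test now asserts that both semaphores read zero before spawning the worker.

    /* Build with: gcc -pthread sketch.c -o sketch */
    #include <assert.h>
    #include <semaphore.h>
    #include <stdio.h>

    int main(void)
    {
        sem_t sem;
        int val;

        sem_init(&sem, 0, 0);

        /* A clean run starts with the semaphore at zero. */
        sem_getvalue(&sem, &val);
        assert(val == 0);

        /* A dangling post from a previous run leaves the value at 1... */
        sem_post(&sem);
        sem_getvalue(&sem, &val);
        printf("after dangling post: %d\n", val);   /* prints 1 */

        /*
         * ...so the next "wait" returns immediately instead of blocking,
         * letting the waiter run one iteration ahead of its peer.
         */
        sem_wait(&sem);

        sem_destroy(&sem);
        return 0;
    }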
@@ -2506,7 +2506,7 @@ static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
 }

 #ifdef CONFIG_X86_64
-static inline int gtod_is_based_on_tsc(int mode)
+static inline bool gtod_is_based_on_tsc(int mode)
 {
     return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK;
 }
...
@@ -248,7 +248,7 @@ static void *test_vcpu_run(void *arg)
         REPORT_GUEST_ASSERT(uc);
         break;
     default:
-        TEST_FAIL("Unexpected guest exit\n");
+        TEST_FAIL("Unexpected guest exit");
     }

     return NULL;
@@ -287,7 +287,7 @@ static int test_migrate_vcpu(unsigned int vcpu_idx)

     /* Allow the error where the vCPU thread is already finished */
     TEST_ASSERT(ret == 0 || ret == ESRCH,
-                "Failed to migrate the vCPU:%u to pCPU: %u; ret: %d\n",
+                "Failed to migrate the vCPU:%u to pCPU: %u; ret: %d",
                 vcpu_idx, new_pcpu, ret);

     return ret;
@@ -326,12 +326,12 @@ static void test_run(struct kvm_vm *vm)

     pthread_mutex_init(&vcpu_done_map_lock, NULL);
     vcpu_done_map = bitmap_zalloc(test_args.nr_vcpus);
-    TEST_ASSERT(vcpu_done_map, "Failed to allocate vcpu done bitmap\n");
+    TEST_ASSERT(vcpu_done_map, "Failed to allocate vcpu done bitmap");

     for (i = 0; i < (unsigned long)test_args.nr_vcpus; i++) {
         ret = pthread_create(&pt_vcpu_run[i], NULL, test_vcpu_run,
                              (void *)(unsigned long)i);
-        TEST_ASSERT(!ret, "Failed to create vCPU-%d pthread\n", i);
+        TEST_ASSERT(!ret, "Failed to create vCPU-%d pthread", i);
     }

     /* Spawn a thread to control the vCPU migrations */
@@ -340,7 +340,7 @@ static void test_run(struct kvm_vm *vm)

     ret = pthread_create(&pt_vcpu_migration, NULL,
                          test_vcpu_migration, NULL);
-    TEST_ASSERT(!ret, "Failed to create the migration pthread\n");
+    TEST_ASSERT(!ret, "Failed to create the migration pthread");
 }
@@ -384,7 +384,7 @@ static struct kvm_vm *test_vm_create(void)
         if (kvm_has_cap(KVM_CAP_COUNTER_OFFSET))
             vm_ioctl(vm, KVM_ARM_SET_COUNTER_OFFSET, &test_args.offset);
         else
-            TEST_FAIL("no support for global offset\n");
+            TEST_FAIL("no support for global offset");
     }

     for (i = 0; i < nr_vcpus; i++)
...
@@ -175,18 +175,18 @@ static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu)
         /* First 'read' should be an upper limit of the features supported */
         vcpu_get_reg(vcpu, reg_info->reg, &val);
         TEST_ASSERT(val == FW_REG_ULIMIT_VAL(reg_info->max_feat_bit),
-                    "Expected all the features to be set for reg: 0x%lx; expected: 0x%lx; read: 0x%lx\n",
+                    "Expected all the features to be set for reg: 0x%lx; expected: 0x%lx; read: 0x%lx",
                     reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit), val);

         /* Test a 'write' by disabling all the features of the register map */
         ret = __vcpu_set_reg(vcpu, reg_info->reg, 0);
         TEST_ASSERT(ret == 0,
-                    "Failed to clear all the features of reg: 0x%lx; ret: %d\n",
+                    "Failed to clear all the features of reg: 0x%lx; ret: %d",
                     reg_info->reg, errno);

         vcpu_get_reg(vcpu, reg_info->reg, &val);
         TEST_ASSERT(val == 0,
-                    "Expected all the features to be cleared for reg: 0x%lx\n", reg_info->reg);
+                    "Expected all the features to be cleared for reg: 0x%lx", reg_info->reg);

         /*
          * Test enabling a feature that's not supported.
@@ -195,7 +195,7 @@ static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu)
         if (reg_info->max_feat_bit < 63) {
             ret = __vcpu_set_reg(vcpu, reg_info->reg, BIT(reg_info->max_feat_bit + 1));
             TEST_ASSERT(ret != 0 && errno == EINVAL,
-                        "Unexpected behavior or return value (%d) while setting an unsupported feature for reg: 0x%lx\n",
+                        "Unexpected behavior or return value (%d) while setting an unsupported feature for reg: 0x%lx",
                         errno, reg_info->reg);
         }
     }
@@ -216,7 +216,7 @@ static void test_fw_regs_after_vm_start(struct kvm_vcpu *vcpu)
          */
         vcpu_get_reg(vcpu, reg_info->reg, &val);
         TEST_ASSERT(val == 0,
-                    "Expected all the features to be cleared for reg: 0x%lx\n",
+                    "Expected all the features to be cleared for reg: 0x%lx",
                     reg_info->reg);

         /*
@@ -226,7 +226,7 @@ static void test_fw_regs_after_vm_start(struct kvm_vcpu *vcpu)
          */
         ret = __vcpu_set_reg(vcpu, reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit));
         TEST_ASSERT(ret != 0 && errno == EBUSY,
-                    "Unexpected behavior or return value (%d) while setting a feature while VM is running for reg: 0x%lx\n",
+                    "Unexpected behavior or return value (%d) while setting a feature while VM is running for reg: 0x%lx",
                     errno, reg_info->reg);
     }
 }
@@ -265,7 +265,7 @@ static void test_guest_stage(struct kvm_vm **vm, struct kvm_vcpu **vcpu)
     case TEST_STAGE_HVC_IFACE_FALSE_INFO:
         break;
     default:
-        TEST_FAIL("Unknown test stage: %d\n", prev_stage);
+        TEST_FAIL("Unknown test stage: %d", prev_stage);
     }
 }
@@ -294,7 +294,7 @@ static void test_run(void)
             REPORT_GUEST_ASSERT(uc);
             break;
         default:
-            TEST_FAIL("Unexpected guest exit\n");
+            TEST_FAIL("Unexpected guest exit");
         }
     }
...
@@ -414,10 +414,10 @@ static bool punch_hole_in_backing_store(struct kvm_vm *vm,
     if (fd != -1) {
         ret = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                         0, paging_size);
-        TEST_ASSERT(ret == 0, "fallocate failed\n");
+        TEST_ASSERT(ret == 0, "fallocate failed");
     } else {
         ret = madvise(hva, paging_size, MADV_DONTNEED);
-        TEST_ASSERT(ret == 0, "madvise failed\n");
+        TEST_ASSERT(ret == 0, "madvise failed");
     }

     return true;
@@ -501,7 +501,7 @@ static bool handle_cmd(struct kvm_vm *vm, int cmd)

 void fail_vcpu_run_no_handler(int ret)
 {
-    TEST_FAIL("Unexpected vcpu run failure\n");
+    TEST_FAIL("Unexpected vcpu run failure");
 }

 void fail_vcpu_run_mmio_no_syndrome_handler(int ret)
...
@@ -178,7 +178,7 @@ static void expect_call_denied(struct kvm_vcpu *vcpu)
     struct ucall uc;

     if (get_ucall(vcpu, &uc) != UCALL_SYNC)
-        TEST_FAIL("Unexpected ucall: %lu\n", uc.cmd);
+        TEST_FAIL("Unexpected ucall: %lu", uc.cmd);

     TEST_ASSERT(uc.args[1] == SMCCC_RET_NOT_SUPPORTED,
                 "Unexpected SMCCC return code: %lu", uc.args[1]);
...
@@ -517,11 +517,11 @@ static void test_create_vpmu_vm_with_pmcr_n(uint64_t pmcr_n, bool expect_fail)

     if (expect_fail)
         TEST_ASSERT(pmcr_orig == pmcr,
-                    "PMCR.N modified by KVM to a larger value (PMCR: 0x%lx) for pmcr_n: 0x%lx\n",
+                    "PMCR.N modified by KVM to a larger value (PMCR: 0x%lx) for pmcr_n: 0x%lx",
                     pmcr, pmcr_n);
     else
         TEST_ASSERT(pmcr_n == get_pmcr_n(pmcr),
-                    "Failed to update PMCR.N to %lu (received: %lu)\n",
+                    "Failed to update PMCR.N to %lu (received: %lu)",
                     pmcr_n, get_pmcr_n(pmcr));
 }
@@ -594,12 +594,12 @@ static void run_pmregs_validity_test(uint64_t pmcr_n)
      */
     vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), &reg_val);
     TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
-                "Initial read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n",
+                "Initial read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
                 KVM_ARM64_SYS_REG(set_reg_id), reg_val);

     vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id), &reg_val);
     TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
-                "Initial read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n",
+                "Initial read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
                 KVM_ARM64_SYS_REG(clr_reg_id), reg_val);

     /*
@@ -611,12 +611,12 @@ static void run_pmregs_validity_test(uint64_t pmcr_n)
     vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), &reg_val);
     TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
-                "Read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n",
+                "Read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
                 KVM_ARM64_SYS_REG(set_reg_id), reg_val);

     vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id), &reg_val);
     TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
-                "Read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n",
+                "Read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
                 KVM_ARM64_SYS_REG(clr_reg_id), reg_val);
 }
...
@@ -45,10 +45,10 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)

     /* Let the guest access its memory */
     ret = _vcpu_run(vcpu);
-    TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
+    TEST_ASSERT(ret == 0, "vcpu_run failed: %d", ret);
     if (get_ucall(vcpu, NULL) != UCALL_SYNC) {
         TEST_ASSERT(false,
-                    "Invalid guest sync status: exit_reason=%s\n",
+                    "Invalid guest sync status: exit_reason=%s",
                     exit_reason_str(run->exit_reason));
     }
...
@@ -88,9 +88,9 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
         ret = _vcpu_run(vcpu);
         ts_diff = timespec_elapsed(start);

-        TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
+        TEST_ASSERT(ret == 0, "vcpu_run failed: %d", ret);
         TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
-                    "Invalid guest sync status: exit_reason=%s\n",
+                    "Invalid guest sync status: exit_reason=%s",
                     exit_reason_str(run->exit_reason));

         pr_debug("Got sync event from vCPU %d\n", vcpu_idx);
...
@@ -262,7 +262,7 @@ static void default_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
                 "vcpu run failed: errno=%d", err);

     TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
-                "Invalid guest sync status: exit_reason=%s\n",
+                "Invalid guest sync status: exit_reason=%s",
                 exit_reason_str(run->exit_reason));

     vcpu_handle_sync_stop();
@@ -376,7 +376,10 @@ static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,

     cleared = kvm_vm_reset_dirty_ring(vcpu->vm);

-    /* Cleared pages should be the same as collected */
+    /*
+     * Cleared pages should be the same as collected, as KVM is supposed to
+     * clear only the entries that have been harvested.
+     */
     TEST_ASSERT(cleared == count, "Reset dirty pages (%u) mismatch "
                 "with collected (%u)", cleared, count);
@@ -410,17 +413,11 @@ static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
         pr_info("vcpu continues now.\n");
     } else {
         TEST_ASSERT(false, "Invalid guest sync status: "
-                    "exit_reason=%s\n",
+                    "exit_reason=%s",
                     exit_reason_str(run->exit_reason));
     }
 }

-static void dirty_ring_before_vcpu_join(void)
-{
-    /* Kick another round of vcpu just to make sure it will quit */
-    sem_post(&sem_vcpu_cont);
-}
-
 struct log_mode {
     const char *name;
     /* Return true if this mode is supported, otherwise false */
@@ -433,7 +430,6 @@ struct log_mode {
                                uint32_t *ring_buf_idx);
     /* Hook to call when after each vcpu run */
     void (*after_vcpu_run)(struct kvm_vcpu *vcpu, int ret, int err);
-    void (*before_vcpu_join) (void);
 } log_modes[LOG_MODE_NUM] = {
     {
         .name = "dirty-log",
@@ -452,7 +448,6 @@ struct log_mode {
         .supported = dirty_ring_supported,
         .create_vm_done = dirty_ring_create_vm_done,
         .collect_dirty_pages = dirty_ring_collect_dirty_pages,
-        .before_vcpu_join = dirty_ring_before_vcpu_join,
         .after_vcpu_run = dirty_ring_after_vcpu_run,
     },
 };
@@ -513,14 +508,6 @@ static void log_mode_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
         mode->after_vcpu_run(vcpu, ret, err);
 }

-static void log_mode_before_vcpu_join(void)
-{
-    struct log_mode *mode = &log_modes[host_log_mode];
-
-    if (mode->before_vcpu_join)
-        mode->before_vcpu_join();
-}
-
 static void generate_random_array(uint64_t *guest_array, uint64_t size)
 {
     uint64_t i;
@@ -719,6 +706,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
     struct kvm_vm *vm;
     unsigned long *bmap;
     uint32_t ring_buf_idx = 0;
+    int sem_val;

     if (!log_mode_supported()) {
         print_skip("Log mode '%s' not supported",
@@ -788,12 +776,22 @@ static void run_test(enum vm_guest_mode mode, void *arg)

     /* Start the iterations */
     iteration = 1;
     sync_global_to_guest(vm, iteration);
-    host_quit = false;
+    WRITE_ONCE(host_quit, false);
     host_dirty_count = 0;
     host_clear_count = 0;
     host_track_next_count = 0;
     WRITE_ONCE(dirty_ring_vcpu_ring_full, false);

+    /*
+     * Ensure the previous iteration didn't leave a dangling semaphore, i.e.
+     * that the main task and vCPU worker were synchronized and completed
+     * verification of all iterations.
+     */
+    sem_getvalue(&sem_vcpu_stop, &sem_val);
+    TEST_ASSERT_EQ(sem_val, 0);
+    sem_getvalue(&sem_vcpu_cont, &sem_val);
+    TEST_ASSERT_EQ(sem_val, 0);
+
     pthread_create(&vcpu_thread, NULL, vcpu_worker, vcpu);

     while (iteration < p->iterations) {
@@ -819,15 +817,21 @@ static void run_test(enum vm_guest_mode mode, void *arg)
         assert(host_log_mode == LOG_MODE_DIRTY_RING ||
                atomic_read(&vcpu_sync_stop_requested) == false);
         vm_dirty_log_verify(mode, bmap);
-        sem_post(&sem_vcpu_cont);

-        iteration++;
+        /*
+         * Set host_quit before sem_vcpu_cont in the final iteration to
+         * ensure that the vCPU worker doesn't resume the guest. As
+         * above, the dirty ring test may stop and wait even when not
+         * explicitly requested to do so, i.e. would hang waiting for a
+         * "continue" if it's allowed to resume the guest.
+         */
+        if (++iteration == p->iterations)
+            WRITE_ONCE(host_quit, true);
+
+        sem_post(&sem_vcpu_cont);
         sync_global_to_guest(vm, iteration);
     }

-    /* Tell the vcpu thread to quit */
-    host_quit = true;
-    log_mode_before_vcpu_join();
     pthread_join(vcpu_thread, NULL);

     pr_info("Total bits checked: dirty (%"PRIu64"), clear (%"PRIu64"), "
...
@@ -152,7 +152,7 @@ static void check_supported(struct vcpu_reg_list *c)
             continue;

         __TEST_REQUIRE(kvm_has_cap(s->capability),
-                       "%s: %s not available, skipping tests\n",
+                       "%s: %s not available, skipping tests",
                        config_name(c), s->name);
     }
 }
...
@@ -98,7 +98,7 @@ static void ucall_abort(const char *assert_msg, const char *expected_assert_msg)
     int offset = len_str - len_substr;

     TEST_ASSERT(len_substr <= len_str,
-                "Expected '%s' to be a substring of '%s'\n",
+                "Expected '%s' to be a substring of '%s'",
                 assert_msg, expected_assert_msg);

     TEST_ASSERT(strcmp(&assert_msg[offset], expected_assert_msg) == 0,
@@ -116,7 +116,7 @@ static void run_test(struct kvm_vcpu *vcpu, const char *expected_printf,
     vcpu_run(vcpu);

     TEST_ASSERT(run->exit_reason == UCALL_EXIT_REASON,
-                "Unexpected exit reason: %u (%s),\n",
+                "Unexpected exit reason: %u (%s),",
                 run->exit_reason, exit_reason_str(run->exit_reason));

     switch (get_ucall(vcpu, &uc)) {
@@ -161,11 +161,11 @@ static void test_limits(void)
     vcpu_run(vcpu);

     TEST_ASSERT(run->exit_reason == UCALL_EXIT_REASON,
-                "Unexpected exit reason: %u (%s),\n",
+                "Unexpected exit reason: %u (%s),",
                 run->exit_reason, exit_reason_str(run->exit_reason));

     TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_ABORT,
-                "Unexpected ucall command: %lu, Expected: %u (UCALL_ABORT)\n",
+                "Unexpected ucall command: %lu, Expected: %u (UCALL_ABORT)",
                 uc.cmd, UCALL_ABORT);

     kvm_vm_free(vm);
...
@@ -41,7 +41,7 @@ static void *run_vcpu(void *arg)

     vcpu_run(vcpu);

-    TEST_ASSERT(false, "%s: exited with reason %d: %s\n",
+    TEST_ASSERT(false, "%s: exited with reason %d: %s",
                 __func__, run->exit_reason,
                 exit_reason_str(run->exit_reason));
     pthread_exit(NULL);
@@ -55,7 +55,7 @@ static void *sleeping_thread(void *arg)
         fd = open("/dev/null", O_RDWR);
         close(fd);
     }
-    TEST_ASSERT(false, "%s: exited\n", __func__);
+    TEST_ASSERT(false, "%s: exited", __func__);
     pthread_exit(NULL);
 }
@@ -118,7 +118,7 @@ static void run_test(uint32_t run)
     for (i = 0; i < VCPU_NUM; ++i)
         check_join(threads[i], &b);
     /* Should not be reached */
-    TEST_ASSERT(false, "%s: [%d] child escaped the ninja\n", __func__, run);
+    TEST_ASSERT(false, "%s: [%d] child escaped the ninja", __func__, run);
 }

 void wait_for_child_setup(pid_t pid)
...
@@ -195,4 +195,6 @@ __printf(3, 4) int guest_snprintf(char *buf, int n, const char *fmt, ...);

 char *strdup_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2), nonnull(1)));

+char *sys_get_cur_clocksource(void);
+
 #endif /* SELFTEST_KVM_TEST_UTIL_H */
...
@@ -1271,4 +1271,6 @@ void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 #define PFERR_GUEST_PAGE_MASK BIT_ULL(PFERR_GUEST_PAGE_BIT)
 #define PFERR_IMPLICIT_ACCESS BIT_ULL(PFERR_IMPLICIT_ACCESS_BIT)

+bool sys_clocksource_is_based_on_tsc(void);
+
 #endif /* SELFTEST_KVM_PROCESSOR_H */
...
@@ -65,7 +65,7 @@ int main(int argc, char *argv[])
         int r = setrlimit(RLIMIT_NOFILE, &rl);

         __TEST_REQUIRE(r >= 0,
-                       "RLIMIT_NOFILE hard limit is too low (%d, wanted %d)\n",
+                       "RLIMIT_NOFILE hard limit is too low (%d, wanted %d)",
                        old_rlim_max, nr_fds_wanted);
     } else {
         TEST_ASSERT(!setrlimit(RLIMIT_NOFILE, &rl), "setrlimit() failed!");
...
@@ -204,9 +204,9 @@ static void *vcpu_worker(void *data)
         ret = _vcpu_run(vcpu);
         ts_diff = timespec_elapsed(start);

-        TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
+        TEST_ASSERT(ret == 0, "vcpu_run failed: %d", ret);
         TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
-                    "Invalid guest sync status: exit_reason=%s\n",
+                    "Invalid guest sync status: exit_reason=%s",
                     exit_reason_str(vcpu->run->exit_reason));

         pr_debug("Got sync event from vCPU %d\n", vcpu->id);
...
@@ -398,7 +398,7 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
     int i;

     TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
-                " num: %u\n", num);
+                " num: %u", num);

     va_start(ap, num);
...
@@ -38,7 +38,7 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs,
     struct list_head *iter;
     unsigned int nr_gic_pages, nr_vcpus_created = 0;

-    TEST_ASSERT(nr_vcpus, "Number of vCPUs cannot be empty\n");
+    TEST_ASSERT(nr_vcpus, "Number of vCPUs cannot be empty");

     /*
      * Make sure that the caller is infact calling this
@@ -47,7 +47,7 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs,
     list_for_each(iter, &vm->vcpus)
         nr_vcpus_created++;
     TEST_ASSERT(nr_vcpus == nr_vcpus_created,
-                "Number of vCPUs requested (%u) doesn't match with the ones created for the VM (%u)\n",
+                "Number of vCPUs requested (%u) doesn't match with the ones created for the VM (%u)",
                 nr_vcpus, nr_vcpus_created);

     /* Distributor setup */
...
@@ -184,7 +184,7 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename)
                     "Seek to program segment offset failed,\n"
                     " program header idx: %u errno: %i\n"
                     " offset_rv: 0x%jx\n"
-                    " expected: 0x%jx\n",
+                    " expected: 0x%jx",
                     n1, errno, (intmax_t) offset_rv,
                     (intmax_t) phdr.p_offset);
         test_read(fd, addr_gva2hva(vm, phdr.p_vaddr),
...
@@ -27,7 +27,8 @@ int open_path_or_exit(const char *path, int flags)
     int fd;

     fd = open(path, flags);
-    __TEST_REQUIRE(fd >= 0, "%s not available (errno: %d)", path, errno);
+    __TEST_REQUIRE(fd >= 0 || errno != ENOENT, "Cannot open %s: %s", path, strerror(errno));
+    TEST_ASSERT(fd >= 0, "Failed to open '%s'", path);

     return fd;
 }
@@ -320,7 +321,7 @@ static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
     uint64_t nr_pages;

     TEST_ASSERT(nr_runnable_vcpus,
-                "Use vm_create_barebones() for VMs that _never_ have vCPUs\n");
+                "Use vm_create_barebones() for VMs that _never_ have vCPUs");

     TEST_ASSERT(nr_runnable_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
                 "nr_vcpus = %d too large for host, max-vcpus = %d",
@@ -491,7 +492,7 @@ void kvm_pin_this_task_to_pcpu(uint32_t pcpu)
     CPU_ZERO(&mask);
     CPU_SET(pcpu, &mask);
     r = sched_setaffinity(0, sizeof(mask), &mask);
-    TEST_ASSERT(!r, "sched_setaffinity() failed for pCPU '%u'.\n", pcpu);
+    TEST_ASSERT(!r, "sched_setaffinity() failed for pCPU '%u'.", pcpu);
 }

 static uint32_t parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask)
@@ -499,7 +500,7 @@ static uint32_t parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask)
     uint32_t pcpu = atoi_non_negative("CPU number", cpu_str);

     TEST_ASSERT(CPU_ISSET(pcpu, allowed_mask),
-                "Not allowed to run on pCPU '%d', check cgroups?\n", pcpu);
+                "Not allowed to run on pCPU '%d', check cgroups?", pcpu);
     return pcpu;
 }
@@ -529,7 +530,7 @@ void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
     int i, r;

     cpu_list = strdup(pcpus_string);
-    TEST_ASSERT(cpu_list, "strdup() allocation failed.\n");
+    TEST_ASSERT(cpu_list, "strdup() allocation failed.");

     r = sched_getaffinity(0, sizeof(allowed_mask), &allowed_mask);
     TEST_ASSERT(!r, "sched_getaffinity() failed");
@@ -538,7 +539,7 @@ void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],

     /* 1. Get all pcpus for vcpus. */
     for (i = 0; i < nr_vcpus; i++) {
-        TEST_ASSERT(cpu, "pCPU not provided for vCPU '%d'\n", i);
+        TEST_ASSERT(cpu, "pCPU not provided for vCPU '%d'", i);
         vcpu_to_pcpu[i] = parse_pcpu(cpu, &allowed_mask);
         cpu = strtok(NULL, delim);
     }
@@ -1057,7 +1058,7 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
     TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION2 IOCTL failed,\n"
                 " rc: %i errno: %i\n"
                 " slot: %u flags: 0x%x\n"
-                " guest_phys_addr: 0x%lx size: 0x%lx guest_memfd: %d\n",
+                " guest_phys_addr: 0x%lx size: 0x%lx guest_memfd: %d",
                 ret, errno, slot, flags,
                 guest_paddr, (uint64_t) region->region.memory_size,
                 region->region.guest_memfd);
@@ -1222,7 +1223,7 @@ void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size,
         len = min_t(uint64_t, end - gpa, region->region.memory_size - offset);

         ret = fallocate(region->region.guest_memfd, mode, fd_offset, len);
-        TEST_ASSERT(!ret, "fallocate() failed to %s at %lx (len = %lu), fd = %d, mode = %x, offset = %lx\n",
+        TEST_ASSERT(!ret, "fallocate() failed to %s at %lx (len = %lu), fd = %d, mode = %x, offset = %lx",
                     punch_hole ? "punch hole" : "allocate", gpa, len,
                     region->region.guest_memfd, mode, fd_offset);
     }
@@ -1265,7 +1266,7 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
     struct kvm_vcpu *vcpu;

     /* Confirm a vcpu with the specified id doesn't already exist. */
-    TEST_ASSERT(!vcpu_exists(vm, vcpu_id), "vCPU%d already exists\n", vcpu_id);
+    TEST_ASSERT(!vcpu_exists(vm, vcpu_id), "vCPU%d already exists", vcpu_id);

     /* Allocate and initialize new vcpu structure. */
     vcpu = calloc(1, sizeof(*vcpu));
...
@@ -192,7 +192,7 @@ struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
     TEST_ASSERT(guest_num_pages < region_end_gfn,
                 "Requested more guest memory than address space allows.\n"
                 " guest pages: %" PRIx64 " max gfn: %" PRIx64
-                " nr_vcpus: %d wss: %" PRIx64 "]\n",
+                " nr_vcpus: %d wss: %" PRIx64 "]",
                 guest_num_pages, region_end_gfn - 1, nr_vcpus, vcpu_memory_bytes);

     args->gpa = (region_end_gfn - guest_num_pages - 1) * args->guest_page_size;
...
@@ -327,7 +327,7 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
     int i;

     TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
-                " num: %u\n", num);
+                " num: %u", num);

     va_start(ap, num);
...
@@ -198,7 +198,7 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
     int i;

     TEST_ASSERT(num >= 1 && num <= 5, "Unsupported number of args,\n"
-                " num: %u\n",
+                " num: %u",
                 num);

     va_start(ap, num);
...
@@ -392,3 +392,28 @@ char *strdup_printf(const char *fmt, ...)

     return str;
 }
+
+#define CLOCKSOURCE_PATH "/sys/devices/system/clocksource/clocksource0/current_clocksource"
+
+char *sys_get_cur_clocksource(void)
+{
+    char *clk_name;
+    struct stat st;
+    FILE *fp;
+
+    fp = fopen(CLOCKSOURCE_PATH, "r");
+    TEST_ASSERT(fp, "failed to open clocksource file, errno: %d", errno);
+
+    TEST_ASSERT(!fstat(fileno(fp), &st), "failed to stat clocksource file, errno: %d",
+                errno);
+
+    clk_name = malloc(st.st_size);
+    TEST_ASSERT(clk_name, "failed to allocate buffer to read file");
+
+    TEST_ASSERT(fgets(clk_name, st.st_size, fp), "failed to read clocksource file: %d",
+                ferror(fp));
+
+    fclose(fp);
+
+    return clk_name;
+}
...
@@ -69,7 +69,7 @@ static void *uffd_handler_thread_fn(void *arg)
         if (pollfd[1].revents & POLLIN) {
             r = read(pollfd[1].fd, &tmp_chr, 1);
             TEST_ASSERT(r == 1,
-                        "Error reading pipefd in UFFD thread\n");
+                        "Error reading pipefd in UFFD thread");
             break;
         }
...
@@ -170,10 +170,10 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
          * this level.
          */
         TEST_ASSERT(current_level != target_level,
-                    "Cannot create hugepage at level: %u, vaddr: 0x%lx\n",
+                    "Cannot create hugepage at level: %u, vaddr: 0x%lx",
                     current_level, vaddr);
         TEST_ASSERT(!(*pte & PTE_LARGE_MASK),
-                    "Cannot create page table at level: %u, vaddr: 0x%lx\n",
+                    "Cannot create page table at level: %u, vaddr: 0x%lx",
                     current_level, vaddr);
     }
     return pte;
@@ -220,7 +220,7 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)

     /* Fill in page table entry. */
     pte = virt_get_pte(vm, pde, vaddr, PG_LEVEL_4K);
     TEST_ASSERT(!(*pte & PTE_PRESENT_MASK),
-                "PTE already present for 4k page at vaddr: 0x%lx\n", vaddr);
+                "PTE already present for 4k page at vaddr: 0x%lx", vaddr);
     *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);
 }
@@ -253,7 +253,7 @@ static bool vm_is_target_pte(uint64_t *pte, int *level, int current_level)
     if (*pte & PTE_LARGE_MASK) {
         TEST_ASSERT(*level == PG_LEVEL_NONE ||
                     *level == current_level,
-                    "Unexpected hugepage at level %d\n", current_level);
+                    "Unexpected hugepage at level %d", current_level);
         *level = current_level;
     }
@@ -825,7 +825,7 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
     struct kvm_regs regs;

     TEST_ASSERT(num >= 1 && num <= 6, "Unsupported number of args,\n"
-                " num: %u\n",
+                " num: %u",
                 num);

     va_start(ap, num);
@@ -1299,3 +1299,14 @@ void kvm_selftest_arch_init(void)
     host_cpu_is_intel = this_cpu_is_intel();
     host_cpu_is_amd = this_cpu_is_amd();
 }
+
+bool sys_clocksource_is_based_on_tsc(void)
+{
+    char *clk_name = sys_get_cur_clocksource();
+    bool ret = !strcmp(clk_name, "tsc\n") ||
+               !strcmp(clk_name, "hyperv_clocksource_tsc_page\n");
+
+    free(clk_name);
+
+    return ret;
+}
...
@@ -54,7 +54,7 @@ int vcpu_enable_evmcs(struct kvm_vcpu *vcpu)
     /* KVM should return supported EVMCS version range */
     TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
                 (evmcs_ver & 0xff) > 0,
-                "Incorrect EVMCS version range: %x:%x\n",
+                "Incorrect EVMCS version range: %x:%x",
                 evmcs_ver & 0xff, evmcs_ver >> 8);

     return evmcs_ver;
@@ -387,10 +387,10 @@ static void nested_create_pte(struct kvm_vm *vm,
          * this level.
          */
         TEST_ASSERT(current_level != target_level,
-                    "Cannot create hugepage at level: %u, nested_paddr: 0x%lx\n",
+                    "Cannot create hugepage at level: %u, nested_paddr: 0x%lx",
                     current_level, nested_paddr);
         TEST_ASSERT(!pte->page_size,
-                    "Cannot create page table at level: %u, nested_paddr: 0x%lx\n",
+                    "Cannot create page table at level: %u, nested_paddr: 0x%lx",
                     current_level, nested_paddr);
     }
 }
...
@@ -45,7 +45,7 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
     /* Let the guest access its memory until a stop signal is received */
     while (!READ_ONCE(memstress_args.stop_vcpus)) {
         ret = _vcpu_run(vcpu);
-        TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
+        TEST_ASSERT(ret == 0, "vcpu_run failed: %d", ret);

         if (get_ucall(vcpu, NULL) == UCALL_SYNC)
             continue;
...
@@ -175,11 +175,11 @@ static void wait_for_vcpu(void)
     struct timespec ts;

     TEST_ASSERT(!clock_gettime(CLOCK_REALTIME, &ts),
-                "clock_gettime() failed: %d\n", errno);
+                "clock_gettime() failed: %d", errno);

     ts.tv_sec += 2;
     TEST_ASSERT(!sem_timedwait(&vcpu_ready, &ts),
-                "sem_timedwait() failed: %d\n", errno);
+                "sem_timedwait() failed: %d", errno);
 }

 static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages)
@@ -336,7 +336,7 @@ static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots,
         gpa = vm_phy_pages_alloc(data->vm, npages, guest_addr, slot);
         TEST_ASSERT(gpa == guest_addr,
-                    "vm_phy_pages_alloc() failed\n");
+                    "vm_phy_pages_alloc() failed");

         data->hva_slots[slot - 1] = addr_gpa2hva(data->vm, guest_addr);
         memset(data->hva_slots[slot - 1], 0, npages * guest_page_size);
...
@@ -177,7 +177,7 @@ void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
         /* Double check whether the desired extension was enabled */
         __TEST_REQUIRE(vcpu_has_ext(vcpu, feature),
-                       "%s not available, skipping tests\n", s->name);
+                       "%s not available, skipping tests", s->name);
     }
 }
...
@@ -245,7 +245,7 @@ int main(int argc, char *argv[])
         } while (snapshot != atomic_read(&seq_cnt));

         TEST_ASSERT(rseq_cpu == cpu,
-                    "rseq CPU = %d, sched CPU = %d\n", rseq_cpu, cpu);
+                    "rseq CPU = %d, sched CPU = %d", rseq_cpu, cpu);
     }
@@ -256,7 +256,7 @@ int main(int argc, char *argv[])
      * migrations given the 1us+ delay in the migration task.
      */
     TEST_ASSERT(i > (NR_TASK_MIGRATIONS / 2),
-                "Only performed %d KVM_RUNs, task stalled too much?\n", i);
+                "Only performed %d KVM_RUNs, task stalled too much?", i);

     pthread_join(migration_thread, NULL);
...
@@ -78,7 +78,7 @@ static void assert_noirq(struct kvm_vcpu *vcpu)
      * (notably, the emergency call interrupt we have injected) should
      * be cleared by the resets, so this should be 0.
      */
-    TEST_ASSERT(irqs >= 0, "Could not fetch IRQs: errno %d\n", errno);
+    TEST_ASSERT(irqs >= 0, "Could not fetch IRQs: errno %d", errno);
     TEST_ASSERT(!irqs, "IRQ pending");
 }
@@ -199,7 +199,7 @@ static void inject_irq(struct kvm_vcpu *vcpu)
     irq->type = KVM_S390_INT_EMERGENCY;
     irq->u.emerg.code = vcpu->id;
     irqs = __vcpu_ioctl(vcpu, KVM_S390_SET_IRQ_STATE, &irq_state);
-    TEST_ASSERT(irqs >= 0, "Error injecting EMERGENCY IRQ errno %d\n", errno);
+    TEST_ASSERT(irqs >= 0, "Error injecting EMERGENCY IRQ errno %d", errno);
 }

 static struct kvm_vm *create_vm(struct kvm_vcpu **vcpu)
...
@@ -39,13 +39,13 @@ static void guest_code(void)
 #define REG_COMPARE(reg) \
     TEST_ASSERT(left->reg == right->reg, \
                 "Register " #reg \
-                " values did not match: 0x%llx, 0x%llx\n", \
+                " values did not match: 0x%llx, 0x%llx", \
                 left->reg, right->reg)

 #define REG_COMPARE32(reg) \
     TEST_ASSERT(left->reg == right->reg, \
                 "Register " #reg \
-                " values did not match: 0x%x, 0x%x\n", \
+                " values did not match: 0x%x, 0x%x", \
                 left->reg, right->reg)
@@ -82,14 +82,14 @@ void test_read_invalid(struct kvm_vcpu *vcpu)
     run->kvm_valid_regs = INVALID_SYNC_FIELD;
     rv = _vcpu_run(vcpu);
     TEST_ASSERT(rv < 0 && errno == EINVAL,
-                "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
+                "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d",
                 rv);
     run->kvm_valid_regs = 0;

     run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
     rv = _vcpu_run(vcpu);
     TEST_ASSERT(rv < 0 && errno == EINVAL,
-                "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
+                "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d",
                 rv);
     run->kvm_valid_regs = 0;
 }
@@ -103,14 +103,14 @@ void test_set_invalid(struct kvm_vcpu *vcpu)
     run->kvm_dirty_regs = INVALID_SYNC_FIELD;
     rv = _vcpu_run(vcpu);
     TEST_ASSERT(rv < 0 && errno == EINVAL,
-                "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
+                "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d",
                 rv);
     run->kvm_dirty_regs = 0;

     run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
     rv = _vcpu_run(vcpu);
     TEST_ASSERT(rv < 0 && errno == EINVAL,
-                "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
+                "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d",
                 rv);
     run->kvm_dirty_regs = 0;
 }
@@ -125,12 +125,12 @@ void test_req_and_verify_all_valid_regs(struct kvm_vcpu *vcpu)
     /* Request and verify all valid register sets. */
     run->kvm_valid_regs = TEST_SYNC_FIELDS;
     rv = _vcpu_run(vcpu);
-    TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv);
+    TEST_ASSERT(rv == 0, "vcpu_run failed: %d", rv);
     TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC);
     TEST_ASSERT(run->s390_sieic.icptcode == 4 &&
                 (run->s390_sieic.ipa >> 8) == 0x83 &&
                 (run->s390_sieic.ipb >> 16) == 0x501,
-                "Unexpected interception code: ic=%u, ipa=0x%x, ipb=0x%x\n",
+                "Unexpected interception code: ic=%u, ipa=0x%x, ipb=0x%x",
                 run->s390_sieic.icptcode, run->s390_sieic.ipa,
                 run->s390_sieic.ipb);
@@ -161,7 +161,7 @@ void test_set_and_verify_various_reg_values(struct kvm_vcpu *vcpu)
     }

     rv = _vcpu_run(vcpu);
-    TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv);
+    TEST_ASSERT(rv == 0, "vcpu_run failed: %d", rv);
     TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC);
     TEST_ASSERT(run->s.regs.gprs[11] == 0xBAD1DEA + 1,
                 "r11 sync regs value incorrect 0x%llx.",
@@ -193,7 +193,7 @@ void test_clear_kvm_dirty_regs_bits(struct kvm_vcpu *vcpu)
     run->s.regs.gprs[11] = 0xDEADBEEF;
     run->s.regs.diag318 = 0x4B1D;
     rv = _vcpu_run(vcpu);
-    TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv);
+    TEST_ASSERT(rv == 0, "vcpu_run failed: %d", rv);
     TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC);
     TEST_ASSERT(run->s.regs.gprs[11] != 0xDEADBEEF,
                 "r11 sync regs value incorrect 0x%llx.",
...
@@ -98,11 +98,11 @@ static void wait_for_vcpu(void)
     struct timespec ts;

     TEST_ASSERT(!clock_gettime(CLOCK_REALTIME, &ts),
-                "clock_gettime() failed: %d\n", errno);
+                "clock_gettime() failed: %d", errno);

     ts.tv_sec += 2;
     TEST_ASSERT(!sem_timedwait(&vcpu_ready, &ts),
-                "sem_timedwait() failed: %d\n", errno);
+                "sem_timedwait() failed: %d", errno);

     /* Wait for the vCPU thread to reenter the guest. */
     usleep(100000);
@@ -302,7 +302,7 @@ static void test_delete_memory_region(void)
     if (run->exit_reason == KVM_EXIT_INTERNAL_ERROR)
         TEST_ASSERT(regs.rip >= final_rip_start &&
                     regs.rip < final_rip_end,
-                    "Bad rip, expected 0x%lx - 0x%lx, got 0x%llx\n",
+                    "Bad rip, expected 0x%lx - 0x%lx, got 0x%llx",
                     final_rip_start, final_rip_end, regs.rip);

     kvm_vm_free(vm);
...
@@ -108,7 +108,7 @@ static void enter_guest(struct kvm_vcpu *vcpu)
             handle_abort(&uc);
             return;
         default:
-            TEST_ASSERT(0, "unhandled ucall %ld\n",
+            TEST_ASSERT(0, "unhandled ucall %ld",
                         get_ucall(vcpu, &uc));
         }
     }
...
@@ -221,7 +221,7 @@ int main(int argc, char *argv[])
     vm_vaddr_t amx_cfg, tiledata, xstate;
     struct ucall uc;
     u32 amx_offset;
-    int stage, ret;
+    int ret;

     /*
      * Note, all off-by-default features must be enabled before anything
@@ -263,7 +263,7 @@ int main(int argc, char *argv[])
     memset(addr_gva2hva(vm, xstate), 0, PAGE_SIZE * DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE));
     vcpu_args_set(vcpu, 3, amx_cfg, tiledata, xstate);

-    for (stage = 1; ; stage++) {
+    for (;;) {
         vcpu_run(vcpu);
         TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
@@ -296,7 +296,7 @@ int main(int argc, char *argv[])
             void *tiles_data = (void *)addr_gva2hva(vm, tiledata);
             /* Only check TMM0 register, 1 tile */
             ret = memcmp(amx_start, tiles_data, TILE_SIZE);
-            TEST_ASSERT(ret == 0, "memcmp failed, ret=%d\n", ret);
+            TEST_ASSERT(ret == 0, "memcmp failed, ret=%d", ret);
             kvm_x86_state_cleanup(state);
             break;
         case 9:
...
@@ -84,7 +84,7 @@ static void compare_cpuids(const struct kvm_cpuid2 *cpuid1,
         TEST_ASSERT(e1->function == e2->function &&
                     e1->index == e2->index && e1->flags == e2->flags,
-                    "CPUID entries[%d] mismtach: 0x%x.%d.%x vs. 0x%x.%d.%x\n",
+                    "CPUID entries[%d] mismtach: 0x%x.%d.%x vs. 0x%x.%d.%x",
                     i, e1->function, e1->index, e1->flags,
                     e2->function, e2->index, e2->flags);
@@ -170,7 +170,7 @@ static void test_get_cpuid2(struct kvm_vcpu *vcpu)
     vcpu_ioctl(vcpu, KVM_GET_CPUID2, cpuid);
     TEST_ASSERT(cpuid->nent == vcpu->cpuid->nent,
-                "KVM didn't update nent on success, wanted %u, got %u\n",
+                "KVM didn't update nent on success, wanted %u, got %u",
                 vcpu->cpuid->nent, cpuid->nent);

     for (i = 0; i < vcpu->cpuid->nent; i++) {
...
@@ -92,7 +92,6 @@ static void run_test(enum vm_guest_mode mode, void *unused)
     uint64_t host_num_pages;
     uint64_t pages_per_slot;
     int i;
-    uint64_t total_4k_pages;
     struct kvm_page_stats stats_populated;
     struct kvm_page_stats stats_dirty_logging_enabled;
     struct kvm_page_stats stats_dirty_pass[ITERATIONS];
@@ -107,6 +106,9 @@ static void run_test(enum vm_guest_mode mode, void *unused)
     guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
     host_num_pages = vm_num_host_pages(mode, guest_num_pages);
     pages_per_slot = host_num_pages / SLOTS;
+    TEST_ASSERT_EQ(host_num_pages, pages_per_slot * SLOTS);
+    TEST_ASSERT(!(host_num_pages % 512),
+                "Number of pages, '%lu' not a multiple of 2MiB", host_num_pages);

     bitmaps = memstress_alloc_bitmaps(SLOTS, pages_per_slot);
@@ -165,10 +167,8 @@ static void run_test(enum vm_guest_mode mode, void *unused)
     memstress_free_bitmaps(bitmaps, SLOTS);
     memstress_destroy_vm(vm);

-    /* Make assertions about the page counts. */
-    total_4k_pages = stats_populated.pages_4k;
-    total_4k_pages += stats_populated.pages_2m * 512;
-    total_4k_pages += stats_populated.pages_1g * 512 * 512;
+    TEST_ASSERT_EQ((stats_populated.pages_2m * 512 +
+                    stats_populated.pages_1g * 512 * 512), host_num_pages);

     /*
      * Check that all huge pages were split. Since large pages can only
@@ -180,19 +180,22 @@ static void run_test(enum vm_guest_mode mode, void *unused)
      */
     if (dirty_log_manual_caps) {
         TEST_ASSERT_EQ(stats_clear_pass[0].hugepages, 0);
-        TEST_ASSERT_EQ(stats_clear_pass[0].pages_4k, total_4k_pages);
+        TEST_ASSERT(stats_clear_pass[0].pages_4k >= host_num_pages,
+                    "Expected at least '%lu' 4KiB pages, found only '%lu'",
+                    host_num_pages, stats_clear_pass[0].pages_4k);
         TEST_ASSERT_EQ(stats_dirty_logging_enabled.hugepages, stats_populated.hugepages);
     } else {
         TEST_ASSERT_EQ(stats_dirty_logging_enabled.hugepages, 0);
-        TEST_ASSERT_EQ(stats_dirty_logging_enabled.pages_4k, total_4k_pages);
+        TEST_ASSERT(stats_dirty_logging_enabled.pages_4k >= host_num_pages,
+                    "Expected at least '%lu' 4KiB pages, found only '%lu'",
+                    host_num_pages, stats_dirty_logging_enabled.pages_4k);
     }

     /*
      * Once dirty logging is disabled and the vCPUs have touched all their
-     * memory again, the page counts should be the same as they were
+     * memory again, the hugepage counts should be the same as they were
      * right after initial population of memory.
      */
-    TEST_ASSERT_EQ(stats_populated.pages_4k, stats_repopulated.pages_4k);
     TEST_ASSERT_EQ(stats_populated.pages_2m, stats_repopulated.pages_2m);
     TEST_ASSERT_EQ(stats_populated.pages_1g, stats_repopulated.pages_1g);
 }
......
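On the relaxed page-splitting assertions above: memslot 0 backs the test's own code and data and is not dirty logged, so its 4KiB mapping count can legitimately drift while the test runs (the host may split or recombine those mappings behind the test's back). The dirty-logged slots, by contrast, must contribute at least host_num_pages 4KiB mappings once every huge page is shattered, hence the '>=' lower bound and the new 2MiB-multiple sanity check. A back-of-envelope helper for the arithmetic, assuming x86 page sizes (names invented):

        #include <stdint.h>

        #define PTES_PER_2M     512ULL                  /* 2MiB / 4KiB */
        #define PTES_PER_1G     (512ULL * 512ULL)       /* 1GiB / 4KiB */

        /* 4KiB mappings implied by a mix of page sizes once fully split. */
        static uint64_t pages_as_4k(uint64_t pages_4k, uint64_t pages_2m,
                                    uint64_t pages_1g)
        {
                return pages_4k + pages_2m * PTES_PER_2M +
                       pages_1g * PTES_PER_1G;
        }

The new TEST_ASSERT_EQ() applies the same identity in reverse, requiring the populated 2MiB/1GiB counts to exactly cover host_num_pages.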
...@@ -41,7 +41,7 @@ static inline void handle_flds_emulation_failure_exit(struct kvm_vcpu *vcpu) ...@@ -41,7 +41,7 @@ static inline void handle_flds_emulation_failure_exit(struct kvm_vcpu *vcpu)
insn_bytes = run->emulation_failure.insn_bytes; insn_bytes = run->emulation_failure.insn_bytes;
TEST_ASSERT(insn_bytes[0] == 0xd9 && insn_bytes[1] == 0, TEST_ASSERT(insn_bytes[0] == 0xd9 && insn_bytes[1] == 0,
"Expected 'flds [eax]', opcode '0xd9 0x00', got opcode 0x%02x 0x%02x\n", "Expected 'flds [eax]', opcode '0xd9 0x00', got opcode 0x%02x 0x%02x",
insn_bytes[0], insn_bytes[1]); insn_bytes[0], insn_bytes[1]);
vcpu_regs_get(vcpu, &regs); vcpu_regs_get(vcpu, &regs);
......
...@@ -212,6 +212,7 @@ int main(void) ...@@ -212,6 +212,7 @@ int main(void)
int stage; int stage;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_TIME)); TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_TIME));
TEST_REQUIRE(sys_clocksource_is_based_on_tsc());
vm = vm_create_with_one_vcpu(&vcpu, guest_main); vm = vm_create_with_one_vcpu(&vcpu, guest_main);
...@@ -220,7 +221,7 @@ int main(void) ...@@ -220,7 +221,7 @@ int main(void)
tsc_page_gva = vm_vaddr_alloc_page(vm); tsc_page_gva = vm_vaddr_alloc_page(vm);
memset(addr_gva2hva(vm, tsc_page_gva), 0x0, getpagesize()); memset(addr_gva2hva(vm, tsc_page_gva), 0x0, getpagesize());
TEST_ASSERT((addr_gva2gpa(vm, tsc_page_gva) & (getpagesize() - 1)) == 0, TEST_ASSERT((addr_gva2gpa(vm, tsc_page_gva) & (getpagesize() - 1)) == 0,
"TSC page has to be page aligned\n"); "TSC page has to be page aligned");
vcpu_args_set(vcpu, 2, tsc_page_gva, addr_gva2gpa(vm, tsc_page_gva)); vcpu_args_set(vcpu, 2, tsc_page_gva, addr_gva2gpa(vm, tsc_page_gva));
host_check_tsc_msr_rdtsc(vcpu); host_check_tsc_msr_rdtsc(vcpu);
...@@ -237,7 +238,7 @@ int main(void) ...@@ -237,7 +238,7 @@ int main(void)
break; break;
case UCALL_DONE: case UCALL_DONE:
/* Keep in sync with guest_main() */ /* Keep in sync with guest_main() */
TEST_ASSERT(stage == 11, "Testing ended prematurely, stage %d\n", TEST_ASSERT(stage == 11, "Testing ended prematurely, stage %d",
stage); stage);
goto out; goto out;
default: default:
......
...@@ -454,7 +454,7 @@ static void guest_test_msrs_access(void) ...@@ -454,7 +454,7 @@ static void guest_test_msrs_access(void)
case 44: case 44:
/* MSR is not available when CPUID feature bit is unset */ /* MSR is not available when CPUID feature bit is unset */
if (!has_invtsc) if (!has_invtsc)
continue; goto next_stage;
msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL; msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
msr->write = false; msr->write = false;
msr->fault_expected = true; msr->fault_expected = true;
...@@ -462,7 +462,7 @@ static void guest_test_msrs_access(void) ...@@ -462,7 +462,7 @@ static void guest_test_msrs_access(void)
case 45: case 45:
/* MSR is available when CPUID feature bit is set */ /* MSR is available when CPUID feature bit is set */
if (!has_invtsc) if (!has_invtsc)
continue; goto next_stage;
vcpu_set_cpuid_feature(vcpu, HV_ACCESS_TSC_INVARIANT); vcpu_set_cpuid_feature(vcpu, HV_ACCESS_TSC_INVARIANT);
msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL; msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
msr->write = false; msr->write = false;
...@@ -471,7 +471,7 @@ static void guest_test_msrs_access(void) ...@@ -471,7 +471,7 @@ static void guest_test_msrs_access(void)
case 46: case 46:
/* Writing bits other than 0 is forbidden */ /* Writing bits other than 0 is forbidden */
if (!has_invtsc) if (!has_invtsc)
continue; goto next_stage;
msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL; msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
msr->write = true; msr->write = true;
msr->write_val = 0xdeadbeef; msr->write_val = 0xdeadbeef;
...@@ -480,7 +480,7 @@ static void guest_test_msrs_access(void) ...@@ -480,7 +480,7 @@ static void guest_test_msrs_access(void)
case 47: case 47:
/* Setting bit 0 enables the feature */ /* Setting bit 0 enables the feature */
if (!has_invtsc) if (!has_invtsc)
continue; goto next_stage;
msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL; msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
msr->write = true; msr->write = true;
msr->write_val = 1; msr->write_val = 1;
...@@ -513,6 +513,7 @@ static void guest_test_msrs_access(void) ...@@ -513,6 +513,7 @@ static void guest_test_msrs_access(void)
return; return;
} }
next_stage:
stage++; stage++;
kvm_vm_free(vm); kvm_vm_free(vm);
} }
......
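The continue-to-goto conversion above is a control-flow fix, not churn: stage++ and kvm_vm_free(vm) sit at the bottom of the loop, so bailing out of an invariant-TSC stage with 'continue' on hosts without the feature skipped both, spinning on the same stage indefinitely and leaking the VM. A condensed sketch of the fixed flow, with stage bodies elided (helper names follow the hunks above; the loop shape is an assumption):

        #include "kvm_util.h"   /* selftests helper library (assumed path) */

        static void msrs_access_loop(bool has_invtsc)
        {
                struct kvm_vcpu *vcpu;
                struct kvm_vm *vm;
                int stage = 0;

                for (;;) {
                        vm = vm_create_with_one_vcpu(&vcpu, NULL);

                        switch (stage) {
                        case 44 ... 47:
                                if (!has_invtsc)
                                        goto next_stage; /* 'continue' skipped the cleanup below */
                                /* ... set up the invariant-TSC MSR access ... */
                                break;
                        default:
                                break;
                        }

                        /* ... run the vCPU and check the expected result ... */

        next_stage:
                        stage++;
                        kvm_vm_free(vm);
                }
        }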
...@@ -289,7 +289,7 @@ int main(int argc, char *argv[]) ...@@ -289,7 +289,7 @@ int main(int argc, char *argv[])
switch (get_ucall(vcpu[0], &uc)) { switch (get_ucall(vcpu[0], &uc)) {
case UCALL_SYNC: case UCALL_SYNC:
TEST_ASSERT(uc.args[1] == stage, TEST_ASSERT(uc.args[1] == stage,
"Unexpected stage: %ld (%d expected)\n", "Unexpected stage: %ld (%d expected)",
uc.args[1], stage); uc.args[1], stage);
break; break;
case UCALL_DONE: case UCALL_DONE:
......
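Several of these tests share the host-side ucall handshake that the hunk above asserts on: the guest reports its current stage in args[1] of a UCALL_SYNC, and the host cross-checks it against its own counter. A condensed sketch of the recurring pattern (not any one test verbatim):

        #include "kvm_util.h"   /* assumed selftests include */

        static void run_stages(struct kvm_vcpu *vcpu)
        {
                struct ucall uc;
                int stage;

                for (stage = 1; ; stage++) {
                        vcpu_run(vcpu);

                        switch (get_ucall(vcpu, &uc)) {
                        case UCALL_SYNC:
                                TEST_ASSERT(uc.args[1] == stage,
                                            "Unexpected stage: %ld (%d expected)",
                                            uc.args[1], stage);
                                break;
                        case UCALL_ABORT:
                                REPORT_GUEST_ASSERT(uc);        /* does not return */
                        case UCALL_DONE:
                                return;
                        default:
                                TEST_FAIL("Unhandled ucall: %lu", uc.cmd);
                        }
                }
        }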
...@@ -658,7 +658,7 @@ int main(int argc, char *argv[]) ...@@ -658,7 +658,7 @@ int main(int argc, char *argv[])
switch (get_ucall(vcpu[0], &uc)) { switch (get_ucall(vcpu[0], &uc)) {
case UCALL_SYNC: case UCALL_SYNC:
TEST_ASSERT(uc.args[1] == stage, TEST_ASSERT(uc.args[1] == stage,
"Unexpected stage: %ld (%d expected)\n", "Unexpected stage: %ld (%d expected)",
uc.args[1], stage); uc.args[1], stage);
break; break;
case UCALL_ABORT: case UCALL_ABORT:
......
...@@ -92,7 +92,7 @@ static void setup_clock(struct kvm_vm *vm, struct test_case *test_case) ...@@ -92,7 +92,7 @@ static void setup_clock(struct kvm_vm *vm, struct test_case *test_case)
break; break;
} while (errno == EINTR); } while (errno == EINTR);
TEST_ASSERT(!r, "clock_gettime() failed: %d\n", r); TEST_ASSERT(!r, "clock_gettime() failed: %d", r);
data.realtime = ts.tv_sec * NSEC_PER_SEC; data.realtime = ts.tv_sec * NSEC_PER_SEC;
data.realtime += ts.tv_nsec; data.realtime += ts.tv_nsec;
...@@ -127,47 +127,11 @@ static void enter_guest(struct kvm_vcpu *vcpu) ...@@ -127,47 +127,11 @@ static void enter_guest(struct kvm_vcpu *vcpu)
handle_abort(&uc); handle_abort(&uc);
return; return;
default: default:
TEST_ASSERT(0, "unhandled ucall: %ld\n", uc.cmd); TEST_ASSERT(0, "unhandled ucall: %ld", uc.cmd);
} }
} }
} }
#define CLOCKSOURCE_PATH "/sys/devices/system/clocksource/clocksource0/current_clocksource"
static void check_clocksource(void)
{
char *clk_name;
struct stat st;
FILE *fp;
fp = fopen(CLOCKSOURCE_PATH, "r");
if (!fp) {
pr_info("failed to open clocksource file: %d; assuming TSC.\n",
errno);
return;
}
if (fstat(fileno(fp), &st)) {
pr_info("failed to stat clocksource file: %d; assuming TSC.\n",
errno);
goto out;
}
clk_name = malloc(st.st_size);
TEST_ASSERT(clk_name, "failed to allocate buffer to read file\n");
if (!fgets(clk_name, st.st_size, fp)) {
pr_info("failed to read clocksource file: %d; assuming TSC.\n",
ferror(fp));
goto out;
}
TEST_ASSERT(!strncmp(clk_name, "tsc\n", st.st_size),
"clocksource not supported: %s", clk_name);
out:
fclose(fp);
}
int main(void) int main(void)
{ {
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
...@@ -179,7 +143,7 @@ int main(void) ...@@ -179,7 +143,7 @@ int main(void)
flags = kvm_check_cap(KVM_CAP_ADJUST_CLOCK); flags = kvm_check_cap(KVM_CAP_ADJUST_CLOCK);
TEST_REQUIRE(flags & KVM_CLOCK_REALTIME); TEST_REQUIRE(flags & KVM_CLOCK_REALTIME);
check_clocksource(); TEST_REQUIRE(sys_clocksource_is_based_on_tsc());
vm = vm_create_with_one_vcpu(&vcpu, guest_main); vm = vm_create_with_one_vcpu(&vcpu, guest_main);
......
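The open-coded check_clocksource() deleted above (and system_has_stable_tsc(), deleted further down) give way to a single shared library helper. Its implementation is not part of this diff; a plausible sketch, assuming it also accepts Hyper-V's TSC-page clocksource as TSC-based (that list of names is an assumption):

        #include <stdbool.h>
        #include <stdio.h>
        #include <string.h>

        static bool sys_clocksource_is_based_on_tsc(void)
        {
                FILE *fp = fopen("/sys/devices/system/clocksource/clocksource0/current_clocksource", "r");
                char name[64] = "";
                bool ret = false;

                if (!fp)
                        return false;

                if (fgets(name, sizeof(name), fp))
                        ret = !strcmp(name, "tsc\n") ||
                              !strcmp(name, "hyperv_clocksource_tsc_page\n");

                fclose(fp);
                return ret;
        }

Note the semantic tightening: the old helper merely warned and "assumed TSC" when sysfs was unreadable, whereas TEST_REQUIRE(sys_clocksource_is_based_on_tsc()) now skips the test unless the clocksource is affirmatively TSC-based.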
...@@ -257,9 +257,9 @@ int main(int argc, char **argv) ...@@ -257,9 +257,9 @@ int main(int argc, char **argv)
TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_DISABLE_NX_HUGE_PAGES)); TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_DISABLE_NX_HUGE_PAGES));
__TEST_REQUIRE(token == MAGIC_TOKEN, __TEST_REQUIRE(token == MAGIC_TOKEN,
"This test must be run with the magic token %d.\n" "This test must be run with the magic token via '-t %d'.\n"
"This is done by nx_huge_pages_test.sh, which\n" "Running via nx_huge_pages_test.sh, which also handles "
"also handles environment setup for the test.", MAGIC_TOKEN); "environment setup, is strongly recommended.", MAGIC_TOKEN);
run_test(reclaim_period_ms, false, reboot_permissions); run_test(reclaim_period_ms, false, reboot_permissions);
run_test(reclaim_period_ms, true, reboot_permissions); run_test(reclaim_period_ms, true, reboot_permissions);
......
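The __TEST_REQUIRE() used above is the message-carrying form of TEST_REQUIRE(): an unmet requirement skips the test in kselftest terms rather than failing it, here when the magic token is missing. A sketch of the pair, assuming they bottom out in ksft_exit_skip() (the exact definition is an assumption):

        /* Sketch; ksft_exit_skip() comes from the kselftest harness. */
        #define __TEST_REQUIRE(f, fmt, ...)                             \
        do {                                                            \
                if (!(f))                                               \
                        ksft_exit_skip("- " fmt "\n", ##__VA_ARGS__);   \
        } while (0)

        #define TEST_REQUIRE(f) __TEST_REQUIRE(f, "Requirement not met: %s", #f)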
...@@ -44,7 +44,7 @@ static void test_msr_platform_info_enabled(struct kvm_vcpu *vcpu) ...@@ -44,7 +44,7 @@ static void test_msr_platform_info_enabled(struct kvm_vcpu *vcpu)
get_ucall(vcpu, &uc); get_ucall(vcpu, &uc);
TEST_ASSERT(uc.cmd == UCALL_SYNC, TEST_ASSERT(uc.cmd == UCALL_SYNC,
"Received ucall other than UCALL_SYNC: %lu\n", uc.cmd); "Received ucall other than UCALL_SYNC: %lu", uc.cmd);
TEST_ASSERT((uc.args[1] & MSR_PLATFORM_INFO_MAX_TURBO_RATIO) == TEST_ASSERT((uc.args[1] & MSR_PLATFORM_INFO_MAX_TURBO_RATIO) ==
MSR_PLATFORM_INFO_MAX_TURBO_RATIO, MSR_PLATFORM_INFO_MAX_TURBO_RATIO,
"Expected MSR_PLATFORM_INFO to have max turbo ratio mask: %i.", "Expected MSR_PLATFORM_INFO to have max turbo ratio mask: %i.",
......
...@@ -866,7 +866,7 @@ static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, uint8_t idx, ...@@ -866,7 +866,7 @@ static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, uint8_t idx,
* userspace doesn't set any pmu filter. * userspace doesn't set any pmu filter.
*/ */
count = run_vcpu_to_sync(vcpu); count = run_vcpu_to_sync(vcpu);
TEST_ASSERT(count, "Unexpected count value: %ld\n", count); TEST_ASSERT(count, "Unexpected count value: %ld", count);
for (i = 0; i < BIT(nr_fixed_counters); i++) { for (i = 0; i < BIT(nr_fixed_counters); i++) {
bitmap = BIT(i); bitmap = BIT(i);
......
...@@ -91,7 +91,7 @@ static void sev_migrate_from(struct kvm_vm *dst, struct kvm_vm *src) ...@@ -91,7 +91,7 @@ static void sev_migrate_from(struct kvm_vm *dst, struct kvm_vm *src)
int ret; int ret;
ret = __sev_migrate_from(dst, src); ret = __sev_migrate_from(dst, src);
TEST_ASSERT(!ret, "Migration failed, ret: %d, errno: %d\n", ret, errno); TEST_ASSERT(!ret, "Migration failed, ret: %d, errno: %d", ret, errno);
} }
static void test_sev_migrate_from(bool es) static void test_sev_migrate_from(bool es)
...@@ -113,7 +113,7 @@ static void test_sev_migrate_from(bool es) ...@@ -113,7 +113,7 @@ static void test_sev_migrate_from(bool es)
/* Migrate the guest back to the original VM. */ /* Migrate the guest back to the original VM. */
ret = __sev_migrate_from(src_vm, dst_vms[NR_MIGRATE_TEST_VMS - 1]); ret = __sev_migrate_from(src_vm, dst_vms[NR_MIGRATE_TEST_VMS - 1]);
TEST_ASSERT(ret == -1 && errno == EIO, TEST_ASSERT(ret == -1 && errno == EIO,
"VM that was migrated from should be dead. ret %d, errno: %d\n", ret, "VM that was migrated from should be dead. ret %d, errno: %d", ret,
errno); errno);
kvm_vm_free(src_vm); kvm_vm_free(src_vm);
...@@ -172,7 +172,7 @@ static void test_sev_migrate_parameters(void) ...@@ -172,7 +172,7 @@ static void test_sev_migrate_parameters(void)
vm_no_sev = aux_vm_create(true); vm_no_sev = aux_vm_create(true);
ret = __sev_migrate_from(vm_no_vcpu, vm_no_sev); ret = __sev_migrate_from(vm_no_vcpu, vm_no_sev);
TEST_ASSERT(ret == -1 && errno == EINVAL, TEST_ASSERT(ret == -1 && errno == EINVAL,
"Migrations require SEV enabled. ret %d, errno: %d\n", ret, "Migrations require SEV enabled. ret %d, errno: %d", ret,
errno); errno);
if (!have_sev_es) if (!have_sev_es)
...@@ -187,25 +187,25 @@ static void test_sev_migrate_parameters(void) ...@@ -187,25 +187,25 @@ static void test_sev_migrate_parameters(void)
ret = __sev_migrate_from(sev_vm, sev_es_vm); ret = __sev_migrate_from(sev_vm, sev_es_vm);
TEST_ASSERT( TEST_ASSERT(
ret == -1 && errno == EINVAL, ret == -1 && errno == EINVAL,
"Should not be able migrate to SEV enabled VM. ret: %d, errno: %d\n", "Should not be able migrate to SEV enabled VM. ret: %d, errno: %d",
ret, errno); ret, errno);
ret = __sev_migrate_from(sev_es_vm, sev_vm); ret = __sev_migrate_from(sev_es_vm, sev_vm);
TEST_ASSERT( TEST_ASSERT(
ret == -1 && errno == EINVAL, ret == -1 && errno == EINVAL,
"Should not be able migrate to SEV-ES enabled VM. ret: %d, errno: %d\n", "Should not be able migrate to SEV-ES enabled VM. ret: %d, errno: %d",
ret, errno); ret, errno);
ret = __sev_migrate_from(vm_no_vcpu, sev_es_vm); ret = __sev_migrate_from(vm_no_vcpu, sev_es_vm);
TEST_ASSERT( TEST_ASSERT(
ret == -1 && errno == EINVAL, ret == -1 && errno == EINVAL,
"SEV-ES migrations require same number of vCPUS. ret: %d, errno: %d\n", "SEV-ES migrations require same number of vCPUS. ret: %d, errno: %d",
ret, errno); ret, errno);
ret = __sev_migrate_from(vm_no_vcpu, sev_es_vm_no_vmsa); ret = __sev_migrate_from(vm_no_vcpu, sev_es_vm_no_vmsa);
TEST_ASSERT( TEST_ASSERT(
ret == -1 && errno == EINVAL, ret == -1 && errno == EINVAL,
"SEV-ES migrations require UPDATE_VMSA. ret %d, errno: %d\n", "SEV-ES migrations require UPDATE_VMSA. ret %d, errno: %d",
ret, errno); ret, errno);
kvm_vm_free(sev_vm); kvm_vm_free(sev_vm);
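The SEV checks above and below all funnel through __sev_migrate_from(), which is not shown in this diff. Judging by the 'ret == -1 && errno == ...' assertions, it returns the raw result instead of asserting; a plausible sketch (KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM is KVM's real capability name, but the wrapper's shape is an assumption):

        #include "kvm_util.h"   /* assumed selftests include */

        /*
         * Return 0 on success, or -1 with errno set on failure, so callers
         * can assert on both the result and the expected errno.
         */
        static int __sev_migrate_from(struct kvm_vm *dst, struct kvm_vm *src)
        {
                return __vm_enable_cap(dst, KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM,
                                       src->fd);
        }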
...@@ -227,7 +227,7 @@ static void sev_mirror_create(struct kvm_vm *dst, struct kvm_vm *src) ...@@ -227,7 +227,7 @@ static void sev_mirror_create(struct kvm_vm *dst, struct kvm_vm *src)
int ret; int ret;
ret = __sev_mirror_create(dst, src); ret = __sev_mirror_create(dst, src);
TEST_ASSERT(!ret, "Copying context failed, ret: %d, errno: %d\n", ret, errno); TEST_ASSERT(!ret, "Copying context failed, ret: %d, errno: %d", ret, errno);
} }
static void verify_mirror_allowed_cmds(int vm_fd) static void verify_mirror_allowed_cmds(int vm_fd)
...@@ -259,7 +259,7 @@ static void verify_mirror_allowed_cmds(int vm_fd) ...@@ -259,7 +259,7 @@ static void verify_mirror_allowed_cmds(int vm_fd)
ret = __sev_ioctl(vm_fd, cmd_id, NULL, &fw_error); ret = __sev_ioctl(vm_fd, cmd_id, NULL, &fw_error);
TEST_ASSERT( TEST_ASSERT(
ret == -1 && errno == EINVAL, ret == -1 && errno == EINVAL,
"Should not be able call command: %d. ret: %d, errno: %d\n", "Should not be able call command: %d. ret: %d, errno: %d",
cmd_id, ret, errno); cmd_id, ret, errno);
} }
...@@ -301,18 +301,18 @@ static void test_sev_mirror_parameters(void) ...@@ -301,18 +301,18 @@ static void test_sev_mirror_parameters(void)
ret = __sev_mirror_create(sev_vm, sev_vm); ret = __sev_mirror_create(sev_vm, sev_vm);
TEST_ASSERT( TEST_ASSERT(
ret == -1 && errno == EINVAL, ret == -1 && errno == EINVAL,
"Should not be able copy context to self. ret: %d, errno: %d\n", "Should not be able copy context to self. ret: %d, errno: %d",
ret, errno); ret, errno);
ret = __sev_mirror_create(vm_no_vcpu, vm_with_vcpu); ret = __sev_mirror_create(vm_no_vcpu, vm_with_vcpu);
TEST_ASSERT(ret == -1 && errno == EINVAL, TEST_ASSERT(ret == -1 && errno == EINVAL,
"Copy context requires SEV enabled. ret %d, errno: %d\n", ret, "Copy context requires SEV enabled. ret %d, errno: %d", ret,
errno); errno);
ret = __sev_mirror_create(vm_with_vcpu, sev_vm); ret = __sev_mirror_create(vm_with_vcpu, sev_vm);
TEST_ASSERT( TEST_ASSERT(
ret == -1 && errno == EINVAL, ret == -1 && errno == EINVAL,
"SEV copy context requires no vCPUS on the destination. ret: %d, errno: %d\n", "SEV copy context requires no vCPUS on the destination. ret: %d, errno: %d",
ret, errno); ret, errno);
if (!have_sev_es) if (!have_sev_es)
...@@ -322,13 +322,13 @@ static void test_sev_mirror_parameters(void) ...@@ -322,13 +322,13 @@ static void test_sev_mirror_parameters(void)
ret = __sev_mirror_create(sev_vm, sev_es_vm); ret = __sev_mirror_create(sev_vm, sev_es_vm);
TEST_ASSERT( TEST_ASSERT(
ret == -1 && errno == EINVAL, ret == -1 && errno == EINVAL,
"Should not be able copy context to SEV enabled VM. ret: %d, errno: %d\n", "Should not be able copy context to SEV enabled VM. ret: %d, errno: %d",
ret, errno); ret, errno);
ret = __sev_mirror_create(sev_es_vm, sev_vm); ret = __sev_mirror_create(sev_es_vm, sev_vm);
TEST_ASSERT( TEST_ASSERT(
ret == -1 && errno == EINVAL, ret == -1 && errno == EINVAL,
"Should not be able copy context to SEV-ES enabled VM. ret: %d, errno: %d\n", "Should not be able copy context to SEV-ES enabled VM. ret: %d, errno: %d",
ret, errno); ret, errno);
kvm_vm_free(sev_es_vm); kvm_vm_free(sev_es_vm);
......
...@@ -74,7 +74,7 @@ int main(int argc, char *argv[]) ...@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
MEM_REGION_SIZE / PAGE_SIZE, 0); MEM_REGION_SIZE / PAGE_SIZE, 0);
gpa = vm_phy_pages_alloc(vm, MEM_REGION_SIZE / PAGE_SIZE, gpa = vm_phy_pages_alloc(vm, MEM_REGION_SIZE / PAGE_SIZE,
MEM_REGION_GPA, MEM_REGION_SLOT); MEM_REGION_GPA, MEM_REGION_SLOT);
TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc\n"); TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc");
virt_map(vm, MEM_REGION_GVA, MEM_REGION_GPA, 1); virt_map(vm, MEM_REGION_GVA, MEM_REGION_GPA, 1);
hva = addr_gpa2hva(vm, MEM_REGION_GPA); hva = addr_gpa2hva(vm, MEM_REGION_GPA);
memset(hva, 0, PAGE_SIZE); memset(hva, 0, PAGE_SIZE);
...@@ -102,7 +102,7 @@ int main(int argc, char *argv[]) ...@@ -102,7 +102,7 @@ int main(int argc, char *argv[])
case UCALL_DONE: case UCALL_DONE:
break; break;
default: default:
TEST_FAIL("Unrecognized ucall: %lu\n", uc.cmd); TEST_FAIL("Unrecognized ucall: %lu", uc.cmd);
} }
kvm_vm_free(vm); kvm_vm_free(vm);
......
...@@ -46,7 +46,7 @@ static void compare_regs(struct kvm_regs *left, struct kvm_regs *right) ...@@ -46,7 +46,7 @@ static void compare_regs(struct kvm_regs *left, struct kvm_regs *right)
#define REG_COMPARE(reg) \ #define REG_COMPARE(reg) \
TEST_ASSERT(left->reg == right->reg, \ TEST_ASSERT(left->reg == right->reg, \
"Register " #reg \ "Register " #reg \
" values did not match: 0x%llx, 0x%llx\n", \ " values did not match: 0x%llx, 0x%llx", \
left->reg, right->reg) left->reg, right->reg)
REG_COMPARE(rax); REG_COMPARE(rax);
REG_COMPARE(rbx); REG_COMPARE(rbx);
...@@ -230,14 +230,14 @@ int main(int argc, char *argv[]) ...@@ -230,14 +230,14 @@ int main(int argc, char *argv[])
run->kvm_valid_regs = INVALID_SYNC_FIELD; run->kvm_valid_regs = INVALID_SYNC_FIELD;
rv = _vcpu_run(vcpu); rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL, TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n", "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d",
rv); rv);
run->kvm_valid_regs = 0; run->kvm_valid_regs = 0;
run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS; run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
rv = _vcpu_run(vcpu); rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL, TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n", "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d",
rv); rv);
run->kvm_valid_regs = 0; run->kvm_valid_regs = 0;
...@@ -245,14 +245,14 @@ int main(int argc, char *argv[]) ...@@ -245,14 +245,14 @@ int main(int argc, char *argv[])
run->kvm_dirty_regs = INVALID_SYNC_FIELD; run->kvm_dirty_regs = INVALID_SYNC_FIELD;
rv = _vcpu_run(vcpu); rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL, TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n", "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d",
rv); rv);
run->kvm_dirty_regs = 0; run->kvm_dirty_regs = 0;
run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS; run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
rv = _vcpu_run(vcpu); rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL, TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n", "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d",
rv); rv);
run->kvm_dirty_regs = 0; run->kvm_dirty_regs = 0;
......
...@@ -143,7 +143,7 @@ static void run_vcpu_expect_gp(struct kvm_vcpu *vcpu) ...@@ -143,7 +143,7 @@ static void run_vcpu_expect_gp(struct kvm_vcpu *vcpu)
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_SYNC, TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_SYNC,
"Expect UCALL_SYNC\n"); "Expect UCALL_SYNC");
TEST_ASSERT(uc.args[1] == SYNC_GP, "#GP is expected."); TEST_ASSERT(uc.args[1] == SYNC_GP, "#GP is expected.");
printf("vCPU received GP in guest.\n"); printf("vCPU received GP in guest.\n");
} }
...@@ -188,7 +188,7 @@ static void *run_ucna_injection(void *arg) ...@@ -188,7 +188,7 @@ static void *run_ucna_injection(void *arg)
TEST_ASSERT_KVM_EXIT_REASON(params->vcpu, KVM_EXIT_IO); TEST_ASSERT_KVM_EXIT_REASON(params->vcpu, KVM_EXIT_IO);
TEST_ASSERT(get_ucall(params->vcpu, &uc) == UCALL_SYNC, TEST_ASSERT(get_ucall(params->vcpu, &uc) == UCALL_SYNC,
"Expect UCALL_SYNC\n"); "Expect UCALL_SYNC");
TEST_ASSERT(uc.args[1] == SYNC_FIRST_UCNA, "Injecting first UCNA."); TEST_ASSERT(uc.args[1] == SYNC_FIRST_UCNA, "Injecting first UCNA.");
printf("Injecting first UCNA at %#x.\n", FIRST_UCNA_ADDR); printf("Injecting first UCNA at %#x.\n", FIRST_UCNA_ADDR);
...@@ -198,7 +198,7 @@ static void *run_ucna_injection(void *arg) ...@@ -198,7 +198,7 @@ static void *run_ucna_injection(void *arg)
TEST_ASSERT_KVM_EXIT_REASON(params->vcpu, KVM_EXIT_IO); TEST_ASSERT_KVM_EXIT_REASON(params->vcpu, KVM_EXIT_IO);
TEST_ASSERT(get_ucall(params->vcpu, &uc) == UCALL_SYNC, TEST_ASSERT(get_ucall(params->vcpu, &uc) == UCALL_SYNC,
"Expect UCALL_SYNC\n"); "Expect UCALL_SYNC");
TEST_ASSERT(uc.args[1] == SYNC_SECOND_UCNA, "Injecting second UCNA."); TEST_ASSERT(uc.args[1] == SYNC_SECOND_UCNA, "Injecting second UCNA.");
printf("Injecting second UCNA at %#x.\n", SECOND_UCNA_ADDR); printf("Injecting second UCNA at %#x.\n", SECOND_UCNA_ADDR);
...@@ -208,7 +208,7 @@ static void *run_ucna_injection(void *arg) ...@@ -208,7 +208,7 @@ static void *run_ucna_injection(void *arg)
TEST_ASSERT_KVM_EXIT_REASON(params->vcpu, KVM_EXIT_IO); TEST_ASSERT_KVM_EXIT_REASON(params->vcpu, KVM_EXIT_IO);
if (get_ucall(params->vcpu, &uc) == UCALL_ABORT) { if (get_ucall(params->vcpu, &uc) == UCALL_ABORT) {
TEST_ASSERT(false, "vCPU assertion failure: %s.\n", TEST_ASSERT(false, "vCPU assertion failure: %s.",
(const char *)uc.args[0]); (const char *)uc.args[0]);
} }
......
...@@ -71,7 +71,7 @@ int main(int argc, char *argv[]) ...@@ -71,7 +71,7 @@ int main(int argc, char *argv[])
break; break;
TEST_ASSERT(run->io.port == 0x80, TEST_ASSERT(run->io.port == 0x80,
"Expected I/O at port 0x80, got port 0x%x\n", run->io.port); "Expected I/O at port 0x80, got port 0x%x", run->io.port);
/* /*
* Modify the rep string count in RCX: 2 => 1 and 3 => 8192. * Modify the rep string count in RCX: 2 => 1 and 3 => 8192.
......
...@@ -99,7 +99,7 @@ int main(int argc, char *argv[]) ...@@ -99,7 +99,7 @@ int main(int argc, char *argv[])
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR); TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR);
TEST_ASSERT(run->internal.suberror == TEST_ASSERT(run->internal.suberror ==
KVM_INTERNAL_ERROR_EMULATION, KVM_INTERNAL_ERROR_EMULATION,
"Got internal suberror other than KVM_INTERNAL_ERROR_EMULATION: %u\n", "Got internal suberror other than KVM_INTERNAL_ERROR_EMULATION: %u",
run->internal.suberror); run->internal.suberror);
break; break;
} }
......
...@@ -128,17 +128,17 @@ int main(int argc, char *argv[]) ...@@ -128,17 +128,17 @@ int main(int argc, char *argv[])
*/ */
kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap); kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
if (uc.args[1]) { if (uc.args[1]) {
TEST_ASSERT(test_bit(0, bmap), "Page 0 incorrectly reported clean\n"); TEST_ASSERT(test_bit(0, bmap), "Page 0 incorrectly reported clean");
TEST_ASSERT(host_test_mem[0] == 1, "Page 0 not written by guest\n"); TEST_ASSERT(host_test_mem[0] == 1, "Page 0 not written by guest");
} else { } else {
TEST_ASSERT(!test_bit(0, bmap), "Page 0 incorrectly reported dirty\n"); TEST_ASSERT(!test_bit(0, bmap), "Page 0 incorrectly reported dirty");
TEST_ASSERT(host_test_mem[0] == 0xaaaaaaaaaaaaaaaaULL, "Page 0 written by guest\n"); TEST_ASSERT(host_test_mem[0] == 0xaaaaaaaaaaaaaaaaULL, "Page 0 written by guest");
} }
TEST_ASSERT(!test_bit(1, bmap), "Page 1 incorrectly reported dirty\n"); TEST_ASSERT(!test_bit(1, bmap), "Page 1 incorrectly reported dirty");
TEST_ASSERT(host_test_mem[4096 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 1 written by guest\n"); TEST_ASSERT(host_test_mem[4096 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 1 written by guest");
TEST_ASSERT(!test_bit(2, bmap), "Page 2 incorrectly reported dirty\n"); TEST_ASSERT(!test_bit(2, bmap), "Page 2 incorrectly reported dirty");
TEST_ASSERT(host_test_mem[8192 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 2 written by guest\n"); TEST_ASSERT(host_test_mem[8192 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 2 written by guest");
break; break;
case UCALL_DONE: case UCALL_DONE:
done = true; done = true;
......
...@@ -28,7 +28,7 @@ static void __run_vcpu_with_invalid_state(struct kvm_vcpu *vcpu) ...@@ -28,7 +28,7 @@ static void __run_vcpu_with_invalid_state(struct kvm_vcpu *vcpu)
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR); TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR);
TEST_ASSERT(run->emulation_failure.suberror == KVM_INTERNAL_ERROR_EMULATION, TEST_ASSERT(run->emulation_failure.suberror == KVM_INTERNAL_ERROR_EMULATION,
"Expected emulation failure, got %d\n", "Expected emulation failure, got %d",
run->emulation_failure.suberror); run->emulation_failure.suberror);
} }
......
...@@ -116,23 +116,6 @@ static void l1_guest_code(struct vmx_pages *vmx_pages) ...@@ -116,23 +116,6 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
GUEST_DONE(); GUEST_DONE();
} }
static bool system_has_stable_tsc(void)
{
bool tsc_is_stable;
FILE *fp;
char buf[4];
fp = fopen("/sys/devices/system/clocksource/clocksource0/current_clocksource", "r");
if (fp == NULL)
return false;
tsc_is_stable = fgets(buf, sizeof(buf), fp) &&
!strncmp(buf, "tsc", sizeof(buf));
fclose(fp);
return tsc_is_stable;
}
int main(int argc, char *argv[]) int main(int argc, char *argv[])
{ {
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
...@@ -148,7 +131,7 @@ int main(int argc, char *argv[]) ...@@ -148,7 +131,7 @@ int main(int argc, char *argv[])
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX)); TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
TEST_REQUIRE(kvm_has_cap(KVM_CAP_TSC_CONTROL)); TEST_REQUIRE(kvm_has_cap(KVM_CAP_TSC_CONTROL));
TEST_REQUIRE(system_has_stable_tsc()); TEST_REQUIRE(sys_clocksource_is_based_on_tsc());
/* /*
* We set L1's scale factor to be a random number from 2 to 10. * We set L1's scale factor to be a random number from 2 to 10.
......
...@@ -216,7 +216,7 @@ static void *vcpu_thread(void *arg) ...@@ -216,7 +216,7 @@ static void *vcpu_thread(void *arg)
"Halting vCPU halted %lu times, woke %lu times, received %lu IPIs.\n" "Halting vCPU halted %lu times, woke %lu times, received %lu IPIs.\n"
"Halter TPR=%#x PPR=%#x LVR=%#x\n" "Halter TPR=%#x PPR=%#x LVR=%#x\n"
"Migrations attempted: %lu\n" "Migrations attempted: %lu\n"
"Migrations completed: %lu\n", "Migrations completed: %lu",
vcpu->id, (const char *)uc.args[0], vcpu->id, (const char *)uc.args[0],
params->data->ipis_sent, params->data->hlt_count, params->data->ipis_sent, params->data->hlt_count,
params->data->wake_count, params->data->wake_count,
...@@ -288,7 +288,7 @@ void do_migrations(struct test_data_page *data, int run_secs, int delay_usecs, ...@@ -288,7 +288,7 @@ void do_migrations(struct test_data_page *data, int run_secs, int delay_usecs,
} }
TEST_ASSERT(nodes > 1, TEST_ASSERT(nodes > 1,
"Did not find at least 2 numa nodes. Can't do migration\n"); "Did not find at least 2 numa nodes. Can't do migration");
fprintf(stderr, "Migrating amongst %d nodes found\n", nodes); fprintf(stderr, "Migrating amongst %d nodes found\n", nodes);
...@@ -347,7 +347,7 @@ void do_migrations(struct test_data_page *data, int run_secs, int delay_usecs, ...@@ -347,7 +347,7 @@ void do_migrations(struct test_data_page *data, int run_secs, int delay_usecs,
wake_count != data->wake_count, wake_count != data->wake_count,
"IPI, HLT and wake count have not increased " "IPI, HLT and wake count have not increased "
"in the last %lu seconds. " "in the last %lu seconds. "
"HLTer is likely hung.\n", interval_secs); "HLTer is likely hung.", interval_secs);
ipis_sent = data->ipis_sent; ipis_sent = data->ipis_sent;
hlt_count = data->hlt_count; hlt_count = data->hlt_count;
...@@ -381,7 +381,7 @@ void get_cmdline_args(int argc, char *argv[], int *run_secs, ...@@ -381,7 +381,7 @@ void get_cmdline_args(int argc, char *argv[], int *run_secs,
"-m adds calls to migrate_pages while vCPUs are running." "-m adds calls to migrate_pages while vCPUs are running."
" Default is no migrations.\n" " Default is no migrations.\n"
"-d <delay microseconds> - delay between migrate_pages() calls." "-d <delay microseconds> - delay between migrate_pages() calls."
" Default is %d microseconds.\n", " Default is %d microseconds.",
DEFAULT_RUN_SECS, DEFAULT_DELAY_USECS); DEFAULT_RUN_SECS, DEFAULT_DELAY_USECS);
} }
} }
......
...@@ -116,7 +116,7 @@ int main(int argc, char *argv[]) ...@@ -116,7 +116,7 @@ int main(int argc, char *argv[])
vcpu_run(vcpu); vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n", "Unexpected exit reason: %u (%s),",
run->exit_reason, run->exit_reason,
exit_reason_str(run->exit_reason)); exit_reason_str(run->exit_reason));
......
...@@ -29,7 +29,7 @@ int main(int argc, char *argv[]) ...@@ -29,7 +29,7 @@ int main(int argc, char *argv[])
xss_val = vcpu_get_msr(vcpu, MSR_IA32_XSS); xss_val = vcpu_get_msr(vcpu, MSR_IA32_XSS);
TEST_ASSERT(xss_val == 0, TEST_ASSERT(xss_val == 0,
"MSR_IA32_XSS should be initialized to zero\n"); "MSR_IA32_XSS should be initialized to zero");
vcpu_set_msr(vcpu, MSR_IA32_XSS, xss_val); vcpu_set_msr(vcpu, MSR_IA32_XSS, xss_val);
......