Commit b4694260 authored by Sean Christopherson's avatar Sean Christopherson Committed by Paolo Bonzini

KVM: selftests: Convert userspace_msr_exit_test away from VCPU_ID

Convert userspace_msr_exit_test to use vm_create_with_one_vcpu() and pass
around a 'struct kvm_vcpu' object instead of using a global VCPU_ID.
Note, this is a "functional" change in the sense that the test now
creates a vCPU with vcpu_id==0 instead of vcpu_id==1.  The non-zero
VCPU_ID was 100% arbitrary and added little to no validation coverage.
If testing non-zero vCPU IDs is desirable for generic tests, that can be
done in the future by tweaking the VM creation helpers.

Opportunistically use vcpu_run() instead of _vcpu_run() with an open
coded assert that KVM_RUN succeeded.  Fix minor coding style violations
too.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 21c602e6
...@@ -17,7 +17,6 @@ ...@@ -17,7 +17,6 @@
#define KVM_FEP_LENGTH 5 #define KVM_FEP_LENGTH 5
static int fep_available = 1; static int fep_available = 1;
#define VCPU_ID 1
#define MSR_NON_EXISTENT 0x474f4f00 #define MSR_NON_EXISTENT 0x474f4f00
static u64 deny_bits = 0; static u64 deny_bits = 0;
...@@ -395,31 +394,22 @@ static void guest_ud_handler(struct ex_regs *regs) ...@@ -395,31 +394,22 @@ static void guest_ud_handler(struct ex_regs *regs)
regs->rip += KVM_FEP_LENGTH; regs->rip += KVM_FEP_LENGTH;
} }
static void run_guest(struct kvm_vm *vm) static void check_for_guest_assert(struct kvm_vcpu *vcpu)
{ {
int rc;
rc = _vcpu_run(vm, VCPU_ID);
TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
}
static void check_for_guest_assert(struct kvm_vm *vm)
{
struct kvm_run *run = vcpu_state(vm, VCPU_ID);
struct ucall uc; struct ucall uc;
if (run->exit_reason == KVM_EXIT_IO && if (vcpu->run->exit_reason == KVM_EXIT_IO &&
get_ucall(vm, VCPU_ID, &uc) == UCALL_ABORT) { get_ucall(vcpu->vm, vcpu->id, &uc) == UCALL_ABORT) {
TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], TEST_FAIL("%s at %s:%ld",
__FILE__, uc.args[1]); (const char *)uc.args[0], __FILE__, uc.args[1]);
} }
} }
static void process_rdmsr(struct kvm_vm *vm, uint32_t msr_index) static void process_rdmsr(struct kvm_vcpu *vcpu, uint32_t msr_index)
{ {
struct kvm_run *run = vcpu_state(vm, VCPU_ID); struct kvm_run *run = vcpu->run;
check_for_guest_assert(vm); check_for_guest_assert(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_X86_RDMSR, TEST_ASSERT(run->exit_reason == KVM_EXIT_X86_RDMSR,
"Unexpected exit reason: %u (%s),\n", "Unexpected exit reason: %u (%s),\n",
...@@ -450,11 +440,11 @@ static void process_rdmsr(struct kvm_vm *vm, uint32_t msr_index) ...@@ -450,11 +440,11 @@ static void process_rdmsr(struct kvm_vm *vm, uint32_t msr_index)
} }
} }
static void process_wrmsr(struct kvm_vm *vm, uint32_t msr_index) static void process_wrmsr(struct kvm_vcpu *vcpu, uint32_t msr_index)
{ {
struct kvm_run *run = vcpu_state(vm, VCPU_ID); struct kvm_run *run = vcpu->run;
check_for_guest_assert(vm); check_for_guest_assert(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_X86_WRMSR, TEST_ASSERT(run->exit_reason == KVM_EXIT_X86_WRMSR,
"Unexpected exit reason: %u (%s),\n", "Unexpected exit reason: %u (%s),\n",
...@@ -481,43 +471,43 @@ static void process_wrmsr(struct kvm_vm *vm, uint32_t msr_index) ...@@ -481,43 +471,43 @@ static void process_wrmsr(struct kvm_vm *vm, uint32_t msr_index)
} }
} }
static void process_ucall_done(struct kvm_vm *vm) static void process_ucall_done(struct kvm_vcpu *vcpu)
{ {
struct kvm_run *run = vcpu_state(vm, VCPU_ID); struct kvm_run *run = vcpu->run;
struct ucall uc; struct ucall uc;
check_for_guest_assert(vm); check_for_guest_assert(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s)", "Unexpected exit reason: %u (%s)",
run->exit_reason, run->exit_reason,
exit_reason_str(run->exit_reason)); exit_reason_str(run->exit_reason));
TEST_ASSERT(get_ucall(vm, VCPU_ID, &uc) == UCALL_DONE, TEST_ASSERT(get_ucall(vcpu->vm, vcpu->id, &uc) == UCALL_DONE,
"Unexpected ucall command: %lu, expected UCALL_DONE (%d)", "Unexpected ucall command: %lu, expected UCALL_DONE (%d)",
uc.cmd, UCALL_DONE); uc.cmd, UCALL_DONE);
} }
static uint64_t process_ucall(struct kvm_vm *vm) static uint64_t process_ucall(struct kvm_vcpu *vcpu)
{ {
struct kvm_run *run = vcpu_state(vm, VCPU_ID); struct kvm_run *run = vcpu->run;
struct ucall uc = {}; struct ucall uc = {};
check_for_guest_assert(vm); check_for_guest_assert(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s)", "Unexpected exit reason: %u (%s)",
run->exit_reason, run->exit_reason,
exit_reason_str(run->exit_reason)); exit_reason_str(run->exit_reason));
switch (get_ucall(vm, VCPU_ID, &uc)) { switch (get_ucall(vcpu->vm, vcpu->id, &uc)) {
case UCALL_SYNC: case UCALL_SYNC:
break; break;
case UCALL_ABORT: case UCALL_ABORT:
check_for_guest_assert(vm); check_for_guest_assert(vcpu);
break; break;
case UCALL_DONE: case UCALL_DONE:
process_ucall_done(vm); process_ucall_done(vcpu);
break; break;
default: default:
TEST_ASSERT(false, "Unexpected ucall"); TEST_ASSERT(false, "Unexpected ucall");
...@@ -526,38 +516,39 @@ static uint64_t process_ucall(struct kvm_vm *vm) ...@@ -526,38 +516,39 @@ static uint64_t process_ucall(struct kvm_vm *vm)
return uc.cmd; return uc.cmd;
} }
/* Enter the guest and expect it to exit with a RDMSR of @msr_index. */
static void run_guest_then_process_rdmsr(struct kvm_vcpu *vcpu,
					 uint32_t msr_index)
{
	vcpu_run(vcpu->vm, vcpu->id);

	process_rdmsr(vcpu, msr_index);
}
/* Enter the guest and expect it to exit with a WRMSR of @msr_index. */
static void run_guest_then_process_wrmsr(struct kvm_vcpu *vcpu,
					 uint32_t msr_index)
{
	vcpu_run(vcpu->vm, vcpu->id);

	process_wrmsr(vcpu, msr_index);
}
static uint64_t run_guest_then_process_ucall(struct kvm_vm *vm) static uint64_t run_guest_then_process_ucall(struct kvm_vcpu *vcpu)
{ {
run_guest(vm); vcpu_run(vcpu->vm, vcpu->id);
return process_ucall(vm); return process_ucall(vcpu);
} }
static void run_guest_then_process_ucall_done(struct kvm_vm *vm) static void run_guest_then_process_ucall_done(struct kvm_vcpu *vcpu)
{ {
run_guest(vm); vcpu_run(vcpu->vm, vcpu->id);
process_ucall_done(vm); process_ucall_done(vcpu);
} }
static void test_msr_filter_allow(void) static void test_msr_filter_allow(void)
{ {
struct kvm_vcpu *vcpu;
struct kvm_vm *vm; struct kvm_vm *vm;
int rc; int rc;
/* Create VM */ vm = vm_create_with_one_vcpu(&vcpu, guest_code_filter_allow);
vm = vm_create_default(VCPU_ID, 0, guest_code_filter_allow);
vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR); rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available"); TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
...@@ -569,43 +560,43 @@ static void test_msr_filter_allow(void) ...@@ -569,43 +560,43 @@ static void test_msr_filter_allow(void)
vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_allow); vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_allow);
vm_init_descriptor_tables(vm); vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vm, VCPU_ID); vcpu_init_descriptor_tables(vm, vcpu->id);
vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler); vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
/* Process guest code userspace exits. */ /* Process guest code userspace exits. */
run_guest_then_process_rdmsr(vm, MSR_IA32_XSS); run_guest_then_process_rdmsr(vcpu, MSR_IA32_XSS);
run_guest_then_process_wrmsr(vm, MSR_IA32_XSS); run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);
run_guest_then_process_wrmsr(vm, MSR_IA32_XSS); run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);
run_guest_then_process_rdmsr(vm, MSR_IA32_FLUSH_CMD); run_guest_then_process_rdmsr(vcpu, MSR_IA32_FLUSH_CMD);
run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD); run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);
run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD); run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);
run_guest_then_process_wrmsr(vm, MSR_NON_EXISTENT); run_guest_then_process_wrmsr(vcpu, MSR_NON_EXISTENT);
run_guest_then_process_rdmsr(vm, MSR_NON_EXISTENT); run_guest_then_process_rdmsr(vcpu, MSR_NON_EXISTENT);
vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler); vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
run_guest(vm); vcpu_run(vm, vcpu->id);
vm_install_exception_handler(vm, UD_VECTOR, NULL); vm_install_exception_handler(vm, UD_VECTOR, NULL);
if (process_ucall(vm) != UCALL_DONE) { if (process_ucall(vcpu) != UCALL_DONE) {
vm_install_exception_handler(vm, GP_VECTOR, guest_fep_gp_handler); vm_install_exception_handler(vm, GP_VECTOR, guest_fep_gp_handler);
/* Process emulated rdmsr and wrmsr instructions. */ /* Process emulated rdmsr and wrmsr instructions. */
run_guest_then_process_rdmsr(vm, MSR_IA32_XSS); run_guest_then_process_rdmsr(vcpu, MSR_IA32_XSS);
run_guest_then_process_wrmsr(vm, MSR_IA32_XSS); run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);
run_guest_then_process_wrmsr(vm, MSR_IA32_XSS); run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);
run_guest_then_process_rdmsr(vm, MSR_IA32_FLUSH_CMD); run_guest_then_process_rdmsr(vcpu, MSR_IA32_FLUSH_CMD);
run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD); run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);
run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD); run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);
run_guest_then_process_wrmsr(vm, MSR_NON_EXISTENT); run_guest_then_process_wrmsr(vcpu, MSR_NON_EXISTENT);
run_guest_then_process_rdmsr(vm, MSR_NON_EXISTENT); run_guest_then_process_rdmsr(vcpu, MSR_NON_EXISTENT);
/* Confirm the guest completed without issues. */ /* Confirm the guest completed without issues. */
run_guest_then_process_ucall_done(vm); run_guest_then_process_ucall_done(vcpu);
} else { } else {
printf("To run the instruction emulated tests set the module parameter 'kvm.force_emulation_prefix=1'\n"); printf("To run the instruction emulated tests set the module parameter 'kvm.force_emulation_prefix=1'\n");
} }
...@@ -613,16 +604,16 @@ static void test_msr_filter_allow(void) ...@@ -613,16 +604,16 @@ static void test_msr_filter_allow(void)
kvm_vm_free(vm); kvm_vm_free(vm);
} }
static int handle_ucall(struct kvm_vm *vm) static int handle_ucall(struct kvm_vcpu *vcpu)
{ {
struct ucall uc; struct ucall uc;
switch (get_ucall(vm, VCPU_ID, &uc)) { switch (get_ucall(vcpu->vm, vcpu->id, &uc)) {
case UCALL_ABORT: case UCALL_ABORT:
TEST_FAIL("Guest assertion not met"); TEST_FAIL("Guest assertion not met");
break; break;
case UCALL_SYNC: case UCALL_SYNC:
vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &no_filter_deny); vm_ioctl(vcpu->vm, KVM_X86_SET_MSR_FILTER, &no_filter_deny);
break; break;
case UCALL_DONE: case UCALL_DONE:
return 1; return 1;
...@@ -672,14 +663,13 @@ static void handle_wrmsr(struct kvm_run *run) ...@@ -672,14 +663,13 @@ static void handle_wrmsr(struct kvm_run *run)
static void test_msr_filter_deny(void) static void test_msr_filter_deny(void)
{ {
struct kvm_vcpu *vcpu;
struct kvm_vm *vm; struct kvm_vm *vm;
struct kvm_run *run; struct kvm_run *run;
int rc; int rc;
/* Create VM */ vm = vm_create_with_one_vcpu(&vcpu, guest_code_filter_deny);
vm = vm_create_default(VCPU_ID, 0, guest_code_filter_deny); run = vcpu->run;
vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
run = vcpu_state(vm, VCPU_ID);
rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR); rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available"); TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
...@@ -694,9 +684,7 @@ static void test_msr_filter_deny(void) ...@@ -694,9 +684,7 @@ static void test_msr_filter_deny(void)
vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_deny); vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_deny);
while (1) { while (1) {
rc = _vcpu_run(vm, VCPU_ID); vcpu_run(vm, vcpu->id);
TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
switch (run->exit_reason) { switch (run->exit_reason) {
case KVM_EXIT_X86_RDMSR: case KVM_EXIT_X86_RDMSR:
...@@ -706,7 +694,7 @@ static void test_msr_filter_deny(void) ...@@ -706,7 +694,7 @@ static void test_msr_filter_deny(void)
handle_wrmsr(run); handle_wrmsr(run);
break; break;
case KVM_EXIT_IO: case KVM_EXIT_IO:
if (handle_ucall(vm)) if (handle_ucall(vcpu))
goto done; goto done;
break; break;
} }
...@@ -722,12 +710,11 @@ static void test_msr_filter_deny(void) ...@@ -722,12 +710,11 @@ static void test_msr_filter_deny(void)
static void test_msr_permission_bitmap(void) static void test_msr_permission_bitmap(void)
{ {
struct kvm_vcpu *vcpu;
struct kvm_vm *vm; struct kvm_vm *vm;
int rc; int rc;
/* Create VM */ vm = vm_create_with_one_vcpu(&vcpu, guest_code_permission_bitmap);
vm = vm_create_default(VCPU_ID, 0, guest_code_permission_bitmap);
vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR); rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available"); TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
...@@ -737,11 +724,12 @@ static void test_msr_permission_bitmap(void) ...@@ -737,11 +724,12 @@ static void test_msr_permission_bitmap(void)
TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available"); TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");
vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_fs); vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_fs);
run_guest_then_process_rdmsr(vm, MSR_FS_BASE); run_guest_then_process_rdmsr(vcpu, MSR_FS_BASE);
TEST_ASSERT(run_guest_then_process_ucall(vm) == UCALL_SYNC, "Expected ucall state to be UCALL_SYNC."); TEST_ASSERT(run_guest_then_process_ucall(vcpu) == UCALL_SYNC,
"Expected ucall state to be UCALL_SYNC.");
vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_gs); vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_gs);
run_guest_then_process_rdmsr(vm, MSR_GS_BASE); run_guest_then_process_rdmsr(vcpu, MSR_GS_BASE);
run_guest_then_process_ucall_done(vm); run_guest_then_process_ucall_done(vcpu);
kvm_vm_free(vm); kvm_vm_free(vm);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment