Commit 768e9a61 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: selftests: Purge vm+vcpu_id == vcpu silliness

Take a vCPU directly instead of a VM+vcpu pair in all vCPU-scoped helpers
and ioctls.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 5260db3e
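
The conversion below is mechanical but wide: every vCPU-scoped helper stops taking a (vm, vcpuid) pair and instead takes the struct kvm_vcpu it already implies, reaching the VM through vcpu->vm where needed. A minimal sketch of the calling convention before and after (the skeleton is illustrative, not part of this commit; it only reuses helpers that appear in the diff):

	struct ucall uc;
	struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, 0);

	/* Old convention: name the VM and the vCPU ID on every call. */
	vcpu_args_set(vm, vcpu->id, 1, 0);
	vcpu_run(vm, vcpu->id);

	/* New convention: the vCPU object is the sole handle. */
	vcpu_args_set(vcpu, 1, 0);
	vcpu_run(vcpu);
	if (get_ucall(vcpu, &uc) == UCALL_DONE)
		return;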
@@ -218,14 +218,14 @@ static void *test_vcpu_run(void *arg)
 	struct kvm_vm *vm = vcpu->vm;
 	struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[vcpu_idx];
-	vcpu_run(vm, vcpu->id);
+	vcpu_run(vcpu);
 	/* Currently, any exit from guest is an indication of completion */
 	pthread_mutex_lock(&vcpu_done_map_lock);
 	set_bit(vcpu_idx, vcpu_done_map);
 	pthread_mutex_unlock(&vcpu_done_map_lock);
-	switch (get_ucall(vm, vcpu->id, &uc)) {
+	switch (get_ucall(vcpu, &uc)) {
 	case UCALL_SYNC:
 	case UCALL_DONE:
 		break;
@@ -345,9 +345,9 @@ static void test_run(struct kvm_vm *vm)
 static void test_init_timer_irq(struct kvm_vm *vm)
 {
 	/* Timer initid should be same for all the vCPUs, so query only vCPU-0 */
-	vcpu_device_attr_get(vm, vcpus[0]->id, KVM_ARM_VCPU_TIMER_CTRL,
+	vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL,
 			     KVM_ARM_VCPU_TIMER_IRQ_PTIMER, &ptimer_irq);
-	vcpu_device_attr_get(vm, vcpus[0]->id, KVM_ARM_VCPU_TIMER_CTRL,
+	vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL,
 			     KVM_ARM_VCPU_TIMER_IRQ_VTIMER, &vtimer_irq);
 	sync_global_to_guest(vm, ptimer_irq);
@@ -370,7 +370,7 @@ static struct kvm_vm *test_vm_create(void)
 	vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT, guest_irq_handler);
 	for (i = 0; i < nr_vcpus; i++)
-		vcpu_init_descriptor_tables(vm, vcpus[i]->id);
+		vcpu_init_descriptor_tables(vcpus[i]);
 	ucall_init(vm, NULL);
 	test_init_timer_irq(vm);
...
@@ -242,7 +242,7 @@ static int debug_version(struct kvm_vcpu *vcpu)
 {
 	uint64_t id_aa64dfr0;
-	vcpu_get_reg(vcpu->vm, vcpu->id, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &id_aa64dfr0);
+	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &id_aa64dfr0);
 	return id_aa64dfr0 & 0xf;
 }
@@ -257,7 +257,7 @@ int main(int argc, char *argv[])
 	ucall_init(vm, NULL);
 	vm_init_descriptor_tables(vm);
-	vcpu_init_descriptor_tables(vm, vcpu->id);
+	vcpu_init_descriptor_tables(vcpu);
 	if (debug_version(vcpu) < 6) {
 		print_skip("Armv8 debug architecture not supported.");
@@ -277,9 +277,9 @@ int main(int argc, char *argv[])
 				ESR_EC_SVC64, guest_svc_handler);
 	for (stage = 0; stage < 11; stage++) {
-		vcpu_run(vm, vcpu->id);
+		vcpu_run(vcpu);
-		switch (get_ucall(vm, vcpu->id, &uc)) {
+		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_SYNC:
 			TEST_ASSERT(uc.args[1] == stage,
 				    "Stage %d: Unexpected sync ucall, got %lx",
...
@@ -377,7 +377,7 @@ static void prepare_vcpu_init(struct vcpu_config *c, struct kvm_vcpu_init *init)
 			init->features[s->feature / 32] |= 1 << (s->feature % 32);
 }
-static void finalize_vcpu(struct kvm_vm *vm, uint32_t vcpuid, struct vcpu_config *c)
+static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_config *c)
 {
 	struct reg_sublist *s;
 	int feature;
@@ -385,7 +385,7 @@ static void finalize_vcpu(struct kvm_vm *vm, uint32_t vcpuid, struct vcpu_config
 	for_each_sublist(c, s) {
 		if (s->finalize) {
 			feature = s->feature;
-			vcpu_ioctl(vm, vcpuid, KVM_ARM_VCPU_FINALIZE, &feature);
+			vcpu_ioctl(vcpu, KVM_ARM_VCPU_FINALIZE, &feature);
 		}
 	}
 }
@@ -420,10 +420,10 @@ static void run_test(struct vcpu_config *c)
 	vm = vm_create_barebones();
 	prepare_vcpu_init(c, &init);
 	vcpu = __vm_vcpu_add(vm, 0);
-	aarch64_vcpu_setup(vm, vcpu->id, &init);
+	aarch64_vcpu_setup(vcpu, &init);
-	finalize_vcpu(vm, vcpu->id, c);
+	finalize_vcpu(vcpu, c);
-	reg_list = vcpu_get_reg_list(vm, vcpu->id);
+	reg_list = vcpu_get_reg_list(vcpu);
 	if (fixup_core_regs)
 		core_reg_fixup();
@@ -459,7 +459,7 @@ static void run_test(struct vcpu_config *c)
 		bool reject_reg = false;
 		int ret;
-		ret = __vcpu_get_reg(vm, vcpu->id, reg_list->reg[i], &addr);
+		ret = __vcpu_get_reg(vcpu, reg_list->reg[i], &addr);
 		if (ret) {
 			printf("%s: Failed to get ", config_name(c));
 			print_reg(c, reg.id);
@@ -471,7 +471,7 @@ static void run_test(struct vcpu_config *c)
 		for_each_sublist(c, s) {
 			if (s->rejects_set && find_reg(s->rejects_set, s->rejects_set_n, reg.id)) {
 				reject_reg = true;
-				ret = __vcpu_ioctl(vm, vcpu->id, KVM_SET_ONE_REG, &reg);
+				ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
 				if (ret != -1 || errno != EPERM) {
 					printf("%s: Failed to reject (ret=%d, errno=%d) ", config_name(c), ret, errno);
 					print_reg(c, reg.id);
@@ -483,7 +483,7 @@ static void run_test(struct vcpu_config *c)
 		}
 		if (!reject_reg) {
-			ret = __vcpu_ioctl(vm, vcpu->id, KVM_SET_ONE_REG, &reg);
+			ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
 			if (ret) {
 				printf("%s: Failed to set ", config_name(c));
 				print_reg(c, reg.id);
...
@@ -158,7 +158,7 @@ static void steal_time_init(struct kvm_vcpu *vcpu)
 	gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE);
 	vm_userspace_mem_region_add(vcpu->vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);
-	vcpu_device_attr_set(vcpu->vm, vcpu->id, KVM_ARM_VCPU_PVTIME_CTRL,
+	vcpu_device_attr_set(vcpu, KVM_ARM_VCPU_PVTIME_CTRL,
 			     KVM_ARM_VCPU_PVTIME_IPA, &st_ipa);
 }
@@ -172,18 +172,18 @@ static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu)
 		const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i];
 		/* First 'read' should be an upper limit of the features supported */
-		vcpu_get_reg(vcpu->vm, vcpu->id, reg_info->reg, &val);
+		vcpu_get_reg(vcpu, reg_info->reg, &val);
 		TEST_ASSERT(val == FW_REG_ULIMIT_VAL(reg_info->max_feat_bit),
 			    "Expected all the features to be set for reg: 0x%lx; expected: 0x%lx; read: 0x%lx\n",
 			    reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit), val);
 		/* Test a 'write' by disabling all the features of the register map */
-		ret = __vcpu_set_reg(vcpu->vm, vcpu->id, reg_info->reg, 0);
+		ret = __vcpu_set_reg(vcpu, reg_info->reg, 0);
 		TEST_ASSERT(ret == 0,
 			    "Failed to clear all the features of reg: 0x%lx; ret: %d\n",
 			    reg_info->reg, errno);
-		vcpu_get_reg(vcpu->vm, vcpu->id, reg_info->reg, &val);
+		vcpu_get_reg(vcpu, reg_info->reg, &val);
 		TEST_ASSERT(val == 0,
 			    "Expected all the features to be cleared for reg: 0x%lx\n", reg_info->reg);
@@ -192,7 +192,7 @@ static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu)
 		 * Avoid this check if all the bits are occupied.
 		 */
 		if (reg_info->max_feat_bit < 63) {
-			ret = __vcpu_set_reg(vcpu->vm, vcpu->id, reg_info->reg, BIT(reg_info->max_feat_bit + 1));
+			ret = __vcpu_set_reg(vcpu, reg_info->reg, BIT(reg_info->max_feat_bit + 1));
 			TEST_ASSERT(ret != 0 && errno == EINVAL,
 				    "Unexpected behavior or return value (%d) while setting an unsupported feature for reg: 0x%lx\n",
 				    errno, reg_info->reg);
@@ -213,7 +213,7 @@ static void test_fw_regs_after_vm_start(struct kvm_vcpu *vcpu)
 		 * Before starting the VM, the test clears all the bits.
 		 * Check if that's still the case.
 		 */
-		vcpu_get_reg(vcpu->vm, vcpu->id, reg_info->reg, &val);
+		vcpu_get_reg(vcpu, reg_info->reg, &val);
 		TEST_ASSERT(val == 0,
 			    "Expected all the features to be cleared for reg: 0x%lx\n",
 			    reg_info->reg);
@@ -223,7 +223,7 @@ static void test_fw_regs_after_vm_start(struct kvm_vcpu *vcpu)
 		 * the registers and should return EBUSY. Set the registers and check for
 		 * the expected errno.
 		 */
-		ret = __vcpu_set_reg(vcpu->vm, vcpu->id, reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit));
+		ret = __vcpu_set_reg(vcpu, reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit));
 		TEST_ASSERT(ret != 0 && errno == EBUSY,
 			    "Unexpected behavior or return value (%d) while setting a feature while VM is running for reg: 0x%lx\n",
 			    errno, reg_info->reg);
@@ -281,9 +281,9 @@ static void test_run(void)
 	test_fw_regs_before_vm_start(vcpu);
 	while (!guest_done) {
-		vcpu_run(vcpu->vm, vcpu->id);
+		vcpu_run(vcpu);
-		switch (get_ucall(vcpu->vm, vcpu->id, &uc)) {
+		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_SYNC:
 			test_guest_stage(&vm, &vcpu);
 			break;
...
@@ -67,7 +67,7 @@ static void vcpu_power_off(struct kvm_vcpu *vcpu)
 		.mp_state = KVM_MP_STATE_STOPPED,
 	};
-	vcpu_mp_state_set(vcpu->vm, vcpu->id, &mp_state);
+	vcpu_mp_state_set(vcpu, &mp_state);
 }
 static struct kvm_vm *setup_vm(void *guest_code, struct kvm_vcpu **source,
@@ -92,8 +92,8 @@ static void enter_guest(struct kvm_vcpu *vcpu)
 {
 	struct ucall uc;
-	vcpu_run(vcpu->vm, vcpu->id);
+	vcpu_run(vcpu);
-	if (get_ucall(vcpu->vm, vcpu->id, &uc) == UCALL_ABORT)
+	if (get_ucall(vcpu, &uc) == UCALL_ABORT)
 		TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], __FILE__,
 			  uc.args[1]);
 }
@@ -102,8 +102,8 @@ static void assert_vcpu_reset(struct kvm_vcpu *vcpu)
 {
 	uint64_t obs_pc, obs_x0;
-	vcpu_get_reg(vcpu->vm, vcpu->id, ARM64_CORE_REG(regs.pc), &obs_pc);
+	vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &obs_pc);
-	vcpu_get_reg(vcpu->vm, vcpu->id, ARM64_CORE_REG(regs.regs[0]), &obs_x0);
+	vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.regs[0]), &obs_x0);
 	TEST_ASSERT(obs_pc == CPU_ON_ENTRY_ADDR,
 		    "unexpected target cpu pc: %lx (expected: %lx)",
@@ -143,11 +143,11 @@ static void host_test_cpu_on(void)
 	 */
 	vcpu_power_off(target);
-	vcpu_get_reg(vm, target->id, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &target_mpidr);
+	vcpu_get_reg(target, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &target_mpidr);
-	vcpu_args_set(vm, source->id, 1, target_mpidr & MPIDR_HWID_BITMASK);
+	vcpu_args_set(source, 1, target_mpidr & MPIDR_HWID_BITMASK);
 	enter_guest(source);
-	if (get_ucall(vm, source->id, &uc) != UCALL_DONE)
+	if (get_ucall(source, &uc) != UCALL_DONE)
 		TEST_FAIL("Unhandled ucall: %lu", uc.cmd);
 	assert_vcpu_reset(target);
...
@@ -28,12 +28,12 @@ static int add_init_2vcpus(struct kvm_vcpu_init *init0,
 	vm = vm_create_barebones();
 	vcpu0 = __vm_vcpu_add(vm, 0);
-	ret = __vcpu_ioctl(vm, vcpu0->id, KVM_ARM_VCPU_INIT, init0);
+	ret = __vcpu_ioctl(vcpu0, KVM_ARM_VCPU_INIT, init0);
 	if (ret)
 		goto free_exit;
 	vcpu1 = __vm_vcpu_add(vm, 1);
-	ret = __vcpu_ioctl(vm, vcpu1->id, KVM_ARM_VCPU_INIT, init1);
+	ret = __vcpu_ioctl(vcpu1, KVM_ARM_VCPU_INIT, init1);
 free_exit:
 	kvm_vm_free(vm);
@@ -56,11 +56,11 @@ static int add_2vcpus_init_2vcpus(struct kvm_vcpu_init *init0,
 	vcpu0 = __vm_vcpu_add(vm, 0);
 	vcpu1 = __vm_vcpu_add(vm, 1);
-	ret = __vcpu_ioctl(vm, vcpu0->id, KVM_ARM_VCPU_INIT, init0);
+	ret = __vcpu_ioctl(vcpu0, KVM_ARM_VCPU_INIT, init0);
 	if (ret)
 		goto free_exit;
-	ret = __vcpu_ioctl(vm, vcpu1->id, KVM_ARM_VCPU_INIT, init1);
+	ret = __vcpu_ioctl(vcpu1, KVM_ARM_VCPU_INIT, init1);
 free_exit:
 	kvm_vm_free(vm);
...
@@ -70,7 +70,7 @@ static int run_vcpu(struct kvm_vcpu *vcpu)
 {
 	ucall_init(vcpu->vm, NULL);
-	return __vcpu_run(vcpu->vm, vcpu->id) ? -errno : 0;
+	return __vcpu_run(vcpu) ? -errno : 0;
 }
 static struct vm_gic vm_gic_create_with_vcpus(uint32_t gic_dev_type,
...
@@ -759,12 +759,12 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
 	ucall_init(vm, NULL);
 	vm_init_descriptor_tables(vm);
-	vcpu_init_descriptor_tables(vm, vcpu->id);
+	vcpu_init_descriptor_tables(vcpu);
 	/* Setup the guest args page (so it gets the args). */
 	args_gva = vm_vaddr_alloc_page(vm);
 	memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args));
-	vcpu_args_set(vm, vcpu->id, 1, args_gva);
+	vcpu_args_set(vcpu, 1, args_gva);
 	gic_fd = vgic_v3_setup(vm, 1, nr_irqs,
 			GICD_BASE_GPA, GICR_BASE_GPA);
@@ -777,9 +777,9 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
 			guest_irq_handlers[args.eoi_split][args.level_sensitive]);
 	while (1) {
-		vcpu_run(vm, vcpu->id);
+		vcpu_run(vcpu);
-		switch (get_ucall(vm, vcpu->id, &uc)) {
+		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_SYNC:
 			kvm_inject_get_call(vm, &uc, &inject_args);
 			run_guest_cmd(vcpu, gic_fd, &inject_args, &args);
...
@@ -194,7 +194,7 @@ static void mark_vcpu_memory_idle(struct kvm_vm *vm,
 static void assert_ucall(struct kvm_vcpu *vcpu, uint64_t expected_ucall)
 {
 	struct ucall uc;
-	uint64_t actual_ucall = get_ucall(vcpu->vm, vcpu->id, &uc);
+	uint64_t actual_ucall = get_ucall(vcpu, &uc);
 	TEST_ASSERT(expected_ucall == actual_ucall,
 		    "Guest exited unexpectedly (expected ucall %" PRIu64
@@ -226,7 +226,7 @@ static void vcpu_thread_main(struct perf_test_vcpu_args *vcpu_args)
 	while (spin_wait_for_next_iteration(&current_iteration)) {
 		switch (READ_ONCE(iteration_work)) {
 		case ITERATION_ACCESS_MEMORY:
-			vcpu_run(vm, vcpu->id);
+			vcpu_run(vcpu);
 			assert_ucall(vcpu, UCALL_SYNC);
 			break;
 		case ITERATION_MARK_IDLE:
...
@@ -45,7 +45,6 @@ static char *guest_data_prototype;
 static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
 {
 	struct kvm_vcpu *vcpu = vcpu_args->vcpu;
-	struct kvm_vm *vm = perf_test_args.vm;
 	int vcpu_idx = vcpu_args->vcpu_idx;
 	struct kvm_run *run = vcpu->run;
 	struct timespec start;
@@ -55,9 +54,9 @@ static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
 	clock_gettime(CLOCK_MONOTONIC, &start);
 	/* Let the guest access its memory */
-	ret = _vcpu_run(vm, vcpu->id);
+	ret = _vcpu_run(vcpu);
 	TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
-	if (get_ucall(vm, vcpu->id, NULL) != UCALL_SYNC) {
+	if (get_ucall(vcpu, NULL) != UCALL_SYNC) {
 		TEST_ASSERT(false,
 			    "Invalid guest sync status: exit_reason=%s\n",
 			    exit_reason_str(run->exit_reason));
...
@@ -69,7 +69,6 @@ static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
 static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
 {
 	struct kvm_vcpu *vcpu = vcpu_args->vcpu;
-	struct kvm_vm *vm = perf_test_args.vm;
 	int vcpu_idx = vcpu_args->vcpu_idx;
 	uint64_t pages_count = 0;
 	struct kvm_run *run;
@@ -85,18 +84,18 @@ static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
 		int current_iteration = READ_ONCE(iteration);
 		clock_gettime(CLOCK_MONOTONIC, &start);
-		ret = _vcpu_run(vm, vcpu->id);
+		ret = _vcpu_run(vcpu);
 		ts_diff = timespec_elapsed(start);
 		TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
-		TEST_ASSERT(get_ucall(vm, vcpu->id, NULL) == UCALL_SYNC,
+		TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
 			    "Invalid guest sync status: exit_reason=%s\n",
 			    exit_reason_str(run->exit_reason));
 		pr_debug("Got sync event from vCPU %d\n", vcpu_idx);
 		vcpu_last_completed_iteration[vcpu_idx] = current_iteration;
 		pr_debug("vCPU %d updated last completed iteration to %d\n",
-			 vcpu->id, vcpu_last_completed_iteration[vcpu_idx]);
+			 vcpu_idx, vcpu_last_completed_iteration[vcpu_idx]);
 		if (current_iteration) {
 			pages_count += vcpu_args->pages;
...
@@ -255,7 +255,7 @@ static void default_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
 	TEST_ASSERT(ret == 0 || (ret == -1 && err == EINTR),
 		    "vcpu run failed: errno=%d", err);
-	TEST_ASSERT(get_ucall(vcpu->vm, vcpu->id, NULL) == UCALL_SYNC,
+	TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
 		    "Invalid guest sync status: exit_reason=%s\n",
 		    exit_reason_str(run->exit_reason));
@@ -346,7 +346,7 @@ static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
 	}
 	/* Only have one vcpu */
-	count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu->vm, vcpu->id),
+	count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu),
 				       slot, bitmap, num_pages, &fetch_index);
 	cleared = kvm_vm_reset_dirty_ring(vcpu->vm);
@@ -369,7 +369,7 @@ static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
 	struct kvm_run *run = vcpu->run;
 	/* A ucall-sync or ring-full event is allowed */
-	if (get_ucall(vcpu->vm, vcpu->id, NULL) == UCALL_SYNC) {
+	if (get_ucall(vcpu, NULL) == UCALL_SYNC) {
 		/* We should allow this to continue */
 		;
 	} else if (run->exit_reason == KVM_EXIT_DIRTY_RING_FULL ||
@@ -521,7 +521,7 @@ static void *vcpu_worker(void *data)
 	sigmask->len = 8;
 	pthread_sigmask(0, NULL, sigset);
 	sigdelset(sigset, SIG_IPI);
-	vcpu_ioctl(vm, vcpu->id, KVM_SET_SIGNAL_MASK, sigmask);
+	vcpu_ioctl(vcpu, KVM_SET_SIGNAL_MASK, sigmask);
 	sigemptyset(sigset);
 	sigaddset(sigset, SIG_IPI);
@@ -533,7 +533,7 @@ static void *vcpu_worker(void *data)
 		generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
 		pages_count += TEST_PAGES_PER_LOOP;
 		/* Let the guest dirty the random pages */
-		ret = __vcpu_run(vm, vcpu->id);
+		ret = __vcpu_run(vcpu);
 		if (ret == -1 && errno == EINTR) {
 			int sig = -1;
 			sigwait(sigset, &sig);
...
@@ -39,7 +39,7 @@ static void *run_vcpu(void *arg)
 	struct kvm_vcpu *vcpu = arg;
 	struct kvm_run *run = vcpu->run;
-	vcpu_run(vcpu->vm, vcpu->id);
+	vcpu_run(vcpu);
 	TEST_ASSERT(false, "%s: exited with reason %d: %s\n",
 		    __func__, run->exit_reason,
...
@@ -47,7 +47,7 @@
 #define MPIDR_HWID_BITMASK (0xff00fffffful)
-void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init *init);
+void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init);
 struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 				  struct kvm_vcpu_init *init, void *guest_code);
@@ -101,7 +101,7 @@ void aarch64_get_supported_page_sizes(uint32_t ipa,
 				      bool *ps4k, bool *ps16k, bool *ps64k);
 void vm_init_descriptor_tables(struct kvm_vm *vm);
-void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid);
+void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu);
 typedef void(*handler_fn)(struct ex_regs *);
 void vm_install_exception_handler(struct kvm_vm *vm,
...
@@ -26,7 +26,7 @@ struct ucall {
 void ucall_init(struct kvm_vm *vm, void *arg);
 void ucall_uninit(struct kvm_vm *vm);
 void ucall(uint64_t cmd, int nargs, ...);
-uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc);
+uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc);
 #define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4)	\
 				ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4)
...
@@ -241,7 +241,7 @@ struct hv_enlightened_vmcs {
 extern struct hv_enlightened_vmcs *current_evmcs;
 extern struct hv_vp_assist_page *current_vp_assist;
-int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id);
+int vcpu_enable_evmcs(struct kvm_vcpu *vcpu);
 static inline int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist)
 {
...
@@ -422,9 +422,8 @@ static inline unsigned int x86_model(unsigned int eax)
 	return ((eax >> 12) & 0xf0) | ((eax >> 4) & 0x0f);
 }
-struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid);
-void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid,
-		     struct kvm_x86_state *state);
+struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu);
+void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state);
 void kvm_x86_state_cleanup(struct kvm_x86_state *state);
 const struct kvm_msr_list *kvm_get_msr_index_list(void);
@@ -432,73 +431,71 @@ const struct kvm_msr_list *kvm_get_feature_msr_index_list(void);
 bool kvm_msr_is_in_save_restore_list(uint32_t msr_index);
 uint64_t kvm_get_feature_msr(uint64_t msr_index);
-static inline void vcpu_msrs_get(struct kvm_vm *vm, uint32_t vcpuid,
+static inline void vcpu_msrs_get(struct kvm_vcpu *vcpu,
 				 struct kvm_msrs *msrs)
 {
-	int r = __vcpu_ioctl(vm, vcpuid, KVM_GET_MSRS, msrs);
+	int r = __vcpu_ioctl(vcpu, KVM_GET_MSRS, msrs);
 	TEST_ASSERT(r == msrs->nmsrs,
 		    "KVM_GET_MSRS failed, r: %i (failed on MSR %x)",
 		    r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index);
 }
-static inline void vcpu_msrs_set(struct kvm_vm *vm, uint32_t vcpuid,
-				 struct kvm_msrs *msrs)
+static inline void vcpu_msrs_set(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs)
 {
-	int r = __vcpu_ioctl(vm, vcpuid, KVM_SET_MSRS, msrs);
+	int r = __vcpu_ioctl(vcpu, KVM_SET_MSRS, msrs);
 	TEST_ASSERT(r == msrs->nmsrs,
 		    "KVM_GET_MSRS failed, r: %i (failed on MSR %x)",
 		    r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index);
 }
-static inline void vcpu_debugregs_get(struct kvm_vm *vm, uint32_t vcpuid,
+static inline void vcpu_debugregs_get(struct kvm_vcpu *vcpu,
 				      struct kvm_debugregs *debugregs)
 {
-	vcpu_ioctl(vm, vcpuid, KVM_GET_DEBUGREGS, debugregs);
+	vcpu_ioctl(vcpu, KVM_GET_DEBUGREGS, debugregs);
 }
-static inline void vcpu_debugregs_set(struct kvm_vm *vm, uint32_t vcpuid,
+static inline void vcpu_debugregs_set(struct kvm_vcpu *vcpu,
 				      struct kvm_debugregs *debugregs)
 {
-	vcpu_ioctl(vm, vcpuid, KVM_SET_DEBUGREGS, debugregs);
+	vcpu_ioctl(vcpu, KVM_SET_DEBUGREGS, debugregs);
 }
-static inline void vcpu_xsave_get(struct kvm_vm *vm, uint32_t vcpuid,
+static inline void vcpu_xsave_get(struct kvm_vcpu *vcpu,
 				  struct kvm_xsave *xsave)
 {
-	vcpu_ioctl(vm, vcpuid, KVM_GET_XSAVE, xsave);
+	vcpu_ioctl(vcpu, KVM_GET_XSAVE, xsave);
 }
-static inline void vcpu_xsave2_get(struct kvm_vm *vm, uint32_t vcpuid,
+static inline void vcpu_xsave2_get(struct kvm_vcpu *vcpu,
 				   struct kvm_xsave *xsave)
 {
-	vcpu_ioctl(vm, vcpuid, KVM_GET_XSAVE2, xsave);
+	vcpu_ioctl(vcpu, KVM_GET_XSAVE2, xsave);
 }
-static inline void vcpu_xsave_set(struct kvm_vm *vm, uint32_t vcpuid,
+static inline void vcpu_xsave_set(struct kvm_vcpu *vcpu,
 				  struct kvm_xsave *xsave)
 {
-	vcpu_ioctl(vm, vcpuid, KVM_SET_XSAVE, xsave);
+	vcpu_ioctl(vcpu, KVM_SET_XSAVE, xsave);
 }
-static inline void vcpu_xcrs_get(struct kvm_vm *vm, uint32_t vcpuid,
+static inline void vcpu_xcrs_get(struct kvm_vcpu *vcpu,
 				 struct kvm_xcrs *xcrs)
 {
-	vcpu_ioctl(vm, vcpuid, KVM_GET_XCRS, xcrs);
+	vcpu_ioctl(vcpu, KVM_GET_XCRS, xcrs);
 }
-static inline void vcpu_xcrs_set(struct kvm_vm *vm, uint32_t vcpuid,
-				 struct kvm_xcrs *xcrs)
+static inline void vcpu_xcrs_set(struct kvm_vcpu *vcpu, struct kvm_xcrs *xcrs)
 {
-	vcpu_ioctl(vm, vcpuid, KVM_SET_XCRS, xcrs);
+	vcpu_ioctl(vcpu, KVM_SET_XCRS, xcrs);
 }
 struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
-struct kvm_cpuid2 *vcpu_get_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
+struct kvm_cpuid2 *vcpu_get_cpuid(struct kvm_vcpu *vcpu);
-static inline int __vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid,
+static inline int __vcpu_set_cpuid(struct kvm_vcpu *vcpu,
 				   struct kvm_cpuid2 *cpuid)
 {
-	return __vcpu_ioctl(vm, vcpuid, KVM_SET_CPUID2, cpuid);
+	return __vcpu_ioctl(vcpu, KVM_SET_CPUID2, cpuid);
 }
-static inline void vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid,
+static inline void vcpu_set_cpuid(struct kvm_vcpu *vcpu,
 				  struct kvm_cpuid2 *cpuid)
 {
-	vcpu_ioctl(vm, vcpuid, KVM_SET_CPUID2, cpuid);
+	vcpu_ioctl(vcpu, KVM_SET_CPUID2, cpuid);
 }
 struct kvm_cpuid_entry2 *
@@ -510,14 +507,13 @@ kvm_get_supported_cpuid_entry(uint32_t function)
 	return kvm_get_supported_cpuid_index(function, 0);
 }
-uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index);
-int _vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
-		  uint64_t msr_value);
+uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index);
+int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value);
-static inline void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid,
-				uint64_t msr_index, uint64_t msr_value)
+static inline void vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index,
+				uint64_t msr_value)
 {
-	int r = _vcpu_set_msr(vm, vcpuid, msr_index, msr_value);
+	int r = _vcpu_set_msr(vcpu, msr_index, msr_value);
 	TEST_ASSERT(r == 1, KVM_IOCTL_ERROR(KVM_SET_MSRS, r));
 }
@@ -541,13 +537,14 @@ struct ex_regs {
 };
 void vm_init_descriptor_tables(struct kvm_vm *vm);
-void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid);
+void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu);
 void vm_install_exception_handler(struct kvm_vm *vm, int vector,
 			void (*handler)(struct ex_regs *));
-uint64_t vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr);
-void vm_set_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr,
-			     uint64_t pte);
+uint64_t vm_get_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
+				 uint64_t vaddr);
+void vm_set_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
+			     uint64_t vaddr, uint64_t pte);
 /*
  * get_cpuid() - find matching CPUID entry and return pointer to it.
@@ -567,8 +564,8 @@ uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
 		       uint64_t a3);
 struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void);
-void vcpu_set_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
-struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
+void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu);
+struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu);
 void vm_xsave_req_perm(int bit);
 enum pg_level {
...
@@ -174,7 +174,7 @@ static void vm_stats_test(struct kvm_vm *vm)
 static void vcpu_stats_test(struct kvm_vcpu *vcpu)
 {
-	int stats_fd = vcpu_get_stats_fd(vcpu->vm, vcpu->id);
+	int stats_fd = vcpu_get_stats_fd(vcpu);
 	stats_test(stats_fd);
 	close(stats_fd);
...
@@ -184,7 +184,6 @@ static void guest_code(bool do_write)
 static void *vcpu_worker(void *data)
 {
-	struct kvm_vm *vm = test_args.vm;
 	struct kvm_vcpu *vcpu = data;
 	bool do_write = !(vcpu->id % 2);
 	struct timespec start;
@@ -192,7 +191,7 @@ static void *vcpu_worker(void *data)
 	enum test_stage stage;
 	int ret;
-	vcpu_args_set(vm, vcpu->id, 1, do_write);
+	vcpu_args_set(vcpu, 1, do_write);
 	while (!READ_ONCE(host_quit)) {
 		ret = sem_wait(&test_stage_updated);
@@ -202,11 +201,11 @@ static void *vcpu_worker(void *data)
 			return NULL;
 		clock_gettime(CLOCK_MONOTONIC_RAW, &start);
-		ret = _vcpu_run(vm, vcpu->id);
+		ret = _vcpu_run(vcpu);
 		ts_diff = timespec_elapsed(start);
 		TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
-		TEST_ASSERT(get_ucall(vm, vcpu->id, NULL) == UCALL_SYNC,
+		TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
 			    "Invalid guest sync status: exit_reason=%s\n",
 			    exit_reason_str(vcpu->run->exit_reason));
...
@@ -212,9 +212,10 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 	}
 }
-void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init *init)
+void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
 {
 	struct kvm_vcpu_init default_init = { .target = -1, };
+	struct kvm_vm *vm = vcpu->vm;
 	uint64_t sctlr_el1, tcr_el1;
 	if (!init)
@@ -226,16 +227,16 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init
 		init->target = preferred.target;
 	}
-	vcpu_ioctl(vm, vcpuid, KVM_ARM_VCPU_INIT, init);
+	vcpu_ioctl(vcpu, KVM_ARM_VCPU_INIT, init);
 	/*
 	 * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15
 	 * registers, which the variable argument list macros do.
 	 */
-	vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20);
+	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20);
-	vcpu_get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1);
-	vcpu_get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1);
+	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1);
+	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1);
 	/* Configure base granule size */
 	switch (vm->mode) {
@@ -296,19 +297,19 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init
 	tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12);
 	tcr_el1 |= (64 - vm->va_bits) /* T0SZ */;
-	vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1);
-	vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1);
-	vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1);
-	vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), vm->pgd);
-	vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpuid);
+	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1);
+	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1);
+	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1);
+	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), vm->pgd);
+	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpu->id);
 }
-void vcpu_arch_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
 {
 	uint64_t pstate, pc;
-	vcpu_get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pstate), &pstate);
-	vcpu_get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), &pc);
+	vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pstate), &pstate);
+	vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &pc);
 	fprintf(stream, "%*spstate: 0x%.16lx pc: 0x%.16lx\n",
 		indent, "", pstate, pc);
@@ -324,10 +325,10 @@ struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 					       DEFAULT_ARM64_GUEST_STACK_VADDR_MIN);
 	struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);
-	aarch64_vcpu_setup(vm, vcpu_id, init);
+	aarch64_vcpu_setup(vcpu, init);
-	vcpu_set_reg(vm, vcpu_id, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
-	vcpu_set_reg(vm, vcpu_id, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
+	vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
+	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
 	return vcpu;
 }
@@ -338,7 +339,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 	return aarch64_vcpu_add(vm, vcpu_id, NULL, guest_code);
 }
-void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
+void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
 {
 	va_list ap;
 	int i;
@@ -349,7 +350,7 @@ void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
 	va_start(ap, num);
 	for (i = 0; i < num; i++) {
-		vcpu_set_reg(vm, vcpuid, ARM64_CORE_REG(regs.regs[i]),
+		vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.regs[i]),
 			     va_arg(ap, uint64_t));
 	}
@@ -363,11 +364,11 @@ void kvm_exit_unexpected_exception(int vector, uint64_t ec, bool valid_ec)
 		;
 }
-void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
+void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
 {
 	struct ucall uc;
-	if (get_ucall(vm, vcpuid, &uc) != UCALL_UNHANDLED)
+	if (get_ucall(vcpu, &uc) != UCALL_UNHANDLED)
 		return;
 	if (uc.args[2]) /* valid_ec */ {
@@ -385,11 +386,11 @@ struct handlers {
 	handler_fn exception_handlers[VECTOR_NUM][ESR_EC_NUM];
 };
-void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid)
+void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu)
 {
 	extern char vectors;
-	vcpu_set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors);
+	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors);
 }
 void route_exception(struct ex_regs *regs, int vector)
...
@@ -88,9 +88,9 @@ void ucall(uint64_t cmd, int nargs, ...)
 	*ucall_exit_mmio_addr = (vm_vaddr_t)&uc;
 }
-uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
+uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
 {
-	struct kvm_run *run = vcpu_state(vm, vcpu_id);
+	struct kvm_run *run = vcpu->run;
 	struct ucall ucall = {};
 	if (uc)
@@ -103,9 +103,9 @@ uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
 		TEST_ASSERT(run->mmio.is_write && run->mmio.len == 8,
 			    "Unexpected ucall exit mmio address access");
 		memcpy(&gva, run->mmio.data, sizeof(gva));
-		memcpy(&ucall, addr_gva2hva(vm, gva), sizeof(ucall));
+		memcpy(&ucall, addr_gva2hva(vcpu->vm, gva), sizeof(ucall));
-		vcpu_run_complete_io(vm, vcpu_id);
+		vcpu_run_complete_io(vcpu);
 		if (uc)
 			memcpy(uc, &ucall, sizeof(ucall));
 	}
...
@@ -1395,88 +1395,49 @@ void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa)
 	return (void *) ((uintptr_t) region->host_alias + offset);
 }
-/*
- * VM Create IRQ Chip
- *
- * Input Args:
- *   vm - Virtual Machine
- *
- * Output Args: None
- *
- * Return: None
- *
- * Creates an interrupt controller chip for the VM specified by vm.
- */
+/* Create an interrupt controller chip for the specified VM. */
 void vm_create_irqchip(struct kvm_vm *vm)
 {
 	vm_ioctl(vm, KVM_CREATE_IRQCHIP, NULL);
 	vm->has_irqchip = true;
 }
-/*
- * VM VCPU State
- *
- * Input Args:
- *   vm - Virtual Machine
- *   vcpuid - VCPU ID
- *
- * Output Args: None
- *
- * Return:
- *   Pointer to structure that describes the state of the VCPU.
- *
- * Locates and returns a pointer to a structure that describes the
- * state of the VCPU with the given vcpuid.
- */
-struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid)
+struct kvm_run *vcpu_state(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vcpu *vcpu = vcpu_get(vm, vcpuid);
 	return vcpu->run;
 }
-/*
- * VM VCPU Run
- *
- * Input Args:
- *   vm - Virtual Machine
- *   vcpuid - VCPU ID
- *
- * Output Args: None
- *
- * Return: None
- *
- * Switch to executing the code for the VCPU given by vcpuid, within the VM
- * given by vm.
- */
-void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
-{
-	int ret = _vcpu_run(vm, vcpuid);
-	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_RUN, ret));
-}
-int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
+int _vcpu_run(struct kvm_vcpu *vcpu)
 {
 	int rc;
 	do {
-		rc = __vcpu_run(vm, vcpuid);
+		rc = __vcpu_run(vcpu);
 	} while (rc == -1 && errno == EINTR);
-	assert_on_unhandled_exception(vm, vcpuid);
+	assert_on_unhandled_exception(vcpu);
 	return rc;
 }
-void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
+/*
+ * Invoke KVM_RUN on a vCPU until KVM returns something other than -EINTR.
+ * Assert if the KVM returns an error (other than -EINTR).
+ */
+void vcpu_run(struct kvm_vcpu *vcpu)
+{
+	int ret = _vcpu_run(vcpu);
+	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_RUN, ret));
+}
+void vcpu_run_complete_io(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vcpu *vcpu = vcpu_get(vm, vcpuid);
 	int ret;
 	vcpu->run->immediate_exit = 1;
-	ret = __vcpu_run(vm, vcpuid);
+	ret = __vcpu_run(vcpu);
 	vcpu->run->immediate_exit = 0;
 	TEST_ASSERT(ret == -1 && errno == EINTR,
@@ -1485,73 +1446,57 @@ void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
 }
 /*
- * VM VCPU Get Reg List
- *
- * Input Args:
- *   vm - Virtual Machine
- *   vcpuid - VCPU ID
- *
- * Output Args:
- *   None
- *
- * Return:
- *   A pointer to an allocated struct kvm_reg_list
- *
  * Get the list of guest registers which are supported for
- * KVM_GET_ONE_REG/KVM_SET_ONE_REG calls
+ * KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. Returns a kvm_reg_list pointer,
+ * it is the callers responsibility to free the list.
  */
-struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vm *vm, uint32_t vcpuid)
+struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu)
 {
 	struct kvm_reg_list reg_list_n = { .n = 0 }, *reg_list;
 	int ret;
-	ret = __vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, &reg_list_n);
+	ret = __vcpu_ioctl(vcpu, KVM_GET_REG_LIST, &reg_list_n);
 	TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0");
 	reg_list = calloc(1, sizeof(*reg_list) + reg_list_n.n * sizeof(__u64));
 	reg_list->n = reg_list_n.n;
-	vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, reg_list);
+	vcpu_ioctl(vcpu, KVM_GET_REG_LIST, reg_list);
 	return reg_list;
 }
-int __vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
-		 unsigned long cmd, void *arg)
+int __vcpu_ioctl(struct kvm_vcpu *vcpu, unsigned long cmd, void *arg)
 {
-	struct kvm_vcpu *vcpu = vcpu_get(vm, vcpuid);
 	return ioctl(vcpu->fd, cmd, arg);
 }
-void _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long cmd,
-		 const char *name, void *arg)
+void _vcpu_ioctl(struct kvm_vcpu *vcpu, unsigned long cmd, const char *name,
+		 void *arg)
 {
-	int ret = __vcpu_ioctl(vm, vcpuid, cmd, arg);
+	int ret = __vcpu_ioctl(vcpu, cmd, arg);
 	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));
 }
-void *vcpu_map_dirty_ring(struct kvm_vm *vm, uint32_t vcpuid)
+void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vcpu *vcpu = vcpu_get(vm, vcpuid);
-	uint32_t size = vm->dirty_ring_size;
+	uint32_t page_size = vcpu->vm->page_size;
+	uint32_t size = vcpu->vm->dirty_ring_size;
 	TEST_ASSERT(size > 0, "Should enable dirty ring first");
 	if (!vcpu->dirty_gfns) {
 		void *addr;
-		addr = mmap(NULL, size, PROT_READ,
-			    MAP_PRIVATE, vcpu->fd,
-			    vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
+		addr = mmap(NULL, size, PROT_READ, MAP_PRIVATE, vcpu->fd,
+			    page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
 		TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped private");
-		addr = mmap(NULL, size, PROT_READ | PROT_EXEC,
-			    MAP_PRIVATE, vcpu->fd,
-			    vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
+		addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, vcpu->fd,
+			    page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
 		TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped exec");
-		addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
-			    MAP_SHARED, vcpu->fd,
-			    vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
+		addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd,
+			    page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
 		TEST_ASSERT(addr != MAP_FAILED, "Dirty ring map failed");
 		vcpu->dirty_gfns = addr;
@@ -1636,36 +1581,6 @@ int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val)
 	return __kvm_ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &kvmattr);
 }
-int __vcpu_device_attr_get(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
-			   uint64_t attr, void *val)
-{
-	return __kvm_device_attr_get(vcpu_get(vm, vcpuid)->fd, group, attr, val);
-}
-void vcpu_device_attr_get(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
-			  uint64_t attr, void *val)
-{
-	kvm_device_attr_get(vcpu_get(vm, vcpuid)->fd, group, attr, val);
-}
-int __vcpu_device_attr_set(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
-			   uint64_t attr, void *val)
-{
-	return __kvm_device_attr_set(vcpu_get(vm, vcpuid)->fd, group, attr, val);
-}
-void vcpu_device_attr_set(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
-			  uint64_t attr, void *val)
-{
-	kvm_device_attr_set(vcpu_get(vm, vcpuid)->fd, group, attr, val);
-}
-int __vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
-			   uint64_t attr)
-{
-	return __kvm_has_device_attr(vcpu_get(vm, vcpuid)->fd, group, attr);
-}
 /*
  * IRQ related functions.
  */
@@ -1781,8 +1696,9 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 		virt_dump(stream, vm, indent + 4);
 	}
 	fprintf(stream, "%*sVCPUs:\n", indent, "");
 	list_for_each_entry(vcpu, &vm->vcpus, list)
-		vcpu_dump(stream, vm, vcpu->id, indent + 2);
+		vcpu_dump(stream, vcpu, indent + 2);
 }
/* Known KVM exit reasons */
...
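
Note that each of the five vCPU device-attr wrappers deleted above reduced to a vcpu_get(vm, vcpuid)->fd lookup. With a struct kvm_vcpu in hand that lookup disappears entirely; the replacements (not shown in this excerpt) presumably become thin wrappers along these lines (a sketch, not the commit's literal code):

	static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
						 uint64_t attr, void *val)
	{
		/* The vCPU's fd is already at hand; no VM-wide lookup needed. */
		return __kvm_device_attr_get(vcpu->fd, group, attr, val);
	}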
@@ -98,7 +98,7 @@ void perf_test_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
 			vcpu_args->gpa = pta->gpa;
 		}
-		vcpu_args_set(vm, vcpus[i]->id, 1, i);
+		vcpu_args_set(vcpus[i], 1, i);
 		pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
 			 i, vcpu_args->gpa, vcpu_args->gpa +
...
@@ -178,8 +178,9 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 	}
 }
-void riscv_vcpu_mmu_setup(struct kvm_vm *vm, int vcpuid)
+void riscv_vcpu_mmu_setup(struct kvm_vcpu *vcpu)
 {
+	struct kvm_vm *vm = vcpu->vm;
 	unsigned long satp;
 	/*
@@ -198,46 +199,46 @@ void riscv_vcpu_mmu_setup(struct kvm_vm *vm, int vcpuid)
 	satp = (vm->pgd >> PGTBL_PAGE_SIZE_SHIFT) & SATP_PPN;
 	satp |= SATP_MODE_48;
-	vcpu_set_reg(vm, vcpuid, RISCV_CSR_REG(satp), satp);
+	vcpu_set_reg(vcpu, RISCV_CSR_REG(satp), satp);
 }
-void vcpu_arch_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
 {
 	struct kvm_riscv_core core;
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(mode), &core.mode);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.pc), &core.regs.pc);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.ra), &core.regs.ra);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.sp), &core.regs.sp);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.gp), &core.regs.gp);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.tp), &core.regs.tp);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t0), &core.regs.t0);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t1), &core.regs.t1);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t2), &core.regs.t2);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s0), &core.regs.s0);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s1), &core.regs.s1);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a0), &core.regs.a0);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a1), &core.regs.a1);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a2), &core.regs.a2);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a3), &core.regs.a3);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a4), &core.regs.a4);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a5), &core.regs.a5);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a6), &core.regs.a6);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a7), &core.regs.a7);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s2), &core.regs.s2);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s3), &core.regs.s3);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s4), &core.regs.s4);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s5), &core.regs.s5);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s6), &core.regs.s6);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s7), &core.regs.s7);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s8), &core.regs.s8);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s9), &core.regs.s9);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s10), &core.regs.s10);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s11), &core.regs.s11);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t3), &core.regs.t3);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t4), &core.regs.t4);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t5), &core.regs.t5);
-	vcpu_get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t6), &core.regs.t6);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(mode), &core.mode);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.pc), &core.regs.pc);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.ra), &core.regs.ra);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.sp), &core.regs.sp);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.gp), &core.regs.gp);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.tp), &core.regs.tp);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t0), &core.regs.t0);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t1), &core.regs.t1);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t2), &core.regs.t2);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s0), &core.regs.s0);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s1), &core.regs.s1);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a0), &core.regs.a0);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a1), &core.regs.a1);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a2), &core.regs.a2);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a3), &core.regs.a3);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a4), &core.regs.a4);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a5), &core.regs.a5);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a6), &core.regs.a6);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a7), &core.regs.a7);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s2), &core.regs.s2);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s3), &core.regs.s3);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s4), &core.regs.s4);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s5), &core.regs.s5);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s6), &core.regs.s6);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s7), &core.regs.s7);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s8), &core.regs.s8);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s9), &core.regs.s9);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s10), &core.regs.s10);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s11), &core.regs.s11);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t3), &core.regs.t3);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t4), &core.regs.t4);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t5), &core.regs.t5);
+	vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t6), &core.regs.t6);
 	fprintf(stream,
 		" MODE: 0x%lx\n", core.mode);
@@ -288,7 +289,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 	struct kvm_vcpu *vcpu;
 	vcpu = __vm_vcpu_add(vm, vcpu_id);
-	riscv_vcpu_mmu_setup(vm, vcpu_id);
+	riscv_vcpu_mmu_setup(vcpu);
 	/*
 	 * With SBI HSM support in KVM RISC-V, all secondary VCPUs are
@@ -296,28 +297,25 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 	 * are powered-on using KVM_SET_MP_STATE ioctl().
 	 */
 	mps.mp_state = KVM_MP_STATE_RUNNABLE;
-	r = __vcpu_ioctl(vm, vcpu_id, KVM_SET_MP_STATE, &mps);
+	r = __vcpu_ioctl(vcpu, KVM_SET_MP_STATE, &mps);
 	TEST_ASSERT(!r, "IOCTL KVM_SET_MP_STATE failed (error %d)", r);
 	/* Setup global pointer of guest to be same as the host */
 	asm volatile (
 		"add %0, gp, zero" : "=r" (current_gp) : : "memory");
-	vcpu_set_reg(vm, vcpu_id, RISCV_CORE_REG(regs.gp), current_gp);
+	vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.gp), current_gp);
 	/* Setup stack pointer and program counter of guest */
-	vcpu_set_reg(vm, vcpu_id, RISCV_CORE_REG(regs.sp),
-		     stack_vaddr + stack_size);
-	vcpu_set_reg(vm, vcpu_id, RISCV_CORE_REG(regs.pc),
-		     (unsigned long)guest_code);
+	vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_vaddr + stack_size);
+	vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code);
 	/* Setup default exception vector of guest */
-	vcpu_set_reg(vm, vcpu_id, RISCV_CSR_REG(stvec),
-		     (unsigned long)guest_unexp_trap);
+	vcpu_set_reg(vcpu, RISCV_CSR_REG(stvec), (unsigned long)guest_unexp_trap);
 	return vcpu;
 }
-void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
+void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
 {
 	va_list ap;
 	uint64_t id = RISCV_CORE_REG(regs.a0);
@@ -355,12 +353,12 @@ void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
 			id = RISCV_CORE_REG(regs.a7);
 			break;
 		}
-		vcpu_set_reg(vm, vcpuid, id, va_arg(ap, uint64_t));
+		vcpu_set_reg(vcpu, id, va_arg(ap, uint64_t));
 	}
 	va_end(ap);
 }
-void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
+void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
 {
 }
@@ -64,9 +64,9 @@ void ucall(uint64_t cmd, int nargs, ...)
 		  (vm_vaddr_t)&uc, 0, 0, 0, 0, 0);
 }
-uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
+uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
 {
-	struct kvm_run *run = vcpu_state(vm, vcpu_id);
+	struct kvm_run *run = vcpu->run;
 	struct ucall ucall = {};
 	if (uc)
@@ -76,16 +76,17 @@ uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
 	    run->riscv_sbi.extension_id == KVM_RISCV_SELFTESTS_SBI_EXT) {
 		switch (run->riscv_sbi.function_id) {
 		case KVM_RISCV_SELFTESTS_SBI_UCALL:
-			memcpy(&ucall, addr_gva2hva(vm,
-			       run->riscv_sbi.args[0]), sizeof(ucall));
+			memcpy(&ucall,
+			       addr_gva2hva(vcpu->vm, run->riscv_sbi.args[0]),
+			       sizeof(ucall));
-			vcpu_run_complete_io(vm, vcpu_id);
+			vcpu_run_complete_io(vcpu);
 			if (uc)
 				memcpy(uc, &ucall, sizeof(ucall));
 			break;
 		case KVM_RISCV_SELFTESTS_SBI_UNEXP:
-			vcpu_dump(stderr, vm, vcpu_id, 2);
+			vcpu_dump(stderr, vcpu, 2);
 			TEST_ASSERT(0, "Unexpected trap taken by guest");
 			break;
 		default:
...
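For orientation, the loop body that the collapsed hunk above elides maps variadic argument i onto RISC-V argument register a<i> before the vcpu_set_reg() call. A reconstruction for illustration only, not the verbatim kernel source:

	/* Illustrative sketch: select the KVM register id for argument i. */
	switch (i) {
	case 0:
		id = RISCV_CORE_REG(regs.a0);
		break;
	case 1:
		id = RISCV_CORE_REG(regs.a1);
		break;
	/* ... a2 through a6 follow the same pattern ... */
	case 7:
		id = RISCV_CORE_REG(regs.a7);
		break;
	}
	vcpu_set_reg(vcpu, id, va_arg(ap, uint64_t));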
@@ -32,7 +32,7 @@ static uint64_t diag318_handler(void)
 	uint64_t diag318_info;
 	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
-	vcpu_run(vm, vcpu->id);
+	vcpu_run(vcpu);
 	run = vcpu->run;
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
...
@@ -173,23 +173,23 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 	vcpu = __vm_vcpu_add(vm, vcpu_id);
 	/* Setup guest registers */
-	vcpu_regs_get(vm, vcpu_id, &regs);
+	vcpu_regs_get(vcpu, &regs);
 	regs.gprs[15] = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize()) - 160;
-	vcpu_regs_set(vm, vcpu_id, &regs);
+	vcpu_regs_set(vcpu, &regs);
-	vcpu_sregs_get(vm, vcpu_id, &sregs);
+	vcpu_sregs_get(vcpu, &sregs);
 	sregs.crs[0] |= 0x00040000;	/* Enable floating point regs */
 	sregs.crs[1] = vm->pgd | 0xf;	/* Primary region table */
-	vcpu_sregs_set(vm, vcpu_id, &sregs);
+	vcpu_sregs_set(vcpu, &sregs);
-	run = vcpu_state(vm, vcpu_id);
+	run = vcpu->run;
 	run->psw_mask = 0x0400000180000000ULL;  /* DAT enabled + 64 bit mode */
 	run->psw_addr = (uintptr_t)guest_code;
 	return vcpu;
 }
-void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
+void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
 {
 	va_list ap;
 	struct kvm_regs regs;
@@ -200,23 +200,21 @@ void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
 		    num);
 	va_start(ap, num);
-	vcpu_regs_get(vm, vcpuid, &regs);
+	vcpu_regs_get(vcpu, &regs);
 	for (i = 0; i < num; i++)
 		regs.gprs[i + 2] = va_arg(ap, uint64_t);
-	vcpu_regs_set(vm, vcpuid, &regs);
+	vcpu_regs_set(vcpu, &regs);
 	va_end(ap);
 }
-void vcpu_arch_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
 {
-	struct kvm_vcpu *vcpu = vcpu_get(vm, vcpuid);
 	fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n",
 		indent, "", vcpu->run->psw_mask, vcpu->run->psw_addr);
 }
-void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
+void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
 {
 }
@@ -33,9 +33,9 @@ void ucall(uint64_t cmd, int nargs, ...)
 	asm volatile ("diag 0,%0,0x501" : : "a"(&uc) : "memory");
 }
-uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
+uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
 {
-	struct kvm_run *run = vcpu_state(vm, vcpu_id);
+	struct kvm_run *run = vcpu->run;
 	struct ucall ucall = {};
 	if (uc)
@@ -47,10 +47,10 @@ uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
 	    (run->s390_sieic.ipb >> 16) == 0x501) {
 		int reg = run->s390_sieic.ipa & 0xf;
-		memcpy(&ucall, addr_gva2hva(vm, run->s.regs.gprs[reg]),
+		memcpy(&ucall, addr_gva2hva(vcpu->vm, run->s.regs.gprs[reg]),
 		       sizeof(ucall));
-		vcpu_run_complete_io(vm, vcpu_id);
+		vcpu_run_complete_io(vcpu);
 		if (uc)
 			memcpy(uc, &ucall, sizeof(ucall));
 	}
...
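With get_ucall() now vCPU-scoped on every architecture, the caller-side pattern is uniform. A minimal usage sketch, distilled from the hunks in this series (the UCALL_* handling varies per test):

	struct ucall uc;

	vcpu_run(vcpu);

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
		/* uc.args[] carries the guest's sync payload */
		break;
	case UCALL_ABORT:
		TEST_FAIL("guest abort: %s", (const char *)uc.args[0]);
	case UCALL_DONE:
		break;
	}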
@@ -35,9 +35,9 @@ void ucall(uint64_t cmd, int nargs, ...)
 		: : [port] "d" (UCALL_PIO_PORT), "D" (&uc) : "rax", "memory");
 }
-uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
+uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
 {
-	struct kvm_run *run = vcpu_state(vm, vcpu_id);
+	struct kvm_run *run = vcpu->run;
 	struct ucall ucall = {};
 	if (uc)
@@ -46,11 +46,11 @@ uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
 	if (run->exit_reason == KVM_EXIT_IO && run->io.port == UCALL_PIO_PORT) {
 		struct kvm_regs regs;
-		vcpu_regs_get(vm, vcpu_id, &regs);
+		vcpu_regs_get(vcpu, &regs);
-		memcpy(&ucall, addr_gva2hva(vm, (vm_vaddr_t)regs.rdi),
+		memcpy(&ucall, addr_gva2hva(vcpu->vm, (vm_vaddr_t)regs.rdi),
 		       sizeof(ucall));
-		vcpu_run_complete_io(vm, vcpu_id);
+		vcpu_run_complete_io(vcpu);
 		if (uc)
 			memcpy(uc, &ucall, sizeof(ucall));
 	}
...
@@ -42,11 +42,11 @@ struct eptPageTablePointer {
 	uint64_t address:40;
 	uint64_t reserved_63_52:12;
 };
-int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id)
+int vcpu_enable_evmcs(struct kvm_vcpu *vcpu)
 {
 	uint16_t evmcs_ver;
-	vcpu_enable_cap(vm, vcpu_id, KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
+	vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
 			(unsigned long)&evmcs_ver);
 	/* KVM should return supported EVMCS version range */
...
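vcpu_enable_cap() now also takes the vCPU directly. A sketch of the plausible wrapper shape, assuming it funnels into the KVM_ENABLE_CAP ioctl via vcpu_ioctl() (the struct kvm_enable_cap plumbing itself is not shown in this diff):

	static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap,
					   uint64_t arg0)
	{
		/* KVM_ENABLE_CAP takes the capability plus up to four args. */
		struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

		vcpu_ioctl(vcpu, KVM_ENABLE_CAP, &enable_cap);
	}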
@@ -51,10 +51,10 @@ static void rendezvous_with_boss(void)
 	}
 }
-static void run_vcpu(struct kvm_vm *vm, uint32_t vcpu_id)
+static void run_vcpu(struct kvm_vcpu *vcpu)
 {
-	vcpu_run(vm, vcpu_id);
+	vcpu_run(vcpu);
-	ASSERT_EQ(get_ucall(vm, vcpu_id, NULL), UCALL_DONE);
+	ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
 }
 static void *vcpu_worker(void *data)
@@ -65,25 +65,25 @@ static void *vcpu_worker(void *data)
 	struct kvm_sregs sregs;
 	struct kvm_regs regs;
-	vcpu_args_set(vm, vcpu->id, 3, info->start_gpa, info->end_gpa,
+	vcpu_args_set(vcpu, 3, info->start_gpa, info->end_gpa,
 		      vm_get_page_size(vm));
 	/* Snapshot regs before the first run. */
-	vcpu_regs_get(vm, vcpu->id, &regs);
+	vcpu_regs_get(vcpu, &regs);
 	rendezvous_with_boss();
-	run_vcpu(vm, vcpu->id);
+	run_vcpu(vcpu);
 	rendezvous_with_boss();
-	vcpu_regs_set(vm, vcpu->id, &regs);
+	vcpu_regs_set(vcpu, &regs);
-	vcpu_sregs_get(vm, vcpu->id, &sregs);
+	vcpu_sregs_get(vcpu, &sregs);
 #ifdef __x86_64__
 	/* Toggle CR0.WP to trigger a MMU context reset. */
 	sregs.cr0 ^= X86_CR0_WP;
 #endif
-	vcpu_sregs_set(vm, vcpu->id, &sregs);
+	vcpu_sregs_set(vcpu, &sregs);
 	rendezvous_with_boss();
-	run_vcpu(vm, vcpu->id);
+	run_vcpu(vcpu);
 	rendezvous_with_boss();
 	return NULL;
...
@@ -39,7 +39,6 @@ static bool run_vcpus = true;
 static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
 {
 	struct kvm_vcpu *vcpu = vcpu_args->vcpu;
-	struct kvm_vm *vm = perf_test_args.vm;
 	struct kvm_run *run;
 	int ret;
@@ -47,10 +46,10 @@ static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
 	/* Let the guest access its memory until a stop signal is received */
 	while (READ_ONCE(run_vcpus)) {
-		ret = _vcpu_run(vm, vcpu->id);
+		ret = _vcpu_run(vcpu);
 		TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
-		if (get_ucall(vm, vcpu->id, NULL) == UCALL_SYNC)
+		if (get_ucall(vcpu, NULL) == UCALL_SYNC)
 			continue;
 		TEST_ASSERT(false,
...
@@ -146,9 +146,9 @@ static void *vcpu_worker(void *__data)
 	struct ucall uc;
 	while (1) {
-		vcpu_run(data->vm, vcpu->id);
+		vcpu_run(vcpu);
-		switch (get_ucall(data->vm, vcpu->id, &uc)) {
+		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_SYNC:
 			TEST_ASSERT(uc.args[1] == 0,
 				    "Unexpected sync ucall, got %lx",
...
@@ -233,8 +233,8 @@ int main(int argc, char *argv[])
 	pthread_create(&migration_thread, NULL, migration_worker, 0);
 	for (i = 0; !done; i++) {
-		vcpu_run(vm, vcpu->id);
+		vcpu_run(vcpu);
-		TEST_ASSERT(get_ucall(vm, vcpu->id, NULL) == UCALL_SYNC,
+		TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
 			    "Guest failed?");
 		/*
...
@@ -152,7 +152,7 @@ static void memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo)
 	if (!vcpu)
 		vm_ioctl(info.vm, KVM_S390_MEM_OP, ksmo);
 	else
-		vcpu_ioctl(vcpu->vm, vcpu->id, KVM_S390_MEM_OP, ksmo);
+		vcpu_ioctl(vcpu, KVM_S390_MEM_OP, ksmo);
 }
 static int err_memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo)
@@ -162,7 +162,7 @@ static int err_memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo)
 	if (!vcpu)
 		return __vm_ioctl(info.vm, KVM_S390_MEM_OP, ksmo);
 	else
-		return __vcpu_ioctl(vcpu->vm, vcpu->id, KVM_S390_MEM_OP, ksmo);
+		return __vcpu_ioctl(vcpu, KVM_S390_MEM_OP, ksmo);
 }
 #define MEMOP(err, info_p, mop_target_p, access_mode_p, buf_p, size_p, ...)	\
@@ -250,8 +250,8 @@ enum stage {
 	struct ucall uc;						\
 	int __stage = (stage);						\
 									\
-	vcpu_run(__vcpu->vm, __vcpu->id);				\
+	vcpu_run(__vcpu);						\
-	get_ucall(__vcpu->vm, __vcpu->id, &uc);				\
+	get_ucall(__vcpu, &uc);						\
 	ASSERT_EQ(uc.cmd, UCALL_SYNC);					\
 	ASSERT_EQ(uc.args[1], __stage);					\
 })									\
...
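The memop hunks above lean on the selftests convention that the double-underscore variant returns the raw ioctl() result, so callers can assert on errno themselves, while the plain variant asserts success internally. A sketch of that pair under the new calling convention (assumed shape; the real helpers also thread the ioctl's name into the assert message):

	static inline int __vcpu_ioctl(struct kvm_vcpu *vcpu, unsigned long cmd,
				       void *arg)
	{
		/* Raw result: negative with errno set on failure. */
		return ioctl(vcpu->fd, cmd, arg);
	}

	static inline void vcpu_ioctl(struct kvm_vcpu *vcpu, unsigned long cmd,
				      void *arg)
	{
		int ret = __vcpu_ioctl(vcpu, cmd, arg);

		TEST_ASSERT(!ret, "vcpu ioctl %lu failed, rc: %d errno: %d",
			    cmd, ret, errno);
	}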
@@ -61,7 +61,7 @@ static void test_one_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t value)
 {
 	uint64_t eval_reg;
-	vcpu_get_reg(vcpu->vm, vcpu->id, id, &eval_reg);
+	vcpu_get_reg(vcpu, id, &eval_reg);
 	TEST_ASSERT(eval_reg == value, "value == 0x%lx", value);
 }
@@ -72,7 +72,7 @@ static void assert_noirq(struct kvm_vcpu *vcpu)
 	irq_state.len = sizeof(buf);
 	irq_state.buf = (unsigned long)buf;
-	irqs = __vcpu_ioctl(vcpu->vm, vcpu->id, KVM_S390_GET_IRQ_STATE, &irq_state);
+	irqs = __vcpu_ioctl(vcpu, KVM_S390_GET_IRQ_STATE, &irq_state);
 	/*
 	 * irqs contains the number of retrieved interrupts. Any interrupt
 	 * (notably, the emergency call interrupt we have injected) should
@@ -89,13 +89,13 @@ static void assert_clear(struct kvm_vcpu *vcpu)
 	struct kvm_regs regs;
 	struct kvm_fpu fpu;
-	vcpu_regs_get(vcpu->vm, vcpu->id, &regs);
+	vcpu_regs_get(vcpu, &regs);
 	TEST_ASSERT(!memcmp(&regs.gprs, regs_null, sizeof(regs.gprs)), "grs == 0");
-	vcpu_sregs_get(vcpu->vm, vcpu->id, &sregs);
+	vcpu_sregs_get(vcpu, &sregs);
 	TEST_ASSERT(!memcmp(&sregs.acrs, regs_null, sizeof(sregs.acrs)), "acrs == 0");
-	vcpu_fpu_get(vcpu->vm, vcpu->id, &fpu);
+	vcpu_fpu_get(vcpu, &fpu);
 	TEST_ASSERT(!memcmp(&fpu.fprs, regs_null, sizeof(fpu.fprs)), "fprs == 0");
 	/* sync regs */
@@ -133,7 +133,7 @@ static void assert_initial(struct kvm_vcpu *vcpu)
 	struct kvm_fpu fpu;
 	/* KVM_GET_SREGS */
-	vcpu_sregs_get(vcpu->vm, vcpu->id, &sregs);
+	vcpu_sregs_get(vcpu, &sregs);
 	TEST_ASSERT(sregs.crs[0] == 0xE0UL, "cr0 == 0xE0 (KVM_GET_SREGS)");
 	TEST_ASSERT(sregs.crs[14] == 0xC2000000UL,
 		    "cr14 == 0xC2000000 (KVM_GET_SREGS)");
@@ -159,7 +159,7 @@ static void assert_initial(struct kvm_vcpu *vcpu)
 	TEST_ASSERT(vcpu->run->psw_addr == 0, "psw_addr == 0 (kvm_run)");
 	TEST_ASSERT(vcpu->run->psw_mask == 0, "psw_mask == 0 (kvm_run)");
-	vcpu_fpu_get(vcpu->vm, vcpu->id, &fpu);
+	vcpu_fpu_get(vcpu, &fpu);
 	TEST_ASSERT(!fpu.fpc, "fpc == 0");
 	test_one_reg(vcpu, KVM_REG_S390_GBEA, 1);
@@ -198,7 +198,7 @@ static void inject_irq(struct kvm_vcpu *vcpu)
 	irq_state.buf = (unsigned long)buf;
 	irq->type = KVM_S390_INT_EMERGENCY;
 	irq->u.emerg.code = vcpu->id;
-	irqs = __vcpu_ioctl(vcpu->vm, vcpu->id, KVM_S390_SET_IRQ_STATE, &irq_state);
+	irqs = __vcpu_ioctl(vcpu, KVM_S390_SET_IRQ_STATE, &irq_state);
 	TEST_ASSERT(irqs >= 0, "Error injecting EMERGENCY IRQ errno %d\n", errno);
 }
@@ -221,11 +221,11 @@ static void test_normal(void)
 	ksft_print_msg("Testing normal reset\n");
 	vm = create_vm(&vcpu);
-	vcpu_run(vm, vcpu->id);
+	vcpu_run(vcpu);
 	inject_irq(vcpu);
-	vcpu_ioctl(vm, vcpu->id, KVM_S390_NORMAL_RESET, 0);
+	vcpu_ioctl(vcpu, KVM_S390_NORMAL_RESET, 0);
 	/* must clears */
 	assert_normal(vcpu);
@@ -244,11 +244,11 @@ static void test_initial(void)
 	ksft_print_msg("Testing initial reset\n");
 	vm = create_vm(&vcpu);
-	vcpu_run(vm, vcpu->id);
+	vcpu_run(vcpu);
 	inject_irq(vcpu);
-	vcpu_ioctl(vm, vcpu->id, KVM_S390_INITIAL_RESET, 0);
+	vcpu_ioctl(vcpu, KVM_S390_INITIAL_RESET, 0);
 	/* must clears */
 	assert_normal(vcpu);
@@ -267,11 +267,11 @@ static void test_clear(void)
 	ksft_print_msg("Testing clear reset\n");
 	vm = create_vm(&vcpu);
-	vcpu_run(vm, vcpu->id);
+	vcpu_run(vcpu);
 	inject_irq(vcpu);
-	vcpu_ioctl(vm, vcpu->id, KVM_S390_CLEAR_RESET, 0);
+	vcpu_ioctl(vcpu, KVM_S390_CLEAR_RESET, 0);
 	/* must clears */
 	assert_normal(vcpu);
...
@@ -80,14 +80,14 @@ void test_read_invalid(struct kvm_vcpu *vcpu)
 	/* Request reading invalid register set from VCPU. */
 	run->kvm_valid_regs = INVALID_SYNC_FIELD;
-	rv = _vcpu_run(vcpu->vm, vcpu->id);
+	rv = _vcpu_run(vcpu);
 	TEST_ASSERT(rv < 0 && errno == EINVAL,
 		    "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
 		    rv);
 	run->kvm_valid_regs = 0;
 	run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
-	rv = _vcpu_run(vcpu->vm, vcpu->id);
+	rv = _vcpu_run(vcpu);
 	TEST_ASSERT(rv < 0 && errno == EINVAL,
 		    "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
 		    rv);
@@ -101,14 +101,14 @@ void test_set_invalid(struct kvm_vcpu *vcpu)
 	/* Request setting invalid register set into VCPU. */
 	run->kvm_dirty_regs = INVALID_SYNC_FIELD;
-	rv = _vcpu_run(vcpu->vm, vcpu->id);
+	rv = _vcpu_run(vcpu);
 	TEST_ASSERT(rv < 0 && errno == EINVAL,
 		    "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
 		    rv);
 	run->kvm_dirty_regs = 0;
 	run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
-	rv = _vcpu_run(vcpu->vm, vcpu->id);
+	rv = _vcpu_run(vcpu);
 	TEST_ASSERT(rv < 0 && errno == EINVAL,
 		    "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
 		    rv);
@@ -124,7 +124,7 @@ void test_req_and_verify_all_valid_regs(struct kvm_vcpu *vcpu)
 	/* Request and verify all valid register sets. */
 	run->kvm_valid_regs = TEST_SYNC_FIELDS;
-	rv = _vcpu_run(vcpu->vm, vcpu->id);
+	rv = _vcpu_run(vcpu);
 	TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv);
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
 		    "Unexpected exit reason: %u (%s)\n",
@@ -137,10 +137,10 @@ void test_req_and_verify_all_valid_regs(struct kvm_vcpu *vcpu)
 		    run->s390_sieic.icptcode, run->s390_sieic.ipa,
 		    run->s390_sieic.ipb);
-	vcpu_regs_get(vcpu->vm, vcpu->id, &regs);
+	vcpu_regs_get(vcpu, &regs);
 	compare_regs(&regs, &run->s.regs);
-	vcpu_sregs_get(vcpu->vm, vcpu->id, &sregs);
+	vcpu_sregs_get(vcpu, &sregs);
 	compare_sregs(&sregs, &run->s.regs);
 }
@@ -163,7 +163,7 @@ void test_set_and_verify_various_reg_values(struct kvm_vcpu *vcpu)
 		run->kvm_dirty_regs |= KVM_SYNC_DIAG318;
 	}
-	rv = _vcpu_run(vcpu->vm, vcpu->id);
+	rv = _vcpu_run(vcpu);
 	TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv);
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
 		    "Unexpected exit reason: %u (%s)\n",
@@ -179,10 +179,10 @@ void test_set_and_verify_various_reg_values(struct kvm_vcpu *vcpu)
 		    "diag318 sync regs value incorrect 0x%llx.",
 		    run->s.regs.diag318);
-	vcpu_regs_get(vcpu->vm, vcpu->id, &regs);
+	vcpu_regs_get(vcpu, &regs);
 	compare_regs(&regs, &run->s.regs);
-	vcpu_sregs_get(vcpu->vm, vcpu->id, &sregs);
+	vcpu_sregs_get(vcpu, &sregs);
 	compare_sregs(&sregs, &run->s.regs);
 }
@@ -198,7 +198,7 @@ void test_clear_kvm_dirty_regs_bits(struct kvm_vcpu *vcpu)
 	run->kvm_dirty_regs = 0;
 	run->s.regs.gprs[11] = 0xDEADBEEF;
 	run->s.regs.diag318 = 0x4B1D;
-	rv = _vcpu_run(vcpu->vm, vcpu->id);
+	rv = _vcpu_run(vcpu);
 	TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv);
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
 		    "Unexpected exit reason: %u (%s)\n",
...
@@ -187,8 +187,8 @@ static void guest_code(void)
 	struct ucall uc;						\
 	int __stage = (stage);						\
 									\
-	vcpu_run(__vcpu->vm, __vcpu->id);				\
+	vcpu_run(__vcpu);						\
-	get_ucall(__vcpu->vm, __vcpu->id, &uc);				\
+	get_ucall(__vcpu, &uc);						\
 	if (uc.cmd == UCALL_ABORT) {					\
 		TEST_FAIL("line %lu: %s, hints: %lu, %lu", uc.args[1],	\
 			  (const char *)uc.args[0], uc.args[2], uc.args[3]); \
...
@@ -63,10 +63,10 @@ static void *vcpu_worker(void *data)
 	 * has been deleted or while it is being moved.
 	 */
 	while (1) {
-		vcpu_run(vcpu->vm, vcpu->id);
+		vcpu_run(vcpu);
 		if (run->exit_reason == KVM_EXIT_IO) {
-			cmd = get_ucall(vcpu->vm, vcpu->id, &uc);
+			cmd = get_ucall(vcpu, &uc);
 			if (cmd != UCALL_SYNC)
 				break;
@@ -291,7 +291,7 @@ static void test_delete_memory_region(void)
 		    run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
 		    "Unexpected exit reason = %d", run->exit_reason);
-	vcpu_regs_get(vm, vcpu->id, &regs);
+	vcpu_regs_get(vcpu, &regs);
 	/*
 	 * On AMD, after KVM_EXIT_SHUTDOWN the VMCB has been reinitialized already,
@@ -318,7 +318,7 @@ static void test_zero_memory_regions(void)
 	vcpu = __vm_vcpu_add(vm, 0);
 	vm_ioctl(vm, KVM_SET_NR_MMU_PAGES, (void *)64ul);
-	vcpu_run(vm, vcpu->id);
+	vcpu_run(vcpu);
 	run = vcpu->run;
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
...
@@ -73,11 +73,11 @@ static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
 	st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
 	sync_global_to_guest(vcpu->vm, st_gva[i]);
-	ret = _vcpu_set_msr(vcpu->vm, vcpu->id, MSR_KVM_STEAL_TIME,
+	ret = _vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME,
 			    (ulong)st_gva[i] | KVM_STEAL_RESERVED_MASK);
 	TEST_ASSERT(ret == 0, "Bad GPA didn't fail");
-	vcpu_set_msr(vcpu->vm, vcpu->id, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_MSR_ENABLED);
+	vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_MSR_ENABLED);
 }
 static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
@@ -163,7 +163,7 @@ static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
 		.attr = KVM_ARM_VCPU_PVTIME_IPA,
 	};
-	return !__vcpu_ioctl(vcpu->vm, vcpu->id, KVM_HAS_DEVICE_ATTR, &dev);
+	return !__vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev);
 }
 static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
@@ -178,20 +178,20 @@ static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
 		.addr = (uint64_t)&st_ipa,
 	};
-	vcpu_ioctl(vm, vcpu->id, KVM_HAS_DEVICE_ATTR, &dev);
+	vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev);
 	/* ST_GPA_BASE is identity mapped */
 	st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
 	sync_global_to_guest(vm, st_gva[i]);
 	st_ipa = (ulong)st_gva[i] | 1;
-	ret = __vcpu_ioctl(vm, vcpu->id, KVM_SET_DEVICE_ATTR, &dev);
+	ret = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
 	TEST_ASSERT(ret == -1 && errno == EINVAL, "Bad IPA didn't report EINVAL");
 	st_ipa = (ulong)st_gva[i];
-	vcpu_ioctl(vm, vcpu->id, KVM_SET_DEVICE_ATTR, &dev);
+	vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
-	ret = __vcpu_ioctl(vm, vcpu->id, KVM_SET_DEVICE_ATTR, &dev);
+	ret = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
 	TEST_ASSERT(ret == -1 && errno == EEXIST, "Set IPA twice without EEXIST");
 }
@@ -227,9 +227,9 @@ static void run_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct ucall uc;
-	vcpu_run(vcpu->vm, vcpu->id);
+	vcpu_run(vcpu);
-	switch (get_ucall(vcpu->vm, vcpu->id, &uc)) {
+	switch (get_ucall(vcpu, &uc)) {
 	case UCALL_SYNC:
 	case UCALL_DONE:
 		break;
@@ -280,7 +280,7 @@ int main(int ac, char **av)
 	for (i = 0; i < NR_VCPUS; ++i) {
 		steal_time_init(vcpus[i], i);
-		vcpu_args_set(vm, vcpus[i]->id, 1, i);
+		vcpu_args_set(vcpus[i], 1, i);
 		/* First VCPU run initializes steal-time */
 		run_vcpu(vcpus[i]);
...
@@ -28,8 +28,7 @@ static struct test_case test_cases[] = {
 static void check_preconditions(struct kvm_vcpu *vcpu)
 {
-	if (!__vcpu_has_device_attr(vcpu->vm, vcpu->id, KVM_VCPU_TSC_CTRL,
-				    KVM_VCPU_TSC_OFFSET))
+	if (!__vcpu_has_device_attr(vcpu, KVM_VCPU_TSC_CTRL, KVM_VCPU_TSC_OFFSET))
 		return;
 	print_skip("KVM_VCPU_TSC_OFFSET not supported; skipping test");
@@ -38,8 +37,8 @@ static void check_preconditions(struct kvm_vcpu *vcpu)
 static void setup_system_counter(struct kvm_vcpu *vcpu, struct test_case *test)
 {
-	vcpu_device_attr_set(vcpu->vm, vcpu->id, KVM_VCPU_TSC_CTRL,
-			     KVM_VCPU_TSC_OFFSET, &test->tsc_offset);
+	vcpu_device_attr_set(vcpu, KVM_VCPU_TSC_CTRL, KVM_VCPU_TSC_OFFSET,
+			     &test->tsc_offset);
 }
 static uint64_t guest_read_system_counter(struct test_case *test)
@@ -101,10 +100,10 @@ static void enter_guest(struct kvm_vcpu *vcpu)
 		setup_system_counter(vcpu, test);
 		start = host_read_guest_system_counter(test);
-		vcpu_run(vcpu->vm, vcpu->id);
+		vcpu_run(vcpu);
 		end = host_read_guest_system_counter(test);
-		switch (get_ucall(vcpu->vm, vcpu->id, &uc)) {
+		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_SYNC:
 			handle_sync(&uc, start, end);
 			break;
@@ -113,7 +112,7 @@ static void enter_guest(struct kvm_vcpu *vcpu)
 			return;
 		default:
 			TEST_ASSERT(0, "unhandled ucall %ld\n",
-				    get_ucall(vcpu->vm, vcpu->id, &uc));
+				    get_ucall(vcpu, &uc));
 		}
 	}
 }
...
@@ -351,11 +351,11 @@ int main(int argc, char *argv[])
 	}
 	run = vcpu->run;
-	vcpu_regs_get(vm, vcpu->id, &regs1);
+	vcpu_regs_get(vcpu, &regs1);
 	/* Register #NM handler */
 	vm_init_descriptor_tables(vm);
-	vcpu_init_descriptor_tables(vm, vcpu->id);
+	vcpu_init_descriptor_tables(vcpu);
 	vm_install_exception_handler(vm, NM_VECTOR, guest_nm_handler);
 	/* amx cfg for guest_code */
@@ -369,16 +369,16 @@ int main(int argc, char *argv[])
 	/* xsave data for guest_code */
 	xsavedata = vm_vaddr_alloc_pages(vm, 3);
 	memset(addr_gva2hva(vm, xsavedata), 0, 3 * getpagesize());
-	vcpu_args_set(vm, vcpu->id, 3, amx_cfg, tiledata, xsavedata);
+	vcpu_args_set(vcpu, 3, amx_cfg, tiledata, xsavedata);
 	for (stage = 1; ; stage++) {
-		vcpu_run(vm, vcpu->id);
+		vcpu_run(vcpu);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 			    "Stage %d: unexpected exit reason: %u (%s),\n",
 			    stage, run->exit_reason,
 			    exit_reason_str(run->exit_reason));
-		switch (get_ucall(vm, vcpu->id, &uc)) {
+		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_ABORT:
 			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
 				  __FILE__, uc.args[1]);
@@ -403,7 +403,7 @@ int main(int argc, char *argv[])
 			 * size subtract 8K amx size.
 			 */
 			amx_offset = xsave_restore_size - NUM_TILES*TILE_SIZE;
-			state = vcpu_save_state(vm, vcpu->id);
+			state = vcpu_save_state(vcpu);
 			void *amx_start = (void *)state->xsave + amx_offset;
 			void *tiles_data = (void *)addr_gva2hva(vm, tiledata);
 			/* Only check TMM0 register, 1 tile */
@@ -424,21 +424,21 @@ int main(int argc, char *argv[])
 			TEST_FAIL("Unknown ucall %lu", uc.cmd);
 		}
-		state = vcpu_save_state(vm, vcpu->id);
+		state = vcpu_save_state(vcpu);
 		memset(&regs1, 0, sizeof(regs1));
-		vcpu_regs_get(vm, vcpu->id, &regs1);
+		vcpu_regs_get(vcpu, &regs1);
 		kvm_vm_release(vm);
 		/* Restore state in a new VM. */
 		vcpu = vm_recreate_with_one_vcpu(vm);
-		vcpu_set_cpuid(vm, vcpu->id, kvm_get_supported_cpuid());
+		vcpu_set_cpuid(vcpu, kvm_get_supported_cpuid());
-		vcpu_load_state(vm, vcpu->id, state);
+		vcpu_load_state(vcpu, state);
 		run = vcpu->run;
 		kvm_x86_state_cleanup(state);
 		memset(&regs2, 0, sizeof(regs2));
-		vcpu_regs_get(vm, vcpu->id, &regs2);
+		vcpu_regs_get(vcpu, &regs2);
 		TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
 			    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
 			    (ulong) regs2.rdi, (ulong) regs2.rsi);
...
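The save/restore sequence above is the standard selftests pattern for exercising KVM_GET/SET state across a VM teardown; distilled with the new vCPU-first API (all identifiers taken from the hunks in this commit) it reads:

	struct kvm_x86_state *state;
	struct kvm_regs regs1, regs2;

	state = vcpu_save_state(vcpu);
	vcpu_regs_get(vcpu, &regs1);

	kvm_vm_release(vm);

	/* Restore into a freshly recreated VM with a single vCPU. */
	vcpu = vm_recreate_with_one_vcpu(vm);
	vcpu_load_state(vcpu, state);
	kvm_x86_state_cleanup(state);

	vcpu_regs_get(vcpu, &regs2);
	TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
		    "GPRs changed across save/restore");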
@@ -120,9 +120,9 @@ static void run_vcpu(struct kvm_vcpu *vcpu, int stage)
 {
 	struct ucall uc;
-	vcpu_run(vcpu->vm, vcpu->id);
+	vcpu_run(vcpu);
-	switch (get_ucall(vcpu->vm, vcpu->id, &uc)) {
+	switch (get_ucall(vcpu, &uc)) {
 	case UCALL_SYNC:
 		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
 			    uc.args[1] == stage + 1,
@@ -159,14 +159,14 @@ static void set_cpuid_after_run(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid)
 	u32 eax, ebx, x;
 	/* Setting unmodified CPUID is allowed */
-	rc = __vcpu_set_cpuid(vcpu->vm, vcpu->id, cpuid);
+	rc = __vcpu_set_cpuid(vcpu, cpuid);
 	TEST_ASSERT(!rc, "Setting unmodified CPUID after KVM_RUN failed: %d", rc);
 	/* Changing CPU features is forbidden */
 	ent = get_cpuid(cpuid, 0x7, 0);
 	ebx = ent->ebx;
 	ent->ebx--;
-	rc = __vcpu_set_cpuid(vcpu->vm, vcpu->id, cpuid);
+	rc = __vcpu_set_cpuid(vcpu, cpuid);
 	TEST_ASSERT(rc, "Changing CPU features should fail");
 	ent->ebx = ebx;
@@ -175,7 +175,7 @@ static void set_cpuid_after_run(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid)
 	eax = ent->eax;
 	x = eax & 0xff;
 	ent->eax = (eax & ~0xffu) | (x - 1);
-	rc = __vcpu_set_cpuid(vcpu->vm, vcpu->id, cpuid);
+	rc = __vcpu_set_cpuid(vcpu, cpuid);
 	TEST_ASSERT(rc, "Changing MAXPHYADDR should fail");
 	ent->eax = eax;
 }
@@ -191,13 +191,13 @@ int main(void)
 	vm = vm_create_with_one_vcpu(&vcpu, guest_main);
 	supp_cpuid = kvm_get_supported_cpuid();
-	cpuid2 = vcpu_get_cpuid(vm, vcpu->id);
+	cpuid2 = vcpu_get_cpuid(vcpu);
 	compare_cpuids(supp_cpuid, cpuid2);
 	vcpu_alloc_cpuid(vm, &cpuid_gva, cpuid2);
-	vcpu_args_set(vm, vcpu->id, 1, cpuid_gva);
+	vcpu_args_set(vcpu, 1, cpuid_gva);
 	for (stage = 0; stage < 3; stage++)
 		run_vcpu(vcpu, stage);
...
@@ -82,19 +82,19 @@ int main(int argc, char *argv[])
 	run = vcpu->run;
 	while (1) {
-		vcpu_run(vm, vcpu->id);
+		vcpu_run(vcpu);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 			    "Unexpected exit reason: %u (%s),\n",
 			    run->exit_reason,
 			    exit_reason_str(run->exit_reason));
-		switch (get_ucall(vm, vcpu->id, &uc)) {
+		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_SYNC:
 			/* emulate hypervisor clearing CR4.OSXSAVE */
-			vcpu_sregs_get(vm, vcpu->id, &sregs);
+			vcpu_sregs_get(vcpu, &sregs);
 			sregs.cr4 &= ~X86_CR4_OSXSAVE;
-			vcpu_sregs_set(vm, vcpu->id, &sregs);
+			vcpu_sregs_set(vcpu, &sregs);
 			break;
 		case UCALL_ABORT:
 			TEST_FAIL("Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit.");
...
@@ -70,9 +70,9 @@ static void vcpu_skip_insn(struct kvm_vcpu *vcpu, int insn_len)
 {
 	struct kvm_regs regs;
-	vcpu_regs_get(vcpu->vm, vcpu->id, &regs);
+	vcpu_regs_get(vcpu, &regs);
 	regs.rip += insn_len;
-	vcpu_regs_set(vcpu->vm, vcpu->id, &regs);
+	vcpu_regs_set(vcpu, &regs);
 }
 int main(void)
@@ -106,8 +106,8 @@ int main(void)
 	/* Test software BPs - int3 */
 	memset(&debug, 0, sizeof(debug));
 	debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
-	vcpu_guest_debug_set(vm, vcpu->id, &debug);
+	vcpu_guest_debug_set(vcpu, &debug);
-	vcpu_run(vm, vcpu->id);
+	vcpu_run(vcpu);
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
 		    run->debug.arch.exception == BP_VECTOR &&
 		    run->debug.arch.pc == CAST_TO_RIP(sw_bp),
@@ -122,8 +122,8 @@ int main(void)
 		debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
 		debug.arch.debugreg[i] = CAST_TO_RIP(hw_bp);
 		debug.arch.debugreg[7] = 0x400 | (1UL << (2*i+1));
-		vcpu_guest_debug_set(vm, vcpu->id, &debug);
+		vcpu_guest_debug_set(vcpu, &debug);
-		vcpu_run(vm, vcpu->id);
+		vcpu_run(vcpu);
 		target_dr6 = 0xffff0ff0 | (1UL << i);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
 			    run->debug.arch.exception == DB_VECTOR &&
@@ -145,8 +145,8 @@ int main(void)
 		debug.arch.debugreg[i] = CAST_TO_RIP(guest_value);
 		debug.arch.debugreg[7] = 0x00000400 | (1UL << (2*i+1)) |
 					 (0x000d0000UL << (4*i));
-		vcpu_guest_debug_set(vm, vcpu->id, &debug);
+		vcpu_guest_debug_set(vcpu, &debug);
-		vcpu_run(vm, vcpu->id);
+		vcpu_run(vcpu);
 		target_dr6 = 0xffff0ff0 | (1UL << i);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
 			    run->debug.arch.exception == DB_VECTOR &&
@@ -172,8 +172,8 @@ int main(void)
 		debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP |
 				KVM_GUESTDBG_BLOCKIRQ;
 		debug.arch.debugreg[7] = 0x00000400;
-		vcpu_guest_debug_set(vm, vcpu->id, &debug);
+		vcpu_guest_debug_set(vcpu, &debug);
-		vcpu_run(vm, vcpu->id);
+		vcpu_run(vcpu);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
 			    run->debug.arch.exception == DB_VECTOR &&
 			    run->debug.arch.pc == target_rip &&
@@ -189,8 +189,8 @@ int main(void)
 	memset(&debug, 0, sizeof(debug));
 	debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
 	debug.arch.debugreg[7] = 0x400 | DR7_GD;
-	vcpu_guest_debug_set(vm, vcpu->id, &debug);
+	vcpu_guest_debug_set(vcpu, &debug);
-	vcpu_run(vm, vcpu->id);
+	vcpu_run(vcpu);
 	target_dr6 = 0xffff0ff0 | DR6_BD;
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
 		    run->debug.arch.exception == DB_VECTOR &&
@@ -204,11 +204,11 @@ int main(void)
 	/* Disable all debug controls, run to the end */
 	memset(&debug, 0, sizeof(debug));
-	vcpu_guest_debug_set(vm, vcpu->id, &debug);
+	vcpu_guest_debug_set(vcpu, &debug);
-	vcpu_run(vm, vcpu->id);
+	vcpu_run(vcpu);
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, "KVM_EXIT_IO");
-	cmd = get_ucall(vm, vcpu->id, &uc);
+	cmd = get_ucall(vcpu, &uc);
 	TEST_ASSERT(cmd == UCALL_DONE, "UCALL_DONE");
 	kvm_vm_free(vm);
...
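The raw DR7 constants in the hunks above encode the standard x86 debug-register layout; for reference (architectural facts, not something this patch changes):

	/*
	 * DR7 bits used by the test above:
	 *   0x400                - bit 10, architecturally reserved, always set
	 *   1UL << (2*i + 1)     - Gi, global enable for hardware breakpoint i
	 *   0x000d0000 << (4*i)  - R/Wi = 01 (break on data writes) and
	 *                          LENi = 11 (4-byte wide) for breakpoint i
	 *   DR7_GD               - bit 13, general detect: raise #DB on any
	 *                          MOV to/from a debug register
	 */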
@@ -83,9 +83,9 @@ static void process_exit_on_emulation_error(struct kvm_vcpu *vcpu)
 			 * contained an flds instruction that is 2 bytes in
 			 * length (i.e., no prefix, no SIB, no displacement).
 			 */
-			vcpu_regs_get(vcpu->vm, vcpu->id, &regs);
+			vcpu_regs_get(vcpu, &regs);
 			regs.rip += 2;
-			vcpu_regs_set(vcpu->vm, vcpu->id, &regs);
+			vcpu_regs_set(vcpu, &regs);
 		}
 	}
 }
@@ -101,7 +101,7 @@ static void check_for_guest_assert(struct kvm_vcpu *vcpu)
 	struct ucall uc;
 	if (vcpu->run->exit_reason == KVM_EXIT_IO &&
-	    get_ucall(vcpu->vm, vcpu->id, &uc) == UCALL_ABORT) {
+	    get_ucall(vcpu, &uc) == UCALL_ABORT) {
 		do_guest_assert(&uc);
 	}
 }
@@ -118,7 +118,7 @@ static void process_ucall_done(struct kvm_vcpu *vcpu)
 		    run->exit_reason,
 		    exit_reason_str(run->exit_reason));
-	TEST_ASSERT(get_ucall(vcpu->vm, vcpu->id, &uc) == UCALL_DONE,
+	TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_DONE,
 		    "Unexpected ucall command: %lu, expected UCALL_DONE (%d)",
 		    uc.cmd, UCALL_DONE);
 }
@@ -133,7 +133,7 @@ static uint64_t process_ucall(struct kvm_vcpu *vcpu)
 		    run->exit_reason,
 		    exit_reason_str(run->exit_reason));
-	switch (get_ucall(vcpu->vm, vcpu->id, &uc)) {
+	switch (get_ucall(vcpu, &uc)) {
 	case UCALL_SYNC:
 		break;
 	case UCALL_ABORT:
@@ -175,7 +175,7 @@ int main(int argc, char *argv[])
 	entry->eax = (entry->eax & 0xffffff00) | MAXPHYADDR;
 	set_cpuid(cpuid, entry);
-	vcpu_set_cpuid(vm, vcpu->id, cpuid);
+	vcpu_set_cpuid(vcpu, cpuid);
 	rc = kvm_check_cap(KVM_CAP_EXIT_ON_EMULATION_FAILURE);
 	TEST_ASSERT(rc, "KVM_CAP_EXIT_ON_EMULATION_FAILURE is unavailable");
@@ -190,12 +190,12 @@ int main(int argc, char *argv[])
 	virt_map(vm, MEM_REGION_GVA, MEM_REGION_GPA, 1);
 	hva = addr_gpa2hva(vm, MEM_REGION_GPA);
 	memset(hva, 0, PAGE_SIZE);
-	pte = vm_get_page_table_entry(vm, vcpu->id, MEM_REGION_GVA);
+	pte = vm_get_page_table_entry(vm, vcpu, MEM_REGION_GVA);
-	vm_set_page_table_entry(vm, vcpu->id, MEM_REGION_GVA, pte | (1ull << 36));
+	vm_set_page_table_entry(vm, vcpu, MEM_REGION_GVA, pte | (1ull << 36));
-	vcpu_run(vm, vcpu->id);
+	vcpu_run(vcpu);
 	process_exit_on_emulation_error(vcpu);
-	vcpu_run(vm, vcpu->id);
+	vcpu_run(vcpu);
 	TEST_ASSERT(process_ucall(vcpu) == UCALL_DONE, "Expected UCALL_DONE");
...
@@ -161,12 +161,12 @@ void inject_nmi(struct kvm_vcpu *vcpu)
 {
 	struct kvm_vcpu_events events;
-	vcpu_events_get(vcpu->vm, vcpu->id, &events);
+	vcpu_events_get(vcpu, &events);
 	events.nmi.pending = 1;
 	events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
-	vcpu_events_set(vcpu->vm, vcpu->id, &events);
+	vcpu_events_set(vcpu, &events);
 }
 static struct kvm_vcpu *save_restore_vm(struct kvm_vm *vm,
@@ -175,21 +175,21 @@ static struct kvm_vcpu *save_restore_vm(struct kvm_vm *vm,
 	struct kvm_regs regs1, regs2;
 	struct kvm_x86_state *state;
-	state = vcpu_save_state(vm, vcpu->id);
+	state = vcpu_save_state(vcpu);
 	memset(&regs1, 0, sizeof(regs1));
-	vcpu_regs_get(vm, vcpu->id, &regs1);
+	vcpu_regs_get(vcpu, &regs1);
 	kvm_vm_release(vm);
 	/* Restore state in a new VM. */
 	vcpu = vm_recreate_with_one_vcpu(vm);
-	vcpu_set_hv_cpuid(vm, vcpu->id);
-	vcpu_enable_evmcs(vm, vcpu->id);
-	vcpu_load_state(vm, vcpu->id, state);
+	vcpu_set_hv_cpuid(vcpu);
+	vcpu_enable_evmcs(vcpu);
+	vcpu_load_state(vcpu, state);
 	kvm_x86_state_cleanup(state);
 	memset(&regs2, 0, sizeof(regs2));
-	vcpu_regs_get(vm, vcpu->id, &regs2);
+	vcpu_regs_get(vcpu, &regs2);
 	TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
 		    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
 		    (ulong) regs2.rdi, (ulong) regs2.rsi);
@@ -215,14 +215,14 @@ int main(int argc, char *argv[])
 		exit(KSFT_SKIP);
 	}
-	vcpu_set_hv_cpuid(vm, vcpu->id);
-	vcpu_enable_evmcs(vm, vcpu->id);
+	vcpu_set_hv_cpuid(vcpu);
+	vcpu_enable_evmcs(vcpu);
 	vcpu_alloc_vmx(vm, &vmx_pages_gva);
-	vcpu_args_set(vm, vcpu->id, 1, vmx_pages_gva);
+	vcpu_args_set(vcpu, 1, vmx_pages_gva);
 	vm_init_descriptor_tables(vm);
-	vcpu_init_descriptor_tables(vm, vcpu->id);
+	vcpu_init_descriptor_tables(vcpu);
 	vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
 	vm_install_exception_handler(vm, NMI_VECTOR, guest_nmi_handler);
@@ -231,13 +231,13 @@ int main(int argc, char *argv[])
 	for (stage = 1;; stage++) {
 		run = vcpu->run;
-		vcpu_run(vm, vcpu->id);
+		vcpu_run(vcpu);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 			    "Stage %d: unexpected exit reason: %u (%s),\n",
 			    stage, run->exit_reason,
 			    exit_reason_str(run->exit_reason));
-		switch (get_ucall(vm, vcpu->id, &uc)) {
+		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_ABORT:
 			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
 				  __FILE__, uc.args[1]);
......
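The hunks above are all instances of one mechanical rule: every vCPU-scoped helper that used to take a (vm, vcpu->id) pair now takes the vCPU handle itself. A minimal sketch of why the pair becomes redundant once the handle carries its own file descriptor; the names here are illustrative, not the selftest library's actual internals:

/*
 * Illustrative sketch only: a vCPU handle that owns its fd makes the
 * (vm, vcpu_id) lookup inside every helper unnecessary.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

struct sketch_vcpu {
	int fd;			/* from ioctl(vm_fd, KVM_CREATE_VCPU, id) */
	struct kvm_run *run;	/* mmap'd kvm_run region for this vCPU */
};

static inline int sketch_vcpu_events_get(struct sketch_vcpu *vcpu,
					 struct kvm_vcpu_events *events)
{
	/* The ioctl goes straight to the vCPU fd; no lookup by id. */
	return ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, events);
}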
@@ -95,7 +95,7 @@ static void guest_main(void)
 static void setup_ud_vector(struct kvm_vcpu *vcpu)
 {
 	vm_init_descriptor_tables(vcpu->vm);
-	vcpu_init_descriptor_tables(vcpu->vm, vcpu->id);
+	vcpu_init_descriptor_tables(vcpu);
 	vm_install_exception_handler(vcpu->vm, UD_VECTOR, guest_ud_handler);
 }
@@ -104,8 +104,8 @@ static void enter_guest(struct kvm_vcpu *vcpu)
 	struct kvm_run *run = vcpu->run;
 	struct ucall uc;
-	vcpu_run(vcpu->vm, vcpu->id);
+	vcpu_run(vcpu);
-	switch (get_ucall(vcpu->vm, vcpu->id, &uc)) {
+	switch (get_ucall(vcpu, &uc)) {
 	case UCALL_SYNC:
 		pr_info("%s: %016lx\n", (const char *)uc.args[2], uc.args[3]);
 		break;
......
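Most of the remaining hunks convert the same run-until-ucall loop that enter_guest() above uses. A sketch of that loop shape, written against the new vCPU-only signatures this commit introduces (abbreviated; real tests also assert on run->exit_reason before reading the ucall):

static void sketch_run_to_done(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	for (;;) {
		vcpu_run(vcpu);
		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			continue;	/* guest checkpoint, keep going */
		case UCALL_DONE:
			return;		/* guest finished cleanly */
		default:
			TEST_FAIL("Unexpected ucall %lu", uc.cmd);
		}
	}
}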
@@ -178,16 +178,16 @@ static void host_check_tsc_msr_rdtsc(struct kvm_vcpu *vcpu)
 	u64 tsc_freq, r1, r2, t1, t2;
 	s64 delta_ns;
-	tsc_freq = vcpu_get_msr(vcpu->vm, vcpu->id, HV_X64_MSR_TSC_FREQUENCY);
+	tsc_freq = vcpu_get_msr(vcpu, HV_X64_MSR_TSC_FREQUENCY);
 	TEST_ASSERT(tsc_freq > 0, "TSC frequency must be nonzero");
 	/* For increased accuracy, take mean rdtsc() before and afrer ioctl */
 	r1 = rdtsc();
-	t1 = vcpu_get_msr(vcpu->vm, vcpu->id, HV_X64_MSR_TIME_REF_COUNT);
+	t1 = vcpu_get_msr(vcpu, HV_X64_MSR_TIME_REF_COUNT);
 	r1 = (r1 + rdtsc()) / 2;
 	nop_loop();
 	r2 = rdtsc();
-	t2 = vcpu_get_msr(vcpu->vm, vcpu->id, HV_X64_MSR_TIME_REF_COUNT);
+	t2 = vcpu_get_msr(vcpu, HV_X64_MSR_TIME_REF_COUNT);
 	r2 = (r2 + rdtsc()) / 2;
 	TEST_ASSERT(t2 > t1, "Time reference MSR is not monotonic (%ld <= %ld)", t1, t2);
@@ -215,24 +215,24 @@ int main(void)
 	vm = vm_create_with_one_vcpu(&vcpu, guest_main);
 	run = vcpu->run;
-	vcpu_set_hv_cpuid(vm, vcpu->id);
+	vcpu_set_hv_cpuid(vcpu);
 	tsc_page_gva = vm_vaddr_alloc_page(vm);
 	memset(addr_gva2hva(vm, tsc_page_gva), 0x0, getpagesize());
 	TEST_ASSERT((addr_gva2gpa(vm, tsc_page_gva) & (getpagesize() - 1)) == 0,
 		    "TSC page has to be page aligned\n");
-	vcpu_args_set(vm, vcpu->id, 2, tsc_page_gva, addr_gva2gpa(vm, tsc_page_gva));
+	vcpu_args_set(vcpu, 2, tsc_page_gva, addr_gva2gpa(vm, tsc_page_gva));
 	host_check_tsc_msr_rdtsc(vcpu);
 	for (stage = 1;; stage++) {
-		vcpu_run(vm, vcpu->id);
+		vcpu_run(vcpu);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 			    "Stage %d: unexpected exit reason: %u (%s),\n",
 			    stage, run->exit_reason,
 			    exit_reason_str(run->exit_reason));
-		switch (get_ucall(vm, vcpu->id, &uc)) {
+		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_ABORT:
 			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
 				  __FILE__, uc.args[1]);
......
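The host_check_tsc_msr_rdtsc() hunk above reads HV_X64_MSR_TIME_REF_COUNT twice around a busy loop and compares it against elapsed host TSC. The reference counter ticks in 100ns units per the Hyper-V TLFS, so the cross-check reduces to the arithmetic sketched below; the exact tolerance the test applies to delta_ns is not shown in the hunk:

#include <stdint.h>

/* Hedged sketch of the skew computation behind the delta_ns check. */
static int64_t sketch_ref_count_skew_ns(uint64_t t1, uint64_t t2,
					uint64_t r1, uint64_t r2,
					uint64_t tsc_freq)
{
	/* HV_X64_MSR_TIME_REF_COUNT advances in 100ns units (TLFS). */
	int64_t ref_ns = (int64_t)(t2 - t1) * 100;
	/* Convert the elapsed host TSC delta to nanoseconds. */
	int64_t tsc_ns = (int64_t)((r2 - r1) * 1000000000ull / tsc_freq);

	return ref_ns - tsc_ns;	/* should be close to zero */
}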
@@ -119,7 +119,7 @@ void test_hv_cpuid_e2big(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
 	int ret;
 	if (vcpu)
-		ret = __vcpu_ioctl(vm, vcpu->id, KVM_GET_SUPPORTED_HV_CPUID, &cpuid);
+		ret = __vcpu_ioctl(vcpu, KVM_GET_SUPPORTED_HV_CPUID, &cpuid);
 	else
 		ret = __kvm_ioctl(vm_get_kvm_fd(vm), KVM_GET_SUPPORTED_HV_CPUID, &cpuid);
@@ -147,7 +147,7 @@ int main(int argc, char *argv[])
 	/* Test vCPU ioctl version */
 	test_hv_cpuid_e2big(vm, vcpu);
-	hv_cpuid_entries = vcpu_get_supported_hv_cpuid(vm, vcpu->id);
+	hv_cpuid_entries = vcpu_get_supported_hv_cpuid(vcpu);
 	test_hv_cpuid(hv_cpuid_entries, false);
 	free(hv_cpuid_entries);
@@ -156,8 +156,8 @@ int main(int argc, char *argv[])
 		print_skip("Enlightened VMCS is unsupported");
 		goto do_sys;
 	}
-	vcpu_enable_evmcs(vm, vcpu->id);
-	hv_cpuid_entries = vcpu_get_supported_hv_cpuid(vm, vcpu->id);
+	vcpu_enable_evmcs(vcpu);
+	hv_cpuid_entries = vcpu_get_supported_hv_cpuid(vcpu);
 	test_hv_cpuid(hv_cpuid_entries, true);
 	free(hv_cpuid_entries);
......
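test_hv_cpuid_e2big() above exercises both paths of KVM_GET_SUPPORTED_HV_CPUID, vCPU-scoped and system-scoped. A sketch of the probe itself, assuming the documented semantics: with nent too small the ioctl must fail with E2BIG instead of writing past the buffer, leaving nent set to the required count.

#include <errno.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int sketch_probe_e2big(int vcpu_fd)
{
	struct kvm_cpuid2 cpuid = { .nent = 0 };	/* deliberately too small */
	int ret = ioctl(vcpu_fd, KVM_GET_SUPPORTED_HV_CPUID, &cpuid);

	/* Success, or any errno other than E2BIG, would be a KVM bug here. */
	return (ret < 0 && errno == E2BIG) ? 0 : -1;
}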
@@ -161,7 +161,7 @@ static void hv_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
 		    "failed to set HYPERV_CPUID_ENLIGHTMENT_INFO leaf");
 	TEST_ASSERT(set_cpuid(cpuid, dbg),
 		    "failed to set HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES leaf");
-	vcpu_set_cpuid(vcpu->vm, vcpu->id, cpuid);
+	vcpu_set_cpuid(vcpu, cpuid);
 }
 static void guest_test_msrs_access(void)
@@ -191,15 +191,15 @@ static void guest_test_msrs_access(void)
 		memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
 		msr = addr_gva2hva(vm, msr_gva);
-		vcpu_args_set(vm, vcpu->id, 1, msr_gva);
-		vcpu_enable_cap(vm, vcpu->id, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);
-		vcpu_set_hv_cpuid(vm, vcpu->id);
+		vcpu_args_set(vcpu, 1, msr_gva);
+		vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);
+		vcpu_set_hv_cpuid(vcpu);
 		best = kvm_get_supported_hv_cpuid();
 		vm_init_descriptor_tables(vm);
-		vcpu_init_descriptor_tables(vm, vcpu->id);
+		vcpu_init_descriptor_tables(vcpu);
 		vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
 		run = vcpu->run;
@@ -333,7 +333,7 @@ static void guest_test_msrs_access(void)
 			 * Remains unavailable even with KVM_CAP_HYPERV_SYNIC2
 			 * capability enabled and guest visible CPUID bit unset.
 			 */
-			vcpu_enable_cap(vm, vcpu->id, KVM_CAP_HYPERV_SYNIC2, 0);
+			vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_SYNIC2, 0);
 			break;
 		case 22:
 			feat.eax |= HV_MSR_SYNIC_AVAILABLE;
@@ -471,12 +471,12 @@ static void guest_test_msrs_access(void)
 		else
 			pr_debug("Stage %d: finish\n", stage);
-		vcpu_run(vm, vcpu->id);
+		vcpu_run(vcpu);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 			    "unexpected exit reason: %u (%s)",
 			    run->exit_reason, exit_reason_str(run->exit_reason));
-		switch (get_ucall(vm, vcpu->id, &uc)) {
+		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_SYNC:
 			TEST_ASSERT(uc.args[1] == 0,
 				    "Unexpected stage: %ld (0 expected)\n",
@@ -520,7 +520,7 @@ static void guest_test_hcalls_access(void)
 		vm = vm_create_with_one_vcpu(&vcpu, guest_hcall);
 		vm_init_descriptor_tables(vm);
-		vcpu_init_descriptor_tables(vm, vcpu->id);
+		vcpu_init_descriptor_tables(vcpu);
 		vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
 		/* Hypercall input/output */
@@ -531,10 +531,10 @@ static void guest_test_hcalls_access(void)
 		hcall_params = vm_vaddr_alloc_page(vm);
 		memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
-		vcpu_args_set(vm, vcpu->id, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
-		vcpu_enable_cap(vm, vcpu->id, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);
-		vcpu_set_hv_cpuid(vm, vcpu->id);
+		vcpu_args_set(vcpu, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
+		vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);
+		vcpu_set_hv_cpuid(vcpu);
 		best = kvm_get_supported_hv_cpuid();
@@ -641,12 +641,12 @@ static void guest_test_hcalls_access(void)
 		else
 			pr_debug("Stage %d: finish\n", stage);
-		vcpu_run(vm, vcpu->id);
+		vcpu_run(vcpu);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 			    "unexpected exit reason: %u (%s)",
 			    run->exit_reason, exit_reason_str(run->exit_reason));
-		switch (get_ucall(vm, vcpu->id, &uc)) {
+		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_SYNC:
 			TEST_ASSERT(uc.args[1] == 0,
 				    "Unexpected stage: %ld (0 expected)\n",
......
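vcpu_enable_cap() is one of the converted helpers above; per-vCPU capabilities such as KVM_CAP_HYPERV_ENFORCE_CPUID boil down to a KVM_ENABLE_CAP ioctl issued on the vCPU fd. A sketch under that assumption (the library adds error checking):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int sketch_vcpu_enable_cap(int vcpu_fd, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap cap_req = {
		.cap = cap,		/* e.g. KVM_CAP_HYPERV_ENFORCE_CPUID */
		.args = { arg0 },	/* capability-specific argument */
	};

	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap_req);
}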
@@ -133,19 +133,19 @@ int main(int argc, char *argv[])
 	}
 	/* Create VM */
 	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
-	vcpu_set_hv_cpuid(vm, vcpu->id);
+	vcpu_set_hv_cpuid(vcpu);
 	run = vcpu->run;
 	vcpu_alloc_svm(vm, &nested_gva);
-	vcpu_args_set(vm, vcpu->id, 1, nested_gva);
+	vcpu_args_set(vcpu, 1, nested_gva);
 	for (stage = 1;; stage++) {
-		vcpu_run(vm, vcpu->id);
+		vcpu_run(vcpu);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 			    "Stage %d: unexpected exit reason: %u (%s),\n",
 			    stage, run->exit_reason,
 			    exit_reason_str(run->exit_reason));
-		switch (get_ucall(vm, vcpu->id, &uc)) {
+		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_ABORT:
 			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
 				  __FILE__, uc.args[1]);
......
@@ -116,14 +116,14 @@ static void enter_guest(struct kvm_vcpu *vcpu)
 		vm_ioctl(vm, KVM_GET_CLOCK, &start);
-		vcpu_run(vcpu->vm, vcpu->id);
+		vcpu_run(vcpu);
 		vm_ioctl(vm, KVM_GET_CLOCK, &end);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 			    "unexpected exit reason: %u (%s)",
 			    run->exit_reason, exit_reason_str(run->exit_reason));
-		switch (get_ucall(vcpu->vm, vcpu->id, &uc)) {
+		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_SYNC:
 			handle_sync(&uc, &start, &end);
 			break;
@@ -193,7 +193,7 @@ int main(void)
 	pvti_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000);
 	pvti_gpa = addr_gva2gpa(vm, pvti_gva);
-	vcpu_args_set(vm, vcpu->id, 2, pvti_gpa, pvti_gva);
+	vcpu_args_set(vcpu, 2, pvti_gpa, pvti_gva);
 	enter_guest(vcpu);
 	kvm_vm_free(vm);
......
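The enter_guest() hunk above brackets each KVM_RUN with two KVM_GET_CLOCK samples so that whatever the guest observes must fall between the host's start and end readings. A sketch of that bracketing with plain ioctls (the library wraps them with error checks):

#include <sys/ioctl.h>
#include <linux/kvm.h>

static void sketch_bracketed_run(int vm_fd, int vcpu_fd,
				 struct kvm_clock_data *start,
				 struct kvm_clock_data *end)
{
	ioctl(vm_fd, KVM_GET_CLOCK, start);	/* host sample #1 */
	ioctl(vcpu_fd, KVM_RUN, NULL);		/* guest samples in between */
	ioctl(vm_fd, KVM_GET_CLOCK, end);	/* host sample #2 */
}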
@@ -177,12 +177,12 @@ static void enter_guest(struct kvm_vcpu *vcpu)
 	struct ucall uc;
 	while (true) {
-		vcpu_run(vcpu->vm, vcpu->id);
+		vcpu_run(vcpu);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 			    "unexpected exit reason: %u (%s)",
 			    run->exit_reason, exit_reason_str(run->exit_reason));
-		switch (get_ucall(vcpu->vm, vcpu->id, &uc)) {
+		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_PR_MSR:
 			pr_msr(&uc);
 			break;
@@ -211,14 +211,14 @@ int main(void)
 	vm = vm_create_with_one_vcpu(&vcpu, guest_main);
-	vcpu_enable_cap(vm, vcpu->id, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 1);
+	vcpu_enable_cap(vcpu, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 1);
 	best = kvm_get_supported_cpuid();
 	clear_kvm_cpuid_features(best);
-	vcpu_set_cpuid(vm, vcpu->id, best);
+	vcpu_set_cpuid(vcpu, best);
 	vm_init_descriptor_tables(vm);
-	vcpu_init_descriptor_tables(vm, vcpu->id);
+	vcpu_init_descriptor_tables(vcpu);
 	vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
 	enter_guest(vcpu);
......
@@ -35,7 +35,7 @@ static void mmu_role_test(u32 *cpuid_reg, u32 evil_cpuid_val)
 	/* Map 1gb page without a backing memlot. */
 	__virt_pg_map(vm, MMIO_GPA, MMIO_GPA, PG_LEVEL_1G);
-	vcpu_run(vm, vcpu->id);
+	vcpu_run(vcpu);
 	/* Guest access to the 1gb page should trigger MMIO. */
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_MMIO,
@@ -54,7 +54,7 @@ static void mmu_role_test(u32 *cpuid_reg, u32 evil_cpuid_val)
 	 * returns the struct that contains the entry being modified. Eww.
 	 */
 	*cpuid_reg = evil_cpuid_val;
-	vcpu_set_cpuid(vm, vcpu->id, kvm_get_supported_cpuid());
+	vcpu_set_cpuid(vcpu, kvm_get_supported_cpuid());
 	/*
 	 * Add a dummy memslot to coerce KVM into bumping the MMIO generation.
@@ -67,12 +67,12 @@ static void mmu_role_test(u32 *cpuid_reg, u32 evil_cpuid_val)
 	/* Set up a #PF handler to eat the RSVD #PF and signal all done! */
 	vm_init_descriptor_tables(vm);
-	vcpu_init_descriptor_tables(vm, vcpu->id);
+	vcpu_init_descriptor_tables(vcpu);
 	vm_install_exception_handler(vm, PF_VECTOR, guest_pf_handler);
-	vcpu_run(vm, vcpu->id);
-	cmd = get_ucall(vm, vcpu->id, NULL);
+	vcpu_run(vcpu);
+	cmd = get_ucall(vcpu, NULL);
 	TEST_ASSERT(cmd == UCALL_DONE,
 		    "Unexpected guest exit, exit_reason=%s, ucall.cmd = %lu\n",
 		    exit_reason_str(run->exit_reason), cmd);
......
@@ -40,12 +40,12 @@ static void test_msr_platform_info_enabled(struct kvm_vcpu *vcpu)
 	struct ucall uc;
 	vm_enable_cap(vcpu->vm, KVM_CAP_MSR_PLATFORM_INFO, true);
-	vcpu_run(vcpu->vm, vcpu->id);
+	vcpu_run(vcpu);
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 		    "Exit_reason other than KVM_EXIT_IO: %u (%s),\n",
 		    run->exit_reason,
 		    exit_reason_str(run->exit_reason));
-	get_ucall(vcpu->vm, vcpu->id, &uc);
+	get_ucall(vcpu, &uc);
 	TEST_ASSERT(uc.cmd == UCALL_SYNC,
 		    "Received ucall other than UCALL_SYNC: %lu\n", uc.cmd);
 	TEST_ASSERT((uc.args[1] & MSR_PLATFORM_INFO_MAX_TURBO_RATIO) ==
@@ -59,7 +59,7 @@ static void test_msr_platform_info_disabled(struct kvm_vcpu *vcpu)
 	struct kvm_run *run = vcpu->run;
 	vm_enable_cap(vcpu->vm, KVM_CAP_MSR_PLATFORM_INFO, false);
-	vcpu_run(vcpu->vm, vcpu->id);
+	vcpu_run(vcpu);
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
 		    "Exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s)\n",
 		    run->exit_reason,
@@ -84,12 +84,12 @@ int main(int argc, char *argv[])
 	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
-	msr_platform_info = vcpu_get_msr(vm, vcpu->id, MSR_PLATFORM_INFO);
+	msr_platform_info = vcpu_get_msr(vcpu, MSR_PLATFORM_INFO);
-	vcpu_set_msr(vm, vcpu->id, MSR_PLATFORM_INFO,
+	vcpu_set_msr(vcpu, MSR_PLATFORM_INFO,
 		     msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
 	test_msr_platform_info_enabled(vcpu);
 	test_msr_platform_info_disabled(vcpu);
-	vcpu_set_msr(vm, vcpu->id, MSR_PLATFORM_INFO, msr_platform_info);
+	vcpu_set_msr(vcpu, MSR_PLATFORM_INFO, msr_platform_info);
 	kvm_vm_free(vm);
......
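The platform-info hunks above save MSR_PLATFORM_INFO, toggle the turbo-ratio bit, and restore it via vcpu_{get,set}_msr(). Those helpers reduce to KVM_GET_MSRS/KVM_SET_MSRS on the vCPU fd with a one-entry buffer; a sketch under that assumption:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* kvm_msrs ends in a flexible array, so pair it with one entry. */
struct one_msr {
	struct kvm_msrs header;
	struct kvm_msr_entry entry;
};

static uint64_t sketch_vcpu_get_msr(int vcpu_fd, uint32_t index)
{
	struct one_msr buf = {
		.header.nmsrs = 1,
		.entry.index = index,
	};

	ioctl(vcpu_fd, KVM_GET_MSRS, &buf.header);	/* returns #MSRs read */
	return buf.entry.data;
}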
@@ -177,12 +177,12 @@ static uint64_t run_vcpu_to_sync(struct kvm_vcpu *vcpu)
 	struct kvm_run *run = vcpu->run;
 	struct ucall uc;
-	vcpu_run(vcpu->vm, vcpu->id);
+	vcpu_run(vcpu);
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 		    "Exit_reason other than KVM_EXIT_IO: %u (%s)\n",
 		    run->exit_reason,
 		    exit_reason_str(run->exit_reason));
-	get_ucall(vcpu->vm, vcpu->id, &uc);
+	get_ucall(vcpu, &uc);
 	TEST_ASSERT(uc.cmd == UCALL_SYNC,
 		    "Received ucall other than UCALL_SYNC: %lu", uc.cmd);
 	return uc.args[1];
@@ -371,7 +371,7 @@ static void test_pmu_config_disable(void (*guest_code)(void))
 	vcpu = vm_vcpu_add(vm, 0, guest_code);
 	vm_init_descriptor_tables(vm);
-	vcpu_init_descriptor_tables(vm, vcpu->id);
+	vcpu_init_descriptor_tables(vcpu);
 	TEST_ASSERT(!sanity_check_pmu(vcpu),
 		    "Guest should not be able to use disabled PMU.");
@@ -470,7 +470,7 @@ int main(int argc, char *argv[])
 	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 	vm_init_descriptor_tables(vm);
-	vcpu_init_descriptor_tables(vm, vcpu->id);
+	vcpu_init_descriptor_tables(vcpu);
 	if (!sanity_check_pmu(vcpu)) {
 		print_skip("Guest PMU is not functional");
......
@@ -49,9 +49,9 @@ static void run_vcpu(struct kvm_vcpu *vcpu)
 	for (stage = 0; stage < 2; stage++) {
-		vcpu_run(vcpu->vm, vcpu->id);
+		vcpu_run(vcpu);
-		switch (get_ucall(vcpu->vm, vcpu->id, &uc)) {
+		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_SYNC:
 			TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
 				    uc.args[1] == stage + 1,
......
@@ -35,11 +35,11 @@ static void test_cr4_feature_bit(struct kvm_vcpu *vcpu, struct kvm_sregs *orig,
 	memcpy(&sregs, orig, sizeof(sregs));
 	sregs.cr4 |= feature_bit;
-	rc = _vcpu_sregs_set(vcpu->vm, vcpu->id, &sregs);
+	rc = _vcpu_sregs_set(vcpu, &sregs);
 	TEST_ASSERT(rc, "KVM allowed unsupported CR4 bit (0x%lx)", feature_bit);
 	/* Sanity check that KVM didn't change anything. */
-	vcpu_sregs_get(vcpu->vm, vcpu->id, &sregs);
+	vcpu_sregs_get(vcpu, &sregs);
 	TEST_ASSERT(!memcmp(&sregs, orig, sizeof(sregs)), "KVM modified sregs");
 }
@@ -97,15 +97,15 @@ int main(int argc, char *argv[])
 	vm = vm_create_barebones();
 	vcpu = __vm_vcpu_add(vm, 0);
-	vcpu_sregs_get(vm, vcpu->id, &sregs);
+	vcpu_sregs_get(vcpu, &sregs);
 	sregs.cr4 |= calc_cr4_feature_bits(vm);
 	cr4 = sregs.cr4;
-	rc = _vcpu_sregs_set(vm, vcpu->id, &sregs);
+	rc = _vcpu_sregs_set(vcpu, &sregs);
 	TEST_ASSERT(!rc, "Failed to set supported CR4 bits (0x%lx)", cr4);
-	vcpu_sregs_get(vm, vcpu->id, &sregs);
+	vcpu_sregs_get(vcpu, &sregs);
 	TEST_ASSERT(sregs.cr4 == cr4, "sregs.CR4 (0x%llx) != CR4 (0x%lx)",
 		    sregs.cr4, cr4);
@@ -125,13 +125,13 @@ int main(int argc, char *argv[])
 	/* Create a "real" VM and verify APIC_BASE can be set. */
 	vm = vm_create_with_one_vcpu(&vcpu, NULL);
-	vcpu_sregs_get(vm, vcpu->id, &sregs);
+	vcpu_sregs_get(vcpu, &sregs);
 	sregs.apic_base = 1 << 10;
-	rc = _vcpu_sregs_set(vm, vcpu->id, &sregs);
+	rc = _vcpu_sregs_set(vcpu, &sregs);
 	TEST_ASSERT(rc, "Set IA32_APIC_BASE to %llx (invalid)",
 		    sregs.apic_base);
 	sregs.apic_base = 1 << 11;
-	rc = _vcpu_sregs_set(vm, vcpu->id, &sregs);
+	rc = _vcpu_sregs_set(vcpu, &sregs);
 	TEST_ASSERT(!rc, "Couldn't set IA32_APIC_BASE to %llx (valid)",
 		    sregs.apic_base);
......
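test_cr4_feature_bit() above follows a reject-then-verify shape: flip one unsupported CR4 bit, expect the set to fail, then re-read to prove the failure had no side effects. The same probe as raw ioctls; the bit passed in is hypothetical here, the real test derives genuinely unsupported bits from CPUID:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int sketch_cr4_reject_probe(int vcpu_fd, unsigned long bad_bit)
{
	struct kvm_sregs orig, probe;

	ioctl(vcpu_fd, KVM_GET_SREGS, &orig);
	probe = orig;
	probe.cr4 |= bad_bit;

	if (ioctl(vcpu_fd, KVM_SET_SREGS, &probe) == 0)
		return -1;	/* KVM accepted an unsupported bit */

	/* Failure must be side-effect free: state should be unchanged. */
	ioctl(vcpu_fd, KVM_GET_SREGS, &probe);
	return memcmp(&probe, &orig, sizeof(orig)) ? -1 : 0;
}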
@@ -118,12 +118,12 @@ void inject_smi(struct kvm_vcpu *vcpu)
 {
 	struct kvm_vcpu_events events;
-	vcpu_events_get(vcpu->vm, vcpu->id, &events);
+	vcpu_events_get(vcpu, &events);
 	events.smi.pending = 1;
 	events.flags |= KVM_VCPUEVENT_VALID_SMM;
-	vcpu_events_set(vcpu->vm, vcpu->id, &events);
+	vcpu_events_set(vcpu, &events);
 }
 int main(int argc, char *argv[])
@@ -151,7 +151,7 @@ int main(int argc, char *argv[])
 	memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler,
 	       sizeof(smi_handler));
-	vcpu_set_msr(vm, vcpu->id, MSR_IA32_SMBASE, SMRAM_GPA);
+	vcpu_set_msr(vcpu, MSR_IA32_SMBASE, SMRAM_GPA);
 	if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
 		if (nested_svm_supported())
@@ -163,17 +163,17 @@ int main(int argc, char *argv[])
 	if (!nested_gva)
 		pr_info("will skip SMM test with VMX enabled\n");
-	vcpu_args_set(vm, vcpu->id, 1, nested_gva);
+	vcpu_args_set(vcpu, 1, nested_gva);
 	for (stage = 1;; stage++) {
-		vcpu_run(vm, vcpu->id);
+		vcpu_run(vcpu);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 			    "Stage %d: unexpected exit reason: %u (%s),\n",
 			    stage, run->exit_reason,
 			    exit_reason_str(run->exit_reason));
 		memset(&regs, 0, sizeof(regs));
-		vcpu_regs_get(vm, vcpu->id, &regs);
+		vcpu_regs_get(vcpu, &regs);
 		stage_reported = regs.rax & 0xff;
@@ -201,12 +201,12 @@ int main(int argc, char *argv[])
 		if (stage == 10)
 			inject_smi(vcpu);
-		state = vcpu_save_state(vm, vcpu->id);
+		state = vcpu_save_state(vcpu);
 		kvm_vm_release(vm);
 		vcpu = vm_recreate_with_one_vcpu(vm);
-		vcpu_set_cpuid(vm, vcpu->id, kvm_get_supported_cpuid());
-		vcpu_load_state(vm, vcpu->id, state);
+		vcpu_set_cpuid(vcpu, kvm_get_supported_cpuid());
+		vcpu_load_state(vcpu, state);
 		run = vcpu->run;
 		kvm_x86_state_cleanup(state);
 	}
......
@@ -167,7 +167,7 @@ int main(int argc, char *argv[])
 	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 	run = vcpu->run;
-	vcpu_regs_get(vm, vcpu->id, &regs1);
+	vcpu_regs_get(vcpu, &regs1);
 	if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
 		if (nested_svm_supported())
@@ -179,16 +179,16 @@ int main(int argc, char *argv[])
 	if (!nested_gva)
 		pr_info("will skip nested state checks\n");
-	vcpu_args_set(vm, vcpu->id, 1, nested_gva);
+	vcpu_args_set(vcpu, 1, nested_gva);
 	for (stage = 1;; stage++) {
-		vcpu_run(vm, vcpu->id);
+		vcpu_run(vcpu);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 			    "Stage %d: unexpected exit reason: %u (%s),\n",
 			    stage, run->exit_reason,
 			    exit_reason_str(run->exit_reason));
-		switch (get_ucall(vm, vcpu->id, &uc)) {
+		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_ABORT:
 			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
 				  __FILE__, uc.args[1]);
@@ -206,21 +206,21 @@ int main(int argc, char *argv[])
 			    uc.args[1] == stage, "Stage %d: Unexpected register values vmexit, got %lx",
 			    stage, (ulong)uc.args[1]);
-		state = vcpu_save_state(vm, vcpu->id);
+		state = vcpu_save_state(vcpu);
 		memset(&regs1, 0, sizeof(regs1));
-		vcpu_regs_get(vm, vcpu->id, &regs1);
+		vcpu_regs_get(vcpu, &regs1);
 		kvm_vm_release(vm);
 		/* Restore state in a new VM. */
 		vcpu = vm_recreate_with_one_vcpu(vm);
-		vcpu_set_cpuid(vm, vcpu->id, kvm_get_supported_cpuid());
-		vcpu_load_state(vm, vcpu->id, state);
+		vcpu_set_cpuid(vcpu, kvm_get_supported_cpuid());
+		vcpu_load_state(vcpu, state);
 		run = vcpu->run;
 		kvm_x86_state_cleanup(state);
 		memset(&regs2, 0, sizeof(regs2));
-		vcpu_regs_get(vm, vcpu->id, &regs2);
+		vcpu_regs_get(vcpu, &regs2);
 		TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
 			    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
 			    (ulong) regs2.rdi, (ulong) regs2.rsi);
......
@@ -95,23 +95,23 @@ int main(int argc, char *argv[])
 	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
 	vm_init_descriptor_tables(vm);
-	vcpu_init_descriptor_tables(vm, vcpu->id);
+	vcpu_init_descriptor_tables(vcpu);
 	vm_install_exception_handler(vm, VINTR_IRQ_NUMBER, vintr_irq_handler);
 	vm_install_exception_handler(vm, INTR_IRQ_NUMBER, intr_irq_handler);
 	vcpu_alloc_svm(vm, &svm_gva);
-	vcpu_args_set(vm, vcpu->id, 1, svm_gva);
+	vcpu_args_set(vcpu, 1, svm_gva);
 	run = vcpu->run;
-	vcpu_run(vm, vcpu->id);
+	vcpu_run(vcpu);
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 		    "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
 		    run->exit_reason,
 		    exit_reason_str(run->exit_reason));
-	switch (get_ucall(vm, vcpu->id, &uc)) {
+	switch (get_ucall(vcpu, &uc)) {
 	case UCALL_ABORT:
 		TEST_FAIL("%s", (const char *)uc.args[0]);
 		break;
......
@@ -145,7 +145,7 @@ static void run_test(bool is_nmi)
 	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
 	vm_init_descriptor_tables(vm);
-	vcpu_init_descriptor_tables(vm, vcpu->id);
+	vcpu_init_descriptor_tables(vcpu);
 	vm_install_exception_handler(vm, NMI_VECTOR, guest_nmi_handler);
 	vm_install_exception_handler(vm, BP_VECTOR, guest_bp_handler);
@@ -163,23 +163,23 @@ static void run_test(bool is_nmi)
 	} else {
 		idt_alt_vm = 0;
 	}
-	vcpu_args_set(vm, vcpu->id, 3, svm_gva, (uint64_t)is_nmi, (uint64_t)idt_alt_vm);
+	vcpu_args_set(vcpu, 3, svm_gva, (uint64_t)is_nmi, (uint64_t)idt_alt_vm);
 	memset(&debug, 0, sizeof(debug));
-	vcpu_guest_debug_set(vm, vcpu->id, &debug);
+	vcpu_guest_debug_set(vcpu, &debug);
 	struct kvm_run *run = vcpu->run;
 	struct ucall uc;
 	alarm(2);
-	vcpu_run(vm, vcpu->id);
+	vcpu_run(vcpu);
 	alarm(0);
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 		    "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
 		    run->exit_reason,
 		    exit_reason_str(run->exit_reason));
-	switch (get_ucall(vm, vcpu->id, &uc)) {
+	switch (get_ucall(vcpu, &uc)) {
 	case UCALL_ABORT:
 		TEST_FAIL("%s at %s:%ld, vals = 0x%lx 0x%lx 0x%lx", (const char *)uc.args[0],
 			  __FILE__, uc.args[1], uc.args[2], uc.args[3], uc.args[4]);
......
@@ -44,19 +44,19 @@ int main(int argc, char *argv[])
 	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
 	vcpu_alloc_svm(vm, &svm_gva);
-	vcpu_args_set(vm, vcpu->id, 1, svm_gva);
+	vcpu_args_set(vcpu, 1, svm_gva);
 	for (;;) {
 		volatile struct kvm_run *run = vcpu->run;
 		struct ucall uc;
-		vcpu_run(vm, vcpu->id);
+		vcpu_run(vcpu);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 			    "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
 			    run->exit_reason,
 			    exit_reason_str(run->exit_reason));
-		switch (get_ucall(vm, vcpu->id, &uc)) {
+		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_ABORT:
 			TEST_FAIL("%s", (const char *)uc.args[0]);
 			/* NOT REACHED */
......
@@ -109,14 +109,14 @@ int main(int argc, char *argv[])
 	/* Request reading invalid register set from VCPU. */
 	run->kvm_valid_regs = INVALID_SYNC_FIELD;
-	rv = _vcpu_run(vm, vcpu->id);
+	rv = _vcpu_run(vcpu);
 	TEST_ASSERT(rv < 0 && errno == EINVAL,
 		    "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
 		    rv);
 	run->kvm_valid_regs = 0;
 	run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
-	rv = _vcpu_run(vm, vcpu->id);
+	rv = _vcpu_run(vcpu);
 	TEST_ASSERT(rv < 0 && errno == EINVAL,
 		    "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
 		    rv);
@@ -124,14 +124,14 @@ int main(int argc, char *argv[])
 	/* Request setting invalid register set into VCPU. */
 	run->kvm_dirty_regs = INVALID_SYNC_FIELD;
-	rv = _vcpu_run(vm, vcpu->id);
+	rv = _vcpu_run(vcpu);
 	TEST_ASSERT(rv < 0 && errno == EINVAL,
 		    "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
 		    rv);
 	run->kvm_dirty_regs = 0;
 	run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
-	rv = _vcpu_run(vm, vcpu->id);
+	rv = _vcpu_run(vcpu);
 	TEST_ASSERT(rv < 0 && errno == EINVAL,
 		    "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
 		    rv);
@@ -140,19 +140,19 @@ int main(int argc, char *argv[])
 	/* Request and verify all valid register sets. */
 	/* TODO: BUILD TIME CHECK: TEST_ASSERT(KVM_SYNC_X86_NUM_FIELDS != 3); */
 	run->kvm_valid_regs = TEST_SYNC_FIELDS;
-	rv = _vcpu_run(vm, vcpu->id);
+	rv = _vcpu_run(vcpu);
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 		    "Unexpected exit reason: %u (%s),\n",
 		    run->exit_reason,
 		    exit_reason_str(run->exit_reason));
-	vcpu_regs_get(vm, vcpu->id, &regs);
+	vcpu_regs_get(vcpu, &regs);
 	compare_regs(&regs, &run->s.regs.regs);
-	vcpu_sregs_get(vm, vcpu->id, &sregs);
+	vcpu_sregs_get(vcpu, &sregs);
 	compare_sregs(&sregs, &run->s.regs.sregs);
-	vcpu_events_get(vm, vcpu->id, &events);
+	vcpu_events_get(vcpu, &events);
 	compare_vcpu_events(&events, &run->s.regs.events);
 	/* Set and verify various register values. */
@@ -162,7 +162,7 @@ int main(int argc, char *argv[])
 	run->kvm_valid_regs = TEST_SYNC_FIELDS;
 	run->kvm_dirty_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS;
-	rv = _vcpu_run(vm, vcpu->id);
+	rv = _vcpu_run(vcpu);
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 		    "Unexpected exit reason: %u (%s),\n",
 		    run->exit_reason,
@@ -174,13 +174,13 @@ int main(int argc, char *argv[])
 		    "apic_base sync regs value incorrect 0x%llx.",
 		    run->s.regs.sregs.apic_base);
-	vcpu_regs_get(vm, vcpu->id, &regs);
+	vcpu_regs_get(vcpu, &regs);
 	compare_regs(&regs, &run->s.regs.regs);
-	vcpu_sregs_get(vm, vcpu->id, &sregs);
+	vcpu_sregs_get(vcpu, &sregs);
 	compare_sregs(&sregs, &run->s.regs.sregs);
-	vcpu_events_get(vm, vcpu->id, &events);
+	vcpu_events_get(vcpu, &events);
 	compare_vcpu_events(&events, &run->s.regs.events);
 	/* Clear kvm_dirty_regs bits, verify new s.regs values are
@@ -189,7 +189,7 @@ int main(int argc, char *argv[])
 	run->kvm_valid_regs = TEST_SYNC_FIELDS;
 	run->kvm_dirty_regs = 0;
 	run->s.regs.regs.rbx = 0xDEADBEEF;
-	rv = _vcpu_run(vm, vcpu->id);
+	rv = _vcpu_run(vcpu);
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 		    "Unexpected exit reason: %u (%s),\n",
 		    run->exit_reason,
@@ -206,8 +206,8 @@ int main(int argc, char *argv[])
 	run->kvm_dirty_regs = 0;
 	run->s.regs.regs.rbx = 0xAAAA;
 	regs.rbx = 0xBAC0;
-	vcpu_regs_set(vm, vcpu->id, &regs);
-	rv = _vcpu_run(vm, vcpu->id);
+	vcpu_regs_set(vcpu, &regs);
+	rv = _vcpu_run(vcpu);
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 		    "Unexpected exit reason: %u (%s),\n",
 		    run->exit_reason,
@@ -215,7 +215,7 @@ int main(int argc, char *argv[])
 	TEST_ASSERT(run->s.regs.regs.rbx == 0xAAAA,
 		    "rbx sync regs value incorrect 0x%llx.",
 		    run->s.regs.regs.rbx);
-	vcpu_regs_get(vm, vcpu->id, &regs);
+	vcpu_regs_get(vcpu, &regs);
 	TEST_ASSERT(regs.rbx == 0xBAC0 + 1,
 		    "rbx guest value incorrect 0x%llx.",
 		    regs.rbx);
@@ -227,7 +227,7 @@ int main(int argc, char *argv[])
 	run->kvm_valid_regs = 0;
 	run->kvm_dirty_regs = TEST_SYNC_FIELDS;
 	run->s.regs.regs.rbx = 0xBBBB;
-	rv = _vcpu_run(vm, vcpu->id);
+	rv = _vcpu_run(vcpu);
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 		    "Unexpected exit reason: %u (%s),\n",
 		    run->exit_reason,
@@ -235,7 +235,7 @@ int main(int argc, char *argv[])
 	TEST_ASSERT(run->s.regs.regs.rbx == 0xBBBB,
 		    "rbx sync regs value incorrect 0x%llx.",
 		    run->s.regs.regs.rbx);
-	vcpu_regs_get(vm, vcpu->id, &regs);
+	vcpu_regs_get(vcpu, &regs);
 	TEST_ASSERT(regs.rbx == 0xBBBB + 1,
 		    "rbx guest value incorrect 0x%llx.",
 		    regs.rbx);
......
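The sync_regs hunks above drive the kvm_valid_regs/kvm_dirty_regs handshake from both directions. A sketch of those two directions against a mapped x86 kvm_run (illustrative, not the test itself):

#include <sys/ioctl.h>
#include <linux/kvm.h>

static void sketch_sync_regs(int vcpu_fd, struct kvm_run *run)
{
	/* Host -> KVM: ask KVM to publish registers on every exit. */
	run->kvm_valid_regs = KVM_SYNC_X86_REGS;
	ioctl(vcpu_fd, KVM_RUN, NULL);
	/* run->s.regs.regs now mirrors KVM_GET_REGS, no extra ioctl. */

	/* Host -> guest: mark fields dirty so KVM loads them on entry. */
	run->s.regs.regs.rbx = 0xBBBB;
	run->kvm_dirty_regs = KVM_SYNC_X86_REGS;
	ioctl(vcpu_fd, KVM_RUN, NULL);
}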
@@ -61,8 +61,8 @@ int main(void)
 	run = vcpu->run;
 	vcpu_alloc_vmx(vm, &vmx_pages_gva);
-	vcpu_args_set(vm, vcpu->id, 1, vmx_pages_gva);
-	vcpu_run(vm, vcpu->id);
+	vcpu_args_set(vcpu, 1, vmx_pages_gva);
+	vcpu_run(vcpu);
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 		    "Expected KVM_EXIT_IO, got: %u (%s)\n",
@@ -70,21 +70,21 @@ int main(void)
 	TEST_ASSERT(run->io.port == ARBITRARY_IO_PORT,
 		    "Expected IN from port %d from L2, got port %d",
 		    ARBITRARY_IO_PORT, run->io.port);
-	vcpu_events_get(vm, vcpu->id, &events);
+	vcpu_events_get(vcpu, &events);
 	events.flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
 	events.triple_fault.pending = true;
-	vcpu_events_set(vm, vcpu->id, &events);
+	vcpu_events_set(vcpu, &events);
 	run->immediate_exit = true;
-	vcpu_run_complete_io(vm, vcpu->id);
-	vcpu_events_get(vm, vcpu->id, &events);
+	vcpu_run_complete_io(vcpu);
+	vcpu_events_get(vcpu, &events);
 	TEST_ASSERT(events.flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT,
 		    "Triple fault event invalid");
 	TEST_ASSERT(events.triple_fault.pending,
 		    "No triple fault pending");
-	vcpu_run(vm, vcpu->id);
-	switch (get_ucall(vm, vcpu->id, &uc)) {
+	vcpu_run(vcpu);
+	switch (get_ucall(vcpu, &uc)) {
 	case UCALL_DONE:
 		break;
 	case UCALL_ABORT:
......
@@ -14,7 +14,7 @@
 #define GUEST_STEP (UNITY * 4)
 #define ROUND(x) ((x + UNITY / 2) & -UNITY)
 #define rounded_rdmsr(x) ROUND(rdmsr(x))
-#define rounded_host_rdmsr(x) ROUND(vcpu_get_msr(vm, vcpu->id, x))
+#define rounded_host_rdmsr(x) ROUND(vcpu_get_msr(vcpu, x))
 static void guest_code(void)
 {
@@ -68,9 +68,9 @@ static void run_vcpu(struct kvm_vcpu *vcpu, int stage)
 {
 	struct ucall uc;
-	vcpu_run(vcpu->vm, vcpu->id);
+	vcpu_run(vcpu);
-	switch (get_ucall(vcpu->vm, vcpu->id, &uc)) {
+	switch (get_ucall(vcpu, &uc)) {
 	case UCALL_SYNC:
 		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
 			    uc.args[1] == stage + 1, "Stage %d: Unexpected register values vmexit, got %lx",
@@ -116,18 +116,18 @@ int main(void)
 	 * Host: writes to MSR_IA32_TSC set the host-side offset
 	 * and therefore do not change MSR_IA32_TSC_ADJUST.
	 */
-	vcpu_set_msr(vm, vcpu->id, MSR_IA32_TSC, HOST_ADJUST + val);
+	vcpu_set_msr(vcpu, MSR_IA32_TSC, HOST_ADJUST + val);
 	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
 	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
 	run_vcpu(vcpu, 3);
 	/* Host: writes to MSR_IA32_TSC_ADJUST do not modify the TSC. */
-	vcpu_set_msr(vm, vcpu->id, MSR_IA32_TSC_ADJUST, UNITY * 123456);
+	vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, UNITY * 123456);
 	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
-	ASSERT_EQ(vcpu_get_msr(vm, vcpu->id, MSR_IA32_TSC_ADJUST), UNITY * 123456);
+	ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_TSC_ADJUST), UNITY * 123456);
 	/* Restore previous value. */
-	vcpu_set_msr(vm, vcpu->id, MSR_IA32_TSC_ADJUST, val);
+	vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, val);
 	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
 	ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
......
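The ROUND() macro above makes the ASSERT_EQ comparisons robust against the handful of TSC ticks that elapse between reads. A worked example of the rounding, using a small power of two since UNITY's actual value is not shown in the hunk:

/*
 * ROUND(x) = (x + UNITY / 2) & -UNITY rounds to the nearest multiple of
 * UNITY when UNITY is a power of two, because -UNITY == ~(UNITY - 1).
 *
 * With UNITY = 8 (illustrative only):
 *   ROUND(13) = (13 + 4) & ~7 = 17 & ~7 = 16   (rounds up)
 *   ROUND(11) = (11 + 4) & ~7 = 15 & ~7 =  8   (rounds down)
 */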
@@ -58,7 +58,7 @@ static void *run_vcpu(void *_cpu_nr)
 	if (!first_cpu_done) {
 		first_cpu_done = true;
-		vcpu_set_msr(vm, vcpu->id, MSR_IA32_TSC, TEST_TSC_OFFSET);
+		vcpu_set_msr(vcpu, MSR_IA32_TSC, TEST_TSC_OFFSET);
 	}
 	pthread_spin_unlock(&create_lock);
@@ -67,13 +67,13 @@ static void *run_vcpu(void *_cpu_nr)
 		volatile struct kvm_run *run = vcpu->run;
 		struct ucall uc;
-		vcpu_run(vm, vcpu->id);
+		vcpu_run(vcpu);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 			    "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
 			    run->exit_reason,
 			    exit_reason_str(run->exit_reason));
-		switch (get_ucall(vm, vcpu->id, &uc)) {
+		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_DONE:
 			goto out;
......
@@ -65,14 +65,14 @@ int main(int argc, char *argv[])
 	memset(&regs, 0, sizeof(regs));
 	while (1) {
-		vcpu_run(vm, vcpu->id);
+		vcpu_run(vcpu);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 			    "Unexpected exit reason: %u (%s),\n",
 			    run->exit_reason,
 			    exit_reason_str(run->exit_reason));
-		if (get_ucall(vm, vcpu->id, &uc))
+		if (get_ucall(vcpu, &uc))
 			break;
 		TEST_ASSERT(run->io.port == 0x80,
@@ -85,13 +85,13 @@ int main(int argc, char *argv[])
 		 * scope from a testing perspective as it's not ABI in any way,
 		 * i.e. it really is abusing internal KVM knowledge.
		 */
-		vcpu_regs_get(vm, vcpu->id, &regs);
+		vcpu_regs_get(vcpu, &regs);
 		if (regs.rcx == 2)
 			regs.rcx = 1;
 		if (regs.rcx == 3)
 			regs.rcx = 8192;
 		memset((void *)run + run->io.data_offset, 0xaa, 4096);
-		vcpu_regs_set(vm, vcpu->id, &regs);
+		vcpu_regs_set(vcpu, &regs);
 	}
 	switch (uc.cmd) {
......
@@ -399,7 +399,7 @@ static void check_for_guest_assert(struct kvm_vcpu *vcpu)
 	struct ucall uc;
 	if (vcpu->run->exit_reason == KVM_EXIT_IO &&
-	    get_ucall(vcpu->vm, vcpu->id, &uc) == UCALL_ABORT) {
+	    get_ucall(vcpu, &uc) == UCALL_ABORT) {
 		TEST_FAIL("%s at %s:%ld",
 			  (const char *)uc.args[0], __FILE__, uc.args[1]);
 	}
@@ -483,7 +483,7 @@ static void process_ucall_done(struct kvm_vcpu *vcpu)
 		    run->exit_reason,
 		    exit_reason_str(run->exit_reason));
-	TEST_ASSERT(get_ucall(vcpu->vm, vcpu->id, &uc) == UCALL_DONE,
+	TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_DONE,
 		    "Unexpected ucall command: %lu, expected UCALL_DONE (%d)",
 		    uc.cmd, UCALL_DONE);
 }
@@ -500,7 +500,7 @@ static uint64_t process_ucall(struct kvm_vcpu *vcpu)
 		    run->exit_reason,
 		    exit_reason_str(run->exit_reason));
-	switch (get_ucall(vcpu->vm, vcpu->id, &uc)) {
+	switch (get_ucall(vcpu, &uc)) {
 	case UCALL_SYNC:
 		break;
 	case UCALL_ABORT:
@@ -519,26 +519,26 @@ static uint64_t process_ucall(struct kvm_vcpu *vcpu)
 static void run_guest_then_process_rdmsr(struct kvm_vcpu *vcpu,
					 uint32_t msr_index)
 {
-	vcpu_run(vcpu->vm, vcpu->id);
+	vcpu_run(vcpu);
 	process_rdmsr(vcpu, msr_index);
 }
 static void run_guest_then_process_wrmsr(struct kvm_vcpu *vcpu,
					 uint32_t msr_index)
 {
-	vcpu_run(vcpu->vm, vcpu->id);
+	vcpu_run(vcpu);
 	process_wrmsr(vcpu, msr_index);
 }
 static uint64_t run_guest_then_process_ucall(struct kvm_vcpu *vcpu)
 {
-	vcpu_run(vcpu->vm, vcpu->id);
+	vcpu_run(vcpu);
 	return process_ucall(vcpu);
 }
 static void run_guest_then_process_ucall_done(struct kvm_vcpu *vcpu)
 {
-	vcpu_run(vcpu->vm, vcpu->id);
+	vcpu_run(vcpu);
 	process_ucall_done(vcpu);
 }
@@ -560,7 +560,7 @@ static void test_msr_filter_allow(void)
 	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_allow);
 	vm_init_descriptor_tables(vm);
-	vcpu_init_descriptor_tables(vm, vcpu->id);
+	vcpu_init_descriptor_tables(vcpu);
 	vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
@@ -577,7 +577,7 @@ static void test_msr_filter_allow(void)
 	run_guest_then_process_rdmsr(vcpu, MSR_NON_EXISTENT);
 	vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
-	vcpu_run(vm, vcpu->id);
+	vcpu_run(vcpu);
 	vm_install_exception_handler(vm, UD_VECTOR, NULL);
 	if (process_ucall(vcpu) != UCALL_DONE) {
@@ -608,7 +608,7 @@ static int handle_ucall(struct kvm_vcpu *vcpu)
 {
 	struct ucall uc;
-	switch (get_ucall(vcpu->vm, vcpu->id, &uc)) {
+	switch (get_ucall(vcpu, &uc)) {
 	case UCALL_ABORT:
 		TEST_FAIL("Guest assertion not met");
 		break;
@@ -684,7 +684,7 @@ static void test_msr_filter_deny(void)
 	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_deny);
 	while (1) {
-		vcpu_run(vm, vcpu->id);
+		vcpu_run(vcpu);
 		switch (run->exit_reason) {
 		case KVM_EXIT_X86_RDMSR:
......
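test_msr_filter_allow() and test_msr_filter_deny() above install VM-scoped filters through KVM_X86_SET_MSR_FILTER before running the vCPU. A minimal sketch of installing a filter; the filter_allow/filter_deny objects in the test carry real ranges and bitmaps, elided here:

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int sketch_install_default_allow_filter(int vm_fd)
{
	struct kvm_msr_filter filter = {
		.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
		/* .ranges[] left empty: no MSR accesses are intercepted. */
	};

	return ioctl(vm_fd, KVM_X86_SET_MSR_FILTER, &filter);
}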
@@ -95,13 +95,13 @@ int main(int argc, char *argv[])
 	vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
 	prepare_virtualize_apic_accesses(vmx, vm);
-	vcpu_args_set(vm, vcpu->id, 2, vmx_pages_gva, high_gpa);
+	vcpu_args_set(vcpu, 2, vmx_pages_gva, high_gpa);
 	while (!done) {
 		volatile struct kvm_run *run = vcpu->run;
 		struct ucall uc;
-		vcpu_run(vm, vcpu->id);
+		vcpu_run(vcpu);
 		if (apic_access_addr == high_gpa) {
 			TEST_ASSERT(run->exit_reason ==
 				    KVM_EXIT_INTERNAL_ERROR,
@@ -119,7 +119,7 @@ int main(int argc, char *argv[])
 			    run->exit_reason,
 			    exit_reason_str(run->exit_reason));
-		switch (get_ucall(vm, vcpu->id, &uc)) {
+		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_ABORT:
 			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
 				  __FILE__, uc.args[1]);
......
@@ -57,13 +57,13 @@ int main(int argc, char *argv[])
 	/* Allocate VMX pages and shared descriptors (vmx_pages). */
 	vcpu_alloc_vmx(vm, &vmx_pages_gva);
-	vcpu_args_set(vm, vcpu->id, 1, vmx_pages_gva);
+	vcpu_args_set(vcpu, 1, vmx_pages_gva);
 	for (;;) {
 		volatile struct kvm_run *run = vcpu->run;
 		struct ucall uc;
-		vcpu_run(vm, vcpu->id);
+		vcpu_run(vcpu);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 			    "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
 			    run->exit_reason,
@@ -72,7 +72,7 @@ int main(int argc, char *argv[])
 		if (run->io.port == PORT_L0_EXIT)
 			break;
-		switch (get_ucall(vm, vcpu->id, &uc)) {
+		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_ABORT:
 			TEST_FAIL("%s", (const char *)uc.args[0]);
 			/* NOT REACHED */
......
@@ -82,7 +82,7 @@ int main(int argc, char *argv[])
 	/* Create VM */
 	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
 	vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
-	vcpu_args_set(vm, vcpu->id, 1, vmx_pages_gva);
+	vcpu_args_set(vcpu, 1, vmx_pages_gva);
 	run = vcpu->run;
 	/* Add an extra memory slot for testing dirty logging */
@@ -115,13 +115,13 @@ int main(int argc, char *argv[])
 	while (!done) {
 		memset(host_test_mem, 0xaa, TEST_MEM_PAGES * 4096);
-		vcpu_run(vm, vcpu->id);
+		vcpu_run(vcpu);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 			    "Unexpected exit reason: %u (%s),\n",
 			    run->exit_reason,
 			    exit_reason_str(run->exit_reason));
-		switch (get_ucall(vm, vcpu->id, &uc)) {
+		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_ABORT:
 			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
 				  __FILE__, uc.args[1]);
......
...@@ -24,7 +24,7 @@ static void __run_vcpu_with_invalid_state(struct kvm_vcpu *vcpu) ...@@ -24,7 +24,7 @@ static void __run_vcpu_with_invalid_state(struct kvm_vcpu *vcpu)
{ {
struct kvm_run *run = vcpu->run; struct kvm_run *run = vcpu->run;
vcpu_run(vcpu->vm, vcpu->id); vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR, TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
"Expected KVM_EXIT_INTERNAL_ERROR, got %d (%s)\n", "Expected KVM_EXIT_INTERNAL_ERROR, got %d (%s)\n",
...@@ -60,9 +60,9 @@ static void set_or_clear_invalid_guest_state(struct kvm_vcpu *vcpu, bool set) ...@@ -60,9 +60,9 @@ static void set_or_clear_invalid_guest_state(struct kvm_vcpu *vcpu, bool set)
static struct kvm_sregs sregs; static struct kvm_sregs sregs;
if (!sregs.cr0) if (!sregs.cr0)
vcpu_sregs_get(vcpu->vm, vcpu->id, &sregs); vcpu_sregs_get(vcpu, &sregs);
sregs.tr.unusable = !!set; sregs.tr.unusable = !!set;
vcpu_sregs_set(vcpu->vm, vcpu->id, &sregs); vcpu_sregs_set(vcpu, &sregs);
} }
static void set_invalid_guest_state(struct kvm_vcpu *vcpu) static void set_invalid_guest_state(struct kvm_vcpu *vcpu)
...@@ -91,7 +91,7 @@ static void sigalrm_handler(int sig) ...@@ -91,7 +91,7 @@ static void sigalrm_handler(int sig)
TEST_ASSERT(sig == SIGALRM, "Unexpected signal = %d", sig); TEST_ASSERT(sig == SIGALRM, "Unexpected signal = %d", sig);
vcpu_events_get(vcpu->vm, vcpu->id, &events); vcpu_events_get(vcpu, &events);
/* /*
* If an exception is pending, attempt KVM_RUN with invalid guest, * If an exception is pending, attempt KVM_RUN with invalid guest,
...@@ -120,7 +120,7 @@ int main(int argc, char *argv[]) ...@@ -120,7 +120,7 @@ int main(int argc, char *argv[])
get_set_sigalrm_vcpu(vcpu); get_set_sigalrm_vcpu(vcpu);
vm_init_descriptor_tables(vm); vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vm, vcpu->id); vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler); vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
......
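The set_or_clear_invalid_guest_state() hunk above uses the standard sregs read-modify-write idiom, now with vCPU-scoped accessors. A minimal standalone version (hypothetical helper name, without the static caching the test uses):

    static void set_tr_unusable(struct kvm_vcpu *vcpu, bool set)
    {
        struct kvm_sregs sregs;

        /* Fetch, flip a single field, write back. */
        vcpu_sregs_get(vcpu, &sregs);
        sregs.tr.unusable = !!set;
        vcpu_sregs_set(vcpu, &sregs);
    }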
...@@ -64,9 +64,9 @@ int main(int argc, char *argv[]) ...@@ -64,9 +64,9 @@ int main(int argc, char *argv[])
/* Allocate VMX pages and shared descriptors (vmx_pages). */ /* Allocate VMX pages and shared descriptors (vmx_pages). */
vcpu_alloc_vmx(vm, &vmx_pages_gva); vcpu_alloc_vmx(vm, &vmx_pages_gva);
vcpu_args_set(vm, vcpu->id, 1, vmx_pages_gva); vcpu_args_set(vcpu, 1, vmx_pages_gva);
vcpu_run(vm, vcpu->id); vcpu_run(vcpu);
run = vcpu->run; run = vcpu->run;
...@@ -88,13 +88,13 @@ int main(int argc, char *argv[]) ...@@ -88,13 +88,13 @@ int main(int argc, char *argv[])
* emulating invalid guest state for L2. * emulating invalid guest state for L2.
*/ */
memset(&sregs, 0, sizeof(sregs)); memset(&sregs, 0, sizeof(sregs));
vcpu_sregs_get(vm, vcpu->id, &sregs); vcpu_sregs_get(vcpu, &sregs);
sregs.tr.unusable = 1; sregs.tr.unusable = 1;
vcpu_sregs_set(vm, vcpu->id, &sregs); vcpu_sregs_set(vcpu, &sregs);
vcpu_run(vm, vcpu->id); vcpu_run(vcpu);
switch (get_ucall(vm, vcpu->id, &uc)) { switch (get_ucall(vcpu, &uc)) {
case UCALL_DONE: case UCALL_DONE:
break; break;
case UCALL_ABORT: case UCALL_ABORT:
......
...@@ -182,26 +182,25 @@ int main(int argc, char *argv[]) ...@@ -182,26 +182,25 @@ int main(int argc, char *argv[])
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code); vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
vcpu_alloc_vmx(vm, &vmx_pages_gva); vcpu_alloc_vmx(vm, &vmx_pages_gva);
vcpu_args_set(vm, vcpu->id, 1, vmx_pages_gva); vcpu_args_set(vcpu, 1, vmx_pages_gva);
tsc_khz = __vcpu_ioctl(vm, vcpu->id, KVM_GET_TSC_KHZ, NULL); tsc_khz = __vcpu_ioctl(vcpu, KVM_GET_TSC_KHZ, NULL);
TEST_ASSERT(tsc_khz != -1, "vcpu ioctl KVM_GET_TSC_KHZ failed"); TEST_ASSERT(tsc_khz != -1, "vcpu ioctl KVM_GET_TSC_KHZ failed");
/* scale down L1's TSC frequency */ /* scale down L1's TSC frequency */
vcpu_ioctl(vm, vcpu->id, KVM_SET_TSC_KHZ, vcpu_ioctl(vcpu, KVM_SET_TSC_KHZ, (void *) (tsc_khz / l1_scale_factor));
(void *) (tsc_khz / l1_scale_factor));
for (;;) { for (;;) {
volatile struct kvm_run *run = vcpu->run; volatile struct kvm_run *run = vcpu->run;
struct ucall uc; struct ucall uc;
vcpu_run(vm, vcpu->id); vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason, run->exit_reason,
exit_reason_str(run->exit_reason)); exit_reason_str(run->exit_reason));
switch (get_ucall(vm, vcpu->id, &uc)) { switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT: case UCALL_ABORT:
TEST_FAIL("%s", (const char *) uc.args[0]); TEST_FAIL("%s", (const char *) uc.args[0]);
case UCALL_SYNC: case UCALL_SYNC:
......
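The TSC hunk above also shows the selftests' error-handling split, which this commit leaves intact: the double-underscore variant returns the raw ioctl result for the caller to check, while the plain wrapper asserts success internally. A sketch of the two styles (the halved frequency is illustrative):

    long tsc_khz;

    /* Caller checks the raw return value. */
    tsc_khz = __vcpu_ioctl(vcpu, KVM_GET_TSC_KHZ, NULL);
    TEST_ASSERT(tsc_khz != -1, "vcpu ioctl KVM_GET_TSC_KHZ failed");

    /* Wrapper asserts success; nothing to check at the call site. */
    vcpu_ioctl(vcpu, KVM_SET_TSC_KHZ, (void *)(tsc_khz / 2));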
...@@ -87,27 +87,27 @@ int main(int argc, char *argv[]) ...@@ -87,27 +87,27 @@ int main(int argc, char *argv[])
} }
/* testcase 1, set capabilities when we have PDCM bit */ /* testcase 1, set capabilities when we have PDCM bit */
vcpu_set_cpuid(vm, vcpu->id, cpuid); vcpu_set_cpuid(vcpu, cpuid);
vcpu_set_msr(vm, vcpu->id, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_FW_WRITES); vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_FW_WRITES);
/* check capabilities can be retrieved with KVM_GET_MSR */ /* check capabilities can be retrieved with KVM_GET_MSR */
ASSERT_EQ(vcpu_get_msr(vm, vcpu->id, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES); ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES);
/* check whatever we write with KVM_SET_MSR is _not_ modified */ /* check whatever we write with KVM_SET_MSR is _not_ modified */
vcpu_run(vm, vcpu->id); vcpu_run(vcpu);
ASSERT_EQ(vcpu_get_msr(vm, vcpu->id, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES); ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES);
/* testcase 2, check valid LBR formats are accepted */ /* testcase 2, check valid LBR formats are accepted */
vcpu_set_msr(vm, vcpu->id, MSR_IA32_PERF_CAPABILITIES, 0); vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, 0);
ASSERT_EQ(vcpu_get_msr(vm, vcpu->id, MSR_IA32_PERF_CAPABILITIES), 0); ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), 0);
vcpu_set_msr(vm, vcpu->id, MSR_IA32_PERF_CAPABILITIES, host_cap.lbr_format); vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.lbr_format);
ASSERT_EQ(vcpu_get_msr(vm, vcpu->id, MSR_IA32_PERF_CAPABILITIES), (u64)host_cap.lbr_format); ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), (u64)host_cap.lbr_format);
/* testcase 3, check invalid LBR format is rejected */ /* testcase 3, check invalid LBR format is rejected */
/* Note, on Arch LBR capable platforms, LBR_FMT in perf capability msr is 0x3f, /* Note, on Arch LBR capable platforms, LBR_FMT in perf capability msr is 0x3f,
* to avoid the failure, use a true invalid format 0x30 for the test. */ * to avoid the failure, use a true invalid format 0x30 for the test. */
ret = _vcpu_set_msr(vm, vcpu->id, MSR_IA32_PERF_CAPABILITIES, 0x30); ret = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, 0x30);
TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail."); TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail.");
printf("Completed perf capability tests.\n"); printf("Completed perf capability tests.\n");
......
...@@ -178,19 +178,19 @@ int main(int argc, char *argv[]) ...@@ -178,19 +178,19 @@ int main(int argc, char *argv[])
vm = vm_create_with_one_vcpu(&vcpu, guest_code); vm = vm_create_with_one_vcpu(&vcpu, guest_code);
run = vcpu->run; run = vcpu->run;
vcpu_regs_get(vm, vcpu->id, &regs1); vcpu_regs_get(vcpu, &regs1);
vcpu_alloc_vmx(vm, &vmx_pages_gva); vcpu_alloc_vmx(vm, &vmx_pages_gva);
vcpu_args_set(vm, vcpu->id, 1, vmx_pages_gva); vcpu_args_set(vcpu, 1, vmx_pages_gva);
for (stage = 1;; stage++) { for (stage = 1;; stage++) {
vcpu_run(vm, vcpu->id); vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Stage %d: unexpected exit reason: %u (%s),\n", "Stage %d: unexpected exit reason: %u (%s),\n",
stage, run->exit_reason, stage, run->exit_reason,
exit_reason_str(run->exit_reason)); exit_reason_str(run->exit_reason));
switch (get_ucall(vm, vcpu->id, &uc)) { switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT: case UCALL_ABORT:
TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
__FILE__, uc.args[1]); __FILE__, uc.args[1]);
...@@ -232,22 +232,22 @@ int main(int argc, char *argv[]) ...@@ -232,22 +232,22 @@ int main(int argc, char *argv[])
stage, uc.args[4], uc.args[5]); stage, uc.args[4], uc.args[5]);
} }
state = vcpu_save_state(vm, vcpu->id); state = vcpu_save_state(vcpu);
memset(&regs1, 0, sizeof(regs1)); memset(&regs1, 0, sizeof(regs1));
vcpu_regs_get(vm, vcpu->id, &regs1); vcpu_regs_get(vcpu, &regs1);
kvm_vm_release(vm); kvm_vm_release(vm);
/* Restore state in a new VM. */ /* Restore state in a new VM. */
vcpu = vm_recreate_with_one_vcpu(vm); vcpu = vm_recreate_with_one_vcpu(vm);
vcpu_set_cpuid(vm, vcpu->id, kvm_get_supported_cpuid()); vcpu_set_cpuid(vcpu, kvm_get_supported_cpuid());
vcpu_load_state(vm, vcpu->id, state); vcpu_load_state(vcpu, state);
run = vcpu->run; run = vcpu->run;
kvm_x86_state_cleanup(state); kvm_x86_state_cleanup(state);
memset(&regs2, 0, sizeof(regs2)); memset(&regs2, 0, sizeof(regs2));
vcpu_regs_get(vm, vcpu->id, &regs2); vcpu_regs_get(vcpu, &regs2);
TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)), TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
"Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx", "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
(ulong) regs2.rdi, (ulong) regs2.rsi); (ulong) regs2.rdi, (ulong) regs2.rsi);
......
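The save/restore sequence above is the general shape used by all the state-migration tests; condensed, with the CPUID setup and register comparison elided, it reads:

    struct kvm_x86_state *state;

    state = vcpu_save_state(vcpu);
    kvm_vm_release(vm);

    /* Re-create the vCPU in the same VM and load the saved state. */
    vcpu = vm_recreate_with_one_vcpu(vm);
    vcpu_load_state(vcpu, state);
    kvm_x86_state_cleanup(state);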
...@@ -28,7 +28,7 @@ bool have_evmcs; ...@@ -28,7 +28,7 @@ bool have_evmcs;
void test_nested_state(struct kvm_vcpu *vcpu, struct kvm_nested_state *state) void test_nested_state(struct kvm_vcpu *vcpu, struct kvm_nested_state *state)
{ {
vcpu_nested_state_set(vcpu->vm, vcpu->id, state); vcpu_nested_state_set(vcpu, state);
} }
void test_nested_state_expect_errno(struct kvm_vcpu *vcpu, void test_nested_state_expect_errno(struct kvm_vcpu *vcpu,
...@@ -37,7 +37,7 @@ void test_nested_state_expect_errno(struct kvm_vcpu *vcpu, ...@@ -37,7 +37,7 @@ void test_nested_state_expect_errno(struct kvm_vcpu *vcpu,
{ {
int rv; int rv;
rv = __vcpu_nested_state_set(vcpu->vm, vcpu->id, state); rv = __vcpu_nested_state_set(vcpu, state);
TEST_ASSERT(rv == -1 && errno == expected_errno, TEST_ASSERT(rv == -1 && errno == expected_errno,
"Expected %s (%d) from vcpu_nested_state_set but got rv: %i errno: %s (%d)", "Expected %s (%d) from vcpu_nested_state_set but got rv: %i errno: %s (%d)",
strerror(expected_errno), expected_errno, rv, strerror(errno), strerror(expected_errno), expected_errno, rv, strerror(errno),
...@@ -121,7 +121,7 @@ void test_vmx_nested_state(struct kvm_vcpu *vcpu) ...@@ -121,7 +121,7 @@ void test_vmx_nested_state(struct kvm_vcpu *vcpu)
test_nested_state(vcpu, state); test_nested_state(vcpu, state);
/* Enable VMX in the guest CPUID. */ /* Enable VMX in the guest CPUID. */
vcpu_set_cpuid(vcpu->vm, vcpu->id, kvm_get_supported_cpuid()); vcpu_set_cpuid(vcpu, kvm_get_supported_cpuid());
/* /*
* Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
...@@ -137,7 +137,7 @@ void test_vmx_nested_state(struct kvm_vcpu *vcpu) ...@@ -137,7 +137,7 @@ void test_vmx_nested_state(struct kvm_vcpu *vcpu)
state->flags &= KVM_STATE_NESTED_EVMCS; state->flags &= KVM_STATE_NESTED_EVMCS;
if (have_evmcs) { if (have_evmcs) {
test_nested_state_expect_einval(vcpu, state); test_nested_state_expect_einval(vcpu, state);
vcpu_enable_evmcs(vcpu->vm, vcpu->id); vcpu_enable_evmcs(vcpu);
} }
test_nested_state(vcpu, state); test_nested_state(vcpu, state);
...@@ -233,7 +233,7 @@ void test_vmx_nested_state(struct kvm_vcpu *vcpu) ...@@ -233,7 +233,7 @@ void test_vmx_nested_state(struct kvm_vcpu *vcpu)
state->hdr.vmx.vmcs12_pa = -1ull; state->hdr.vmx.vmcs12_pa = -1ull;
state->flags = 0; state->flags = 0;
test_nested_state(vcpu, state); test_nested_state(vcpu, state);
vcpu_nested_state_get(vcpu->vm, vcpu->id, state); vcpu_nested_state_get(vcpu, state);
TEST_ASSERT(state->size >= sizeof(*state) && state->size <= state_sz, TEST_ASSERT(state->size >= sizeof(*state) && state->size <= state_sz,
"Size must be between %ld and %d. The size returned was %d.", "Size must be between %ld and %d. The size returned was %d.",
sizeof(*state), state_sz, state->size); sizeof(*state), state_sz, state->size);
...@@ -255,7 +255,7 @@ void disable_vmx(struct kvm_vcpu *vcpu) ...@@ -255,7 +255,7 @@ void disable_vmx(struct kvm_vcpu *vcpu)
TEST_ASSERT(i != cpuid->nent, "CPUID function 1 not found"); TEST_ASSERT(i != cpuid->nent, "CPUID function 1 not found");
cpuid->entries[i].ecx &= ~CPUID_VMX; cpuid->entries[i].ecx &= ~CPUID_VMX;
vcpu_set_cpuid(vcpu->vm, vcpu->id, cpuid); vcpu_set_cpuid(vcpu, cpuid);
cpuid->entries[i].ecx |= CPUID_VMX; cpuid->entries[i].ecx |= CPUID_VMX;
} }
......
...@@ -133,19 +133,19 @@ int main(int argc, char *argv[]) ...@@ -133,19 +133,19 @@ int main(int argc, char *argv[])
/* Allocate VMX pages and shared descriptors (vmx_pages). */ /* Allocate VMX pages and shared descriptors (vmx_pages). */
vcpu_alloc_vmx(vm, &vmx_pages_gva); vcpu_alloc_vmx(vm, &vmx_pages_gva);
vcpu_args_set(vm, vcpu->id, 1, vmx_pages_gva); vcpu_args_set(vcpu, 1, vmx_pages_gva);
for (;;) { for (;;) {
volatile struct kvm_run *run = vcpu->run; volatile struct kvm_run *run = vcpu->run;
struct ucall uc; struct ucall uc;
vcpu_run(vm, vcpu->id); vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason, run->exit_reason,
exit_reason_str(run->exit_reason)); exit_reason_str(run->exit_reason));
switch (get_ucall(vm, vcpu->id, &uc)) { switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT: case UCALL_ABORT:
TEST_FAIL("%s", (const char *)uc.args[0]); TEST_FAIL("%s", (const char *)uc.args[0]);
/* NOT REACHED */ /* NOT REACHED */
......
...@@ -206,14 +206,14 @@ static void *vcpu_thread(void *arg) ...@@ -206,14 +206,14 @@ static void *vcpu_thread(void *arg)
vcpu->id, r); vcpu->id, r);
fprintf(stderr, "vCPU thread running vCPU %u\n", vcpu->id); fprintf(stderr, "vCPU thread running vCPU %u\n", vcpu->id);
vcpu_run(vcpu->vm, vcpu->id); vcpu_run(vcpu);
exit_reason = vcpu->run->exit_reason; exit_reason = vcpu->run->exit_reason;
TEST_ASSERT(exit_reason == KVM_EXIT_IO, TEST_ASSERT(exit_reason == KVM_EXIT_IO,
"vCPU %u exited with unexpected exit reason %u-%s, expected KVM_EXIT_IO", "vCPU %u exited with unexpected exit reason %u-%s, expected KVM_EXIT_IO",
vcpu->id, exit_reason, exit_reason_str(exit_reason)); vcpu->id, exit_reason, exit_reason_str(exit_reason));
if (get_ucall(vcpu->vm, vcpu->id, &uc) == UCALL_ABORT) { if (get_ucall(vcpu, &uc) == UCALL_ABORT) {
TEST_ASSERT(false, TEST_ASSERT(false,
"vCPU %u exited with error: %s.\n" "vCPU %u exited with error: %s.\n"
"Sending vCPU sent %lu IPIs to halting vCPU\n" "Sending vCPU sent %lu IPIs to halting vCPU\n"
...@@ -415,7 +415,7 @@ int main(int argc, char *argv[]) ...@@ -415,7 +415,7 @@ int main(int argc, char *argv[])
vm = vm_create_with_one_vcpu(&params[0].vcpu, halter_guest_code); vm = vm_create_with_one_vcpu(&params[0].vcpu, halter_guest_code);
vm_init_descriptor_tables(vm); vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vm, params[0].vcpu->id); vcpu_init_descriptor_tables(params[0].vcpu);
vm_install_exception_handler(vm, IPI_VECTOR, guest_ipi_handler); vm_install_exception_handler(vm, IPI_VECTOR, guest_ipi_handler);
virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA); virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
...@@ -428,8 +428,8 @@ int main(int argc, char *argv[]) ...@@ -428,8 +428,8 @@ int main(int argc, char *argv[])
params[0].data = data; params[0].data = data;
params[1].data = data; params[1].data = data;
vcpu_args_set(vm, params[0].vcpu->id, 1, test_data_page_vaddr); vcpu_args_set(params[0].vcpu, 1, test_data_page_vaddr);
vcpu_args_set(vm, params[1].vcpu->id, 1, test_data_page_vaddr); vcpu_args_set(params[1].vcpu, 1, test_data_page_vaddr);
pipis_rcvd = (uint64_t *)addr_gva2hva(vm, (uint64_t)&ipis_rcvd); pipis_rcvd = (uint64_t *)addr_gva2hva(vm, (uint64_t)&ipis_rcvd);
params[0].pipis_rcvd = pipis_rcvd; params[0].pipis_rcvd = pipis_rcvd;
......
...@@ -47,7 +47,7 @@ static void x2apic_guest_code(void) ...@@ -47,7 +47,7 @@ static void x2apic_guest_code(void)
} while (1); } while (1);
} }
static void ____test_icr(struct kvm_vm *vm, struct xapic_vcpu *x, uint64_t val) static void ____test_icr(struct xapic_vcpu *x, uint64_t val)
{ {
struct kvm_vcpu *vcpu = x->vcpu; struct kvm_vcpu *vcpu = x->vcpu;
struct kvm_lapic_state xapic; struct kvm_lapic_state xapic;
...@@ -59,16 +59,16 @@ static void ____test_icr(struct kvm_vm *vm, struct xapic_vcpu *x, uint64_t val) ...@@ -59,16 +59,16 @@ static void ____test_icr(struct kvm_vm *vm, struct xapic_vcpu *x, uint64_t val)
* all bits are valid and should not be modified by KVM (ignoring the * all bits are valid and should not be modified by KVM (ignoring the
* fact that vectors 0-15 are technically illegal). * fact that vectors 0-15 are technically illegal).
*/ */
vcpu_ioctl(vm, vcpu->id, KVM_GET_LAPIC, &xapic); vcpu_ioctl(vcpu, KVM_GET_LAPIC, &xapic);
*((u32 *)&xapic.regs[APIC_IRR]) = val; *((u32 *)&xapic.regs[APIC_IRR]) = val;
*((u32 *)&xapic.regs[APIC_IRR + 0x10]) = val >> 32; *((u32 *)&xapic.regs[APIC_IRR + 0x10]) = val >> 32;
vcpu_ioctl(vm, vcpu->id, KVM_SET_LAPIC, &xapic); vcpu_ioctl(vcpu, KVM_SET_LAPIC, &xapic);
vcpu_run(vm, vcpu->id); vcpu_run(vcpu);
ASSERT_EQ(get_ucall(vm, vcpu->id, &uc), UCALL_SYNC); ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_SYNC);
ASSERT_EQ(uc.args[1], val); ASSERT_EQ(uc.args[1], val);
vcpu_ioctl(vm, vcpu->id, KVM_GET_LAPIC, &xapic); vcpu_ioctl(vcpu, KVM_GET_LAPIC, &xapic);
icr = (u64)(*((u32 *)&xapic.regs[APIC_ICR])) | icr = (u64)(*((u32 *)&xapic.regs[APIC_ICR])) |
(u64)(*((u32 *)&xapic.regs[APIC_ICR2])) << 32; (u64)(*((u32 *)&xapic.regs[APIC_ICR2])) << 32;
if (!x->is_x2apic) if (!x->is_x2apic)
...@@ -76,24 +76,24 @@ static void ____test_icr(struct kvm_vm *vm, struct xapic_vcpu *x, uint64_t val) ...@@ -76,24 +76,24 @@ static void ____test_icr(struct kvm_vm *vm, struct xapic_vcpu *x, uint64_t val)
ASSERT_EQ(icr, val & ~APIC_ICR_BUSY); ASSERT_EQ(icr, val & ~APIC_ICR_BUSY);
} }
static void __test_icr(struct kvm_vm *vm, struct xapic_vcpu *x, uint64_t val) static void __test_icr(struct xapic_vcpu *x, uint64_t val)
{ {
____test_icr(vm, x, val | APIC_ICR_BUSY); ____test_icr(x, val | APIC_ICR_BUSY);
____test_icr(vm, x, val & ~(u64)APIC_ICR_BUSY); ____test_icr(x, val & ~(u64)APIC_ICR_BUSY);
} }
static void test_icr(struct kvm_vm *vm, struct xapic_vcpu *x) static void test_icr(struct xapic_vcpu *x)
{ {
struct kvm_vcpu *vcpu = x->vcpu; struct kvm_vcpu *vcpu = x->vcpu;
uint64_t icr, i, j; uint64_t icr, i, j;
icr = APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_FIXED; icr = APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_FIXED;
for (i = 0; i <= 0xff; i++) for (i = 0; i <= 0xff; i++)
__test_icr(vm, x, icr | i); __test_icr(x, icr | i);
icr = APIC_INT_ASSERT | APIC_DM_FIXED; icr = APIC_INT_ASSERT | APIC_DM_FIXED;
for (i = 0; i <= 0xff; i++) for (i = 0; i <= 0xff; i++)
__test_icr(vm, x, icr | i); __test_icr(x, icr | i);
/* /*
* Send all flavors of IPIs to non-existent vCPUs. TODO: use number of * Send all flavors of IPIs to non-existent vCPUs. TODO: use number of
...@@ -102,18 +102,18 @@ static void test_icr(struct kvm_vm *vm, struct xapic_vcpu *x) ...@@ -102,18 +102,18 @@ static void test_icr(struct kvm_vm *vm, struct xapic_vcpu *x)
icr = APIC_INT_ASSERT | 0xff; icr = APIC_INT_ASSERT | 0xff;
for (i = vcpu->id + 1; i < 0xff; i++) { for (i = vcpu->id + 1; i < 0xff; i++) {
for (j = 0; j < 8; j++) for (j = 0; j < 8; j++)
__test_icr(vm, x, i << (32 + 24) | APIC_INT_ASSERT | (j << 8)); __test_icr(x, i << (32 + 24) | APIC_INT_ASSERT | (j << 8));
} }
/* And again with a shorthand destination for all types of IPIs. */ /* And again with a shorthand destination for all types of IPIs. */
icr = APIC_DEST_ALLBUT | APIC_INT_ASSERT; icr = APIC_DEST_ALLBUT | APIC_INT_ASSERT;
for (i = 0; i < 8; i++) for (i = 0; i < 8; i++)
__test_icr(vm, x, icr | (i << 8)); __test_icr(x, icr | (i << 8));
/* And a few garbage value, just make sure it's an IRQ (blocked). */ /* And a few garbage value, just make sure it's an IRQ (blocked). */
__test_icr(vm, x, 0xa5a5a5a5a5a5a5a5 & ~APIC_DM_FIXED_MASK); __test_icr(x, 0xa5a5a5a5a5a5a5a5 & ~APIC_DM_FIXED_MASK);
__test_icr(vm, x, 0x5a5a5a5a5a5a5a5a & ~APIC_DM_FIXED_MASK); __test_icr(x, 0x5a5a5a5a5a5a5a5a & ~APIC_DM_FIXED_MASK);
__test_icr(vm, x, -1ull & ~APIC_DM_FIXED_MASK); __test_icr(x, -1ull & ~APIC_DM_FIXED_MASK);
} }
int main(int argc, char *argv[]) int main(int argc, char *argv[])
...@@ -127,7 +127,7 @@ int main(int argc, char *argv[]) ...@@ -127,7 +127,7 @@ int main(int argc, char *argv[])
int i; int i;
vm = vm_create_with_one_vcpu(&x.vcpu, x2apic_guest_code); vm = vm_create_with_one_vcpu(&x.vcpu, x2apic_guest_code);
test_icr(vm, &x); test_icr(&x);
kvm_vm_free(vm); kvm_vm_free(vm);
/* /*
...@@ -138,15 +138,15 @@ int main(int argc, char *argv[]) ...@@ -138,15 +138,15 @@ int main(int argc, char *argv[])
vm = vm_create_with_one_vcpu(&x.vcpu, xapic_guest_code); vm = vm_create_with_one_vcpu(&x.vcpu, xapic_guest_code);
x.is_x2apic = false; x.is_x2apic = false;
cpuid = vcpu_get_cpuid(vm, x.vcpu->id); cpuid = vcpu_get_cpuid(x.vcpu);
for (i = 0; i < cpuid->nent; i++) { for (i = 0; i < cpuid->nent; i++) {
if (cpuid->entries[i].function == 1) if (cpuid->entries[i].function == 1)
break; break;
} }
cpuid->entries[i].ecx &= ~BIT(21); cpuid->entries[i].ecx &= ~BIT(21);
vcpu_set_cpuid(vm, x.vcpu->id, cpuid); vcpu_set_cpuid(x.vcpu, cpuid);
virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA); virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
test_icr(vm, &x); test_icr(&x);
kvm_vm_free(vm); kvm_vm_free(vm);
} }
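The ____test_icr() family shows the payoff of the conversion most clearly: once a helper receives the vCPU (or a struct embedding it, like xapic_vcpu here), the separate VM parameter is dead weight, since vcpu->vm remains reachable for the occasional VM-scoped call. A hypothetical helper in the new style:

    /* Hypothetical: map the xAPIC window and run; the VM comes from the vCPU. */
    static void map_apic_and_run(struct kvm_vcpu *vcpu)
    {
        virt_pg_map(vcpu->vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
        vcpu_run(vcpu);
    }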
...@@ -348,7 +348,7 @@ static void handle_alrm(int sig) ...@@ -348,7 +348,7 @@ static void handle_alrm(int sig)
{ {
if (vinfo) if (vinfo)
printf("evtchn_upcall_pending 0x%x\n", vinfo->evtchn_upcall_pending); printf("evtchn_upcall_pending 0x%x\n", vinfo->evtchn_upcall_pending);
vcpu_dump(stdout, vcpu->vm, vcpu->id, 0); vcpu_dump(stdout, vcpu, 0);
TEST_FAIL("IRQ delivery timed out"); TEST_FAIL("IRQ delivery timed out");
} }
...@@ -423,13 +423,13 @@ int main(int argc, char *argv[]) ...@@ -423,13 +423,13 @@ int main(int argc, char *argv[])
.type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
.u.gpa = VCPU_INFO_ADDR, .u.gpa = VCPU_INFO_ADDR,
}; };
vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &vi); vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &vi);
struct kvm_xen_vcpu_attr pvclock = { struct kvm_xen_vcpu_attr pvclock = {
.type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO, .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO,
.u.gpa = PVTIME_ADDR, .u.gpa = PVTIME_ADDR,
}; };
vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &pvclock); vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &pvclock);
struct kvm_xen_hvm_attr vec = { struct kvm_xen_hvm_attr vec = {
.type = KVM_XEN_ATTR_TYPE_UPCALL_VECTOR, .type = KVM_XEN_ATTR_TYPE_UPCALL_VECTOR,
...@@ -438,7 +438,7 @@ int main(int argc, char *argv[]) ...@@ -438,7 +438,7 @@ int main(int argc, char *argv[])
vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &vec); vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &vec);
vm_init_descriptor_tables(vm); vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vm, vcpu->id); vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vm, EVTCHN_VECTOR, evtchn_handler); vm_install_exception_handler(vm, EVTCHN_VECTOR, evtchn_handler);
if (do_runstate_tests) { if (do_runstate_tests) {
...@@ -446,7 +446,7 @@ int main(int argc, char *argv[]) ...@@ -446,7 +446,7 @@ int main(int argc, char *argv[])
.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR, .type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR,
.u.gpa = RUNSTATE_ADDR, .u.gpa = RUNSTATE_ADDR,
}; };
vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &st); vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &st);
} }
int irq_fd[2] = { -1, -1 }; int irq_fd[2] = { -1, -1 };
...@@ -522,7 +522,7 @@ int main(int argc, char *argv[]) ...@@ -522,7 +522,7 @@ int main(int argc, char *argv[])
inj.u.evtchn.flags = 0; inj.u.evtchn.flags = 0;
vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &inj); vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &inj);
vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &tmr); vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
} }
vinfo = addr_gpa2hva(vm, VCPU_INFO_VADDR); vinfo = addr_gpa2hva(vm, VCPU_INFO_VADDR);
vinfo->evtchn_upcall_pending = 0; vinfo->evtchn_upcall_pending = 0;
...@@ -536,14 +536,14 @@ int main(int argc, char *argv[]) ...@@ -536,14 +536,14 @@ int main(int argc, char *argv[])
volatile struct kvm_run *run = vcpu->run; volatile struct kvm_run *run = vcpu->run;
struct ucall uc; struct ucall uc;
vcpu_run(vm, vcpu->id); vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason, run->exit_reason,
exit_reason_str(run->exit_reason)); exit_reason_str(run->exit_reason));
switch (get_ucall(vm, vcpu->id, &uc)) { switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT: case UCALL_ABORT:
TEST_FAIL("%s", (const char *)uc.args[0]); TEST_FAIL("%s", (const char *)uc.args[0]);
/* NOT REACHED */ /* NOT REACHED */
...@@ -572,7 +572,7 @@ int main(int argc, char *argv[]) ...@@ -572,7 +572,7 @@ int main(int argc, char *argv[])
printf("Testing runstate %s\n", runstate_names[uc.args[1]]); printf("Testing runstate %s\n", runstate_names[uc.args[1]]);
rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT; rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT;
rst.u.runstate.state = uc.args[1]; rst.u.runstate.state = uc.args[1];
vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &rst); vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst);
break; break;
case 4: case 4:
...@@ -587,7 +587,7 @@ int main(int argc, char *argv[]) ...@@ -587,7 +587,7 @@ int main(int argc, char *argv[])
0x6b6b - rs->time[RUNSTATE_offline]; 0x6b6b - rs->time[RUNSTATE_offline];
rst.u.runstate.time_runnable = -rst.u.runstate.time_blocked - rst.u.runstate.time_runnable = -rst.u.runstate.time_blocked -
rst.u.runstate.time_offline; rst.u.runstate.time_offline;
vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &rst); vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst);
break; break;
case 5: case 5:
...@@ -599,7 +599,7 @@ int main(int argc, char *argv[]) ...@@ -599,7 +599,7 @@ int main(int argc, char *argv[])
rst.u.runstate.state_entry_time = 0x6b6b + 0x5a; rst.u.runstate.state_entry_time = 0x6b6b + 0x5a;
rst.u.runstate.time_blocked = 0x6b6b; rst.u.runstate.time_blocked = 0x6b6b;
rst.u.runstate.time_offline = 0x5a; rst.u.runstate.time_offline = 0x5a;
vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &rst); vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst);
break; break;
case 6: case 6:
...@@ -700,7 +700,7 @@ int main(int argc, char *argv[]) ...@@ -700,7 +700,7 @@ int main(int argc, char *argv[])
case 14: case 14:
memset(&tmr, 0, sizeof(tmr)); memset(&tmr, 0, sizeof(tmr));
tmr.type = KVM_XEN_VCPU_ATTR_TYPE_TIMER; tmr.type = KVM_XEN_VCPU_ATTR_TYPE_TIMER;
vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_GET_ATTR, &tmr); vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr);
TEST_ASSERT(tmr.u.timer.port == EVTCHN_TIMER, TEST_ASSERT(tmr.u.timer.port == EVTCHN_TIMER,
"Timer port not returned"); "Timer port not returned");
TEST_ASSERT(tmr.u.timer.priority == KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL, TEST_ASSERT(tmr.u.timer.priority == KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
...@@ -720,7 +720,7 @@ int main(int argc, char *argv[]) ...@@ -720,7 +720,7 @@ int main(int argc, char *argv[])
printf("Testing restored oneshot timer\n"); printf("Testing restored oneshot timer\n");
tmr.u.timer.expires_ns = rs->state_entry_time + 100000000, tmr.u.timer.expires_ns = rs->state_entry_time + 100000000,
vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &tmr); vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
evtchn_irq_expected = true; evtchn_irq_expected = true;
alarm(1); alarm(1);
break; break;
...@@ -747,7 +747,7 @@ int main(int argc, char *argv[]) ...@@ -747,7 +747,7 @@ int main(int argc, char *argv[])
printf("Testing SCHEDOP_poll wake on masked event\n"); printf("Testing SCHEDOP_poll wake on masked event\n");
tmr.u.timer.expires_ns = rs->state_entry_time + 100000000, tmr.u.timer.expires_ns = rs->state_entry_time + 100000000,
vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &tmr); vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
alarm(1); alarm(1);
break; break;
...@@ -758,11 +758,11 @@ int main(int argc, char *argv[]) ...@@ -758,11 +758,11 @@ int main(int argc, char *argv[])
evtchn_irq_expected = true; evtchn_irq_expected = true;
tmr.u.timer.expires_ns = rs->state_entry_time + 100000000; tmr.u.timer.expires_ns = rs->state_entry_time + 100000000;
vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &tmr); vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
/* Read it back and check the pending time is reported correctly */ /* Read it back and check the pending time is reported correctly */
tmr.u.timer.expires_ns = 0; tmr.u.timer.expires_ns = 0;
vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_GET_ATTR, &tmr); vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr);
TEST_ASSERT(tmr.u.timer.expires_ns == rs->state_entry_time + 100000000, TEST_ASSERT(tmr.u.timer.expires_ns == rs->state_entry_time + 100000000,
"Timer not reported pending"); "Timer not reported pending");
alarm(1); alarm(1);
...@@ -772,7 +772,7 @@ int main(int argc, char *argv[]) ...@@ -772,7 +772,7 @@ int main(int argc, char *argv[])
TEST_ASSERT(!evtchn_irq_expected, TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen"); "Expected event channel IRQ but it didn't happen");
/* Read timer and check it is no longer pending */ /* Read timer and check it is no longer pending */
vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_GET_ATTR, &tmr); vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr);
TEST_ASSERT(!tmr.u.timer.expires_ns, "Timer still reported pending"); TEST_ASSERT(!tmr.u.timer.expires_ns, "Timer still reported pending");
shinfo->evtchn_pending[0] = 0; shinfo->evtchn_pending[0] = 0;
...@@ -781,7 +781,7 @@ int main(int argc, char *argv[]) ...@@ -781,7 +781,7 @@ int main(int argc, char *argv[])
evtchn_irq_expected = true; evtchn_irq_expected = true;
tmr.u.timer.expires_ns = rs->state_entry_time - 100000000ULL; tmr.u.timer.expires_ns = rs->state_entry_time - 100000000ULL;
vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_SET_ATTR, &tmr); vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
alarm(1); alarm(1);
break; break;
...@@ -851,7 +851,7 @@ int main(int argc, char *argv[]) ...@@ -851,7 +851,7 @@ int main(int argc, char *argv[])
struct kvm_xen_vcpu_attr rst = { struct kvm_xen_vcpu_attr rst = {
.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA, .type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA,
}; };
vcpu_ioctl(vm, vcpu->id, KVM_XEN_VCPU_GET_ATTR, &rst); vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &rst);
if (verbose) { if (verbose) {
printf("Runstate: %s(%d), entry %" PRIu64 " ns\n", printf("Runstate: %s(%d), entry %" PRIu64 " ns\n",
......
...@@ -90,7 +90,7 @@ int main(int argc, char *argv[]) ...@@ -90,7 +90,7 @@ int main(int argc, char *argv[])
} }
vm = vm_create_with_one_vcpu(&vcpu, guest_code); vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vcpu_set_hv_cpuid(vm, vcpu->id); vcpu_set_hv_cpuid(vcpu);
struct kvm_xen_hvm_config hvmc = { struct kvm_xen_hvm_config hvmc = {
.flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL, .flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
...@@ -107,7 +107,7 @@ int main(int argc, char *argv[]) ...@@ -107,7 +107,7 @@ int main(int argc, char *argv[])
volatile struct kvm_run *run = vcpu->run; volatile struct kvm_run *run = vcpu->run;
struct ucall uc; struct ucall uc;
vcpu_run(vm, vcpu->id); vcpu_run(vcpu);
if (run->exit_reason == KVM_EXIT_XEN) { if (run->exit_reason == KVM_EXIT_XEN) {
ASSERT_EQ(run->xen.type, KVM_EXIT_XEN_HCALL); ASSERT_EQ(run->xen.type, KVM_EXIT_XEN_HCALL);
...@@ -129,7 +129,7 @@ int main(int argc, char *argv[]) ...@@ -129,7 +129,7 @@ int main(int argc, char *argv[])
run->exit_reason, run->exit_reason,
exit_reason_str(run->exit_reason)); exit_reason_str(run->exit_reason));
switch (get_ucall(vm, vcpu->id, &uc)) { switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT: case UCALL_ABORT:
TEST_FAIL("%s", (const char *)uc.args[0]); TEST_FAIL("%s", (const char *)uc.args[0]);
/* NOT REACHED */ /* NOT REACHED */
......
...@@ -38,11 +38,11 @@ int main(int argc, char *argv[]) ...@@ -38,11 +38,11 @@ int main(int argc, char *argv[])
exit(KSFT_SKIP); exit(KSFT_SKIP);
} }
xss_val = vcpu_get_msr(vm, vcpu->id, MSR_IA32_XSS); xss_val = vcpu_get_msr(vcpu, MSR_IA32_XSS);
TEST_ASSERT(xss_val == 0, TEST_ASSERT(xss_val == 0,
"MSR_IA32_XSS should be initialized to zero\n"); "MSR_IA32_XSS should be initialized to zero\n");
vcpu_set_msr(vm, vcpu->id, MSR_IA32_XSS, xss_val); vcpu_set_msr(vcpu, MSR_IA32_XSS, xss_val);
/* /*
* At present, KVM only supports a guest IA32_XSS value of 0. Verify * At present, KVM only supports a guest IA32_XSS value of 0. Verify
...@@ -52,7 +52,7 @@ int main(int argc, char *argv[]) ...@@ -52,7 +52,7 @@ int main(int argc, char *argv[])
*/ */
xss_in_msr_list = kvm_msr_is_in_save_restore_list(MSR_IA32_XSS); xss_in_msr_list = kvm_msr_is_in_save_restore_list(MSR_IA32_XSS);
for (i = 0; i < MSR_BITS; ++i) { for (i = 0; i < MSR_BITS; ++i) {
r = _vcpu_set_msr(vm, vcpu->id, MSR_IA32_XSS, 1ull << i); r = _vcpu_set_msr(vcpu, MSR_IA32_XSS, 1ull << i);
/* /*
* Setting a list of MSRs returns the entry that "faulted", or * Setting a list of MSRs returns the entry that "faulted", or
......
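As the final hunk's comment notes, setting a list of MSRs returns the entry that "faulted"; for the single-MSR wrapper, that means _vcpu_set_msr() returns 1 when the write is accepted and 0 when KVM rejects it (KVM_SET_MSRS semantics). Under that assumption, probing which XSS bits are settable reduces to a boolean check:

    int i;

    for (i = 0; i < MSR_BITS; ++i) {
        /* 1 == write accepted, 0 == rejected by KVM. */
        if (_vcpu_set_msr(vcpu, MSR_IA32_XSS, 1ull << i))
            printf("IA32_XSS bit %d is settable\n", i);
    }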