// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM_GET/SET_* tests
 *
 * Copyright (C) 2018, Red Hat, Inc.
 *
 * Tests for vCPU state save/restore, including nested guest state.
 */
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"

#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#define VCPU_ID		5

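/*
 * Code run by the nested (L2) guest: sync with the host, exit to L1 via
 * vmcall, then read back and modify GUEST_RIP through the shadow VMCS
 * that L1 sets up for it.
 */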
void l2_guest_code(void)
{
	GUEST_SYNC(6);

	/* Exit to L1 */
	vmcall();

	/* L1 has now set up a shadow VMCS for us.  */
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
	GUEST_SYNC(10);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
	GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0fffee));
	GUEST_SYNC(11);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0fffee);
	GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0ffffee));
	GUEST_SYNC(12);

	/* Done, exit to L1 and never come back.  */
	vmcall();
}

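/*
 * Code run by the L1 guest: enter VMX operation, launch l2_guest_code,
 * and exercise vmlaunch/vmresume and shadow VMCS handling across the
 * host-side save/restore points (GUEST_SYNC stages 3-13).
 */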
void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	GUEST_ASSERT(vmx_pages->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_SYNC(3);
	GUEST_ASSERT(load_vmcs(vmx_pages));
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

	GUEST_SYNC(4);
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	GUEST_SYNC(5);
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	/* Check that the launched state is preserved.  */
	GUEST_ASSERT(vmlaunch());

	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_SYNC(7);
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

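	/* Skip L2 past the 3-byte vmcall so it isn't re-executed on resume. */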
	vmwrite(GUEST_RIP, vmreadz(GUEST_RIP) + 3);

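	/*
	 * Enable VMCS shadowing and link in the shadow VMCS.  Making the
	 * shadow VMCS current with vmptrld is legal, but launching it is
	 * not, so the vmlaunch/vmresume attempts below must fail.
	 */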
	vmwrite(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
	vmwrite(VMCS_LINK_POINTER, vmx_pages->shadow_vmcs_gpa);

	GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
	GUEST_ASSERT(vmlaunch());
	GUEST_SYNC(8);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());

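	/* Write to the shadow VMCS; the value must survive the next save/restore. */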
	vmwrite(GUEST_RIP, 0xc0ffee);
	GUEST_SYNC(9);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);

	GUEST_ASSERT(!vmptrld(vmx_pages->vmcs_gpa));
	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());
	GUEST_SYNC(13);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());
}

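/*
 * Common guest entry point.  Stages 1 and 2 always run; the nested
 * stages only run when userspace allocated and passed in vmx_pages.
 */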
void guest_code(struct vmx_pages *vmx_pages)
{
	GUEST_SYNC(1);
	GUEST_SYNC(2);

	if (vmx_pages)
		l1_guest_code(vmx_pages);

	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	vm_vaddr_t vmx_pages_gva = 0;

	struct kvm_regs regs1, regs2;
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct kvm_x86_state *state;
	struct ucall uc;
	int stage;

	/* Create VM */
	vm = vm_create_default(VCPU_ID, 0, guest_code);
	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
	run = vcpu_state(vm, VCPU_ID);

	vcpu_regs_get(vm, VCPU_ID, &regs1);

	if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
		vcpu_alloc_vmx(vm, &vmx_pages_gva);
		vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
	} else {
		pr_info("will skip nested state checks\n");
		vcpu_args_set(vm, VCPU_ID, 1, 0);
	}

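	/*
	 * Run the guest to each GUEST_SYNC stage in turn.  After every stage,
	 * save the vCPU state, recreate the VM and vCPU, restore the state
	 * and verify that the general-purpose registers round-trip intact.
	 */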
	for (stage = 1;; stage++) {
		_vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Stage %d: unexpected exit reason: %u (%s)\n",
			    stage, run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_ABORT:
			TEST_ASSERT(false, "%s at %s:%ld", (const char *)uc.args[0],
				    __FILE__, uc.args[1]);
			/* NOT REACHED */
		case UCALL_SYNC:
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_ASSERT(false, "Unknown ucall 0x%lx.", uc.cmd);
		}

		/* UCALL_SYNC is handled here.  */
		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage,
			    "Unexpected register values vmexit #%d, got %lx",
			    stage, (ulong)uc.args[1]);

		state = vcpu_save_state(vm, VCPU_ID);
		memset(&regs1, 0, sizeof(regs1));
		vcpu_regs_get(vm, VCPU_ID, &regs1);

		kvm_vm_release(vm);

		/* Restore state in a new VM.  */
		kvm_vm_restart(vm, O_RDWR);
		vm_vcpu_add(vm, VCPU_ID);
		vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
		vcpu_load_state(vm, VCPU_ID, state);
		run = vcpu_state(vm, VCPU_ID);
		free(state);

		memset(&regs2, 0, sizeof(regs2));
		vcpu_regs_get(vm, VCPU_ID, &regs2);
		TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
			    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
			    (ulong) regs2.rdi, (ulong) regs2.rsi);
	}

done:
	kvm_vm_free(vm);
}