Commit ef38871e authored by Sean Christopherson

KVM: selftests: Consolidate boilerplate code in get_ucall()

Consolidate the actual copying of a ucall struct from guest=>host into
the common get_ucall().  Return a host virtual address instead of a guest
virtual address even though the addr_gva2hva() part could be moved to
get_ucall() too.  Conceptually, get_ucall() is invoked from the host and
should return a host virtual address (and returning NULL for "nothing to
see here" is far superior to returning 0).

Use pointer shenanigans instead of an unnecessary bounce buffer when the
caller of get_ucall() provides a valid pointer.
Reviewed-by: Andrew Jones <andrew.jones@linux.dev>
Tested-by: Peter Gonda <pgonda@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/20221006003409.649993-3-seanjc@google.com
parent 70466381
...@@ -27,9 +27,10 @@ struct ucall { ...@@ -27,9 +27,10 @@ struct ucall {
void ucall_arch_init(struct kvm_vm *vm, void *arg); void ucall_arch_init(struct kvm_vm *vm, void *arg);
void ucall_arch_uninit(struct kvm_vm *vm); void ucall_arch_uninit(struct kvm_vm *vm);
void ucall_arch_do_ucall(vm_vaddr_t uc); void ucall_arch_do_ucall(vm_vaddr_t uc);
uint64_t ucall_arch_get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc); void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu);
void ucall(uint64_t cmd, int nargs, ...); void ucall(uint64_t cmd, int nargs, ...);
uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc);
static inline void ucall_init(struct kvm_vm *vm, void *arg) static inline void ucall_init(struct kvm_vm *vm, void *arg)
{ {
...@@ -41,11 +42,6 @@ static inline void ucall_uninit(struct kvm_vm *vm) ...@@ -41,11 +42,6 @@ static inline void ucall_uninit(struct kvm_vm *vm)
ucall_arch_uninit(vm); ucall_arch_uninit(vm);
} }
static inline uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
{
return ucall_arch_get_ucall(vcpu, uc);
}
#define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4) \ #define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4) \
ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4) ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4)
#define GUEST_SYNC(stage) ucall(UCALL_SYNC, 2, "hello", stage) #define GUEST_SYNC(stage) ucall(UCALL_SYNC, 2, "hello", stage)
......
...@@ -75,13 +75,9 @@ void ucall_arch_do_ucall(vm_vaddr_t uc) ...@@ -75,13 +75,9 @@ void ucall_arch_do_ucall(vm_vaddr_t uc)
WRITE_ONCE(*ucall_exit_mmio_addr, uc); WRITE_ONCE(*ucall_exit_mmio_addr, uc);
} }
uint64_t ucall_arch_get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc) void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
{ {
struct kvm_run *run = vcpu->run; struct kvm_run *run = vcpu->run;
struct ucall ucall = {};
if (uc)
memset(uc, 0, sizeof(*uc));
if (run->exit_reason == KVM_EXIT_MMIO && if (run->exit_reason == KVM_EXIT_MMIO &&
run->mmio.phys_addr == (uint64_t)ucall_exit_mmio_addr) { run->mmio.phys_addr == (uint64_t)ucall_exit_mmio_addr) {
...@@ -90,12 +86,8 @@ uint64_t ucall_arch_get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc) ...@@ -90,12 +86,8 @@ uint64_t ucall_arch_get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
TEST_ASSERT(run->mmio.is_write && run->mmio.len == 8, TEST_ASSERT(run->mmio.is_write && run->mmio.len == 8,
"Unexpected ucall exit mmio address access"); "Unexpected ucall exit mmio address access");
memcpy(&gva, run->mmio.data, sizeof(gva)); memcpy(&gva, run->mmio.data, sizeof(gva));
memcpy(&ucall, addr_gva2hva(vcpu->vm, gva), sizeof(ucall)); return addr_gva2hva(vcpu->vm, gva);
vcpu_run_complete_io(vcpu);
if (uc)
memcpy(uc, &ucall, sizeof(ucall));
} }
return ucall.cmd; return NULL;
} }
...@@ -51,27 +51,15 @@ void ucall_arch_do_ucall(vm_vaddr_t uc) ...@@ -51,27 +51,15 @@ void ucall_arch_do_ucall(vm_vaddr_t uc)
uc, 0, 0, 0, 0, 0); uc, 0, 0, 0, 0, 0);
} }
uint64_t ucall_arch_get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc) void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
{ {
struct kvm_run *run = vcpu->run; struct kvm_run *run = vcpu->run;
struct ucall ucall = {};
if (uc)
memset(uc, 0, sizeof(*uc));
if (run->exit_reason == KVM_EXIT_RISCV_SBI && if (run->exit_reason == KVM_EXIT_RISCV_SBI &&
run->riscv_sbi.extension_id == KVM_RISCV_SELFTESTS_SBI_EXT) { run->riscv_sbi.extension_id == KVM_RISCV_SELFTESTS_SBI_EXT) {
switch (run->riscv_sbi.function_id) { switch (run->riscv_sbi.function_id) {
case KVM_RISCV_SELFTESTS_SBI_UCALL: case KVM_RISCV_SELFTESTS_SBI_UCALL:
memcpy(&ucall, return addr_gva2hva(vcpu->vm, run->riscv_sbi.args[0]);
addr_gva2hva(vcpu->vm, run->riscv_sbi.args[0]),
sizeof(ucall));
vcpu_run_complete_io(vcpu);
if (uc)
memcpy(uc, &ucall, sizeof(ucall));
break;
case KVM_RISCV_SELFTESTS_SBI_UNEXP: case KVM_RISCV_SELFTESTS_SBI_UNEXP:
vcpu_dump(stderr, vcpu, 2); vcpu_dump(stderr, vcpu, 2);
TEST_ASSERT(0, "Unexpected trap taken by guest"); TEST_ASSERT(0, "Unexpected trap taken by guest");
...@@ -80,6 +68,5 @@ uint64_t ucall_arch_get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc) ...@@ -80,6 +68,5 @@ uint64_t ucall_arch_get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
break; break;
} }
} }
return NULL;
return ucall.cmd;
} }
...@@ -20,13 +20,9 @@ void ucall_arch_do_ucall(vm_vaddr_t uc) ...@@ -20,13 +20,9 @@ void ucall_arch_do_ucall(vm_vaddr_t uc)
asm volatile ("diag 0,%0,0x501" : : "a"(uc) : "memory"); asm volatile ("diag 0,%0,0x501" : : "a"(uc) : "memory");
} }
uint64_t ucall_arch_get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc) void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
{ {
struct kvm_run *run = vcpu->run; struct kvm_run *run = vcpu->run;
struct ucall ucall = {};
if (uc)
memset(uc, 0, sizeof(*uc));
if (run->exit_reason == KVM_EXIT_S390_SIEIC && if (run->exit_reason == KVM_EXIT_S390_SIEIC &&
run->s390_sieic.icptcode == 4 && run->s390_sieic.icptcode == 4 &&
...@@ -34,13 +30,7 @@ uint64_t ucall_arch_get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc) ...@@ -34,13 +30,7 @@ uint64_t ucall_arch_get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
(run->s390_sieic.ipb >> 16) == 0x501) { (run->s390_sieic.ipb >> 16) == 0x501) {
int reg = run->s390_sieic.ipa & 0xf; int reg = run->s390_sieic.ipa & 0xf;
memcpy(&ucall, addr_gva2hva(vcpu->vm, run->s.regs.gprs[reg]), return addr_gva2hva(vcpu->vm, run->s.regs.gprs[reg]);
sizeof(ucall));
vcpu_run_complete_io(vcpu);
if (uc)
memcpy(uc, &ucall, sizeof(ucall));
} }
return NULL;
return ucall.cmd;
} }
...@@ -18,3 +18,22 @@ void ucall(uint64_t cmd, int nargs, ...) ...@@ -18,3 +18,22 @@ void ucall(uint64_t cmd, int nargs, ...)
ucall_arch_do_ucall((vm_vaddr_t)&uc); ucall_arch_do_ucall((vm_vaddr_t)&uc);
} }
uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
{
struct ucall ucall;
void *addr;
if (!uc)
uc = &ucall;
addr = ucall_arch_get_ucall(vcpu);
if (addr) {
memcpy(uc, addr, sizeof(*uc));
vcpu_run_complete_io(vcpu);
} else {
memset(uc, 0, sizeof(*uc));
}
return uc->cmd;
}
...@@ -22,25 +22,15 @@ void ucall_arch_do_ucall(vm_vaddr_t uc) ...@@ -22,25 +22,15 @@ void ucall_arch_do_ucall(vm_vaddr_t uc)
: : [port] "d" (UCALL_PIO_PORT), "D" (uc) : "rax", "memory"); : : [port] "d" (UCALL_PIO_PORT), "D" (uc) : "rax", "memory");
} }
uint64_t ucall_arch_get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc) void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
{ {
struct kvm_run *run = vcpu->run; struct kvm_run *run = vcpu->run;
struct ucall ucall = {};
if (uc)
memset(uc, 0, sizeof(*uc));
if (run->exit_reason == KVM_EXIT_IO && run->io.port == UCALL_PIO_PORT) { if (run->exit_reason == KVM_EXIT_IO && run->io.port == UCALL_PIO_PORT) {
struct kvm_regs regs; struct kvm_regs regs;
vcpu_regs_get(vcpu, &regs); vcpu_regs_get(vcpu, &regs);
memcpy(&ucall, addr_gva2hva(vcpu->vm, (vm_vaddr_t)regs.rdi), return addr_gva2hva(vcpu->vm, regs.rdi);
sizeof(ucall));
vcpu_run_complete_io(vcpu);
if (uc)
memcpy(uc, &ucall, sizeof(ucall));
} }
return NULL;
return ucall.cmd;
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment