Commit 2040f414 authored by Thomas Huth, committed by Christian Borntraeger

KVM: selftests: Split ucall.c into architecture specific files

The way we exit from a guest to userspace is very specific to the
architecture: on x86 we use PIO, on aarch64 we use MMIO, and on
s390x we're going to use an instruction instead. The ability to
select a type via the ucall_type_t enum is also completely unused,
so the code in ucall.c looks more complex than required. Let's split
this up into architecture-specific ucall.c files instead, so we can
get rid of the #ifdefs and the unnecessary ucall_type_t handling.
Reviewed-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Thomas Huth <thuth@redhat.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Link: https://lore.kernel.org/r/20190731151525.17156-2-thuth@redhat.com
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
parent 609488bc
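
For context, after this change a selftest drives ucalls through the simplified interface roughly as follows. This is a minimal sketch, not part of the commit; VCPU_ID and guest_code are illustrative, and UCALL_SYNC/UCALL_DONE, vm_create_default(), vcpu_run() and kvm_vm_free() are assumed to come from the selftest framework as they commonly do.

#include "kvm_util.h"

#define VCPU_ID 0	/* illustrative vCPU id */

static void guest_code(void)
{
	/* Each ucall() exits to userspace via the arch-specific mechanism. */
	ucall(UCALL_SYNC, 1, 42);
	ucall(UCALL_DONE, 0);
}

int main(void)
{
	struct kvm_vm *vm = vm_create_default(VCPU_ID, 0, guest_code);
	struct ucall uc;

	ucall_init(vm, NULL);	/* no ucall_type_t argument any more */

	do {
		vcpu_run(vm, VCPU_ID);
	} while (get_ucall(vm, VCPU_ID, &uc) != UCALL_DONE);

	ucall_uninit(vm);
	kvm_vm_free(vm);
	return 0;
}

The test code itself no longer cares whether the exit happens via PIO or MMIO; that choice now lives entirely in the per-architecture ucall.c shown in the diff below.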
@@ -7,9 +7,9 @@ top_srcdir = ../../../..
 KSFT_KHDR_INSTALL := 1
 UNAME_M := $(shell uname -m)
-LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/ucall.c lib/sparsebit.c
-LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c
-LIBKVM_aarch64 = lib/aarch64/processor.c
+LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c
+LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/ucall.c
+LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c
 LIBKVM_s390x = lib/s390x/processor.c
 TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test
...
@@ -337,7 +337,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
 #endif
 #ifdef __aarch64__
-	ucall_init(vm, UCALL_MMIO, NULL);
+	ucall_init(vm, NULL);
 #endif
 	/* Export the shared variables to the guest */
...
@@ -165,12 +165,6 @@ int vm_create_device(struct kvm_vm *vm, struct kvm_create_device *cd);
 	memcpy(&(g), _p, sizeof(g));				\
 })
 
-/* ucall implementation types */
-typedef enum {
-	UCALL_PIO,
-	UCALL_MMIO,
-} ucall_type_t;
-
 /* Common ucalls */
 enum {
 	UCALL_NONE,
@@ -186,7 +180,7 @@ struct ucall {
 	uint64_t args[UCALL_MAX_ARGS];
 };
 
-void ucall_init(struct kvm_vm *vm, ucall_type_t type, void *arg);
+void ucall_init(struct kvm_vm *vm, void *arg);
 void ucall_uninit(struct kvm_vm *vm);
 void ucall(uint64_t cmd, int nargs, ...);
 uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc);
...
@@ -5,11 +5,8 @@
  * Copyright (C) 2018, Red Hat, Inc.
  */
 #include "kvm_util.h"
-#include "kvm_util_internal.h"
-
-#define UCALL_PIO_PORT ((uint16_t)0x1000)
-
-static ucall_type_t ucall_type;
+#include "../kvm_util_internal.h"
+
 static vm_vaddr_t *ucall_exit_mmio_addr;
 
 static bool ucall_mmio_init(struct kvm_vm *vm, vm_paddr_t gpa)
@@ -25,78 +22,55 @@ static bool ucall_mmio_init(struct kvm_vm *vm, vm_paddr_t gpa)
 	return true;
 }
 
-void ucall_init(struct kvm_vm *vm, ucall_type_t type, void *arg)
+void ucall_init(struct kvm_vm *vm, void *arg)
 {
-	ucall_type = type;
-	sync_global_to_guest(vm, ucall_type);
-
-	if (type == UCALL_PIO)
-		return;
-
-	if (type == UCALL_MMIO) {
-		vm_paddr_t gpa, start, end, step, offset;
-		unsigned bits;
-		bool ret;
-
-		if (arg) {
-			gpa = (vm_paddr_t)arg;
-			ret = ucall_mmio_init(vm, gpa);
-			TEST_ASSERT(ret, "Can't set ucall mmio address to %lx", gpa);
-			return;
-		}
-
-		/*
-		 * Find an address within the allowed physical and virtual address
-		 * spaces, that does _not_ have a KVM memory region associated with
-		 * it. Identity mapping an address like this allows the guest to
-		 * access it, but as KVM doesn't know what to do with it, it
-		 * will assume it's something userspace handles and exit with
-		 * KVM_EXIT_MMIO. Well, at least that's how it works for AArch64.
-		 * Here we start with a guess that the addresses around 5/8th
-		 * of the allowed space are unmapped and then work both down and
-		 * up from there in 1/16th allowed space sized steps.
-		 *
-		 * Note, we need to use VA-bits - 1 when calculating the allowed
-		 * virtual address space for an identity mapping because the upper
-		 * half of the virtual address space is the two's complement of the
-		 * lower and won't match physical addresses.
-		 */
-		bits = vm->va_bits - 1;
-		bits = vm->pa_bits < bits ? vm->pa_bits : bits;
-		end = 1ul << bits;
-		start = end * 5 / 8;
-		step = end / 16;
-		for (offset = 0; offset < end - start; offset += step) {
-			if (ucall_mmio_init(vm, start - offset))
-				return;
-			if (ucall_mmio_init(vm, start + offset))
-				return;
-		}
-		TEST_ASSERT(false, "Can't find a ucall mmio address");
-	}
+	vm_paddr_t gpa, start, end, step, offset;
+	unsigned int bits;
+	bool ret;
+
+	if (arg) {
+		gpa = (vm_paddr_t)arg;
+		ret = ucall_mmio_init(vm, gpa);
+		TEST_ASSERT(ret, "Can't set ucall mmio address to %lx", gpa);
+		return;
+	}
+
+	/*
+	 * Find an address within the allowed physical and virtual address
+	 * spaces, that does _not_ have a KVM memory region associated with
+	 * it. Identity mapping an address like this allows the guest to
+	 * access it, but as KVM doesn't know what to do with it, it
+	 * will assume it's something userspace handles and exit with
+	 * KVM_EXIT_MMIO. Well, at least that's how it works for AArch64.
+	 * Here we start with a guess that the addresses around 5/8th
+	 * of the allowed space are unmapped and then work both down and
+	 * up from there in 1/16th allowed space sized steps.
+	 *
+	 * Note, we need to use VA-bits - 1 when calculating the allowed
+	 * virtual address space for an identity mapping because the upper
+	 * half of the virtual address space is the two's complement of the
+	 * lower and won't match physical addresses.
+	 */
+	bits = vm->va_bits - 1;
+	bits = vm->pa_bits < bits ? vm->pa_bits : bits;
+	end = 1ul << bits;
+	start = end * 5 / 8;
+	step = end / 16;
+	for (offset = 0; offset < end - start; offset += step) {
+		if (ucall_mmio_init(vm, start - offset))
+			return;
+		if (ucall_mmio_init(vm, start + offset))
+			return;
+	}
+	TEST_ASSERT(false, "Can't find a ucall mmio address");
 }
 
 void ucall_uninit(struct kvm_vm *vm)
 {
-	ucall_type = 0;
-	sync_global_to_guest(vm, ucall_type);
 	ucall_exit_mmio_addr = 0;
 	sync_global_to_guest(vm, ucall_exit_mmio_addr);
 }
 
-static void ucall_pio_exit(struct ucall *uc)
-{
-#ifdef __x86_64__
-	asm volatile("in %[port], %%al"
-		: : [port] "d" (UCALL_PIO_PORT), "D" (uc) : "rax");
-#endif
-}
-
-static void ucall_mmio_exit(struct ucall *uc)
-{
-	*ucall_exit_mmio_addr = (vm_vaddr_t)uc;
-}
 
 void ucall(uint64_t cmd, int nargs, ...)
 {
 	struct ucall uc = {
@@ -112,42 +86,23 @@ void ucall(uint64_t cmd, int nargs, ...)
 		uc.args[i] = va_arg(va, uint64_t);
 	va_end(va);
 
-	switch (ucall_type) {
-	case UCALL_PIO:
-		ucall_pio_exit(&uc);
-		break;
-	case UCALL_MMIO:
-		ucall_mmio_exit(&uc);
-		break;
-	};
+	*ucall_exit_mmio_addr = (vm_vaddr_t)&uc;
 }
 
 uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
 {
 	struct kvm_run *run = vcpu_state(vm, vcpu_id);
 	struct ucall ucall = {};
-	bool got_ucall = false;
 
-#ifdef __x86_64__
-	if (ucall_type == UCALL_PIO && run->exit_reason == KVM_EXIT_IO &&
-	    run->io.port == UCALL_PIO_PORT) {
-		struct kvm_regs regs;
-		vcpu_regs_get(vm, vcpu_id, &regs);
-		memcpy(&ucall, addr_gva2hva(vm, (vm_vaddr_t)regs.rdi), sizeof(ucall));
-		got_ucall = true;
-	}
-#endif
-
-	if (ucall_type == UCALL_MMIO && run->exit_reason == KVM_EXIT_MMIO &&
+	if (run->exit_reason == KVM_EXIT_MMIO &&
 	    run->mmio.phys_addr == (uint64_t)ucall_exit_mmio_addr) {
 		vm_vaddr_t gva;
 
 		TEST_ASSERT(run->mmio.is_write && run->mmio.len == 8,
 			    "Unexpected ucall exit mmio address access");
 		memcpy(&gva, run->mmio.data, sizeof(gva));
 		memcpy(&ucall, addr_gva2hva(vm, gva), sizeof(ucall));
-		got_ucall = true;
-	}
 
-	if (got_ucall) {
 		vcpu_run_complete_io(vm, vcpu_id);
 		if (uc)
 			memcpy(uc, &ucall, sizeof(ucall));
...
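
To make the address-search heuristic in the aarch64 ucall_init() above concrete, here is the same arithmetic with illustrative values; pa_bits = 40 and va_bits = 48 are assumptions for the example, the real values come from vm->pa_bits and vm->va_bits at runtime.

#include <stdio.h>

int main(void)
{
	unsigned int pa_bits = 40, va_bits = 48;
	unsigned int bits = va_bits - 1;		/* 47: identity map must stay in the lower half */
	unsigned long long end, start, step;

	bits = pa_bits < bits ? pa_bits : bits;		/* min(40, 47) = 40 */
	end = 1ull << bits;				/* 1 TiB of guest-physical space */
	start = end * 5 / 8;				/* first probe at 640 GiB */
	step = end / 16;				/* widen the search in 64 GiB steps */
	printf("end=%#llx start=%#llx step=%#llx\n", end, start, step);
	return 0;
}

The loop then probes 640 GiB, 576 GiB, 704 GiB, 512 GiB, and so on, until it finds a guest-physical address with no KVM memory region behind it to use as the exit MMIO page.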
// SPDX-License-Identifier: GPL-2.0
/*
 * ucall support. A ucall is a "hypercall to userspace".
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */
#include "kvm_util.h"

#define UCALL_PIO_PORT ((uint16_t)0x1000)

void ucall_init(struct kvm_vm *vm, void *arg)
{
}

void ucall_uninit(struct kvm_vm *vm)
{
}

void ucall(uint64_t cmd, int nargs, ...)
{
	struct ucall uc = {
		.cmd = cmd,
	};
	va_list va;
	int i;

	nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS;

	va_start(va, nargs);
	for (i = 0; i < nargs; ++i)
		uc.args[i] = va_arg(va, uint64_t);
	va_end(va);

	asm volatile("in %[port], %%al"
		: : [port] "d" (UCALL_PIO_PORT), "D" (&uc) : "rax");
}

uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
{
	struct kvm_run *run = vcpu_state(vm, vcpu_id);
	struct ucall ucall = {};

	if (run->exit_reason == KVM_EXIT_IO && run->io.port == UCALL_PIO_PORT) {
		struct kvm_regs regs;

		vcpu_regs_get(vm, vcpu_id, &regs);
		memcpy(&ucall, addr_gva2hva(vm, (vm_vaddr_t)regs.rdi),
		       sizeof(ucall));
		vcpu_run_complete_io(vm, vcpu_id);
		if (uc)
			memcpy(uc, &ucall, sizeof(ucall));
	}

	return ucall.cmd;
}