Commit 17a81bdb authored by Paolo Bonzini's avatar Paolo Bonzini

Merge tag 'kvm-s390-next-5.4-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

* More selftests
* Improved KVM_S390_MEM_OP ioctl input checking
* Added invalid-bit checking for kvm_valid_regs and kvm_dirty_regs
parents 95c06540 81cb736c
@@ -3092,12 +3092,14 @@
 This exception is also raised directly at the corresponding VCPU if the
 flag KVM_S390_MEMOP_F_INJECT_EXCEPTION is set in the "flags" field.
 
-The start address of the memory region has to be specified in the "gaddr"
-field, and the length of the region in the "size" field. "buf" is the buffer
-supplied by the userspace application where the read data should be written
-to for KVM_S390_MEMOP_LOGICAL_READ, or where the data that should be written
-is stored for a KVM_S390_MEMOP_LOGICAL_WRITE. "buf" is unused and can be NULL
-when KVM_S390_MEMOP_F_CHECK_ONLY is specified. "ar" designates the access
-register number to be used.
+The start address of the memory region has to be specified in the "gaddr"
+field, and the length of the region in the "size" field (which must not
+be 0). The maximum value for "size" can be obtained by checking the
+KVM_CAP_S390_MEM_OP capability. "buf" is the buffer supplied by the
+userspace application where the read data should be written to for
+KVM_S390_MEMOP_LOGICAL_READ, or where the data that should be written is
+stored for a KVM_S390_MEMOP_LOGICAL_WRITE. When KVM_S390_MEMOP_F_CHECK_ONLY
+is specified, "buf" is unused and can be NULL. "ar" designates the access
+register number to be used; the valid range is 0..15.
 
 The "reserved" field is meant for future extensions. It is not used by
 KVM with the currently defined set of flags.
......
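For illustration only (not part of this commit), a minimal userspace sketch of a logical read using the fields documented above; the vcpu file descriptor and the guest address are assumptions:

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Hedged sketch: read `len` bytes of guest logical memory into `buf`.
 * Assumes `vcpu_fd` was obtained via KVM_CREATE_VCPU and `gaddr` is a
 * valid guest logical address. Returns 0 on success, a positive program
 * interruption code if the guest access faults, or -1 with errno set
 * (e.g. EINVAL for size == 0 or ar > 15). */
static int guest_logical_read(int vcpu_fd, uint64_t gaddr, void *buf, uint32_t len)
{
	struct kvm_s390_mem_op ksmo = {
		.gaddr = gaddr,
		.size  = len,			/* must not be 0 */
		.op    = KVM_S390_MEMOP_LOGICAL_READ,
		.buf   = (uintptr_t)buf,
		.ar    = 0,			/* access register 0..15 */
	};

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &ksmo);
}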
@@ -231,6 +231,12 @@ struct kvm_guest_debug_arch {
 #define KVM_SYNC_GSCB   (1UL << 9)
 #define KVM_SYNC_BPBC   (1UL << 10)
 #define KVM_SYNC_ETOKEN (1UL << 11)
+
+#define KVM_SYNC_S390_VALID_FIELDS \
+	(KVM_SYNC_PREFIX | KVM_SYNC_GPRS | KVM_SYNC_ACRS | KVM_SYNC_CRS | \
+	 KVM_SYNC_ARCH0 | KVM_SYNC_PFAULT | KVM_SYNC_VRS | KVM_SYNC_RICCB | \
+	 KVM_SYNC_FPRS | KVM_SYNC_GSCB | KVM_SYNC_BPBC | KVM_SYNC_ETOKEN)
+
 /* length and alignment of the sdnx as a power of two */
 #define SDNXC 8
 #define SDNXL (1UL << SDNXC)
......
@@ -3998,6 +3998,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if (kvm_run->immediate_exit)
 		return -EINTR;
 
+	if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
+	    kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
+		return -EINVAL;
+
 	vcpu_load(vcpu);
 
 	if (guestdbg_exit_pending(vcpu)) {
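For context, a hedged sketch (not from this commit) of the userspace side that this check guards, on an s390 host: a caller sets sync-register bits on the shared kvm_run structure before KVM_RUN, and any bit outside KVM_SYNC_S390_VALID_FIELDS now fails fast with EINVAL:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Minimal sketch; assumes vcpu_fd and the mmap'ed kvm_run area. */
static int run_with_synced_regs(int vcpu_fd, struct kvm_run *run)
{
	/* Ask KVM to provide these register sets in run->s.regs on exit. */
	run->kvm_valid_regs = KVM_SYNC_GPRS | KVM_SYNC_ACRS | KVM_SYNC_CRS;
	/* No modified sets to push back into the VCPU on entry. */
	run->kvm_dirty_regs = 0;

	return ioctl(vcpu_fd, KVM_RUN, 0);	/* fails with EINVAL on unknown bits */
}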
@@ -4255,7 +4259,7 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
 	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
 				    | KVM_S390_MEMOP_F_CHECK_ONLY;
 
-	if (mop->flags & ~supported_flags)
+	if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
 		return -EINVAL;
 
 	if (mop->size > MEM_OP_MAX_SIZE)
......
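NUM_ACRS is 16 on s390 (there are sixteen access registers), so the strengthened check enforces the 0..15 range now documented in api.txt, and a zero-length operation is rejected up front. A hedged example of a call that now draws EINVAL, mirroring the memop selftest added below; vcpu_fd and buf are assumptions:

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Hedged sketch: either marked field alone now makes the ioctl fail
 * with EINVAL instead of being accepted. */
static int memop_bad_args(int vcpu_fd, void *buf)
{
	struct kvm_s390_mem_op ksmo = {
		.gaddr = 0x1000,		/* hypothetical guest address */
		.size  = 0,			/* rejected: !mop->size */
		.op    = KVM_S390_MEMOP_LOGICAL_READ,
		.buf   = (uintptr_t)buf,
		.ar    = 17,			/* rejected: ar >= NUM_ACRS (16) */
	};

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &ksmo);	/* -1, errno == EINVAL */
}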
@@ -7,10 +7,10 @@ top_srcdir = ../../../..
 KSFT_KHDR_INSTALL := 1
 UNAME_M := $(shell uname -m)
 
-LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/ucall.c lib/sparsebit.c
-LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c
-LIBKVM_aarch64 = lib/aarch64/processor.c
-LIBKVM_s390x = lib/s390x/processor.c
+LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c
+LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/ucall.c
+LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c
+LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c
 
 TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test
 TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
@@ -32,7 +32,9 @@ TEST_GEN_PROGS_aarch64 += clear_dirty_log_test
 TEST_GEN_PROGS_aarch64 += dirty_log_test
 TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
 
+TEST_GEN_PROGS_s390x = s390x/memop
 TEST_GEN_PROGS_s390x += s390x/sync_regs_test
+TEST_GEN_PROGS_s390x += dirty_log_test
 TEST_GEN_PROGS_s390x += kvm_create_max_vcpus
 
 TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M))
......
@@ -26,8 +26,8 @@
 /* The memory slot index to track dirty pages */
 #define TEST_MEM_SLOT_INDEX		1
 
-/* Default guest test memory offset, 1G */
-#define DEFAULT_GUEST_TEST_MEM		0x40000000
+/* Default guest test virtual memory offset */
+#define DEFAULT_GUEST_TEST_MEM		0xc0000000
 
 /* How many pages to dirty for each guest loop */
 #define TEST_PAGES_PER_LOOP		1024
@@ -38,6 +38,27 @@
 /* Interval for each host loop (ms) */
 #define TEST_HOST_LOOP_INTERVAL		10UL
 
+/* Dirty bitmaps are always little endian, so we need to swap on big endian */
+#if defined(__s390x__)
+# define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)
+# define test_bit_le(nr, addr) \
+	test_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
+# define set_bit_le(nr, addr) \
+	set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
+# define clear_bit_le(nr, addr) \
+	clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
+# define test_and_set_bit_le(nr, addr) \
+	test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
+# define test_and_clear_bit_le(nr, addr) \
+	test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
+#else
+# define test_bit_le		test_bit
+# define set_bit_le		set_bit
+# define clear_bit_le		clear_bit
+# define test_and_set_bit_le	test_and_set_bit
+# define test_and_clear_bit_le	test_and_clear_bit
+#endif
+
 /*
  * Guest/Host shared variables. Ensure addr_gva2hva() and/or
  * sync_global_to/from_guest() are used when accessing from
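On a 64-bit build, BITOP_LE_SWIZZLE is (64 - 1) & ~0x7 = 56 (0x38). The generic test_bit()/set_bit() helpers number bits arithmetically within an unsigned long, so bit 0 lands in the least significant byte, which a big-endian host stores at the highest address of the word, whereas a little-endian dirty bitmap keeps bit 0 at the lowest address. XOR-ing the bit number with 56 inverts its byte-index bits (the upper three bits of the 6-bit in-word position) and leaves the bit-within-byte position alone. For example, LE bit 5 (byte 0, bit 5) becomes arithmetic bit 61, which falls in bits 56..63 of the value, the byte a big-endian host stores first. A minimal sketch (not from the commit) that prints the mapping:

/* Runs anywhere; the swizzle only changes behavior on big-endian hosts. */
#include <stdio.h>

#define BITOP_LE_SWIZZLE ((64 - 1) & ~0x7)	/* 0x38 for 64-bit longs */

int main(void)
{
	for (unsigned int nr = 0; nr < 64; nr += 9) {
		unsigned int s = nr ^ BITOP_LE_SWIZZLE;
		printf("le bit %2u -> arithmetic bit %2u (value byte %u, bit %u)\n",
		       nr, s, s / 8, s % 8);
	}
	return 0;
}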
@@ -69,11 +90,23 @@ static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
  */
 static void guest_code(void)
 {
+	uint64_t addr;
 	int i;
 
+	/*
+	 * On s390x, all pages of a 1M segment are initially marked as dirty
+	 * when a page of the segment is written to for the very first time.
+	 * To compensate this specialty in this test, we need to touch all
+	 * pages during the first iteration.
+	 */
+	for (i = 0; i < guest_num_pages; i++) {
+		addr = guest_test_virt_mem + i * guest_page_size;
+		*(uint64_t *)addr = READ_ONCE(iteration);
+	}
+
 	while (true) {
 		for (i = 0; i < TEST_PAGES_PER_LOOP; i++) {
-			uint64_t addr = guest_test_virt_mem;
+			addr = guest_test_virt_mem;
 			addr += (READ_ONCE(random_array[i]) % guest_num_pages)
 				* guest_page_size;
 			addr &= ~(host_page_size - 1);
@@ -158,15 +191,15 @@ static void vm_dirty_log_verify(unsigned long *bmap)
 		value_ptr = host_test_mem + page * host_page_size;
 
 		/* If this is a special page that we were tracking... */
-		if (test_and_clear_bit(page, host_bmap_track)) {
+		if (test_and_clear_bit_le(page, host_bmap_track)) {
 			host_track_next_count++;
-			TEST_ASSERT(test_bit(page, bmap),
+			TEST_ASSERT(test_bit_le(page, bmap),
 				    "Page %"PRIu64" should have its dirty bit "
 				    "set in this iteration but it is missing",
 				    page);
 		}
 
-		if (test_bit(page, bmap)) {
+		if (test_bit_le(page, bmap)) {
 			host_dirty_count++;
 			/*
 			 * If the bit is set, the value written onto
@@ -209,7 +242,7 @@ static void vm_dirty_log_verify(unsigned long *bmap)
 				 * should report its dirtyness in the
 				 * next run
 				 */
-				set_bit(page, host_bmap_track);
+				set_bit_le(page, host_bmap_track);
 			}
 		}
 	}
@@ -293,6 +326,10 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 	 * case where the size is not aligned to 64 pages.
 	 */
 	guest_num_pages = (1ul << (30 - guest_page_shift)) + 16;
+#ifdef __s390x__
+	/* Round up to multiple of 1M (segment size) */
+	guest_num_pages = (guest_num_pages + 0xff) & ~0xffUL;
+#endif
 	host_page_size = getpagesize();
 	host_num_pages = (guest_num_pages * guest_page_size) / host_page_size +
 			 !!((guest_num_pages * guest_page_size) % host_page_size);
@@ -304,6 +341,11 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 		guest_test_phys_mem = phys_offset;
 	}
 
+#ifdef __s390x__
+	/* Align to 1M (segment size) */
+	guest_test_phys_mem &= ~((1 << 20) - 1);
+#endif
+
 	DEBUG("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
 
 	bmap = bitmap_alloc(host_num_pages);
@@ -337,7 +379,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
 #endif
 #ifdef __aarch64__
-	ucall_init(vm, UCALL_MMIO, NULL);
+	ucall_init(vm, NULL);
 #endif
 
 	/* Export the shared variables to the guest */
@@ -454,6 +496,9 @@ int main(int argc, char *argv[])
 		vm_guest_mode_params_init(VM_MODE_P48V48_64K, true, true);
 	}
 #endif
+#ifdef __s390x__
+	vm_guest_mode_params_init(VM_MODE_P40V48_4K, true, true);
+#endif
 
 	while ((opt = getopt(argc, argv, "hi:I:p:m:")) != -1) {
 		switch (opt) {
......
@@ -165,12 +165,6 @@ int vm_create_device(struct kvm_vm *vm, struct kvm_create_device *cd);
 	memcpy(&(g), _p, sizeof(g));				\
 })
 
-/* ucall implementation types */
-typedef enum {
-	UCALL_PIO,
-	UCALL_MMIO,
-} ucall_type_t;
-
 /* Common ucalls */
 enum {
 	UCALL_NONE,
@@ -186,7 +180,7 @@ struct ucall {
 	uint64_t args[UCALL_MAX_ARGS];
 };
 
-void ucall_init(struct kvm_vm *vm, ucall_type_t type, void *arg);
+void ucall_init(struct kvm_vm *vm, void *arg);
 void ucall_uninit(struct kvm_vm *vm);
 void ucall(uint64_t cmd, int nargs, ...);
 uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc);
......
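For orientation, a hedged sketch of how a selftest uses this API after the change: the backend is now selected at build time by linking the matching lib/<arch>/ucall.c, so callers simply drop the old ucall_type_t argument. VCPU_ID and guest_main here are hypothetical; GUEST_SYNC()/GUEST_DONE() are the existing convenience wrappers around ucall():

#include "kvm_util.h"

#define VCPU_ID 0	/* hypothetical */

/* Guest side: report progress through ucall(). */
static void guest_main(void)
{
	GUEST_SYNC(1);
	GUEST_DONE();
}

/* Host side: run the VCPU and decode each exit. */
static void host_loop(struct kvm_vm *vm)
{
	struct ucall uc;

	for (;;) {
		vcpu_run(vm, VCPU_ID);
		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_SYNC:
			continue;	/* uc.args[] holds the guest's values */
		case UCALL_DONE:
			return;
		default:
			TEST_ASSERT(false, "unexpected ucall");
		}
	}
}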
@@ -5,11 +5,8 @@
  * Copyright (C) 2018, Red Hat, Inc.
  */
 #include "kvm_util.h"
-#include "kvm_util_internal.h"
-
-#define UCALL_PIO_PORT ((uint16_t)0x1000)
+#include "../kvm_util_internal.h"
 
-static ucall_type_t ucall_type;
 static vm_vaddr_t *ucall_exit_mmio_addr;
 
 static bool ucall_mmio_init(struct kvm_vm *vm, vm_paddr_t gpa)
@@ -25,17 +22,10 @@ static bool ucall_mmio_init(struct kvm_vm *vm, vm_paddr_t gpa)
 	return true;
 }
 
-void ucall_init(struct kvm_vm *vm, ucall_type_t type, void *arg)
+void ucall_init(struct kvm_vm *vm, void *arg)
 {
-	ucall_type = type;
-	sync_global_to_guest(vm, ucall_type);
-
-	if (type == UCALL_PIO)
-		return;
-
-	if (type == UCALL_MMIO) {
-		vm_paddr_t gpa, start, end, step, offset;
-		unsigned bits;
-		bool ret;
+	vm_paddr_t gpa, start, end, step, offset;
+	unsigned int bits;
+	bool ret;
 
 	if (arg) {
@@ -73,30 +63,14 @@ void ucall_init(struct kvm_vm *vm, ucall_type_t type, void *arg)
 		return;
 	}
 
 	TEST_ASSERT(false, "Can't find a ucall mmio address");
-	}
 }
 
 void ucall_uninit(struct kvm_vm *vm)
 {
-	ucall_type = 0;
-	sync_global_to_guest(vm, ucall_type);
 	ucall_exit_mmio_addr = 0;
 	sync_global_to_guest(vm, ucall_exit_mmio_addr);
 }
 
-static void ucall_pio_exit(struct ucall *uc)
-{
-#ifdef __x86_64__
-	asm volatile("in %[port], %%al"
-		: : [port] "d" (UCALL_PIO_PORT), "D" (uc) : "rax");
-#endif
-}
-
-static void ucall_mmio_exit(struct ucall *uc)
-{
-	*ucall_exit_mmio_addr = (vm_vaddr_t)uc;
-}
-
 void ucall(uint64_t cmd, int nargs, ...)
 {
 	struct ucall uc = {
@@ -112,42 +86,23 @@ void ucall(uint64_t cmd, int nargs, ...)
 		uc.args[i] = va_arg(va, uint64_t);
 	va_end(va);
 
-	switch (ucall_type) {
-	case UCALL_PIO:
-		ucall_pio_exit(&uc);
-		break;
-	case UCALL_MMIO:
-		ucall_mmio_exit(&uc);
-		break;
-	};
+	*ucall_exit_mmio_addr = (vm_vaddr_t)&uc;
 }
 
 uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
 {
 	struct kvm_run *run = vcpu_state(vm, vcpu_id);
 	struct ucall ucall = {};
-	bool got_ucall = false;
 
-#ifdef __x86_64__
-	if (ucall_type == UCALL_PIO && run->exit_reason == KVM_EXIT_IO &&
-	    run->io.port == UCALL_PIO_PORT) {
-		struct kvm_regs regs;
-
-		vcpu_regs_get(vm, vcpu_id, &regs);
-		memcpy(&ucall, addr_gva2hva(vm, (vm_vaddr_t)regs.rdi), sizeof(ucall));
-		got_ucall = true;
-	}
-#endif
-
-	if (ucall_type == UCALL_MMIO && run->exit_reason == KVM_EXIT_MMIO &&
+	if (run->exit_reason == KVM_EXIT_MMIO &&
 	    run->mmio.phys_addr == (uint64_t)ucall_exit_mmio_addr) {
 		vm_vaddr_t gva;
 
 		TEST_ASSERT(run->mmio.is_write && run->mmio.len == 8,
 			    "Unexpected ucall exit mmio address access");
 		memcpy(&gva, run->mmio.data, sizeof(gva));
 		memcpy(&ucall, addr_gva2hva(vm, gva), sizeof(ucall));
-		got_ucall = true;
-	}
 
-	if (got_ucall) {
 		vcpu_run_complete_io(vm, vcpu_id);
 		if (uc)
 			memcpy(uc, &ucall, sizeof(ucall));
......
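The net effect of the hunks above plus the new files below: the single lib/ucall.c with a runtime ucall_type switch becomes one translation unit per architecture (the Makefile change moves the ucall source into the per-arch LIBKVM entries), so aarch64 keeps the MMIO exit, x86_64 keeps the PIO exit, and s390x gains a DIAGNOSE-based exit, with no runtime dispatch left.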
New file: lib/s390x/ucall.c

// SPDX-License-Identifier: GPL-2.0
/*
* ucall support. A ucall is a "hypercall to userspace".
*
* Copyright (C) 2019 Red Hat, Inc.
*/
#include "kvm_util.h"
void ucall_init(struct kvm_vm *vm, void *arg)
{
}
void ucall_uninit(struct kvm_vm *vm)
{
}
void ucall(uint64_t cmd, int nargs, ...)
{
struct ucall uc = {
.cmd = cmd,
};
va_list va;
int i;
nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS;
va_start(va, nargs);
for (i = 0; i < nargs; ++i)
uc.args[i] = va_arg(va, uint64_t);
va_end(va);
/* Exit via DIAGNOSE 0x501 (normally used for breakpoints) */
asm volatile ("diag 0,%0,0x501" : : "a"(&uc) : "memory");
}
uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
{
struct kvm_run *run = vcpu_state(vm, vcpu_id);
struct ucall ucall = {};
if (run->exit_reason == KVM_EXIT_S390_SIEIC &&
run->s390_sieic.icptcode == 4 &&
(run->s390_sieic.ipa >> 8) == 0x83 && /* 0x83 means DIAGNOSE */
(run->s390_sieic.ipb >> 16) == 0x501) {
int reg = run->s390_sieic.ipa & 0xf;
memcpy(&ucall, addr_gva2hva(vm, run->s.regs.gprs[reg]),
sizeof(ucall));
vcpu_run_complete_io(vm, vcpu_id);
if (uc)
memcpy(uc, &ucall, sizeof(ucall));
}
return ucall.cmd;
}
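A short note on the host-side decode above: icptcode 4 is the SIE instruction-interception code, and ipa/ipb carry the first and remaining halfwords of the intercepted instruction, so the checks recognize a DIAGNOSE (opcode 0x83) with function code 0x501; the low nibble of ipa names the register holding the guest address of the ucall structure, whose value is then taken from the synced gprs.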
New file: lib/x86_64/ucall.c

// SPDX-License-Identifier: GPL-2.0
/*
* ucall support. A ucall is a "hypercall to userspace".
*
* Copyright (C) 2018, Red Hat, Inc.
*/
#include "kvm_util.h"
#define UCALL_PIO_PORT ((uint16_t)0x1000)
void ucall_init(struct kvm_vm *vm, void *arg)
{
}
void ucall_uninit(struct kvm_vm *vm)
{
}
void ucall(uint64_t cmd, int nargs, ...)
{
struct ucall uc = {
.cmd = cmd,
};
va_list va;
int i;
nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS;
va_start(va, nargs);
for (i = 0; i < nargs; ++i)
uc.args[i] = va_arg(va, uint64_t);
va_end(va);
asm volatile("in %[port], %%al"
: : [port] "d" (UCALL_PIO_PORT), "D" (&uc) : "rax");
}
uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
{
struct kvm_run *run = vcpu_state(vm, vcpu_id);
struct ucall ucall = {};
if (run->exit_reason == KVM_EXIT_IO && run->io.port == UCALL_PIO_PORT) {
struct kvm_regs regs;
vcpu_regs_get(vm, vcpu_id, &regs);
memcpy(&ucall, addr_gva2hva(vm, (vm_vaddr_t)regs.rdi),
sizeof(ucall));
vcpu_run_complete_io(vm, vcpu_id);
if (uc)
memcpy(uc, &ucall, sizeof(ucall));
}
return ucall.cmd;
}
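Similarly for the x86 variant just above: an `in` from port 0x1000 is not handled in the kernel, so it always produces a KVM_EXIT_IO exit, and the "D" constraint pins &uc into %rdi, which the host recovers with vcpu_regs_get() before copying the ucall structure out of guest memory.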
New file: s390x/memop.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Test for s390x KVM_S390_MEM_OP
*
* Copyright (C) 2019, Red Hat, Inc.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "test_util.h"
#include "kvm_util.h"
#define VCPU_ID 1
static uint8_t mem1[65536];
static uint8_t mem2[65536];
static void guest_code(void)
{
int i;
for (;;) {
for (i = 0; i < sizeof(mem2); i++)
mem2[i] = mem1[i];
GUEST_SYNC(0);
}
}
int main(int argc, char *argv[])
{
struct kvm_vm *vm;
struct kvm_run *run;
struct kvm_s390_mem_op ksmo;
int rv, i, maxsize;
setbuf(stdout, NULL); /* Tell stdout not to buffer its content */
maxsize = kvm_check_cap(KVM_CAP_S390_MEM_OP);
if (!maxsize) {
fprintf(stderr, "CAP_S390_MEM_OP not supported -> skip test\n");
exit(KSFT_SKIP);
}
if (maxsize > sizeof(mem1))
maxsize = sizeof(mem1);
/* Create VM */
vm = vm_create_default(VCPU_ID, 0, guest_code);
run = vcpu_state(vm, VCPU_ID);
for (i = 0; i < sizeof(mem1); i++)
mem1[i] = i * i + i;
/* Set the first array */
ksmo.gaddr = addr_gva2gpa(vm, (uintptr_t)mem1);
ksmo.flags = 0;
ksmo.size = maxsize;
ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
ksmo.buf = (uintptr_t)mem1;
ksmo.ar = 0;
vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
/* Let the guest code copy the first array to the second */
vcpu_run(vm, VCPU_ID);
TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
"Unexpected exit reason: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
memset(mem2, 0xaa, sizeof(mem2));
/* Get the second array */
ksmo.gaddr = (uintptr_t)mem2;
ksmo.flags = 0;
ksmo.size = maxsize;
ksmo.op = KVM_S390_MEMOP_LOGICAL_READ;
ksmo.buf = (uintptr_t)mem2;
ksmo.ar = 0;
vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
TEST_ASSERT(!memcmp(mem1, mem2, maxsize),
"Memory contents do not match!");
/* Check error conditions - first bad size: */
ksmo.gaddr = (uintptr_t)mem1;
ksmo.flags = 0;
ksmo.size = -1;
ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
ksmo.buf = (uintptr_t)mem1;
ksmo.ar = 0;
rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
TEST_ASSERT(rv == -1 && errno == E2BIG, "ioctl allows insane sizes");
/* Zero size: */
ksmo.gaddr = (uintptr_t)mem1;
ksmo.flags = 0;
ksmo.size = 0;
ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
ksmo.buf = (uintptr_t)mem1;
ksmo.ar = 0;
rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
TEST_ASSERT(rv == -1 && (errno == EINVAL || errno == ENOMEM),
"ioctl allows 0 as size");
/* Bad flags: */
ksmo.gaddr = (uintptr_t)mem1;
ksmo.flags = -1;
ksmo.size = maxsize;
ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
ksmo.buf = (uintptr_t)mem1;
ksmo.ar = 0;
rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows all flags");
/* Bad operation: */
ksmo.gaddr = (uintptr_t)mem1;
ksmo.flags = 0;
ksmo.size = maxsize;
ksmo.op = -1;
ksmo.buf = (uintptr_t)mem1;
ksmo.ar = 0;
rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");
/* Bad guest address: */
ksmo.gaddr = ~0xfffUL;
ksmo.flags = KVM_S390_MEMOP_F_CHECK_ONLY;
ksmo.size = maxsize;
ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
ksmo.buf = (uintptr_t)mem1;
ksmo.ar = 0;
rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory access");
/* Bad host address: */
ksmo.gaddr = (uintptr_t)mem1;
ksmo.flags = 0;
ksmo.size = maxsize;
ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
ksmo.buf = 0;
ksmo.ar = 0;
rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
TEST_ASSERT(rv == -1 && errno == EFAULT,
"ioctl does not report bad host memory address");
/* Bad access register: */
run->psw_mask &= ~(3UL << (63 - 17));
run->psw_mask |= 1UL << (63 - 17); /* Enable AR mode */
vcpu_run(vm, VCPU_ID); /* To sync new state to SIE block */
ksmo.gaddr = (uintptr_t)mem1;
ksmo.flags = 0;
ksmo.size = maxsize;
ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
ksmo.buf = (uintptr_t)mem1;
ksmo.ar = 17;
rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows ARs > 15");
run->psw_mask &= ~(3UL << (63 - 17)); /* Disable AR mode */
vcpu_run(vm, VCPU_ID); /* Run to sync new state */
kvm_vm_free(vm);
return 0;
}
@@ -25,9 +25,11 @@
 static void guest_code(void)
 {
+	register u64 stage asm("11") = 0;
+
 	for (;;) {
-		asm volatile ("diag 0,0,0x501");
-		asm volatile ("ahi 11,1");
+		GUEST_SYNC(0);
+		asm volatile ("ahi %0,1" : : "r"(stage));
 	}
 }
@@ -83,6 +85,36 @@ int main(int argc, char *argv[])
 
 	run = vcpu_state(vm, VCPU_ID);
 
+	/* Request reading invalid register set from VCPU. */
+	run->kvm_valid_regs = INVALID_SYNC_FIELD;
+	rv = _vcpu_run(vm, VCPU_ID);
+	TEST_ASSERT(rv < 0 && errno == EINVAL,
+		    "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
+		    rv);
+	vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;
+
+	run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
+	rv = _vcpu_run(vm, VCPU_ID);
+	TEST_ASSERT(rv < 0 && errno == EINVAL,
+		    "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
+		    rv);
+	vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;
+
+	/* Request setting invalid register set into VCPU. */
+	run->kvm_dirty_regs = INVALID_SYNC_FIELD;
+	rv = _vcpu_run(vm, VCPU_ID);
+	TEST_ASSERT(rv < 0 && errno == EINVAL,
+		    "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
+		    rv);
+	vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0;
+
+	run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
+	rv = _vcpu_run(vm, VCPU_ID);
+	TEST_ASSERT(rv < 0 && errno == EINVAL,
+		    "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
+		    rv);
+	vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0;
+
 	/* Request and verify all valid register sets. */
 	run->kvm_valid_regs = TEST_SYNC_FIELDS;
 	rv = _vcpu_run(vm, VCPU_ID);
......