Commit 920541bb authored by Linus Torvalds

Merge tag 'for-linus-6.0' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "A small fix to the reported set of supported CPUID bits, and selftests
  fixes:

   - Skip tests that require EPT when it is not available

   - Do not hang when a test fails with an empty stack trace

   - Avoid spurious failure when running access_tracking_perf_test in a
     KVM guest

   - Work around GCC's tendency to optimize loops into mem*() functions,
     which breaks because the guest code in selftests cannot call into
     PLTs (see the sketch after this message)

   - Fix -Warray-bounds error in fix_hypercall_test"

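For context, a minimal sketch (not part of this commit; the function name is illustrative) of the kind of loop GCC likes to transform. When optimizing (e.g., via -ftree-loop-distribute-patterns), GCC may replace the byte-clearing loop below with a call to memset(), and in selftests guest code that call would go through a PLT stub the guest cannot execute:

#include <stddef.h>

/* GCC's idiom recognition may compile this loop as "call memset". */
static void guest_zero_buffer(char *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		buf[i] = 0;
}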
* tag 'for-linus-6.0' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: selftests: Compare insn opcodes directly in fix_hypercall_test
  KVM: selftests: Implement memcmp(), memcpy(), and memset() for guest use
  KVM: x86: Hide IA32_PLATFORM_DCA_CAP[31:0] from the guest
  KVM: selftests: Gracefully handle empty stack traces
  KVM: selftests: replace assertion with warning in access_tracking_perf_test
  KVM: selftests: Skip tests that require EPT when it is not available
parents 70575e77 39426507
@@ -902,8 +902,6 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
 			entry->edx = 0;
 		}
 		break;
-	case 9:
-		break;
 	case 0xa: { /* Architectural Performance Monitoring */
 		union cpuid10_eax eax;
 		union cpuid10_edx edx;
......
@@ -48,6 +48,8 @@ LIBKVM += lib/rbtree.c
 LIBKVM += lib/sparsebit.c
 LIBKVM += lib/test_util.c
 
+LIBKVM_STRING += lib/string_override.c
+
 LIBKVM_x86_64 += lib/x86_64/apic.c
 LIBKVM_x86_64 += lib/x86_64/handlers.S
 LIBKVM_x86_64 += lib/x86_64/perf_test_util.c
@@ -220,7 +222,8 @@ LIBKVM_C := $(filter %.c,$(LIBKVM))
 LIBKVM_S := $(filter %.S,$(LIBKVM))
 LIBKVM_C_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_C))
 LIBKVM_S_OBJ := $(patsubst %.S, $(OUTPUT)/%.o, $(LIBKVM_S))
-LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ)
+LIBKVM_STRING_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_STRING))
+LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ) $(LIBKVM_STRING_OBJ)
 
 EXTRA_CLEAN += $(LIBKVM_OBJS) cscope.*
@@ -231,6 +234,12 @@ $(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c
 $(LIBKVM_S_OBJ): $(OUTPUT)/%.o: %.S
 	$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
 
+# Compile the string overrides as freestanding to prevent the compiler from
+# generating self-referential code, e.g. without "freestanding" the compiler may
+# "optimize" memcmp() by invoking memcmp(), thus causing infinite recursion.
+$(LIBKVM_STRING_OBJ): $(OUTPUT)/%.o: %.c
+	$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c -ffreestanding $< -o $@
+
 x := $(shell mkdir -p $(sort $(dir $(TEST_GEN_PROGS))))
 $(TEST_GEN_PROGS): $(LIBKVM_OBJS)
 $(TEST_GEN_PROGS_EXTENDED): $(LIBKVM_OBJS)
......
@@ -31,8 +31,9 @@
  * These limitations are worked around in this test by using a large enough
  * region of memory for each vCPU such that the number of translations cached in
  * the TLB and the number of pages held in pagevecs are a small fraction of the
- * overall workload. And if either of those conditions are not true this test
- * will fail rather than silently passing.
+ * overall workload. And if either of those conditions is not true (for example
+ * in nesting, where TLB size is unlimited) this test will print a warning
+ * rather than silently passing.
  */
 #include <inttypes.h>
 #include <limits.h>
@@ -172,17 +173,23 @@ static void mark_vcpu_memory_idle(struct kvm_vm *vm,
 		    vcpu_idx, no_pfn, pages);
 
 	/*
-	 * Test that at least 90% of memory has been marked idle (the rest might
-	 * not be marked idle because the pages have not yet made it to an LRU
-	 * list or the translations are still cached in the TLB). 90% is
+	 * Check that at least 90% of memory has been marked idle (the rest
+	 * might not be marked idle because the pages have not yet made it to an
+	 * LRU list or the translations are still cached in the TLB). 90% is
 	 * arbitrary; high enough that we ensure most memory access went through
 	 * access tracking but low enough as to not make the test too brittle
 	 * over time and across architectures.
+	 *
+	 * Note that when run in nested virtualization, this check will trigger
+	 * much more frequently because TLB size is unlimited and since no flush
+	 * happens, many more pages are cached there and the guest won't see the
+	 * "idle" bit cleared.
 	 */
-	TEST_ASSERT(still_idle < pages / 10,
-		    "vCPU%d: Too many pages still idle (%"PRIu64 " out of %"
-		    PRIu64 ").\n",
-		    vcpu_idx, still_idle, pages);
+	if (still_idle >= pages / 10)
+		printf("WARNING: vCPU%d: Too many pages still idle (%" PRIu64
+		       " out of %" PRIu64 "), this will affect performance results"
+		       ".\n",
+		       vcpu_idx, still_idle, pages);
 
 	close(page_idle_fd);
 	close(pagemap_fd);
......
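The hunk above relies on Linux's idle-page tracking ABI. As a rough sketch (not from this commit; the helper is hypothetical), /sys/kernel/mm/page_idle/bitmap exposes one bit per page frame, packed into 64-bit words, so the test's page_idle_fd reads are laid out like this:

#include <stdint.h>
#include <unistd.h>

/* Returns 1 if the PFN's idle bit is still set, 0 if cleared, -1 on error. */
static int is_page_idle(int page_idle_fd, uint64_t pfn)
{
	uint64_t bits;

	/* Each 8-byte word in the bitmap covers 64 consecutive PFNs. */
	if (pread(page_idle_fd, &bits, sizeof(bits), (pfn / 64) * 8) != sizeof(bits))
		return -1;
	return !!(bits & (1ULL << (pfn % 64)));
}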
@@ -617,6 +617,7 @@ void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
 			uint32_t memslot);
 void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
 			    uint64_t addr, uint64_t size);
+bool kvm_vm_has_ept(struct kvm_vm *vm);
 void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
 		  uint32_t eptp_memslot);
 void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm);
......
@@ -22,7 +22,7 @@ static void test_dump_stack(void)
 	 * Build and run this command:
 	 *
 	 *	addr2line -s -e /proc/$PPID/exe -fpai {backtrace addresses} | \
-	 *		grep -v test_dump_stack | cat -n 1>&2
+	 *		cat -n 1>&2
 	 *
 	 * Note that the spacing is different and there's no newline.
 	 */
@@ -36,18 +36,24 @@ static void test_dump_stack(void)
 		 n * (((sizeof(void *)) * 2) + 1) +
 		 /* Null terminator: */
 		 1];
-	char *c;
+	char *c = cmd;
 
 	n = backtrace(stack, n);
-	c = &cmd[0];
-	c += sprintf(c, "%s", addr2line);
 	/*
-	 * Skip the first 3 frames: backtrace, test_dump_stack, and
-	 * test_assert. We hope that backtrace isn't inlined and the other two
-	 * we've declared noinline.
+	 * Skip the first 2 frames, which should be test_dump_stack() and
+	 * test_assert(); both of which are declared noinline. Bail if the
+	 * resulting stack trace would be empty. Otherwise, addr2line will block
+	 * waiting for addresses to be passed in via stdin.
 	 */
+	if (n <= 2) {
+		fputs("  (stack trace empty)\n", stderr);
+		return;
+	}
+
+	c += sprintf(c, "%s", addr2line);
 	for (i = 2; i < n; i++)
 		c += sprintf(c, " %lx", ((unsigned long) stack[i]) - 1);
 
 	c += sprintf(c, "%s", pipeline);
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Wunused-result"
......
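One subtlety worth noting in the loop above (unchanged by this commit): backtrace() records return addresses, which point at the instruction after each call, so the code feeds addr2line each address minus one to land inside the call instruction itself. A hypothetical helper making that explicit:

#include <stdint.h>

/* A return address points just past the call; back up one byte so
 * addr2line resolves the calling line rather than the following one. */
static inline uintptr_t call_site_of(void *ret_addr)
{
	return (uintptr_t)ret_addr - 1;
}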
// SPDX-License-Identifier: GPL-2.0-only
#include <stddef.h>

/*
 * Override the "basic" built-in string helpers so that they can be used in
 * guest code. KVM selftests don't support dynamic loading in guest code and
 * will jump into the weeds if the compiler decides to insert an out-of-line
 * call via the PLT.
 */
int memcmp(const void *cs, const void *ct, size_t count)
{
	const unsigned char *su1, *su2;
	int res = 0;

	for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) {
		if ((res = *su1 - *su2) != 0)
			break;
	}
	return res;
}

void *memcpy(void *dest, const void *src, size_t count)
{
	char *tmp = dest;
	const char *s = src;

	while (count--)
		*tmp++ = *s++;
	return dest;
}

void *memset(void *s, int c, size_t count)
{
	char *xs = s;

	while (count--)
		*xs++ = c;
	return s;
}
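To see why the overrides matter, consider a hypothetical guest function (not part of this commit): even without naming any string helper, the compiler may lower the struct assignment below to an implicit memcpy() call, and the explicit memset() now resolves to the guest-safe definition above instead of a PLT stub:

#include <stdint.h>
#include <string.h>

struct guest_config {
	uint64_t flags;
	uint8_t payload[128];
};

static void guest_init_config(struct guest_config *dst, const struct guest_config *src)
{
	*dst = *src;	/* large struct copy may lower to memcpy() */
	memset(dst->payload, 0, sizeof(dst->payload));
}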
@@ -5,6 +5,8 @@
  * Copyright (C) 2018, Google LLC.
  */
 
+#include <asm/msr-index.h>
+
 #include "test_util.h"
 #include "kvm_util.h"
 #include "processor.h"
@@ -542,9 +544,27 @@ void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
 	__nested_map(vmx, vm, addr, addr, size, PG_LEVEL_1G);
 }
 
+bool kvm_vm_has_ept(struct kvm_vm *vm)
+{
+	struct kvm_vcpu *vcpu;
+	uint64_t ctrl;
+
+	vcpu = list_first_entry(&vm->vcpus, struct kvm_vcpu, list);
+	TEST_ASSERT(vcpu, "Cannot determine EPT support without vCPUs.\n");
+
+	ctrl = vcpu_get_msr(vcpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS) >> 32;
+	if (!(ctrl & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
+		return false;
+
+	ctrl = vcpu_get_msr(vcpu, MSR_IA32_VMX_PROCBASED_CTLS2) >> 32;
+	return ctrl & SECONDARY_EXEC_ENABLE_EPT;
+}
+
 void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
 		  uint32_t eptp_memslot)
 {
+	TEST_REQUIRE(kvm_vm_has_ept(vm));
+
 	vmx->eptp = (void *)vm_vaddr_alloc_page(vm);
 	vmx->eptp_hva = addr_gva2hva(vm, (uintptr_t)vmx->eptp);
 	vmx->eptp_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->eptp);
......
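For readers unfamiliar with the MSR arithmetic in kvm_vm_has_ept(): per the Intel SDM, each IA32_VMX_*_CTLS capability MSR reports allowed-0 settings in bits 31:0 and allowed-1 settings in bits 63:32, hence the ">> 32" before testing a control bit. A small sketch of that decoding (the helper name is hypothetical, not part of the selftests library):

#include <stdbool.h>
#include <stdint.h>

/* A control can be enabled iff its bit is set in the allowed-1 half
 * (bits 63:32) of the corresponding VMX capability MSR. */
static bool vmx_ctrl_can_be_set(uint64_t cap_msr, uint32_t ctrl)
{
	return ((uint32_t)(cap_msr >> 32) & ctrl) == ctrl;
}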
@@ -14,6 +14,9 @@
 #include "kvm_util.h"
 #include "processor.h"
 
+/* VMCALL and VMMCALL are both 3-byte opcodes. */
+#define HYPERCALL_INSN_SIZE	3
+
 static bool ud_expected;
 
 static void guest_ud_handler(struct ex_regs *regs)
@@ -22,7 +25,7 @@ static void guest_ud_handler(struct ex_regs *regs)
 	GUEST_DONE();
 }
 
-extern unsigned char svm_hypercall_insn;
+extern uint8_t svm_hypercall_insn[HYPERCALL_INSN_SIZE];
 static uint64_t svm_do_sched_yield(uint8_t apic_id)
 {
 	uint64_t ret;
@@ -39,7 +42,7 @@ static uint64_t svm_do_sched_yield(uint8_t apic_id)
 	return ret;
 }
 
-extern unsigned char vmx_hypercall_insn;
+extern uint8_t vmx_hypercall_insn[HYPERCALL_INSN_SIZE];
 static uint64_t vmx_do_sched_yield(uint8_t apic_id)
 {
 	uint64_t ret;
@@ -56,30 +59,20 @@ static uint64_t vmx_do_sched_yield(uint8_t apic_id)
 	return ret;
 }
 
-static void assert_hypercall_insn(unsigned char *exp_insn, unsigned char *obs_insn)
-{
-	uint32_t exp = 0, obs = 0;
-
-	memcpy(&exp, exp_insn, sizeof(exp));
-	memcpy(&obs, obs_insn, sizeof(obs));
-
-	GUEST_ASSERT_EQ(exp, obs);
-}
-
 static void guest_main(void)
 {
-	unsigned char *native_hypercall_insn, *hypercall_insn;
+	uint8_t *native_hypercall_insn, *hypercall_insn;
 	uint8_t apic_id;
 
 	apic_id = GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID));
 
 	if (is_intel_cpu()) {
-		native_hypercall_insn = &vmx_hypercall_insn;
-		hypercall_insn = &svm_hypercall_insn;
+		native_hypercall_insn = vmx_hypercall_insn;
+		hypercall_insn = svm_hypercall_insn;
 		svm_do_sched_yield(apic_id);
 	} else if (is_amd_cpu()) {
-		native_hypercall_insn = &svm_hypercall_insn;
-		hypercall_insn = &vmx_hypercall_insn;
+		native_hypercall_insn = svm_hypercall_insn;
+		hypercall_insn = vmx_hypercall_insn;
 		vmx_do_sched_yield(apic_id);
 	} else {
 		GUEST_ASSERT(0);
@@ -87,8 +80,13 @@ static void guest_main(void)
 		return;
 	}
 
+	/*
+	 * The hypercall didn't #UD (guest_ud_handler() signals "done" if a #UD
+	 * occurs). Verify that a #UD is NOT expected and that KVM patched in
+	 * the native hypercall.
+	 */
 	GUEST_ASSERT(!ud_expected);
-	assert_hypercall_insn(native_hypercall_insn, hypercall_insn);
+	GUEST_ASSERT(!memcmp(native_hypercall_insn, hypercall_insn, HYPERCALL_INSN_SIZE));
 
 	GUEST_DONE();
 }
......
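For reference, the two 3-byte encodings the rewritten test compares are architectural: VMCALL is 0f 01 c1 and VMMCALL is 0f 01 d9 (per the Intel and AMD manuals). A sketch of what the svm_hypercall_insn/vmx_hypercall_insn labels point at (the arrays here are illustrative, not from this diff):

#include <stdint.h>

#define HYPERCALL_INSN_SIZE	3

/* KVM's hypercall-patching quirk rewrites the non-native sequence to the
 * native one before the guest re-executes it. */
static const uint8_t vmcall_insn[HYPERCALL_INSN_SIZE]  = { 0x0f, 0x01, 0xc1 };	/* Intel VMCALL */
static const uint8_t vmmcall_insn[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xd9 };	/* AMD VMMCALL */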