Commit ef6a74b2 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: selftests: Add x86-64 test to verify MMU reacts to CPUID updates

Add an x86-only test to verify that x86's MMU reacts to CPUID updates
that impact the MMU.  KVM has had multiple bugs where it fails to
reconfigure the MMU after the guest's vCPU model changes.

Sadly, this test is effectively limited to shadow paging because the
hardware page walk handler doesn't support software disabling of GBPAGES
support, and KVM doesn't manually walk the GVA->GPA on faults for
performance reasons (doing so would largely defeat the benefits of TDP).

Don't require !TDP for the tests as there is still value in running the
tests with TDP, even though the tests will fail (barring KVM hacks).
E.g. KVM should not completely explode if MAXPHYADDR results in KVM using
4-level vs. 5-level paging for the guest.
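
As a rough illustration (not part of the patch: toggle_gbpages_after_run is
a hypothetical helper, sketched against the pre-refactor selftest API this
patch uses), the problematic sequence boils down to:

static void toggle_gbpages_after_run(struct kvm_vm *vm, uint32_t vcpu_id)
{
        struct kvm_cpuid_entry2 *entry;

        /* First run: SPTEs are created under the original vCPU model. */
        vcpu_run(vm, vcpu_id);

        /*
         * Clear GBPAGES in the cached supported-CPUID struct (the entry
         * points into that struct) and push the change to the vCPU.
         */
        entry = kvm_get_supported_cpuid_entry(0x80000001);
        entry->edx &= ~CPUID_GBPAGES;
        vcpu_set_cpuid(vm, vcpu_id, kvm_get_supported_cpuid());

        /*
         * Second run: KVM must reconfigure the MMU, else stale SPTEs from
         * the old vCPU model can be reused.
         */
        vcpu_run(vm, vcpu_id);
}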
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210622200529.3650424-20-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent ad5f16e4
@@ -15,6 +15,7 @@
/x86_64/hyperv_cpuid
/x86_64/hyperv_features
/x86_64/mmio_warning_test
/x86_64/mmu_role_test
/x86_64/platform_info_test
/x86_64/set_boot_cpu_id
/x86_64/set_sregs_test
@@ -47,6 +47,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_features
TEST_GEN_PROGS_x86_64 += x86_64/kvm_pv_test
TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test
TEST_GEN_PROGS_x86_64 += x86_64/mmu_role_test
TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test
TEST_GEN_PROGS_x86_64 += x86_64/set_boot_cpu_id
TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test
@@ -55,6 +55,9 @@
#define CPUID_PKU (1ul << 3)
#define CPUID_LA57 (1ul << 16)

/* CPUID.0x8000_0001.EDX */
#define CPUID_GBPAGES (1ul << 26)
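/*
 * Bit 26 here is the architectural "Page1GB" feature flag, i.e. 1-GByte
 * page support; the new test clears it out from under a running guest.
 */
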
#define UNEXPECTED_VECTOR_PORT 0xfff0u
/* General Registers in 64-Bit Mode */
// SPDX-License-Identifier: GPL-2.0

#include "kvm_util.h"
#include "processor.h"

#define VCPU_ID 1

#define MMIO_GPA 0x100000000ull
static void guest_code(void)
{
        /* First read: no memslot backs MMIO_GPA, so this exits for MMIO. */
        (void)READ_ONCE(*((uint64_t *)MMIO_GPA));
        /* Second read: runs after the host's CPUID change and should #PF. */
        (void)READ_ONCE(*((uint64_t *)MMIO_GPA));

        /* The #PF handler signals completion; getting here means no fault. */
        GUEST_ASSERT(0);
}
static void guest_pf_handler(struct ex_regs *regs)
{
        /* PFEC == RSVD | PRESENT (read, kernel). */
        GUEST_ASSERT(regs->error_code == 0x9);
        GUEST_DONE();
}
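
/*
 * For reference, the 0x9 above decomposes into the architectural x86
 * page-fault error code bits as PRESENT (bit 0, 0x1) | RSVD (bit 3, 0x8);
 * WRITE (bit 1) and USER (bit 2) are clear because the faulting access is
 * a kernel-mode read.
 */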
static void mmu_role_test(u32 *cpuid_reg, u32 evil_cpuid_val)
{
        u32 good_cpuid_val = *cpuid_reg;
        struct kvm_run *run;
        struct kvm_vm *vm;
        uint64_t cmd;
        int r;

        /* Create VM */
        vm = vm_create_default(VCPU_ID, 0, guest_code);

        run = vcpu_state(vm, VCPU_ID);

        /* Map 1gb page without a backing memslot. */
        __virt_pg_map(vm, MMIO_GPA, MMIO_GPA, X86_PAGE_SIZE_1G);

        r = _vcpu_run(vm, VCPU_ID);

        /* Guest access to the 1gb page should trigger MMIO. */
        TEST_ASSERT(r == 0, "vcpu_run failed: %d\n", r);
        TEST_ASSERT(run->exit_reason == KVM_EXIT_MMIO,
                    "Unexpected exit reason: %u (%s), expected MMIO exit (1gb page w/o memslot)\n",
                    run->exit_reason, exit_reason_str(run->exit_reason));

        TEST_ASSERT(run->mmio.len == 8, "Unexpected exit mmio size = %u", run->mmio.len);

        TEST_ASSERT(run->mmio.phys_addr == MMIO_GPA,
                    "Unexpected exit mmio address = 0x%llx", run->mmio.phys_addr);

        /*
         * Effect the CPUID change for the guest and re-enter the guest.  Its
         * access should now #PF due to the PAGE_SIZE bit being reserved or
         * the resulting GPA being invalid.  Note, kvm_get_supported_cpuid()
         * returns the struct that contains the entry being modified.  Eww.
         */
        *cpuid_reg = evil_cpuid_val;
        vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());

        /*
         * Add a dummy memslot to coerce KVM into bumping the MMIO generation.
         * KVM does not "officially" support mucking with CPUID after KVM_RUN,
         * and will incorrectly reuse MMIO SPTEs.  Don't delete the memslot!
         * KVM x86 zaps all shadow pages on memslot deletion.
         */
        vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
                                    MMIO_GPA << 1, 10, 1, 0);
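        /*
         * In this selftest API the arguments are (vm, backing src, gpa,
         * slot, npages, flags), i.e. the dummy slot is a single anonymous
         * page at 8 GiB (MMIO_GPA << 1), well away from the MMIO region.
         */
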
        /* Set up a #PF handler to eat the RSVD #PF and signal all done! */
        vm_init_descriptor_tables(vm);
        vcpu_init_descriptor_tables(vm, VCPU_ID);
        vm_handle_exception(vm, PF_VECTOR, guest_pf_handler);

        r = _vcpu_run(vm, VCPU_ID);
        TEST_ASSERT(r == 0, "vcpu_run failed: %d\n", r);

        cmd = get_ucall(vm, VCPU_ID, NULL);
        TEST_ASSERT(cmd == UCALL_DONE,
                    "Unexpected guest exit, exit_reason=%s, ucall.cmd = %lu\n",
                    exit_reason_str(run->exit_reason), cmd);

        /*
         * Restore the happy CPUID value for the next test.  Yes, changes are
         * indeed persistent across VM destruction.
         */
        *cpuid_reg = good_cpuid_val;

        kvm_vm_free(vm);
}
int main(int argc, char *argv[])
{
        struct kvm_cpuid_entry2 *entry;
        int opt;

        /*
         * All tests are opt-in because TDP doesn't play nice with reserved #PF
         * in the GVA->GPA translation.  The hardware page walker doesn't let
         * software change GBPAGES or MAXPHYADDR, and KVM doesn't manually walk
         * the GVA on fault for performance reasons.
         */
        bool do_gbpages = false;
        bool do_maxphyaddr = false;

        setbuf(stdout, NULL);

        while ((opt = getopt(argc, argv, "gm")) != -1) {
                switch (opt) {
                case 'g':
                        do_gbpages = true;
                        break;
                case 'm':
                        do_maxphyaddr = true;
                        break;
                case 'h':
                default:
                        printf("usage: %s [-g (GBPAGES)] [-m (MAXPHYADDR)]\n", argv[0]);
                        break;
                }
        }

        if (!do_gbpages && !do_maxphyaddr) {
                print_skip("No sub-tests selected");
                return 0;
        }

        entry = kvm_get_supported_cpuid_entry(0x80000001);
        if (!(entry->edx & CPUID_GBPAGES)) {
                print_skip("1gb hugepages not supported");
                return 0;
        }

        if (do_gbpages) {
                pr_info("Test MMIO after toggling CPUID.GBPAGES\n\n");
                mmu_role_test(&entry->edx, entry->edx & ~CPUID_GBPAGES);
        }

        if (do_maxphyaddr) {
                pr_info("Test MMIO after changing CPUID.MAXPHYADDR\n\n");
                entry = kvm_get_supported_cpuid_entry(0x80000008);
                mmu_role_test(&entry->eax, (entry->eax & ~0xff) | 0x20);
        }

        return 0;
}
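
Per the option parsing above, the sub-tests are opt-in: e.g. running
x86_64/mmu_role_test -g -m exercises both the GBPAGES and MAXPHYADDR
variants, with the caveat from the commit message that they are expected
to fail when TDP is enabled.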