Commit ad5f16e4 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: selftests: Add hugepage support for x86-64

Add x86-64 hugepage support in the form of an x86-only variant of
virt_pg_map() that takes an explicit page size.  To keep things simple,
follow the existing logic for 4k pages and disallow creating a hugepage
if the upper-level entry is present, even if the desired pfn matches.

Opportunistically fix a doubled "beyond" reported by checkpatch.
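
For illustration only (not part of the patch): a test that already has a
kvm_vm and 2M-aligned guest addresses could use the new helper as sketched
below; the existing 4k API is unchanged and now wraps the new helper.

	__virt_pg_map(vm, vaddr, paddr, X86_PAGE_SIZE_2M);	/* hypothetical 2M-aligned vaddr/paddr */
	virt_pg_map(vm, vaddr, paddr);				/* unchanged 4k path */
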
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210622200529.3650424-19-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent b007e904
@@ -412,6 +412,14 @@ struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void);
 void vcpu_set_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
 struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
 
+enum x86_page_size {
+	X86_PAGE_SIZE_4K = 0,
+	X86_PAGE_SIZE_2M,
+	X86_PAGE_SIZE_1G,
+};
+void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
+		   enum x86_page_size page_size);
+
 /*
  * Basic CPU control in CR0
  */
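The enum value doubles as the page-table level of the leaf entry, which is why
the sizes follow the 1ull << ((page_size * 9) + 12) computation used later in
the patch: 4k at the PTE level, 2M at the PDE level, 1G at the PDPE level. A
standalone sanity-check sketch of that relationship (not part of the patch):

	#include <assert.h>
	#include <stdint.h>

	/* Mirrors the selftest's size computation: each level multiplies by 512. */
	static uint64_t page_size_bytes(unsigned int page_size)
	{
		return 1ull << ((page_size * 9) + 12);
	}

	int main(void)
	{
		assert(page_size_bytes(0) == 4096ull);			/* X86_PAGE_SIZE_4K */
		assert(page_size_bytes(1) == 2ull * 1024 * 1024);	/* X86_PAGE_SIZE_2M */
		assert(page_size_bytes(2) == 1024ull * 1024 * 1024);	/* X86_PAGE_SIZE_1G */
		return 0;
	}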
@@ -198,55 +198,90 @@ static void *virt_get_pte(struct kvm_vm *vm, uint64_t pt_pfn, uint64_t vaddr,
 static struct pageUpperEntry *virt_create_upper_pte(struct kvm_vm *vm,
 						    uint64_t pt_pfn,
 						    uint64_t vaddr,
-						    int level)
+						    uint64_t paddr,
+						    int level,
+						    enum x86_page_size page_size)
 {
 	struct pageUpperEntry *pte = virt_get_pte(vm, pt_pfn, vaddr, level);
 
 	if (!pte->present) {
-		pte->pfn = vm_alloc_page_table(vm) >> vm->page_shift;
 		pte->writable = true;
 		pte->present = true;
+		pte->page_size = (level == page_size);
+		if (pte->page_size)
+			pte->pfn = paddr >> vm->page_shift;
+		else
+			pte->pfn = vm_alloc_page_table(vm) >> vm->page_shift;
+	} else {
+		/*
+		 * Entry already present.  Assert that the caller doesn't want
+		 * a hugepage at this level, and that there isn't a hugepage at
+		 * this level.
+		 */
+		TEST_ASSERT(level != page_size,
+			    "Cannot create hugepage at level: %u, vaddr: 0x%lx\n",
+			    page_size, vaddr);
+		TEST_ASSERT(!pte->page_size,
+			    "Cannot create page table at level: %u, vaddr: 0x%lx\n",
+			    level, vaddr);
 	}
 
 	return pte;
 }
 
-void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
+		   enum x86_page_size page_size)
 {
+	const uint64_t pg_size = 1ull << ((page_size * 9) + 12);
 	struct pageUpperEntry *pml4e, *pdpe, *pde;
 	struct pageTableEntry *pte;
 
-	TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
-		"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
+	TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K,
+		    "Unknown or unsupported guest mode, mode: 0x%x", vm->mode);
 
-	TEST_ASSERT((vaddr % vm->page_size) == 0,
-		"Virtual address not on page boundary,\n"
-		"  vaddr: 0x%lx vm->page_size: 0x%x",
-		vaddr, vm->page_size);
-	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
-		(vaddr >> vm->page_shift)),
-		"Invalid virtual address, vaddr: 0x%lx",
-		vaddr);
-	TEST_ASSERT((paddr % vm->page_size) == 0,
-		"Physical address not on page boundary,\n"
-		"  paddr: 0x%lx vm->page_size: 0x%x",
-		paddr, vm->page_size);
+	TEST_ASSERT((vaddr % pg_size) == 0,
+		    "Virtual address not aligned,\n"
+		    "vaddr: 0x%lx page size: 0x%lx", vaddr, pg_size);
+	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (vaddr >> vm->page_shift)),
+		    "Invalid virtual address, vaddr: 0x%lx", vaddr);
+	TEST_ASSERT((paddr % pg_size) == 0,
+		    "Physical address not aligned,\n"
+		    "  paddr: 0x%lx page size: 0x%lx", paddr, pg_size);
 	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
-		"Physical address beyond beyond maximum supported,\n"
-		"  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
-		paddr, vm->max_gfn, vm->page_size);
+		    "Physical address beyond maximum supported,\n"
+		    "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+		    paddr, vm->max_gfn, vm->page_size);
+
+	/*
+	 * Allocate upper level page tables, if not already present.  Return
+	 * early if a hugepage was created.
+	 */
+	pml4e = virt_create_upper_pte(vm, vm->pgd >> vm->page_shift,
+				      vaddr, paddr, 3, page_size);
+	if (pml4e->page_size)
+		return;
+
+	pdpe = virt_create_upper_pte(vm, pml4e->pfn, vaddr, paddr, 2, page_size);
+	if (pdpe->page_size)
+		return;
 
-	/* Allocate upper level page tables, if not already present. */
-	pml4e = virt_create_upper_pte(vm, vm->pgd >> vm->page_shift, vaddr, 3);
-	pdpe = virt_create_upper_pte(vm, pml4e->pfn, vaddr, 2);
-	pde = virt_create_upper_pte(vm, pdpe->pfn, vaddr, 1);
+	pde = virt_create_upper_pte(vm, pdpe->pfn, vaddr, paddr, 1, page_size);
+	if (pde->page_size)
+		return;
 
 	/* Fill in page table entry. */
 	pte = virt_get_pte(vm, pde->pfn, vaddr, 0);
+	TEST_ASSERT(!pte->present,
+		    "PTE already present for 4k page at vaddr: 0x%lx\n", vaddr);
 	pte->pfn = paddr >> vm->page_shift;
 	pte->writable = true;
 	pte->present = 1;
 }
 
+void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+{
+	__virt_pg_map(vm, vaddr, paddr, X86_PAGE_SIZE_4K);
+}
+
 void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 {
 	struct pageUpperEntry *pml4e, *pml4e_start;
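As a usage note (hypothetical addresses; assumes the VM is in
VM_MODE_PXXV48_4K and the physical range is already reserved): a 1G mapping
populates only a PML4E plus a leaf PDPE, and __virt_pg_map() returns early
once that hugepage entry is written.

	__virt_pg_map(vm, 0x40000000ul, 0x40000000ul, X86_PAGE_SIZE_1G);	/* hypothetical 1G-aligned mapping */

A later 4k mapping inside that range would trip the new "Cannot create page
table" assert in virt_create_upper_pte(), because the existing PDPE is
already a hugepage leaf.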