Commit cce0c23d authored by Sean Christopherson, committed by Paolo Bonzini

KVM: selftests: Add wrapper to allocate page table page

Add a helper to allocate a page for use in constructing the guest's page
tables.  All architectures have identical address and memslot
requirements (which appear to be arbitrary anyways).

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210622200529.3650424-15-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 444d084b
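The consolidation is easiest to see at a call site. The snippet below is an illustrative before/after sketch distilled from the hunks that follow (it assumes a `struct kvm_vm *vm` in scope); it is not code from the commit itself:

```c
vm_paddr_t pt;

/* Before: every architecture open-coded the same allocation, passing the
 * same arbitrary minimum physical address and memslot 0. */
pt = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);

/* After: a single common wrapper hides the arbitrary parameters. */
pt = vm_alloc_page_table(vm);
```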
tools/testing/selftests/kvm/include/kvm_util.h
@@ -30,6 +30,7 @@ typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
 
 /* Minimum allocated guest virtual and physical addresses */
 #define KVM_UTIL_MIN_VADDR		0x2000
+#define KVM_GUEST_PAGE_TABLE_MIN_PADDR	0x180000
 
 #define DEFAULT_GUEST_PHY_PAGES		512
 #define DEFAULT_GUEST_STACK_VADDR_MIN	0xab6000
@@ -262,6 +263,7 @@ vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
 			     uint32_t memslot);
 vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
 			      vm_paddr_t paddr_min, uint32_t memslot);
+vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
 
 /*
  * Create a VM with reasonable defaults
tools/testing/selftests/kvm/lib/aarch64/processor.c
@@ -11,7 +11,6 @@
 #include "../kvm_util_internal.h"
 #include "processor.h"
 
-#define KVM_GUEST_PAGE_TABLE_MIN_PADDR	0x180000
 #define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN	0xac0000
 
 static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
@@ -104,25 +103,19 @@ static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 		 paddr, vm->max_gfn, vm->page_size);
 
 	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
-	if (!*ptep) {
-		*ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
-		*ptep |= 3;
-	}
+	if (!*ptep)
+		*ptep = vm_alloc_page_table(vm) | 3;
 
 	switch (vm->pgtable_levels) {
 	case 4:
 		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
-		if (!*ptep) {
-			*ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
-			*ptep |= 3;
-		}
+		if (!*ptep)
+			*ptep = vm_alloc_page_table(vm) | 3;
 		/* fall through */
 	case 3:
 		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
-		if (!*ptep) {
-			*ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
-			*ptep |= 3;
-		}
+		if (!*ptep)
+			*ptep = vm_alloc_page_table(vm) | 3;
 		/* fall through */
 	case 2:
 		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
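A note on the arm64 hunk above: the `| 3` sets bits [1:0] of the descriptor, which in the ARMv8 translation table format mark the entry as valid (bit 0) and as a table rather than a block descriptor (bit 1). A more self-documenting spelling could look like the sketch below; `PTE_VALID` and `PTE_TABLE` are illustrative names, not identifiers from the selftests:

```c
#define PTE_VALID	(1UL << 0)	/* descriptor is valid */
#define PTE_TABLE	(1UL << 1)	/* descriptor points to a next-level table */

	if (!*ptep)
		*ptep = vm_alloc_page_table(vm) | PTE_TABLE | PTE_VALID;
```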
tools/testing/selftests/kvm/lib/kvm_util.c
@@ -2209,6 +2209,14 @@ vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
 	return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
 }
 
+/* Arbitrary minimum physical address used for virtual translation tables. */
+#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
+
+vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
+{
+	return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
+}
+
 /*
  * Address Guest Virtual to Host Virtual
  *
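For completeness, a hypothetical use of the new helper when a test builds translation structures by hand; the sequence below is a sketch (assuming a `struct kvm_vm *vm`), though `addr_gpa2hva()` is the same conversion the hunks above use:

```c
	/* Allocate a page-table page; the wrapper hard-codes the arbitrary
	 * 0x180000 minimum physical address and memslot 0. */
	vm_paddr_t pt_gpa = vm_alloc_page_table(vm);

	/* Get the host-side mapping to fill in entries as needed. */
	uint64_t *pt_hva = addr_gpa2hva(vm, pt_gpa);
	pt_hva[0] = 0;
```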
tools/testing/selftests/kvm/lib/s390x/processor.c
@@ -9,8 +9,6 @@
 #include "kvm_util.h"
 #include "../kvm_util_internal.h"
 
-#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
-
 #define PAGES_PER_REGION 4
 
 void virt_pgd_alloc(struct kvm_vm *vm)
tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -17,9 +17,6 @@
 #define DEFAULT_CODE_SELECTOR 0x8
 #define DEFAULT_DATA_SELECTOR 0x10
 
-/* Minimum physical address used for virtual translation tables. */
-#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
-
 vm_vaddr_t exception_handlers;
 
 /* Virtual translation table structure declarations */
@@ -214,9 +211,7 @@ void virt_pgd_alloc(struct kvm_vm *vm)
 
 	/* If needed, create page map l4 table. */
 	if (!vm->pgd_created) {
-		vm_paddr_t paddr = vm_phy_page_alloc(vm,
-			KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
-		vm->pgd = paddr;
+		vm->pgd = vm_alloc_page_table(vm);
 		vm->pgd_created = true;
 	}
 }
@@ -254,9 +249,7 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
 	/* Allocate page directory pointer table if not present. */
 	pml4e = addr_gpa2hva(vm, vm->pgd);
 	if (!pml4e[index[3]].present) {
-		pml4e[index[3]].address = vm_phy_page_alloc(vm,
-			KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0)
-			>> vm->page_shift;
+		pml4e[index[3]].address = vm_alloc_page_table(vm) >> vm->page_shift;
 		pml4e[index[3]].writable = true;
 		pml4e[index[3]].present = true;
 	}
@@ -265,9 +258,7 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
 		struct pageDirectoryPointerEntry *pdpe;
 		pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
 		if (!pdpe[index[2]].present) {
-			pdpe[index[2]].address = vm_phy_page_alloc(vm,
-				KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0)
-				>> vm->page_shift;
+			pdpe[index[2]].address = vm_alloc_page_table(vm) >> vm->page_shift;
 			pdpe[index[2]].writable = true;
 			pdpe[index[2]].present = true;
 		}
@@ -276,9 +267,7 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
 		struct pageDirectoryEntry *pde;
 		pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
 		if (!pde[index[1]].present) {
-			pde[index[1]].address = vm_phy_page_alloc(vm,
-				KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0)
-				>> vm->page_shift;
+			pde[index[1]].address = vm_alloc_page_table(vm) >> vm->page_shift;
 			pde[index[1]].writable = true;
 			pde[index[1]].present = true;
 		}
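In the x86 hunks, the `address` bitfield of each paging-structure entry holds a page frame number rather than a byte address, which is why a newly allocated table is shifted right by `page_shift` when stored and multiplied by `page_size` when the walk descends a level. A minimal sketch of that round trip, assuming the 4 KiB pages the x86 selftests use (`pfn_round_trip` is an illustrative helper, not from the selftests):

```c
#include <stdint.h>

/* Round-trip between a byte address and the PFN stored in .address. */
static uint64_t pfn_round_trip(uint64_t paddr, unsigned int page_shift)
{
	uint64_t page_size = 1ULL << page_shift;
	uint64_t pfn = paddr >> page_shift;	/* value stored in .address */

	return pfn * page_size;			/* address recovered on the walk */
}

/* pfn_round_trip(0x180000, 12) == 0x180000 for 4 KiB pages. */
```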
tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -426,9 +426,7 @@ void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
 	/* Allocate page directory pointer table if not present. */
 	pml4e = vmx->eptp_hva;
 	if (!pml4e[index[3]].readable) {
-		pml4e[index[3]].address = vm_phy_page_alloc(vm,
-			KVM_EPT_PAGE_TABLE_MIN_PADDR, 0)
-			>> vm->page_shift;
+		pml4e[index[3]].address = vm_alloc_page_table(vm) >> vm->page_shift;
 		pml4e[index[3]].writable = true;
 		pml4e[index[3]].readable = true;
 		pml4e[index[3]].executable = true;
@@ -438,9 +436,7 @@ void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
 		struct eptPageTableEntry *pdpe;
 		pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
 		if (!pdpe[index[2]].readable) {
-			pdpe[index[2]].address = vm_phy_page_alloc(vm,
-				KVM_EPT_PAGE_TABLE_MIN_PADDR, 0)
-				>> vm->page_shift;
+			pdpe[index[2]].address = vm_alloc_page_table(vm) >> vm->page_shift;
 			pdpe[index[2]].writable = true;
 			pdpe[index[2]].readable = true;
 			pdpe[index[2]].executable = true;
@@ -450,9 +446,7 @@ void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
 		struct eptPageTableEntry *pde;
 		pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
 		if (!pde[index[1]].readable) {
-			pde[index[1]].address = vm_phy_page_alloc(vm,
-				KVM_EPT_PAGE_TABLE_MIN_PADDR, 0)
-				>> vm->page_shift;
+			pde[index[1]].address = vm_alloc_page_table(vm) >> vm->page_shift;
 			pde[index[1]].writable = true;
 			pde[index[1]].readable = true;
 			pde[index[1]].executable = true;
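The vmx.c hunks follow the same page-frame-number convention; what differs is the presence test. An EPT entry has no dedicated present bit: bits 0-2 are the read/write/execute permissions, and the entry is in use once any of them is set, so this code checks `.readable` where the ordinary page-table code checks `.present`. A sketch of that invariant (`ept_entry_in_use` is an illustrative helper, not part of the selftests):

```c
/* The selftests set readable, writable and executable together, so testing
 * any one of them suffices; .readable is the one the code above uses. */
static inline bool ept_entry_in_use(const struct eptPageTableEntry *e)
{
	return e->readable || e->writable || e->executable;
}
```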