Commit 444d084b authored by Sean Christopherson, committed by Paolo Bonzini

KVM: selftests: Unconditionally allocate EPT tables in memslot 0

Drop the EPTP memslot param from all EPT helpers and shove the hardcoded
'0' down to the vm_phy_page_alloc() calls.

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210622200529.3650424-14-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 4307af73
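
For readers skimming the diff, a minimal before/after sketch of what this change means at a call site (a sketch only; nested_gpa, gpa, and size are placeholder names, not identifiers from the patch):

	/* Before: every EPT helper threaded an eptp_memslot parameter through. */
	nested_map(vmx, vm, nested_gpa, gpa, size, 0 /* eptp_memslot */);

	/* After: EPT page tables always come from memslot 0, so the parameter is gone. */
	nested_map(vmx, vm, nested_gpa, gpa, size);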
tools/testing/selftests/kvm/include/x86_64/vmx.h
@@ -608,15 +608,13 @@ bool nested_vmx_supported(void);
 void nested_vmx_check_supported(void);
 
 void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
-		   uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot);
+		   uint64_t nested_paddr, uint64_t paddr);
 void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
-		uint64_t nested_paddr, uint64_t paddr, uint64_t size,
-		uint32_t eptp_memslot);
+		uint64_t nested_paddr, uint64_t paddr, uint64_t size);
 void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
-			uint32_t memslot, uint32_t eptp_memslot);
+			uint32_t memslot);
 void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
 		  uint32_t eptp_memslot);
-void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm,
-				      uint32_t eptp_memslot);
+void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm);
 
 #endif /* SELFTEST_KVM_VMX_H */
tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -393,7 +393,7 @@ void nested_vmx_check_supported(void)
 }
 
 void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
-		   uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot)
+		   uint64_t nested_paddr, uint64_t paddr)
 {
 	uint16_t index[4];
 	struct eptPageTableEntry *pml4e;
@@ -427,7 +427,7 @@ void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
 	pml4e = vmx->eptp_hva;
 	if (!pml4e[index[3]].readable) {
 		pml4e[index[3]].address = vm_phy_page_alloc(vm,
-			KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot)
+			KVM_EPT_PAGE_TABLE_MIN_PADDR, 0)
 			>> vm->page_shift;
 		pml4e[index[3]].writable = true;
 		pml4e[index[3]].readable = true;
@@ -439,7 +439,7 @@ void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
 	pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
 	if (!pdpe[index[2]].readable) {
 		pdpe[index[2]].address = vm_phy_page_alloc(vm,
-			KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot)
+			KVM_EPT_PAGE_TABLE_MIN_PADDR, 0)
 			>> vm->page_shift;
 		pdpe[index[2]].writable = true;
 		pdpe[index[2]].readable = true;
@@ -451,7 +451,7 @@ void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
 	pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
 	if (!pde[index[1]].readable) {
 		pde[index[1]].address = vm_phy_page_alloc(vm,
-			KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot)
+			KVM_EPT_PAGE_TABLE_MIN_PADDR, 0)
 			>> vm->page_shift;
 		pde[index[1]].writable = true;
 		pde[index[1]].readable = true;
@@ -492,8 +492,7 @@ void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
  * page range starting at nested_paddr to the page range starting at paddr.
  */
 void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
-		uint64_t nested_paddr, uint64_t paddr, uint64_t size,
-		uint32_t eptp_memslot)
+		uint64_t nested_paddr, uint64_t paddr, uint64_t size)
 {
 	size_t page_size = vm->page_size;
 	size_t npages = size / page_size;
@@ -502,7 +501,7 @@ void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
 	TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
 
 	while (npages--) {
-		nested_pg_map(vmx, vm, nested_paddr, paddr, eptp_memslot);
+		nested_pg_map(vmx, vm, nested_paddr, paddr);
 		nested_paddr += page_size;
 		paddr += page_size;
 	}
@@ -512,7 +511,7 @@ void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
  * physical pages in VM.
  */
 void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
-			uint32_t memslot, uint32_t eptp_memslot)
+			uint32_t memslot)
 {
 	sparsebit_idx_t i, last;
 	struct userspace_mem_region *region =
@@ -528,8 +527,7 @@ void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
 
 		nested_map(vmx, vm,
 			   (uint64_t)i << vm->page_shift,
 			   (uint64_t)i << vm->page_shift,
-			   1 << vm->page_shift,
-			   eptp_memslot);
+			   1 << vm->page_shift);
 	}
 }
@@ -541,8 +539,7 @@ void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
 	vmx->eptp_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->eptp);
 }
 
-void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm,
-				      uint32_t eptp_memslot)
+void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm)
 {
 	vmx->apic_access = (void *)vm_vaddr_alloc_page(vm);
 	vmx->apic_access_hva = addr_gva2hva(vm, (uintptr_t)vmx->apic_access);
tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c
@@ -96,7 +96,7 @@ int main(int argc, char *argv[])
 	}
 
 	vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
-	prepare_virtualize_apic_accesses(vmx, vm, 0);
+	prepare_virtualize_apic_accesses(vmx, vm);
 	vcpu_args_set(vm, VCPU_ID, 2, vmx_pages_gva, high_gpa);
 
 	while (!done) {
tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
@@ -107,9 +107,9 @@ int main(int argc, char *argv[])
 	 * meaning after the last call to virt_map.
 	 */
 	prepare_eptp(vmx, vm, 0);
-	nested_map_memslot(vmx, vm, 0, 0);
-	nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096, 0);
-	nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096, 0);
+	nested_map_memslot(vmx, vm, 0);
+	nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096);
+	nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096);
 
 	bmap = bitmap_alloc(TEST_MEM_PAGES);
 	host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);
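
Note that prepare_eptp() keeps its eptp_memslot parameter in this patch; only the page-table allocations inside the EPT helpers are hardcoded to memslot 0. A condensed sketch of the post-patch setup flow, modeled on the vmx_dirty_log_test hunk above (nested_gpa and gpa are placeholders):

	vm_vaddr_t vmx_pages_gva;
	struct vmx_pages *vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);

	prepare_eptp(vmx, vm, 0);		/* still takes a memslot argument */
	nested_map_memslot(vmx, vm, 0);		/* identity-map memslot 0 in the EPT */
	nested_map(vmx, vm, nested_gpa, gpa, 4096);	/* no trailing eptp_memslot */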