Commit 91add12d authored by Sean Christopherson

KVM: selftests: Remove useless shifts when creating guest page tables

Remove the pointless shift from GPA=>GFN and immediately back to
GFN=>GPA when creating guest page tables.  Ignore the other walkers
that have a similar pattern for the moment, they will be converted
to use virt_get_pte() in the near future.

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/20221006004512.666529-4-seanjc@google.com
parent 751f2800
@@ -177,7 +177,8 @@ struct kvm_x86_cpu_feature {
 #define PAGE_MASK		(~(PAGE_SIZE-1))
 #define PHYSICAL_PAGE_MASK	GENMASK_ULL(51, 12)
-#define PTE_GET_PFN(pte)	(((pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
+#define PTE_GET_PA(pte)		((pte) & PHYSICAL_PAGE_MASK)
+#define PTE_GET_PFN(pte)	(PTE_GET_PA(pte) >> PAGE_SHIFT)

 /* General Registers in 64-Bit Mode */
 struct gpr64_regs {
...
...@@ -131,23 +131,23 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm) ...@@ -131,23 +131,23 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
} }
} }
/*
 * Return a host pointer to the PTE that translates @vaddr at @level, within
 * the page table page located at guest physical address @pt_gpa.  Taking a
 * GPA (not a PFN) avoids the pointless GPA=>GFN=>GPA round trip the old
 * signature forced on callers.
 */
static void *virt_get_pte(struct kvm_vm *vm, uint64_t pt_gpa, uint64_t vaddr,
			  int level)
{
	/* Map the table's GPA to a host virtual address for direct access. */
	uint64_t *page_table = addr_gpa2hva(vm, pt_gpa);
	/* Each 4-level paging table has 512 entries => 9 index bits per level. */
	int index = (vaddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;

	return &page_table[index];
}
 static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
-				       uint64_t pt_pfn,
+				       uint64_t pt_gpa,
 				       uint64_t vaddr,
 				       uint64_t paddr,
 				       int current_level,
 				       int target_level)
 {
-	uint64_t *pte = virt_get_pte(vm, pt_pfn, vaddr, current_level);
+	uint64_t *pte = virt_get_pte(vm, pt_gpa, vaddr, current_level);

 	if (!(*pte & PTE_PRESENT_MASK)) {
 		*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK;
@@ -197,21 +197,20 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
 	 * Allocate upper level page tables, if not already present.  Return
 	 * early if a hugepage was created.
 	 */
-	pml4e = virt_create_upper_pte(vm, vm->pgd >> vm->page_shift,
-				      vaddr, paddr, PG_LEVEL_512G, level);
+	pml4e = virt_create_upper_pte(vm, vm->pgd, vaddr, paddr, PG_LEVEL_512G, level);
 	if (*pml4e & PTE_LARGE_MASK)
 		return;

-	pdpe = virt_create_upper_pte(vm, PTE_GET_PFN(*pml4e), vaddr, paddr, PG_LEVEL_1G, level);
+	pdpe = virt_create_upper_pte(vm, PTE_GET_PA(*pml4e), vaddr, paddr, PG_LEVEL_1G, level);
 	if (*pdpe & PTE_LARGE_MASK)
 		return;

-	pde = virt_create_upper_pte(vm, PTE_GET_PFN(*pdpe), vaddr, paddr, PG_LEVEL_2M, level);
+	pde = virt_create_upper_pte(vm, PTE_GET_PA(*pdpe), vaddr, paddr, PG_LEVEL_2M, level);
 	if (*pde & PTE_LARGE_MASK)
 		return;

 	/* Fill in page table entry. */
-	pte = virt_get_pte(vm, PTE_GET_PFN(*pde), vaddr, PG_LEVEL_4K);
+	pte = virt_get_pte(vm, PTE_GET_PA(*pde), vaddr, PG_LEVEL_4K);
 	TEST_ASSERT(!(*pte & PTE_PRESENT_MASK),
 		    "PTE already present for 4k page at vaddr: 0x%lx\n", vaddr);
 	*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment