Commit 83204fd3 authored by Suzuki K Poulose, committed by Sasha Levin

kvm arm: Move fake PGD handling to arch specific files

[ Upstream commit 120f0779 ]

Rearrange the code for fake pgd handling, which is applicable
only to arm64. This will later be removed once we introduce
the stage2 page table walker macros.
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
parent 060e3937
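The net effect of the change is that the common allocator in kvm_alloc_stage2_pgd() no longer open-codes the KVM_PREALLOC_LEVEL cases and instead calls two arch-provided hooks, kvm_setup_fake_pgd() and kvm_free_fake_pgd(). Below is a minimal user-space sketch of that flow, modelled on the trivial 32-bit arm variant of the hooks in the hunks that follow; all names and buffer sizes here are illustrative stand-ins, not kernel code.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins: the "hardware" stage-2 table is just a zeroed buffer. */
static void *alloc_hwpgd(size_t size) { return calloc(1, size); }
static void free_hwpgd(void *hwpgd)   { free(hwpgd); }

/*
 * Modelled on the arm variant below: host and guest walk the same
 * number of levels, so no fake PGD is needed and the hardware table
 * doubles as the software one.
 */
static void *setup_fake_pgd(void *hwpgd) { return hwpgd; }
static void free_fake_pgd(void *pgd)     { (void)pgd; /* nothing to free */ }

int main(void)
{
	void *hwpgd = alloc_hwpgd(4096);
	if (!hwpgd)
		return 1;

	/* Common code stays generic: it only sees the two hooks. */
	void *pgd = setup_fake_pgd(hwpgd);
	printf("software walks %p, hardware walks %p\n", pgd, hwpgd);

	free_fake_pgd(pgd);
	free_hwpgd(hwpgd);
	return 0;
}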
@@ -161,8 +161,6 @@ static inline bool kvm_page_empty(void *ptr)
 #define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
 #define kvm_pud_table_empty(kvm, pudp) (0)
 
-#define KVM_PREALLOC_LEVEL 0
-
 static inline void *kvm_get_hwpgd(struct kvm *kvm)
 {
 	return kvm->arch.pgd;
@@ -173,6 +171,15 @@ static inline unsigned int kvm_get_hwpgd_size(void)
 	return PTRS_PER_S2_PGD * sizeof(pgd_t);
 }
 
+static inline pgd_t *kvm_setup_fake_pgd(pgd_t *hwpgd)
+{
+	return hwpgd;
+}
+
+static inline void kvm_free_fake_pgd(pgd_t *pgd)
+{
+}
+
 struct kvm;
 
 #define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l))
@@ -685,47 +685,16 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
 	if (!hwpgd)
 		return -ENOMEM;
 
-	/* When the kernel uses more levels of page tables than the
+	/*
+	 * When the kernel uses more levels of page tables than the
 	 * guest, we allocate a fake PGD and pre-populate it to point
 	 * to the next-level page table, which will be the real
 	 * initial page table pointed to by the VTTBR.
-	 *
-	 * When KVM_PREALLOC_LEVEL==2, we allocate a single page for
-	 * the PMD and the kernel will use folded pud.
-	 * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD
-	 * pages.
 	 */
-	if (KVM_PREALLOC_LEVEL > 0) {
-		int i;
-
-		/*
-		 * Allocate fake pgd for the page table manipulation macros to
-		 * work. This is not used by the hardware and we have no
-		 * alignment requirement for this allocation.
-		 */
-		pgd = kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
-				GFP_KERNEL | __GFP_ZERO);
-
-		if (!pgd) {
-			kvm_free_hwpgd(hwpgd);
-			return -ENOMEM;
-		}
-
-		/* Plug the HW PGD into the fake one. */
-		for (i = 0; i < PTRS_PER_S2_PGD; i++) {
-			if (KVM_PREALLOC_LEVEL == 1)
-				pgd_populate(NULL, pgd + i,
-					     (pud_t *)hwpgd + i * PTRS_PER_PUD);
-			else if (KVM_PREALLOC_LEVEL == 2)
-				pud_populate(NULL, pud_offset(pgd, 0) + i,
-					     (pmd_t *)hwpgd + i * PTRS_PER_PMD);
-		}
-	} else {
-		/*
-		 * Allocate actual first-level Stage-2 page table used by the
-		 * hardware for Stage-2 page table walks.
-		 */
-		pgd = (pgd_t *)hwpgd;
+	pgd = kvm_setup_fake_pgd(hwpgd);
+	if (IS_ERR(pgd)) {
+		kvm_free_hwpgd(hwpgd);
+		return PTR_ERR(pgd);
 	}
 
 	kvm_clean_pgd(pgd);
@@ -838,9 +807,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
 	spin_unlock(&kvm->mmu_lock);
 
 	kvm_free_hwpgd(kvm_get_hwpgd(kvm));
-	if (KVM_PREALLOC_LEVEL > 0)
-		kfree(kvm->arch.pgd);
-
+	kvm_free_fake_pgd(kvm->arch.pgd);
 	kvm->arch.pgd = NULL;
 }
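Note the changed failure contract: kvm_setup_fake_pgd() reports allocation failure as ERR_PTR(-ENOMEM) rather than NULL, which the caller above unpacks with IS_ERR()/PTR_ERR(). Here is a self-contained sketch of that kernel convention, with simplified re-implementations of the <linux/err.h> helpers for illustration only; the toy hook name is a stand-in, not kernel code.

#include <stdio.h>

#define MAX_ERRNO 4095	/* the top 4095 addresses are reserved for error codes */

/* Simplified stand-ins for the <linux/err.h> helpers. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Toy hook mirroring the new contract: pointer on success, ERR_PTR on failure. */
static void *setup_fake_pgd(void *hwpgd, int simulate_oom)
{
	if (simulate_oom)
		return ERR_PTR(-12);	/* -ENOMEM */
	return hwpgd;
}

int main(void)
{
	char hwpgd[64];
	void *pgd = setup_fake_pgd(hwpgd, 1);

	if (IS_ERR(pgd)) {	/* same check as in kvm_alloc_stage2_pgd() */
		printf("setup failed: %ld\n", PTR_ERR(pgd));
		return 1;
	}
	printf("pgd at %p\n", pgd);
	return 0;
}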
@@ -198,6 +198,49 @@ static inline unsigned int kvm_get_hwpgd_size(void)
 	return PTRS_PER_S2_PGD * sizeof(pgd_t);
 }
 
+/*
+ * Allocate fake pgd for the host kernel page table macros to work.
+ * This is not used by the hardware and we have no alignment
+ * requirement for this allocation.
+ */
+static inline pgd_t *kvm_setup_fake_pgd(pgd_t *hwpgd)
+{
+	int i;
+	pgd_t *pgd;
+
+	if (!KVM_PREALLOC_LEVEL)
+		return hwpgd;
+
+	/*
+	 * When KVM_PREALLOC_LEVEL==2, we allocate a single page for
+	 * the PMD and the kernel will use folded pud.
+	 * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD
+	 * pages.
+	 */
+	pgd = kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
+			GFP_KERNEL | __GFP_ZERO);
+	if (!pgd)
+		return ERR_PTR(-ENOMEM);
+
+	/* Plug the HW PGD into the fake one. */
+	for (i = 0; i < PTRS_PER_S2_PGD; i++) {
+		if (KVM_PREALLOC_LEVEL == 1)
+			pgd_populate(NULL, pgd + i,
+				     (pud_t *)hwpgd + i * PTRS_PER_PUD);
+		else if (KVM_PREALLOC_LEVEL == 2)
+			pud_populate(NULL, pud_offset(pgd, 0) + i,
+				     (pmd_t *)hwpgd + i * PTRS_PER_PMD);
+	}
+
+	return pgd;
+}
+
+static inline void kvm_free_fake_pgd(pgd_t *pgd)
+{
+	if (KVM_PREALLOC_LEVEL > 0)
+		kfree(pgd);
+}
+
 static inline bool kvm_page_empty(void *ptr)
 {
 	struct page *ptr_page = virt_to_page(ptr);
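The pre-population loop above is plain index arithmetic: fake entry i is wired to the i-th slice of the contiguous hardware block (hwpgd). A standalone illustration follows; the geometry constants are assumed example values, not taken from any particular kernel configuration.

#include <stdio.h>

/* Assumed example geometry; real values depend on page size and IPA bits. */
#define PTRS_PER_S2_PGD 2
#define PTRS_PER_PUD    512

int main(void)
{
	/*
	 * Models the KVM_PREALLOC_LEVEL == 1 case: hwpgd is
	 * PTRS_PER_S2_PGD consecutive PUD pages, and fake pgd entry i
	 * is pre-populated to point i * PTRS_PER_PUD entries into it,
	 * matching the pgd_populate() arithmetic above.
	 */
	for (int i = 0; i < PTRS_PER_S2_PGD; i++)
		printf("fake pgd[%d] -> hwpgd + %d\n", i, i * PTRS_PER_PUD);
	return 0;
}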