Commit 120f0779 authored by Suzuki K Poulose, committed by Christoffer Dall

kvm arm: Move fake PGD handling to arch specific files

Rearrange the code for fake pgd handling, which is applicable
only to arm64. This will be removed later, once we introduce
the stage2 page table walker macros.
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
parent acd05010
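
For context before the diff: the point of the change is that the common allocation and teardown paths in arch/arm/kvm/mmu.c no longer branch on KVM_PREALLOC_LEVEL; each architecture's kvm_mmu.h instead supplies static inline hooks, kvm_setup_fake_pgd() and kvm_free_fake_pgd(), which are trivial on arm and do the real work on arm64. A minimal, stand-alone user-space C sketch of that hook pattern follows; the ARCH_NEEDS_FAKE_PGD switch and the 4-entry sizes are illustrative stand-ins for the arm/arm64 split, not kernel code:

#include <stdio.h>
#include <stdlib.h>

#define ARCH_NEEDS_FAKE_PGD 1	/* 1 models arm64; 0 models 32-bit arm */

typedef unsigned long pgd_t;

#if ARCH_NEEDS_FAKE_PGD
/* "arm64": the host has more levels than stage-2, so wrap the hw table. */
static inline pgd_t *kvm_setup_fake_pgd(pgd_t *hwpgd)
{
	pgd_t *pgd = calloc(4, sizeof(pgd_t));	/* software-only top level */

	if (!pgd)
		return NULL;	/* the kernel returns ERR_PTR(-ENOMEM) here */
	pgd[0] = (pgd_t)hwpgd;	/* point at the real hardware table */
	return pgd;
}

static inline void kvm_free_fake_pgd(pgd_t *pgd)
{
	free(pgd);	/* only the software-only table is freed */
}
#else
/* "arm": host and stage-2 levels match, so the hooks collapse to nothing. */
static inline pgd_t *kvm_setup_fake_pgd(pgd_t *hwpgd) { return hwpgd; }
static inline void kvm_free_fake_pgd(pgd_t *pgd) { (void)pgd; }
#endif

int main(void)
{
	pgd_t hwpgd[4] = { 0 };
	pgd_t *pgd = kvm_setup_fake_pgd(hwpgd);	/* common code, no #ifdefs */

	if (!pgd)
		return 1;
	printf("fake pgd %s the hw table\n", pgd == hwpgd ? "is" : "wraps");
	kvm_free_fake_pgd(pgd);	/* no-op in the "arm" configuration */
	return 0;
}

Flipping ARCH_NEEDS_FAKE_PGD to 0 leaves main() unchanged, which is exactly the property the patch buys for kvm_alloc_stage2_pgd() and kvm_free_stage2_pgd().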
arch/arm/include/asm/kvm_mmu.h
@@ -161,8 +161,6 @@ static inline bool kvm_page_empty(void *ptr)
 #define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
 #define kvm_pud_table_empty(kvm, pudp) (0)
 
-#define KVM_PREALLOC_LEVEL	0
-
 static inline void *kvm_get_hwpgd(struct kvm *kvm)
 {
 	return kvm->arch.pgd;
@@ -173,6 +171,15 @@ static inline unsigned int kvm_get_hwpgd_size(void)
 	return PTRS_PER_S2_PGD * sizeof(pgd_t);
 }
 
+static inline pgd_t *kvm_setup_fake_pgd(pgd_t *hwpgd)
+{
+	return hwpgd;
+}
+
+static inline void kvm_free_fake_pgd(pgd_t *pgd)
+{
+}
+
 struct kvm;
 
 #define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))
arch/arm/kvm/mmu.c
@@ -684,47 +684,16 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
 	if (!hwpgd)
 		return -ENOMEM;
 
-	/* When the kernel uses more levels of page tables than the
+	/*
+	 * When the kernel uses more levels of page tables than the
 	 * guest, we allocate a fake PGD and pre-populate it to point
 	 * to the next-level page table, which will be the real
 	 * initial page table pointed to by the VTTBR.
-	 *
-	 * When KVM_PREALLOC_LEVEL==2, we allocate a single page for
-	 * the PMD and the kernel will use folded pud.
-	 * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD
-	 * pages.
 	 */
-	if (KVM_PREALLOC_LEVEL > 0) {
-		int i;
-
-		/*
-		 * Allocate fake pgd for the page table manipulation macros to
-		 * work.  This is not used by the hardware and we have no
-		 * alignment requirement for this allocation.
-		 */
-		pgd = kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
-				GFP_KERNEL | __GFP_ZERO);
-		if (!pgd) {
-			kvm_free_hwpgd(hwpgd);
-			return -ENOMEM;
-		}
-
-		/* Plug the HW PGD into the fake one. */
-		for (i = 0; i < PTRS_PER_S2_PGD; i++) {
-			if (KVM_PREALLOC_LEVEL == 1)
-				pgd_populate(NULL, pgd + i,
-					     (pud_t *)hwpgd + i * PTRS_PER_PUD);
-			else if (KVM_PREALLOC_LEVEL == 2)
-				pud_populate(NULL, pud_offset(pgd, 0) + i,
-					     (pmd_t *)hwpgd + i * PTRS_PER_PMD);
-		}
-	} else {
-		/*
-		 * Allocate actual first-level Stage-2 page table used by the
-		 * hardware for Stage-2 page table walks.
-		 */
-		pgd = (pgd_t *)hwpgd;
-	}
+	pgd = kvm_setup_fake_pgd(hwpgd);
+	if (IS_ERR(pgd)) {
+		kvm_free_hwpgd(hwpgd);
+		return PTR_ERR(pgd);
+	}
 
 	kvm_clean_pgd(pgd);
@@ -831,9 +800,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
 	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
 	kvm_free_hwpgd(kvm_get_hwpgd(kvm));
-	if (KVM_PREALLOC_LEVEL > 0)
-		kfree(kvm->arch.pgd);
-
+	kvm_free_fake_pgd(kvm->arch.pgd);
 	kvm->arch.pgd = NULL;
 }
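
The refactored allocator above leans on the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention from include/linux/err.h, which encodes a negative errno in the pointer value itself (in the top page of the address space) so a single return value can carry either a valid pointer or an error code. A small runnable user-space model of that convention; MAX_ERRNO and the three helpers mirror the kernel's definitions, while setup_table() is a made-up stand-in for kvm_setup_fake_pgd():

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

/* Encode a negative errno as a pointer; decode it back; classify it. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Like kvm_setup_fake_pgd(), return a usable pointer or an encoded
 * errno -- never NULL on failure. */
static void *setup_table(size_t entries)
{
	void *t;

	if (entries == 0)
		return ERR_PTR(-EINVAL);
	t = calloc(entries, sizeof(uint64_t));
	if (!t)
		return ERR_PTR(-ENOMEM);
	return t;
}

int main(void)
{
	void *t = setup_table(0);

	if (IS_ERR(t)) {
		printf("setup failed: errno %ld\n", -PTR_ERR(t));
		return 1;
	}
	printf("setup ok\n");
	free(t);
	return 0;
}

This is why the caller in kvm_alloc_stage2_pgd() can free the hardware PGD and propagate PTR_ERR(pgd) in one short branch instead of the old nested -ENOMEM handling.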
arch/arm64/include/asm/kvm_mmu.h
@@ -208,6 +208,49 @@ static inline unsigned int kvm_get_hwpgd_size(void)
 	return PTRS_PER_S2_PGD * sizeof(pgd_t);
 }
 
+/*
+ * Allocate fake pgd for the host kernel page table macros to work.
+ * This is not used by the hardware and we have no alignment
+ * requirement for this allocation.
+ */
+static inline pgd_t *kvm_setup_fake_pgd(pgd_t *hwpgd)
+{
+	int i;
+	pgd_t *pgd;
+
+	if (!KVM_PREALLOC_LEVEL)
+		return hwpgd;
+
+	/*
+	 * When KVM_PREALLOC_LEVEL==2, we allocate a single page for
+	 * the PMD and the kernel will use folded pud.
+	 * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD
+	 * pages.
+	 */
+	pgd = kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
+			GFP_KERNEL | __GFP_ZERO);
+	if (!pgd)
+		return ERR_PTR(-ENOMEM);
+
+	/* Plug the HW PGD into the fake one. */
+	for (i = 0; i < PTRS_PER_S2_PGD; i++) {
+		if (KVM_PREALLOC_LEVEL == 1)
+			pgd_populate(NULL, pgd + i,
+				     (pud_t *)hwpgd + i * PTRS_PER_PUD);
+		else if (KVM_PREALLOC_LEVEL == 2)
+			pud_populate(NULL, pud_offset(pgd, 0) + i,
+				     (pmd_t *)hwpgd + i * PTRS_PER_PMD);
+	}
+
+	return pgd;
+}
+
+static inline void kvm_free_fake_pgd(pgd_t *pgd)
+{
+	if (KVM_PREALLOC_LEVEL > 0)
+		kfree(pgd);
+}
+
 static inline bool kvm_page_empty(void *ptr)
 {
 	struct page *ptr_page = virt_to_page(ptr);
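
To see what the arm64 kvm_setup_fake_pgd() achieves when KVM_PREALLOC_LEVEL > 0: the hardware walks one contiguous table allocated by kvm_alloc_hwpgd(), while the kernel's generic page-table macros index a software-only top level whose entry i points at the i-th next-level chunk of that same allocation. A toy user-space model of that pre-population loop; the entry counts are hypothetical, and plain pointers stand in for the descriptors written by pgd_populate()/pud_populate():

#include <stdio.h>
#include <stdlib.h>

#define FAKE_ENTRIES	2	/* entries in the software-only top level */
#define CHUNK_ENTRIES	4	/* next-level entries behind each fake slot */

int main(void)
{
	/* One contiguous "hardware" table, like kvm_alloc_hwpgd()'s. */
	unsigned long *hw = calloc(FAKE_ENTRIES * CHUNK_ENTRIES, sizeof(*hw));
	/* The fake top level exists only so generic walkers have
	 * something to index; it is never handed to the hardware. */
	unsigned long **fake = calloc(FAKE_ENTRIES, sizeof(*fake));
	int i;

	if (!hw || !fake)
		return 1;

	/* "Plug the HW PGD into the fake one": entry i covers chunk i. */
	for (i = 0; i < FAKE_ENTRIES; i++)
		fake[i] = hw + i * CHUNK_ENTRIES;

	printf("fake[1] -> hw + %d entries\n", (int)(fake[1] - hw));
	free(fake);
	free(hw);
	return 0;
}

Because the fake table is never walked by hardware, it has no alignment requirement, which is why the real code can use a plain kmalloc() rather than page allocations.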