Commit 5d4e08c4 authored by Mark Salter, committed by Christoffer Dall

arm: KVM: fix possible misalignment of PGDs and bounce page

The kvm/mmu code shared by arm and arm64 uses kmalloc() to allocate
a bounce page (if the hypervisor init code crosses a page boundary) and
the hypervisor PGDs. The problem is that kmalloc() does not guarantee
the required alignment. In the case of the bounce page, the page-sized
buffer allocated may itself cross a page boundary, negating its purpose
and leading to a hang during KVM initialization. Likewise, the PGDs
allocated may not meet the minimum alignment requirements of the
underlying MMU. This patch uses __get_free_page()/__get_free_pages() to
guarantee the worst-case alignment needs of the bounce page and the
PGDs on both arm and arm64.

Cc: <stable@vger.kernel.org> # 3.10+
Signed-off-by: Mark Salter <msalter@redhat.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
parent 4e4468fa
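
Background on the fix: kmalloc() only guarantees the slab allocator's minimum alignment (ARCH_KMALLOC_MINALIGN), so a PAGE_SIZE buffer may start mid-page, for example under SLOB or with slab debugging/redzoning enabled, and thus straddle a page boundary. __get_free_page() always returns a whole, naturally aligned page. A minimal sketch of the difference, written as a hypothetical standalone module (names such as align_demo_init are illustrative and not part of this commit):

/*
 * Hypothetical demo module (not part of this commit): compares the
 * in-page offset of a kmalloc()ed PAGE_SIZE buffer with one from
 * __get_free_page(). Under SLOB or slab debugging, the kmalloc()
 * offset may be non-zero, i.e. the buffer can cross a page boundary.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/errno.h>

static void *kbuf;
static unsigned long page;

static int __init align_demo_init(void)
{
	kbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);	/* alignment not guaranteed */
	page = __get_free_page(GFP_KERNEL);	/* always page aligned */
	if (!kbuf || !page) {
		kfree(kbuf);			/* kfree(NULL) is a no-op */
		free_page(page);		/* free_page(0) is a no-op */
		return -ENOMEM;
	}

	pr_info("kmalloc offset into page: %#lx\n",
		(unsigned long)kbuf & ~PAGE_MASK);	/* may be non-zero */
	pr_info("__get_free_page offset:   %#lx\n",
		page & ~PAGE_MASK);			/* always zero */
	return 0;
}

static void __exit align_demo_exit(void)
{
	kfree(kbuf);
	free_page(page);
}

module_init(align_demo_init);
module_exit(align_demo_exit);
MODULE_LICENSE("GPL");
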
@@ -42,6 +42,8 @@ static unsigned long hyp_idmap_start;
 static unsigned long hyp_idmap_end;
 static phys_addr_t hyp_idmap_vector;
 
+#define pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
+
 #define kvm_pmd_huge(_x)	(pmd_huge(_x) || pmd_trans_huge(_x))
 
 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
@@ -293,14 +295,14 @@ void free_boot_hyp_pgd(void)
 	if (boot_hyp_pgd) {
 		unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
 		unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
-		kfree(boot_hyp_pgd);
+		free_pages((unsigned long)boot_hyp_pgd, pgd_order);
 		boot_hyp_pgd = NULL;
 	}
 
 	if (hyp_pgd)
 		unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
 
-	kfree(init_bounce_page);
+	free_page((unsigned long)init_bounce_page);
 	init_bounce_page = NULL;
 
 	mutex_unlock(&kvm_hyp_pgd_mutex);
@@ -330,7 +332,7 @@ void free_hyp_pgds(void)
 		for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
 			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
 
-		kfree(hyp_pgd);
+		free_pages((unsigned long)hyp_pgd, pgd_order);
 		hyp_pgd = NULL;
 	}
@@ -1024,7 +1026,7 @@ int kvm_mmu_init(void)
 	size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
 	phys_addr_t phys_base;
 
-	init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	init_bounce_page = (void *)__get_free_page(GFP_KERNEL);
 	if (!init_bounce_page) {
 		kvm_err("Couldn't allocate HYP init bounce page\n");
 		err = -ENOMEM;
@@ -1050,8 +1052,9 @@ int kvm_mmu_init(void)
 			 (unsigned long)phys_base);
 	}
 
-	hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
-	boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+	hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
+	boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
+
 	if (!hyp_pgd || !boot_hyp_pgd) {
 		kvm_err("Hyp mode PGD not allocated\n");
 		err = -ENOMEM;
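
A note on the allocation sizing used above: get_order() returns the smallest order n such that (PAGE_SIZE << n) covers the requested size, and the buddy allocator returns blocks naturally aligned to their own size, so __get_free_pages(..., pgd_order) meets the MMU's alignment requirement for the PGD table, which kzalloc() could not promise. A small userspace sketch of the arithmetic, assuming 4 KiB pages (the kernel's real get_order() is a macro in asm-generic/getorder.h):

#include <stdio.h>

#define PAGE_SHIFT 12			/* assume 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Illustrative re-implementation of the kernel's get_order():
 * the smallest n such that (PAGE_SIZE << n) >= size.
 */
static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	/* e.g. arm64 with 4 KiB pages: 512 pgd_t entries of 8 bytes
	 * fill exactly one page, so pgd_order is 0; larger tables
	 * need a higher order.
	 */
	printf("get_order(512 * 8) = %u\n", get_order(512 * 8));	/* 0 */
	printf("get_order(16384)   = %u\n", get_order(16384));		/* 2 */
	printf("get_order(4097)    = %u\n", get_order(4097));		/* 1 */
	return 0;
}
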