Commit 6232cfd0 authored by Will Deacon

Merge branch 'aarch64/kvm-bounce-page' into aarch64/for-next/core

Rework of the KVM HYP bounce page from Ard Biesheuvel. Subsequent arm64
idmap rework depends on this, so merge it here with Marc Zyngier's
blessing (kvm-arm co-maintainer).
Parents: ce47fbb7, e60a1fec
@@ -11,7 +11,27 @@
#ifdef CONFIG_ARM_KERNMEM_PERMS
#include <asm/pgtable.h>
#endif
/*
* Poor man's version of LOG2CEIL(), which is
* not available in binutils before v2.24.
*/
#define LOG2_ROUNDUP(size) ( \
__LOG2_ROUNDUP(size, 2) \
__LOG2_ROUNDUP(size, 3) \
__LOG2_ROUNDUP(size, 4) \
__LOG2_ROUNDUP(size, 5) \
__LOG2_ROUNDUP(size, 6) \
__LOG2_ROUNDUP(size, 7) \
__LOG2_ROUNDUP(size, 8) \
__LOG2_ROUNDUP(size, 9) \
__LOG2_ROUNDUP(size, 10) \
__LOG2_ROUNDUP(size, 11) \
12)
#define __LOG2_ROUNDUP(size, order) \
(size) <= (1 << order) ? order :
#define PROC_INFO \
. = ALIGN(4); \
VMLINUX_SYMBOL(__proc_info_begin) = .; \
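Aside: LOG2_ROUNDUP() above is only a workaround for binutils older than v2.24, which lacks LOG2CEIL() in linker scripts; it evaluates to the ceiling of log2(size), clamped to the orders 2 through 12 that the macro enumerates. A minimal C sketch of the same computation, with the function name and test values chosen purely for illustration:

#include <assert.h>
#include <stddef.h>

/* Ceiling of log2(size), clamped to the orders 2..12 that the
 * LOG2_ROUNDUP() linker-script macro above enumerates. */
static unsigned int log2_roundup(size_t size)
{
        unsigned int order;

        for (order = 2; order < 12; order++)
                if (size <= ((size_t)1 << order))
                        return order;
        return 12;
}

int main(void)
{
        assert(log2_roundup(4) == 2);      /* exact power of two */
        assert(log2_roundup(100) == 7);    /* 64 < 100 <= 128 */
        assert(log2_roundup(4096) == 12);  /* a full 4 KB page */
        return 0;
}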
@@ -23,11 +43,20 @@
VMLINUX_SYMBOL(__idmap_text_start) = .; \
*(.idmap.text) \
VMLINUX_SYMBOL(__idmap_text_end) = .; \
. = ALIGN(32); \
. = ALIGN(1 << LOG2_ROUNDUP(__hyp_idmap_size)); \
VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
*(.hyp.idmap.text) \
VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
/*
* If the HYP idmap .text section is populated, it needs to be positioned
* such that it will not cross a page boundary in the final output image.
* So align it to the section size rounded up to the next power of 2.
* If __hyp_idmap_size is undefined, the section will be empty so define
* it as 0 in that case.
*/
PROVIDE(__hyp_idmap_size = 0);
#ifdef CONFIG_HOTPLUG_CPU
#define ARM_CPU_DISCARD(x)
#define ARM_CPU_KEEP(x) x
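The point of aligning to the section size rounded up to a power of two: whatever start address the linker then chooses, the section's offset within its page plus its size stays within PAGE_SIZE, which is exactly what the new ASSERT() further down verifies. A rough C model of that reasoning, assuming a 4 KB page and a made-up __hyp_idmap_size (helper name is illustrative):

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE       4096UL
#define PAGE_MASK       (~(PAGE_SIZE - 1))

/* Smallest power of two >= v, for 0 < v <= PAGE_SIZE. */
static uint64_t pow2_roundup(uint64_t v)
{
        uint64_t p = 1;

        while (p < v)
                p <<= 1;
        return p;
}

int main(void)
{
        uint64_t size  = 0x164;                 /* pretend __hyp_idmap_size */
        uint64_t align = pow2_roundup(size);    /* 1 << LOG2_ROUNDUP(size)  */

        /* Whichever multiple of 'align' the linker picks as the start,
         * the new ASSERT() condition holds: the offset within the page
         * plus the size never exceeds PAGE_SIZE. */
        for (uint64_t start = 0; start < 8 * PAGE_SIZE; start += align)
                assert((start & ~PAGE_MASK) + size <= PAGE_SIZE);
        return 0;
}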
@@ -346,8 +375,11 @@ SECTIONS
*/
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
/*
* The HYP init code can't be more than a page long.
* The HYP init code can't be more than a page long,
* and should not cross a page boundary.
* The above comment applies as well.
*/
ASSERT(((__hyp_idmap_text_end - __hyp_idmap_text_start) <= PAGE_SIZE), "HYP init code too big")
ASSERT((__hyp_idmap_text_start & ~PAGE_MASK) + __hyp_idmap_size <= PAGE_SIZE,
"HYP init code too big or misaligned")
@@ -157,3 +157,6 @@ target: @ We're now in the trampoline code, switch page tables
__kvm_hyp_init_end:
.popsection
.global __hyp_idmap_size
.set __hyp_idmap_size, __kvm_hyp_init_end - __kvm_hyp_init
@@ -37,7 +37,6 @@ static pgd_t *boot_hyp_pgd;
static pgd_t *hyp_pgd;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
static void *init_bounce_page;
static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;
@@ -405,9 +404,6 @@ void free_boot_hyp_pgd(void)
if (hyp_pgd)
unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
free_page((unsigned long)init_bounce_page);
init_bounce_page = NULL;
mutex_unlock(&kvm_hyp_pgd_mutex);
}
@@ -1498,39 +1494,11 @@ int kvm_mmu_init(void)
hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
/*
* Our init code is crossing a page boundary. Allocate
* a bounce page, copy the code over and use that.
*/
size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
phys_addr_t phys_base;
init_bounce_page = (void *)__get_free_page(GFP_KERNEL);
if (!init_bounce_page) {
kvm_err("Couldn't allocate HYP init bounce page\n");
err = -ENOMEM;
goto out;
}
memcpy(init_bounce_page, __hyp_idmap_text_start, len);
/*
* Warning: the code we just copied to the bounce page
* must be flushed to the point of coherency.
* Otherwise, the data may be sitting in L2, and HYP
* mode won't be able to observe it as it runs with
* caches off at that point.
*/
kvm_flush_dcache_to_poc(init_bounce_page, len);
phys_base = kvm_virt_to_phys(init_bounce_page);
hyp_idmap_vector += phys_base - hyp_idmap_start;
hyp_idmap_start = phys_base;
hyp_idmap_end = phys_base + len;
kvm_info("Using HYP init bounce page @%lx\n",
(unsigned long)phys_base);
}
/*
* We rely on the linker script to ensure at build time that the HYP
* init code does not cross a page boundary.
*/
BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
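The BUG_ON() above is the runtime counterpart of the link-time guarantee: XOR-ing the address of the first byte with that of the last byte and masking with PAGE_MASK is non-zero precisely when the two bytes fall on different pages. A standalone sketch of that test, with invented addresses and an illustrative helper name:

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE       4096UL
#define PAGE_MASK       (~(PAGE_SIZE - 1))

/* Non-zero iff 'start' and the last byte before 'end' lie on different
 * pages, i.e. the range [start, end) crosses a page boundary. */
static uint64_t spans_two_pages(uint64_t start, uint64_t end)
{
        return (start ^ (end - 1)) & PAGE_MASK;
}

int main(void)
{
        /* 0x300 bytes starting 0x100 into a page: one page, check passes. */
        assert(!spans_two_pages(0x40008100, 0x40008400));

        /* The same 0x300 bytes starting 0xf00 into a page spill onto the
         * next one, the case the removed bounce page used to paper over. */
        assert(spans_two_pages(0x40008f00, 0x40009200));
        return 0;
}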
@@ -23,10 +23,14 @@ jiffies = jiffies_64;
#define HYPERVISOR_TEXT \
/* \
* Force the alignment to be compatible with \
* the vectors requirements \
* Align to 4 KB so that \
* a) the HYP vector table is at its minimum \
* alignment of 2048 bytes \
* b) the HYP init code will not cross a page \
* boundary if its size does not exceed \
* 4 KB (see related ASSERT() below) \
*/ \
. = ALIGN(2048); \
. = ALIGN(SZ_4K); \
VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
*(.hyp.idmap.text) \
VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; \
@@ -163,10 +167,11 @@ SECTIONS
}
/*
* The HYP init code can't be more than a page long.
* The HYP init code can't be more than a page long,
* and should not cross a page boundary.
*/
ASSERT(((__hyp_idmap_text_start + PAGE_SIZE) > __hyp_idmap_text_end),
"HYP init code too big")
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
"HYP init code too big or misaligned")
/*
* If padding is applied before .head.text, virt<->phys conversions will fail.