Commit cedbb8b7 authored by Marc Zyngier

arm64: KVM: VHE: Patch out kern_hyp_va

The kern_hyp_va macro is pretty meaningless with VHE, as there is
only one mapping - the kernel one.

In order to keep the code readable and efficient, use runtime
patching to replace the 'and' instruction used to compute the VA
with a 'nop'.
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent b81125c7
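
For context, a minimal sketch of the C-side alternatives pattern this patch relies on (ALTERNATIVE() and ARM64_HAS_VIRT_HOST_EXTN are the real kernel symbols; demo_mask() and its 48-bit mask are made up for illustration, and the shape mirrors the __kern_hyp_va() rework in the diff below). The first instruction string is what the compiler emits at build time; when the boot CPU advertises the named capability, the boot-time alternatives pass overwrites it in place with the second string. The arm64 implementation at this point requires both sequences to assemble to the same length, which is why a single 'and' is exchanged one-for-one with a single 'nop'.

#include <asm/alternative.h>	/* ALTERNATIVE() */
#include <asm/cpufeature.h>	/* ARM64_HAS_VIRT_HOST_EXTN */

/* Illustrative sketch, not part of this patch: mask off the top 16
 * bits on ARMv8.0; on a VHE-capable CPU the 'and' is rewritten to a
 * 'nop' at boot, so the masking vanishes without any runtime test.
 */
static inline unsigned long demo_mask(unsigned long v)
{
	asm volatile(ALTERNATIVE("and %0, %0, %1",
				 "nop",
				 ARM64_HAS_VIRT_HOST_EXTN)
		     : "+r" (v) : "i" ((1UL << 48) - 1));
	return v;
}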
@@ -23,13 +23,16 @@
 #include <asm/cpufeature.h>
 
 /*
- * As we only have the TTBR0_EL2 register, we cannot express
+ * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
  * "negative" addresses. This makes it impossible to directly share
  * mappings with the kernel.
  *
  * Instead, give the HYP mode its own VA region at a fixed offset from
  * the kernel by just masking the top bits (which are all ones for a
  * kernel address).
+ *
+ * ARMv8.1 (using VHE) does have a TTBR1_EL2, and doesn't use these
+ * macros (the entire kernel runs at EL2).
  */
 #define HYP_PAGE_OFFSET_SHIFT	VA_BITS
 #define HYP_PAGE_OFFSET_MASK	((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
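
To make the masking concrete, a worked example assuming VA_BITS == 48 (the kernel VA value is made up): kernel addresses have all top bits set, so clearing them with HYP_PAGE_OFFSET_MASK yields the fixed-offset HYP alias.

#include <stdio.h>

int main(void)
{
	unsigned long mask = (1UL << 48) - 1;	/* HYP_PAGE_OFFSET_MASK with VA_BITS == 48 */
	unsigned long kern_va = 0xffff000012345678UL;	/* illustrative kernel VA */

	/* kern_hyp_va: drop the all-ones top bits of the kernel VA */
	printf("hyp va = 0x%016lx\n", kern_va & mask);
	/* prints: hyp va = 0x0000000012345678 */
	return 0;
}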
@@ -56,12 +59,19 @@
 
 #ifdef __ASSEMBLY__
 
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
+
 /*
  * Convert a kernel VA into a HYP VA.
  * reg: VA to be converted.
  */
 .macro kern_hyp_va	reg
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
 	and	\reg, \reg, #HYP_PAGE_OFFSET_MASK
+alternative_else
+	nop
+alternative_endif
 .endm
 
 #else
@@ -25,9 +25,28 @@
 
 #define __hyp_text __section(.hyp.text) notrace
 
-#define kern_hyp_va(v) (typeof(v))((unsigned long)(v) & HYP_PAGE_OFFSET_MASK)
-#define hyp_kern_va(v) (typeof(v))((unsigned long)(v) - HYP_PAGE_OFFSET \
-						      + PAGE_OFFSET)
+static inline unsigned long __kern_hyp_va(unsigned long v)
+{
+	asm volatile(ALTERNATIVE("and %0, %0, %1",
+				 "nop",
+				 ARM64_HAS_VIRT_HOST_EXTN)
+		     : "+r" (v) : "i" (HYP_PAGE_OFFSET_MASK));
+	return v;
+}
+
+#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v)))
+
+static inline unsigned long __hyp_kern_va(unsigned long v)
+{
+	u64 offset = PAGE_OFFSET - HYP_PAGE_OFFSET;
+	asm volatile(ALTERNATIVE("add %0, %0, %1",
+				 "nop",
+				 ARM64_HAS_VIRT_HOST_EXTN)
+		     : "+r" (v) : "r" (offset));
+	return v;
+}
+
+#define hyp_kern_va(v) (typeof(v))(__hyp_kern_va((unsigned long)(v)))
 
 /**
  * hyp_alternate_select - Generates patchable code sequences that are
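
A hypothetical call site for the reworked helpers (demo_call_hyp(), the vcpu argument, and do_something_at_el2() are all illustrative, not from this patch): because the macros still cast through typeof(), existing callers are unaffected, and on VHE the inline asm degenerates to a nop so the pointer is passed through unchanged.

/* Illustrative only: hand a kernel pointer to code running at EL2. */
static void demo_call_hyp(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *hyp_vcpu = kern_hyp_va(vcpu);	/* pointer type preserved */

	do_something_at_el2(hyp_vcpu);	/* made-up callee */
}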