Commit 3dbf100b authored by James Morse, committed by Marc Zyngier

KVM: arm64: Abstract the size of the HYP vectors pre-amble

The EL2 vector hardening feature causes KVM to generate vectors for
each type of CPU present in the system. The generated sequences already
do some of the early guest-exit work (i.e. saving registers). To avoid
duplication the generated vectors branch to the original vector just
after the preamble. The size of this preamble is hard coded.

Adding new instructions to the HYP vector causes strange side effects,
which are difficult to debug as the affected code is patched in at
runtime.

Add KVM_VECTOR_PREAMBLE to tell kvm_patch_vector_branch() how big
the preamble is. The valid_vect macro can then validate this at
build time.
Reviewed-by: Julien Thierry <julien.thierry@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent 2b68a2a9
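
Before the diff itself, a minimal user-space C sketch of the arithmetic the patched branch relies on: the generated hardening vectors have already saved x0/x1, so they must branch into the original vector just past its preamble, and that offset should be a single shared constant rather than a hard-coded 4. The constant names mirror the kernel's; demo_patch_target() and the example values are hypothetical and only illustrate the computation.

#include <stdint.h>
#include <stdio.h>

/* Every AArch64 instruction is 4 bytes wide. */
#define AARCH64_INSN_SIZE	4

/*
 * Size of the HYP vectors preamble: currently a single instruction
 * (the "stp x0, x1, [sp, #-16]!" in valid_vect).
 */
#define KVM_VECTOR_PREAMBLE	(1 * AARCH64_INSN_SIZE)

/*
 * Hypothetical helper mirroring the address computed by
 * kvm_patch_vector_branch(): the hardening vectors have already done the
 * early stack store, so they branch to the original vector entry *after*
 * its preamble.
 */
static uint64_t demo_patch_target(uint64_t hyp_vector_va, uint64_t vector_offset)
{
	return hyp_vector_va + vector_offset + KVM_VECTOR_PREAMBLE;
}

int main(void)
{
	/* Made-up example values, just to show the arithmetic. */
	uint64_t hyp_vector_va = 0xffff800010001000ULL;
	uint64_t vector_offset = 0x400;

	printf("branch target: 0x%llx\n",
	       (unsigned long long)demo_patch_target(hyp_vector_va, vector_offset));
	return 0;
}

The assembler-side check added below (check_preamble_length) enforces the same invariant at build time: if the preamble ever grows, the build fails instead of the patched branch silently landing in the wrong place.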
arch/arm64/include/asm/kvm_asm.h
@@ -30,6 +30,12 @@
 	{ARM_EXCEPTION_TRAP,		"TRAP"		},	\
 	{ARM_EXCEPTION_HYP_GONE,	"HYP_GONE"	}
 
+/*
+ * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
+ * that jumps over this.
+ */
+#define KVM_VECTOR_PREAMBLE	(1 * AARCH64_INSN_SIZE)
+
 #ifndef __ASSEMBLY__
 
 #include <linux/mm.h>
arch/arm64/kvm/hyp/hyp-entry.S
@@ -216,17 +216,32 @@ ENDPROC(\label)
 	.align 11
 
+.macro check_preamble_length start, end
+/* kvm_patch_vector_branch() generates code that jumps over the preamble. */
+.if ((\end-\start) != KVM_VECTOR_PREAMBLE)
+	.error "KVM vector preamble length mismatch"
+.endif
+.endm
+
 .macro valid_vect target
 	.align 7
+661:
 	stp	x0, x1, [sp, #-16]!
+662:
 	b	\target
+
+check_preamble_length 661b, 662b
 .endm
 
 .macro invalid_vect target
 	.align 7
+661:
 	b	\target
+662:
 	ldp	x0, x1, [sp], #16
 	b	\target
+
+check_preamble_length 661b, 662b
 .endm
 
 ENTRY(__kvm_hyp_vector)
@@ -271,7 +286,8 @@ ENDPROC(__kvm_hyp_vector)
  * movk	x0, #((addr >> 32) & 0xffff), lsl #32
  * br	x0
  *
- * Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4.
+ * Where:
+ * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
  * See kvm_patch_vector_branch for details.
  */
 alternative_cb	kvm_patch_vector_branch
arch/arm64/kvm/va_layout.c
@@ -170,11 +170,10 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
 	addr |= ((u64)origptr & GENMASK_ULL(10, 7));
 
 	/*
-	 * Branch to the second instruction in the vectors in order to
-	 * avoid the initial store on the stack (which we already
-	 * perform in the hardening vectors).
+	 * Branch over the preamble in order to avoid the initial store on
+	 * the stack (which we already perform in the hardening vectors).
 	 */
-	addr += AARCH64_INSN_SIZE;
+	addr += KVM_VECTOR_PREAMBLE;
 
 	/* stp x0, x1, [sp, #-16]! */
 	insn = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_0,