Commit 6e977984 authored by Marc Zyngier

KVM: arm64: Save/restore sp_el0 as part of __guest_enter

We currently save/restore sp_el0 in C code. This is a bit unsafe,
as a lot of the C code expects 'current' to be accessible from
there (and the opportunity to run kernel code in HYP is especially
great with VHE).

Instead, let's move the save/restore of sp_el0 to the assembly
code (in __guest_enter), making sure that sp_el0 is correct
very early on when we exit the guest, and that it keeps its host
value for as long as possible when we enter the guest.
Reviewed-by: Andrew Jones <drjones@redhat.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent 6aea9e05
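
For context on why running C code with the guest's sp_el0 loaded is dangerous: on arm64, 'current' is derived directly from sp_el0. The snippet below is a simplified sketch of the accessor in arch/arm64/include/asm/current.h; any HYP C code executed before the host's sp_el0 has been put back would treat a guest-controlled value as the 'current' task pointer.

	/* Simplified from arch/arm64/include/asm/current.h: while in the
	 * kernel, sp_el0 holds the running task's struct task_struct pointer. */
	struct task_struct;

	static __always_inline struct task_struct *get_current(void)
	{
		unsigned long sp_el0;

		asm ("mrs %0, sp_el0" : "=r" (sp_el0));

		return (struct task_struct *)sp_el0;
	}

	#define current get_current()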
@@ -18,6 +18,7 @@
#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
+#define CPU_SP_EL0_OFFSET	(CPU_XREG_OFFSET(30) + 8)

	.text
	.pushsection	.hyp.text, "ax"

@@ -47,6 +48,16 @@
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

+.macro save_sp_el0 ctxt, tmp
+	mrs	\tmp,	sp_el0
+	str	\tmp,	[\ctxt, #CPU_SP_EL0_OFFSET]
+.endm
+
+.macro restore_sp_el0 ctxt, tmp
+	ldr	\tmp,	[\ctxt, #CPU_SP_EL0_OFFSET]
+	msr	sp_el0, \tmp
+.endm
+
/*
 * u64 __guest_enter(struct kvm_vcpu *vcpu,
 *		     struct kvm_cpu_context *host_ctxt);
@@ -60,6 +71,9 @@ SYM_FUNC_START(__guest_enter)
	// Store the host regs
	save_callee_saved_regs x1

+	// Save the host's sp_el0
+	save_sp_el0	x1, x2
+
	// Now the host state is stored if we have a pending RAS SError it must
	// affect the host. If any asynchronous exception is pending we defer
	// the guest entry. The DSB isn't necessary before v8.2 as any SError

@@ -83,6 +97,9 @@ alternative_else_nop_endif
	// when this feature is enabled for kernel code.
	ptrauth_switch_to_guest x29, x0, x1, x2

+	// Restore the guest's sp_el0
+	restore_sp_el0 x29, x0
+
	// Restore guest regs x0-x17
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
@@ -130,6 +147,9 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
	// Store the guest regs x18-x29, lr
	save_callee_saved_regs x1

+	// Store the guest's sp_el0
+	save_sp_el0	x1, x2
+
	get_host_ctxt	x2, x3

	// Macro ptrauth_switch_to_guest format:

@@ -139,6 +159,9 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
	// when this feature is enabled for kernel code.
	ptrauth_switch_to_host x1, x2, x3, x4, x5

+	// Restore the host's sp_el0
+	restore_sp_el0 x2, x3
+
	// Now restore the host regs
	restore_callee_saved_regs x2

...
@@ -15,8 +15,9 @@
/*
 * Non-VHE: Both host and guest must save everything.
 *
- * VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and pstate,
- * which are handled as part of the el2 return state) on every switch.
+ * VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and
+ * pstate, which are handled as part of the el2 return state) on every
+ * switch (sp_el0 is being dealt with in the assembly code).
 * tpidr_el0 and tpidrro_el0 only need to be switched when going
 * to host userspace or a different VCPU. EL1 registers only need to be
 * switched when potentially going to run a different VCPU. The latter two
@@ -26,12 +27,6 @@
static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
{
	ctxt->sys_regs[MDSCR_EL1]	= read_sysreg(mdscr_el1);
-
-	/*
-	 * The host arm64 Linux uses sp_el0 to point to 'current' and it must
-	 * therefore be saved/restored on every entry/exit to/from the guest.
-	 */
-	ctxt->gp_regs.regs.sp		= read_sysreg(sp_el0);
}

static void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
@@ -99,12 +94,6 @@ NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe)
static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
-
-	/*
-	 * The host arm64 Linux uses sp_el0 to point to 'current' and it must
-	 * therefore be saved/restored on every entry/exit to/from the guest.
-	 */
-	write_sysreg(ctxt->gp_regs.regs.sp, sp_el0);
}

static void __hyp_text __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)

...
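
A note on the new offset, in case the arithmetic is not obvious: CPU_SP_EL0_OFFSET is defined as CPU_XREG_OFFSET(30) + 8 so that the assembly reads and writes the very same slot the removed C code used, ctxt->gp_regs.regs.sp, because sp immediately follows regs[30] in struct user_pt_regs. The snippet below is only an illustration of that layout (a standalone copy modelled on the arm64 uapi struct, not the kernel's own header):

	#include <stddef.h>

	/* Illustration only: standalone copy of the arm64 uapi user_pt_regs
	 * layout, showing why CPU_XREG_OFFSET(30) + 8 lands on the 'sp' slot
	 * that ctxt->gp_regs.regs.sp used to be written through. */
	struct user_pt_regs {
		unsigned long long regs[31];	/* x0..x30 */
		unsigned long long sp;		/* <- CPU_SP_EL0_OFFSET points here */
		unsigned long long pc;
		unsigned long long pstate;
	};

	/* regs[30] sits 30 * 8 bytes from the start; sp is 8 bytes after it. */
	_Static_assert(offsetof(struct user_pt_regs, sp) == 30 * 8 + 8,
		       "sp must immediately follow regs[30]");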