Commit 7134fa07 authored by Paolo Bonzini

Merge tag 'kvmarm-fixes-5.7-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-master

KVM/arm fixes for Linux 5.7, take #2

- Fix compilation with Clang
- Correctly initialize GICv4.1 in the absence of a virtual ITS
- Move SP_EL0 save/restore to the guest entry/exit code
- Handle PC wrap-around on 32-bit guests, and narrow all 32-bit
  registers on userspace access
parents 9e5e19f5 0225fd5e
@@ -200,6 +200,13 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	}
 
 	memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
+
+	if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
+		int i;
+
+		for (i = 0; i < 16; i++)
+			*vcpu_reg32(vcpu, i) = (u32)*vcpu_reg32(vcpu, i);
+	}
 out:
 	return err;
 }
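The added loop narrows the 16 AArch32 general-purpose registers (r0-r15) whenever the vcpu's CPSR has PSR_MODE32_BIT set, so a 64-bit userspace write can never leave stale upper bits visible to a 32-bit guest. A minimal stand-alone sketch of the same narrowing semantics (the value is made up for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A 64-bit value userspace could feed to KVM_SET_ONE_REG for a
	 * vcpu that is running in AArch32 (PSR_MODE32_BIT) mode. */
	uint64_t reg = 0xdeadbeef00c0ffeeULL;

	/* Same cast as the loop in set_core_reg(): only the low 32 bits
	 * are architecturally visible to a 32-bit guest. */
	reg = (uint32_t)reg;

	printf("0x%llx\n", (unsigned long long)reg); /* prints 0xc0ffee */
	return 0;
}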
...
@@ -18,6 +18,7 @@
 #define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
 #define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
+#define CPU_SP_EL0_OFFSET	(CPU_XREG_OFFSET(30) + 8)
 
 	.text
 	.pushsection	.hyp.text, "ax"
@@ -47,6 +48,16 @@
 	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
 .endm
 
+.macro save_sp_el0 ctxt, tmp
+	mrs	\tmp,	sp_el0
+	str	\tmp,	[\ctxt, #CPU_SP_EL0_OFFSET]
+.endm
+
+.macro restore_sp_el0 ctxt, tmp
+	ldr	\tmp,	[\ctxt, #CPU_SP_EL0_OFFSET]
+	msr	sp_el0, \tmp
+.endm
+
 /*
  * u64 __guest_enter(struct kvm_vcpu *vcpu,
  *		     struct kvm_cpu_context *host_ctxt);
@@ -60,6 +71,9 @@ SYM_FUNC_START(__guest_enter)
 	// Store the host regs
 	save_callee_saved_regs x1
 
+	// Save the host's sp_el0
+	save_sp_el0 x1, x2
+
 	// Now the host state is stored, if we have a pending RAS SError it must
 	// affect the host. If any asynchronous exception is pending we defer
 	// the guest entry. The DSB isn't necessary before v8.2 as any SError
@@ -83,6 +97,9 @@ alternative_else_nop_endif
 	// when this feature is enabled for kernel code.
 	ptrauth_switch_to_guest x29, x0, x1, x2
 
+	// Restore the guest's sp_el0
+	restore_sp_el0 x29, x0
+
 	// Restore guest regs x0-x17
 	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
 	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
@@ -130,6 +147,9 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
 	// Store the guest regs x18-x29, lr
 	save_callee_saved_regs x1
 
+	// Store the guest's sp_el0
+	save_sp_el0 x1, x2
+
 	get_host_ctxt	x2, x3
 
 	// Macro ptrauth_switch_to_guest format:
@@ -139,6 +159,9 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
 	// when this feature is enabled for kernel code.
 	ptrauth_switch_to_host x1, x2, x3, x4, x5
 
+	// Restore the host's sp_el0
+	restore_sp_el0 x2, x3
+
 	// Now restore the host regs
 	restore_callee_saved_regs x2
...
@@ -198,7 +198,6 @@ SYM_CODE_END(__hyp_panic)
 .macro invalid_vector	label, target = __hyp_panic
 	.align	2
 SYM_CODE_START(\label)
-\label:
 	b \target
 SYM_CODE_END(\label)
 .endm
...
@@ -15,8 +15,9 @@
 /*
  * Non-VHE: Both host and guest must save everything.
  *
- * VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and pstate,
- * which are handled as part of the el2 return state) on every switch.
+ * VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and
+ * pstate, which are handled as part of the el2 return state) on every
+ * switch (sp_el0 is handled in the assembly code).
  * tpidr_el0 and tpidrro_el0 only need to be switched when going
  * to host userspace or a different VCPU.  EL1 registers only need to be
  * switched when potentially going to run a different VCPU.  The latter two
@@ -26,12 +27,6 @@
 static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
 {
 	ctxt->sys_regs[MDSCR_EL1]	= read_sysreg(mdscr_el1);
-
-	/*
-	 * The host arm64 Linux uses sp_el0 to point to 'current' and it must
-	 * therefore be saved/restored on every entry/exit to/from the guest.
-	 */
-	ctxt->gp_regs.regs.sp		= read_sysreg(sp_el0);
 }
 
 static void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
@@ -99,12 +94,6 @@ NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);
 static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
 {
 	write_sysreg(ctxt->sys_regs[MDSCR_EL1],	  mdscr_el1);
-
-	/*
-	 * The host arm64 Linux uses sp_el0 to point to 'current' and it must
-	 * therefore be saved/restored on every entry/exit to/from the guest.
-	 */
-	write_sysreg(ctxt->gp_regs.regs.sp, sp_el0);
 }
 
 static void __hyp_text __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
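The deleted comments explain why sp_el0 is special: the arm64 host keeps its 'current' task pointer there. Saving and restoring it in C is fragile, since any host C code running before the restore (tracing, accounting, anything dereferencing current) would see a guest value; moving the save/restore into __guest_enter/__guest_exit guarantees sp_el0 is correct before the first host C function runs. The host accessor looks roughly like this (a paraphrase of arch/arm64/include/asm/current.h, trimmed for illustration, not a verbatim quote):

struct task_struct;

static inline struct task_struct *get_current(void)
{
	unsigned long sp_el0;

	/* The arm64 kernel stashes the current task pointer in sp_el0. */
	asm ("mrs %0, sp_el0" : "=r" (sp_el0));

	return (struct task_struct *)sp_el0;
}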
...
@@ -125,12 +125,16 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
  */
 void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
 {
+	u32 pc = *vcpu_pc(vcpu);
 	bool is_thumb;
 
 	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
 	if (is_thumb && !is_wide_instr)
-		*vcpu_pc(vcpu) += 2;
+		pc += 2;
 	else
-		*vcpu_pc(vcpu) += 4;
+		pc += 4;
+
+	*vcpu_pc(vcpu) = pc;
+
 	kvm_adjust_itstate(vcpu);
 }
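The temporary u32 makes the PC arithmetic wrap at the 4GB boundary, which the old in-place increment of the 64-bit PC did not. A stand-alone demonstration of the difference (the PC value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A Thumb guest executing at the very top of its 32-bit
	 * address space. */
	uint64_t pc64 = 0xfffffffeULL;

	/* Old behaviour: the increment was applied to the 64-bit PC,
	 * so it escaped the 32-bit address space: prints 0x100000000. */
	printf("old: 0x%llx\n", (unsigned long long)(pc64 + 2));

	/* New behaviour: compute in a u32 so the PC wraps: prints 0x0. */
	uint32_t pc = (uint32_t)pc64;
	pc += 2;
	printf("new: 0x%x\n", pc);
	return 0;
}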
@@ -294,8 +294,15 @@ int vgic_init(struct kvm *kvm)
 		}
 	}
 
-	if (vgic_has_its(kvm)) {
+	if (vgic_has_its(kvm))
 		vgic_lpi_translation_cache_init(kvm);
+
+	/*
+	 * If we have GICv4.1 enabled, unconditionally request v4
+	 * support so that we get HW-accelerated vSGIs. Otherwise, only
+	 * enable it if we present a virtual ITS to the guest.
+	 */
+	if (vgic_supports_direct_msis(kvm)) {
 		ret = vgic_v4_init(kvm);
 		if (ret)
 			goto out;
...
@@ -50,7 +50,8 @@ bool vgic_has_its(struct kvm *kvm)
 
 bool vgic_supports_direct_msis(struct kvm *kvm)
 {
-	return kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm);
+	return (kvm_vgic_global_state.has_gicv4_1 ||
+		(kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm)));
 }
 
 /*
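The reworked predicate reads as a two-row decision table: GICv4.1 hardware qualifies unconditionally (it can deliver vSGIs without an ITS), while plain GICv4 still requires a virtual ITS. A stand-alone model of that logic (the struct and field names mimic kvm_vgic_global_state but are local to this sketch):

#include <stdbool.h>
#include <stdio.h>

struct vgic_global_model {
	bool has_gicv4;
	bool has_gicv4_1;
	bool has_its;	/* stands in for vgic_has_its(kvm) */
};

static bool supports_direct_msis(const struct vgic_global_model *s)
{
	return s->has_gicv4_1 || (s->has_gicv4 && s->has_its);
}

int main(void)
{
	struct vgic_global_model v41_no_its = { .has_gicv4_1 = true };
	struct vgic_global_model v4_no_its  = { .has_gicv4 = true };

	printf("%d %d\n",
	       supports_direct_msis(&v41_no_its),	/* 1: v4.1 alone */
	       supports_direct_msis(&v4_no_its));	/* 0: v4 needs ITS */
	return 0;
}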
...