Commit ceda9fff authored by Dave Martin, committed by Marc Zyngier

KVM: arm64: Convert lazy FPSIMD context switch trap to C

To make the lazy FPSIMD context switch trap code easier to hack on,
this patch converts it to C.

This is not amazingly efficient, but the trap should typically only
be taken once per host context switch.
Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent bd2a6394
...@@ -172,40 +172,27 @@ ENTRY(__fpsimd_guest_restore) ...@@ -172,40 +172,27 @@ ENTRY(__fpsimd_guest_restore)
// x1: vcpu // x1: vcpu
// x2-x29,lr: vcpu regs // x2-x29,lr: vcpu regs
// vcpu x0-x1 on the stack // vcpu x0-x1 on the stack
stp x2, x3, [sp, #-16]! stp x2, x3, [sp, #-144]!
stp x4, lr, [sp, #-16]! stp x4, x5, [sp, #16]
stp x6, x7, [sp, #32]
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN stp x8, x9, [sp, #48]
mrs x2, cptr_el2 stp x10, x11, [sp, #64]
bic x2, x2, #CPTR_EL2_TFP stp x12, x13, [sp, #80]
msr cptr_el2, x2 stp x14, x15, [sp, #96]
alternative_else stp x16, x17, [sp, #112]
mrs x2, cpacr_el1 stp x18, lr, [sp, #128]
orr x2, x2, #CPACR_EL1_FPEN
msr cpacr_el1, x2 bl __hyp_switch_fpsimd
alternative_endif
isb ldp x4, x5, [sp, #16]
ldp x6, x7, [sp, #32]
mov x3, x1 ldp x8, x9, [sp, #48]
ldp x10, x11, [sp, #64]
ldr x0, [x3, #VCPU_HOST_CONTEXT] ldp x12, x13, [sp, #80]
kern_hyp_va x0 ldp x14, x15, [sp, #96]
add x0, x0, #CPU_GP_REG_OFFSET(CPU_FP_REGS) ldp x16, x17, [sp, #112]
bl __fpsimd_save_state ldp x18, lr, [sp, #128]
ldp x0, x1, [sp, #144]
add x2, x3, #VCPU_CONTEXT ldp x2, x3, [sp], #160
add x0, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
bl __fpsimd_restore_state
// Skip restoring fpexc32 for AArch64 guests
mrs x1, hcr_el2
tbnz x1, #HCR_RW_SHIFT, 1f
ldr x4, [x3, #VCPU_FPEXC32_EL2]
msr fpexc32_el2, x4
1:
ldp x4, lr, [sp], #16
ldp x2, x3, [sp], #16
ldp x0, x1, [sp], #16
eret eret
ENDPROC(__fpsimd_guest_restore) ENDPROC(__fpsimd_guest_restore)
...@@ -318,6 +318,30 @@ static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu) ...@@ -318,6 +318,30 @@ static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
} }
} }
/*
 * Lazy FPSIMD switch: called from the __fpsimd_guest_restore stub when a
 * guest FPSIMD access traps to hyp. Re-enables FP/SIMD access, saves the
 * host's FPSIMD state and loads the vcpu's.
 *
 * esr is unused here; it is passed because the stub hands through the
 * trap syndrome in x0.
 */
void __hyp_text __hyp_switch_fpsimd(u64 esr __always_unused,
				    struct kvm_vcpu *vcpu)
{
	kvm_cpu_context_t *host_ctxt;

	/*
	 * Stop trapping FP/SIMD: VHE controls this via CPACR_EL1.FPEN,
	 * non-VHE via CPTR_EL2.TFP. The isb() must follow the sysreg
	 * write before any FPSIMD register is touched below.
	 */
	if (has_vhe()) {
		u64 cpacr = read_sysreg(cpacr_el1) | CPACR_EL1_FPEN;

		write_sysreg(cpacr, cpacr_el1);
	} else {
		u64 cptr = read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP;

		write_sysreg(cptr, cptr_el2);
	}
	isb();

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	__fpsimd_save_state(&host_ctxt->gp_regs.fp_regs);
	__fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);

	/* Skip restoring fpexc32 for AArch64 guests */
	if (!(read_sysreg(hcr_el2) & HCR_RW))
		write_sysreg(vcpu->arch.ctxt.sys_regs[FPEXC32_EL2],
			     fpexc32_el2);
}
/* /*
* Return true when we were able to fixup the guest exit and should return to * Return true when we were able to fixup the guest exit and should return to
* the guest, false when we should restore the host state and return to the * the guest, false when we should restore the host state and return to the
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment