Commit d2b2ecba authored by Jintack Lim, committed by Oliver Upton

KVM: arm64: nv: Forward FP/ASIMD traps to guest hypervisor

Give precedence to the guest hypervisor's trap configuration when
routing an FP/ASIMD trap taken to EL2. Take advantage of the
infrastructure for translating CPTR_EL2 into the VHE (i.e. EL1) format
and base the trap decision solely on the VHE view of the register. The
in-memory value of CPTR_EL2 will always be up to date for the guest
hypervisor (more on that later), so just read it directly from memory.
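
As a rough illustration (not the kernel's translate_cptr_el2_to_cpacr_el1() helper; the constants and helper name below are local to this sketch), the nVHE-to-VHE translation for the FP/ASIMD control amounts to turning CPTR_EL2.TFP (bit 10, set means trap) into the CPACR_EL1-style FPEN field (bits [21:20], where 0b11 means no trap):

#include <stdint.h>

/* Illustrative bit positions per the Arm ARM; not the kernel's definitions. */
#define CPTR_EL2_TFP        (UINT64_C(1) << 10)  /* nVHE: trap FP/ASIMD to EL2 */
#define CPACR_FPEN_SHIFT    20                    /* VHE: FPEN lives in bits [21:20] */
#define CPACR_FPEN_NOTRAP   (UINT64_C(3) << CPACR_FPEN_SHIFT)

/* Map the nVHE trap bit onto the VHE FPEN encoding. */
static uint64_t cptr_fpen_to_vhe(uint64_t cptr_el2)
{
        uint64_t cpacr = 0;

        /* TFP clear: FP/ASIMD accesses are not trapped, so FPEN = 0b11 */
        if (!(cptr_el2 & CPTR_EL2_TFP))
                cpacr |= CPACR_FPEN_NOTRAP;

        /* TFP set: FPEN is left as 0b00, i.e. accesses trap */
        return cpacr;
}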

Bury all of this behind a macro keyed off of the CPTR bitfield in
anticipation of supporting other traps (e.g. SVE).
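
For a sense of where that macro is headed (a hypothetical follow-up, not part of this patch), an SVE variant would only need to select the ZEN field of the same sanitised register:

/* Hypothetical sketch, assuming CPACR_ELx.ZEN is handled like FPEN below. */
static inline bool guest_hyp_sve_traps_enabled(const struct kvm_vcpu *vcpu)
{
        return __guest_hyp_cptr_xen_trap_enabled(vcpu, ZEN);
}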

[maz: account for HCR_EL2.E2H when testing for TFP/FPEN, with
 all the hard work actually being done by Chase Conklin]
[ oliver: translate nVHE->VHE format for testing traps; macro for reuse
 in other CPTR_EL2.xEN fields ]
Signed-off-by: Jintack Lim <jintack.lim@linaro.org>
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20240620164653.1130714-2-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parent 83a7eefe
@@ -11,6 +11,7 @@
 #ifndef __ARM64_KVM_EMULATE_H__
 #define __ARM64_KVM_EMULATE_H__
 
+#include <linux/bitfield.h>
 #include <linux/kvm_host.h>
 
 #include <asm/debug-monitors.h>
@@ -660,4 +661,46 @@ static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
         kvm_write_cptr_el2(val);
 }
 
+/*
+ * Returns a 'sanitised' view of CPTR_EL2, translating from nVHE to the VHE
+ * format if E2H isn't set.
+ */
+static inline u64 vcpu_sanitised_cptr_el2(const struct kvm_vcpu *vcpu)
+{
+        u64 cptr = __vcpu_sys_reg(vcpu, CPTR_EL2);
+
+        if (!vcpu_el2_e2h_is_set(vcpu))
+                cptr = translate_cptr_el2_to_cpacr_el1(cptr);
+
+        return cptr;
+}
+
+static inline bool ____cptr_xen_trap_enabled(const struct kvm_vcpu *vcpu,
+                                             unsigned int xen)
+{
+        switch (xen) {
+        case 0b00:
+        case 0b10:
+                return true;
+        case 0b01:
+                return vcpu_el2_tge_is_set(vcpu) && !vcpu_is_el2(vcpu);
+        case 0b11:
+        default:
+                return false;
+        }
+}
+
+#define __guest_hyp_cptr_xen_trap_enabled(vcpu, xen)                    \
+        (!vcpu_has_nv(vcpu) ? false :                                   \
+         ____cptr_xen_trap_enabled(vcpu,                                \
+                                   SYS_FIELD_GET(CPACR_ELx, xen,        \
+                                                 vcpu_sanitised_cptr_el2(vcpu))))
+
+static inline bool guest_hyp_fpsimd_traps_enabled(const struct kvm_vcpu *vcpu)
+{
+        return __guest_hyp_cptr_xen_trap_enabled(vcpu, FPEN);
+}
+
 #endif /* __ARM64_KVM_EMULATE_H__ */
@@ -94,11 +94,19 @@ static int handle_smc(struct kvm_vcpu *vcpu)
 }
 
 /*
- * Guest access to FP/ASIMD registers are routed to this handler only
- * when the system doesn't support FP/ASIMD.
+ * This handles the cases where the system does not support FP/ASIMD or when
+ * we are running nested virtualization and the guest hypervisor is trapping
+ * FP/ASIMD accesses by its guest guest.
+ *
+ * All other handling of guest vs. host FP/ASIMD register state is handled in
+ * fixup_guest_exit().
  */
-static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
+static int kvm_handle_fpasimd(struct kvm_vcpu *vcpu)
 {
+        if (guest_hyp_fpsimd_traps_enabled(vcpu))
+                return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+
+        /* This is the case when the system doesn't support FP/ASIMD. */
         kvm_inject_undefined(vcpu);
         return 1;
 }
 
@@ -304,7 +312,7 @@ static exit_handle_fn arm_exit_handlers[] = {
         [ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
         [ESR_ELx_EC_BKPT32]     = kvm_handle_guest_debug,
         [ESR_ELx_EC_BRK64]      = kvm_handle_guest_debug,
-        [ESR_ELx_EC_FP_ASIMD]   = handle_no_fpsimd,
+        [ESR_ELx_EC_FP_ASIMD]   = kvm_handle_fpasimd,
         [ESR_ELx_EC_PAC]        = kvm_handle_ptrauth,
 };
@@ -354,6 +354,9 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
         /* Only handle traps the vCPU can support here: */
         switch (esr_ec) {
         case ESR_ELx_EC_FP_ASIMD:
+                /* Forward traps to the guest hypervisor as required */
+                if (guest_hyp_fpsimd_traps_enabled(vcpu))
+                        return false;
                 break;
         case ESR_ELx_EC_SVE:
                 if (!sve_guest)