Commit 091258a0 authored by Marc Zyngier

Merge branch kvm-arm64/fpmr into kvmarm-master/next

* kvm-arm64/fpmr:
  : .
  : Add FP8 support to the KVM/arm64 floating point handling.
  :
  : This includes new ID registers (ID_AA64PFR2_EL1 and ID_AA64FPFR0_EL1)
  : being made visible to guests, as well as a new control register
  : (FPMR) which gets context-switched.
  : .
  KVM: arm64: Expose ID_AA64PFR2_EL1 to userspace and guests
  KVM: arm64: Enable FP8 support when available and configured
  KVM: arm64: Expose ID_AA64FPFR0_EL1 as a writable ID reg
  KVM: arm64: Honor trap routing for FPMR
  KVM: arm64: Add save/restore support for FPMR
  KVM: arm64: Move FPMR into the sysreg array
  KVM: arm64: Add predicate for FPMR support in a VM
  KVM: arm64: Move SVCR into the sysreg array
Signed-off-by: Marc Zyngier <maz@kernel.org>
parents 8884fd12 13c7a51e
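
For readers checking this from userspace, here is a minimal sketch (not part of this series) of how the newly exposed ID_AA64PFR2_EL1 register could be read back through KVM's one-reg interface to see whether FPMR ended up visible to the guest. The vcpu fd setup is assumed to already exist, and both the sysreg encoding (op0=3, op1=0, CRn=0, CRm=4, op2=2) and the FPMR field position (bits [35:32]) are taken from the architecture rather than from this diff, so treat them as assumptions:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Hypothetical helper: returns the ID_AA64PFR2_EL1.FPMR field value
 * (non-zero means FEAT_FPMR is exposed to the guest), or -1 if the
 * register could not be read.
 */
static int guest_sees_fpmr(int vcpu_fd)
{
        uint64_t val = 0;
        struct kvm_one_reg reg = {
                /* ID_AA64PFR2_EL1: op0=3, op1=0, CRn=0, CRm=4, op2=2 */
                .id   = ARM64_SYS_REG(3, 0, 0, 4, 2),
                .addr = (uint64_t)&val,
        };

        if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
                return -1;

        /* FPMR field assumed at bits [35:32] per the Arm ARM */
        return (val >> 32) & 0xf;
}
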
@@ -446,6 +446,10 @@ enum vcpu_sysreg {
GCR_EL1, /* Tag Control Register */
TFSRE0_EL1, /* Tag Fault Status Register (EL0) */
/* FP/SIMD/SVE */
SVCR,
FPMR,
/* 32bit specific registers. */
DACR32_EL2, /* Domain Access Control Register */
IFSR32_EL2, /* Instruction Fault Status Register */
@@ -595,6 +599,16 @@ struct kvm_host_data {
struct cpu_sve_state *sve_state;
};
union {
/* HYP VA pointer to the host storage for FPMR */
u64 *fpmr_ptr;
/*
* Used by pKVM only, as it needs to provide storage
* for the host
*/
u64 fpmr;
};
/* Ownership of the FP regs */
enum {
FP_STATE_FREE,
@@ -664,8 +678,6 @@ struct kvm_vcpu_arch {
void *sve_state;
enum fp_type fp_type;
unsigned int sve_max_vl;
u64 svcr;
u64 fpmr;
/* Stage 2 paging state used by the hardware on next switch */
struct kvm_s2_mmu *hw_mmu;
@@ -1473,4 +1485,8 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
(pa + pi + pa3) == 1; \
})
#define kvm_has_fpmr(k) \
(system_supports_fpmr() && \
kvm_has_feat((k), ID_AA64PFR2_EL1, FPMR, IMP))
#endif /* __ARM64_KVM_HOST_H__ */
@@ -83,6 +83,7 @@ enum cgt_group_id {
CGT_CPTR_TAM,
CGT_CPTR_TCPAC,
CGT_HCRX_EnFPM,
CGT_HCRX_TCR2En,
/*
@@ -372,6 +373,12 @@ static const struct trap_bits coarse_trap_bits[] = {
.mask = CPTR_EL2_TCPAC,
.behaviour = BEHAVE_FORWARD_ANY,
},
[CGT_HCRX_EnFPM] = {
.index = HCRX_EL2,
.value = 0,
.mask = HCRX_EL2_EnFPM,
.behaviour = BEHAVE_FORWARD_ANY,
},
[CGT_HCRX_TCR2En] = {
.index = HCRX_EL2,
.value = 0,
@@ -1108,6 +1115,7 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
SR_TRAP(SYS_CNTP_CTL_EL0, CGT_CNTHCTL_EL1PTEN),
SR_TRAP(SYS_CNTPCT_EL0, CGT_CNTHCTL_EL1PCTEN),
SR_TRAP(SYS_CNTPCTSS_EL0, CGT_CNTHCTL_EL1PCTEN),
SR_TRAP(SYS_FPMR, CGT_HCRX_EnFPM),
};
static DEFINE_XARRAY(sr_forward_xa);
@@ -63,6 +63,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
*/
*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
*host_data_ptr(fpsimd_state) = kern_hyp_va(&current->thread.uw.fpsimd_state);
*host_data_ptr(fpmr_ptr) = kern_hyp_va(&current->thread.uw.fpmr);
vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
@@ -134,8 +135,8 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
fp_state.sve_state = vcpu->arch.sve_state;
fp_state.sve_vl = vcpu->arch.sve_max_vl;
fp_state.sme_state = NULL;
fp_state.svcr = &vcpu->arch.svcr;
fp_state.fpmr = &vcpu->arch.fpmr;
fp_state.svcr = &__vcpu_sys_reg(vcpu, SVCR);
fp_state.fpmr = &__vcpu_sys_reg(vcpu, FPMR);
fp_state.fp_type = &vcpu->arch.fp_type;
if (vcpu_has_sve(vcpu))
@@ -403,6 +403,9 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
else
__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);
if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm)))
write_sysreg_s(__vcpu_sys_reg(vcpu, FPMR), SYS_FPMR);
/* Skip restoring fpexc32 for AArch64 guests */
if (!(read_sysreg(hcr_el2) & HCR_RW))
write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);
@@ -62,6 +62,8 @@ static void fpsimd_sve_flush(void)
static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
{
bool has_fpmr;
if (!guest_owns_fp_regs())
return;
@@ -73,11 +75,18 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
else
__fpsimd_save_state(&vcpu->arch.ctxt.fp_regs);
has_fpmr = kvm_has_fpmr(kern_hyp_va(vcpu->kvm));
if (has_fpmr)
__vcpu_sys_reg(vcpu, FPMR) = read_sysreg_s(SYS_FPMR);
if (system_supports_sve())
__hyp_sve_restore_host();
else
__fpsimd_restore_state(*host_data_ptr(fpsimd_state));
if (has_fpmr)
write_sysreg_s(*host_data_ptr(fpmr), SYS_FPMR);
*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
}
@@ -197,6 +197,15 @@ static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
} else {
__fpsimd_save_state(*host_data_ptr(fpsimd_state));
}
if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm))) {
u64 val = read_sysreg_s(SYS_FPMR);
if (unlikely(is_protected_kvm_enabled()))
*host_data_ptr(fpmr) = val;
else
**host_data_ptr(fpmr_ptr) = val;
}
}
static const exit_handler_fn hyp_exit_handlers[] = {
@@ -312,6 +312,9 @@ static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
{
__fpsimd_save_state(*host_data_ptr(fpsimd_state));
if (kvm_has_fpmr(vcpu->kvm))
**host_data_ptr(fpmr_ptr) = read_sysreg_s(SYS_FPMR);
}
static bool kvm_hyp_handle_tlbi_el2(struct kvm_vcpu *vcpu, u64 *exit_code)
@@ -1545,6 +1545,10 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
break;
case SYS_ID_AA64PFR2_EL1:
/* We only expose FPMR */
val &= ID_AA64PFR2_EL1_FPMR;
break;
case SYS_ID_AA64ISAR1_EL1:
if (!vcpu_has_ptrauth(vcpu))
val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
@@ -1675,6 +1679,24 @@ static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
return REG_HIDDEN;
}
static unsigned int sme_visibility(const struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, SME, IMP))
return 0;
return REG_HIDDEN;
}
static unsigned int fp8_visibility(const struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
if (kvm_has_fpmr(vcpu->kvm))
return 0;
return REG_HIDDEN;
}
static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
@@ -2369,12 +2391,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_AA64PFR0_EL1_AdvSIMD |
ID_AA64PFR0_EL1_FP), },
ID_SANITISED(ID_AA64PFR1_EL1),
ID_UNALLOCATED(4,2),
ID_WRITABLE(ID_AA64PFR2_EL1, ID_AA64PFR2_EL1_FPMR),
ID_UNALLOCATED(4,3),
ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),
ID_HIDDEN(ID_AA64SMFR0_EL1),
ID_UNALLOCATED(4,6),
ID_UNALLOCATED(4,7),
ID_WRITABLE(ID_AA64FPFR0_EL1, ~ID_AA64FPFR0_EL1_RES0),
/* CRm=5 */
{ SYS_DESC(SYS_ID_AA64DFR0_EL1),
@@ -2541,7 +2563,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
CTR_EL0_IDC_MASK |
CTR_EL0_DminLine_MASK |
CTR_EL0_IminLine_MASK),
{ SYS_DESC(SYS_SVCR), undef_access },
{ SYS_DESC(SYS_SVCR), undef_access, reset_val, SVCR, 0, .visibility = sme_visibility },
{ SYS_DESC(SYS_FPMR), undef_access, reset_val, FPMR, 0, .visibility = fp8_visibility },
{ PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr,
.reg = PMCR_EL0, .get_user = get_pmcr, .set_user = set_pmcr },
@@ -4566,6 +4589,9 @@ void kvm_calculate_traps(struct kvm_vcpu *vcpu)
if (kvm_has_feat(kvm, ID_AA64MMFR3_EL1, TCRX, IMP))
vcpu->arch.hcrx_el2 |= HCRX_EL2_TCR2En;
if (kvm_has_fpmr(kvm))
vcpu->arch.hcrx_el2 |= HCRX_EL2_EnFPM;
}
if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))