Commit 3d0dba57 authored by Marc Zyngier

KVM: arm64: PMU: Move the ID_AA64DFR0_EL1.PMUver limit to VM creation

As further patches will enable the selection of a PMU revision
from userspace, sample the supported PMU revision at VM creation
time, rather than recomputing it each time the ID_AA64DFR0_EL1
register is accessed.

This shouldn't result in any change in behaviour.
Reviewed-by: Reiji Watanabe <reijiw@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221113163832.3154370-11-maz@kernel.org
parent 26d2d059
@@ -163,6 +163,10 @@ struct kvm_arch {
 	u8 pfr0_csv2;
 	u8 pfr0_csv3;
+	struct {
+		u8 imp:4;
+		u8 unimp:4;
+	} dfr0_pmuver;
 
 	/* Hypercall features firmware registers' descriptor */
 	struct kvm_smccc_features smccc_feat;

@@ -164,6 +164,12 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	set_default_spectre(kvm);
 	kvm_arm_init_hypercalls(kvm);
 
+	/*
+	 * Initialise the default PMUver before there is a chance to
+	 * create an actual PMU.
+	 */
+	kvm->arch.dfr0_pmuver.imp = kvm_arm_pmu_get_pmuver_limit();
+
 	return ret;
 
 out_free_stage2_pgd:
 	kvm_free_stage2_pgd(&kvm->arch.mmu);

@@ -1047,3 +1047,14 @@ int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 
 	return -ENXIO;
 }
+
+u8 kvm_arm_pmu_get_pmuver_limit(void)
+{
+	u64 tmp;
+
+	tmp = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
+	tmp = cpuid_feature_cap_perfmon_field(tmp,
+					      ID_AA64DFR0_EL1_PMUVer_SHIFT,
+					      ID_AA64DFR0_EL1_PMUVer_V3P4);
+	return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), tmp);
+}

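For context, the helper above composes three kernel facilities: read_sanitised_ftr_reg() fetches the system-wide sanitised ID_AA64DFR0_EL1 value, cpuid_feature_cap_perfmon_field() squashes an IMPLEMENTATION DEFINED PMU to 0 and caps the PMUVer field at PMUv3 for ARMv8.4, and FIELD_GET() extracts the resulting 4-bit field. The standalone C sketch below mimics that arithmetic with plain bit operations; the macro names, the pmuver_limit() helper, and the sample register value are illustrative stand-ins, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* ID_AA64DFR0_EL1.PMUVer is the 4-bit field at bits [11:8]. */
#define PMUVER_SHIFT	8
#define PMUVER_MASK	(0xfULL << PMUVER_SHIFT)

/* Architectural PMUVer encodings (subset). */
#define PMUVER_V3P4	0x5	/* PMUv3 for Armv8.4 */
#define PMUVER_V3P5	0x6	/* PMUv3 for Armv8.5 */
#define PMUVER_IMP_DEF	0xf	/* IMPLEMENTATION DEFINED */

/*
 * Rough equivalent of cpuid_feature_cap_perfmon_field() followed by
 * FIELD_GET(): treat IMP_DEF as "no PMUv3", then clamp to the given cap.
 */
static uint8_t pmuver_limit(uint64_t dfr0, uint8_t cap)
{
	uint8_t pmuver = (dfr0 & PMUVER_MASK) >> PMUVER_SHIFT;

	if (pmuver == PMUVER_IMP_DEF)	/* IMP_DEF is not a usable PMUv3 */
		pmuver = 0;
	if (pmuver > cap)		/* cap at the requested revision */
		pmuver = cap;

	return pmuver;
}

int main(void)
{
	/* Hypothetical sanitised register value advertising PMUv3 for v8.5 */
	uint64_t dfr0 = (uint64_t)PMUVER_V3P5 << PMUVER_SHIFT;

	printf("capped PMUVer = %u\n", pmuver_limit(dfr0, PMUVER_V3P4));
	return 0;
}

Running it prints "capped PMUVer = 5", i.e. the per-VM limit that this patch records in kvm->arch.dfr0_pmuver.imp at VM creation time.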
@@ -1062,6 +1062,27 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
 	return true;
 }
 
+static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
+{
+	if (kvm_vcpu_has_pmu(vcpu))
+		return vcpu->kvm->arch.dfr0_pmuver.imp;
+
+	return vcpu->kvm->arch.dfr0_pmuver.unimp;
+}
+
+static u8 pmuver_to_perfmon(u8 pmuver)
+{
+	switch (pmuver) {
+	case ID_AA64DFR0_EL1_PMUVer_IMP:
+		return ID_DFR0_PERFMON_8_0;
+	case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
+		return ID_DFR0_PERFMON_IMP_DEF;
+	default:
+		/* Anything ARMv8.1+ and NI have the same value. For now. */
+		return pmuver;
+	}
+}
+
 /* Read a sanitised cpufeature ID register by sys_reg_desc */
 static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r)
 {

@@ -1111,18 +1132,17 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r
 		/* Limit debug to ARMv8.0 */
 		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer);
 		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), 6);
-		/* Limit guests to PMUv3 for ARMv8.4 */
-		val = cpuid_feature_cap_perfmon_field(val,
-						      ID_AA64DFR0_EL1_PMUVer_SHIFT,
-						      kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_EL1_PMUVer_V3P4 : 0);
+		/* Set PMUver to the required version */
+		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
+		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
+				  vcpu_pmuver(vcpu));
 		/* Hide SPE from guests */
 		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer);
 		break;
 	case SYS_ID_DFR0_EL1:
-		/* Limit guests to PMUv3 for ARMv8.4 */
-		val = cpuid_feature_cap_perfmon_field(val,
-						      ID_DFR0_PERFMON_SHIFT,
-						      kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_PERFMON_8_4 : 0);
+		val &= ~ARM64_FEATURE_MASK(ID_DFR0_PERFMON);
+		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_DFR0_PERFMON),
+				  pmuver_to_perfmon(vcpu_pmuver(vcpu)));
 		break;
 	}

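One detail worth spelling out in the hunks above: pmuver_to_perfmon() converts the AArch64 PMUVer encoding into the AArch32 ID_DFR0_EL1.PerfMon encoding. The two only differ for PMUv3-for-v8.0 (PMUVer 0x1 vs PerfMon 0x3) and share values for "not implemented", IMPLEMENTATION DEFINED, and everything v8.1 and later, which is what the "For now." comment alludes to. A standalone sketch of the mapping, using raw encodings and stand-in macro names rather than the kernel's headers:

#include <assert.h>
#include <stdint.h>

/* Raw architectural encodings (stand-in names, not the kernel's). */
#define AA64_PMUVER_IMP		0x1	/* PMUv3 */
#define AA64_PMUVER_IMP_DEF	0xf
#define DFR0_PERFMON_8_0	0x3	/* PMUv3 in the AArch32 view */
#define DFR0_PERFMON_IMP_DEF	0xf

/* Same translation as the new pmuver_to_perfmon() helper. */
static uint8_t pmuver_to_perfmon(uint8_t pmuver)
{
	switch (pmuver) {
	case AA64_PMUVER_IMP:
		return DFR0_PERFMON_8_0;
	case AA64_PMUVER_IMP_DEF:
		return DFR0_PERFMON_IMP_DEF;
	default:
		/* NI (0) and anything v8.1+ share the same encoding */
		return pmuver;
	}
}

int main(void)
{
	assert(pmuver_to_perfmon(0x0) == 0x0);	/* no PMU */
	assert(pmuver_to_perfmon(0x1) == 0x3);	/* PMUv3 -> PerfMon for v8.0 */
	assert(pmuver_to_perfmon(0x5) == 0x5);	/* PMUv3 for v8.4 */
	assert(pmuver_to_perfmon(0xf) == 0xf);	/* IMP_DEF */
	return 0;
}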
@@ -89,6 +89,8 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
 		vcpu->arch.pmu.events = *kvm_get_pmu_events();	\
 	} while (0)
 
+u8 kvm_arm_pmu_get_pmuver_limit(void);
+
 #else
 struct kvm_pmu {
 };

@@ -154,6 +156,10 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
+static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
+{
+	return 0;
+}
 
 #endif