Commit 6ca2b9ca authored by Mark Brown, committed by Catalin Marinas

arm64/sysreg: Add _EL1 into ID_AA64PFR1_EL1 constant names

Our standard is to include the _EL1 suffix in the constant names for registers, but we did not do that for ID_AA64PFR1_EL1. Update the names to do so in preparation for conversion to automatic generation. No functional change.
Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Kristina Martsenko <kristina.martsenko@arm.com>
Link: https://lore.kernel.org/r/20220905225425.1871461-8-broonie@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 55adc08d
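For context, each of the renamed ID_AA64PFR1_EL1_* constants names a 4-bit unsigned field at a fixed bit position within the 64-bit ID_AA64PFR1_EL1 register; only the macro names change, not their values. Below is a minimal, standalone C sketch (not kernel code; the helper function and the sample register value are hypothetical) of how such a field is extracted and compared against a feature threshold, mirroring the id_aa64pfr1_mte() helper touched in the first hunk:

/*
 * Standalone illustration of the ID-register field extraction pattern
 * that these constants feed. Constant values match the defines renamed
 * in this patch.
 */
#include <stdint.h>
#include <stdio.h>

#define ID_AA64PFR1_EL1_MTE_SHIFT       8
#define ID_AA64PFR1_EL1_MTE             0x2     /* MTE at EL0 and EL1 */

/* Rough stand-in for the kernel's cpuid_feature_extract_unsigned_field(). */
static unsigned int extract_unsigned_field(uint64_t reg, unsigned int shift)
{
        return (unsigned int)(reg >> shift) & 0xf;
}

int main(void)
{
        /* Hypothetical raw ID_AA64PFR1_EL1 value whose MTE field is 2. */
        uint64_t pfr1 = (uint64_t)0x2 << ID_AA64PFR1_EL1_MTE_SHIFT;

        if (extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_MTE_SHIFT) >= ID_AA64PFR1_EL1_MTE)
                printf("MTE reported by ID_AA64PFR1_EL1\n");
        return 0;
}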
@@ -624,16 +624,16 @@ static inline bool id_aa64pfr0_sve(u64 pfr0)
 static inline bool id_aa64pfr1_sme(u64 pfr1)
 {
-        u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_SME_SHIFT);
+        u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_SME_SHIFT);
         return val > 0;
 }
 static inline bool id_aa64pfr1_mte(u64 pfr1)
 {
-        u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_MTE_SHIFT);
+        u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_MTE_SHIFT);
-        return val >= ID_AA64PFR1_MTE;
+        return val >= ID_AA64PFR1_EL1_MTE;
 }
 void __init setup_cpu_features(void);
@@ -149,7 +149,7 @@
         mov x0, xzr
         mrs x1, id_aa64pfr1_el1
-        ubfx x1, x1, #ID_AA64PFR1_SME_SHIFT, #4
+        ubfx x1, x1, #ID_AA64PFR1_EL1_SME_SHIFT, #4
         cbz x1, .Lset_fgt_\@
         /* Disable nVHE traps of TPIDR2 and SMPRI */
@@ -714,23 +714,23 @@
 #define ID_AA64PFR0_EL1_ELx_32BIT_64BIT 0x2
 /* id_aa64pfr1 */
-#define ID_AA64PFR1_SME_SHIFT           24
+#define ID_AA64PFR1_EL1_SME_SHIFT       24
-#define ID_AA64PFR1_MPAMFRAC_SHIFT      16
+#define ID_AA64PFR1_EL1_MPAMFRAC_SHIFT  16
-#define ID_AA64PFR1_RASFRAC_SHIFT       12
+#define ID_AA64PFR1_EL1_RASFRAC_SHIFT   12
-#define ID_AA64PFR1_MTE_SHIFT           8
+#define ID_AA64PFR1_EL1_MTE_SHIFT       8
-#define ID_AA64PFR1_SSBS_SHIFT          4
+#define ID_AA64PFR1_EL1_SSBS_SHIFT      4
-#define ID_AA64PFR1_BT_SHIFT            0
+#define ID_AA64PFR1_EL1_BT_SHIFT        0
-#define ID_AA64PFR1_SSBS_PSTATE_NI      0
+#define ID_AA64PFR1_EL1_SSBS_PSTATE_NI  0
-#define ID_AA64PFR1_SSBS_PSTATE_ONLY    1
+#define ID_AA64PFR1_EL1_SSBS_PSTATE_ONLY 1
-#define ID_AA64PFR1_SSBS_PSTATE_INSNS   2
+#define ID_AA64PFR1_EL1_SSBS_PSTATE_INSNS 2
-#define ID_AA64PFR1_BT_BTI              0x1
+#define ID_AA64PFR1_EL1_BT_BTI          0x1
-#define ID_AA64PFR1_SME                 1
+#define ID_AA64PFR1_EL1_SME             1
-#define ID_AA64PFR1_MTE_NI              0x0
+#define ID_AA64PFR1_EL1_MTE_NI          0x0
-#define ID_AA64PFR1_MTE_EL0             0x1
+#define ID_AA64PFR1_EL1_MTE_EL0         0x1
-#define ID_AA64PFR1_MTE                 0x2
+#define ID_AA64PFR1_EL1_MTE             0x2
-#define ID_AA64PFR1_MTE_ASYMM           0x3
+#define ID_AA64PFR1_EL1_MTE_ASYMM       0x3
 /* id_aa64mmfr0 */
 #define ID_AA64MMFR0_EL1_ECV_SHIFT      60
@@ -264,14 +264,14 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
 static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
         ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
-                       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SME_SHIFT, 4, 0),
+                       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SME_SHIFT, 4, 0),
-        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MPAMFRAC_SHIFT, 4, 0),
+        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MPAMFRAC_SHIFT, 4, 0),
-        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_RASFRAC_SHIFT, 4, 0),
+        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_RASFRAC_SHIFT, 4, 0),
         ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_MTE),
-                       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MTE_SHIFT, 4, ID_AA64PFR1_MTE_NI),
+                       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MTE_SHIFT, 4, ID_AA64PFR1_EL1_MTE_NI),
-        ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
+        ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SSBS_SHIFT, 4, ID_AA64PFR1_EL1_SSBS_PSTATE_NI),
         ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI),
-                       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_BT_SHIFT, 4, 0),
+                       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_BT_SHIFT, 4, 0),
         ARM64_FTR_END,
 };
@@ -2367,10 +2367,10 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
                 .matches = has_cpuid_feature,
                 .sys_reg = SYS_ID_AA64PFR1_EL1,
-                .field_pos = ID_AA64PFR1_SSBS_SHIFT,
+                .field_pos = ID_AA64PFR1_EL1_SSBS_SHIFT,
                 .field_width = 4,
                 .sign = FTR_UNSIGNED,
-                .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
+                .min_field_value = ID_AA64PFR1_EL1_SSBS_PSTATE_ONLY,
         },
 #ifdef CONFIG_ARM64_CNP
         {
@@ -2528,9 +2528,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                 .matches = has_cpuid_feature,
                 .cpu_enable = bti_enable,
                 .sys_reg = SYS_ID_AA64PFR1_EL1,
-                .field_pos = ID_AA64PFR1_BT_SHIFT,
+                .field_pos = ID_AA64PFR1_EL1_BT_SHIFT,
                 .field_width = 4,
-                .min_field_value = ID_AA64PFR1_BT_BTI,
+                .min_field_value = ID_AA64PFR1_EL1_BT_BTI,
                 .sign = FTR_UNSIGNED,
         },
 #endif
@@ -2541,9 +2541,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                 .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
                 .matches = has_cpuid_feature,
                 .sys_reg = SYS_ID_AA64PFR1_EL1,
-                .field_pos = ID_AA64PFR1_MTE_SHIFT,
+                .field_pos = ID_AA64PFR1_EL1_MTE_SHIFT,
                 .field_width = 4,
-                .min_field_value = ID_AA64PFR1_MTE,
+                .min_field_value = ID_AA64PFR1_EL1_MTE,
                 .sign = FTR_UNSIGNED,
                 .cpu_enable = cpu_enable_mte,
         },
@@ -2553,9 +2553,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                 .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
                 .matches = has_cpuid_feature,
                 .sys_reg = SYS_ID_AA64PFR1_EL1,
-                .field_pos = ID_AA64PFR1_MTE_SHIFT,
+                .field_pos = ID_AA64PFR1_EL1_MTE_SHIFT,
                 .field_width = 4,
-                .min_field_value = ID_AA64PFR1_MTE_ASYMM,
+                .min_field_value = ID_AA64PFR1_EL1_MTE_ASYMM,
                 .sign = FTR_UNSIGNED,
         },
 #endif /* CONFIG_ARM64_MTE */
@@ -2577,9 +2577,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                 .capability = ARM64_SME,
                 .sys_reg = SYS_ID_AA64PFR1_EL1,
                 .sign = FTR_UNSIGNED,
-                .field_pos = ID_AA64PFR1_SME_SHIFT,
+                .field_pos = ID_AA64PFR1_EL1_SME_SHIFT,
                 .field_width = 4,
-                .min_field_value = ID_AA64PFR1_SME,
+                .min_field_value = ID_AA64PFR1_EL1_SME,
                 .matches = has_cpuid_feature,
                 .cpu_enable = sme_kernel_enable,
         },
@@ -2739,24 +2739,24 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
         HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_F32MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_F32MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM),
         HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_F64MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_F64MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
 #endif
-        HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS),
+        HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_SSBS_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS),
 #ifdef CONFIG_ARM64_BTI
-        HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_BT_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_BT_BTI, CAP_HWCAP, KERNEL_HWCAP_BTI),
+        HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_BT_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_BT_BTI, CAP_HWCAP, KERNEL_HWCAP_BTI),
 #endif
 #ifdef CONFIG_ARM64_PTR_AUTH
         HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA),
         HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG),
 #endif
 #ifdef CONFIG_ARM64_MTE
-        HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE),
+        HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE),
-        HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_MTE_ASYMM, CAP_HWCAP, KERNEL_HWCAP_MTE3),
+        HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_MTE_ASYMM, CAP_HWCAP, KERNEL_HWCAP_MTE3),
 #endif /* CONFIG_ARM64_MTE */
         HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_EL1_ECV_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV),
         HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP),
         HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES),
         HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_WFxT_IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT),
 #ifdef CONFIG_ARM64_SME
-        HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SME_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_SME, CAP_HWCAP, KERNEL_HWCAP_SME),
+        HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_SME_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_EL1_SME, CAP_HWCAP, KERNEL_HWCAP_SME),
         HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_FA64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
         HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_I16I64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
         HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F64F64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F64F64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64),
@@ -109,7 +109,7 @@ SYM_CODE_START_LOCAL(__finalise_el2)
         msr_s SYS_ZCR_EL2, x1 // length for EL1.
 .Lskip_sve:
-        check_override id_aa64pfr1 ID_AA64PFR1_SME_SHIFT .Linit_sme .Lskip_sme
+        check_override id_aa64pfr1 ID_AA64PFR1_EL1_SME_SHIFT .Linit_sme .Lskip_sme
 .Linit_sme: /* SME register access and priority mapping */
         mrs x0, cptr_el2 // Disable SME traps
@@ -98,9 +98,9 @@ static const struct ftr_set_desc pfr1 __initconst = {
         .name = "id_aa64pfr1",
         .override = &id_aa64pfr1_override,
         .fields = {
-                FIELD("bt", ID_AA64PFR1_BT_SHIFT, NULL ),
+                FIELD("bt", ID_AA64PFR1_EL1_BT_SHIFT, NULL ),
-                FIELD("mte", ID_AA64PFR1_MTE_SHIFT, NULL),
+                FIELD("mte", ID_AA64PFR1_EL1_MTE_SHIFT, NULL),
-                FIELD("sme", ID_AA64PFR1_SME_SHIFT, pfr1_sme_filter),
+                FIELD("sme", ID_AA64PFR1_EL1_SME_SHIFT, pfr1_sme_filter),
                 {}
         },
 };
@@ -62,8 +62,8 @@
  * - Speculative Store Bypassing
  */
 #define PVM_ID_AA64PFR1_ALLOW (\
-        ARM64_FEATURE_MASK(ID_AA64PFR1_BT) | \
+        ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_BT) | \
-        ARM64_FEATURE_MASK(ID_AA64PFR1_SSBS) \
+        ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SSBS) \
         )
 /*
@@ -66,7 +66,7 @@ static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu)
         u64 hcr_clear = 0;
         /* Memory Tagging: Trap and Treat as Untagged if not supported. */
-        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_MTE), feature_ids)) {
+        if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), feature_ids)) {
                 hcr_set |= HCR_TID5;
                 hcr_clear |= HCR_DCT | HCR_ATA;
         }
@@ -106,7 +106,7 @@ static u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu)
         u64 allow_mask = PVM_ID_AA64PFR1_ALLOW;
         if (!kvm_has_mte(kvm))
-                allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
+                allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
         return id_aa64pfr1_el1_sys_val & allow_mask;
 }
@@ -1090,9 +1090,9 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
                 break;
         case SYS_ID_AA64PFR1_EL1:
                 if (!kvm_has_mte(vcpu->kvm))
-                        val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
+                        val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
-                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_SME);
+                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
                 break;
         case SYS_ID_AA64ISAR1_EL1:
                 if (!vcpu_has_ptrauth(vcpu))
@@ -686,7 +686,7 @@ static bool arm64_early_this_cpu_has_bti(void)
         pfr1 = __read_sysreg_by_encoding(SYS_ID_AA64PFR1_EL1);
         return cpuid_feature_extract_unsigned_field(pfr1,
-                                                    ID_AA64PFR1_BT_SHIFT);
+                                                    ID_AA64PFR1_EL1_BT_SHIFT);
 }
 /*
@@ -434,8 +434,8 @@ SYM_FUNC_START(__cpu_setup)
          * (ID_AA64PFR1_EL1[11:8] > 1).
          */
         mrs x10, ID_AA64PFR1_EL1
-        ubfx x10, x10, #ID_AA64PFR1_MTE_SHIFT, #4
+        ubfx x10, x10, #ID_AA64PFR1_EL1_MTE_SHIFT, #4
-        cmp x10, #ID_AA64PFR1_MTE
+        cmp x10, #ID_AA64PFR1_EL1_MTE
         b.lt 1f
         /* Normal Tagged memory type at the corresponding MAIR index */