Commit aa50479b authored by Mark Brown, committed by Will Deacon

arm64/sysreg: Add _EL1 into ID_AA64ISAR1_EL1 definition names

Normally we include the full register name in the defines for fields within
registers but this has not been followed for ID registers. In preparation
for automatic generation of defines add the _EL1s into the defines for
ID_AA64ISAR1_EL1 to follow the convention. No functional changes.
Signed-off-by: Mark Brown <broonie@kernel.org>
Link: https://lore.kernel.org/r/20220704170302.2609529-16-broonie@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
parent b7e4a2d7
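The hunks below only rename macros; the numeric field values and the logic that consumes them are unchanged. Callers such as ARM64_FEATURE_MASK() derive a field mask from the corresponding *_SHIFT define by token-pasting, so once the prefix carries the full register name every user simply follows the rename. A minimal, self-contained sketch of that pattern follows; the FIELD_MASK() helper and main() are illustrative stand-ins, not the kernel's actual definitions.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's mask-helper pattern: the macro
 * pastes "_SHIFT" onto the field name it is given, and ID register
 * fields are 4 bits wide. */
#define FIELD_MASK(field)       (((uint64_t)0xf) << field##_SHIFT)

/* Old and new spellings of the same field; the value does not change. */
#define ID_AA64ISAR1_APA_SHIFT          4
#define ID_AA64ISAR1_EL1_APA_SHIFT      4

int main(void)
{
        /* Both spellings produce the identical mask (0xf0): the rename is
         * purely a naming-convention change with no functional effect. */
        printf("old mask: %#llx\n",
               (unsigned long long)FIELD_MASK(ID_AA64ISAR1_APA));
        printf("new mask: %#llx\n",
               (unsigned long long)FIELD_MASK(ID_AA64ISAR1_EL1_APA));
        return 0;
}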
@@ -59,7 +59,7 @@ alternative_else_nop_endif
 	.macro __ptrauth_keys_init_cpu tsk, tmp1, tmp2, tmp3
 	mrs	\tmp1, id_aa64isar1_el1
-	ubfx	\tmp1, \tmp1, #ID_AA64ISAR1_APA_SHIFT, #8
+	ubfx	\tmp1, \tmp1, #ID_AA64ISAR1_EL1_APA_SHIFT, #8
 	mrs_s	\tmp2, SYS_ID_AA64ISAR2_EL1
 	ubfx	\tmp2, \tmp2, #ID_AA64ISAR2_APA3_SHIFT, #4
 	orr	\tmp1, \tmp1, \tmp2
@@ -705,37 +705,37 @@
 #define MAIR_ATTRIDX(attr, idx)		((attr) << ((idx) * 8))
 /* id_aa64isar1 */
-#define ID_AA64ISAR1_I8MM_SHIFT		52
-#define ID_AA64ISAR1_DGH_SHIFT		48
-#define ID_AA64ISAR1_BF16_SHIFT		44
-#define ID_AA64ISAR1_SPECRES_SHIFT	40
-#define ID_AA64ISAR1_SB_SHIFT		36
-#define ID_AA64ISAR1_FRINTTS_SHIFT	32
-#define ID_AA64ISAR1_GPI_SHIFT		28
-#define ID_AA64ISAR1_GPA_SHIFT		24
-#define ID_AA64ISAR1_LRCPC_SHIFT	20
-#define ID_AA64ISAR1_FCMA_SHIFT		16
-#define ID_AA64ISAR1_JSCVT_SHIFT	12
-#define ID_AA64ISAR1_API_SHIFT		8
-#define ID_AA64ISAR1_APA_SHIFT		4
-#define ID_AA64ISAR1_DPB_SHIFT		0
-#define ID_AA64ISAR1_APA_NI			0x0
-#define ID_AA64ISAR1_APA_PAuth			0x1
-#define ID_AA64ISAR1_APA_ARCH_EPAC		0x2
-#define ID_AA64ISAR1_APA_Pauth2			0x3
-#define ID_AA64ISAR1_APA_FPAC			0x4
-#define ID_AA64ISAR1_APA_FPACCOMBINE		0x5
-#define ID_AA64ISAR1_API_NI			0x0
-#define ID_AA64ISAR1_API_PAuth			0x1
-#define ID_AA64ISAR1_API_EPAC			0x2
-#define ID_AA64ISAR1_API_PAuth2			0x3
-#define ID_AA64ISAR1_API_FPAC			0x4
-#define ID_AA64ISAR1_API_FPACCOMBINE		0x5
-#define ID_AA64ISAR1_GPA_NI			0x0
-#define ID_AA64ISAR1_GPA_IMP			0x1
-#define ID_AA64ISAR1_GPI_NI			0x0
-#define ID_AA64ISAR1_GPI_IMP			0x1
+#define ID_AA64ISAR1_EL1_I8MM_SHIFT		52
+#define ID_AA64ISAR1_EL1_DGH_SHIFT		48
+#define ID_AA64ISAR1_EL1_BF16_SHIFT		44
+#define ID_AA64ISAR1_EL1_SPECRES_SHIFT		40
+#define ID_AA64ISAR1_EL1_SB_SHIFT		36
+#define ID_AA64ISAR1_EL1_FRINTTS_SHIFT		32
+#define ID_AA64ISAR1_EL1_GPI_SHIFT		28
+#define ID_AA64ISAR1_EL1_GPA_SHIFT		24
+#define ID_AA64ISAR1_EL1_LRCPC_SHIFT		20
+#define ID_AA64ISAR1_EL1_FCMA_SHIFT		16
+#define ID_AA64ISAR1_EL1_JSCVT_SHIFT		12
+#define ID_AA64ISAR1_EL1_API_SHIFT		8
+#define ID_AA64ISAR1_EL1_APA_SHIFT		4
+#define ID_AA64ISAR1_EL1_DPB_SHIFT		0
+#define ID_AA64ISAR1_EL1_APA_NI			0x0
+#define ID_AA64ISAR1_EL1_APA_PAuth		0x1
+#define ID_AA64ISAR1_EL1_APA_ARCH_EPAC		0x2
+#define ID_AA64ISAR1_EL1_APA_Pauth2		0x3
+#define ID_AA64ISAR1_EL1_APA_FPAC		0x4
+#define ID_AA64ISAR1_EL1_APA_FPACCOMBINE	0x5
+#define ID_AA64ISAR1_EL1_API_NI			0x0
+#define ID_AA64ISAR1_EL1_API_PAuth		0x1
+#define ID_AA64ISAR1_EL1_API_EPAC		0x2
+#define ID_AA64ISAR1_EL1_API_PAuth2		0x3
+#define ID_AA64ISAR1_EL1_API_FPAC		0x4
+#define ID_AA64ISAR1_EL1_API_FPACCOMBINE	0x5
+#define ID_AA64ISAR1_EL1_GPA_NI			0x0
+#define ID_AA64ISAR1_EL1_GPA_IMP		0x1
+#define ID_AA64ISAR1_EL1_GPI_NI			0x0
+#define ID_AA64ISAR1_EL1_GPI_IMP		0x1
 /* id_aa64isar2 */
 #define ID_AA64ISAR2_BC_SHIFT		28
@@ -63,10 +63,10 @@ static const struct ftr_set_desc isar1 __initconst = {
 	.name		= "id_aa64isar1",
 	.override	= &id_aa64isar1_override,
 	.fields		= {
-		{ "gpi", ID_AA64ISAR1_GPI_SHIFT },
-		{ "gpa", ID_AA64ISAR1_GPA_SHIFT },
-		{ "api", ID_AA64ISAR1_API_SHIFT },
-		{ "apa", ID_AA64ISAR1_APA_SHIFT },
+		{ "gpi", ID_AA64ISAR1_EL1_GPI_SHIFT },
+		{ "gpa", ID_AA64ISAR1_EL1_GPA_SHIFT },
+		{ "api", ID_AA64ISAR1_EL1_API_SHIFT },
+		{ "apa", ID_AA64ISAR1_EL1_APA_SHIFT },
 		{}
 	},
 };
@@ -176,20 +176,20 @@
 	)
 #define PVM_ID_AA64ISAR1_ALLOW (\
-	ARM64_FEATURE_MASK(ID_AA64ISAR1_DPB) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR1_APA) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR1_API) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR1_JSCVT) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR1_FCMA) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR1_LRCPC) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR1_GPA) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR1_GPI) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR1_FRINTTS) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR1_SB) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR1_SPECRES) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR1_BF16) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR1_DGH) | \
-	ARM64_FEATURE_MASK(ID_AA64ISAR1_I8MM) \
+	ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_DPB) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_JSCVT) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_FCMA) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_LRCPC) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_FRINTTS) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_SB) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_SPECRES) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_BF16) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_DGH) | \
+	ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_I8MM) \
 	)
 #define PVM_ID_AA64ISAR2_ALLOW (\
@@ -173,10 +173,10 @@ static u64 get_pvm_id_aa64isar1(const struct kvm_vcpu *vcpu)
 	u64 allow_mask = PVM_ID_AA64ISAR1_ALLOW;
 	if (!vcpu_has_ptrauth(vcpu))
-		allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_APA) |
-				ARM64_FEATURE_MASK(ID_AA64ISAR1_API) |
-				ARM64_FEATURE_MASK(ID_AA64ISAR1_GPA) |
-				ARM64_FEATURE_MASK(ID_AA64ISAR1_GPI));
+		allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
+				ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
+				ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
+				ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
 	return id_aa64isar1_el1_sys_val & allow_mask;
 }
@@ -1136,10 +1136,10 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 		break;
 	case SYS_ID_AA64ISAR1_EL1:
 		if (!vcpu_has_ptrauth(vcpu))
-			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_APA) |
-				 ARM64_FEATURE_MASK(ID_AA64ISAR1_API) |
-				 ARM64_FEATURE_MASK(ID_AA64ISAR1_GPA) |
-				 ARM64_FEATURE_MASK(ID_AA64ISAR1_GPI));
+			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
+				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
+				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
+				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
 		break;
 	case SYS_ID_AA64ISAR2_EL1:
 		if (!vcpu_has_ptrauth(vcpu))