Commit 753d734f authored by Marc Zyngier

Merge remote-tracking branch 'arm64/for-next/sysregs' into kvmarm-master/next

Merge arm64's sysreg repainting branch to avoid too many
ugly conflicts...
Signed-off-by: Marc Zyngier <maz@kernel.org>
parents 86f27d84 acb3f4bc
@@ -165,31 +165,6 @@
 #define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5)
 #define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6)
-#define SYS_ID_PFR0_EL1 sys_reg(3, 0, 0, 1, 0)
-#define SYS_ID_PFR1_EL1 sys_reg(3, 0, 0, 1, 1)
-#define SYS_ID_PFR2_EL1 sys_reg(3, 0, 0, 3, 4)
-#define SYS_ID_DFR0_EL1 sys_reg(3, 0, 0, 1, 2)
-#define SYS_ID_DFR1_EL1 sys_reg(3, 0, 0, 3, 5)
-#define SYS_ID_AFR0_EL1 sys_reg(3, 0, 0, 1, 3)
-#define SYS_ID_MMFR0_EL1 sys_reg(3, 0, 0, 1, 4)
-#define SYS_ID_MMFR1_EL1 sys_reg(3, 0, 0, 1, 5)
-#define SYS_ID_MMFR2_EL1 sys_reg(3, 0, 0, 1, 6)
-#define SYS_ID_MMFR3_EL1 sys_reg(3, 0, 0, 1, 7)
-#define SYS_ID_MMFR4_EL1 sys_reg(3, 0, 0, 2, 6)
-#define SYS_ID_MMFR5_EL1 sys_reg(3, 0, 0, 3, 6)
-#define SYS_ID_ISAR0_EL1 sys_reg(3, 0, 0, 2, 0)
-#define SYS_ID_ISAR1_EL1 sys_reg(3, 0, 0, 2, 1)
-#define SYS_ID_ISAR2_EL1 sys_reg(3, 0, 0, 2, 2)
-#define SYS_ID_ISAR3_EL1 sys_reg(3, 0, 0, 2, 3)
-#define SYS_ID_ISAR4_EL1 sys_reg(3, 0, 0, 2, 4)
-#define SYS_ID_ISAR5_EL1 sys_reg(3, 0, 0, 2, 5)
-#define SYS_ID_ISAR6_EL1 sys_reg(3, 0, 0, 2, 7)
-#define SYS_MVFR0_EL1 sys_reg(3, 0, 0, 3, 0)
-#define SYS_MVFR1_EL1 sys_reg(3, 0, 0, 3, 1)
-#define SYS_MVFR2_EL1 sys_reg(3, 0, 0, 3, 2)
 #define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1)
 #define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5)
 #define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6)
@@ -692,114 +667,6 @@
 #define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_48
 #endif
-#define ID_DFR0_PERFMON_SHIFT 24
-#define ID_DFR0_PERFMON_8_0 0x3
-#define ID_DFR0_PERFMON_8_1 0x4
-#define ID_DFR0_PERFMON_8_4 0x5
-#define ID_DFR0_PERFMON_8_5 0x6
-#define ID_DFR0_PERFMON_8_7 0x7
-#define ID_DFR0_PERFMON_IMP_DEF 0xf
-#define ID_ISAR4_SWP_FRAC_SHIFT 28
-#define ID_ISAR4_PSR_M_SHIFT 24
-#define ID_ISAR4_SYNCH_PRIM_FRAC_SHIFT 20
-#define ID_ISAR4_BARRIER_SHIFT 16
-#define ID_ISAR4_SMC_SHIFT 12
-#define ID_ISAR4_WRITEBACK_SHIFT 8
-#define ID_ISAR4_WITHSHIFTS_SHIFT 4
-#define ID_ISAR4_UNPRIV_SHIFT 0
-#define ID_DFR1_MTPMU_SHIFT 0
-#define ID_ISAR0_DIVIDE_SHIFT 24
-#define ID_ISAR0_DEBUG_SHIFT 20
-#define ID_ISAR0_COPROC_SHIFT 16
-#define ID_ISAR0_CMPBRANCH_SHIFT 12
-#define ID_ISAR0_BITFIELD_SHIFT 8
-#define ID_ISAR0_BITCOUNT_SHIFT 4
-#define ID_ISAR0_SWAP_SHIFT 0
-#define ID_ISAR5_RDM_SHIFT 24
-#define ID_ISAR5_CRC32_SHIFT 16
-#define ID_ISAR5_SHA2_SHIFT 12
-#define ID_ISAR5_SHA1_SHIFT 8
-#define ID_ISAR5_AES_SHIFT 4
-#define ID_ISAR5_SEVL_SHIFT 0
-#define ID_ISAR6_I8MM_SHIFT 24
-#define ID_ISAR6_BF16_SHIFT 20
-#define ID_ISAR6_SPECRES_SHIFT 16
-#define ID_ISAR6_SB_SHIFT 12
-#define ID_ISAR6_FHM_SHIFT 8
-#define ID_ISAR6_DP_SHIFT 4
-#define ID_ISAR6_JSCVT_SHIFT 0
-#define ID_MMFR0_INNERSHR_SHIFT 28
-#define ID_MMFR0_FCSE_SHIFT 24
-#define ID_MMFR0_AUXREG_SHIFT 20
-#define ID_MMFR0_TCM_SHIFT 16
-#define ID_MMFR0_SHARELVL_SHIFT 12
-#define ID_MMFR0_OUTERSHR_SHIFT 8
-#define ID_MMFR0_PMSA_SHIFT 4
-#define ID_MMFR0_VMSA_SHIFT 0
-#define ID_MMFR4_EVT_SHIFT 28
-#define ID_MMFR4_CCIDX_SHIFT 24
-#define ID_MMFR4_LSM_SHIFT 20
-#define ID_MMFR4_HPDS_SHIFT 16
-#define ID_MMFR4_CNP_SHIFT 12
-#define ID_MMFR4_XNX_SHIFT 8
-#define ID_MMFR4_AC2_SHIFT 4
-#define ID_MMFR4_SPECSEI_SHIFT 0
-#define ID_MMFR5_ETS_SHIFT 0
-#define ID_PFR0_DIT_SHIFT 24
-#define ID_PFR0_CSV2_SHIFT 16
-#define ID_PFR0_STATE3_SHIFT 12
-#define ID_PFR0_STATE2_SHIFT 8
-#define ID_PFR0_STATE1_SHIFT 4
-#define ID_PFR0_STATE0_SHIFT 0
-#define ID_DFR0_PERFMON_SHIFT 24
-#define ID_DFR0_MPROFDBG_SHIFT 20
-#define ID_DFR0_MMAPTRC_SHIFT 16
-#define ID_DFR0_COPTRC_SHIFT 12
-#define ID_DFR0_MMAPDBG_SHIFT 8
-#define ID_DFR0_COPSDBG_SHIFT 4
-#define ID_DFR0_COPDBG_SHIFT 0
-#define ID_PFR2_SSBS_SHIFT 4
-#define ID_PFR2_CSV3_SHIFT 0
-#define MVFR0_FPROUND_SHIFT 28
-#define MVFR0_FPSHVEC_SHIFT 24
-#define MVFR0_FPSQRT_SHIFT 20
-#define MVFR0_FPDIVIDE_SHIFT 16
-#define MVFR0_FPTRAP_SHIFT 12
-#define MVFR0_FPDP_SHIFT 8
-#define MVFR0_FPSP_SHIFT 4
-#define MVFR0_SIMD_SHIFT 0
-#define MVFR1_SIMDFMAC_SHIFT 28
-#define MVFR1_FPHP_SHIFT 24
-#define MVFR1_SIMDHP_SHIFT 20
-#define MVFR1_SIMDSP_SHIFT 16
-#define MVFR1_SIMDINT_SHIFT 12
-#define MVFR1_SIMDLS_SHIFT 8
-#define MVFR1_FPDNAN_SHIFT 4
-#define MVFR1_FPFTZ_SHIFT 0
-#define ID_PFR1_GIC_SHIFT 28
-#define ID_PFR1_VIRT_FRAC_SHIFT 24
-#define ID_PFR1_SEC_FRAC_SHIFT 20
-#define ID_PFR1_GENTIMER_SHIFT 16
-#define ID_PFR1_VIRTUALIZATION_SHIFT 12
-#define ID_PFR1_MPROGMOD_SHIFT 8
-#define ID_PFR1_SECURITY_SHIFT 4
-#define ID_PFR1_PROGMOD_SHIFT 0
 #if defined(CONFIG_ARM64_4K_PAGES)
 #define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN4_SHIFT
 #define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN
@@ -817,9 +684,6 @@
 #define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT
 #endif
-#define MVFR2_FPMISC_SHIFT 4
-#define MVFR2_SIMDMISC_SHIFT 0
 #define CPACR_EL1_FPEN_EL1EN (BIT(20)) /* enable EL1 access */
 #define CPACR_EL1_FPEN_EL0EN (BIT(21)) /* enable EL0 access, if EL1EN set */
@@ -853,10 +717,6 @@
 #define SYS_RGSR_EL1_SEED_SHIFT 8
 #define SYS_RGSR_EL1_SEED_MASK 0xffffUL
-/* GMID_EL1 field definitions */
-#define GMID_EL1_BS_SHIFT 0
-#define GMID_EL1_BS_SIZE 4
 /* TFSR{,E0}_EL1 bit definitions */
 #define SYS_TFSR_EL1_TF0_SHIFT 0
 #define SYS_TFSR_EL1_TF1_SHIFT 1
......
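The AArch32 ID register field macros removed above now come from the generated sysreg header (the generator's define() helper is patched at the bottom of this diff). As a rough sketch only, taking the values from the removed lines and assuming the generator's usual SHIFT/WIDTH/MASK spelling, the ID_DFR0_EL1.PerfMon field ends up looking like:

/*
 * Sketch of the generated replacements for the removed ID_DFR0_PERFMON_*
 * macros. The _SHIFT/_WIDTH/_MASK names are assumed; only PerfMon_PMUv3 and
 * PerfMon_IMPDEF are confirmed by the KVM hunks further down.
 */
#define ID_DFR0_EL1_PerfMon_SHIFT	24
#define ID_DFR0_EL1_PerfMon_WIDTH	4
#define ID_DFR0_EL1_PerfMon_MASK	GENMASK(27, 24)
#define ID_DFR0_EL1_PerfMon_PMUv3	0x3	/* was ID_DFR0_PERFMON_8_0 */
#define ID_DFR0_EL1_PerfMon_IMPDEF	0xf	/* was ID_DFR0_PERFMON_IMP_DEF */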
This diff is collapsed.
@@ -1071,9 +1071,9 @@ static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
 static u8 perfmon_to_pmuver(u8 perfmon)
 {
 	switch (perfmon) {
-	case ID_DFR0_PERFMON_8_0:
+	case ID_DFR0_EL1_PerfMon_PMUv3:
 		return ID_AA64DFR0_EL1_PMUVer_IMP;
-	case ID_DFR0_PERFMON_IMP_DEF:
+	case ID_DFR0_EL1_PerfMon_IMPDEF:
 		return ID_AA64DFR0_EL1_PMUVer_IMP_DEF;
 	default:
 		/* Anything ARMv8.1+ and NI have the same value. For now. */
@@ -1085,9 +1085,9 @@ static u8 pmuver_to_perfmon(u8 pmuver)
 {
 	switch (pmuver) {
 	case ID_AA64DFR0_EL1_PMUVer_IMP:
-		return ID_DFR0_PERFMON_8_0;
+		return ID_DFR0_EL1_PerfMon_PMUv3;
 	case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
-		return ID_DFR0_PERFMON_IMP_DEF;
+		return ID_DFR0_EL1_PerfMon_IMPDEF;
 	default:
 		/* Anything ARMv8.1+ and NI have the same value. For now. */
 		return pmuver;
@@ -1151,8 +1151,8 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r
 		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer);
 		break;
 	case SYS_ID_DFR0_EL1:
-		val &= ~ARM64_FEATURE_MASK(ID_DFR0_PERFMON);
-		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_DFR0_PERFMON),
+		val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
+		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon),
 				  pmuver_to_perfmon(vcpu_pmuver(vcpu)));
 		break;
 	}
@@ -1307,12 +1307,12 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
 	 * AArch64 side (as everything is emulated with that), and
 	 * that this is a PMUv3.
 	 */
-	perfmon = FIELD_GET(ARM64_FEATURE_MASK(ID_DFR0_PERFMON), val);
-	if ((perfmon != ID_DFR0_PERFMON_IMP_DEF && perfmon > host_perfmon) ||
-	    (perfmon != 0 && perfmon < ID_DFR0_PERFMON_8_0))
+	perfmon = FIELD_GET(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon), val);
+	if ((perfmon != ID_DFR0_EL1_PerfMon_IMPDEF && perfmon > host_perfmon) ||
+	    (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3))
 		return -EINVAL;
-	valid_pmu = (perfmon != 0 && perfmon != ID_DFR0_PERFMON_IMP_DEF);
+	valid_pmu = (perfmon != 0 && perfmon != ID_DFR0_EL1_PerfMon_IMPDEF);
 	/* Make sure view register and PMU support do match */
 	if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
@@ -1320,7 +1320,7 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
 	/* We can only differ with PerfMon, and anything else is an error */
 	val ^= read_id_reg(vcpu, rd);
-	val &= ~ARM64_FEATURE_MASK(ID_DFR0_PERFMON);
+	val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
 	if (val)
 		return -EINVAL;
......
@@ -18,7 +18,7 @@
  */
 	.macro multitag_transfer_size, reg, tmp
 	mrs_s \reg, SYS_GMID_EL1
-	ubfx \reg, \reg, #GMID_EL1_BS_SHIFT, #GMID_EL1_BS_SIZE
+	ubfx \reg, \reg, #GMID_EL1_BS_SHIFT, #GMID_EL1_BS_WIDTH
 	mov \tmp, #4
 	lsl \reg, \tmp, \reg
 	.endm
......
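For reference, here is a C sketch of what the assembly macro above computes; the renamed GMID_EL1_BS_WIDTH is the 4-bit width of the BS field (the old GMID_EL1_BS_SIZE 4 seen earlier), not the transfer size itself. This is a non-kernel sketch that assumes the usual kernel accessors.

/*
 * Kernel context assumed: u64, read_sysreg_s() and the GMID_EL1_BS_* macros
 * come from <linux/types.h> and <asm/sysreg.h>.
 */
#include <linux/types.h>
#include <asm/sysreg.h>

/*
 * Rough equivalent of multitag_transfer_size: extract the BS field from
 * GMID_EL1 and return 4 << BS, mirroring the ubfx/mov/lsl sequence above.
 */
static inline unsigned long multitag_transfer_size_c(void)
{
	u64 gmid = read_sysreg_s(SYS_GMID_EL1);
	u64 bs = (gmid >> GMID_EL1_BS_SHIFT) & ((1UL << GMID_EL1_BS_WIDTH) - 1);

	return 4UL << bs;
}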
@@ -33,7 +33,7 @@ function expect_fields(nf) {
 # Print a CPP macro definition, padded with spaces so that the macro bodies
 # line up in a column
 function define(name, val) {
-	printf "%-48s%s\n", "#define " name, val
+	printf "%-56s%s\n", "#define " name, val
 }

 # Print standard BITMASK/SHIFT/WIDTH CPP definitions for a field
......
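The %-48s to %-56s change widens the padding applied to every generated "#define NAME" so the macro bodies stay in one column even with the longer repainted names (register, _EL1 suffix, field, value). Using the hypothetical field names from the sketch earlier, the generated output would align like this:

/* Illustrative generator output only; names assumed, left part padded to 56 columns. */
#define ID_DFR0_EL1_PerfMon_SHIFT                       24
#define ID_DFR0_EL1_PerfMon_WIDTH                       4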
This diff is collapsed.