Commit d25f30fe authored by Marc Zyngier

Merge branch kvm-arm64/aarch32-idreg-trap into kvmarm-master/next

* kvm-arm64/aarch32-idreg-trap:
  : .
  : Add trapping/sanitising infrastructure for AArch32 system registers,
  : allowing more control over what we actually expose (such as the PMU).
  :
  : Patches courtesy of Oliver and Alexandru.
  : .
  KVM: arm64: Fix new instances of 32bit ESRs
  KVM: arm64: Hide AArch32 PMU registers when not available
  KVM: arm64: Start trapping ID registers for 32 bit guests
  KVM: arm64: Plumb cp10 ID traps through the AArch64 sysreg handler
  KVM: arm64: Wire up CP15 feature registers to their AArch64 equivalents
  KVM: arm64: Don't write to Rt unless sys_reg emulation succeeds
  KVM: arm64: Return a bool from emulate_cp()
Signed-off-by: Marc Zyngier <maz@kernel.org>
parents 904cabf4 ee87a9bd
@@ -80,11 +80,12 @@
* FMO: Override CPSR.F and enable signaling with VF
* SWIO: Turn set/way invalidates into set/way clean+invalidate
* PTW: Take a stage2 fault if a stage1 walk steps in device memory
* TID3: Trap EL1 reads of group 3 ID registers
*/
#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
HCR_BSU_IS | HCR_FB | HCR_TACR | \
HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
HCR_FMO | HCR_IMO | HCR_PTW )
HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3 )
#define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
#define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
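For illustration (hypothetical guest code, not part of the patch): with HCR_EL2.TID3 now set for every guest, an EL1 read of a group 3 ID register such as ID_ISAR0 traps to EL2 and is answered with the sanitised value instead of the raw hardware register. A minimal AArch32 guest-side sketch, assuming GCC inline asm:

#include <stdint.h>

/*
 * AArch32 guest snippet: read ID_ISAR0 (coproc p15, opc1 0, CRn c0,
 * CRm c2, opc2 0). With HCR_EL2.TID3 set, this MRC traps to EL2 and
 * KVM emulates it from the AArch64 sysreg table.
 */
static inline uint32_t read_id_isar0(void)
{
	uint32_t val;

	asm volatile("mrc p15, 0, %0, c0, c2, 0" : "=r" (val));
	return val;
}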
......
@@ -87,13 +87,6 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
if (vcpu_el1_is_32bit(vcpu))
vcpu->arch.hcr_el2 &= ~HCR_RW;
else
/*
* TID3: trap feature register accesses that we virtualise.
* For now this is conditional, since no AArch32 feature regs
* are currently virtualised.
*/
vcpu->arch.hcr_el2 |= HCR_TID3;
if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
vcpu_el1_is_32bit(vcpu))
......
@@ -688,6 +688,7 @@ int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);
......
@@ -194,6 +194,7 @@ static exit_handle_fn arm_exit_handlers[] = {
[ESR_ELx_EC_CP15_64] = kvm_handle_cp15_64,
[ESR_ELx_EC_CP14_MR] = kvm_handle_cp14_32,
[ESR_ELx_EC_CP14_LS] = kvm_handle_cp14_load_store,
[ESR_ELx_EC_CP10_ID] = kvm_handle_cp10_id,
[ESR_ELx_EC_CP14_64] = kvm_handle_cp14_64,
[ESR_ELx_EC_HVC32] = handle_hvc,
[ESR_ELx_EC_SMC32] = handle_smc,
......
@@ -2023,20 +2023,22 @@ static const struct sys_reg_desc cp14_64_regs[] = {
{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};
#define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2) \
AA32(_map), \
Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2), \
.visibility = pmu_visibility
/* Macro to expand the PMEVCNTRn register */
#define PMU_PMEVCNTR(n) \
/* PMEVCNTRn */ \
{ Op1(0), CRn(0b1110), \
CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \
access_pmu_evcntr }
{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \
(0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \
.access = access_pmu_evcntr }
/* Macro to expand the PMEVTYPERn register */
#define PMU_PMEVTYPER(n) \
/* PMEVTYPERn */ \
{ Op1(0), CRn(0b1110), \
CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \
access_pmu_evtyper }
{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \
(0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \
.access = access_pmu_evtyper }
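For a concrete feel of the macro (a worked example, not part of the patch): PMU_PMEVCNTR(11) expands to CRm = 0b1000 | ((11 >> 3) & 0x3) = 0b1001 and Op2 = 11 & 0x7 = 3, i.e. the c14/c9/3 encoding of PMEVCNTR11, with pmu_visibility attached so the register disappears when the guest has no PMU. A standalone check of the field arithmetic:

#include <stdio.h>

/* The CRm/Op2 arithmetic from PMU_PMEVCNTR(n); PMU_PMEVTYPER(n) is
 * identical except for the 0b1100 CRm base. */
int main(void)
{
	for (int n = 0; n < 31; n += 10)
		printf("PMEVCNTR%-2d -> CRm=%d Op2=%d\n",
		       n, 0x8 | ((n >> 3) & 0x3), n & 0x7);
	return 0;
}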
/*
* Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
* depending on the way they are accessed (as a 32bit or a 64bit
@@ -2076,25 +2078,25 @@ static const struct sys_reg_desc cp15_regs[] = {
{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
/* PMU */
{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
{ AA32(LO), Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
{ AA32(LO), Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
{ AA32(HI), Op1( 0), CRn( 9), CRm(14), Op2( 4), access_pmceid },
{ AA32(HI), Op1( 0), CRn( 9), CRm(14), Op2( 5), access_pmceid },
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr },
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten },
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten },
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs },
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc },
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr },
{ CP15_PMU_SYS_REG(LO, 0, 9, 12, 6), .access = access_pmceid },
{ CP15_PMU_SYS_REG(LO, 0, 9, 12, 7), .access = access_pmceid },
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr },
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper },
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr },
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr },
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten },
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten },
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs },
{ CP15_PMU_SYS_REG(HI, 0, 9, 14, 4), .access = access_pmceid },
{ CP15_PMU_SYS_REG(HI, 0, 9, 14, 5), .access = access_pmceid },
/* PMMIR */
{ Op1( 0), CRn( 9), CRm(14), Op2( 6), trap_raz_wi },
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi },
/* PRRR/MAIR0 */
{ AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
@@ -2179,7 +2181,7 @@ static const struct sys_reg_desc cp15_regs[] = {
PMU_PMEVTYPER(29),
PMU_PMEVTYPER(30),
/* PMCCFILTR */
{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
{ CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper },
{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
@@ -2188,7 +2190,7 @@ static const struct sys_reg_desc cp15_regs[] = {
static const struct sys_reg_desc cp15_64_regs[] = {
{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
{ CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
@@ -2255,9 +2257,9 @@ static void perform_access(struct kvm_vcpu *vcpu,
* @table: array of trap descriptors
* @num: size of the trap descriptor array
*
* Return 0 if the access has been handled, and -1 if not.
* Return true if the access has been handled, false if not.
*/
static int emulate_cp(struct kvm_vcpu *vcpu,
static bool emulate_cp(struct kvm_vcpu *vcpu,
struct sys_reg_params *params,
const struct sys_reg_desc *table,
size_t num)
@@ -2265,17 +2267,17 @@ static int emulate_cp(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *r;
if (!table)
return -1; /* Not handled */
return false; /* Not handled */
r = find_reg(params, table, num);
if (r) {
perform_access(vcpu, params, r);
return 0;
return true;
}
/* Not handled */
return -1;
return false;
}
static void unhandled_cp_access(struct kvm_vcpu *vcpu,
@@ -2313,7 +2315,7 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
size_t nr_global)
{
struct sys_reg_params params;
u32 esr = kvm_vcpu_get_esr(vcpu);
u64 esr = kvm_vcpu_get_esr(vcpu);
int Rt = kvm_vcpu_sys_get_rt(vcpu);
int Rt2 = (esr >> 10) & 0x1f;
@@ -2339,7 +2341,7 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
* potential register operation in the case of a read and return
* with success.
*/
if (!emulate_cp(vcpu, &params, global, nr_global)) {
if (emulate_cp(vcpu, &params, global, nr_global)) {
/* Split up the value between registers for the read side */
if (!params.is_write) {
vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
@@ -2353,34 +2355,144 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
return 1;
}
static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params);
/*
* The CP10 ID registers are architecturally mapped to AArch64 feature
* registers. Abuse that fact so we can rely on the AArch64 handler for accesses
* from AArch32.
*/
static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
{
u8 reg_id = (esr >> 10) & 0xf;
bool valid;
params->is_write = ((esr & 1) == 0);
params->Op0 = 3;
params->Op1 = 0;
params->CRn = 0;
params->CRm = 3;
/* CP10 ID registers are read-only */
valid = !params->is_write;
switch (reg_id) {
/* MVFR0 */
case 0b0111:
params->Op2 = 0;
break;
/* MVFR1 */
case 0b0110:
params->Op2 = 1;
break;
/* MVFR2 */
case 0b0101:
params->Op2 = 2;
break;
default:
valid = false;
}
if (valid)
return true;
kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
params->is_write ? "write" : "read", reg_id);
return false;
}
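To make the mapping concrete (a worked example, not part of the patch): a guest "VMRS r0, MVFR1" traps with reg_id = 0b0110 in ESR bits [13:10], which the helper rewrites as Op0=3, Op1=0, CRn=0, CRm=3, Op2=1, exactly the encoding of MVFR1_EL1 in the AArch64 table. A standalone model of the switch above:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* reg_id -> Op2 mapping, mirroring kvm_esr_cp10_id_to_sys64(). */
static bool cp10_id_to_op2(uint8_t reg_id, uint8_t *op2)
{
	switch (reg_id) {
	case 0b0111: *op2 = 0; return true;	/* MVFR0 -> MVFR0_EL1 */
	case 0b0110: *op2 = 1; return true;	/* MVFR1 -> MVFR1_EL1 */
	case 0b0101: *op2 = 2; return true;	/* MVFR2 -> MVFR2_EL1 */
	default:     return false;		/* anything else UNDEFs */
	}
}

int main(void)
{
	uint8_t op2;

	if (cp10_id_to_op2(0b0110, &op2))
		printf("MVFR1 -> Op0=3 Op1=0 CRn=0 CRm=3 Op2=%u\n", op2);
	return 0;
}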
/**
* kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
* VFP Register' from AArch32.
* @vcpu: The vCPU pointer
*
* MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
* Work out the correct AArch64 system register encoding and reroute to the
* AArch64 system register emulation.
*/
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu)
{
int Rt = kvm_vcpu_sys_get_rt(vcpu);
u64 esr = kvm_vcpu_get_esr(vcpu);
struct sys_reg_params params;
/* UNDEF on any unhandled register access */
if (!kvm_esr_cp10_id_to_sys64(esr, &params)) {
kvm_inject_undefined(vcpu);
return 1;
}
if (emulate_sys_reg(vcpu, &params))
vcpu_set_reg(vcpu, Rt, params.regval);
return 1;
}
/**
* kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
* CRn=0, which corresponds to the AArch32 feature
* registers.
* @vcpu: the vCPU pointer
* @params: the system register access parameters.
*
* Our cp15 system register tables do not enumerate the AArch32 feature
* registers. Conveniently, our AArch64 table does, and the AArch32 system
* register encoding can be trivially remapped into the AArch64 for the feature
* registers: Append op0=3, leaving op1, CRn, CRm, and op2 the same.
*
* According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
* System registers with (coproc=0b1111, CRn==c0)", read accesses from this
* range are either UNKNOWN or RES0. Rerouting remains architectural as we
* treat undefined registers in this range as RAZ.
*/
static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
struct sys_reg_params *params)
{
int Rt = kvm_vcpu_sys_get_rt(vcpu);
/* Treat impossible writes to RO registers as UNDEFINED */
if (params->is_write) {
unhandled_cp_access(vcpu, params);
return 1;
}
params->Op0 = 3;
/*
* All registers where CRm > 3 are known to be UNKNOWN/RAZ from AArch32.
* Avoid conflicting with future expansion of AArch64 feature registers
* and simply treat them as RAZ here.
*/
if (params->CRm > 3)
params->regval = 0;
else if (!emulate_sys_reg(vcpu, params))
return 1;
vcpu_set_reg(vcpu, Rt, params->regval);
return 1;
}
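As a worked example of the reroute (not part of the patch): an AArch32 read of ID_ISAR0 arrives as Op1=0, CRn=0, CRm=2, Op2=0; appending Op0=3 yields the encoding of ID_ISAR0_EL1, so the AArch64 table serves the access, while anything with CRm > 3 simply reads as zero. A standalone model, with a stub in place of the real table lookup:

#include <stdint.h>
#include <stdio.h>

struct enc { uint8_t op0, op1, crn, crm, op2; };

/* Stub for the AArch64 sysreg table; returns a made-up value. */
static uint64_t sys64_read(struct enc e)
{
	return 0x02101110;
}

/* Model of kvm_emulate_cp15_id_reg(): append Op0=3, RAZ if CRm > 3. */
static uint64_t cp15_id_read(struct enc e)
{
	e.op0 = 3;
	if (e.crm > 3)
		return 0;	/* UNKNOWN/RAZ space */
	return sys64_read(e);
}

int main(void)
{
	struct enc id_isar0 = { .op1 = 0, .crn = 0, .crm = 2, .op2 = 0 };

	printf("ID_ISAR0 -> %#llx\n",
	       (unsigned long long)cp15_id_read(id_isar0));
	return 0;
}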
/**
* kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
* @vcpu: The VCPU pointer
* @run: The kvm_run struct
*/
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
struct sys_reg_params *params,
const struct sys_reg_desc *global,
size_t nr_global)
{
struct sys_reg_params params;
u32 esr = kvm_vcpu_get_esr(vcpu);
int Rt = kvm_vcpu_sys_get_rt(vcpu);
params.CRm = (esr >> 1) & 0xf;
params.regval = vcpu_get_reg(vcpu, Rt);
params.is_write = ((esr & 1) == 0);
params.CRn = (esr >> 10) & 0xf;
params.Op0 = 0;
params.Op1 = (esr >> 14) & 0x7;
params.Op2 = (esr >> 17) & 0x7;
params->regval = vcpu_get_reg(vcpu, Rt);
if (!emulate_cp(vcpu, &params, global, nr_global)) {
if (!params.is_write)
vcpu_set_reg(vcpu, Rt, params.regval);
if (emulate_cp(vcpu, params, global, nr_global)) {
if (!params->is_write)
vcpu_set_reg(vcpu, Rt, params->regval);
return 1;
}
unhandled_cp_access(vcpu, &params);
unhandled_cp_access(vcpu, params);
return 1;
}
@@ -2391,7 +2503,20 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
{
return kvm_handle_cp_32(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
struct sys_reg_params params;
params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
/*
* Certain AArch32 ID registers are handled by rerouting to the AArch64
* system register table. Registers in the ID range where CRm=0 are
* excluded from this scheme as they do not trivially map into AArch64
* system register encodings.
*/
if (params.Op1 == 0 && params.CRn == 0 && params.CRm)
return kvm_emulate_cp15_id_reg(vcpu, &params);
return kvm_handle_cp_32(vcpu, &params, cp15_regs, ARRAY_SIZE(cp15_regs));
}
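A few encodings against that predicate (illustrative, not part of the patch): MIDR (Op1=0, CRn=c0, CRm=c0) has CRm == 0 and stays in cp15_regs; ID_PFR0 (Op1=0, CRn=c0, CRm=c1) is rerouted to ID_PFR0_EL1; TTBR0 (CRn=c2) never qualifies. A standalone check:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct enc { uint8_t op1, crn, crm, op2; };

/* The routing predicate from kvm_handle_cp15_32() above. */
static bool reroutes_to_sys64(struct enc e)
{
	return e.op1 == 0 && e.crn == 0 && e.crm != 0;
}

int main(void)
{
	struct enc midr    = { 0, 0, 0, 0 };	/* stays in cp15_regs */
	struct enc id_pfr0 = { 0, 0, 1, 0 };	/* rerouted to ID_PFR0_EL1 */

	printf("MIDR reroutes: %d, ID_PFR0 reroutes: %d\n",
	       reroutes_to_sys64(midr), reroutes_to_sys64(id_pfr0));
	return 0;
}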
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
@@ -2401,7 +2526,11 @@ int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
{
return kvm_handle_cp_32(vcpu, cp14_regs, ARRAY_SIZE(cp14_regs));
struct sys_reg_params params;
params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
}
static bool is_imp_def_sys_reg(struct sys_reg_params *params)
@@ -2410,7 +2539,14 @@ static bool is_imp_def_sys_reg(struct sys_reg_params *params)
return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
}
static int emulate_sys_reg(struct kvm_vcpu *vcpu,
/**
* emulate_sys_reg - Emulate a guest access to an AArch64 system register
* @vcpu: The VCPU pointer
* @params: Decoded system register parameters
*
* Return: true if the system register access was successful, false otherwise.
*/
static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
struct sys_reg_params *params)
{
const struct sys_reg_desc *r;
@@ -2419,7 +2555,10 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu,
if (likely(r)) {
perform_access(vcpu, params, r);
} else if (is_imp_def_sys_reg(params)) {
return true;
}
if (is_imp_def_sys_reg(params)) {
kvm_inject_undefined(vcpu);
} else {
print_sys_reg_msg(params,
@@ -2427,7 +2566,7 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu,
*vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
kvm_inject_undefined(vcpu);
}
return 1;
return false;
}
/**
@@ -2455,18 +2594,18 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
struct sys_reg_params params;
unsigned long esr = kvm_vcpu_get_esr(vcpu);
int Rt = kvm_vcpu_sys_get_rt(vcpu);
int ret;
trace_kvm_handle_sys_reg(esr);
params = esr_sys64_to_params(esr);
params.regval = vcpu_get_reg(vcpu, Rt);
ret = emulate_sys_reg(vcpu, &params);
if (!emulate_sys_reg(vcpu, &params))
return 1;
if (!params.is_write)
vcpu_set_reg(vcpu, Rt, params.regval);
return ret;
return 1;
}
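The early return above is what the "Don't write to Rt unless sys_reg emulation succeeds" patch in the series is about: a failed emulation has already injected an UNDEF, so clobbering the guest's Rt with a junk regval would be wrong. A minimal model of that contract (illustrative only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stub emulation that fails, as for an unknown register. */
static bool emulate(uint64_t *regval)
{
	return false;	/* UNDEF has been injected; no value produced */
}

int main(void)
{
	uint64_t rt = 0xdeadbeef;	/* guest Rt before the trap */
	uint64_t val = rt;

	if (emulate(&val))
		rt = val;	/* write back only on success */

	printf("Rt preserved: %#llx\n", (unsigned long long)rt);
	return 0;
}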
/******************************************************************************
......
@@ -35,12 +35,19 @@ struct sys_reg_params {
.Op2 = ((esr) >> 17) & 0x7, \
.is_write = !((esr) & 1) })
#define esr_cp1x_32_to_params(esr) \
((struct sys_reg_params){ .Op1 = ((esr) >> 14) & 0x7, \
.CRn = ((esr) >> 10) & 0xf, \
.CRm = ((esr) >> 1) & 0xf, \
.Op2 = ((esr) >> 17) & 0x7, \
.is_write = !((esr) & 1) })
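For example (a hypothetical trap, not from the patch): "mrc p15, 0, r2, c0, c1, 0" (ID_PFR0) reports Opc2 in ISS[19:17], Opc1 in [16:14], CRn in [13:10], Rt in [9:5], CRm in [4:1] and Direction=1 (read) in bit 0. A standalone decode using the same bit positions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sys_reg_params {
	uint8_t Op1, CRn, CRm, Op2;
	bool is_write;
};

/* Same shifts and masks as esr_cp1x_32_to_params() above. */
#define esr_cp1x_32_to_params(esr)				  \
	((struct sys_reg_params){ .Op1 = ((esr) >> 14) & 0x7,	  \
				  .CRn = ((esr) >> 10) & 0xf,	  \
				  .CRm = ((esr) >> 1) & 0xf,	  \
				  .Op2 = ((esr) >> 17) & 0x7,	  \
				  .is_write = !((esr) & 1) })

int main(void)
{
	/* ISS for "mrc p15, 0, r2, c0, c1, 0": Rt=2, CRm=1, read. */
	uint64_t esr = (2ULL << 5) | (1ULL << 1) | 1ULL;
	struct sys_reg_params p = esr_cp1x_32_to_params(esr);

	printf("Op1=%u CRn=%u CRm=%u Op2=%u is_write=%d\n",
	       p.Op1, p.CRn, p.CRm, p.Op2, p.is_write);
	return 0;
}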
struct sys_reg_desc {
/* Sysreg string for debug */
const char *name;
enum {
AA32_ZEROHIGH,
AA32_DIRECT,
AA32_LO,
AA32_HI,
} aarch32_map;
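A sketch of what the map kinds mean for reads (simplified, not the kernel's exact plumbing): DIRECT passes a value straight through, while LO and HI expose the lower and upper 32 bits of one 64-bit AArch64 register to two different CP15 encodings, as PMCEID0/PMCEID2 do for PMCEID0_EL0 in the table above. AA32_ZEROHIGH, the default, is treated like LO in this toy:

#include <stdint.h>
#include <stdio.h>

enum aa32_map { AA32_ZEROHIGH, AA32_DIRECT, AA32_LO, AA32_HI };

/* Pick the 32-bit view of a 64-bit sysreg value for an AArch32 read. */
static uint32_t aa32_view(enum aa32_map map, uint64_t val64)
{
	switch (map) {
	case AA32_HI:
		return (uint32_t)(val64 >> 32);	/* e.g. PMCEID2 */
	default:
		return (uint32_t)val64;		/* e.g. PMCEID0 */
	}
}

int main(void)
{
	uint64_t pmceid0_el0 = 0x00000001f00dcafeULL;	/* made-up value */

	printf("PMCEID0=%#x PMCEID2=%#x\n",
	       aa32_view(AA32_LO, pmceid0_el0),
	       aa32_view(AA32_HI, pmceid0_el0));
	return 0;
}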
......