Commit 484c22df authored by Paolo Bonzini

Merge tag 'kvmarm-fixes-5.18-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 fixes for 5.18, take #2

- Take care of faults occurring between the PARange and
  IPA range by injecting an exception

- Fix S2 faults taken from a host EL0 in protected mode

- Work around Oops caused by a PMU access from a 32bit
  guest when no PMU has been created. This is a temporary
  bodge until we fix it for good.
parents e852be8b 85ea6b1e
@@ -40,6 +40,7 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_vabt(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_size_fault(struct kvm_vcpu *vcpu);

 void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);
...
@@ -198,15 +198,15 @@ SYM_CODE_START(__kvm_hyp_host_vector)
 	invalid_host_el2_vect			// FIQ EL2h
 	invalid_host_el2_vect			// Error EL2h

-	host_el1_sync_vect			// Synchronous 64-bit EL1
-	invalid_host_el1_vect			// IRQ 64-bit EL1
-	invalid_host_el1_vect			// FIQ 64-bit EL1
-	invalid_host_el1_vect			// Error 64-bit EL1
+	host_el1_sync_vect			// Synchronous 64-bit EL1/EL0
+	invalid_host_el1_vect			// IRQ 64-bit EL1/EL0
+	invalid_host_el1_vect			// FIQ 64-bit EL1/EL0
+	invalid_host_el1_vect			// Error 64-bit EL1/EL0

-	invalid_host_el1_vect			// Synchronous 32-bit EL1
-	invalid_host_el1_vect			// IRQ 32-bit EL1
-	invalid_host_el1_vect			// FIQ 32-bit EL1
-	invalid_host_el1_vect			// Error 32-bit EL1
+	host_el1_sync_vect			// Synchronous 32-bit EL1/EL0
+	invalid_host_el1_vect			// IRQ 32-bit EL1/EL0
+	invalid_host_el1_vect			// FIQ 32-bit EL1/EL0
+	invalid_host_el1_vect			// Error 32-bit EL1/EL0
 SYM_CODE_END(__kvm_hyp_host_vector)

 /*
...
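
In protected mode the host has its own EL0, so a stage-2 fault can now be taken to EL2 from host userspace, not just from host EL1; a 32-bit EL0 process arrives through the AArch32 "lower EL" vector slots, which the old table treated as invalid and would panic on. Below is a standalone illustration (not kernel code; the helper name is made up, the mode encodings are architectural) of how the originating mode recorded in SPSR_EL2.M selects the vector group:

#include <stdint.h>
#include <stdio.h>

#define PSR_MODE_MASK	0x1f
#define PSR_MODE_EL0t	0x00
#define PSR_MODE_EL1t	0x04
#define PSR_MODE_EL1h	0x05
#define PSR_MODE32_BIT	0x10	/* M[4]: exception came from AArch32 state */

/* Made-up helper: maps the saved mode to the EL2 vector group. */
static const char *host_vector_group(uint64_t spsr_el2)
{
	unsigned int mode = spsr_el2 & PSR_MODE_MASK;

	if (spsr_el2 & PSR_MODE32_BIT)
		return "lower EL, AArch32";	/* 32-bit host EL0 lands here */
	if (mode == PSR_MODE_EL0t || mode == PSR_MODE_EL1t || mode == PSR_MODE_EL1h)
		return "lower EL, AArch64";	/* 64-bit host EL1/EL0 */
	return "current EL (EL2 itself)";
}

int main(void)
{
	/* A stage-2 fault taken from a 32-bit host EL0 process: */
	printf("%s\n", host_vector_group(PSR_MODE32_BIT | PSR_MODE_EL0t));
	return 0;
}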
@@ -145,6 +145,34 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
 	inject_abt64(vcpu, true, addr);
 }

+void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
+{
+	unsigned long addr, esr;
+
+	addr  = kvm_vcpu_get_fault_ipa(vcpu);
+	addr |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
+
+	if (kvm_vcpu_trap_is_iabt(vcpu))
+		kvm_inject_pabt(vcpu, addr);
+	else
+		kvm_inject_dabt(vcpu, addr);
+
+	/*
+	 * If AArch64 or LPAE, set FSC to 0 to indicate an Address
+	 * Size Fault at level 0, as if exceeding PARange.
+	 *
+	 * Non-LPAE guests will only get the external abort, as there
+	 * is no way to describe the ASF.
+	 */
+	if (vcpu_el1_is_32bit(vcpu) &&
+	    !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
+		return;
+
+	esr = vcpu_read_sys_reg(vcpu, ESR_EL1);
+	esr &= ~GENMASK_ULL(5, 0);
+	vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
+}
+
 /**
  * kvm_inject_undefined - inject an undefined instruction into the guest
  * @vcpu: The vCPU in which to inject the exception
...
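
The final three lines work because ESR_ELx bits [5:0] are the FSC (fault status code) field, and the all-zeroes encoding means "address size fault, level 0". A standalone sketch of the masking (the example ESR value is made up; the mask mirrors GENMASK_ULL(5, 0) above):

#include <stdint.h>
#include <stdio.h>

#define FSC_MASK 0x3fULL	/* ESR_ELx bits [5:0]: fault status code */

int main(void)
{
	/* Made-up data-abort ESR: EC = 0x24 (data abort from lower EL),
	 * FSC = 0b000101 (translation fault, level 1). */
	uint64_t esr = 0x92000005ULL;

	esr &= ~FSC_MASK;	/* FSC = 0b000000: address size fault, level 0 */
	printf("ESR_EL1 = %#llx\n", (unsigned long long)esr);
	return 0;
}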
@@ -1337,6 +1337,25 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
 	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);

+	if (fault_status == FSC_FAULT) {
+		/* Beyond sanitised PARange (which is the IPA limit) */
+		if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) {
+			kvm_inject_size_fault(vcpu);
+			return 1;
+		}
+
+		/* Falls between the IPA range and the PARange? */
+		if (fault_ipa >= BIT_ULL(vcpu->arch.hw_mmu->pgt->ia_bits)) {
+			fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
+
+			if (is_iabt)
+				kvm_inject_pabt(vcpu, fault_ipa);
+			else
+				kvm_inject_dabt(vcpu, fault_ipa);
+			return 1;
+		}
+	}
+
 	/* Synchronous External Abort? */
 	if (kvm_vcpu_abt_issea(vcpu)) {
 		/*
...
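
For concreteness, here is a standalone sketch of the two windows these checks carve out, with made-up sizes (a 40-bit guest IPA space on a CPU whose sanitised PARange, and hence get_kvm_ipa_limit(), is 48 bits); only faults in the upper window get the new size-fault injection:

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)	(1ULL << (n))

int main(void)
{
	unsigned int ia_bits = 40;	/* vcpu->arch.hw_mmu->pgt->ia_bits */
	unsigned int ipa_limit = 48;	/* get_kvm_ipa_limit(): sanitised PARange */
	uint64_t fault_ipa = BIT_ULL(44);	/* between the two limits */

	if (fault_ipa >= BIT_ULL(ipa_limit))
		puts("beyond PARange: inject an address size fault");
	else if (fault_ipa >= BIT_ULL(ia_bits))
		puts("between the IPA range and PARange: inject a regular abort");
	else
		puts("inside the IPA space: normal stage-2 fault handling");
	return 0;
}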
@@ -177,6 +177,9 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	struct kvm_pmc *pmc = &pmu->pmc[select_idx];

+	if (!kvm_vcpu_has_pmu(vcpu))
+		return 0;
+
 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

 	if (kvm_pmu_pmc_is_chained(pmc) &&
@@ -198,6 +201,9 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 {
 	u64 reg;

+	if (!kvm_vcpu_has_pmu(vcpu))
+		return;
+
 	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
 	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
 	__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
@@ -322,6 +328,9 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	struct kvm_pmc *pmc;

+	if (!kvm_vcpu_has_pmu(vcpu))
+		return;
+
 	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
 		return;
@@ -357,7 +366,7 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	struct kvm_pmc *pmc;

-	if (!val)
+	if (!kvm_vcpu_has_pmu(vcpu) || !val)
 		return;

 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
@@ -527,6 +536,9 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	int i;

+	if (!kvm_vcpu_has_pmu(vcpu))
+		return;
+
 	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
 		return;
@@ -576,6 +588,9 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 {
 	int i;

+	if (!kvm_vcpu_has_pmu(vcpu))
+		return;
+
 	if (val & ARMV8_PMU_PMCR_E) {
 		kvm_pmu_enable_counter_mask(vcpu,
 		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
@@ -739,6 +754,9 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 {
 	u64 reg, mask;

+	if (!kvm_vcpu_has_pmu(vcpu))
+		return;
+
 	mask  =  ARMV8_PMU_EVTYPE_MASK;
 	mask &= ~ARMV8_PMU_EVTYPE_EVENT;
 	mask |=  kvm_pmu_event_mask(vcpu->kvm);
@@ -827,6 +845,9 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 	u64 val, mask = 0;
 	int base, i, nr_events;

+	if (!kvm_vcpu_has_pmu(vcpu))
+		return 0;
+
 	if (!pmceid1) {
 		val = read_sysreg(pmceid0_el0);
 		base = 0;
...
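
All eight hunks apply the same early-return pattern: every PMU emulation entry point now behaves as RAZ/WI (reads-as-zero, writes-ignored) when the vCPU was created without a PMU, instead of dereferencing state that was never set up. This is what keeps a 32-bit guest, whose userspace never enables KVM_ARM_VCPU_PMU_V3, from triggering the Oops. Roughly how the guard is defined in the arm64 KVM headers (a sketch, not verbatim):

/* Sketch: the guard just tests the vCPU feature bit that userspace
 * sets via KVM_ARM_VCPU_INIT when it asks for a PMU. */
#define kvm_vcpu_has_pmu(vcpu)					\
	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))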