Commit d9f89b4e authored by Andrew Jones, committed by Marc Zyngier

KVM: arm/arm64: PMU: Fix overflow interrupt injection

kvm_pmu_overflow_set() is called from perf's interrupt handler,
which makes the call to kvm_vgic_inject_irq() from it, introduced
with "KVM: arm/arm64: PMU: remove request-less vcpu kick", a really
bad idea: it's quite easy to try to retake a lock that the
interrupted context is already holding. The fix is to use a vcpu
kick and leave the interrupt injection to kvm_pmu_sync_hwstate(),
as was done before the refactoring. We don't simply revert, though,
because the kick was previously request-less, leaving the vcpu
exposed to the request-less vcpu kick race, and because the kick
was used unnecessarily from the register access handlers.
Reviewed-by: Christoffer Dall <cdall@linaro.org>
Signed-off-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent 79962a5c
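
For context, below is a minimal userspace sketch (not kernel code) of the
deferred-injection pattern the fix adopts: the interrupt-context producer only
publishes a pending bit, records a request, and kicks; the lock-taking
injection happens later at a safe point in the vcpu thread. All names here
(overflow_handler, sync_hwstate, inject_lock) are hypothetical stand-ins for
illustration only.

/*
 * Hypothetical userspace analogue of the pattern in this patch: the
 * "interrupt handler" must not take inject_lock, because the interrupted
 * context may already hold it. It only publishes state and requests
 * attention; the consumer injects under the lock at a safe point.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_ulong overflow_status;  /* stands in for PMOVSSET_EL0 */
static atomic_bool  irq_request;      /* stands in for KVM_REQ_IRQ_PENDING */
static pthread_mutex_t inject_lock = PTHREAD_MUTEX_INITIALIZER;

/* Runs in "interrupt" context: must never take inject_lock. */
static void overflow_handler(int counter)
{
	atomic_fetch_or(&overflow_status, 1UL << counter);
	atomic_store(&irq_request, true);
	/* A real kick would now wake/IPI the vcpu thread. */
}

/* Runs in "vcpu" context at a safe point, e.g. before guest entry. */
static void sync_hwstate(void)
{
	if (!atomic_exchange(&irq_request, false))
		return;
	pthread_mutex_lock(&inject_lock);  /* safe to take the lock here */
	printf("inject: overflow mask %#lx\n",
	       atomic_load(&overflow_status));
	pthread_mutex_unlock(&inject_lock);
}

int main(void)
{
	overflow_handler(3);  /* counter 3 overflows in "interrupt" context */
	sync_hwstate();       /* vcpu sees the request and injects safely */
	return 0;
}

The ordering matters: the request is recorded before the kick, so the vcpu
thread cannot check for work, miss the request, and enter the guest anyway,
which is exactly the request-less kick race the commit message refers to.
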
arch/arm64/kvm/sys_regs.c:

@@ -764,7 +764,7 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	if (p->is_write) {
 		if (r->CRm & 0x2)
 			/* accessing PMOVSSET_EL0 */
-			kvm_pmu_overflow_set(vcpu, p->regval & mask);
+			vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
 		else
 			/* accessing PMOVSCLR_EL0 */
 			vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
include/kvm/arm_pmu.h:

@@ -48,7 +48,6 @@ void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
 void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
 void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val);
-void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
 void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
 bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
@@ -86,7 +85,6 @@ static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {}
 static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {}
-static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {}
 static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
 static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
virt/kvm/arm/pmu.c:

@@ -203,11 +203,15 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
 	return reg;
 }
 
-static void kvm_pmu_check_overflow(struct kvm_vcpu *vcpu)
+static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
-	bool overflow = !!kvm_pmu_overflow_status(vcpu);
+	bool overflow;
+
+	if (!kvm_arm_pmu_v3_ready(vcpu))
+		return;
 
+	overflow = !!kvm_pmu_overflow_status(vcpu);
 	if (pmu->irq_level == overflow)
 		return;
 
@@ -215,33 +219,11 @@ static void kvm_pmu_check_overflow(struct kvm_vcpu *vcpu)
 
 	if (likely(irqchip_in_kernel(vcpu->kvm))) {
 		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
-					      pmu->irq_num, overflow,
-					      &vcpu->arch.pmu);
+					      pmu->irq_num, overflow, pmu);
 		WARN_ON(ret);
 	}
 }
 
-/**
- * kvm_pmu_overflow_set - set PMU overflow interrupt
- * @vcpu: The vcpu pointer
- * @val: the value guest writes to PMOVSSET register
- */
-void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val)
-{
-	if (val == 0)
-		return;
-
-	vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= val;
-	kvm_pmu_check_overflow(vcpu);
-}
-
-static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
-{
-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return;
-
-	kvm_pmu_check_overflow(vcpu);
-}
-
 bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
@@ -303,7 +285,7 @@ static inline struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
 }
 
 /**
- * When perf event overflows, call kvm_pmu_overflow_set to set overflow status.
+ * When the perf event overflows, set the overflow status and inform the vcpu.
  */
 static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
 				  struct perf_sample_data *data,
@@ -313,7 +295,12 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
 	int idx = pmc->idx;
 
-	kvm_pmu_overflow_set(vcpu, BIT(idx));
+	vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
+
+	if (kvm_pmu_overflow_status(vcpu)) {
+		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
+		kvm_vcpu_kick(vcpu);
+	}
 }
 
@@ -341,7 +328,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
 		reg = lower_32_bits(reg);
 		vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
 		if (!reg)
-			kvm_pmu_overflow_set(vcpu, BIT(i));
+			vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
 	}
 }