Commit 95e92e45 authored by Julien Thierry, committed by Will Deacon

KVM: arm64: pmu: Make overflow handler NMI safe

kvm_vcpu_kick() is not NMI safe. When the overflow handler is called from
NMI context, defer waking the vcpu to an irq_work queue.
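
A minimal sketch of the pattern, using hypothetical demo_* names rather than the patch's own symbols: irq_work_queue() is NMI safe because it only marks the work pending and raises a self-IPI; the queued callback then runs in ordinary interrupt context, where kicking a vcpu is allowed.

/* Hypothetical illustration of the deferral pattern; not part of the patch. */
#include <linux/irq_work.h>
#include <linux/hardirq.h>      /* in_nmi() */

static struct irq_work demo_wake;

/* Runs later in plain IRQ context, where kvm_vcpu_kick()-style helpers are safe. */
static void demo_wake_fn(struct irq_work *work)
{
}

static void demo_setup(void)
{
        init_irq_work(&demo_wake, demo_wake_fn);
}

static void demo_overflow_handler(void)
{
        if (!in_nmi())
                demo_wake_fn(&demo_wake);       /* safe to act directly */
        else
                irq_work_queue(&demo_wake);     /* defer out of NMI context */
}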

A vcpu can be freed while it's not running by kvm_destroy_vm(). Prevent
running the irq_work for a non-existent vcpu by calling irq_work_sync() on
the PMU destroy path.
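
The matching teardown step for the sketch above (again with hypothetical names): the destroy path must wait for any still-pending callback before the data it references is freed.

static void demo_teardown(void)
{
        /*
         * Mirrors irq_work_sync() in kvm_pmu_vcpu_destroy(): wait for a
         * queued demo_wake callback to finish before its data goes away.
         */
        irq_work_sync(&demo_wake);
}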

[Alexandru E.: Added irq_work_sync()]
Signed-off-by: Julien Thierry <julien.thierry@arm.com>
Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
Tested-by: Sumit Garg <sumit.garg@linaro.org> (Developerbox)
Cc: Julien Thierry <julien.thierry.kdev@gmail.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: kvm@vger.kernel.org
Cc: kvmarm@lists.cs.columbia.edu
Link: https://lore.kernel.org/r/20200924110706.254996-6-alexandru.elisei@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
parent 05ab7281

--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -269,6 +269,7 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
 		kvm_pmu_release_perf_event(&pmu->pmc[i]);
+	irq_work_sync(&vcpu->arch.pmu.overflow_work);
 }
 
 u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
@@ -433,6 +434,22 @@ void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
 	kvm_pmu_update_state(vcpu);
 }
 
+/**
+ * When perf interrupt is an NMI, we cannot safely notify the vcpu corresponding
+ * to the event.
+ * This is why we need a callback to do it once outside of the NMI context.
+ */
+static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
+{
+	struct kvm_vcpu *vcpu;
+	struct kvm_pmu *pmu;
+
+	pmu = container_of(work, struct kvm_pmu, overflow_work);
+	vcpu = kvm_pmc_to_vcpu(pmu->pmc);
+
+	kvm_vcpu_kick(vcpu);
+}
+
 /**
  * When the perf event overflows, set the overflow status and inform the vcpu.
  */
@@ -465,7 +482,11 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
 
 	if (kvm_pmu_overflow_status(vcpu)) {
 		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
-		kvm_vcpu_kick(vcpu);
+
+		if (!in_nmi())
+			kvm_vcpu_kick(vcpu);
+		else
+			irq_work_queue(&vcpu->arch.pmu.overflow_work);
 	}
 
 	cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
@@ -764,6 +785,9 @@ static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
 			return ret;
 	}
 
+	init_irq_work(&vcpu->arch.pmu.overflow_work,
+		      kvm_pmu_perf_overflow_notify_vcpu);
+
 	vcpu->arch.pmu.created = true;
 	return 0;
 }
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -27,6 +27,7 @@ struct kvm_pmu {
 	bool ready;
 	bool created;
 	bool irq_level;
+	struct irq_work overflow_work;
 };
 
 #define kvm_arm_pmu_v3_ready(v)	((v)->arch.pmu.ready)