Commit 90c91dfb authored by Peter Zijlstra's avatar Peter Zijlstra

perf/core: Fix endless multiplex timer

Kan and Andi reported that we fail to kill rotation when the flexible
events go empty, but the context does not: rotate_necessary stays set,
so the multiplex hrtimer keeps firing even though there is nothing left
to rotate. Clear rotate_necessary when the flexible events are scheduled
out, and unconditionally reset it in ctx_event_to_rotate(); it will be
set again by ctx_flexible_sched_in() if rotation is still needed.

Fixes: fd7d5517 ("perf/cgroups: Don't rotate events for cgroups unnecessarily")
Reported-by: Andi Kleen <ak@linux.intel.com>
Reported-by: Kan Liang <kan.liang@linux.intel.com>
Tested-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200305123851.GX2596@hirez.programming.kicks-ass.net
parent d8a73868
...@@ -2291,6 +2291,7 @@ __perf_remove_from_context(struct perf_event *event, ...@@ -2291,6 +2291,7 @@ __perf_remove_from_context(struct perf_event *event,
if (!ctx->nr_events && ctx->is_active) { if (!ctx->nr_events && ctx->is_active) {
ctx->is_active = 0; ctx->is_active = 0;
ctx->rotate_necessary = 0;
if (ctx->task) { if (ctx->task) {
WARN_ON_ONCE(cpuctx->task_ctx != ctx); WARN_ON_ONCE(cpuctx->task_ctx != ctx);
cpuctx->task_ctx = NULL; cpuctx->task_ctx = NULL;
...@@ -3188,12 +3189,6 @@ static void ctx_sched_out(struct perf_event_context *ctx, ...@@ -3188,12 +3189,6 @@ static void ctx_sched_out(struct perf_event_context *ctx,
if (!ctx->nr_active || !(is_active & EVENT_ALL)) if (!ctx->nr_active || !(is_active & EVENT_ALL))
return; return;
/*
* If we had been multiplexing, no rotations are necessary, now no events
* are active.
*/
ctx->rotate_necessary = 0;
perf_pmu_disable(ctx->pmu); perf_pmu_disable(ctx->pmu);
if (is_active & EVENT_PINNED) { if (is_active & EVENT_PINNED) {
list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list) list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list)
...@@ -3203,6 +3198,13 @@ static void ctx_sched_out(struct perf_event_context *ctx, ...@@ -3203,6 +3198,13 @@ static void ctx_sched_out(struct perf_event_context *ctx,
if (is_active & EVENT_FLEXIBLE) { if (is_active & EVENT_FLEXIBLE) {
list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list) list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list)
group_sched_out(event, cpuctx, ctx); group_sched_out(event, cpuctx, ctx);
/*
 * Since we cleared EVENT_FLEXIBLE, also clear
 * rotate_necessary; it will be reset by
 * ctx_flexible_sched_in() when needed.
 */
ctx->rotate_necessary = 0;
} }
perf_pmu_enable(ctx->pmu); perf_pmu_enable(ctx->pmu);
} }
...@@ -3985,6 +3987,12 @@ ctx_event_to_rotate(struct perf_event_context *ctx) ...@@ -3985,6 +3987,12 @@ ctx_event_to_rotate(struct perf_event_context *ctx)
typeof(*event), group_node); typeof(*event), group_node);
} }
/*
* Unconditionally clear rotate_necessary; if ctx_flexible_sched_in()
* finds there are unschedulable events, it will set it again.
*/
ctx->rotate_necessary = 0;
return event; return event;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment