Commit 63e30d3e authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar

perf: Make ctx->is_active and cpuctx->task_ctx consistent

For no apparent reason and to great confusion the rules for
ctx->is_active and cpuctx->task_ctx are different. This means that it's
not always possible to find all active (task) contexts.

Fix this such that if ctx->is_active gets set, we also set (or verify)
cpuctx->task_ctx.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 25432ae9
@@ -2329,6 +2329,12 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 	lockdep_assert_held(&ctx->lock);
 
 	ctx->is_active &= ~event_type;
+	if (ctx->task) {
+		WARN_ON_ONCE(cpuctx->task_ctx != ctx);
+		if (!ctx->is_active)
+			cpuctx->task_ctx = NULL;
+	}
+
 	if (likely(!ctx->nr_events))
 		return;
@@ -2629,7 +2635,6 @@ static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
 		return;
 
 	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
-	cpuctx->task_ctx = NULL;
 }
 
 /*
@@ -2712,6 +2717,13 @@ ctx_sched_in(struct perf_event_context *ctx,
 	lockdep_assert_held(&ctx->lock);
 
 	ctx->is_active |= event_type;
+	if (ctx->task) {
+		if (!is_active)
+			cpuctx->task_ctx = ctx;
+		else
+			WARN_ON_ONCE(cpuctx->task_ctx != ctx);
+	}
+
 	if (likely(!ctx->nr_events))
 		return;
@@ -2756,12 +2768,7 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
 	 * cpu flexible, task flexible.
 	 */
 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
-
-	if (ctx->nr_events)
-		cpuctx->task_ctx = ctx;
-
-	perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
-
+	perf_event_sched_in(cpuctx, ctx, task);
 	perf_pmu_enable(ctx->pmu);
 	perf_ctx_unlock(cpuctx, ctx);
 }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment