Commit 3a80b4a3 authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: Fix a race on perf_counter_ctx

While extending perfcounters with BTS hw-tracing, Markus
Metzger managed to trigger this warning:

   [  995.557128] WARNING: at kernel/perf_counter.c:1191 __perf_counter_task_sched_out+0x48/0x6b()

It triggers because commit 9f498cc5 ("perf_counter: Full task
tracing") moved the clearing of tsk->perf_counter_ctxp out from
under ctx->lock, which introduced a race against
perf_lock_task_context().

Move it back and deal with the exit notification by explicitly
passing along the former task context.
Reported-by: Markus T Metzger <markus.t.metzger@intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1249667341.17467.5.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 3a43ce68
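
The lock-and-revalidate idiom that perf_lock_task_context() relies on is
easier to see in a minimal user-space model. The sketch below uses
hypothetical names, with a pthread mutex and C11 atomics standing in for
ctx->lock and RCU; it is an illustration of the pattern, not the kernel
code:

/*
 * Minimal user-space model of the race (hypothetical names, not the
 * kernel code): a task's context pointer is published for lockless
 * readers and protected by a per-context lock. Readers lock the
 * context, then RECHECK that the task still points at it -- which is
 * only sound if the exit path clears the pointer under the same lock.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct context {
	pthread_mutex_t lock;			/* models ctx->lock */
};

struct task {
	_Atomic(struct context *) ctxp;		/* models tsk->perf_counter_ctxp */
};

/* Models perf_lock_task_context(): dereference, lock, revalidate. */
static struct context *lock_task_context(struct task *tsk)
{
	struct context *ctx;

retry:
	ctx = atomic_load(&tsk->ctxp);
	if (!ctx)
		return NULL;
	pthread_mutex_lock(&ctx->lock);
	/* Re-check under the lock: did an exit path race with us? */
	if (ctx != atomic_load(&tsk->ctxp)) {
		pthread_mutex_unlock(&ctx->lock);
		goto retry;
	}
	return ctx;				/* returned locked */
}

/*
 * Correct exit path: clear the pointer *inside* ctx->lock, then hand
 * the saved pointer to any further (exit notification) work.
 */
static struct context *exit_task_context(struct task *tsk)
{
	struct context *ctx = atomic_load(&tsk->ctxp);

	pthread_mutex_lock(&ctx->lock);
	atomic_store(&tsk->ctxp, NULL);		/* readers' recheck catches this */
	pthread_mutex_unlock(&ctx->lock);
	return ctx;				/* former context, used explicitly */
}

int main(void)
{
	struct context ctx;
	struct task tsk;

	pthread_mutex_init(&ctx.lock, NULL);
	atomic_store(&tsk.ctxp, &ctx);

	struct context *former = exit_task_context(&tsk);

	/* A late reader now sees NULL instead of a half-torn-down context. */
	printf("reader sees %p, exit path kept %p\n",
	       (void *)lock_task_context(&tsk), (void *)former);
	pthread_mutex_destroy(&ctx.lock);
	return 0;
}

The re-check under the lock is only sound if every writer that clears
the pointer does so while holding the same lock; clearing it outside the
lock, as the broken ordering did, leaves a window where a reader can pass
the re-check against a context that is already being torn down.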
kernel/perf_counter.c
@@ -2850,7 +2850,8 @@ perf_counter_read_event(struct perf_counter *counter,
  */
 
 struct perf_task_event {
 	struct task_struct		*task;
+	struct perf_counter_context	*task_ctx;
 
 	struct {
 		struct perf_event_header	header;
@@ -2910,24 +2911,23 @@ static void perf_counter_task_ctx(struct perf_counter_context *ctx,
 static void perf_counter_task_event(struct perf_task_event *task_event)
 {
 	struct perf_cpu_context *cpuctx;
-	struct perf_counter_context *ctx;
+	struct perf_counter_context *ctx = task_event->task_ctx;
 
 	cpuctx = &get_cpu_var(perf_cpu_context);
 	perf_counter_task_ctx(&cpuctx->ctx, task_event);
 	put_cpu_var(perf_cpu_context);
 
 	rcu_read_lock();
-	/*
-	 * doesn't really matter which of the child contexts the
-	 * events ends up in.
-	 */
-	ctx = rcu_dereference(current->perf_counter_ctxp);
+	if (!ctx)
+		ctx = rcu_dereference(task_event->task->perf_counter_ctxp);
 	if (ctx)
 		perf_counter_task_ctx(ctx, task_event);
 	rcu_read_unlock();
 }
 
-static void perf_counter_task(struct task_struct *task, int new)
+static void perf_counter_task(struct task_struct *task,
+			      struct perf_counter_context *task_ctx,
+			      int new)
 {
 	struct perf_task_event task_event;
@@ -2937,8 +2937,9 @@ static void perf_counter_task(struct task_struct *task, int new)
 		return;
 
 	task_event = (struct perf_task_event){
 		.task     = task,
+		.task_ctx = task_ctx,
 		.event    = {
 			.header = {
 				.type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT,
 				.misc = 0,
@@ -2956,7 +2957,7 @@ static void perf_counter_task(struct task_struct *task, int new)
 
 void perf_counter_fork(struct task_struct *task)
 {
-	perf_counter_task(task, 1);
+	perf_counter_task(task, NULL, 1);
 }
 
 /*
@@ -4310,7 +4311,7 @@ void perf_counter_exit_task(struct task_struct *child)
 	unsigned long flags;
 
 	if (likely(!child->perf_counter_ctxp)) {
-		perf_counter_task(child, 0);
+		perf_counter_task(child, NULL, 0);
 		return;
 	}
@@ -4330,6 +4331,7 @@ void perf_counter_exit_task(struct task_struct *child)
 	 * incremented the context's refcount before we do put_ctx below.
 	 */
 	spin_lock(&child_ctx->lock);
+	child->perf_counter_ctxp = NULL;
 	/*
 	 * If this context is a clone; unclone it so it can't get
 	 * swapped to another process while we're removing all
@@ -4343,9 +4345,7 @@ void perf_counter_exit_task(struct task_struct *child)
 	 * won't get any samples after PERF_EVENT_EXIT. We can however still
 	 * get a few PERF_EVENT_READ events.
 	 */
-	perf_counter_task(child, 0);
-	child->perf_counter_ctxp = NULL;
+	perf_counter_task(child, child_ctx, 0);
 
 	/*
 	 * We can recurse on the same lock type through:
...
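
Taken together, the hunks restore the pre-9f498cc5 ordering in
perf_counter_exit_task() while keeping the exit notification working.
Condensed, the post-patch flow looks roughly like this (a sketch
reconstructed from the hunks above, not the verbatim source; irq-flags
handling and refcounting are elided):

	spin_lock(&child_ctx->lock);
	child->perf_counter_ctxp = NULL;	/* cleared under ctx->lock again */
	unclone_ctx(child_ctx);			/* can't be swapped away any more */
	spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * The task no longer points at its context, so the PERF_EVENT_EXIT
	 * notification is handed the former context explicitly.
	 */
	perf_counter_task(child, child_ctx, 0);

With the clearing back under ctx->lock, perf_lock_task_context()'s
re-check either finds the pointer unchanged or observes NULL and retries,
which closes the window behind the warning.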