Commit a096309b authored by Peter Zijlstra, committed by Ingo Molnar

perf: Fix scaling vs. perf_install_in_context()

Completely reworks perf_install_in_context() (again!) in order to
ensure that there will be no ctx time hole between add_event_to_ctx()
and any potential ctx_sched_in().
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dvyukov@google.com
Cc: eranian@google.com
Cc: oleg@redhat.com
Cc: panand@redhat.com
Cc: sasha.levin@oracle.com
Cc: vince@deater.net
Link: http://lkml.kernel.org/r/20160224174948.279399438@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent bd2afa49
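
The core of the rework in the diff below is a retry protocol: perf_install_in_context() reads the target task's CPU, IPIs that CPU unconditionally to run __perf_install_in_context(), and retries when the remote handler reports -ESRCH because the task migrated in the meantime. The following is a minimal userspace C sketch of just that loop, not kernel code and not part of the patch; remote_install(), install(), task_cpu_now, migrated and the single simulated migration are invented for illustration only.

/*
 * Toy userspace model of the retry protocol introduced by the patch below:
 * always "IPI" the CPU the target task was last seen on; if the remote
 * handler finds the task has migrated away it returns -ESRCH and the caller
 * re-reads the task's CPU and tries again.
 */
#include <stdio.h>
#include <stdatomic.h>

#define ESRCH 3

static atomic_int task_cpu_now = 1;	/* CPU the target task runs on        */
static int installed;			/* stands in for add_event_to_ctx()   */
static int migrated;			/* let the task move exactly once     */

/* Models __perf_install_in_context() running on @cpu via cpu_function_call(). */
static int remote_install(int cpu)
{
	if (atomic_load(&task_cpu_now) != cpu)
		return -ESRCH;		/* wrong CPU: tell the caller to retry */
	installed = 1;			/* add_event_to_ctx() + ctx_resched()  */
	return 0;
}

/* Models the retry loop in the reworked perf_install_in_context(). */
static void install(void)
{
	int cpu;

	do {
		cpu = atomic_load(&task_cpu_now);	/* task_cpu(task)      */
		if (!migrated) {			/* simulate one racing */
			atomic_store(&task_cpu_now, cpu + 1); /* migration     */
			migrated = 1;
		}
	} while (remote_install(cpu) == -ESRCH);	/* IPI; retry on -ESRCH */
}

int main(void)
{
	install();
	printf("installed = %d\n", installed);	/* prints: installed = 1 */
	return 0;
}

Any C11 compiler will build this (e.g. gcc -std=c11); the point is only to show why the installer can loop safely: either the IPI lands on the task's current CPU and installs the event, or the task has moved and a later attempt (or a context switch, in the real code) takes care of it.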
@@ -276,10 +276,10 @@ static void event_function_call(struct perf_event *event, event_f func, void *da
 		return;
 	}
 
-again:
 	if (task == TASK_TOMBSTONE)
 		return;
 
+again:
 	if (!task_function_call(task, event_function, &efs))
 		return;
 
@@ -289,13 +289,15 @@ static void event_function_call(struct perf_event *event, event_f func, void *da
 	 * a concurrent perf_event_context_sched_out().
 	 */
 	task = ctx->task;
-	if (task != TASK_TOMBSTONE) {
-		if (ctx->is_active) {
-			raw_spin_unlock_irq(&ctx->lock);
-			goto again;
-		}
-		func(event, NULL, ctx, data);
+	if (task == TASK_TOMBSTONE) {
+		raw_spin_unlock_irq(&ctx->lock);
+		return;
+	}
+	if (ctx->is_active) {
+		raw_spin_unlock_irq(&ctx->lock);
+		goto again;
 	}
+	func(event, NULL, ctx, data);
 	raw_spin_unlock_irq(&ctx->lock);
 }
 
@@ -2116,49 +2118,68 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
 /*
  * Cross CPU call to install and enable a performance event
  *
- * Must be called with ctx->mutex held
+ * Very similar to remote_function() + event_function() but cannot assume that
+ * things like ctx->is_active and cpuctx->task_ctx are set.
  */
 static int __perf_install_in_context(void *info)
 {
-	struct perf_event_context *ctx = info;
+	struct perf_event *event = info;
+	struct perf_event_context *ctx = event->ctx;
 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 	struct perf_event_context *task_ctx = cpuctx->task_ctx;
+	bool activate = true;
+	int ret = 0;
 
 	raw_spin_lock(&cpuctx->ctx.lock);
 	if (ctx->task) {
 		raw_spin_lock(&ctx->lock);
-		/*
-		 * If we hit the 'wrong' task, we've since scheduled and
-		 * everything should be sorted, nothing to do!
-		 */
 		task_ctx = ctx;
-		if (ctx->task != current)
+
+		/* If we're on the wrong CPU, try again */
+		if (task_cpu(ctx->task) != smp_processor_id()) {
+			ret = -ESRCH;
 			goto unlock;
+		}
 
 		/*
-		 * If task_ctx is set, it had better be to us.
+		 * If we're on the right CPU, see if the task we target is
+		 * current, if not we don't have to activate the ctx, a future
+		 * context switch will do that for us.
 		 */
-		WARN_ON_ONCE(cpuctx->task_ctx != ctx && cpuctx->task_ctx);
+		if (ctx->task != current)
+			activate = false;
+		else
+			WARN_ON_ONCE(cpuctx->task_ctx && cpuctx->task_ctx != ctx);
+
 	} else if (task_ctx) {
 		raw_spin_lock(&task_ctx->lock);
 	}
 
-	ctx_resched(cpuctx, task_ctx);
+	if (activate) {
+		ctx_sched_out(ctx, cpuctx, EVENT_TIME);
+		add_event_to_ctx(event, ctx);
+		ctx_resched(cpuctx, task_ctx);
+	} else {
+		add_event_to_ctx(event, ctx);
+	}
+
 unlock:
 	perf_ctx_unlock(cpuctx, task_ctx);
 
-	return 0;
+	return ret;
 }
 
 /*
- * Attach a performance event to a context
+ * Attach a performance event to a context.
+ *
+ * Very similar to event_function_call, see comment there.
  */
 static void
 perf_install_in_context(struct perf_event_context *ctx,
 			struct perf_event *event,
 			int cpu)
 {
-	struct task_struct *task = NULL;
+	struct task_struct *task = READ_ONCE(ctx->task);
 
 	lockdep_assert_held(&ctx->mutex);
@@ -2166,42 +2187,46 @@ perf_install_in_context(struct perf_event_context *ctx,
 	if (event->cpu != -1)
 		event->cpu = cpu;
 
+	if (!task) {
+		cpu_function_call(cpu, __perf_install_in_context, event);
+		return;
+	}
+
+	/*
+	 * Should not happen, we validate the ctx is still alive before calling.
+	 */
+	if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
+		return;
+
 	/*
 	 * Installing events is tricky because we cannot rely on ctx->is_active
 	 * to be set in case this is the nr_events 0 -> 1 transition.
-	 *
-	 * So what we do is we add the event to the list here, which will allow
-	 * a future context switch to DTRT and then send a racy IPI. If the IPI
-	 * fails to hit the right task, this means a context switch must have
-	 * happened and that will have taken care of business.
 	 */
-	raw_spin_lock_irq(&ctx->lock);
-	task = ctx->task;
-
+again:
 	/*
-	 * If between ctx = find_get_context() and mutex_lock(&ctx->mutex) the
-	 * ctx gets destroyed, we must not install an event into it.
-	 *
-	 * This is normally tested for after we acquire the mutex, so this is
-	 * a sanity check.
+	 * Cannot use task_function_call() because we need to run on the task's
+	 * CPU regardless of whether its current or not.
 	 */
+	if (!cpu_function_call(task_cpu(task), __perf_install_in_context, event))
+		return;
+
+	raw_spin_lock_irq(&ctx->lock);
+	task = ctx->task;
 	if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
+		/*
+		 * Cannot happen because we already checked above (which also
+		 * cannot happen), and we hold ctx->mutex, which serializes us
+		 * against perf_event_exit_task_context().
		 */
 		raw_spin_unlock_irq(&ctx->lock);
 		return;
 	}
-
-	if (ctx->is_active) {
-		update_context_time(ctx);
-		update_cgrp_time_from_event(event);
-	}
-
-	add_event_to_ctx(event, ctx);
 	raw_spin_unlock_irq(&ctx->lock);
-
-	if (task)
-		task_function_call(task, __perf_install_in_context, ctx);
-	else
-		cpu_function_call(cpu, __perf_install_in_context, ctx);
+	/*
	 * Since !ctx->is_active doesn't mean anything, we must IPI
	 * unconditionally.
	 */
+	goto again;
 }
 
 /*
...