Commit f55fc2a5 authored by Peter Zijlstra, committed by Ingo Molnar

perf: Restructure perf syscall point of no return

The exclusive_event_installable() stuff only works because it's
exclusive with the grouping bits.

Rework the code such that there is a sane place to error out before we
go do things we cannot undo.
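
In other words, the open path is reshaped into a "lock, validate, then commit" pattern: take ctx::mutex (and gctx::mutex in the move_group case) up front, run every check that can still fail, and only then cross the point of no return, so a single err_locked label can unwind the locks. Below is a minimal user-space sketch of that control flow; the names (struct ctx, exclusive_ok(), install(), event_open()) are hypothetical stand-ins for the perf objects, not the kernel code itself.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a perf event context. */
struct ctx {
        pthread_mutex_t mutex;
        int installed;          /* events already installed in this context */
};

/* Stand-in for exclusive_event_installable(): refuse a second event. */
static bool exclusive_ok(struct ctx *ctx)
{
        return ctx->installed == 0;
}

/* Stand-in for the irreversible install / ctx-swizzling step. */
static void install(struct ctx *ctx)
{
        ctx->installed++;
}

static int event_open(struct ctx *ctx, struct ctx *gctx, bool move_group)
{
        int err = 0;

        /* 1) Take every lock we will need, in one place. */
        if (move_group)
                pthread_mutex_lock(&gctx->mutex);
        pthread_mutex_lock(&ctx->mutex);

        /* 2) Run all checks that may still fail; nothing has been done yet. */
        if (!exclusive_ok(ctx)) {
                err = -EBUSY;
                goto err_locked;        /* the sane place to error out */
        }

        /* 3) Point of no return: only irreversible work past here. */
        install(ctx);

        /* Success falls through to the same unlock sequence for brevity. */
err_locked:
        pthread_mutex_unlock(&ctx->mutex);
        if (move_group)
                pthread_mutex_unlock(&gctx->mutex);
        return err;
}

int main(void)
{
        struct ctx ctx = { PTHREAD_MUTEX_INITIALIZER, 0 };

        printf("first open:  %d\n", event_open(&ctx, NULL, false));  /* 0 */
        printf("second open: %d\n", event_open(&ctx, NULL, false));  /* -EBUSY */
        return 0;
}

In the kernel patch itself the success path unlocks inline and err_locked is reached only on failure, but the ordering -- all fallible checks before any irreversible step -- is the same.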
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent dfe1f3cb
@@ -8297,13 +8297,30 @@ SYSCALL_DEFINE5(perf_event_open,
 
 	if (move_group) {
 		gctx = group_leader->ctx;
+		mutex_lock_double(&gctx->mutex, &ctx->mutex);
+	} else {
+		mutex_lock(&ctx->mutex);
+	}
+
+	/*
+	 * Must be under the same ctx::mutex as perf_install_in_context(),
+	 * because we need to serialize with concurrent event creation.
+	 */
+	if (!exclusive_event_installable(event, ctx)) {
+		/* exclusive and group stuff are assumed mutually exclusive */
+		WARN_ON_ONCE(move_group);
 
+		err = -EBUSY;
+		goto err_locked;
+	}
+
+	WARN_ON_ONCE(ctx->parent_ctx);
+
+	if (move_group) {
 		/*
 		 * See perf_event_ctx_lock() for comments on the details
 		 * of swizzling perf_event::ctx.
 		 */
-		mutex_lock_double(&gctx->mutex, &ctx->mutex);
-
 		perf_remove_from_context(group_leader, false);
 
 		list_for_each_entry(sibling, &group_leader->sibling_list,
@@ -8311,13 +8328,7 @@ SYSCALL_DEFINE5(perf_event_open,
 			perf_remove_from_context(sibling, false);
 			put_ctx(gctx);
 		}
-	} else {
-		mutex_lock(&ctx->mutex);
-	}
 
-	WARN_ON_ONCE(ctx->parent_ctx);
-
-	if (move_group) {
 		/*
 		 * Wait for everybody to stop referencing the events through
 		 * the old lists, before installing it on new lists.
@@ -8349,22 +8360,20 @@ SYSCALL_DEFINE5(perf_event_open,
 		perf_event__state_init(group_leader);
 		perf_install_in_context(ctx, group_leader, group_leader->cpu);
 		get_ctx(ctx);
-	}
 
-	if (!exclusive_event_installable(event, ctx)) {
-		err = -EBUSY;
-		mutex_unlock(&ctx->mutex);
-		fput(event_file);
-		goto err_context;
+		/*
+		 * Now that all events are installed in @ctx, nothing
+		 * references @gctx anymore, so drop the last reference we have
+		 * on it.
+		 */
+		put_ctx(gctx);
 	}
 
 	perf_install_in_context(ctx, event, event->cpu);
 	perf_unpin_context(ctx);
 
-	if (move_group) {
+	if (move_group)
 		mutex_unlock(&gctx->mutex);
-		put_ctx(gctx);
-	}
 	mutex_unlock(&ctx->mutex);
 
 	put_online_cpus();
@@ -8391,6 +8400,12 @@ SYSCALL_DEFINE5(perf_event_open,
 	fd_install(event_fd, event_file);
 	return event_fd;
 
+err_locked:
+	if (move_group)
+		mutex_unlock(&gctx->mutex);
+	mutex_unlock(&ctx->mutex);
+/* err_file: */
+	fput(event_file);
 err_context:
 	perf_unpin_context(ctx);
 	put_ctx(ctx);