Commit 75f937f2 authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: Fix ctx->mutex vs counter->mutex inversion

Simon triggered a lockdep inversion report about us taking ctx->mutex
vs counter->mutex in inverse orders. Fix that up.
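
For context, the inversion is the classic AB-BA pattern: one code path acquires ctx->mutex and then counter->mutex, while another path acquires the same two locks in the opposite order. The sketch below is a minimal userspace illustration of that pattern only; the lock names merely mirror the two kernel mutexes, and none of this is the actual perf_counter code.

/*
 * Illustration only: an AB-BA ordering of two pthread mutexes, the kind
 * of inconsistency lockdep reports for the kernel locks above.
 * Build with: cc abba.c -o abba -lpthread
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ctx_mutex     = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t counter_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Path 1: takes ctx_mutex, then counter_mutex. */
static void *path_ctx_then_counter(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&ctx_mutex);
	pthread_mutex_lock(&counter_mutex);
	puts("path 1: ctx -> counter");
	pthread_mutex_unlock(&counter_mutex);
	pthread_mutex_unlock(&ctx_mutex);
	return NULL;
}

/* Path 2: takes counter_mutex, then ctx_mutex -- the inverse order. */
static void *path_counter_then_ctx(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&counter_mutex);
	pthread_mutex_lock(&ctx_mutex);
	puts("path 2: counter -> ctx");
	pthread_mutex_unlock(&ctx_mutex);
	pthread_mutex_unlock(&counter_mutex);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, path_ctx_then_counter, NULL);
	pthread_create(&t2, NULL, path_counter_then_ctx, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}

Whether the two threads actually deadlock is timing-dependent; lockdep flags the inconsistent ordering either way. The change below avoids it by having perf_counter_for_each() take only ctx->mutex and reach the children through perf_counter_for_each_child(), so the per-counter child_mutex is always taken inside ctx->mutex.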
Reported-by: Simon Holm Thøgersen <odie@cs.aau.dk>
Tested-by: Simon Holm Thøgersen <odie@cs.aau.dk>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 613d8602
@@ -1620,22 +1620,6 @@ static void perf_counter_reset(struct perf_counter *counter)
 	perf_counter_update_userpage(counter);
 }
 
-static void perf_counter_for_each_sibling(struct perf_counter *counter,
-					   void (*func)(struct perf_counter *))
-{
-	struct perf_counter_context *ctx = counter->ctx;
-	struct perf_counter *sibling;
-
-	WARN_ON_ONCE(ctx->parent_ctx);
-	mutex_lock(&ctx->mutex);
-	counter = counter->group_leader;
-
-	func(counter);
-	list_for_each_entry(sibling, &counter->sibling_list, list_entry)
-		func(sibling);
-	mutex_unlock(&ctx->mutex);
-}
-
 /*
  * Holding the top-level counter's child_mutex means that any
  * descendant process that has inherited this counter will block
@@ -1658,14 +1642,18 @@ static void perf_counter_for_each_child(struct perf_counter *counter,
 static void perf_counter_for_each(struct perf_counter *counter,
 				  void (*func)(struct perf_counter *))
 {
-	struct perf_counter *child;
+	struct perf_counter_context *ctx = counter->ctx;
+	struct perf_counter *sibling;
 
-	WARN_ON_ONCE(counter->ctx->parent_ctx);
-	mutex_lock(&counter->child_mutex);
-	perf_counter_for_each_sibling(counter, func);
-	list_for_each_entry(child, &counter->child_list, child_list)
-		perf_counter_for_each_sibling(child, func);
-	mutex_unlock(&counter->child_mutex);
+	WARN_ON_ONCE(ctx->parent_ctx);
+	mutex_lock(&ctx->mutex);
+	counter = counter->group_leader;
+
+	perf_counter_for_each_child(counter, func);
+	func(counter);
+	list_for_each_entry(sibling, &counter->sibling_list, list_entry)
+		perf_counter_for_each_child(counter, func);
+	mutex_unlock(&ctx->mutex);
 }
 
 static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)