Commit 7b648018 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar

perf/core: Collapse more IPI loops

This patch collapses the two 'hard' cases, which are
perf_event_{dis,en}able().

I cannot seem to convince myself the current code is correct.

So starting with perf_event_disable(); we don't strictly need to test
for event->state == ACTIVE, ctx->is_active is enough. If the event is
not scheduled while the ctx is, __perf_event_disable() still does the
right thing. It's a little less efficient to IPI in that case, but
over-all simpler.

For perf_event_enable(); the same goes, but I think that's actually
broken in its current form. The current condition is: ctx->is_active
&& event->state == OFF, that means it doesn't do anything when
!ctx->is_active && event->state == OFF. This is wrong, it should still
mark the event INACTIVE in that case, otherwise we'll still not try
and schedule the event once the context becomes active again.

This patch implements the two functions using the new
event_function_call() and does away with the tricky event->state
tests.
Signed-off-by: default avatarPeter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: default avatarAlexander Shishkin <alexander.shishkin@intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: default avatarIngo Molnar <mingo@kernel.org>
parent 9cc96b0a
...@@ -1766,6 +1766,20 @@ int __perf_event_disable(void *info) ...@@ -1766,6 +1766,20 @@ int __perf_event_disable(void *info)
return 0; return 0;
} }
void ___perf_event_disable(void *info)
{
	struct perf_event *event = info;

	/*
	 * The caller holds ctx->lock, so this context cannot be scheduled
	 * in concurrently; changing the event state here is safe.  Only an
	 * INACTIVE event needs to transition to OFF.
	 */
	if (event->state != PERF_EVENT_STATE_INACTIVE)
		return;

	update_group_times(event);
	event->state = PERF_EVENT_STATE_OFF;
}
/* /*
* Disable a event. * Disable a event.
* *
...@@ -1782,43 +1796,16 @@ int __perf_event_disable(void *info) ...@@ -1782,43 +1796,16 @@ int __perf_event_disable(void *info)
static void _perf_event_disable(struct perf_event *event) static void _perf_event_disable(struct perf_event *event)
{ {
struct perf_event_context *ctx = event->ctx; struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
if (!task) {
/*
* Disable the event on the cpu that it's on
*/
cpu_function_call(event->cpu, __perf_event_disable, event);
return;
}
retry:
if (!task_function_call(task, __perf_event_disable, event))
return;
raw_spin_lock_irq(&ctx->lock); raw_spin_lock_irq(&ctx->lock);
/* if (event->state <= PERF_EVENT_STATE_OFF) {
* If the event is still active, we need to retry the cross-call.
*/
if (event->state == PERF_EVENT_STATE_ACTIVE) {
raw_spin_unlock_irq(&ctx->lock); raw_spin_unlock_irq(&ctx->lock);
/* return;
* Reload the task pointer, it might have been changed by
* a concurrent perf_event_context_sched_out().
*/
task = ctx->task;
goto retry;
}
/*
* Since we have the lock this context can't be scheduled
* in, so we can change the state safely.
*/
if (event->state == PERF_EVENT_STATE_INACTIVE) {
update_group_times(event);
event->state = PERF_EVENT_STATE_OFF;
} }
raw_spin_unlock_irq(&ctx->lock); raw_spin_unlock_irq(&ctx->lock);
event_function_call(event, __perf_event_disable,
___perf_event_disable, event);
} }
/* /*
...@@ -2269,6 +2256,11 @@ static int __perf_event_enable(void *info) ...@@ -2269,6 +2256,11 @@ static int __perf_event_enable(void *info)
return 0; return 0;
} }
/* Mark the event enabled; runs under ctx->lock when the ctx is inactive. */
void ___perf_event_enable(void *info)
{
	struct perf_event *event = info;

	__perf_event_mark_enabled(event);
}
/* /*
* Enable a event. * Enable a event.
* *
...@@ -2281,58 +2273,26 @@ static int __perf_event_enable(void *info) ...@@ -2281,58 +2273,26 @@ static int __perf_event_enable(void *info)
static void _perf_event_enable(struct perf_event *event) static void _perf_event_enable(struct perf_event *event)
{ {
struct perf_event_context *ctx = event->ctx; struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
if (!task) { raw_spin_lock_irq(&ctx->lock);
/* if (event->state >= PERF_EVENT_STATE_INACTIVE) {
* Enable the event on the cpu that it's on raw_spin_unlock_irq(&ctx->lock);
*/
cpu_function_call(event->cpu, __perf_event_enable, event);
return; return;
} }
raw_spin_lock_irq(&ctx->lock);
if (event->state >= PERF_EVENT_STATE_INACTIVE)
goto out;
/* /*
* If the event is in error state, clear that first. * If the event is in error state, clear that first.
* That way, if we see the event in error state below, we *
* know that it has gone back into error state, as distinct * That way, if we see the event in error state below, we know that it
* from the task having been scheduled away before the * has gone back into error state, as distinct from the task having
* cross-call arrived. * been scheduled away before the cross-call arrived.
*/ */
if (event->state == PERF_EVENT_STATE_ERROR) if (event->state == PERF_EVENT_STATE_ERROR)
event->state = PERF_EVENT_STATE_OFF; event->state = PERF_EVENT_STATE_OFF;
retry:
if (!ctx->is_active) {
__perf_event_mark_enabled(event);
goto out;
}
raw_spin_unlock_irq(&ctx->lock); raw_spin_unlock_irq(&ctx->lock);
if (!task_function_call(task, __perf_event_enable, event)) event_function_call(event, __perf_event_enable,
return; ___perf_event_enable, event);
raw_spin_lock_irq(&ctx->lock);
/*
* If the context is active and the event is still off,
* we need to retry the cross-call.
*/
if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
/*
* task could have been flipped by a concurrent
* perf_event_context_sched_out()
*/
task = ctx->task;
goto retry;
}
out:
raw_spin_unlock_irq(&ctx->lock);
} }
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment