Commit 9ffcfa6f authored by Stephane Eranian, committed by Ingo Molnar

perf_events: Revert: Fix transaction recovery in group_sched_in()

This patch reverts commit 8e5fc1a7 (perf_events: Fix transaction
recovery in group_sched_in()) because it had a flaw in the case where
the group could never be scheduled: it would cause time_enabled to go
negative.
Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <4cbeeeb7.0aefd80a.6e40.0e2f@mx.google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 96681fc3
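The bug being reverted comes down to timestamp arithmetic: perf derives time_enabled from differences of u64 timestamps (tstamp_enabled, tstamp_stopped), so if one of them stops being refreshed while the other moves forward, the subtraction wraps and the value reported to userspace looks negative. The following is a minimal standalone sketch of that failure mode, not kernel code: the field names mirror the kernel's, but the read rule (time_enabled = tstamp_stopped - tstamp_enabled for an event that is not running) is a deliberate simplification of the real update path.

/* Standalone illustration (assumed simplification, not kernel code). */
#include <stdio.h>
#include <stdint.h>

struct fake_event {
	uint64_t tstamp_enabled;	/* when the event was (re)enabled */
	uint64_t tstamp_stopped;	/* last time it was scheduled out */
};

/* Simplified read rule for an event that is not currently on the PMU. */
static uint64_t read_time_enabled(const struct fake_event *e)
{
	return e->tstamp_stopped - e->tstamp_enabled;
}

int main(void)
{
	struct fake_event ev = { .tstamp_enabled = 0, .tstamp_stopped = 0 };
	uint64_t ctx_time = 1000;	/* the context clock has advanced */

	/* The event is (re)enabled at the current context time ... */
	ev.tstamp_enabled = ctx_time;

	/*
	 * ... but its group can never be scheduled, and the reverted patch
	 * skipped the tstamp_stopped update for events that never ran, so
	 * tstamp_stopped keeps its stale, older value.
	 */

	uint64_t enabled = read_time_enabled(&ev);	/* 0 - 1000 wraps */
	printf("time_enabled = %llu (as s64: %lld)\n",
	       (unsigned long long)enabled, (long long)(int64_t)enabled);
	return 0;
}

This kind of wrap is what surfaces as the negative time_enabled mentioned in the commit message; the revert moves the tstamp_stopped update back into event_sched_out() itself, so the stop timestamp cannot lag behind.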
@@ -417,8 +417,8 @@ event_filter_match(struct perf_event *event)
 	return event->cpu == -1 || event->cpu == smp_processor_id();
 }
 
-static int
-__event_sched_out(struct perf_event *event,
+static void
+event_sched_out(struct perf_event *event,
 		  struct perf_cpu_context *cpuctx,
 		  struct perf_event_context *ctx)
 {
@@ -437,13 +437,14 @@ __event_sched_out(struct perf_event *event,
 	}
 
 	if (event->state != PERF_EVENT_STATE_ACTIVE)
-		return 0;
+		return;
 
 	event->state = PERF_EVENT_STATE_INACTIVE;
 	if (event->pending_disable) {
 		event->pending_disable = 0;
 		event->state = PERF_EVENT_STATE_OFF;
 	}
+	event->tstamp_stopped = ctx->time;
 	event->pmu->del(event, 0);
 	event->oncpu = -1;
 
@@ -452,19 +453,6 @@ __event_sched_out(struct perf_event *event,
 	ctx->nr_active--;
 	if (event->attr.exclusive || !cpuctx->active_oncpu)
 		cpuctx->exclusive = 0;
-	return 1;
-}
-
-static void
-event_sched_out(struct perf_event *event,
-		struct perf_cpu_context *cpuctx,
-		struct perf_event_context *ctx)
-{
-	int ret;
-
-	ret = __event_sched_out(event, cpuctx, ctx);
-	if (ret)
-		event->tstamp_stopped = ctx->time;
 }
 
 static void
@@ -664,7 +652,7 @@ void perf_event_disable(struct perf_event *event)
 }
 
 static int
-__event_sched_in(struct perf_event *event,
+event_sched_in(struct perf_event *event,
 		 struct perf_cpu_context *cpuctx,
 		 struct perf_event_context *ctx)
 {
@@ -684,6 +672,8 @@ __event_sched_in(struct perf_event *event,
 		return -EAGAIN;
 	}
 
+	event->tstamp_running += ctx->time - event->tstamp_stopped;
+
 	if (!is_software_event(event))
 		cpuctx->active_oncpu++;
 	ctx->nr_active++;
@@ -694,35 +684,6 @@ __event_sched_in(struct perf_event *event,
 	return 0;
 }
 
-static inline int
-event_sched_in(struct perf_event *event,
-	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx)
-{
-	int ret = __event_sched_in(event, cpuctx, ctx);
-	if (ret)
-		return ret;
-
-	event->tstamp_running += ctx->time - event->tstamp_stopped;
-
-	return 0;
-}
-
-static void
-group_commit_event_sched_in(struct perf_event *group_event,
-	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx)
-{
-	struct perf_event *event;
-	u64 now = ctx->time;
-
-	group_event->tstamp_running += now - group_event->tstamp_stopped;
-
-	/*
-	 * Schedule in siblings as one group (if any):
-	 */
-	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
-		event->tstamp_running += now - event->tstamp_stopped;
-	}
-}
-
 static int
 group_sched_in(struct perf_event *group_event,
 	       struct perf_cpu_context *cpuctx,
@@ -736,13 +697,7 @@ group_sched_in(struct perf_event *group_event,
 
 	pmu->start_txn(pmu);
 
-	/*
-	 * use __event_sched_in() to delay updating tstamp_running
-	 * until the transaction is committed. In case of failure
-	 * we will keep an unmodified tstamp_running which is a
-	 * requirement to get correct timing information
-	 */
-	if (__event_sched_in(group_event, cpuctx, ctx)) {
+	if (event_sched_in(group_event, cpuctx, ctx)) {
 		pmu->cancel_txn(pmu);
 		return -EAGAIN;
 	}
@@ -751,31 +706,26 @@ group_sched_in(struct perf_event *group_event,
 	 * Schedule in siblings as one group (if any):
 	 */
 	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
-		if (__event_sched_in(event, cpuctx, ctx)) {
+		if (event_sched_in(event, cpuctx, ctx)) {
 			partial_group = event;
 			goto group_error;
 		}
 	}
 
-	if (!pmu->commit_txn(pmu)) {
-		/* commit tstamp_running */
-		group_commit_event_sched_in(group_event, cpuctx, ctx);
+	if (!pmu->commit_txn(pmu))
 		return 0;
-	}
 
 group_error:
 	/*
 	 * Groups can be scheduled in as one unit only, so undo any
 	 * partial group before returning:
-	 *
-	 * use __event_sched_out() to avoid updating tstamp_stopped
-	 * because the event never actually ran
 	 */
 	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
 			break;
-		__event_sched_out(event, cpuctx, ctx);
+		event_sched_out(event, cpuctx, ctx);
 	}
-	__event_sched_out(group_event, cpuctx, ctx);
+	event_sched_out(group_event, cpuctx, ctx);
 
 	pmu->cancel_txn(pmu);
...
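The two lines the revert adds back (event->tstamp_stopped = ctx->time in event_sched_out(), and event->tstamp_running += ctx->time - event->tstamp_stopped in event_sched_in()) work as a pair: the stop timestamp records when the event left the PMU, and the next schedule-in credits the stopped gap so tstamp_running keeps tracking time actually spent counting. A toy standalone sketch of that pairing, with hypothetical names and a plain integer standing in for ctx->time:

/* Toy model of the restored pairing (illustrative, not kernel code):
 * sched_in() advances tstamp_running by the time spent stopped,
 * sched_out() stamps the stop time, so running time while on the PMU
 * is simply now - tstamp_running. */
#include <stdio.h>
#include <stdint.h>

struct toy_event {
	uint64_t tstamp_running;	/* running-time baseline */
	uint64_t tstamp_stopped;	/* last schedule-out time */
};

static void toy_sched_in(struct toy_event *e, uint64_t now)
{
	e->tstamp_running += now - e->tstamp_stopped;
}

static void toy_sched_out(struct toy_event *e, uint64_t now)
{
	e->tstamp_stopped = now;
}

static uint64_t toy_time_running(const struct toy_event *e, uint64_t now, int on_pmu)
{
	return (on_pmu ? now : e->tstamp_stopped) - e->tstamp_running;
}

int main(void)
{
	struct toy_event ev = { 0, 0 };

	toy_sched_in(&ev, 100);		/* on the PMU from t=100 */
	toy_sched_out(&ev, 300);	/* off at t=300: ran for 200 */
	toy_sched_in(&ev, 500);		/* back on at t=500 */

	printf("running at t=600: %llu\n",	/* 200 + 100 = 300 */
	       (unsigned long long)toy_time_running(&ev, 600, 1));
	return 0;
}

The reverted patch tried to defer these updates until the group transaction committed; the revert returns to the simpler unconditional updates shown above.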