Commit f39d47ff authored by Stephane Eranian, committed by Ingo Molnar

perf: Fix double start/stop in x86_pmu_start()

This patch fixes a bug introduced by the following commit:

        e050e3f0 ("perf: Fix broken interrupt rate throttling")

That commit caused the following warning to pop up, depending on
the sampling frequency adjustments:

  ------------[ cut here ]------------
  WARNING: at arch/x86/kernel/cpu/perf_event.c:995 x86_pmu_start+0x79/0xd4()

It was caused by the following call sequence:

perf_adjust_freq_unthr_context.part() {
    stop()
    if (delta > 0) {
        perf_adjust_period() {
            if (period > 8*...) {
                stop()
                ...
                start()
            }
        }
    }
    start()
}

This sequence caused a double start and a double stop, thus
triggering the assertion in x86_pmu_start().
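
To make the failure mode concrete, here is a minimal user-space
sketch (hypothetical pmu_start()/pmu_stop() helpers, not the
kernel's code) of the state flag involved: stop() sets
PERF_HES_STOPPED, start() clears it, and a second start() on an
already-running event trips the assertion:

  #include <stdio.h>

  #define PERF_HES_STOPPED 0x01   /* mirrors the kernel flag name */

  struct hw_state { int state; };

  /* sketch of x86_pmu_stop(): mark the event stopped */
  static void pmu_stop(struct hw_state *hw)
  {
          hw->state |= PERF_HES_STOPPED;
  }

  /* sketch of x86_pmu_start(): warn when the event is not stopped */
  static void pmu_start(struct hw_state *hw)
  {
          if (!(hw->state & PERF_HES_STOPPED)) {
                  printf("WARNING: start of an already running event\n");
                  return;
          }
          hw->state &= ~PERF_HES_STOPPED;
  }

  int main(void)
  {
          struct hw_state hw = { .state = 0 };  /* event is running */

          pmu_stop(&hw);   /* outer stop in perf_adjust_freq_unthr_context() */
          pmu_stop(&hw);   /* inner stop in perf_adjust_period(): redundant */
          pmu_start(&hw);  /* inner start: event runs again */
          pmu_start(&hw);  /* outer start: fires the warning above */
          return 0;
  }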

The patch fixes the problem by avoiding the double calls. We
pass a new argument to perf_adjust_period() to indicate whether
or not the event is already stopped. We can't just remove the
stop/start from that function, because it is also called from
__perf_event_overflow(), where the event needs to be reloaded
via a back-to-back stop/start call.
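
Simplified from the diff below, the two call sites of
perf_adjust_period() now pass the flag explicitly:

  /* perf_adjust_freq_unthr_context(): the event was already stopped
   * above, so ask perf_adjust_period() not to stop/start it again: */
  if (delta > 0)
          perf_adjust_period(event, period, delta, false);

  /* __perf_event_overflow(): the event is still running, so
   * perf_adjust_period() must do the stop/start itself: */
  if (delta > 0 && delta < 2*TICK_NSEC)
          perf_adjust_period(event, delta, hwc->last_period, true);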

The patch reintroduces the assertion in x86_pmu_start() which
was removed by commit:

	84f2b9b2 ("perf: Remove deprecated WARN_ON_ONCE()")

In this second version, we have also added calls to disable/enable
the PMU during unthrottling or frequency adjustment, based on a
bug report of spurious NMI interrupts from Eric Dumazet.
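
Simplified from the diff below, the adjustment pass in
perf_adjust_freq_unthr_context() is now bracketed like this:

  raw_spin_lock(&ctx->lock);
  perf_pmu_disable(ctx->pmu);   /* keep the PMU quiet while events are reprogrammed */

  list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
          /* ... stop, adjust period, unthrottle, restart ... */
  }

  perf_pmu_enable(ctx->pmu);
  raw_spin_unlock(&ctx->lock);
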
Reported-and-tested-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Stephane Eranian <eranian@google.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: markus@trippelsdorf.de
Cc: paulus@samba.org
Link: http://lkml.kernel.org/r/20120207133956.GA4932@quad
[ Minor edits to the changelog and to the code ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 136e0b8e
arch/x86/kernel/cpu/perf_event.c:

@@ -986,6 +986,9 @@ static void x86_pmu_start(struct perf_event *event, int flags)
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int idx = event->hw.idx;
 
+	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+		return;
+
 	if (WARN_ON_ONCE(idx == -1))
 		return;
kernel/events/core.c:

@@ -2303,7 +2303,7 @@ do { \
 static DEFINE_PER_CPU(int, perf_throttled_count);
 static DEFINE_PER_CPU(u64, perf_throttled_seq);
 
-static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
+static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	s64 period, sample_period;
@@ -2322,9 +2322,13 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
 	hwc->sample_period = sample_period;
 
 	if (local64_read(&hwc->period_left) > 8*sample_period) {
-		event->pmu->stop(event, PERF_EF_UPDATE);
+		if (disable)
+			event->pmu->stop(event, PERF_EF_UPDATE);
+
 		local64_set(&hwc->period_left, 0);
-		event->pmu->start(event, PERF_EF_RELOAD);
+
+		if (disable)
+			event->pmu->start(event, PERF_EF_RELOAD);
 	}
 }
@@ -2350,6 +2354,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		return;
 
 	raw_spin_lock(&ctx->lock);
+	perf_pmu_disable(ctx->pmu);
 
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
@@ -2381,13 +2386,17 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		/*
 		 * restart the event
 		 * reload only if value has changed
+		 * we have stopped the event so tell that
+		 * to perf_adjust_period() to avoid stopping it
+		 * twice.
 		 */
 		if (delta > 0)
-			perf_adjust_period(event, period, delta);
+			perf_adjust_period(event, period, delta, false);
+
 		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
 	}
 
+	perf_pmu_enable(ctx->pmu);
 	raw_spin_unlock(&ctx->lock);
 }
@@ -4562,7 +4571,7 @@ static int __perf_event_overflow(struct perf_event *event,
 
 		hwc->freq_time_stamp = now;
 
 		if (delta > 0 && delta < 2*TICK_NSEC)
-			perf_adjust_period(event, delta, hwc->last_period);
+			perf_adjust_period(event, delta, hwc->last_period, true);
 	}
 
 	/*