Commit 391b170f authored by Pekka Paalanen, committed by Ingo Molnar

mmiotrace: count events lost due to not recording

Impact: enhances lost events counting in mmiotrace

The tracing framework, or the ring buffer facility it uses, has a switch
to stop recording data. When recording is off, the trace events will be
lost. The framework does not count these, so mmiotrace has to count them
itself.
Signed-off-by: Pekka Paalanen <pq@iki.fi>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent d2f8d7ee
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/mmiotrace.h>
 #include <linux/pci.h>
+#include <asm/atomic.h>
 
 #include "trace.h"
 
@@ -19,6 +20,7 @@ struct header_iter {
 static struct trace_array *mmio_trace_array;
 static bool overrun_detected;
 static unsigned long prev_overruns;
+static atomic_t dropped_count;
 
 static void mmio_reset_data(struct trace_array *tr)
 {
@@ -121,11 +123,11 @@ static void mmio_close(struct trace_iterator *iter)
 
 static unsigned long count_overruns(struct trace_iterator *iter)
 {
-	unsigned long cnt = 0;
+	unsigned long cnt = atomic_xchg(&dropped_count, 0);
 	unsigned long over = ring_buffer_overruns(iter->tr->buffer);
 
 	if (over > prev_overruns)
-		cnt = over - prev_overruns;
+		cnt += over - prev_overruns;
 	prev_overruns = over;
 	return cnt;
 }
@@ -310,8 +312,10 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 
 	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
 					 &irq_flags);
-	if (!event)
+	if (!event) {
+		atomic_inc(&dropped_count);
 		return;
+	}
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
 	entry->ent.type = TRACE_MMIO_RW;
@@ -338,8 +342,10 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 
 	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
 					 &irq_flags);
-	if (!event)
+	if (!event) {
+		atomic_inc(&dropped_count);
 		return;
+	}
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
 	entry->ent.type = TRACE_MMIO_MAP;
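
The patch follows a simple accounting pattern: the producer bumps a private atomic counter whenever the ring buffer refuses a reservation, and the reader drains that counter with an atomic exchange so each drop is reported exactly once and folded into the overrun total. Below is a minimal userspace sketch of that pattern, written with C11 atomics instead of the kernel's atomic_t API; the names dropped_events, record_event and count_lost are illustrative only, not kernel symbols.

/*
 * Sketch of the lost-event accounting pattern used by this patch,
 * using C11 atomics in place of atomic_inc()/atomic_xchg().
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong dropped_events;

/* Producer side: called when the buffer refuses to reserve space. */
static void record_event(int buffer_accepts_writes)
{
	if (!buffer_accepts_writes) {
		/* The event is lost; remember that it happened. */
		atomic_fetch_add(&dropped_events, 1);
		return;
	}
	/* ... otherwise write the event into the buffer ... */
}

/* Reader side: fold the private drop count into the reported total. */
static unsigned long count_lost(unsigned long ring_buffer_overruns)
{
	/* Read and reset in one step so no drop is counted twice. */
	unsigned long lost = atomic_exchange(&dropped_events, 0);

	return lost + ring_buffer_overruns;
}

int main(void)
{
	record_event(0);	/* recording switched off -> counted as lost */
	record_event(0);
	printf("lost: %lu\n", count_lost(3));	/* prints "lost: 5" */
	return 0;
}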