Commit 4c9e2542 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar

perf_counter: change event definition

Currently the definition of an event is slightly ambiguous. We have
wakeup events, for poll() and SIGIO, which are either generated
when a record crosses a page boundary (hw_events.wakeup_events == 0),
or every wakeup_events new records.

Now a record can be either a counter overflow record, or a number of
different things, like the mmap PROT_EXEC region notifications.

Then there is the PERF_COUNTER_IOC_REFRESH event limit, which only
considers counter overflows.

This patch changes the wakeup_events and SIGIO notification to only
consider overflow events. Furthermore it changes the SIGIO notification
to report SIGHUP when the event limit is reached and the counter will
be disabled.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090406094518.266679874@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 0c593b34
...@@ -439,6 +439,7 @@ struct perf_counter { ...@@ -439,6 +439,7 @@ struct perf_counter {
/* delayed work for NMIs and such */ /* delayed work for NMIs and such */
int pending_wakeup; int pending_wakeup;
int pending_kill;
int pending_disable; int pending_disable;
struct perf_pending_entry pending; struct perf_pending_entry pending;
......
...@@ -1596,7 +1596,11 @@ void perf_counter_wakeup(struct perf_counter *counter) ...@@ -1596,7 +1596,11 @@ void perf_counter_wakeup(struct perf_counter *counter)
rcu_read_unlock(); rcu_read_unlock();
wake_up_all(&counter->waitq); wake_up_all(&counter->waitq);
kill_fasync(&counter->fasync, SIGIO, POLL_IN);
if (counter->pending_kill) {
kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
counter->pending_kill = 0;
}
} }
/* /*
...@@ -1727,6 +1731,7 @@ struct perf_output_handle { ...@@ -1727,6 +1731,7 @@ struct perf_output_handle {
unsigned int head; unsigned int head;
int wakeup; int wakeup;
int nmi; int nmi;
int overflow;
}; };
static inline void __perf_output_wakeup(struct perf_output_handle *handle) static inline void __perf_output_wakeup(struct perf_output_handle *handle)
...@@ -1741,7 +1746,7 @@ static inline void __perf_output_wakeup(struct perf_output_handle *handle) ...@@ -1741,7 +1746,7 @@ static inline void __perf_output_wakeup(struct perf_output_handle *handle)
static int perf_output_begin(struct perf_output_handle *handle, static int perf_output_begin(struct perf_output_handle *handle,
struct perf_counter *counter, unsigned int size, struct perf_counter *counter, unsigned int size,
int nmi) int nmi, int overflow)
{ {
struct perf_mmap_data *data; struct perf_mmap_data *data;
unsigned int offset, head; unsigned int offset, head;
...@@ -1753,6 +1758,7 @@ static int perf_output_begin(struct perf_output_handle *handle, ...@@ -1753,6 +1758,7 @@ static int perf_output_begin(struct perf_output_handle *handle,
handle->counter = counter; handle->counter = counter;
handle->nmi = nmi; handle->nmi = nmi;
handle->overflow = overflow;
if (!data->nr_pages) if (!data->nr_pages)
goto fail; goto fail;
...@@ -1816,7 +1822,7 @@ static void perf_output_end(struct perf_output_handle *handle) ...@@ -1816,7 +1822,7 @@ static void perf_output_end(struct perf_output_handle *handle)
{ {
int wakeup_events = handle->counter->hw_event.wakeup_events; int wakeup_events = handle->counter->hw_event.wakeup_events;
if (wakeup_events) { if (handle->overflow && wakeup_events) {
int events = atomic_inc_return(&handle->data->events); int events = atomic_inc_return(&handle->data->events);
if (events >= wakeup_events) { if (events >= wakeup_events) {
atomic_sub(wakeup_events, &handle->data->events); atomic_sub(wakeup_events, &handle->data->events);
...@@ -1891,7 +1897,7 @@ static void perf_counter_output(struct perf_counter *counter, ...@@ -1891,7 +1897,7 @@ static void perf_counter_output(struct perf_counter *counter,
header.size += sizeof(u64); header.size += sizeof(u64);
} }
ret = perf_output_begin(&handle, counter, header.size, nmi); ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
if (ret) if (ret)
return; return;
...@@ -1955,7 +1961,7 @@ static void perf_counter_mmap_output(struct perf_counter *counter, ...@@ -1955,7 +1961,7 @@ static void perf_counter_mmap_output(struct perf_counter *counter,
{ {
struct perf_output_handle handle; struct perf_output_handle handle;
int size = mmap_event->event.header.size; int size = mmap_event->event.header.size;
int ret = perf_output_begin(&handle, counter, size, 0); int ret = perf_output_begin(&handle, counter, size, 0, 0);
if (ret) if (ret)
return; return;
...@@ -2084,8 +2090,10 @@ int perf_counter_overflow(struct perf_counter *counter, ...@@ -2084,8 +2090,10 @@ int perf_counter_overflow(struct perf_counter *counter,
int events = atomic_read(&counter->event_limit); int events = atomic_read(&counter->event_limit);
int ret = 0; int ret = 0;
counter->pending_kill = POLL_IN;
if (events && atomic_dec_and_test(&counter->event_limit)) { if (events && atomic_dec_and_test(&counter->event_limit)) {
ret = 1; ret = 1;
counter->pending_kill = POLL_HUP;
if (nmi) { if (nmi) {
counter->pending_disable = 1; counter->pending_disable = 1;
perf_pending_queue(&counter->pending, perf_pending_queue(&counter->pending,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment