Commit c66de4a5 authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: uncouple data_head updates from wakeups

Keep data_head up-to-date irrespective of notifications. This fixes
the case where you disable a counter and don't get a notification for
the last few pending events, and it also allows polling usage.

[ Impact: increase precision of perfcounter mmap-ed fields ]
Suggested-by: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <20090505155436.925084300@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 066d7dea
...@@ -362,9 +362,11 @@ struct perf_mmap_data { ...@@ -362,9 +362,11 @@ struct perf_mmap_data {
atomic_t head; /* write position */ atomic_t head; /* write position */
atomic_t events; /* event limit */ atomic_t events; /* event limit */
atomic_t wakeup_head; /* completed head */ atomic_t done_head; /* completed head */
atomic_t lock; /* concurrent writes */ atomic_t lock; /* concurrent writes */
atomic_t wakeup; /* needs a wakeup */
struct perf_counter_mmap_page *user_page; struct perf_counter_mmap_page *user_page;
void *data_pages[0]; void *data_pages[0];
}; };
......
...@@ -1696,7 +1696,6 @@ struct perf_output_handle { ...@@ -1696,7 +1696,6 @@ struct perf_output_handle {
struct perf_mmap_data *data; struct perf_mmap_data *data;
unsigned int offset; unsigned int offset;
unsigned int head; unsigned int head;
int wakeup;
int nmi; int nmi;
int overflow; int overflow;
int locked; int locked;
...@@ -1752,8 +1751,7 @@ static void perf_output_unlock(struct perf_output_handle *handle) ...@@ -1752,8 +1751,7 @@ static void perf_output_unlock(struct perf_output_handle *handle)
struct perf_mmap_data *data = handle->data; struct perf_mmap_data *data = handle->data;
int head, cpu; int head, cpu;
if (handle->wakeup) data->done_head = data->head;
data->wakeup_head = data->head;
if (!handle->locked) if (!handle->locked)
goto out; goto out;
...@@ -1764,13 +1762,11 @@ static void perf_output_unlock(struct perf_output_handle *handle) ...@@ -1764,13 +1762,11 @@ static void perf_output_unlock(struct perf_output_handle *handle)
* before we publish the new head, matched by a rmb() in userspace when * before we publish the new head, matched by a rmb() in userspace when
* reading this position. * reading this position.
*/ */
while ((head = atomic_xchg(&data->wakeup_head, 0))) { while ((head = atomic_xchg(&data->done_head, 0)))
data->user_page->data_head = head; data->user_page->data_head = head;
handle->wakeup = 1;
}
/* /*
* NMI can happen here, which means we can miss a wakeup_head update. * NMI can happen here, which means we can miss a done_head update.
*/ */
cpu = atomic_xchg(&data->lock, 0); cpu = atomic_xchg(&data->lock, 0);
...@@ -1779,7 +1775,7 @@ static void perf_output_unlock(struct perf_output_handle *handle) ...@@ -1779,7 +1775,7 @@ static void perf_output_unlock(struct perf_output_handle *handle)
/* /*
* Therefore we have to validate we did not indeed do so. * Therefore we have to validate we did not indeed do so.
*/ */
if (unlikely(atomic_read(&data->wakeup_head))) { if (unlikely(atomic_read(&data->done_head))) {
/* /*
* Since we had it locked, we can lock it again. * Since we had it locked, we can lock it again.
*/ */
...@@ -1789,7 +1785,7 @@ static void perf_output_unlock(struct perf_output_handle *handle) ...@@ -1789,7 +1785,7 @@ static void perf_output_unlock(struct perf_output_handle *handle)
goto again; goto again;
} }
if (handle->wakeup) if (atomic_xchg(&data->wakeup, 0))
perf_output_wakeup(handle); perf_output_wakeup(handle);
out: out:
local_irq_restore(handle->flags); local_irq_restore(handle->flags);
...@@ -1824,7 +1820,9 @@ static int perf_output_begin(struct perf_output_handle *handle, ...@@ -1824,7 +1820,9 @@ static int perf_output_begin(struct perf_output_handle *handle,
handle->offset = offset; handle->offset = offset;
handle->head = head; handle->head = head;
handle->wakeup = (offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT);
if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
atomic_set(&data->wakeup, 1);
return 0; return 0;
...@@ -1882,7 +1880,7 @@ static void perf_output_end(struct perf_output_handle *handle) ...@@ -1882,7 +1880,7 @@ static void perf_output_end(struct perf_output_handle *handle)
int events = atomic_inc_return(&data->events); int events = atomic_inc_return(&data->events);
if (events >= wakeup_events) { if (events >= wakeup_events) {
atomic_sub(wakeup_events, &data->events); atomic_sub(wakeup_events, &data->events);
handle->wakeup = 1; atomic_set(&data->wakeup, 1);
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment