Commit 8e3747c1 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar

perf_counter: Change data head from u32 to u64

Since some people worried that 4G might not be a large enough
mmap data window, extend it to 64 bit for capable
platforms.
Reported-by: Stephane Eranian <eranian@googlemail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent e4abb5d4
...@@ -212,7 +212,7 @@ struct perf_counter_mmap_page { ...@@ -212,7 +212,7 @@ struct perf_counter_mmap_page {
* User-space reading this value should issue an rmb(), on SMP capable * User-space reading this value should issue an rmb(), on SMP capable
* platforms, after reading this value -- see perf_counter_wakeup(). * platforms, after reading this value -- see perf_counter_wakeup().
*/ */
__u32 data_head; /* head in the data section */ __u64 data_head; /* head in the data section */
}; };
#define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0) #define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0)
...@@ -397,10 +397,11 @@ struct perf_mmap_data { ...@@ -397,10 +397,11 @@ struct perf_mmap_data {
int nr_locked; /* nr pages mlocked */ int nr_locked; /* nr pages mlocked */
atomic_t poll; /* POLL_ for wakeups */ atomic_t poll; /* POLL_ for wakeups */
atomic_t head; /* write position */
atomic_t events; /* event limit */ atomic_t events; /* event limit */
atomic_t done_head; /* completed head */ atomic_long_t head; /* write position */
atomic_long_t done_head; /* completed head */
atomic_t lock; /* concurrent writes */ atomic_t lock; /* concurrent writes */
atomic_t wakeup; /* needs a wakeup */ atomic_t wakeup; /* needs a wakeup */
......
...@@ -2067,8 +2067,8 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) ...@@ -2067,8 +2067,8 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
struct perf_output_handle { struct perf_output_handle {
struct perf_counter *counter; struct perf_counter *counter;
struct perf_mmap_data *data; struct perf_mmap_data *data;
unsigned int offset; unsigned long head;
unsigned int head; unsigned long offset;
int nmi; int nmi;
int overflow; int overflow;
int locked; int locked;
...@@ -2122,7 +2122,8 @@ static void perf_output_lock(struct perf_output_handle *handle) ...@@ -2122,7 +2122,8 @@ static void perf_output_lock(struct perf_output_handle *handle)
static void perf_output_unlock(struct perf_output_handle *handle) static void perf_output_unlock(struct perf_output_handle *handle)
{ {
struct perf_mmap_data *data = handle->data; struct perf_mmap_data *data = handle->data;
int head, cpu; unsigned long head;
int cpu;
data->done_head = data->head; data->done_head = data->head;
...@@ -2135,7 +2136,7 @@ static void perf_output_unlock(struct perf_output_handle *handle) ...@@ -2135,7 +2136,7 @@ static void perf_output_unlock(struct perf_output_handle *handle)
* before we publish the new head, matched by a rmb() in userspace when * before we publish the new head, matched by a rmb() in userspace when
* reading this position. * reading this position.
*/ */
while ((head = atomic_xchg(&data->done_head, 0))) while ((head = atomic_long_xchg(&data->done_head, 0)))
data->user_page->data_head = head; data->user_page->data_head = head;
/* /*
...@@ -2148,7 +2149,7 @@ static void perf_output_unlock(struct perf_output_handle *handle) ...@@ -2148,7 +2149,7 @@ static void perf_output_unlock(struct perf_output_handle *handle)
/* /*
* Therefore we have to validate we did not indeed do so. * Therefore we have to validate we did not indeed do so.
*/ */
if (unlikely(atomic_read(&data->done_head))) { if (unlikely(atomic_long_read(&data->done_head))) {
/* /*
* Since we had it locked, we can lock it again. * Since we had it locked, we can lock it again.
*/ */
...@@ -2195,7 +2196,7 @@ static int perf_output_begin(struct perf_output_handle *handle, ...@@ -2195,7 +2196,7 @@ static int perf_output_begin(struct perf_output_handle *handle,
do { do {
offset = head = atomic_read(&data->head); offset = head = atomic_read(&data->head);
head += size; head += size;
} while (atomic_cmpxchg(&data->head, offset, head) != offset); } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
handle->offset = offset; handle->offset = offset;
handle->head = head; handle->head = head;
...@@ -2246,7 +2247,7 @@ static void perf_output_copy(struct perf_output_handle *handle, ...@@ -2246,7 +2247,7 @@ static void perf_output_copy(struct perf_output_handle *handle,
* Check we didn't copy past our reservation window, taking the * Check we didn't copy past our reservation window, taking the
* possible unsigned int wrap into account. * possible unsigned int wrap into account.
*/ */
WARN_ON_ONCE(((int)(handle->head - handle->offset)) < 0); WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
} }
#define perf_output_put(handle, x) \ #define perf_output_put(handle, x) \
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment