Commit 4d855457 authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: move PERF_RECORD_TIME

Move PERF_RECORD_TIME so that all the fixed length items come before
the variable length ones.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090408130409.307926436@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent de9ac07b
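
The motivation is easiest to see from the consumer's side: once every fixed-size field (ip, pid/tid, time) precedes the variable-length ones (group readings, callchain), a reader can decode the fixed part with a linear walk whose offsets never depend on any array lengths. Below is a minimal userspace-style sketch of such a walk, assuming the bit values as reordered by this commit; the parse_fixed() helper and parsed_sample struct are hypothetical illustrations, not part of the commit.

#include <stdint.h>
#include <string.h>

/* Bit values as reordered by this commit (see the first hunk below). */
enum {
	PERF_RECORD_IP        = 1U << 0,
	PERF_RECORD_TID       = 1U << 1,
	PERF_RECORD_TIME      = 1U << 2,
	PERF_RECORD_GROUP     = 1U << 3,
	PERF_RECORD_CALLCHAIN = 1U << 4,
};

struct parsed_sample {
	uint64_t ip;
	uint32_t pid, tid;
	uint64_t time;
};

/*
 * Walk the fixed-size fields of one sample record. Because they all
 * come before the variable-length tail, this needs no length fields;
 * it returns a cursor pointing at the GROUP/CALLCHAIN data.
 */
static const uint8_t *parse_fixed(const uint8_t *cur, unsigned int record_type,
				  struct parsed_sample *s)
{
	if (record_type & PERF_RECORD_IP) {
		memcpy(&s->ip, cur, sizeof(s->ip));
		cur += sizeof(s->ip);
	}
	if (record_type & PERF_RECORD_TID) {
		memcpy(&s->pid, cur, sizeof(s->pid));
		memcpy(&s->tid, cur + sizeof(s->pid), sizeof(s->tid));
		cur += sizeof(s->pid) + sizeof(s->tid);
	}
	if (record_type & PERF_RECORD_TIME) {
		memcpy(&s->time, cur, sizeof(s->time));
		cur += sizeof(s->time);
	}
	return cur;	/* variable-length tail starts here */
}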
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
@@ -100,9 +100,9 @@ enum sw_event_ids {
 enum perf_counter_record_format {
 	PERF_RECORD_IP		= 1U << 0,
 	PERF_RECORD_TID		= 1U << 1,
-	PERF_RECORD_GROUP	= 1U << 2,
-	PERF_RECORD_CALLCHAIN	= 1U << 3,
-	PERF_RECORD_TIME	= 1U << 4,
+	PERF_RECORD_TIME	= 1U << 2,
+	PERF_RECORD_GROUP	= 1U << 3,
+	PERF_RECORD_CALLCHAIN	= 1U << 4,
 };
 
 /*
@@ -250,6 +250,7 @@ enum perf_event_type {
 	 *
 	 * { u64			ip;	  } && PERF_RECORD_IP
 	 * { u32			pid, tid; } && PERF_RECORD_TID
+	 * { u64			time;     } && PERF_RECORD_TIME
 	 *
 	 * { u64			nr;
 	 *   { u64 event, val; }	cnt[nr];  } && PERF_RECORD_GROUP
@@ -259,8 +260,6 @@ enum perf_event_type {
 	 *	kernel,
 	 *	user;
 	 *   u64			ips[nr];  } && PERF_RECORD_CALLCHAIN
-	 *
-	 * { u64			time;     } && PERF_RECORD_TIME
 	 * };
 	 */
 };
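One side effect worth noting from the hunks above: the reorder changes the numeric bit values, not just the documented layout position. PERF_RECORD_TIME moves from 1U << 4 to 1U << 2 and GROUP/CALLCHAIN shift up, so anything built against the old raw values must be recompiled. A hypothetical usage example of composing a record_type mask against the new values:

/* Hypothetical usage: request ip + pid/tid + time in each sample. */
unsigned int record_type = PERF_RECORD_IP | PERF_RECORD_TID | PERF_RECORD_TIME;
/* new values: (1U << 0) | (1U << 1) | (1U << 2) == 0x7  */
/* old values: (1U << 0) | (1U << 1) | (1U << 4) == 0x13 */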
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
@@ -1850,6 +1850,16 @@ static void perf_counter_output(struct perf_counter *counter,
 		header.size += sizeof(tid_entry);
 	}
 
+	if (record_type & PERF_RECORD_TIME) {
+		/*
+		 * Maybe do better on x86 and provide cpu_clock_nmi()
+		 */
+		time = sched_clock();
+
+		header.type |= PERF_RECORD_TIME;
+		header.size += sizeof(u64);
+	}
+
 	if (record_type & PERF_RECORD_GROUP) {
 		header.type |= PERF_RECORD_GROUP;
 		header.size += sizeof(u64) +
@@ -1867,16 +1877,6 @@ static void perf_counter_output(struct perf_counter *counter,
 		}
 	}
 
-	if (record_type & PERF_RECORD_TIME) {
-		/*
-		 * Maybe do better on x86 and provide cpu_clock_nmi()
-		 */
-		time = sched_clock();
-
-		header.type |= PERF_RECORD_TIME;
-		header.size += sizeof(u64);
-	}
-
 	ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
 	if (ret)
 		return;
@@ -1889,6 +1889,9 @@ static void perf_counter_output(struct perf_counter *counter,
 	if (record_type & PERF_RECORD_TID)
 		perf_output_put(&handle, tid_entry);
 
+	if (record_type & PERF_RECORD_TIME)
+		perf_output_put(&handle, time);
+
 	if (record_type & PERF_RECORD_GROUP) {
 		struct perf_counter *leader, *sub;
 		u64 nr = counter->nr_siblings;
@@ -1910,9 +1913,6 @@ static void perf_counter_output(struct perf_counter *counter,
 	if (callchain)
 		perf_output_copy(&handle, callchain, callchain_size);
 
-	if (record_type & PERF_RECORD_TIME)
-		perf_output_put(&handle, time);
-
 	perf_output_end(&handle);
 }
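Within perf_counter_output() the change has to land in two places at once: the function first sizes the record (accumulating header.size and header.type), then emits the fields, and both passes must visit fields in the same order or the declared size will not match the bytes written. Below is a condensed illustration of that invariant with simplified stand-ins for the kernel's perf_output helpers; nothing in it is kernel source.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define PERF_RECORD_TIME (1U << 2)	/* new value from this commit */

/*
 * Sketch of the size-then-emit invariant: pass 1 computes the record
 * size, pass 2 writes the fields, and both must handle the fields in
 * the same order. That is why this commit moves the PERF_RECORD_TIME
 * block identically in both passes.
 */
static size_t size_pass(unsigned int record_type)
{
	size_t size = 0;

	if (record_type & PERF_RECORD_TIME)	/* fixed-length field */
		size += sizeof(uint64_t);
	/* ...GROUP and CALLCHAIN sizing (variable-length) follow... */
	return size;
}

static uint8_t *emit_pass(unsigned int record_type, uint64_t time, uint8_t *cur)
{
	if (record_type & PERF_RECORD_TIME) {	/* same order as size_pass() */
		memcpy(cur, &time, sizeof(time));
		cur += sizeof(time);
	}
	/* ...GROUP and CALLCHAIN emission follow in the same order... */
	return cur;
}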