Commit cc1b39db authored by Ingo Molnar

Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/core

Pull ftrace updates from Steve Rostedt.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 7e0dd574 1c7d6673
@@ -11,3 +11,4 @@ header-y += reg.h
 header-y += regdef.h
 header-y += sysinfo.h
 generic-y += exec.h
+generic-y += trace_clock.h
@@ -31,5 +31,6 @@ generic-y += sockios.h
 generic-y += termbits.h
 generic-y += termios.h
 generic-y += timex.h
+generic-y += trace_clock.h
 generic-y += types.h
 generic-y += unaligned.h
@@ -43,6 +43,7 @@ generic-y += swab.h
 generic-y += termbits.h
 generic-y += termios.h
 generic-y += topology.h
+generic-y += trace_clock.h
 generic-y += types.h
 generic-y += unaligned.h
 generic-y += user.h
 generic-y += clkdev.h
 generic-y += exec.h
+generic-y += trace_clock.h
@@ -38,6 +38,7 @@ generic-y += statfs.h
 generic-y += termbits.h
 generic-y += termios.h
 generic-y += topology.h
+generic-y += trace_clock.h
 generic-y += types.h
 generic-y += ucontext.h
 generic-y += unaligned.h
...
@@ -49,6 +49,7 @@ generic-y += termbits.h
 generic-y += termios.h
 generic-y += tlbflush.h
 generic-y += topology.h
+generic-y += trace_clock.h
 generic-y += types.h
 generic-y += ucontext.h
 generic-y += user.h
...
@@ -11,3 +11,4 @@ header-y += sync_serial.h
 generic-y += clkdev.h
 generic-y += exec.h
 generic-y += module.h
+generic-y += trace_clock.h
 generic-y += clkdev.h
 generic-y += exec.h
+generic-y += trace_clock.h
@@ -3,3 +3,4 @@ include include/asm-generic/Kbuild.asm
 generic-y += clkdev.h
 generic-y += exec.h
 generic-y += module.h
+generic-y += trace_clock.h
@@ -48,6 +48,7 @@ generic-y += stat.h
 generic-y += termbits.h
 generic-y += termios.h
 generic-y += topology.h
+generic-y += trace_clock.h
 generic-y += types.h
 generic-y += ucontext.h
 generic-y += unaligned.h
...
@@ -2,3 +2,4 @@
 generic-y += clkdev.h
 generic-y += exec.h
 generic-y += kvm_para.h
+generic-y += trace_clock.h
@@ -3,3 +3,4 @@ include include/asm-generic/Kbuild.asm
 generic-y += clkdev.h
 generic-y += exec.h
 generic-y += module.h
+generic-y += trace_clock.h
@@ -24,6 +24,7 @@ generic-y += sections.h
 generic-y += siginfo.h
 generic-y += statfs.h
 generic-y += topology.h
+generic-y += trace_clock.h
 generic-y += types.h
 generic-y += word-at-a-time.h
 generic-y += xor.h
@@ -3,3 +3,4 @@ include include/asm-generic/Kbuild.asm
 header-y += elf.h
 generic-y += clkdev.h
 generic-y += exec.h
+generic-y += trace_clock.h
 # MIPS headers
+generic-y += trace_clock.h
 generic-y += clkdev.h
 generic-y += exec.h
+generic-y += trace_clock.h
@@ -60,6 +60,7 @@ generic-y += swab.h
 generic-y += termbits.h
 generic-y += termios.h
 generic-y += topology.h
+generic-y += trace_clock.h
 generic-y += types.h
 generic-y += ucontext.h
 generic-y += user.h
...
@@ -3,3 +3,4 @@ generic-y += word-at-a-time.h auxvec.h user.h cputime.h emergency-restart.h \
 	segment.h topology.h vga.h device.h percpu.h hw_irq.h mutex.h \
 	div64.h irq_regs.h kdebug.h kvm_para.h local64.h local.h param.h \
 	poll.h xor.h clkdev.h exec.h
+generic-y += trace_clock.h
@@ -2,3 +2,4 @@
 generic-y += clkdev.h
 generic-y += rwsem.h
+generic-y += trace_clock.h
 generic-y += clkdev.h
+generic-y += trace_clock.h
@@ -3,3 +3,4 @@ include include/asm-generic/Kbuild.asm
 header-y +=
 generic-y += clkdev.h
+generic-y += trace_clock.h
@@ -31,5 +31,6 @@ generic-y += socket.h
 generic-y += statfs.h
 generic-y += termbits.h
 generic-y += termios.h
+generic-y += trace_clock.h
 generic-y += ucontext.h
 generic-y += xor.h
@@ -8,4 +8,5 @@ generic-y += local64.h
 generic-y += irq_regs.h
 generic-y += local.h
 generic-y += module.h
+generic-y += trace_clock.h
 generic-y += word-at-a-time.h
@@ -34,5 +34,6 @@ generic-y += sockios.h
 generic-y += statfs.h
 generic-y += termbits.h
 generic-y += termios.h
+generic-y += trace_clock.h
 generic-y += types.h
 generic-y += xor.h
@@ -2,3 +2,4 @@ generic-y += bug.h cputime.h device.h emergency-restart.h futex.h hardirq.h
 generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h
 generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h
 generic-y += switch_to.h clkdev.h
+generic-y += trace_clock.h
@@ -53,6 +53,7 @@ generic-y += syscalls.h
 generic-y += termbits.h
 generic-y += termios.h
 generic-y += topology.h
+generic-y += trace_clock.h
 generic-y += types.h
 generic-y += ucontext.h
 generic-y += unaligned.h
...
#ifndef _ASM_X86_TRACE_CLOCK_H
#define _ASM_X86_TRACE_CLOCK_H

#include <linux/compiler.h>
#include <linux/types.h>

#ifdef CONFIG_X86_TSC

extern u64 notrace trace_clock_x86_tsc(void);

# define ARCH_TRACE_CLOCKS \
	{ trace_clock_x86_tsc,	"x86-tsc",	.in_ns = 0 },

#else /* !CONFIG_X86_TSC */

#define ARCH_TRACE_CLOCKS

#endif

#endif /* _ASM_X86_TRACE_CLOCK_H */
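For orientation: the ARCH_TRACE_CLOCKS initializer above is consumed by the trace_clocks[] table in kernel/trace/trace.c (see the trace.c hunk further down). On a CONFIG_X86_TSC kernel that table would expand to roughly the sketch below; the layout is illustrative, and only the extra "x86-tsc" entry comes from this header:

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;	/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	{ trace_clock_x86_tsc,	"x86-tsc",	.in_ns = 0 },	/* from ARCH_TRACE_CLOCKS */
};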
@@ -61,6 +61,7 @@ obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
+obj-$(CONFIG_X86_TSC) += trace_clock.o
 obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
...
/*
 * X86 trace clocks
 */
#include <asm/trace_clock.h>
#include <asm/barrier.h>
#include <asm/msr.h>

/*
 * trace_clock_x86_tsc(): A clock that is just the cycle counter.
 *
 * Unlike the other clocks, this is not in nanoseconds.
 */
u64 notrace trace_clock_x86_tsc(void)
{
	u64 ret;

	rdtsc_barrier();
	rdtscll(ret);

	return ret;
}
@@ -25,4 +25,5 @@ generic-y += siginfo.h
 generic-y += statfs.h
 generic-y += termios.h
 generic-y += topology.h
+generic-y += trace_clock.h
 generic-y += xor.h
#ifndef _ASM_GENERIC_TRACE_CLOCK_H
#define _ASM_GENERIC_TRACE_CLOCK_H
/*
 * Arch-specific trace clocks.
 */

/*
 * Additional trace clocks added to the trace_clocks
 * array in kernel/trace/trace.c
 * None if the architecture has not defined it.
 */
#ifndef ARCH_TRACE_CLOCKS
# define ARCH_TRACE_CLOCKS
#endif

#endif /* _ASM_GENERIC_TRACE_CLOCK_H */
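An architecture that is happy with this empty fallback simply lists trace_clock.h as generic-y in its Kbuild, which is what all the arch hunks at the top of this diff do. An architecture that wants to export its own counter would instead provide an asm/trace_clock.h mirroring the x86 one; the sketch below is hypothetical (the arch name, function name and clock name are made up) and is not part of this merge:

#ifndef _ASM_MYARCH_TRACE_CLOCK_H
#define _ASM_MYARCH_TRACE_CLOCK_H

#include <linux/compiler.h>
#include <linux/types.h>

/* Hypothetical per-arch clock: a raw cycle counter, not in nanoseconds. */
extern u64 notrace trace_clock_myarch_cycles(void);

# define ARCH_TRACE_CLOCKS \
	{ trace_clock_myarch_cycles,	"myarch-cycles",	.in_ns = 0 },

#endif /* _ASM_MYARCH_TRACE_CLOCK_H */

Such an architecture would also drop the generic-y += trace_clock.h line from its Kbuild so that its own header is used instead of the asm-generic stub.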
@@ -86,6 +86,12 @@ struct trace_iterator {
 	cpumask_var_t		started;
 };
 
+enum trace_iter_flags {
+	TRACE_FILE_LAT_FMT	= 1,
+	TRACE_FILE_ANNOTATE	= 2,
+	TRACE_FILE_TIME_IN_NS	= 4,
+};
+
 struct trace_event;
...
@@ -12,6 +12,8 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 
+#include <asm/trace_clock.h>
+
 extern u64 notrace trace_clock_local(void);
 extern u64 notrace trace_clock(void);
 extern u64 notrace trace_clock_global(void);
...
@@ -619,79 +619,6 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-/*
- * Define the insertion callback to perf events
- *
- * The job is very similar to ftrace_raw_event_<call> except that we don't
- * insert in the ring buffer but in a perf counter.
- *
- * static void ftrace_perf_<call>(proto)
- * {
- *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
- *	struct ftrace_event_call *event_call = &event_<call>;
- *	extern void perf_tp_event(int, u64, u64, void *, int);
- *	struct ftrace_raw_##call *entry;
- *	struct perf_trace_buf *trace_buf;
- *	u64 __addr = 0, __count = 1;
- *	unsigned long irq_flags;
- *	struct trace_entry *ent;
- *	int __entry_size;
- *	int __data_size;
- *	int __cpu
- *	int pc;
- *
- *	pc = preempt_count();
- *
- *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
- *
- *	// Below we want to get the aligned size by taking into account
- *	// the u32 field that will later store the buffer size
- *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
- *			     sizeof(u64));
- *	__entry_size -= sizeof(u32);
- *
- *	// Protect the non nmi buffer
- *	// This also protects the rcu read side
- *	local_irq_save(irq_flags);
- *	__cpu = smp_processor_id();
- *
- *	if (in_nmi())
- *		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
- *	else
- *		trace_buf = rcu_dereference_sched(perf_trace_buf);
- *
- *	if (!trace_buf)
- *		goto end;
- *
- *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
- *
- *	// Avoid recursion from perf that could mess up the buffer
- *	if (trace_buf->recursion++)
- *		goto end_recursion;
- *
- *	raw_data = trace_buf->buf;
- *
- *	// Make recursion update visible before entering perf_tp_event
- *	// so that we protect from perf recursions.
- *
- *	barrier();
- *
- *	//zero dead bytes from alignment to avoid stack leak to userspace:
- *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
- *	entry = (struct ftrace_raw_<call> *)raw_data;
- *	ent = &entry->ent;
- *	tracing_generic_entry_update(ent, irq_flags, pc);
- *	ent->type = event_call->id;
- *
- *	<tstruct> <- do some jobs with dynamic arrays
- *
- *	<assign> <- affect our values
- *
- *	perf_tp_event(event_call->id, __addr, __count, entry,
- *		__entry_size); <- submit them to perf counter
- *
- * }
- */
 #ifdef CONFIG_PERF_EVENTS
...
@@ -484,10 +484,12 @@ static const char *trace_options[] = {
 static struct {
 	u64 (*func)(void);
 	const char *name;
+	int in_ns;	/* is this clock in nanoseconds? */
 } trace_clocks[] = {
-	{ trace_clock_local,	"local" },
-	{ trace_clock_global,	"global" },
-	{ trace_clock_counter,	"counter" },
+	{ trace_clock_local,	"local",	1 },
+	{ trace_clock_global,	"global",	1 },
+	{ trace_clock_counter,	"counter",	0 },
+	ARCH_TRACE_CLOCKS
 };
 
 int trace_clock_id;
@@ -2477,6 +2479,10 @@ __tracing_open(struct inode *inode, struct file *file)
 	if (ring_buffer_overruns(iter->tr->buffer))
 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
 
+	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
+	if (trace_clocks[trace_clock_id].in_ns)
+		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
+
 	/* stop the trace while dumping */
 	tracing_stop();
@@ -3338,6 +3344,10 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 	if (trace_flags & TRACE_ITER_LATENCY_FMT)
 		iter->iter_flags |= TRACE_FILE_LAT_FMT;
 
+	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
+	if (trace_clocks[trace_clock_id].in_ns)
+		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
+
 	iter->cpu_file = cpu_file;
 	iter->tr = &global_trace;
 	mutex_init(&iter->mutex);
@@ -4378,13 +4388,24 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 	cnt = ring_buffer_bytes_cpu(tr->buffer, cpu);
 	trace_seq_printf(s, "bytes: %ld\n", cnt);
 
-	t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
-	usec_rem = do_div(t, USEC_PER_SEC);
-	trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem);
-
-	t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
-	usec_rem = do_div(t, USEC_PER_SEC);
-	trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
+	if (trace_clocks[trace_clock_id].in_ns) {
+		/* local or global for trace_clock */
+		t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
+		usec_rem = do_div(t, USEC_PER_SEC);
+		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
+								t, usec_rem);
+
+		t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
+		usec_rem = do_div(t, USEC_PER_SEC);
+		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
+	} else {
+		/* counter or tsc mode for trace_clock */
+		trace_seq_printf(s, "oldest event ts: %llu\n",
+				ring_buffer_oldest_event_ts(tr->buffer, cpu));
+
+		trace_seq_printf(s, "now ts: %llu\n",
+				ring_buffer_time_stamp(tr->buffer, cpu));
+	}
 
 	cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu);
 	trace_seq_printf(s, "dropped events: %ld\n", cnt);
...
@@ -406,10 +406,6 @@ void tracing_stop_sched_switch_record(void);
 void tracing_start_sched_switch_record(void);
 int register_tracer(struct tracer *type);
 int is_tracing_stopped(void);
 
-enum trace_file_type {
-	TRACE_FILE_LAT_FMT	= 1,
-	TRACE_FILE_ANNOTATE	= 2,
-};
 extern cpumask_var_t __read_mostly tracing_buffer_mask;
...
@@ -610,24 +610,54 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu
 	return trace_print_lat_fmt(s, entry);
 }
 
-static unsigned long preempt_mark_thresh = 100;
+static unsigned long preempt_mark_thresh_us = 100;
 
 static int
-lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
-		    unsigned long rel_usecs)
+lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
 {
-	return trace_seq_printf(s, " %4lldus%c: ", abs_usecs,
-				rel_usecs > preempt_mark_thresh ? '!' :
-				rel_usecs > 1 ? '+' : ' ');
+	unsigned long verbose = trace_flags & TRACE_ITER_VERBOSE;
+	unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
+	unsigned long long abs_ts = iter->ts - iter->tr->time_start;
+	unsigned long long rel_ts = next_ts - iter->ts;
+	struct trace_seq *s = &iter->seq;
+
+	if (in_ns) {
+		abs_ts = ns2usecs(abs_ts);
+		rel_ts = ns2usecs(rel_ts);
+	}
+
+	if (verbose && in_ns) {
+		unsigned long abs_usec = do_div(abs_ts, USEC_PER_MSEC);
+		unsigned long abs_msec = (unsigned long)abs_ts;
+		unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC);
+		unsigned long rel_msec = (unsigned long)rel_ts;
+
+		return trace_seq_printf(
+				s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ",
+				ns2usecs(iter->ts),
+				abs_msec, abs_usec,
+				rel_msec, rel_usec);
+	} else if (verbose && !in_ns) {
+		return trace_seq_printf(
+				s, "[%016llx] %lld (+%lld): ",
+				iter->ts, abs_ts, rel_ts);
+	} else if (!verbose && in_ns) {
+		return trace_seq_printf(
+				s, " %4lldus%c: ",
+				abs_ts,
+				rel_ts > preempt_mark_thresh_us ? '!' :
+				  rel_ts > 1 ? '+' : ' ');
+	} else { /* !verbose && !in_ns */
+		return trace_seq_printf(s, " %4lld: ", abs_ts);
+	}
 }
 
 int trace_print_context(struct trace_iterator *iter)
 {
 	struct trace_seq *s = &iter->seq;
 	struct trace_entry *entry = iter->ent;
-	unsigned long long t = ns2usecs(iter->ts);
-	unsigned long usec_rem = do_div(t, USEC_PER_SEC);
-	unsigned long secs = (unsigned long)t;
+	unsigned long long t;
+	unsigned long secs, usec_rem;
 	char comm[TASK_COMM_LEN];
 	int ret;
@@ -644,8 +674,13 @@ int trace_print_context(struct trace_iterator *iter)
 		return 0;
 	}
 
-	return trace_seq_printf(s, " %5lu.%06lu: ",
-				secs, usec_rem);
+	if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
+		t = ns2usecs(iter->ts);
+		usec_rem = do_div(t, USEC_PER_SEC);
+		secs = (unsigned long)t;
+		return trace_seq_printf(s, " %5lu.%06lu: ", secs, usec_rem);
+	} else
+		return trace_seq_printf(s, " %12llu: ", iter->ts);
 }
 
 int trace_print_lat_context(struct trace_iterator *iter)
@@ -659,36 +694,29 @@ int trace_print_lat_context(struct trace_iterator *iter)
 			   *next_entry = trace_find_next_entry(iter, NULL,
 							       &next_ts);
 	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
-	unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
-	unsigned long rel_usecs;
 
 	/* Restore the original ent_size */
 	iter->ent_size = ent_size;
 
 	if (!next_entry)
 		next_ts = iter->ts;
-	rel_usecs = ns2usecs(next_ts - iter->ts);
 
 	if (verbose) {
 		char comm[TASK_COMM_LEN];
 
 		trace_find_cmdline(entry->pid, comm);
 
-		ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]"
-				       " %ld.%03ldms (+%ld.%03ldms): ", comm,
-				       entry->pid, iter->cpu, entry->flags,
-				       entry->preempt_count, iter->idx,
-				       ns2usecs(iter->ts),
-				       abs_usecs / USEC_PER_MSEC,
-				       abs_usecs % USEC_PER_MSEC,
-				       rel_usecs / USEC_PER_MSEC,
-				       rel_usecs % USEC_PER_MSEC);
+		ret = trace_seq_printf(
+			s, "%16s %5d %3d %d %08x %08lx ",
+			comm, entry->pid, iter->cpu, entry->flags,
+			entry->preempt_count, iter->idx);
 	} else {
 		ret = lat_print_generic(s, entry, iter->cpu);
-		if (ret)
-			ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
 	}
 
+	if (ret)
+		ret = lat_print_timestamp(iter, next_ts);
+
 	return ret;
 }
...