Commit 763be8c0 authored by Mathieu Desnoyers, committed by Greg Kroah-Hartman

lttng instrumentation: tracepoint events

Modifications to the in-kernel TRACE_EVENT are needed so that LTTng can
generate its compact event descriptions and probe code. These changes
could apply to upstream TRACE_EVENT, but would require changing the
in-kernel API.
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 69e1242e
The workflow for updating patches from a newer kernel:
Diff the mainline/ and lttng-module/ directories.
Pull the new headers from the mainline kernel into mainline/.
Copy them into lttng-modules.
Apply the diff and fix conflicts.
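As a rough illustration of the API difference (both styles appear verbatim in the files below), the LTTng variant routes every field assignment through tp_*() statements, written without trailing semicolons, which the LTTng probe layer can redefine to generate its compact event descriptions and serialization code:

/* mainline TRACE_EVENT assignment style */
TP_fast_assign(
	__entry->irq = irq;
	__assign_str(name, action->name);
),

/* LTTng TRACE_EVENT assignment style */
TP_fast_assign(
	tp_assign(irq, irq)
	tp_strcpy(name, action->name)
),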
#undef TRACE_SYSTEM
#define TRACE_SYSTEM irq
#if !defined(_TRACE_IRQ_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IRQ_H
#include <linux/tracepoint.h>
#ifndef _TRACE_IRQ_DEF_
#define _TRACE_IRQ_DEF_
struct irqaction;
struct softirq_action;
#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
#define show_softirq_name(val) \
__print_symbolic(val, \
softirq_name(HI), \
softirq_name(TIMER), \
softirq_name(NET_TX), \
softirq_name(NET_RX), \
softirq_name(BLOCK), \
softirq_name(BLOCK_IOPOLL), \
softirq_name(TASKLET), \
softirq_name(SCHED), \
softirq_name(HRTIMER), \
softirq_name(RCU))
#endif /* _TRACE_IRQ_DEF_ */
/**
* irq_handler_entry - called immediately before the irq action handler
* @irq: irq number
* @action: pointer to struct irqaction
*
* The struct irqaction pointed to by @action contains various
* information about the handler, including the device name,
* @action->name, and the device id, @action->dev_id. When used in
* conjunction with the irq_handler_exit tracepoint, we can figure
* out irq handler latencies.
*/
TRACE_EVENT(irq_handler_entry,
TP_PROTO(int irq, struct irqaction *action),
TP_ARGS(irq, action),
TP_STRUCT__entry(
__field( int, irq )
__string( name, action->name )
),
TP_fast_assign(
tp_assign(irq, irq)
tp_strcpy(name, action->name)
),
TP_printk("irq=%d name=%s", __entry->irq, __get_str(name))
)
/**
* irq_handler_exit - called immediately after the irq action handler returns
* @irq: irq number
* @action: pointer to struct irqaction
* @ret: return value
*
* If the @ret value is set to IRQ_HANDLED, then we know that the corresponding
* @action->handler successfully handled this irq. Otherwise, the irq might be
* a shared irq line, or the irq was not handled successfully. Can be used in
* conjunction with the irq_handler_entry to understand irq handler latencies.
*/
TRACE_EVENT(irq_handler_exit,
TP_PROTO(int irq, struct irqaction *action, int ret),
TP_ARGS(irq, action, ret),
TP_STRUCT__entry(
__field( int, irq )
__field( int, ret )
),
TP_fast_assign(
tp_assign(irq, irq)
tp_assign(ret, ret)
),
TP_printk("irq=%d ret=%s",
__entry->irq, __entry->ret ? "handled" : "unhandled")
)
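/*
 * Illustration (assumed call site, modeled on kernel/irq/handle.c, not part
 * of this header): the entry/exit pair brackets the action handler, so the
 * timestamp delta between the two events is the handler latency.
 *
 *	trace_irq_handler_entry(irq, action);
 *	ret = action->handler(irq, action->dev_id);
 *	trace_irq_handler_exit(irq, action, ret);
 */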
DECLARE_EVENT_CLASS(softirq,
TP_PROTO(unsigned int vec_nr),
TP_ARGS(vec_nr),
TP_STRUCT__entry(
__field( unsigned int, vec )
),
TP_fast_assign(
tp_assign(vec, vec_nr)
),
TP_printk("vec=%u [action=%s]", __entry->vec,
show_softirq_name(__entry->vec))
)
/**
* softirq_entry - called immediately before the softirq handler
* @vec_nr: softirq vector number
*
* When used in combination with the softirq_exit tracepoint
* we can determine the softirq handler runtime.
*/
DEFINE_EVENT(softirq, softirq_entry,
TP_PROTO(unsigned int vec_nr),
TP_ARGS(vec_nr)
)
/**
* softirq_exit - called immediately after the softirq handler returns
* @vec_nr: softirq vector number
*
* When used in combination with the softirq_entry tracepoint
* we can determine the softirq handler runtime.
*/
DEFINE_EVENT(softirq, softirq_exit,
TP_PROTO(unsigned int vec_nr),
TP_ARGS(vec_nr)
)
/**
* softirq_raise - called immediately when a softirq is raised
* @vec_nr: softirq vector number
*
* When used in combination with the softirq_entry tracepoint
* we can determine the softirq raise to run latency.
*/
DEFINE_EVENT(softirq, softirq_raise,
TP_PROTO(unsigned int vec_nr),
TP_ARGS(vec_nr)
)
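/*
 * Illustration (trace-consumer side, not kernel code): the raise-to-run
 * latency of a vector is the timestamp delta between softirq_raise and the
 * next softirq_entry carrying the same vec, e.g. with per-event timestamps:
 *
 *	on softirq_raise:  raise_ts[vec] = ts;
 *	on softirq_entry:  latency = ts - raise_ts[vec];
 */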
#endif /* _TRACE_IRQ_H */
/* This part must be outside protection */
#include "../../../probes/define_trace.h"
#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_MAIN_H
#include <linux/tracepoint.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }
#define kvm_trace_exit_reason \
ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL), \
ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN), \
ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR), \
ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI)
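/*
 * For example, ERSN(HLT) expands to { KVM_EXIT_HLT, "KVM_EXIT_HLT" };
 * __print_symbolic() below walks these pairs to print the exit reason name.
 */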
TRACE_EVENT(kvm_userspace_exit,
TP_PROTO(__u32 reason, int errno),
TP_ARGS(reason, errno),
TP_STRUCT__entry(
__field( __u32, reason )
__field( int, errno )
),
TP_fast_assign(
tp_assign(reason, reason)
tp_assign(errno, errno)
),
TP_printk("reason %s (%d)",
__entry->errno < 0 ?
(__entry->errno == -EINTR ? "restart" : "error") :
__print_symbolic(__entry->reason, kvm_trace_exit_reason),
__entry->errno < 0 ? -__entry->errno : __entry->reason)
)
#if defined(__KVM_HAVE_IOAPIC)
TRACE_EVENT(kvm_set_irq,
TP_PROTO(unsigned int gsi, int level, int irq_source_id),
TP_ARGS(gsi, level, irq_source_id),
TP_STRUCT__entry(
__field( unsigned int, gsi )
__field( int, level )
__field( int, irq_source_id )
),
TP_fast_assign(
tp_assign(gsi, gsi)
tp_assign(level, level)
tp_assign(irq_source_id, irq_source_id)
),
TP_printk("gsi %u level %d source %d",
__entry->gsi, __entry->level, __entry->irq_source_id)
)
#define kvm_deliver_mode \
{0x0, "Fixed"}, \
{0x1, "LowPrio"}, \
{0x2, "SMI"}, \
{0x3, "Res3"}, \
{0x4, "NMI"}, \
{0x5, "INIT"}, \
{0x6, "SIPI"}, \
{0x7, "ExtINT"}
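/*
 * The TP_printk below decodes @e using the standard IOAPIC redirection
 * entry layout: bits 0-7 vector, 8-10 delivery mode (table above),
 * bit 11 destination mode (logical/physical), bit 15 trigger mode
 * (level/edge), bit 16 mask, bits 56-63 destination id.
 */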
TRACE_EVENT(kvm_ioapic_set_irq,
TP_PROTO(__u64 e, int pin, bool coalesced),
TP_ARGS(e, pin, coalesced),
TP_STRUCT__entry(
__field( __u64, e )
__field( int, pin )
__field( bool, coalesced )
),
TP_fast_assign(
tp_assign(e, e)
tp_assign(pin, pin)
tp_assign(coalesced, coalesced)
),
TP_printk("pin %u dst %x vec=%u (%s|%s|%s%s)%s",
__entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
__print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
(__entry->e & (1<<11)) ? "logical" : "physical",
(__entry->e & (1<<15)) ? "level" : "edge",
(__entry->e & (1<<16)) ? "|masked" : "",
__entry->coalesced ? " (coalesced)" : "")
)
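/*
 * Likewise, kvm_msi_set_irq decodes the standard MSI format: @address
 * bits 12-19 destination id, bit 2 destination mode, bit 3 redirection
 * hint; @data bits 0-7 vector, 8-10 delivery mode, bit 15 trigger mode.
 */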
TRACE_EVENT(kvm_msi_set_irq,
TP_PROTO(__u64 address, __u64 data),
TP_ARGS(address, data),
TP_STRUCT__entry(
__field( __u64, address )
__field( __u64, data )
),
TP_fast_assign(
tp_assign(address, address)
tp_assign(data, data)
),
TP_printk("dst %u vec %x (%s|%s|%s%s)",
(u8)(__entry->address >> 12), (u8)__entry->data,
__print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
(__entry->address & (1<<2)) ? "logical" : "physical",
(__entry->data & (1<<15)) ? "level" : "edge",
(__entry->address & (1<<3)) ? "|rh" : "")
)
#define kvm_irqchips \
{KVM_IRQCHIP_PIC_MASTER, "PIC master"}, \
{KVM_IRQCHIP_PIC_SLAVE, "PIC slave"}, \
{KVM_IRQCHIP_IOAPIC, "IOAPIC"}
TRACE_EVENT(kvm_ack_irq,
TP_PROTO(unsigned int irqchip, unsigned int pin),
TP_ARGS(irqchip, pin),
TP_STRUCT__entry(
__field( unsigned int, irqchip )
__field( unsigned int, pin )
),
TP_fast_assign(
tp_assign(irqchip, irqchip)
tp_assign(pin, pin)
),
TP_printk("irqchip %s pin %u",
__print_symbolic(__entry->irqchip, kvm_irqchips),
__entry->pin)
)
#endif /* defined(__KVM_HAVE_IOAPIC) */
#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
#define KVM_TRACE_MMIO_READ 1
#define KVM_TRACE_MMIO_WRITE 2
#define kvm_trace_symbol_mmio \
{ KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
{ KVM_TRACE_MMIO_READ, "read" }, \
{ KVM_TRACE_MMIO_WRITE, "write" }
TRACE_EVENT(kvm_mmio,
TP_PROTO(int type, int len, u64 gpa, u64 val),
TP_ARGS(type, len, gpa, val),
TP_STRUCT__entry(
__field( u32, type )
__field( u32, len )
__field( u64, gpa )
__field( u64, val )
),
TP_fast_assign(
tp_assign(type, type)
tp_assign(len, len)
tp_assign(gpa, gpa)
tp_assign(val, val)
),
TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
__print_symbolic(__entry->type, kvm_trace_symbol_mmio),
__entry->len, __entry->gpa, __entry->val)
)
#define kvm_fpu_load_symbol \
{0, "unload"}, \
{1, "load"}
TRACE_EVENT(kvm_fpu,
TP_PROTO(int load),
TP_ARGS(load),
TP_STRUCT__entry(
__field( u32, load )
),
TP_fast_assign(
tp_assign(load, load)
),
TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
)
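/*
 * kvm_age_page derives the guest frame number from the host virtual
 * address: the page offset of @hva within the memslot's userspace mapping
 * is added to the slot's base gfn.
 */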
TRACE_EVENT(kvm_age_page,
TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref),
TP_ARGS(hva, slot, ref),
TP_STRUCT__entry(
__field( u64, hva )
__field( u64, gfn )
__field( u8, referenced )
),
TP_fast_assign(
tp_assign(hva, hva)
tp_assign(gfn,
slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT))
tp_assign(referenced, ref)
),
TP_printk("hva %llx gfn %llx %s",
__entry->hva, __entry->gfn,
__entry->referenced ? "YOUNG" : "OLD")
)
#ifdef CONFIG_KVM_ASYNC_PF
DECLARE_EVENT_CLASS(kvm_async_get_page_class,
TP_PROTO(u64 gva, u64 gfn),
TP_ARGS(gva, gfn),
TP_STRUCT__entry(
__field(__u64, gva)
__field(u64, gfn)
),
TP_fast_assign(
tp_assign(gva, gva)
tp_assign(gfn, gfn)
),
TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
)
DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
TP_PROTO(u64 gva, u64 gfn),
TP_ARGS(gva, gfn)
)
DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
TP_PROTO(u64 gva, u64 gfn),
TP_ARGS(gva, gfn)
)
DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,
TP_PROTO(u64 token, u64 gva),
TP_ARGS(token, gva),
TP_STRUCT__entry(
__field(__u64, token)
__field(__u64, gva)
),
TP_fast_assign(
tp_assign(token, token)
tp_assign(gva, gva)
),
TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
)
DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,
TP_PROTO(u64 token, u64 gva),
TP_ARGS(token, gva)
)
DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
TP_PROTO(u64 token, u64 gva),
TP_ARGS(token, gva)
)
TRACE_EVENT(
kvm_async_pf_completed,
TP_PROTO(unsigned long address, struct page *page, u64 gva),
TP_ARGS(address, page, gva),
TP_STRUCT__entry(
__field(unsigned long, address)
__field(pfn_t, pfn)
__field(u64, gva)
),
TP_fast_assign(
tp_assign(address, address)
tp_assign(pfn, page ? page_to_pfn(page) : 0)
tp_assign(gva, gva)
),
TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva,
__entry->address, __entry->pfn)
)
#endif
#endif /* _TRACE_KVM_MAIN_H */
/* This part must be outside protection */
#include "../../../probes/define_trace.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM lttng
#if !defined(_TRACE_LTTNG_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_LTTNG_H
#include <linux/tracepoint.h>
TRACE_EVENT(lttng_metadata,
TP_PROTO(const char *str),
TP_ARGS(str),
/*
* Not exactly a string: more a sequence of bytes (dynamic
* array) without the length. This is a dummy anyway: we only
* use this declaration to generate an event metadata entry.
*/
TP_STRUCT__entry(
__string( str, str )
),
TP_fast_assign(
tp_strcpy(str, str)
),
TP_printk("")
)
#endif /* _TRACE_LTTNG_H */
/* This part must be outside protection */
#include "../../../probes/define_trace.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched
#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H
#include <linux/sched.h>
#include <linux/tracepoint.h>
#ifndef _TRACE_SCHED_DEF_
#define _TRACE_SCHED_DEF_
static inline long __trace_sched_switch_state(struct task_struct *p)
{
long state = p->state;
#ifdef CONFIG_PREEMPT
/*
* For all intents and purposes a preempted task is a running task.
*/
if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
state = TASK_RUNNING;
#endif
return state;
}
#endif /* _TRACE_SCHED_DEF_ */
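/*
 * Note on priorities: unlike mainline, most events below record
 * prio - MAX_RT_PRIO, so a normal task reports 0..39 (its nice-based
 * priority) and a real-time task reports a negative value. The
 * sched_wakeup events record the raw kernel prio, as mainline does.
 */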
/*
* Tracepoint for calling kthread_stop, performed to end a kthread:
*/
TRACE_EVENT(sched_kthread_stop,
TP_PROTO(struct task_struct *t),
TP_ARGS(t),
TP_STRUCT__entry(
__array_text( char, comm, TASK_COMM_LEN )
__field( pid_t, tid )
),
TP_fast_assign(
tp_memcpy(comm, t->comm, TASK_COMM_LEN)
tp_assign(tid, t->pid)
),
TP_printk("comm=%s tid=%d", __entry->comm, __entry->tid)
)
/*
* Tracepoint for the return value of the kthread stopping:
*/
TRACE_EVENT(sched_kthread_stop_ret,
TP_PROTO(int ret),
TP_ARGS(ret),
TP_STRUCT__entry(
__field( int, ret )
),
TP_fast_assign(
tp_assign(ret, ret)
),
TP_printk("ret=%d", __entry->ret)
)
/*
* Tracepoint for waking up a task:
*/
DECLARE_EVENT_CLASS(sched_wakeup_template,
TP_PROTO(struct task_struct *p, int success),
TP_ARGS(p, success),
TP_STRUCT__entry(
__array_text( char, comm, TASK_COMM_LEN )
__field( pid_t, tid )
__field( int, prio )
__field( int, success )
__field( int, target_cpu )
),
TP_fast_assign(
tp_memcpy(comm, p->comm, TASK_COMM_LEN)
tp_assign(tid, p->pid)
tp_assign(prio, p->prio)
tp_assign(success, success)
tp_assign(target_cpu, task_cpu(p))
),
TP_printk("comm=%s tid=%d prio=%d success=%d target_cpu=%03d",
__entry->comm, __entry->tid, __entry->prio,
__entry->success, __entry->target_cpu)
)
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
TP_PROTO(struct task_struct *p, int success),
TP_ARGS(p, success))
/*
* Tracepoint for waking up a new task:
*/
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
TP_PROTO(struct task_struct *p, int success),
TP_ARGS(p, success))
/*
* Tracepoint for task switches, performed by the scheduler:
*/
TRACE_EVENT(sched_switch,
TP_PROTO(struct task_struct *prev,
struct task_struct *next),
TP_ARGS(prev, next),
TP_STRUCT__entry(
__array_text( char, prev_comm, TASK_COMM_LEN )
__field( pid_t, prev_tid )
__field( int, prev_prio )
__field( long, prev_state )
__array_text( char, next_comm, TASK_COMM_LEN )
__field( pid_t, next_tid )
__field( int, next_prio )
),
TP_fast_assign(
tp_memcpy(next_comm, next->comm, TASK_COMM_LEN)
tp_assign(prev_tid, prev->pid)
tp_assign(prev_prio, prev->prio - MAX_RT_PRIO)
tp_assign(prev_state, __trace_sched_switch_state(prev))
tp_memcpy(prev_comm, prev->comm, TASK_COMM_LEN)
tp_assign(next_tid, next->pid)
tp_assign(next_prio, next->prio - MAX_RT_PRIO)
),
TP_printk("prev_comm=%s prev_tid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_tid=%d next_prio=%d",
__entry->prev_comm, __entry->prev_tid, __entry->prev_prio,
__entry->prev_state ?
__print_flags(__entry->prev_state, "|",
{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
{ 16, "Z" }, { 32, "X" }, { 64, "x" },
{ 128, "W" }) : "R",
__entry->next_comm, __entry->next_tid, __entry->next_prio)
)
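/*
 * The single-letter states printed by sched_switch follow the TASK_* bit
 * values of this kernel: 1 TASK_INTERRUPTIBLE "S", 2 TASK_UNINTERRUPTIBLE
 * "D", 4 __TASK_STOPPED "T", 8 __TASK_TRACED "t", 16 EXIT_ZOMBIE "Z",
 * 32 EXIT_DEAD "X", 64 TASK_DEAD "x", 128 TASK_WAKEKILL "W"; a zero state
 * prints "R" (running).
 */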
/*
* Tracepoint for a task being migrated:
*/
TRACE_EVENT(sched_migrate_task,
TP_PROTO(struct task_struct *p, int dest_cpu),
TP_ARGS(p, dest_cpu),
TP_STRUCT__entry(
__array_text( char, comm, TASK_COMM_LEN )
__field( pid_t, tid )
__field( int, prio )
__field( int, orig_cpu )
__field( int, dest_cpu )
),
TP_fast_assign(
tp_memcpy(comm, p->comm, TASK_COMM_LEN)
tp_assign(tid, p->pid)
tp_assign(prio, p->prio - MAX_RT_PRIO)
tp_assign(orig_cpu, task_cpu(p))
tp_assign(dest_cpu, dest_cpu)
),
TP_printk("comm=%s tid=%d prio=%d orig_cpu=%d dest_cpu=%d",
__entry->comm, __entry->tid, __entry->prio,
__entry->orig_cpu, __entry->dest_cpu)
)
DECLARE_EVENT_CLASS(sched_process_template,
TP_PROTO(struct task_struct *p),
TP_ARGS(p),
TP_STRUCT__entry(
__array_text( char, comm, TASK_COMM_LEN )
__field( pid_t, tid )
__field( int, prio )
),
TP_fast_assign(
tp_memcpy(comm, p->comm, TASK_COMM_LEN)
tp_assign(tid, p->pid)
tp_assign(prio, p->prio - MAX_RT_PRIO)
),
TP_printk("comm=%s tid=%d prio=%d",
__entry->comm, __entry->tid, __entry->prio)
)
/*
* Tracepoint for freeing a task:
*/
DEFINE_EVENT(sched_process_template, sched_process_free,
TP_PROTO(struct task_struct *p),
TP_ARGS(p))
/*
* Tracepoint for a task exiting:
*/
DEFINE_EVENT(sched_process_template, sched_process_exit,
TP_PROTO(struct task_struct *p),
TP_ARGS(p))
/*
* Tracepoint for waiting on task to unschedule:
*/
DEFINE_EVENT(sched_process_template, sched_wait_task,
TP_PROTO(struct task_struct *p),
TP_ARGS(p))
/*
* Tracepoint for a waiting task:
*/
TRACE_EVENT(sched_process_wait,
TP_PROTO(struct pid *pid),
TP_ARGS(pid),
TP_STRUCT__entry(
__array_text( char, comm, TASK_COMM_LEN )
__field( pid_t, tid )
__field( int, prio )
),
TP_fast_assign(
tp_memcpy(comm, current->comm, TASK_COMM_LEN)
tp_assign(tid, pid_nr(pid))
tp_assign(prio, current->prio - MAX_RT_PRIO)
),
TP_printk("comm=%s tid=%d prio=%d",
__entry->comm, __entry->tid, __entry->prio)
)
/*
* Tracepoint for do_fork:
*/
TRACE_EVENT(sched_process_fork,
TP_PROTO(struct task_struct *parent, struct task_struct *child),
TP_ARGS(parent, child),
TP_STRUCT__entry(
__array_text( char, parent_comm, TASK_COMM_LEN )
__field( pid_t, parent_tid )
__array_text( char, child_comm, TASK_COMM_LEN )
__field( pid_t, child_tid )
),
TP_fast_assign(
tp_memcpy(parent_comm, parent->comm, TASK_COMM_LEN)
tp_assign(parent_tid, parent->pid)
tp_memcpy(child_comm, child->comm, TASK_COMM_LEN)
tp_assign(child_tid, child->pid)
),
TP_printk("comm=%s tid=%d child_comm=%s child_tid=%d",
__entry->parent_comm, __entry->parent_tid,
__entry->child_comm, __entry->child_tid)
)
/*
* XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
* adding sched_stat support to SCHED_FIFO/RR would be welcome.
*/
DECLARE_EVENT_CLASS(sched_stat_template,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay),
TP_STRUCT__entry(
__array_text( char, comm, TASK_COMM_LEN )
__field( pid_t, tid )
__field( u64, delay )
),
TP_fast_assign(
tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
tp_assign(tid, tsk->pid)
tp_assign(delay, delay)
)
TP_perf_assign(
__perf_count(delay)
),
TP_printk("comm=%s tid=%d delay=%Lu [ns]",
__entry->comm, __entry->tid,
(unsigned long long)__entry->delay)
)
/*
* Tracepoint for accounting wait time (time the task is runnable
* but not actually running due to scheduler contention).
*/
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay))
/*
* Tracepoint for accounting sleep time (time the task is not runnable,
* including iowait, see below).
*/
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay))
/*
* Tracepoint for accounting iowait time (time the task is not runnable
* due to waiting on IO to complete).
*/
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay))
/*
* Tracepoint for accounting runtime (time the task is executing
* on a CPU).
*/
TRACE_EVENT(sched_stat_runtime,
TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
TP_ARGS(tsk, runtime, vruntime),
TP_STRUCT__entry(
__array_text( char, comm, TASK_COMM_LEN )
__field( pid_t, tid )
__field( u64, runtime )
__field( u64, vruntime )
),
TP_fast_assign(
tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
tp_assign(tid, tsk->pid)
tp_assign(runtime, runtime)
tp_assign(vruntime, vruntime)
)
TP_perf_assign(
__perf_count(runtime)
),
TP_printk("comm=%s tid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
__entry->comm, __entry->tid,
(unsigned long long)__entry->runtime,
(unsigned long long)__entry->vruntime)
)
/*
* Tracepoint for showing priority inheritance modifying a task's
* priority.
*/
TRACE_EVENT(sched_pi_setprio,
TP_PROTO(struct task_struct *tsk, int newprio),
TP_ARGS(tsk, newprio),
TP_STRUCT__entry(
__array_text( char, comm, TASK_COMM_LEN )
__field( pid_t, tid )
__field( int, oldprio )
__field( int, newprio )
),
TP_fast_assign(
tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
tp_assign(tid, tsk->pid)
tp_assign(oldprio, tsk->prio - MAX_RT_PRIO)
tp_assign(newprio, newprio - MAX_RT_PRIO)
),
TP_printk("comm=%s tid=%d oldprio=%d newprio=%d",
__entry->comm, __entry->tid,
__entry->oldprio, __entry->newprio)
)
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
#include "../../../probes/define_trace.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM raw_syscalls
#define TRACE_INCLUDE_FILE syscalls
#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_EVENTS_SYSCALLS_H
#include <linux/tracepoint.h>
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
#ifndef _TRACE_SYSCALLS_DEF_
#define _TRACE_SYSCALLS_DEF_
#include <asm/ptrace.h>
#include <asm/syscall.h>
#endif /* _TRACE_SYSCALLS_DEF_ */
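/*
 * sys_enter below copies all six syscall arguments: a GCC statement
 * expression fills a temporary array via syscall_get_arguments(), and
 * tp_memcpy() then copies that array into the event record.
 */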
TRACE_EVENT(sys_enter,
TP_PROTO(struct pt_regs *regs, long id),
TP_ARGS(regs, id),
TP_STRUCT__entry(
__field( long, id )
__array( unsigned long, args, 6 )
),
TP_fast_assign(
tp_assign(id, id)
{
tp_memcpy(args,
({
unsigned long args_copy[6];
syscall_get_arguments(current, regs,
0, 6, args_copy);
args_copy;
}), 6 * sizeof(unsigned long));
}
),
TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)",
__entry->id,
__entry->args[0], __entry->args[1], __entry->args[2],
__entry->args[3], __entry->args[4], __entry->args[5])
)
TRACE_EVENT(sys_exit,
TP_PROTO(struct pt_regs *regs, long ret),
TP_ARGS(regs, ret),
TP_STRUCT__entry(
__field( long, id )
__field( long, ret )
),
TP_fast_assign(
tp_assign(id, syscall_get_nr(current, regs))
tp_assign(ret, ret)
),
TP_printk("NR %ld = %ld",
__entry->id, __entry->ret)
)
#endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */
#endif /* _TRACE_EVENTS_SYSCALLS_H */
/* This part must be outside protection */
#include "../../../probes/define_trace.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM irq
#if !defined(_TRACE_IRQ_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IRQ_H
#include <linux/tracepoint.h>
struct irqaction;
struct softirq_action;
#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
#define show_softirq_name(val) \
__print_symbolic(val, \
softirq_name(HI), \
softirq_name(TIMER), \
softirq_name(NET_TX), \
softirq_name(NET_RX), \
softirq_name(BLOCK), \
softirq_name(BLOCK_IOPOLL), \
softirq_name(TASKLET), \
softirq_name(SCHED), \
softirq_name(HRTIMER), \
softirq_name(RCU))
/**
* irq_handler_entry - called immediately before the irq action handler
* @irq: irq number
* @action: pointer to struct irqaction
*
* The struct irqaction pointed to by @action contains various
* information about the handler, including the device name,
* @action->name, and the device id, @action->dev_id. When used in
* conjunction with the irq_handler_exit tracepoint, we can figure
* out irq handler latencies.
*/
TRACE_EVENT(irq_handler_entry,
TP_PROTO(int irq, struct irqaction *action),
TP_ARGS(irq, action),
TP_STRUCT__entry(
__field( int, irq )
__string( name, action->name )
),
TP_fast_assign(
__entry->irq = irq;
__assign_str(name, action->name);
),
TP_printk("irq=%d name=%s", __entry->irq, __get_str(name))
);
/**
* irq_handler_exit - called immediately after the irq action handler returns
* @irq: irq number
* @action: pointer to struct irqaction
* @ret: return value
*
* If the @ret value is set to IRQ_HANDLED, then we know that the corresponding
* @action->handler successfully handled this irq. Otherwise, the irq might be
* a shared irq line, or the irq was not handled successfully. Can be used in
* conjunction with the irq_handler_entry to understand irq handler latencies.
*/
TRACE_EVENT(irq_handler_exit,
TP_PROTO(int irq, struct irqaction *action, int ret),
TP_ARGS(irq, action, ret),
TP_STRUCT__entry(
__field( int, irq )
__field( int, ret )
),
TP_fast_assign(
__entry->irq = irq;
__entry->ret = ret;
),
TP_printk("irq=%d ret=%s",
__entry->irq, __entry->ret ? "handled" : "unhandled")
);
DECLARE_EVENT_CLASS(softirq,
TP_PROTO(unsigned int vec_nr),
TP_ARGS(vec_nr),
TP_STRUCT__entry(
__field( unsigned int, vec )
),
TP_fast_assign(
__entry->vec = vec_nr;
),
TP_printk("vec=%u [action=%s]", __entry->vec,
show_softirq_name(__entry->vec))
);
/**
* softirq_entry - called immediately before the softirq handler
* @vec_nr: softirq vector number
*
* When used in combination with the softirq_exit tracepoint
* we can determine the softirq handler runtime.
*/
DEFINE_EVENT(softirq, softirq_entry,
TP_PROTO(unsigned int vec_nr),
TP_ARGS(vec_nr)
);
/**
* softirq_exit - called immediately after the softirq handler returns
* @vec_nr: softirq vector number
*
* When used in combination with the softirq_entry tracepoint
* we can determine the softirq handler runtime.
*/
DEFINE_EVENT(softirq, softirq_exit,
TP_PROTO(unsigned int vec_nr),
TP_ARGS(vec_nr)
);
/**
* softirq_raise - called immediately when a softirq is raised
* @vec_nr: softirq vector number
*
* When used in combination with the softirq_entry tracepoint
* we can determine the softirq raise to run latency.
*/
DEFINE_EVENT(softirq, softirq_raise,
TP_PROTO(unsigned int vec_nr),
TP_ARGS(vec_nr)
);
#endif /* _TRACE_IRQ_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_MAIN_H
#include <linux/tracepoint.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }
#define kvm_trace_exit_reason \
ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL), \
ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN), \
ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR), \
ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI)
TRACE_EVENT(kvm_userspace_exit,
TP_PROTO(__u32 reason, int errno),
TP_ARGS(reason, errno),
TP_STRUCT__entry(
__field( __u32, reason )
__field( int, errno )
),
TP_fast_assign(
__entry->reason = reason;
__entry->errno = errno;
),
TP_printk("reason %s (%d)",
__entry->errno < 0 ?
(__entry->errno == -EINTR ? "restart" : "error") :
__print_symbolic(__entry->reason, kvm_trace_exit_reason),
__entry->errno < 0 ? -__entry->errno : __entry->reason)
);
#if defined(__KVM_HAVE_IOAPIC)
TRACE_EVENT(kvm_set_irq,
TP_PROTO(unsigned int gsi, int level, int irq_source_id),
TP_ARGS(gsi, level, irq_source_id),
TP_STRUCT__entry(
__field( unsigned int, gsi )
__field( int, level )
__field( int, irq_source_id )
),
TP_fast_assign(
__entry->gsi = gsi;
__entry->level = level;
__entry->irq_source_id = irq_source_id;
),
TP_printk("gsi %u level %d source %d",
__entry->gsi, __entry->level, __entry->irq_source_id)
);
#define kvm_deliver_mode \
{0x0, "Fixed"}, \
{0x1, "LowPrio"}, \
{0x2, "SMI"}, \
{0x3, "Res3"}, \
{0x4, "NMI"}, \
{0x5, "INIT"}, \
{0x6, "SIPI"}, \
{0x7, "ExtINT"}
TRACE_EVENT(kvm_ioapic_set_irq,
TP_PROTO(__u64 e, int pin, bool coalesced),
TP_ARGS(e, pin, coalesced),
TP_STRUCT__entry(
__field( __u64, e )
__field( int, pin )
__field( bool, coalesced )
),
TP_fast_assign(
__entry->e = e;
__entry->pin = pin;
__entry->coalesced = coalesced;
),
TP_printk("pin %u dst %x vec=%u (%s|%s|%s%s)%s",
__entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
__print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
(__entry->e & (1<<11)) ? "logical" : "physical",
(__entry->e & (1<<15)) ? "level" : "edge",
(__entry->e & (1<<16)) ? "|masked" : "",
__entry->coalesced ? " (coalesced)" : "")
);
TRACE_EVENT(kvm_msi_set_irq,
TP_PROTO(__u64 address, __u64 data),
TP_ARGS(address, data),
TP_STRUCT__entry(
__field( __u64, address )
__field( __u64, data )
),
TP_fast_assign(
__entry->address = address;
__entry->data = data;
),
TP_printk("dst %u vec %x (%s|%s|%s%s)",
(u8)(__entry->address >> 12), (u8)__entry->data,
__print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
(__entry->address & (1<<2)) ? "logical" : "physical",
(__entry->data & (1<<15)) ? "level" : "edge",
(__entry->address & (1<<3)) ? "|rh" : "")
);
#define kvm_irqchips \
{KVM_IRQCHIP_PIC_MASTER, "PIC master"}, \
{KVM_IRQCHIP_PIC_SLAVE, "PIC slave"}, \
{KVM_IRQCHIP_IOAPIC, "IOAPIC"}
TRACE_EVENT(kvm_ack_irq,
TP_PROTO(unsigned int irqchip, unsigned int pin),
TP_ARGS(irqchip, pin),
TP_STRUCT__entry(
__field( unsigned int, irqchip )
__field( unsigned int, pin )
),
TP_fast_assign(
__entry->irqchip = irqchip;
__entry->pin = pin;
),
TP_printk("irqchip %s pin %u",
__print_symbolic(__entry->irqchip, kvm_irqchips),
__entry->pin)
);
#endif /* defined(__KVM_HAVE_IOAPIC) */
#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
#define KVM_TRACE_MMIO_READ 1
#define KVM_TRACE_MMIO_WRITE 2
#define kvm_trace_symbol_mmio \
{ KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
{ KVM_TRACE_MMIO_READ, "read" }, \
{ KVM_TRACE_MMIO_WRITE, "write" }
TRACE_EVENT(kvm_mmio,
TP_PROTO(int type, int len, u64 gpa, u64 val),
TP_ARGS(type, len, gpa, val),
TP_STRUCT__entry(
__field( u32, type )
__field( u32, len )
__field( u64, gpa )
__field( u64, val )
),
TP_fast_assign(
__entry->type = type;
__entry->len = len;
__entry->gpa = gpa;
__entry->val = val;
),
TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
__print_symbolic(__entry->type, kvm_trace_symbol_mmio),
__entry->len, __entry->gpa, __entry->val)
);
#define kvm_fpu_load_symbol \
{0, "unload"}, \
{1, "load"}
TRACE_EVENT(kvm_fpu,
TP_PROTO(int load),
TP_ARGS(load),
TP_STRUCT__entry(
__field( u32, load )
),
TP_fast_assign(
__entry->load = load;
),
TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
);
TRACE_EVENT(kvm_age_page,
TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref),
TP_ARGS(hva, slot, ref),
TP_STRUCT__entry(
__field( u64, hva )
__field( u64, gfn )
__field( u8, referenced )
),
TP_fast_assign(
__entry->hva = hva;
__entry->gfn =
slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
__entry->referenced = ref;
),
TP_printk("hva %llx gfn %llx %s",
__entry->hva, __entry->gfn,
__entry->referenced ? "YOUNG" : "OLD")
);
#ifdef CONFIG_KVM_ASYNC_PF
DECLARE_EVENT_CLASS(kvm_async_get_page_class,
TP_PROTO(u64 gva, u64 gfn),
TP_ARGS(gva, gfn),
TP_STRUCT__entry(
__field(__u64, gva)
__field(u64, gfn)
),
TP_fast_assign(
__entry->gva = gva;
__entry->gfn = gfn;
),
TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
);
DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
TP_PROTO(u64 gva, u64 gfn),
TP_ARGS(gva, gfn)
);
DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
TP_PROTO(u64 gva, u64 gfn),
TP_ARGS(gva, gfn)
);
DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,
TP_PROTO(u64 token, u64 gva),
TP_ARGS(token, gva),
TP_STRUCT__entry(
__field(__u64, token)
__field(__u64, gva)
),
TP_fast_assign(
__entry->token = token;
__entry->gva = gva;
),
TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
);
DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,
TP_PROTO(u64 token, u64 gva),
TP_ARGS(token, gva)
);
DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
TP_PROTO(u64 token, u64 gva),
TP_ARGS(token, gva)
);
TRACE_EVENT(
kvm_async_pf_completed,
TP_PROTO(unsigned long address, struct page *page, u64 gva),
TP_ARGS(address, page, gva),
TP_STRUCT__entry(
__field(unsigned long, address)
__field(pfn_t, pfn)
__field(u64, gva)
),
TP_fast_assign(
__entry->address = address;
__entry->pfn = page ? page_to_pfn(page) : 0;
__entry->gva = gva;
),
TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva,
__entry->address, __entry->pfn)
);
#endif
#endif /* _TRACE_KVM_MAIN_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched
#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H
#include <linux/sched.h>
#include <linux/tracepoint.h>
/*
* Tracepoint for calling kthread_stop, performed to end a kthread:
*/
TRACE_EVENT(sched_kthread_stop,
TP_PROTO(struct task_struct *t),
TP_ARGS(t),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
),
TP_fast_assign(
memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
__entry->pid = t->pid;
),
TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
/*
* Tracepoint for the return value of the kthread stopping:
*/
TRACE_EVENT(sched_kthread_stop_ret,
TP_PROTO(int ret),
TP_ARGS(ret),
TP_STRUCT__entry(
__field( int, ret )
),
TP_fast_assign(
__entry->ret = ret;
),
TP_printk("ret=%d", __entry->ret)
);
/*
* Tracepoint for waking up a task:
*/
DECLARE_EVENT_CLASS(sched_wakeup_template,
TP_PROTO(struct task_struct *p, int success),
TP_ARGS(p, success),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, prio )
__field( int, success )
__field( int, target_cpu )
),
TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio;
__entry->success = success;
__entry->target_cpu = task_cpu(p);
),
TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
__entry->comm, __entry->pid, __entry->prio,
__entry->success, __entry->target_cpu)
);
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
TP_PROTO(struct task_struct *p, int success),
TP_ARGS(p, success));
/*
* Tracepoint for waking up a new task:
*/
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
TP_PROTO(struct task_struct *p, int success),
TP_ARGS(p, success));
#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(struct task_struct *p)
{
long state = p->state;
#ifdef CONFIG_PREEMPT
/*
* For all intents and purposes a preempted task is a running task.
*/
if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
state = TASK_RUNNING;
#endif
return state;
}
#endif
/*
* Tracepoint for task switches, performed by the scheduler:
*/
TRACE_EVENT(sched_switch,
TP_PROTO(struct task_struct *prev,
struct task_struct *next),
TP_ARGS(prev, next),
TP_STRUCT__entry(
__array( char, prev_comm, TASK_COMM_LEN )
__field( pid_t, prev_pid )
__field( int, prev_prio )
__field( long, prev_state )
__array( char, next_comm, TASK_COMM_LEN )
__field( pid_t, next_pid )
__field( int, next_prio )
),
TP_fast_assign(
memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
__entry->prev_pid = prev->pid;
__entry->prev_prio = prev->prio;
__entry->prev_state = __trace_sched_switch_state(prev);
memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
__entry->next_pid = next->pid;
__entry->next_prio = next->prio;
),
TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_pid=%d next_prio=%d",
__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
__entry->prev_state ?
__print_flags(__entry->prev_state, "|",
{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
{ 16, "Z" }, { 32, "X" }, { 64, "x" },
{ 128, "W" }) : "R",
__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
/*
* Tracepoint for a task being migrated:
*/
TRACE_EVENT(sched_migrate_task,
TP_PROTO(struct task_struct *p, int dest_cpu),
TP_ARGS(p, dest_cpu),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, prio )
__field( int, orig_cpu )
__field( int, dest_cpu )
),
TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio;
__entry->orig_cpu = task_cpu(p);
__entry->dest_cpu = dest_cpu;
),
TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
__entry->comm, __entry->pid, __entry->prio,
__entry->orig_cpu, __entry->dest_cpu)
);
DECLARE_EVENT_CLASS(sched_process_template,
TP_PROTO(struct task_struct *p),
TP_ARGS(p),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, prio )
),
TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio;
),
TP_printk("comm=%s pid=%d prio=%d",
__entry->comm, __entry->pid, __entry->prio)
);
/*
* Tracepoint for freeing a task:
*/
DEFINE_EVENT(sched_process_template, sched_process_free,
TP_PROTO(struct task_struct *p),
TP_ARGS(p));
/*
* Tracepoint for a task exiting:
*/
DEFINE_EVENT(sched_process_template, sched_process_exit,
TP_PROTO(struct task_struct *p),
TP_ARGS(p));
/*
* Tracepoint for waiting on task to unschedule:
*/
DEFINE_EVENT(sched_process_template, sched_wait_task,
TP_PROTO(struct task_struct *p),
TP_ARGS(p));
/*
* Tracepoint for a waiting task:
*/
TRACE_EVENT(sched_process_wait,
TP_PROTO(struct pid *pid),
TP_ARGS(pid),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, prio )
),
TP_fast_assign(
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
__entry->pid = pid_nr(pid);
__entry->prio = current->prio;
),
TP_printk("comm=%s pid=%d prio=%d",
__entry->comm, __entry->pid, __entry->prio)
);
/*
* Tracepoint for do_fork:
*/
TRACE_EVENT(sched_process_fork,
TP_PROTO(struct task_struct *parent, struct task_struct *child),
TP_ARGS(parent, child),
TP_STRUCT__entry(
__array( char, parent_comm, TASK_COMM_LEN )
__field( pid_t, parent_pid )
__array( char, child_comm, TASK_COMM_LEN )
__field( pid_t, child_pid )
),
TP_fast_assign(
memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
__entry->parent_pid = parent->pid;
memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
__entry->child_pid = child->pid;
),
TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
__entry->parent_comm, __entry->parent_pid,
__entry->child_comm, __entry->child_pid)
);
/*
* XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
* adding sched_stat support to SCHED_FIFO/RR would be welcome.
*/
DECLARE_EVENT_CLASS(sched_stat_template,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( u64, delay )
),
TP_fast_assign(
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->delay = delay;
)
TP_perf_assign(
__perf_count(delay);
),
TP_printk("comm=%s pid=%d delay=%Lu [ns]",
__entry->comm, __entry->pid,
(unsigned long long)__entry->delay)
);
/*
* Tracepoint for accounting wait time (time the task is runnable
* but not actually running due to scheduler contention).
*/
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay));
/*
* Tracepoint for accounting sleep time (time the task is not runnable,
* including iowait, see below).
*/
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay));
/*
* Tracepoint for accounting iowait time (time the task is not runnable
* due to waiting on IO to complete).
*/
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay));
/*
* Tracepoint for accounting runtime (time the task is executing
* on a CPU).
*/
TRACE_EVENT(sched_stat_runtime,
TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
TP_ARGS(tsk, runtime, vruntime),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( u64, runtime )
__field( u64, vruntime )
),
TP_fast_assign(
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->runtime = runtime;
__entry->vruntime = vruntime;
)
TP_perf_assign(
__perf_count(runtime);
),
TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
__entry->comm, __entry->pid,
(unsigned long long)__entry->runtime,
(unsigned long long)__entry->vruntime)
);
/*
* Tracepoint for showing priority inheritance modifying a task's
* priority.
*/
TRACE_EVENT(sched_pi_setprio,
TP_PROTO(struct task_struct *tsk, int newprio),
TP_ARGS(tsk, newprio),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, oldprio )
__field( int, newprio )
),
TP_fast_assign(
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->oldprio = tsk->prio;
__entry->newprio = newprio;
),
TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
__entry->comm, __entry->pid,
__entry->oldprio, __entry->newprio)
);
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM raw_syscalls
#define TRACE_INCLUDE_FILE syscalls
#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_EVENTS_SYSCALLS_H
#include <linux/tracepoint.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
extern void syscall_regfunc(void);
extern void syscall_unregfunc(void);
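/*
 * syscall_regfunc()/syscall_unregfunc() toggle the per-thread
 * TIF_SYSCALL_TRACEPOINT flag, so the syscall slow path only hits these
 * tracepoints while at least one probe is registered.
 */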
TRACE_EVENT_FN(sys_enter,
TP_PROTO(struct pt_regs *regs, long id),
TP_ARGS(regs, id),
TP_STRUCT__entry(
__field( long, id )
__array( unsigned long, args, 6 )
),
TP_fast_assign(
__entry->id = id;
syscall_get_arguments(current, regs, 0, 6, __entry->args);
),
TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)",
__entry->id,
__entry->args[0], __entry->args[1], __entry->args[2],
__entry->args[3], __entry->args[4], __entry->args[5]),
syscall_regfunc, syscall_unregfunc
);
TRACE_EVENT_FLAGS(sys_enter, TRACE_EVENT_FL_CAP_ANY)
TRACE_EVENT_FN(sys_exit,
TP_PROTO(struct pt_regs *regs, long ret),
TP_ARGS(regs, ret),
TP_STRUCT__entry(
__field( long, id )
__field( long, ret )
),
TP_fast_assign(
__entry->id = syscall_get_nr(current, regs);
__entry->ret = ret;
),
TP_printk("NR %ld = %ld",
__entry->id, __entry->ret),
syscall_regfunc, syscall_unregfunc
);
TRACE_EVENT_FLAGS(sys_exit, TRACE_EVENT_FL_CAP_ANY)
#endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */
#endif /* _TRACE_EVENTS_SYSCALLS_H */
/* This part must be outside protection */
#include <trace/define_trace.h>