Commit 60313ebe authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: Add fork event

Create a fork event so that we can easily clone the comm and
dso maps without having to generate all those events.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 20c84e95
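
For anyone parsing the new record out of a counter's mmap'ed ring buffer: the layout is exactly the struct documented in the perf_counter.h hunk below. A minimal consumer-side mirror of it (the struct name fork_record is ours, not the kernel's; pid/ppid are filled in by perf_counter_fork_output() as the child and its real parent):

	#include <linux/types.h>
	#include <linux/perf_counter.h>	/* struct perf_event_header, PERF_EVENT_FORK */

	/*
	 * Mirror of the PERF_EVENT_FORK record; header.type is
	 * PERF_EVENT_FORK and header.size covers the whole record.
	 */
	struct fork_record {
		struct perf_event_header	header;
		__u32				pid;	/* the new child task */
		__u32				ppid;	/* its (real) parent */
	};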
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -276,6 +276,14 @@ enum perf_event_type {
 	PERF_EVENT_THROTTLE		= 5,
 	PERF_EVENT_UNTHROTTLE		= 6,
 
+	/*
+	 * struct {
+	 *	struct perf_event_header	header;
+	 *	u32				pid, ppid;
+	 * };
+	 */
+	PERF_EVENT_FORK			= 7,
+
 	/*
 	 * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
 	 * will be PERF_RECORD_*
@@ -618,6 +626,7 @@ extern void perf_counter_munmap(unsigned long addr, unsigned long len,
 				unsigned long pgoff, struct file *file);
 
 extern void perf_counter_comm(struct task_struct *tsk);
+extern void perf_counter_fork(struct task_struct *tsk);
 
 extern void perf_counter_task_migration(struct task_struct *task, int cpu);
@@ -673,6 +682,7 @@ perf_counter_munmap(unsigned long addr, unsigned long len,
 			unsigned long pgoff, struct file *file)		{ }
 
 static inline void perf_counter_comm(struct task_struct *tsk)		{ }
+static inline void perf_counter_fork(struct task_struct *tsk)		{ }
 static inline void perf_counter_init(void)				{ }
 static inline void perf_counter_task_migration(struct task_struct *task,
 					       int cpu)			{ }
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1412,12 +1412,12 @@ long do_fork(unsigned long clone_flags,
 		if (clone_flags & CLONE_VFORK) {
 			p->vfork_done = &vfork;
 			init_completion(&vfork);
-		} else {
+		} else if (!(clone_flags & CLONE_VM)) {
 			/*
 			 * vfork will do an exec which will call
 			 * set_task_comm()
 			 */
-			perf_counter_comm(p);
+			perf_counter_fork(p);
 		}
 
 		audit_finish_fork(p);
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -40,9 +40,9 @@ static int perf_reserved_percpu __read_mostly;
 static int perf_overcommit __read_mostly = 1;
 
 static atomic_t nr_counters __read_mostly;
-static atomic_t nr_mmap_tracking __read_mostly;
-static atomic_t nr_munmap_tracking __read_mostly;
-static atomic_t nr_comm_tracking __read_mostly;
+static atomic_t nr_mmap_counters __read_mostly;
+static atomic_t nr_munmap_counters __read_mostly;
+static atomic_t nr_comm_counters __read_mostly;
 
 int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
 int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
@@ -1447,11 +1447,11 @@ static void free_counter(struct perf_counter *counter)
 	atomic_dec(&nr_counters);
 	if (counter->attr.mmap)
-		atomic_dec(&nr_mmap_tracking);
+		atomic_dec(&nr_mmap_counters);
 	if (counter->attr.munmap)
-		atomic_dec(&nr_munmap_tracking);
+		atomic_dec(&nr_munmap_counters);
 	if (counter->attr.comm)
-		atomic_dec(&nr_comm_tracking);
+		atomic_dec(&nr_comm_counters);
 
 	if (counter->destroy)
 		counter->destroy(counter);
@@ -2475,6 +2475,105 @@ static void perf_counter_output(struct perf_counter *counter,
 	perf_output_end(&handle);
 }
 
+/*
+ * fork tracking
+ */
+
+struct perf_fork_event {
+	struct task_struct	*task;
+
+	struct {
+		struct perf_event_header	header;
+
+		u32				pid;
+		u32				ppid;
+	} event;
+};
+
+static void perf_counter_fork_output(struct perf_counter *counter,
+				     struct perf_fork_event *fork_event)
+{
+	struct perf_output_handle handle;
+	int size = fork_event->event.header.size;
+	struct task_struct *task = fork_event->task;
+	int ret = perf_output_begin(&handle, counter, size, 0, 0);
+
+	if (ret)
+		return;
+
+	fork_event->event.pid = perf_counter_pid(counter, task);
+	fork_event->event.ppid = perf_counter_pid(counter, task->real_parent);
+
+	perf_output_put(&handle, fork_event->event);
+	perf_output_end(&handle);
+}
+
+static int perf_counter_fork_match(struct perf_counter *counter)
+{
+	if (counter->attr.comm || counter->attr.mmap || counter->attr.munmap)
+		return 1;
+
+	return 0;
+}
+
+static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
+				  struct perf_fork_event *fork_event)
+{
+	struct perf_counter *counter;
+
+	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
+		return;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
+		if (perf_counter_fork_match(counter))
+			perf_counter_fork_output(counter, fork_event);
+	}
+	rcu_read_unlock();
+}
+
+static void perf_counter_fork_event(struct perf_fork_event *fork_event)
+{
+	struct perf_cpu_context *cpuctx;
+	struct perf_counter_context *ctx;
+
+	cpuctx = &get_cpu_var(perf_cpu_context);
+	perf_counter_fork_ctx(&cpuctx->ctx, fork_event);
+	put_cpu_var(perf_cpu_context);
+
+	rcu_read_lock();
+	/*
+	 * doesn't really matter which of the child contexts the
+	 * events ends up in.
+	 */
+	ctx = rcu_dereference(current->perf_counter_ctxp);
+	if (ctx)
+		perf_counter_fork_ctx(ctx, fork_event);
+	rcu_read_unlock();
+}
+
+void perf_counter_fork(struct task_struct *task)
+{
+	struct perf_fork_event fork_event;
+
+	if (!atomic_read(&nr_comm_counters) &&
+	    !atomic_read(&nr_mmap_counters) &&
+	    !atomic_read(&nr_munmap_counters))
+		return;
+
+	fork_event = (struct perf_fork_event){
+		.task	= task,
+		.event  = {
+			.header = {
+				.type = PERF_EVENT_FORK,
+				.size = sizeof(fork_event.event),
+			},
+		},
+	};
+
+	perf_counter_fork_event(&fork_event);
+}
+
 /*
  * comm tracking
  */
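
Note there is no separate attribute bit for fork events in this patch: perf_counter_fork_match() above delivers the record to any counter that already asked for comm, mmap, or munmap events. Illustratively, attribute setup only (a sketch, not a complete counter-open sequence):

	struct perf_counter_attr attr = { 0 };	/* .type/.config set as usual */

	attr.comm   = 1;	/* any one of these three bits also */
	attr.mmap   = 1;	/* opts the counter in to receiving */
	attr.munmap = 1;	/* PERF_EVENT_FORK records          */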
@@ -2511,11 +2610,9 @@ static void perf_counter_comm_output(struct perf_counter *counter,
 	perf_output_end(&handle);
 }
 
-static int perf_counter_comm_match(struct perf_counter *counter,
-				   struct perf_comm_event *comm_event)
+static int perf_counter_comm_match(struct perf_counter *counter)
 {
-	if (counter->attr.comm &&
-	    comm_event->event.header.type == PERF_EVENT_COMM)
+	if (counter->attr.comm)
 		return 1;
 
 	return 0;
@@ -2531,7 +2628,7 @@ static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
 	rcu_read_lock();
 	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
-		if (perf_counter_comm_match(counter, comm_event))
+		if (perf_counter_comm_match(counter))
 			perf_counter_comm_output(counter, comm_event);
 	}
 	rcu_read_unlock();
@@ -2570,7 +2667,7 @@ void perf_counter_comm(struct task_struct *task)
 {
 	struct perf_comm_event comm_event;
 
-	if (!atomic_read(&nr_comm_tracking))
+	if (!atomic_read(&nr_comm_counters))
 		return;
 
 	comm_event = (struct perf_comm_event){
@@ -2708,7 +2805,7 @@ void perf_counter_mmap(unsigned long addr, unsigned long len,
 {
 	struct perf_mmap_event mmap_event;
 
-	if (!atomic_read(&nr_mmap_tracking))
+	if (!atomic_read(&nr_mmap_counters))
 		return;
 
 	mmap_event = (struct perf_mmap_event){
@@ -2729,7 +2826,7 @@ void perf_counter_munmap(unsigned long addr, unsigned long len,
 {
 	struct perf_mmap_event mmap_event;
 
-	if (!atomic_read(&nr_munmap_tracking))
+	if (!atomic_read(&nr_munmap_counters))
 		return;
 
 	mmap_event = (struct perf_mmap_event){
@@ -3427,11 +3524,11 @@ perf_counter_alloc(struct perf_counter_attr *attr,
 	atomic_inc(&nr_counters);
 	if (counter->attr.mmap)
-		atomic_inc(&nr_mmap_tracking);
+		atomic_inc(&nr_mmap_counters);
 	if (counter->attr.munmap)
-		atomic_inc(&nr_munmap_tracking);
+		atomic_inc(&nr_munmap_counters);
 	if (counter->attr.comm)
-		atomic_inc(&nr_comm_tracking);
+		atomic_inc(&nr_comm_counters);
 
 	return counter;
 }
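
The payoff on the tooling side: on PERF_EVENT_FORK a consumer can duplicate the parent's comm and dso maps into the child in one step, instead of replaying a comm event plus one mmap event per mapping. A rough dispatch sketch; struct thread, thread__findnew(), and map_groups__clone() are hypothetical tool-side helpers, not part of this patch, and fork_record is the mirror struct sketched above:

	struct thread;					/* tool-side bookkeeping (hypothetical) */
	struct thread *thread__findnew(__u32 pid);	/* look up or create a thread entry */
	void map_groups__clone(struct thread *child,
			       struct thread *parent);	/* copy comm + dso maps wholesale */

	static void process_event(struct perf_event_header *hdr)
	{
		switch (hdr->type) {
		case PERF_EVENT_FORK: {
			struct fork_record *f = (struct fork_record *)hdr;

			/* one record, whole inheritance: clone the
			 * parent's maps into the new child */
			map_groups__clone(thread__findnew(f->pid),
					  thread__findnew(f->ppid));
			break;
		}
		case PERF_EVENT_COMM:
		case PERF_EVENT_MMAP:
			/* still update a single thread's comm/maps */
			break;
		default:
			break;
		}
	}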