Commit 1e0fb9ec authored by Andy Lutomirski, committed by Ingo Molnar

perf: Add pmu callbacks to track event mapping and unmapping

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Vince Weaver <vince@deater.net>
Cc: "hillf.zj" <hillf.zj@alibaba-inc.com>
Cc: Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/266afcba1d1f91ea5501e4e16e94bbbc1a9339b6.1414190806.git.luto@amacapital.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 22c4bd9f
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -202,6 +202,13 @@ struct pmu {
 	 */
 	int (*event_init)		(struct perf_event *event);
 
+	/*
+	 * Notification that the event was mapped or unmapped.  Called
+	 * in the context of the mapping task.
+	 */
+	void (*event_mapped)		(struct perf_event *event); /*optional*/
+	void (*event_unmapped)		(struct perf_event *event); /*optional*/
+
 #define PERF_EF_START	0x01		/* start the counter when adding    */
 #define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
 #define PERF_EF_UPDATE	0x04		/* update the counter when stopping */
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4293,6 +4293,9 @@ static void perf_mmap_open(struct vm_area_struct *vma)
 
 	atomic_inc(&event->mmap_count);
 	atomic_inc(&event->rb->mmap_count);
+
+	if (event->pmu->event_mapped)
+		event->pmu->event_mapped(event);
 }
 
 /*
@@ -4312,6 +4315,9 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 	int mmap_locked = rb->mmap_locked;
 	unsigned long size = perf_data_size(rb);
 
+	if (event->pmu->event_unmapped)
+		event->pmu->event_unmapped(event);
+
 	atomic_dec(&rb->mmap_count);
 
 	if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
@@ -4513,6 +4519,9 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
 	vma->vm_ops = &perf_mmap_vmops;
 
+	if (event->pmu->event_mapped)
+		event->pmu->event_mapped(event);
+
 	return ret;
 }
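
Note: a PMU driver opts into these notifications simply by filling in the two new, optional struct pmu members; the core checks the pointers before calling, so drivers that do not care can leave them NULL. The sketch below is illustrative only and is not part of this commit: the "example_" names and the per-counter bookkeeping are assumptions, and the mandatory pmu callbacks are omitted for brevity.

#include <linux/perf_event.h>
#include <linux/atomic.h>

/* Example bookkeeping: count live userspace mappings of our events. */
static atomic_t example_nr_mapped;

static void example_event_mapped(struct perf_event *event)
{
	/* Runs in the mmap()ing task's context, so current->mm is the
	 * address space the event is being mapped into. */
	atomic_inc(&example_nr_mapped);
}

static void example_event_unmapped(struct perf_event *event)
{
	atomic_dec(&example_nr_mapped);
}

static struct pmu example_pmu = {
	/* ... mandatory callbacks (event_init, add, del, ...) omitted ... */
	.event_mapped	= example_event_mapped,		/* optional */
	.event_unmapped	= example_event_unmapped,	/* optional */
};

Because perf_mmap_open()/perf_mmap_close() invoke the callbacks in the context of the task doing the mapping, an implementation can safely adjust per-mm or per-task state from them rather than having to defer the work.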