Commit bf8e8f4b authored by Adrian Hunter's avatar Adrian Hunter Committed by Arnaldo Carvalho de Melo

perf evlist: Add 'system_wide' option

Add an option to cause a selected event to be opened always without a
pid when configured by perf_evsel__config().

This is needed when using the sched_switch tracepoint to follow object
code execution.

sched_switch occurs before the task switch and so it cannot record it in
a context limited to that task.  Note that this also means that
sched_switch is useless when capturing data per-thread, as is the
'context-switches' software event for the same reason.
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/r/1406786474-9306-9-git-send-email-adrian.hunter@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent f247fb81
...@@ -265,17 +265,27 @@ int perf_evlist__add_newtp(struct perf_evlist *evlist, ...@@ -265,17 +265,27 @@ int perf_evlist__add_newtp(struct perf_evlist *evlist,
return 0; return 0;
} }
/*
 * perf_evlist__nr_threads - number of per-thread fd slots for an evsel.
 * @evlist: event list whose thread map supplies the default count
 * @evsel:  event whose 'system_wide' flag is consulted
 *
 * A system-wide event is opened without a pid, once per CPU, so it
 * occupies exactly one "thread" slot; any other event has one slot per
 * thread in the evlist's thread map.
 */
static int perf_evlist__nr_threads(struct perf_evlist *evlist,
				   struct perf_evsel *evsel)
{
	/* System-wide events use a single fd per cpu (thread index 0). */
	if (evsel->system_wide)
		return 1;

	return thread_map__nr(evlist->threads);
}
void perf_evlist__disable(struct perf_evlist *evlist) void perf_evlist__disable(struct perf_evlist *evlist)
{ {
int cpu, thread; int cpu, thread;
struct perf_evsel *pos; struct perf_evsel *pos;
int nr_cpus = cpu_map__nr(evlist->cpus); int nr_cpus = cpu_map__nr(evlist->cpus);
int nr_threads = thread_map__nr(evlist->threads); int nr_threads;
for (cpu = 0; cpu < nr_cpus; cpu++) { for (cpu = 0; cpu < nr_cpus; cpu++) {
evlist__for_each(evlist, pos) { evlist__for_each(evlist, pos) {
if (!perf_evsel__is_group_leader(pos) || !pos->fd) if (!perf_evsel__is_group_leader(pos) || !pos->fd)
continue; continue;
nr_threads = perf_evlist__nr_threads(evlist, pos);
for (thread = 0; thread < nr_threads; thread++) for (thread = 0; thread < nr_threads; thread++)
ioctl(FD(pos, cpu, thread), ioctl(FD(pos, cpu, thread),
PERF_EVENT_IOC_DISABLE, 0); PERF_EVENT_IOC_DISABLE, 0);
...@@ -288,12 +298,13 @@ void perf_evlist__enable(struct perf_evlist *evlist) ...@@ -288,12 +298,13 @@ void perf_evlist__enable(struct perf_evlist *evlist)
int cpu, thread; int cpu, thread;
struct perf_evsel *pos; struct perf_evsel *pos;
int nr_cpus = cpu_map__nr(evlist->cpus); int nr_cpus = cpu_map__nr(evlist->cpus);
int nr_threads = thread_map__nr(evlist->threads); int nr_threads;
for (cpu = 0; cpu < nr_cpus; cpu++) { for (cpu = 0; cpu < nr_cpus; cpu++) {
evlist__for_each(evlist, pos) { evlist__for_each(evlist, pos) {
if (!perf_evsel__is_group_leader(pos) || !pos->fd) if (!perf_evsel__is_group_leader(pos) || !pos->fd)
continue; continue;
nr_threads = perf_evlist__nr_threads(evlist, pos);
for (thread = 0; thread < nr_threads; thread++) for (thread = 0; thread < nr_threads; thread++)
ioctl(FD(pos, cpu, thread), ioctl(FD(pos, cpu, thread),
PERF_EVENT_IOC_ENABLE, 0); PERF_EVENT_IOC_ENABLE, 0);
...@@ -305,12 +316,14 @@ int perf_evlist__disable_event(struct perf_evlist *evlist, ...@@ -305,12 +316,14 @@ int perf_evlist__disable_event(struct perf_evlist *evlist,
struct perf_evsel *evsel) struct perf_evsel *evsel)
{ {
int cpu, thread, err; int cpu, thread, err;
int nr_cpus = cpu_map__nr(evlist->cpus);
int nr_threads = perf_evlist__nr_threads(evlist, evsel);
if (!evsel->fd) if (!evsel->fd)
return 0; return 0;
for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { for (cpu = 0; cpu < nr_cpus; cpu++) {
for (thread = 0; thread < evlist->threads->nr; thread++) { for (thread = 0; thread < nr_threads; thread++) {
err = ioctl(FD(evsel, cpu, thread), err = ioctl(FD(evsel, cpu, thread),
PERF_EVENT_IOC_DISABLE, 0); PERF_EVENT_IOC_DISABLE, 0);
if (err) if (err)
...@@ -324,12 +337,14 @@ int perf_evlist__enable_event(struct perf_evlist *evlist, ...@@ -324,12 +337,14 @@ int perf_evlist__enable_event(struct perf_evlist *evlist,
struct perf_evsel *evsel) struct perf_evsel *evsel)
{ {
int cpu, thread, err; int cpu, thread, err;
int nr_cpus = cpu_map__nr(evlist->cpus);
int nr_threads = perf_evlist__nr_threads(evlist, evsel);
if (!evsel->fd) if (!evsel->fd)
return -EINVAL; return -EINVAL;
for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { for (cpu = 0; cpu < nr_cpus; cpu++) {
for (thread = 0; thread < evlist->threads->nr; thread++) { for (thread = 0; thread < nr_threads; thread++) {
err = ioctl(FD(evsel, cpu, thread), err = ioctl(FD(evsel, cpu, thread),
PERF_EVENT_IOC_ENABLE, 0); PERF_EVENT_IOC_ENABLE, 0);
if (err) if (err)
...@@ -343,7 +358,16 @@ static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) ...@@ -343,7 +358,16 @@ static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{ {
int nr_cpus = cpu_map__nr(evlist->cpus); int nr_cpus = cpu_map__nr(evlist->cpus);
int nr_threads = thread_map__nr(evlist->threads); int nr_threads = thread_map__nr(evlist->threads);
int nfds = nr_cpus * nr_threads * evlist->nr_entries; int nfds = 0;
struct perf_evsel *evsel;
list_for_each_entry(evsel, &evlist->entries, node) {
if (evsel->system_wide)
nfds += nr_cpus;
else
nfds += nr_cpus * nr_threads;
}
evlist->pollfd = malloc(sizeof(struct pollfd) * nfds); evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
return evlist->pollfd != NULL ? 0 : -ENOMEM; return evlist->pollfd != NULL ? 0 : -ENOMEM;
} }
...@@ -636,7 +660,12 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx, ...@@ -636,7 +660,12 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
struct perf_evsel *evsel; struct perf_evsel *evsel;
evlist__for_each(evlist, evsel) { evlist__for_each(evlist, evsel) {
int fd = FD(evsel, cpu, thread); int fd;
if (evsel->system_wide && thread)
continue;
fd = FD(evsel, cpu, thread);
if (*output == -1) { if (*output == -1) {
*output = fd; *output = fd;
......
...@@ -695,6 +695,10 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts) ...@@ -695,6 +695,10 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{ {
int cpu, thread; int cpu, thread;
if (evsel->system_wide)
nthreads = 1;
evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int)); evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
if (evsel->fd) { if (evsel->fd) {
...@@ -713,6 +717,9 @@ static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthrea ...@@ -713,6 +717,9 @@ static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthrea
{ {
int cpu, thread; int cpu, thread;
if (evsel->system_wide)
nthreads = 1;
for (cpu = 0; cpu < ncpus; cpu++) { for (cpu = 0; cpu < ncpus; cpu++) {
for (thread = 0; thread < nthreads; thread++) { for (thread = 0; thread < nthreads; thread++) {
int fd = FD(evsel, cpu, thread), int fd = FD(evsel, cpu, thread),
...@@ -743,6 +750,9 @@ int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads) ...@@ -743,6 +750,9 @@ int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{ {
if (evsel->system_wide)
nthreads = 1;
evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id)); evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
if (evsel->sample_id == NULL) if (evsel->sample_id == NULL)
return -ENOMEM; return -ENOMEM;
...@@ -787,6 +797,9 @@ void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads) ...@@ -787,6 +797,9 @@ void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{ {
int cpu, thread; int cpu, thread;
if (evsel->system_wide)
nthreads = 1;
for (cpu = 0; cpu < ncpus; cpu++) for (cpu = 0; cpu < ncpus; cpu++)
for (thread = 0; thread < nthreads; ++thread) { for (thread = 0; thread < nthreads; ++thread) {
close(FD(evsel, cpu, thread)); close(FD(evsel, cpu, thread));
...@@ -875,6 +888,9 @@ int __perf_evsel__read(struct perf_evsel *evsel, ...@@ -875,6 +888,9 @@ int __perf_evsel__read(struct perf_evsel *evsel,
int cpu, thread; int cpu, thread;
struct perf_counts_values *aggr = &evsel->counts->aggr, count; struct perf_counts_values *aggr = &evsel->counts->aggr, count;
if (evsel->system_wide)
nthreads = 1;
aggr->val = aggr->ena = aggr->run = 0; aggr->val = aggr->ena = aggr->run = 0;
for (cpu = 0; cpu < ncpus; cpu++) { for (cpu = 0; cpu < ncpus; cpu++) {
...@@ -997,13 +1013,18 @@ static size_t perf_event_attr__fprintf(struct perf_event_attr *attr, FILE *fp) ...@@ -997,13 +1013,18 @@ static size_t perf_event_attr__fprintf(struct perf_event_attr *attr, FILE *fp)
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
struct thread_map *threads) struct thread_map *threads)
{ {
int cpu, thread; int cpu, thread, nthreads;
unsigned long flags = PERF_FLAG_FD_CLOEXEC; unsigned long flags = PERF_FLAG_FD_CLOEXEC;
int pid = -1, err; int pid = -1, err;
enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE; enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
if (evsel->system_wide)
nthreads = 1;
else
nthreads = threads->nr;
if (evsel->fd == NULL && if (evsel->fd == NULL &&
perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0) perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0)
return -ENOMEM; return -ENOMEM;
if (evsel->cgrp) { if (evsel->cgrp) {
...@@ -1027,10 +1048,10 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, ...@@ -1027,10 +1048,10 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
for (cpu = 0; cpu < cpus->nr; cpu++) { for (cpu = 0; cpu < cpus->nr; cpu++) {
for (thread = 0; thread < threads->nr; thread++) { for (thread = 0; thread < nthreads; thread++) {
int group_fd; int group_fd;
if (!evsel->cgrp) if (!evsel->cgrp && !evsel->system_wide)
pid = threads->map[thread]; pid = threads->map[thread];
group_fd = get_group_fd(evsel, cpu, thread); group_fd = get_group_fd(evsel, cpu, thread);
...@@ -1103,7 +1124,7 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, ...@@ -1103,7 +1124,7 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
close(FD(evsel, cpu, thread)); close(FD(evsel, cpu, thread));
FD(evsel, cpu, thread) = -1; FD(evsel, cpu, thread) = -1;
} }
thread = threads->nr; thread = nthreads;
} while (--cpu >= 0); } while (--cpu >= 0);
return err; return err;
} }
......
...@@ -85,6 +85,7 @@ struct perf_evsel { ...@@ -85,6 +85,7 @@ struct perf_evsel {
bool needs_swap; bool needs_swap;
bool no_aux_samples; bool no_aux_samples;
bool immediate; bool immediate;
bool system_wide;
/* parse modifier helper */ /* parse modifier helper */
int exclude_GH; int exclude_GH;
int nr_members; int nr_members;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment