perf top: Use perf_evsel__open

Now that it handles group_fd and inherit we can use it, sharing it with
stat.

Next step: 'perf record' should use it, then move the mmap_array out of
->priv and into perf_evsel, with top and record sharing this, and at the
same time, write a 'perf test' stress test.

Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 9d04f178
...@@ -1210,39 +1210,50 @@ static void perf_session__mmap_read(struct perf_session *self) ...@@ -1210,39 +1210,50 @@ static void perf_session__mmap_read(struct perf_session *self)
} }
} }
int group_fd;
static void start_counter(int i, struct perf_evlist *evlist, static void start_counter(int i, struct perf_evlist *evlist,
struct perf_evsel *evsel) struct perf_evsel *evsel)
{ {
struct xyarray *mmap_array = evsel->priv; struct xyarray *mmap_array = evsel->priv;
struct mmap_data *mm; struct mmap_data *mm;
struct perf_event_attr *attr;
int cpu = -1;
int thread_index; int thread_index;
if (target_tid == -1) for (thread_index = 0; thread_index < threads->nr; thread_index++) {
cpu = cpus->map[i]; assert(FD(evsel, i, thread_index) >= 0);
fcntl(FD(evsel, i, thread_index), F_SETFL, O_NONBLOCK);
attr = &evsel->attr;
attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; evlist->pollfd[evlist->nr_fds].fd = FD(evsel, i, thread_index);
evlist->pollfd[evlist->nr_fds].events = POLLIN;
evlist->nr_fds++;
if (freq) { mm = xyarray__entry(mmap_array, i, thread_index);
attr->sample_type |= PERF_SAMPLE_PERIOD; mm->prev = 0;
attr->freq = 1; mm->mask = mmap_pages*page_size - 1;
attr->sample_freq = freq; mm->base = mmap(NULL, (mmap_pages+1)*page_size,
PROT_READ, MAP_SHARED, FD(evsel, i, thread_index), 0);
if (mm->base == MAP_FAILED)
die("failed to mmap with %d (%s)\n", errno, strerror(errno));
} }
}
static void start_counters(struct perf_evlist *evlist)
{
struct perf_evsel *counter;
int i;
attr->inherit = (cpu < 0) && inherit; list_for_each_entry(counter, &evlist->entries, node) {
attr->mmap = 1; struct perf_event_attr *attr = &counter->attr;
for (thread_index = 0; thread_index < threads->nr; thread_index++) { attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
try_again:
FD(evsel, i, thread_index) = sys_perf_event_open(attr, if (freq) {
threads->map[thread_index], cpu, group_fd, 0); attr->sample_type |= PERF_SAMPLE_PERIOD;
attr->freq = 1;
attr->sample_freq = freq;
}
if (FD(evsel, i, thread_index) < 0) { attr->mmap = 1;
try_again:
if (perf_evsel__open(counter, cpus, threads, group, inherit) < 0) {
int err = errno; int err = errno;
if (err == EPERM || err == EACCES) if (err == EPERM || err == EACCES)
...@@ -1254,8 +1265,8 @@ static void start_counter(int i, struct perf_evlist *evlist, ...@@ -1254,8 +1265,8 @@ static void start_counter(int i, struct perf_evlist *evlist,
* based cpu-clock-tick sw counter, which * based cpu-clock-tick sw counter, which
* is always available even if no PMU support: * is always available even if no PMU support:
*/ */
if (attr->type == PERF_TYPE_HARDWARE if (attr->type == PERF_TYPE_HARDWARE &&
&& attr->config == PERF_COUNT_HW_CPU_CYCLES) { attr->config == PERF_COUNT_HW_CPU_CYCLES) {
if (verbose) if (verbose)
warning(" ... trying to fall back to cpu-clock-ticks\n"); warning(" ... trying to fall back to cpu-clock-ticks\n");
...@@ -1265,39 +1276,24 @@ static void start_counter(int i, struct perf_evlist *evlist, ...@@ -1265,39 +1276,24 @@ static void start_counter(int i, struct perf_evlist *evlist,
goto try_again; goto try_again;
} }
printf("\n"); printf("\n");
error("sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information.\n", error("sys_perf_event_open() syscall returned with %d "
FD(evsel, i, thread_index), strerror(err)); "(%s). /bin/dmesg may provide additional information.\n",
err, strerror(err));
die("No CONFIG_PERF_EVENTS=y kernel support configured?\n"); die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
exit(-1); exit(-1);
} }
assert(FD(evsel, i, thread_index) >= 0); }
fcntl(FD(evsel, i, thread_index), F_SETFL, O_NONBLOCK);
/*
* First counter acts as the group leader:
*/
if (group && group_fd == -1)
group_fd = FD(evsel, i, thread_index);
evlist->pollfd[evlist->nr_fds].fd = FD(evsel, i, thread_index);
evlist->pollfd[evlist->nr_fds].events = POLLIN;
evlist->nr_fds++;
mm = xyarray__entry(mmap_array, i, thread_index); for (i = 0; i < cpus->nr; i++) {
mm->prev = 0; list_for_each_entry(counter, &evlist->entries, node)
mm->mask = mmap_pages*page_size - 1; start_counter(i, evsel_list, counter);
mm->base = mmap(NULL, (mmap_pages+1)*page_size,
PROT_READ, MAP_SHARED, FD(evsel, i, thread_index), 0);
if (mm->base == MAP_FAILED)
die("failed to mmap with %d (%s)\n", errno, strerror(errno));
} }
} }
static int __cmd_top(void) static int __cmd_top(void)
{ {
pthread_t thread; pthread_t thread;
struct perf_evsel *counter; int ret;
int i, ret;
/* /*
* FIXME: perf_session__new should allow passing a O_MMAP, so that all this * FIXME: perf_session__new should allow passing a O_MMAP, so that all this
* mmap reading, etc is encapsulated in it. Use O_WRONLY for now. * mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
...@@ -1311,11 +1307,7 @@ static int __cmd_top(void) ...@@ -1311,11 +1307,7 @@ static int __cmd_top(void)
else else
event__synthesize_threads(event__process, session); event__synthesize_threads(event__process, session);
for (i = 0; i < cpus->nr; i++) { start_counters(evsel_list);
group_fd = -1;
list_for_each_entry(counter, &evsel_list->entries, node)
start_counter(i, evsel_list, counter);
}
/* Wait for a minimal set of events before starting the snapshot */ /* Wait for a minimal set of events before starting the snapshot */
poll(evsel_list->pollfd, evsel_list->nr_fds, 100); poll(evsel_list->pollfd, evsel_list->nr_fds, 100);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment