Commit d3d1af6f authored by Alexey Budankov, committed by Arnaldo Carvalho de Melo

perf record: Enable asynchronous trace writing

The trace file offset is read once before the mmap iteration loop and
written back after all performance data has been enqueued for aio writing.

The trace file offset is incremented linearly after every successful aio
write operation.

record__aio_sync() blocks until the started AIO operation completes and
then proceeds.

record__aio_mmap_read_sync() implements a barrier for all incomplete
aio write requests.
Signed-off-by: Alexey Budankov <alexey.budankov@linux.intel.com>
Reviewed-by: Jiri Olsa <jolsa@redhat.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/ce2d45e9-d236-871c-7c8f-1bed2d37e8ac@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 0b773831
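For illustration only (not part of this commit): a minimal standalone sketch of the POSIX AIO queue-then-wait pattern the patch builds on, assuming glibc's AIO implementation (typically linked with -lrt). The helper name aio_write_and_wait is made up for this sketch; the patch itself splits the same steps across record__aio_write(), record__aio_complete() and record__aio_sync().

#include <aio.h>
#include <errno.h>
#include <signal.h>
#include <string.h>

/* Hypothetical helper: queue one write at an explicit file offset and wait for it. */
static ssize_t aio_write_and_wait(int fd, void *buf, size_t size, off_t off)
{
	struct aiocb cb;
	const struct aiocb *list[1] = { &cb };

	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = fd;
	cb.aio_buf = buf;
	cb.aio_nbytes = size;
	cb.aio_offset = off;			/* the caller tracks the file offset */
	cb.aio_sigevent.sigev_notify = SIGEV_NONE;

	if (aio_write(&cb) < 0)			/* queue the request; returns 0 on success */
		return -1;

	while (aio_error(&cb) == EINPROGRESS)	/* still pending? */
		aio_suspend(list, 1, NULL);	/* block until the request completes */

	return aio_return(&cb);			/* bytes written, or -1 on error */
}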
...@@ -435,6 +435,11 @@ Specify vmlinux path which has debuginfo.
--buildid-all::
Record build-id of all DSOs regardless whether it's actually hit or not.
--aio::
Enable asynchronous (POSIX AIO) trace writing mode.
Asynchronous mode is supported only when the perf tool is linked with a libc
that provides an implementation of the POSIX AIO API.
--all-kernel::
Configure all used events to run in kernel space.
...
...@@ -124,6 +124,183 @@ static int record__write(struct record *rec, struct perf_mmap *map __maybe_unuse
return 0;
}
#ifdef HAVE_AIO_SUPPORT
static int record__aio_write(struct aiocb *cblock, int trace_fd,
void *buf, size_t size, off_t off)
{
int rc;
cblock->aio_fildes = trace_fd;
cblock->aio_buf = buf;
cblock->aio_nbytes = size;
cblock->aio_offset = off;
cblock->aio_sigevent.sigev_notify = SIGEV_NONE;
do {
rc = aio_write(cblock);
if (rc == 0) {
break;
} else if (errno != EAGAIN) {
cblock->aio_fildes = -1;
pr_err("failed to queue perf data, error: %m\n");
break;
}
} while (1);
return rc;
}
static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
{
void *rem_buf;
off_t rem_off;
size_t rem_size;
int rc, aio_errno;
ssize_t aio_ret, written;
aio_errno = aio_error(cblock);
if (aio_errno == EINPROGRESS)
return 0;
written = aio_ret = aio_return(cblock);
if (aio_ret < 0) {
if (aio_errno != EINTR)
pr_err("failed to write perf data, error: %m\n");
written = 0;
}
rem_size = cblock->aio_nbytes - written;
if (rem_size == 0) {
cblock->aio_fildes = -1;
/*
* md->refcount is incremented in perf_mmap__aio_push() for
* every enqueued aio write request, so decrement it because
* the request is now complete.
*/
perf_mmap__put(md);
rc = 1;
} else {
/*
* The aio write request may need to be restarted with the
* remainder if the kernel didn't write the whole
* chunk at once.
*/
rem_off = cblock->aio_offset + written;
rem_buf = (void *)(cblock->aio_buf + written);
record__aio_write(cblock, cblock->aio_fildes,
rem_buf, rem_size, rem_off);
rc = 0;
}
return rc;
}
static void record__aio_sync(struct perf_mmap *md)
{
struct aiocb *cblock = &md->aio.cblock;
struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
do {
if (cblock->aio_fildes == -1 || record__aio_complete(md, cblock))
return;
while (aio_suspend((const struct aiocb**)&cblock, 1, &timeout)) {
if (!(errno == EAGAIN || errno == EINTR))
pr_err("failed to sync perf data, error: %m\n");
}
} while (1);
}
static int record__aio_pushfn(void *to, struct aiocb *cblock, void *bf, size_t size, off_t off)
{
struct record *rec = to;
int ret, trace_fd = rec->session->data->file.fd;
rec->samples++;
ret = record__aio_write(cblock, trace_fd, bf, size, off);
if (!ret) {
rec->bytes_written += size;
if (switch_output_size(rec))
trigger_hit(&switch_output_trigger);
}
return ret;
}
static off_t record__aio_get_pos(int trace_fd)
{
return lseek(trace_fd, 0, SEEK_CUR);
}
static void record__aio_set_pos(int trace_fd, off_t pos)
{
lseek(trace_fd, pos, SEEK_SET);
}
static void record__aio_mmap_read_sync(struct record *rec)
{
int i;
struct perf_evlist *evlist = rec->evlist;
struct perf_mmap *maps = evlist->mmap;
if (!rec->opts.nr_cblocks)
return;
for (i = 0; i < evlist->nr_mmaps; i++) {
struct perf_mmap *map = &maps[i];
if (map->base)
record__aio_sync(map);
}
}
static int nr_cblocks_default = 1;
static int record__aio_parse(const struct option *opt,
const char *str __maybe_unused,
int unset)
{
struct record_opts *opts = (struct record_opts *)opt->value;
if (unset)
opts->nr_cblocks = 0;
else
opts->nr_cblocks = nr_cblocks_default;
return 0;
}
#else /* HAVE_AIO_SUPPORT */
static void record__aio_sync(struct perf_mmap *md __maybe_unused)
{
}
static int record__aio_pushfn(void *to __maybe_unused, struct aiocb *cblock __maybe_unused,
void *bf __maybe_unused, size_t size __maybe_unused, off_t off __maybe_unused)
{
return -1;
}
static off_t record__aio_get_pos(int trace_fd __maybe_unused)
{
return -1;
}
static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
{
}
static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
{
}
#endif
static int record__aio_enabled(struct record *rec)
{
return rec->opts.nr_cblocks > 0;
}
static int process_synthesized_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
...@@ -329,7 +506,7 @@ static int record__mmap_evlist(struct record *rec,
if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
opts->auxtrace_mmap_pages,
-opts->auxtrace_snapshot_mode) < 0) {
+opts->auxtrace_snapshot_mode, opts->nr_cblocks) < 0) {
if (errno == EPERM) {
pr_err("Permission error mapping pages.\n"
"Consider increasing "
...@@ -525,6 +702,8 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
int i;
int rc = 0;
struct perf_mmap *maps;
int trace_fd = rec->data.file.fd;
off_t off;
if (!evlist)
return 0;
...@@ -536,14 +715,30 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
return 0;
if (record__aio_enabled(rec))
off = record__aio_get_pos(trace_fd);
for (i = 0; i < evlist->nr_mmaps; i++) {
struct perf_mmap *map = &maps[i];
if (map->base) {
if (!record__aio_enabled(rec)) {
if (perf_mmap__push(map, rec, record__pushfn) != 0) {
rc = -1;
goto out;
}
} else {
/*
* Call record__aio_sync() to wait until the map->aio.data buffer
* becomes available after the previous aio write request.
*/
record__aio_sync(map);
if (perf_mmap__aio_push(map, rec, record__aio_pushfn, &off) != 0) {
record__aio_set_pos(trace_fd, off);
rc = -1;
goto out;
}
}
}
if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
...@@ -553,6 +748,9 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
}
}
if (record__aio_enabled(rec))
record__aio_set_pos(trace_fd, off);
/*
* Mark the round finished in case we wrote
* at least one event.
...@@ -658,6 +856,8 @@ record__switch_output(struct record *rec, bool at_exit)
/* Same Size: "2015122520103046"*/
char timestamp[] = "InvalidTimestamp";
record__aio_mmap_read_sync(rec);
record__synthesize(rec, true);
if (target__none(&rec->opts.target))
record__synthesize_workload(rec, true);
...@@ -1168,6 +1368,8 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
record__synthesize_workload(rec, true);
out_child:
record__aio_mmap_read_sync(rec);
if (forks) {
int exit_status;
...@@ -1706,6 +1908,11 @@ static struct option __record_options[] = {
"signal"),
OPT_BOOLEAN(0, "dry-run", &dry_run,
"Parse options then exit"),
#ifdef HAVE_AIO_SUPPORT
OPT_CALLBACK_NOOPT(0, "aio", &record.opts,
NULL, "Enable asynchronous trace writing mode",
record__aio_parse),
#endif
OPT_END()
};
...@@ -1898,6 +2105,9 @@ int cmd_record(int argc, const char **argv)
goto out;
}
if (verbose > 0)
pr_info("nr_cblocks: %d\n", rec->opts.nr_cblocks);
err = __cmd_record(&record, argc, argv);
out:
perf_evlist__delete(rec->evlist);
...
...@@ -83,6 +83,7 @@ struct record_opts {
clockid_t clockid;
u64 clockid_res_ns;
unsigned int proc_map_timeout;
int nr_cblocks;
};
struct option;
...
...@@ -1018,7 +1018,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
*/
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages,
-bool auxtrace_overwrite)
+bool auxtrace_overwrite, int nr_cblocks)
{
struct perf_evsel *evsel;
const struct cpu_map *cpus = evlist->cpus;
...@@ -1028,7 +1028,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
* Its value is decided by evsel's write_backward.
* So &mp should not be passed through const pointer.
*/
-struct mmap_params mp = { .nr_cblocks = 0 };
+struct mmap_params mp = { .nr_cblocks = nr_cblocks };
if (!evlist->mmap)
evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
...@@ -1060,7 +1060,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages)
{
-return perf_evlist__mmap_ex(evlist, pages, 0, false);
+return perf_evlist__mmap_ex(evlist, pages, 0, false, 0);
}
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
...
...@@ -162,7 +162,7 @@ unsigned long perf_event_mlock_kb_in_pages(void);
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages,
-bool auxtrace_overwrite);
+bool auxtrace_overwrite, int nr_cblocks);
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages);
void perf_evlist__munmap(struct perf_evlist *evlist);
...
...@@ -158,7 +158,8 @@ static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
{
int delta_max;
-if (mp->nr_cblocks) {
+map->aio.nr_cblocks = mp->nr_cblocks;
+if (map->aio.nr_cblocks) {
map->aio.data = malloc(perf_mmap__mmap_len(map));
if (!map->aio.data) {
pr_debug2("failed to allocate data buffer, error %m\n");
...@@ -187,6 +188,80 @@ static void perf_mmap__aio_munmap(struct perf_mmap *map)
if (map->aio.data)
zfree(&map->aio.data);
}
int perf_mmap__aio_push(struct perf_mmap *md, void *to,
int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off),
off_t *off)
{
u64 head = perf_mmap__read_head(md);
unsigned char *data = md->base + page_size;
unsigned long size, size0 = 0;
void *buf;
int rc = 0;
rc = perf_mmap__read_init(md);
if (rc < 0)
return (rc == -EAGAIN) ? 0 : -1;
/*
* md->base data is copied into the md->aio.data buffer to
* release space in the kernel buffer as fast as possible,
* through perf_mmap__consume() below.
*
* That lets the kernel proceed with storing more
* profiling data into the kernel buffer earlier than other
* per-cpu kernel buffers are handled.
*
* Copying is done in two steps in case the chunk of
* profiling data crosses the upper bound of the kernel buffer.
* In this case we first move the part from md->start
* up to the upper bound and then the remainder from the
* beginning of the kernel buffer up to the end of
* the data chunk.
*/
size = md->end - md->start;
if ((md->start & md->mask) + size != (md->end & md->mask)) {
buf = &data[md->start & md->mask];
size = md->mask + 1 - (md->start & md->mask);
md->start += size;
memcpy(md->aio.data, buf, size);
size0 = size;
}
buf = &data[md->start & md->mask];
size = md->end - md->start;
md->start += size;
memcpy(md->aio.data + size0, buf, size);
/*
* Increment md->refcount to guard the md->aio.data buffer
* from premature deallocation, because the md object can be
* released earlier than the aio write request started
* on md->aio.data completes.
*
* perf_mmap__put() is done at record__aio_complete()
* after the started request completes.
*/
perf_mmap__get(md);
md->prev = head;
perf_mmap__consume(md);
rc = push(to, &md->aio.cblock, md->aio.data, size0 + size, *off);
if (!rc) {
*off += size0 + size;
} else {
/*
* Decrement md->refcount back if aio write
* operation failed to start.
*/
perf_mmap__put(md);
}
return rc;
}
#else
static int perf_mmap__aio_mmap(struct perf_mmap *map __maybe_unused,
struct mmap_params *mp __maybe_unused)
...
...@@ -12,6 +12,7 @@
#include "auxtrace.h"
#include "event.h"
struct aiocb;
/**
* struct perf_mmap - perf's ring buffer mmap details
*
...@@ -33,6 +34,7 @@ struct perf_mmap {
struct {
void *data;
struct aiocb cblock;
int nr_cblocks;
} aio;
#endif
};
...@@ -94,6 +96,18 @@ union perf_event *perf_mmap__read_event(struct perf_mmap *map);
int perf_mmap__push(struct perf_mmap *md, void *to,
int push(struct perf_mmap *map, void *to, void *buf, size_t size));
#ifdef HAVE_AIO_SUPPORT
int perf_mmap__aio_push(struct perf_mmap *md, void *to,
int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off),
off_t *off);
#else
static inline int perf_mmap__aio_push(struct perf_mmap *md __maybe_unused, void *to __maybe_unused,
int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off) __maybe_unused,
off_t *off __maybe_unused)
{
return 0;
}
#endif
size_t perf_mmap__mmap_len(struct perf_mmap *map);
...
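Usage note (not part of the commit): with this patch applied, asynchronous trace writing is requested via the new --aio option, e.g. "perf record --aio -e cycles -- sleep 1". When run with -v, perf record additionally reports the number of AIO control blocks in use ("nr_cblocks: 1" with the default of one control block per mmap), as added to cmd_record() above.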