Commit d969135a authored by Ingo Molnar

Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/urgent

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

  * Add a summary-only option to 'perf trace', suppressing the decoding of
    events, from David Ahern.

  * 'perf trace --summary' formatting simplifications, from Pekka Enberg.

  * Beautify fifth argument of mmap() as fd, in 'perf trace', from Namhyung Kim.

  * Fix segfault on perf trace -i perf.data, from Namhyung Kim.

  * Fix segfault with --no-mmap-pages, from David Ahern.

  * Round mmap pages to the next power of 2, from David Ahern.

  * Add direct access to dynamic arrays in libtraceevent, from Steven Rostedt.

  * Handle throttle events in 'object code reading' test, fix from Adrian Hunter.

  * Prevent condition that all sort keys are elided, fix from Namhyung Kim.

  * Synthesize non-exec MMAP records when --data is used, allowing the resolution of
    data addresses to symbols (global variables, etc).

  * Don't force a refresh during progress update in the TUI, greatly reducing
    startup costs, fix from Patrick Palka.

  * Fix the sw clock event period test, which didn't check whether the requested
    frequency exceeds max_sample_freq.

  * Code cleanups by David Ahern and Adrian Hunter.
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 9b66bfb2 0497a9eb
...@@ -3435,6 +3435,19 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg ...@@ -3435,6 +3435,19 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
goto out_warning_op; goto out_warning_op;
} }
break; break;
case PRINT_DYNAMIC_ARRAY:
/* Without [], we pass the address to the dynamic data */
offset = pevent_read_number(pevent,
data + arg->dynarray.field->offset,
arg->dynarray.field->size);
/*
* The actual length of the dynamic array is stored
* in the top half of the field, and the offset
* is in the bottom half of the 32 bit field.
*/
offset &= 0xffff;
val = (unsigned long long)(data + offset);
break;
default: /* not sure what to do there */ default: /* not sure what to do there */
return 0; return 0;
} }
......
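The new PRINT_DYNAMIC_ARRAY case above relies on the ftrace __data_loc encoding: a 32-bit field whose low 16 bits give the offset of the payload inside the record and whose high 16 bits give its length. A minimal standalone sketch of that decoding (the helper name and its form are illustrative, not part of libtraceevent):

#include <stdint.h>

/*
 * Decode a __data_loc (dynamic array) field from a raw tracepoint record:
 * the payload offset lives in the low 16 bits, its length in the high 16.
 */
static const void *dynarray_ptr(const void *record, uint32_t data_loc,
				unsigned int *len)
{
	unsigned int offset = data_loc & 0xffff;

	if (len)
		*len = data_loc >> 16;
	return (const char *)record + offset;	/* address of the dynamic data */
}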
...@@ -93,9 +93,15 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs. ...@@ -93,9 +93,15 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
--comm:: --comm::
Show process COMM right beside its ID, on by default, disable with --no-comm. Show process COMM right beside its ID, on by default, disable with --no-comm.
-s::
--summary:: --summary::
Show a summary of syscalls by thread with min, max, and average times (in Show only a summary of syscalls by thread with min, max, and average times
msec) and relative stddev. (in msec) and relative stddev.
-S::
--with-summary::
Show all syscalls followed by a summary by thread with min, max, and
average times (in msec) and relative stddev.
--tool_stats:: --tool_stats::
Show tool stats such as number of times fd->pathname was discovered thru Show tool stats such as number of times fd->pathname was discovered thru
......
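Illustrative invocations of the two modes documented above (command lines only, output omitted):

    perf trace -s sleep 1      # per-thread summary only, no per-event decoding
    perf trace -S sleep 1      # full trace output, then the per-thread summary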
...@@ -1510,13 +1510,13 @@ static int kvm_events_live(struct perf_kvm_stat *kvm, ...@@ -1510,13 +1510,13 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
/* /*
* target related setups * target related setups
*/ */
err = perf_target__validate(&kvm->opts.target); err = target__validate(&kvm->opts.target);
if (err) { if (err) {
perf_target__strerror(&kvm->opts.target, err, errbuf, BUFSIZ); target__strerror(&kvm->opts.target, err, errbuf, BUFSIZ);
ui__warning("%s", errbuf); ui__warning("%s", errbuf);
} }
if (perf_target__none(&kvm->opts.target)) if (target__none(&kvm->opts.target))
kvm->opts.target.system_wide = true; kvm->opts.target.system_wide = true;
...@@ -1544,18 +1544,8 @@ static int kvm_events_live(struct perf_kvm_stat *kvm, ...@@ -1544,18 +1544,8 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
} }
kvm->session->evlist = kvm->evlist; kvm->session->evlist = kvm->evlist;
perf_session__set_id_hdr_size(kvm->session); perf_session__set_id_hdr_size(kvm->session);
machine__synthesize_threads(&kvm->session->machines.host, &kvm->opts.target,
kvm->evlist->threads, false);
if (perf_target__has_task(&kvm->opts.target))
perf_event__synthesize_thread_map(&kvm->tool,
kvm->evlist->threads,
perf_event__process,
&kvm->session->machines.host);
else
perf_event__synthesize_threads(&kvm->tool, perf_event__process,
&kvm->session->machines.host);
err = kvm_live_open_events(kvm); err = kvm_live_open_events(kvm);
if (err) if (err)
goto out; goto out;
......
...@@ -76,12 +76,12 @@ struct perf_record { ...@@ -76,12 +76,12 @@ struct perf_record {
long samples; long samples;
}; };
static int write_output(struct perf_record *rec, void *buf, size_t size) static int do_write_output(struct perf_record *rec, void *buf, size_t size)
{ {
struct perf_data_file *file = &rec->file; struct perf_data_file *file = &rec->file;
while (size) { while (size) {
int ret = write(file->fd, buf, size); ssize_t ret = write(file->fd, buf, size);
if (ret < 0) { if (ret < 0) {
pr_err("failed to write perf data, error: %m\n"); pr_err("failed to write perf data, error: %m\n");
...@@ -97,6 +97,11 @@ static int write_output(struct perf_record *rec, void *buf, size_t size) ...@@ -97,6 +97,11 @@ static int write_output(struct perf_record *rec, void *buf, size_t size)
return 0; return 0;
} }
static int write_output(struct perf_record *rec, void *buf, size_t size)
{
return do_write_output(rec, buf, size);
}
static int process_synthesized_event(struct perf_tool *tool, static int process_synthesized_event(struct perf_tool *tool,
union perf_event *event, union perf_event *event,
struct perf_sample *sample __maybe_unused, struct perf_sample *sample __maybe_unused,
...@@ -480,16 +485,8 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) ...@@ -480,16 +485,8 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
perf_event__synthesize_guest_os, tool); perf_event__synthesize_guest_os, tool);
} }
if (perf_target__has_task(&opts->target)) err = __machine__synthesize_threads(machine, tool, &opts->target, evsel_list->threads,
err = perf_event__synthesize_thread_map(tool, evsel_list->threads, process_synthesized_event, opts->sample_address);
process_synthesized_event,
machine);
else if (perf_target__has_cpu(&opts->target))
err = perf_event__synthesize_threads(tool, process_synthesized_event,
machine);
else /* command specified */
err = 0;
if (err != 0) if (err != 0)
goto out_delete_session; goto out_delete_session;
...@@ -509,7 +506,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) ...@@ -509,7 +506,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
* (apart from group members) have enable_on_exec=1 set, * (apart from group members) have enable_on_exec=1 set,
* so don't spoil it by prematurely enabling them. * so don't spoil it by prematurely enabling them.
*/ */
if (!perf_target__none(&opts->target)) if (!target__none(&opts->target))
perf_evlist__enable(evsel_list); perf_evlist__enable(evsel_list);
/* /*
...@@ -538,7 +535,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) ...@@ -538,7 +535,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
* die with the process and we wait for that. Thus no need to * die with the process and we wait for that. Thus no need to
* disable events in this case. * disable events in this case.
*/ */
if (done && !disabled && !perf_target__none(&opts->target)) { if (done && !disabled && !target__none(&opts->target)) {
perf_evlist__disable(evsel_list); perf_evlist__disable(evsel_list);
disabled = true; disabled = true;
} }
...@@ -909,7 +906,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused) ...@@ -909,7 +906,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
argc = parse_options(argc, argv, record_options, record_usage, argc = parse_options(argc, argv, record_options, record_usage,
PARSE_OPT_STOP_AT_NON_OPTION); PARSE_OPT_STOP_AT_NON_OPTION);
if (!argc && perf_target__none(&rec->opts.target)) if (!argc && target__none(&rec->opts.target))
usage_with_options(record_usage, record_options); usage_with_options(record_usage, record_options);
if (nr_cgroups && !rec->opts.target.system_wide) { if (nr_cgroups && !rec->opts.target.system_wide) {
...@@ -939,17 +936,17 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused) ...@@ -939,17 +936,17 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
goto out_symbol_exit; goto out_symbol_exit;
} }
err = perf_target__validate(&rec->opts.target); err = target__validate(&rec->opts.target);
if (err) { if (err) {
perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ); target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
ui__warning("%s", errbuf); ui__warning("%s", errbuf);
} }
err = perf_target__parse_uid(&rec->opts.target); err = target__parse_uid(&rec->opts.target);
if (err) { if (err) {
int saved_errno = errno; int saved_errno = errno;
perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ); target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
ui__error("%s", errbuf); ui__error("%s", errbuf);
err = -saved_errno; err = -saved_errno;
......
...@@ -108,7 +108,7 @@ enum { ...@@ -108,7 +108,7 @@ enum {
static struct perf_evlist *evsel_list; static struct perf_evlist *evsel_list;
static struct perf_target target = { static struct target target = {
.uid = UINT_MAX, .uid = UINT_MAX,
}; };
...@@ -294,11 +294,10 @@ static int create_perf_stat_counter(struct perf_evsel *evsel) ...@@ -294,11 +294,10 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
attr->inherit = !no_inherit; attr->inherit = !no_inherit;
if (perf_target__has_cpu(&target)) if (target__has_cpu(&target))
return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel)); return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
if (!perf_target__has_task(&target) && if (!target__has_task(&target) && perf_evsel__is_group_leader(evsel)) {
perf_evsel__is_group_leader(evsel)) {
attr->disabled = 1; attr->disabled = 1;
if (!initial_delay) if (!initial_delay)
attr->enable_on_exec = 1; attr->enable_on_exec = 1;
...@@ -1236,7 +1235,7 @@ static void print_stat(int argc, const char **argv) ...@@ -1236,7 +1235,7 @@ static void print_stat(int argc, const char **argv)
fprintf(output, "\'system wide"); fprintf(output, "\'system wide");
else if (target.cpu_list) else if (target.cpu_list)
fprintf(output, "\'CPU(s) %s", target.cpu_list); fprintf(output, "\'CPU(s) %s", target.cpu_list);
else if (!perf_target__has_task(&target)) { else if (!target__has_task(&target)) {
fprintf(output, "\'%s", argv[0]); fprintf(output, "\'%s", argv[0]);
for (i = 1; i < argc; i++) for (i = 1; i < argc; i++)
fprintf(output, " %s", argv[i]); fprintf(output, " %s", argv[i]);
...@@ -1667,7 +1666,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) ...@@ -1667,7 +1666,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
} else if (big_num_opt == 0) /* User passed --no-big-num */ } else if (big_num_opt == 0) /* User passed --no-big-num */
big_num = false; big_num = false;
if (!argc && perf_target__none(&target)) if (!argc && target__none(&target))
usage_with_options(stat_usage, options); usage_with_options(stat_usage, options);
if (run_count < 0) { if (run_count < 0) {
...@@ -1680,8 +1679,8 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) ...@@ -1680,8 +1679,8 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
} }
/* no_aggr, cgroup are for system-wide only */ /* no_aggr, cgroup are for system-wide only */
if ((aggr_mode != AGGR_GLOBAL || nr_cgroups) if ((aggr_mode != AGGR_GLOBAL || nr_cgroups) &&
&& !perf_target__has_cpu(&target)) { !target__has_cpu(&target)) {
fprintf(stderr, "both cgroup and no-aggregation " fprintf(stderr, "both cgroup and no-aggregation "
"modes only available in system-wide mode\n"); "modes only available in system-wide mode\n");
...@@ -1694,14 +1693,14 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) ...@@ -1694,14 +1693,14 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
if (add_default_attributes()) if (add_default_attributes())
goto out; goto out;
perf_target__validate(&target); target__validate(&target);
if (perf_evlist__create_maps(evsel_list, &target) < 0) { if (perf_evlist__create_maps(evsel_list, &target) < 0) {
if (perf_target__has_task(&target)) { if (target__has_task(&target)) {
pr_err("Problems finding threads of monitor\n"); pr_err("Problems finding threads of monitor\n");
parse_options_usage(stat_usage, options, "p", 1); parse_options_usage(stat_usage, options, "p", 1);
parse_options_usage(NULL, options, "t", 1); parse_options_usage(NULL, options, "t", 1);
} else if (perf_target__has_cpu(&target)) { } else if (target__has_cpu(&target)) {
perror("failed to parse CPUs map"); perror("failed to parse CPUs map");
parse_options_usage(stat_usage, options, "C", 1); parse_options_usage(stat_usage, options, "C", 1);
parse_options_usage(NULL, options, "a", 1); parse_options_usage(NULL, options, "a", 1);
......
...@@ -950,14 +950,8 @@ static int __cmd_top(struct perf_top *top) ...@@ -950,14 +950,8 @@ static int __cmd_top(struct perf_top *top)
if (ret) if (ret)
goto out_delete; goto out_delete;
if (perf_target__has_task(&opts->target)) machine__synthesize_threads(&top->session->machines.host, &opts->target,
perf_event__synthesize_thread_map(&top->tool, top->evlist->threads, top->evlist->threads, false);
perf_event__process,
&top->session->machines.host);
else
perf_event__synthesize_threads(&top->tool, perf_event__process,
&top->session->machines.host);
ret = perf_top__start_counters(top); ret = perf_top__start_counters(top);
if (ret) if (ret)
goto out_delete; goto out_delete;
...@@ -973,7 +967,7 @@ static int __cmd_top(struct perf_top *top) ...@@ -973,7 +967,7 @@ static int __cmd_top(struct perf_top *top)
* XXX 'top' still doesn't start workloads like record, trace, but should, * XXX 'top' still doesn't start workloads like record, trace, but should,
* so leave the check here. * so leave the check here.
*/ */
if (!perf_target__none(&opts->target)) if (!target__none(&opts->target))
perf_evlist__enable(top->evlist); perf_evlist__enable(top->evlist);
/* Wait for a minimal set of events before starting the snapshot */ /* Wait for a minimal set of events before starting the snapshot */
...@@ -1059,7 +1053,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) ...@@ -1059,7 +1053,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
.sym_pcnt_filter = 5, .sym_pcnt_filter = 5,
}; };
struct perf_record_opts *opts = &top.record_opts; struct perf_record_opts *opts = &top.record_opts;
struct perf_target *target = &opts->target; struct target *target = &opts->target;
const struct option options[] = { const struct option options[] = {
OPT_CALLBACK('e', "event", &top.evlist, "event", OPT_CALLBACK('e', "event", &top.evlist, "event",
"event selector. use 'perf list' to list available events", "event selector. use 'perf list' to list available events",
...@@ -1175,24 +1169,24 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) ...@@ -1175,24 +1169,24 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
setup_browser(false); setup_browser(false);
status = perf_target__validate(target); status = target__validate(target);
if (status) { if (status) {
perf_target__strerror(target, status, errbuf, BUFSIZ); target__strerror(target, status, errbuf, BUFSIZ);
ui__warning("%s", errbuf); ui__warning("%s", errbuf);
} }
status = perf_target__parse_uid(target); status = target__parse_uid(target);
if (status) { if (status) {
int saved_errno = errno; int saved_errno = errno;
perf_target__strerror(target, status, errbuf, BUFSIZ); target__strerror(target, status, errbuf, BUFSIZ);
ui__error("%s", errbuf); ui__error("%s", errbuf);
status = -saved_errno; status = -saved_errno;
goto out_delete_evlist; goto out_delete_evlist;
} }
if (perf_target__none(target)) if (target__none(target))
target->system_wide = true; target->system_wide = true;
if (perf_evlist__create_maps(top.evlist, target) < 0) if (perf_evlist__create_maps(top.evlist, target) < 0)
......
This diff is collapsed.
...@@ -248,7 +248,7 @@ enum perf_call_graph_mode { ...@@ -248,7 +248,7 @@ enum perf_call_graph_mode {
}; };
struct perf_record_opts { struct perf_record_opts {
struct perf_target target; struct target target;
int call_graph; int call_graph;
bool group; bool group;
bool inherit_stat; bool inherit_stat;
......
...@@ -275,8 +275,19 @@ static int process_event(struct machine *machine, struct perf_evlist *evlist, ...@@ -275,8 +275,19 @@ static int process_event(struct machine *machine, struct perf_evlist *evlist,
if (event->header.type == PERF_RECORD_SAMPLE) if (event->header.type == PERF_RECORD_SAMPLE)
return process_sample_event(machine, evlist, event, state); return process_sample_event(machine, evlist, event, state);
if (event->header.type < PERF_RECORD_MAX) if (event->header.type == PERF_RECORD_THROTTLE ||
return machine__process_event(machine, event, NULL); event->header.type == PERF_RECORD_UNTHROTTLE)
return 0;
if (event->header.type < PERF_RECORD_MAX) {
int ret;
ret = machine__process_event(machine, event, NULL);
if (ret < 0)
pr_debug("machine__process_event failed, event type %u\n",
event->header.type);
return ret;
}
return 0; return 0;
} }
...@@ -441,7 +452,7 @@ static int do_test_code_reading(bool try_kcore) ...@@ -441,7 +452,7 @@ static int do_test_code_reading(bool try_kcore)
} }
ret = perf_event__synthesize_thread_map(NULL, threads, ret = perf_event__synthesize_thread_map(NULL, threads,
perf_event__process, machine); perf_event__process, machine, false);
if (ret < 0) { if (ret < 0) {
pr_debug("perf_event__synthesize_thread_map failed\n"); pr_debug("perf_event__synthesize_thread_map failed\n");
goto out_err; goto out_err;
......
...@@ -32,7 +32,7 @@ static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name, ...@@ -32,7 +32,7 @@ static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
int test__perf_evsel__tp_sched_test(void) int test__perf_evsel__tp_sched_test(void)
{ {
struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0); struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
int ret = 0; int ret = 0;
if (evsel == NULL) { if (evsel == NULL) {
...@@ -63,7 +63,7 @@ int test__perf_evsel__tp_sched_test(void) ...@@ -63,7 +63,7 @@ int test__perf_evsel__tp_sched_test(void)
perf_evsel__delete(evsel); perf_evsel__delete(evsel);
evsel = perf_evsel__newtp("sched", "sched_wakeup", 0); evsel = perf_evsel__newtp("sched", "sched_wakeup");
if (perf_evsel__test_field(evsel, "comm", 16, true)) if (perf_evsel__test_field(evsel, "comm", 16, true))
ret = -1; ret = -1;
......
...@@ -65,7 +65,7 @@ int test__basic_mmap(void) ...@@ -65,7 +65,7 @@ int test__basic_mmap(void)
char name[64]; char name[64];
snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]); snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
evsels[i] = perf_evsel__newtp("syscalls", name, i); evsels[i] = perf_evsel__newtp("syscalls", name);
if (evsels[i] == NULL) { if (evsels[i] == NULL) {
pr_debug("perf_evsel__new\n"); pr_debug("perf_evsel__new\n");
goto out_free_evlist; goto out_free_evlist;
......
...@@ -26,7 +26,7 @@ int test__open_syscall_event_on_all_cpus(void) ...@@ -26,7 +26,7 @@ int test__open_syscall_event_on_all_cpus(void)
CPU_ZERO(&cpu_set); CPU_ZERO(&cpu_set);
evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0); evsel = perf_evsel__newtp("syscalls", "sys_enter_open");
if (evsel == NULL) { if (evsel == NULL) {
pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
goto out_thread_map_delete; goto out_thread_map_delete;
......
...@@ -27,7 +27,7 @@ int test__syscall_open_tp_fields(void) ...@@ -27,7 +27,7 @@ int test__syscall_open_tp_fields(void)
goto out; goto out;
} }
evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0); evsel = perf_evsel__newtp("syscalls", "sys_enter_open");
if (evsel == NULL) { if (evsel == NULL) {
pr_debug("%s: perf_evsel__newtp\n", __func__); pr_debug("%s: perf_evsel__newtp\n", __func__);
goto out_delete_evlist; goto out_delete_evlist;
......
...@@ -15,7 +15,7 @@ int test__open_syscall_event(void) ...@@ -15,7 +15,7 @@ int test__open_syscall_event(void)
return -1; return -1;
} }
evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0); evsel = perf_evsel__newtp("syscalls", "sys_enter_open");
if (evsel == NULL) { if (evsel == NULL) {
pr_debug("is debugfs mounted on /sys/kernel/debug?\n"); pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
goto out_thread_map_delete; goto out_thread_map_delete;
......
...@@ -9,7 +9,7 @@ ...@@ -9,7 +9,7 @@
#include "util/cpumap.h" #include "util/cpumap.h"
#include "util/thread_map.h" #include "util/thread_map.h"
#define NR_LOOPS 1000000 #define NR_LOOPS 10000000
/* /*
* This test will open software clock events (cpu-clock, task-clock) * This test will open software clock events (cpu-clock, task-clock)
...@@ -34,7 +34,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id) ...@@ -34,7 +34,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
.freq = 1, .freq = 1,
}; };
attr.sample_freq = 10000; attr.sample_freq = 500;
evlist = perf_evlist__new(); evlist = perf_evlist__new();
if (evlist == NULL) { if (evlist == NULL) {
...@@ -42,7 +42,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id) ...@@ -42,7 +42,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
return -1; return -1;
} }
evsel = perf_evsel__new(&attr, 0); evsel = perf_evsel__new(&attr);
if (evsel == NULL) { if (evsel == NULL) {
pr_debug("perf_evsel__new\n"); pr_debug("perf_evsel__new\n");
goto out_free_evlist; goto out_free_evlist;
...@@ -57,7 +57,14 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id) ...@@ -57,7 +57,14 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
goto out_delete_maps; goto out_delete_maps;
} }
perf_evlist__open(evlist); if (perf_evlist__open(evlist)) {
const char *knob = "/proc/sys/kernel/perf_event_max_sample_rate";
err = -errno;
pr_debug("Couldn't open evlist: %s\nHint: check %s, using %" PRIu64 " in this test.\n",
strerror(errno), knob, (u64)attr.sample_freq);
goto out_delete_maps;
}
err = perf_evlist__mmap(evlist, 128, true); err = perf_evlist__mmap(evlist, 128, true);
if (err < 0) { if (err < 0) {
......
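The ceiling that the new hint points at can be read straight from procfs; a standalone sketch (not part of the patch) of comparing a requested frequency against it:

#include <stdio.h>

/* Read the kernel's sampling-frequency ceiling, or -1 on failure. */
static long read_max_sample_rate(void)
{
	FILE *fp = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r");
	long max = -1;

	if (fp) {
		if (fscanf(fp, "%ld", &max) != 1)
			max = -1;
		fclose(fp);
	}
	return max;
}

int main(void)
{
	long requested = 500;	/* the test's new sample_freq */
	long max = read_max_sample_rate();

	if (max > 0 && requested > max)
		fprintf(stderr, "%ld Hz exceeds the %ld Hz limit\n", requested, max);
	return 0;
}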
...@@ -28,7 +28,7 @@ int test__task_exit(void) ...@@ -28,7 +28,7 @@ int test__task_exit(void)
union perf_event *event; union perf_event *event;
struct perf_evsel *evsel; struct perf_evsel *evsel;
struct perf_evlist *evlist; struct perf_evlist *evlist;
struct perf_target target = { struct target target = {
.uid = UINT_MAX, .uid = UINT_MAX,
.uses_mmap = true, .uses_mmap = true,
}; };
......
...@@ -18,13 +18,14 @@ static void tui_progress__update(struct ui_progress *p) ...@@ -18,13 +18,14 @@ static void tui_progress__update(struct ui_progress *p)
if (p->total == 0) if (p->total == 0)
return; return;
ui__refresh_dimensions(true); ui__refresh_dimensions(false);
pthread_mutex_lock(&ui__lock); pthread_mutex_lock(&ui__lock);
y = SLtt_Screen_Rows / 2 - 2; y = SLtt_Screen_Rows / 2 - 2;
SLsmg_set_color(0); SLsmg_set_color(0);
SLsmg_draw_box(y, 0, 3, SLtt_Screen_Cols); SLsmg_draw_box(y, 0, 3, SLtt_Screen_Cols);
SLsmg_gotorc(y++, 1); SLsmg_gotorc(y++, 1);
SLsmg_write_string((char *)p->title); SLsmg_write_string((char *)p->title);
SLsmg_fill_region(y, 1, 1, SLtt_Screen_Cols - 2, ' ');
SLsmg_set_color(HE_COLORSET_SELECTED); SLsmg_set_color(HE_COLORSET_SELECTED);
bar = ((SLtt_Screen_Cols - 2) * p->curr) / p->total; bar = ((SLtt_Screen_Cols - 2) * p->curr) / p->total;
SLsmg_fill_region(y, 1, 1, bar, ' '); SLsmg_fill_region(y, 1, 1, bar, ' ');
......
...@@ -170,7 +170,8 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool, ...@@ -170,7 +170,8 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
union perf_event *event, union perf_event *event,
pid_t pid, pid_t tgid, pid_t pid, pid_t tgid,
perf_event__handler_t process, perf_event__handler_t process,
struct machine *machine) struct machine *machine,
bool mmap_data)
{ {
char filename[PATH_MAX]; char filename[PATH_MAX];
FILE *fp; FILE *fp;
...@@ -188,10 +189,6 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool, ...@@ -188,10 +189,6 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
} }
event->header.type = PERF_RECORD_MMAP; event->header.type = PERF_RECORD_MMAP;
/*
* Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
*/
event->header.misc = PERF_RECORD_MISC_USER;
while (1) { while (1) {
char bf[BUFSIZ]; char bf[BUFSIZ];
...@@ -215,9 +212,17 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool, ...@@ -215,9 +212,17 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
if (n != 5) if (n != 5)
continue; continue;
/*
* Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
*/
event->header.misc = PERF_RECORD_MISC_USER;
if (prot[2] != 'x') if (prot[2] != 'x') {
continue; if (!mmap_data || prot[0] != 'r')
continue;
event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
}
if (!strcmp(execname, "")) if (!strcmp(execname, ""))
strcpy(execname, anonstr); strcpy(execname, anonstr);
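In isolation, the new protection check above works like this: executable mappings are synthesized as before, and readable non-executable mappings are now kept and tagged PERF_RECORD_MISC_MMAP_DATA when --data (mmap_data) is in effect; everything else is still skipped. A self-contained sketch of that decision (the helper is illustrative; the real code does this inline while parsing /proc/<pid>/maps):

#include <stdbool.h>
#include <linux/perf_event.h>

/*
 * Map a /proc/<pid>/maps permission string ("rwxp" layout) to header.misc
 * bits; returns -1 when the mapping should not be synthesized at all.
 */
static int prot_to_misc(const char *prot, bool mmap_data, unsigned short *misc)
{
	*misc = PERF_RECORD_MISC_USER;

	if (prot[2] != 'x') {			/* not executable */
		if (!mmap_data || prot[0] != 'r')
			return -1;		/* neither exec nor readable data: skip */
		*misc |= PERF_RECORD_MISC_MMAP_DATA;
	}
	return 0;
}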
...@@ -304,20 +309,21 @@ static int __event__synthesize_thread(union perf_event *comm_event, ...@@ -304,20 +309,21 @@ static int __event__synthesize_thread(union perf_event *comm_event,
pid_t pid, int full, pid_t pid, int full,
perf_event__handler_t process, perf_event__handler_t process,
struct perf_tool *tool, struct perf_tool *tool,
struct machine *machine) struct machine *machine, bool mmap_data)
{ {
pid_t tgid = perf_event__synthesize_comm(tool, comm_event, pid, full, pid_t tgid = perf_event__synthesize_comm(tool, comm_event, pid, full,
process, machine); process, machine);
if (tgid == -1) if (tgid == -1)
return -1; return -1;
return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
process, machine); process, machine, mmap_data);
} }
int perf_event__synthesize_thread_map(struct perf_tool *tool, int perf_event__synthesize_thread_map(struct perf_tool *tool,
struct thread_map *threads, struct thread_map *threads,
perf_event__handler_t process, perf_event__handler_t process,
struct machine *machine) struct machine *machine,
bool mmap_data)
{ {
union perf_event *comm_event, *mmap_event; union perf_event *comm_event, *mmap_event;
int err = -1, thread, j; int err = -1, thread, j;
...@@ -334,7 +340,8 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool, ...@@ -334,7 +340,8 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
for (thread = 0; thread < threads->nr; ++thread) { for (thread = 0; thread < threads->nr; ++thread) {
if (__event__synthesize_thread(comm_event, mmap_event, if (__event__synthesize_thread(comm_event, mmap_event,
threads->map[thread], 0, threads->map[thread], 0,
process, tool, machine)) { process, tool, machine,
mmap_data)) {
err = -1; err = -1;
break; break;
} }
...@@ -356,10 +363,10 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool, ...@@ -356,10 +363,10 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
/* if not, generate events for it */ /* if not, generate events for it */
if (need_leader && if (need_leader &&
__event__synthesize_thread(comm_event, __event__synthesize_thread(comm_event, mmap_event,
mmap_event, comm_event->comm.pid, 0,
comm_event->comm.pid, 0, process, tool, machine,
process, tool, machine)) { mmap_data)) {
err = -1; err = -1;
break; break;
} }
...@@ -374,7 +381,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool, ...@@ -374,7 +381,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
int perf_event__synthesize_threads(struct perf_tool *tool, int perf_event__synthesize_threads(struct perf_tool *tool,
perf_event__handler_t process, perf_event__handler_t process,
struct machine *machine) struct machine *machine, bool mmap_data)
{ {
DIR *proc; DIR *proc;
struct dirent dirent, *next; struct dirent dirent, *next;
...@@ -404,7 +411,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool, ...@@ -404,7 +411,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
* one thread couldn't be synthesized. * one thread couldn't be synthesized.
*/ */
__event__synthesize_thread(comm_event, mmap_event, pid, 1, __event__synthesize_thread(comm_event, mmap_event, pid, 1,
process, tool, machine); process, tool, machine, mmap_data);
} }
err = 0; err = 0;
...@@ -528,19 +535,22 @@ int perf_event__process_lost(struct perf_tool *tool __maybe_unused, ...@@ -528,19 +535,22 @@ int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp) size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{ {
return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n", return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
event->mmap.pid, event->mmap.tid, event->mmap.start, event->mmap.pid, event->mmap.tid, event->mmap.start,
event->mmap.len, event->mmap.pgoff, event->mmap.filename); event->mmap.len, event->mmap.pgoff,
(event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
event->mmap.filename);
} }
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp) size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{ {
return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
" %02x:%02x %"PRIu64" %"PRIu64"]: %s\n", " %02x:%02x %"PRIu64" %"PRIu64"]: %c %s\n",
event->mmap2.pid, event->mmap2.tid, event->mmap2.start, event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj, event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
event->mmap2.min, event->mmap2.ino, event->mmap2.min, event->mmap2.ino,
event->mmap2.ino_generation, event->mmap2.ino_generation,
(event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
event->mmap2.filename); event->mmap2.filename);
} }
......
...@@ -208,10 +208,10 @@ typedef int (*perf_event__handler_t)(struct perf_tool *tool, ...@@ -208,10 +208,10 @@ typedef int (*perf_event__handler_t)(struct perf_tool *tool,
int perf_event__synthesize_thread_map(struct perf_tool *tool, int perf_event__synthesize_thread_map(struct perf_tool *tool,
struct thread_map *threads, struct thread_map *threads,
perf_event__handler_t process, perf_event__handler_t process,
struct machine *machine); struct machine *machine, bool mmap_data);
int perf_event__synthesize_threads(struct perf_tool *tool, int perf_event__synthesize_threads(struct perf_tool *tool,
perf_event__handler_t process, perf_event__handler_t process,
struct machine *machine); struct machine *machine, bool mmap_data);
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
perf_event__handler_t process, perf_event__handler_t process,
struct machine *machine, struct machine *machine,
......
...@@ -117,6 +117,8 @@ void perf_evlist__delete(struct perf_evlist *evlist) ...@@ -117,6 +117,8 @@ void perf_evlist__delete(struct perf_evlist *evlist)
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry) void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{ {
list_add_tail(&entry->node, &evlist->entries); list_add_tail(&entry->node, &evlist->entries);
entry->idx = evlist->nr_entries;
if (!evlist->nr_entries++) if (!evlist->nr_entries++)
perf_evlist__set_id_pos(evlist); perf_evlist__set_id_pos(evlist);
} }
...@@ -165,7 +167,7 @@ int perf_evlist__add_default(struct perf_evlist *evlist) ...@@ -165,7 +167,7 @@ int perf_evlist__add_default(struct perf_evlist *evlist)
event_attr_init(&attr); event_attr_init(&attr);
evsel = perf_evsel__new(&attr, 0); evsel = perf_evsel__new(&attr);
if (evsel == NULL) if (evsel == NULL)
goto error; goto error;
...@@ -190,7 +192,7 @@ static int perf_evlist__add_attrs(struct perf_evlist *evlist, ...@@ -190,7 +192,7 @@ static int perf_evlist__add_attrs(struct perf_evlist *evlist,
size_t i; size_t i;
for (i = 0; i < nr_attrs; i++) { for (i = 0; i < nr_attrs; i++) {
evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i); evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
if (evsel == NULL) if (evsel == NULL)
goto out_delete_partial_list; goto out_delete_partial_list;
list_add_tail(&evsel->node, &head); list_add_tail(&evsel->node, &head);
...@@ -249,9 +251,8 @@ perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist, ...@@ -249,9 +251,8 @@ perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
int perf_evlist__add_newtp(struct perf_evlist *evlist, int perf_evlist__add_newtp(struct perf_evlist *evlist,
const char *sys, const char *name, void *handler) const char *sys, const char *name, void *handler)
{ {
struct perf_evsel *evsel; struct perf_evsel *evsel = perf_evsel__newtp(sys, name);
evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
if (evsel == NULL) if (evsel == NULL)
return -1; return -1;
...@@ -704,12 +705,10 @@ static size_t perf_evlist__mmap_size(unsigned long pages) ...@@ -704,12 +705,10 @@ static size_t perf_evlist__mmap_size(unsigned long pages)
return (pages + 1) * page_size; return (pages + 1) * page_size;
} }
int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str, static long parse_pages_arg(const char *str, unsigned long min,
int unset __maybe_unused) unsigned long max)
{ {
unsigned int *mmap_pages = opt->value;
unsigned long pages, val; unsigned long pages, val;
size_t size;
static struct parse_tag tags[] = { static struct parse_tag tags[] = {
{ .tag = 'B', .mult = 1 }, { .tag = 'B', .mult = 1 },
{ .tag = 'K', .mult = 1 << 10 }, { .tag = 'K', .mult = 1 << 10 },
...@@ -718,33 +717,49 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str, ...@@ -718,33 +717,49 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
{ .tag = 0 }, { .tag = 0 },
}; };
if (str == NULL)
return -EINVAL;
val = parse_tag_value(str, tags); val = parse_tag_value(str, tags);
if (val != (unsigned long) -1) { if (val != (unsigned long) -1) {
/* we got file size value */ /* we got file size value */
pages = PERF_ALIGN(val, page_size) / page_size; pages = PERF_ALIGN(val, page_size) / page_size;
if (pages < (1UL << 31) && !is_power_of_2(pages)) {
pages = next_pow2(pages);
pr_info("rounding mmap pages size to %lu (%lu pages)\n",
pages * page_size, pages);
}
} else { } else {
/* we got pages count value */ /* we got pages count value */
char *eptr; char *eptr;
pages = strtoul(str, &eptr, 10); pages = strtoul(str, &eptr, 10);
if (*eptr != '\0') { if (*eptr != '\0')
pr_err("failed to parse --mmap_pages/-m value\n"); return -EINVAL;
return -1;
}
} }
if (pages > UINT_MAX || pages > SIZE_MAX / page_size) { if ((pages == 0) && (min == 0)) {
pr_err("--mmap_pages/-m value too big\n"); /* leave number of pages at 0 */
return -1; } else if (pages < (1UL << 31) && !is_power_of_2(pages)) {
/* round pages up to next power of 2 */
pages = next_pow2(pages);
pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
pages * page_size, pages);
} }
size = perf_evlist__mmap_size(pages); if (pages > max)
if (!size) { return -EINVAL;
pr_err("--mmap_pages/-m value must be a power of two.");
return pages;
}
int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
int unset __maybe_unused)
{
unsigned int *mmap_pages = opt->value;
unsigned long max = UINT_MAX;
long pages;
if (max < SIZE_MAX / page_size)
max = SIZE_MAX / page_size;
pages = parse_pages_arg(str, 1, max);
if (pages < 0) {
pr_err("Invalid argument for --mmap_pages/-m\n");
return -1; return -1;
} }
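The rounding behaviour introduced by parse_pages_arg() above, in a nutshell: a page count that is not a power of two is bumped up to the next power of two before the mmap size is computed. A minimal standalone illustration (next_pow2() here is a local stand-in for the perf-tools helper):

#include <stdio.h>

static unsigned long next_pow2(unsigned long x)
{
	unsigned long p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned long pages = 3;			/* e.g. "perf record -m 3" */

	if (pages && (pages & (pages - 1)))		/* not a power of two */
		pages = next_pow2(pages);

	printf("%lu pages\n", pages);			/* prints "4 pages" */
	return 0;
}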
...@@ -796,8 +811,7 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, ...@@ -796,8 +811,7 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
return perf_evlist__mmap_per_cpu(evlist, prot, mask); return perf_evlist__mmap_per_cpu(evlist, prot, mask);
} }
int perf_evlist__create_maps(struct perf_evlist *evlist, int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
struct perf_target *target)
{ {
evlist->threads = thread_map__new_str(target->pid, target->tid, evlist->threads = thread_map__new_str(target->pid, target->tid,
target->uid); target->uid);
...@@ -805,9 +819,9 @@ int perf_evlist__create_maps(struct perf_evlist *evlist, ...@@ -805,9 +819,9 @@ int perf_evlist__create_maps(struct perf_evlist *evlist,
if (evlist->threads == NULL) if (evlist->threads == NULL)
return -1; return -1;
if (perf_target__has_task(target)) if (target__has_task(target))
evlist->cpus = cpu_map__dummy_new(); evlist->cpus = cpu_map__dummy_new();
else if (!perf_target__has_cpu(target) && !target->uses_mmap) else if (!target__has_cpu(target) && !target->uses_mmap)
evlist->cpus = cpu_map__dummy_new(); evlist->cpus = cpu_map__dummy_new();
else else
evlist->cpus = cpu_map__new(target->cpu_list); evlist->cpus = cpu_map__new(target->cpu_list);
...@@ -1016,8 +1030,7 @@ int perf_evlist__open(struct perf_evlist *evlist) ...@@ -1016,8 +1030,7 @@ int perf_evlist__open(struct perf_evlist *evlist)
return err; return err;
} }
int perf_evlist__prepare_workload(struct perf_evlist *evlist, int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
struct perf_target *target,
const char *argv[], bool pipe_output, const char *argv[], bool pipe_output,
bool want_signal) bool want_signal)
{ {
...@@ -1069,7 +1082,7 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist, ...@@ -1069,7 +1082,7 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist,
exit(-1); exit(-1);
} }
if (perf_target__none(target)) if (target__none(target))
evlist->threads->map[0] = evlist->workload.pid; evlist->threads->map[0] = evlist->workload.pid;
close(child_ready_pipe[1]); close(child_ready_pipe[1]);
......
...@@ -102,7 +102,7 @@ void perf_evlist__config(struct perf_evlist *evlist, ...@@ -102,7 +102,7 @@ void perf_evlist__config(struct perf_evlist *evlist,
int perf_record_opts__config(struct perf_record_opts *opts); int perf_record_opts__config(struct perf_record_opts *opts);
int perf_evlist__prepare_workload(struct perf_evlist *evlist, int perf_evlist__prepare_workload(struct perf_evlist *evlist,
struct perf_target *target, struct target *target,
const char *argv[], bool pipe_output, const char *argv[], bool pipe_output,
bool want_signal); bool want_signal);
int perf_evlist__start_workload(struct perf_evlist *evlist); int perf_evlist__start_workload(struct perf_evlist *evlist);
...@@ -134,8 +134,7 @@ static inline void perf_evlist__set_maps(struct perf_evlist *evlist, ...@@ -134,8 +134,7 @@ static inline void perf_evlist__set_maps(struct perf_evlist *evlist,
evlist->threads = threads; evlist->threads = threads;
} }
int perf_evlist__create_maps(struct perf_evlist *evlist, int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target);
struct perf_target *target);
void perf_evlist__delete_maps(struct perf_evlist *evlist); void perf_evlist__delete_maps(struct perf_evlist *evlist);
int perf_evlist__apply_filters(struct perf_evlist *evlist); int perf_evlist__apply_filters(struct perf_evlist *evlist);
......
...@@ -168,7 +168,7 @@ void perf_evsel__init(struct perf_evsel *evsel, ...@@ -168,7 +168,7 @@ void perf_evsel__init(struct perf_evsel *evsel,
perf_evsel__calc_id_pos(evsel); perf_evsel__calc_id_pos(evsel);
} }
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx) struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
{ {
struct perf_evsel *evsel = zalloc(sizeof(*evsel)); struct perf_evsel *evsel = zalloc(sizeof(*evsel));
...@@ -219,7 +219,7 @@ struct event_format *event_format__new(const char *sys, const char *name) ...@@ -219,7 +219,7 @@ struct event_format *event_format__new(const char *sys, const char *name)
return format; return format;
} }
struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx) struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
{ {
struct perf_evsel *evsel = zalloc(sizeof(*evsel)); struct perf_evsel *evsel = zalloc(sizeof(*evsel));
...@@ -645,7 +645,7 @@ void perf_evsel__config(struct perf_evsel *evsel, ...@@ -645,7 +645,7 @@ void perf_evsel__config(struct perf_evsel *evsel,
} }
} }
if (perf_target__has_cpu(&opts->target)) if (target__has_cpu(&opts->target))
perf_evsel__set_sample_bit(evsel, CPU); perf_evsel__set_sample_bit(evsel, CPU);
if (opts->period) if (opts->period)
...@@ -653,7 +653,7 @@ void perf_evsel__config(struct perf_evsel *evsel, ...@@ -653,7 +653,7 @@ void perf_evsel__config(struct perf_evsel *evsel,
if (!perf_missing_features.sample_id_all && if (!perf_missing_features.sample_id_all &&
(opts->sample_time || !opts->no_inherit || (opts->sample_time || !opts->no_inherit ||
perf_target__has_cpu(&opts->target))) target__has_cpu(&opts->target)))
perf_evsel__set_sample_bit(evsel, TIME); perf_evsel__set_sample_bit(evsel, TIME);
if (opts->raw_samples) { if (opts->raw_samples) {
...@@ -696,7 +696,7 @@ void perf_evsel__config(struct perf_evsel *evsel, ...@@ -696,7 +696,7 @@ void perf_evsel__config(struct perf_evsel *evsel,
* Setting enable_on_exec for independent events and * Setting enable_on_exec for independent events and
* group leaders for traced executed by perf. * group leaders for traced executed by perf.
*/ */
if (perf_target__none(&opts->target) && perf_evsel__is_group_leader(evsel)) if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
attr->enable_on_exec = 1; attr->enable_on_exec = 1;
} }
...@@ -2006,8 +2006,7 @@ bool perf_evsel__fallback(struct perf_evsel *evsel, int err, ...@@ -2006,8 +2006,7 @@ bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
return false; return false;
} }
int perf_evsel__open_strerror(struct perf_evsel *evsel, int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
struct perf_target *target,
int err, char *msg, size_t size) int err, char *msg, size_t size)
{ {
switch (err) { switch (err) {
......
...@@ -96,8 +96,19 @@ struct thread_map; ...@@ -96,8 +96,19 @@ struct thread_map;
struct perf_evlist; struct perf_evlist;
struct perf_record_opts; struct perf_record_opts;
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx); struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx);
struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx);
static inline struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
{
return perf_evsel__new_idx(attr, 0);
}
struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx);
static inline struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name)
{
return perf_evsel__newtp_idx(sys, name, 0);
}
struct event_format *event_format__new(const char *sys, const char *name); struct event_format *event_format__new(const char *sys, const char *name);
...@@ -307,8 +318,7 @@ int perf_evsel__fprintf(struct perf_evsel *evsel, ...@@ -307,8 +318,7 @@ int perf_evsel__fprintf(struct perf_evsel *evsel,
bool perf_evsel__fallback(struct perf_evsel *evsel, int err, bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
char *msg, size_t msgsize); char *msg, size_t msgsize);
int perf_evsel__open_strerror(struct perf_evsel *evsel, int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
struct perf_target *target,
int err, char *msg, size_t size); int err, char *msg, size_t size);
static inline int perf_evsel__group_idx(struct perf_evsel *evsel) static inline int perf_evsel__group_idx(struct perf_evsel *evsel)
......
...@@ -2797,7 +2797,7 @@ int perf_session__read_header(struct perf_session *session) ...@@ -2797,7 +2797,7 @@ int perf_session__read_header(struct perf_session *session)
perf_event__attr_swap(&f_attr.attr); perf_event__attr_swap(&f_attr.attr);
tmp = lseek(fd, 0, SEEK_CUR); tmp = lseek(fd, 0, SEEK_CUR);
evsel = perf_evsel__new(&f_attr.attr, i); evsel = perf_evsel__new(&f_attr.attr);
if (evsel == NULL) if (evsel == NULL)
goto out_delete_evlist; goto out_delete_evlist;
...@@ -2916,7 +2916,7 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused, ...@@ -2916,7 +2916,7 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
return -ENOMEM; return -ENOMEM;
} }
evsel = perf_evsel__new(&event->attr.attr, evlist->nr_entries); evsel = perf_evsel__new(&event->attr.attr);
if (evsel == NULL) if (evsel == NULL)
return -ENOMEM; return -ENOMEM;
......
...@@ -1394,3 +1394,15 @@ int machine__for_each_thread(struct machine *machine, ...@@ -1394,3 +1394,15 @@ int machine__for_each_thread(struct machine *machine,
} }
return rc; return rc;
} }
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
struct target *target, struct thread_map *threads,
perf_event__handler_t process, bool data_mmap)
{
if (target__has_task(target))
return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
else if (target__has_cpu(target))
return perf_event__synthesize_threads(tool, process, machine, data_mmap);
/* command specified */
return 0;
}
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
#include <sys/types.h> #include <sys/types.h>
#include <linux/rbtree.h> #include <linux/rbtree.h>
#include "map.h" #include "map.h"
#include "event.h"
struct addr_location; struct addr_location;
struct branch_stack; struct branch_stack;
...@@ -178,4 +179,15 @@ int machine__for_each_thread(struct machine *machine, ...@@ -178,4 +179,15 @@ int machine__for_each_thread(struct machine *machine,
int (*fn)(struct thread *thread, void *p), int (*fn)(struct thread *thread, void *p),
void *priv); void *priv);
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
struct target *target, struct thread_map *threads,
perf_event__handler_t process, bool data_mmap);
static inline
int machine__synthesize_threads(struct machine *machine, struct target *target,
struct thread_map *threads, bool data_mmap)
{
return __machine__synthesize_threads(machine, NULL, target, threads,
perf_event__process, data_mmap);
}
#endif /* __PERF_MACHINE_H */ #endif /* __PERF_MACHINE_H */
...@@ -277,7 +277,7 @@ static int __add_event(struct list_head *list, int *idx, ...@@ -277,7 +277,7 @@ static int __add_event(struct list_head *list, int *idx,
event_attr_init(attr); event_attr_init(attr);
evsel = perf_evsel__new(attr, (*idx)++); evsel = perf_evsel__new_idx(attr, (*idx)++);
if (!evsel) if (!evsel)
return -ENOMEM; return -ENOMEM;
...@@ -378,7 +378,7 @@ static int add_tracepoint(struct list_head *list, int *idx, ...@@ -378,7 +378,7 @@ static int add_tracepoint(struct list_head *list, int *idx,
{ {
struct perf_evsel *evsel; struct perf_evsel *evsel;
evsel = perf_evsel__newtp(sys_name, evt_name, (*idx)++); evsel = perf_evsel__newtp_idx(sys_name, evt_name, (*idx)++);
if (!evsel) if (!evsel)
return -ENOMEM; return -ENOMEM;
...@@ -1097,7 +1097,7 @@ static bool is_event_supported(u8 type, unsigned config) ...@@ -1097,7 +1097,7 @@ static bool is_event_supported(u8 type, unsigned config)
.threads = { 0 }, .threads = { 0 },
}; };
evsel = perf_evsel__new(&attr, 0); evsel = perf_evsel__new(&attr);
if (evsel) { if (evsel) {
ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0; ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0;
perf_evsel__delete(evsel); perf_evsel__delete(evsel);
......
...@@ -1137,6 +1137,8 @@ static void sort_entry__setup_elide(struct sort_entry *se, ...@@ -1137,6 +1137,8 @@ static void sort_entry__setup_elide(struct sort_entry *se,
void sort__setup_elide(FILE *output) void sort__setup_elide(FILE *output)
{ {
struct sort_entry *se;
sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list,
"dso", output); "dso", output);
sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list,
...@@ -1172,4 +1174,15 @@ void sort__setup_elide(FILE *output) ...@@ -1172,4 +1174,15 @@ void sort__setup_elide(FILE *output)
"snoop", output); "snoop", output);
} }
/*
* It makes no sense to elide all of sort entries.
* Just revert them to show up again.
*/
list_for_each_entry(se, &hist_entry__sort_list, list) {
if (!se->elide)
return;
}
list_for_each_entry(se, &hist_entry__sort_list, list)
se->elide = false;
} }
...@@ -13,9 +13,9 @@ ...@@ -13,9 +13,9 @@
#include <string.h> #include <string.h>
enum perf_target_errno perf_target__validate(struct perf_target *target) enum target_errno target__validate(struct target *target)
{ {
enum perf_target_errno ret = PERF_ERRNO_TARGET__SUCCESS; enum target_errno ret = TARGET_ERRNO__SUCCESS;
if (target->pid) if (target->pid)
target->tid = target->pid; target->tid = target->pid;
...@@ -23,42 +23,42 @@ enum perf_target_errno perf_target__validate(struct perf_target *target) ...@@ -23,42 +23,42 @@ enum perf_target_errno perf_target__validate(struct perf_target *target)
/* CPU and PID are mutually exclusive */ /* CPU and PID are mutually exclusive */
if (target->tid && target->cpu_list) { if (target->tid && target->cpu_list) {
target->cpu_list = NULL; target->cpu_list = NULL;
if (ret == PERF_ERRNO_TARGET__SUCCESS) if (ret == TARGET_ERRNO__SUCCESS)
ret = PERF_ERRNO_TARGET__PID_OVERRIDE_CPU; ret = TARGET_ERRNO__PID_OVERRIDE_CPU;
} }
/* UID and PID are mutually exclusive */ /* UID and PID are mutually exclusive */
if (target->tid && target->uid_str) { if (target->tid && target->uid_str) {
target->uid_str = NULL; target->uid_str = NULL;
if (ret == PERF_ERRNO_TARGET__SUCCESS) if (ret == TARGET_ERRNO__SUCCESS)
ret = PERF_ERRNO_TARGET__PID_OVERRIDE_UID; ret = TARGET_ERRNO__PID_OVERRIDE_UID;
} }
/* UID and CPU are mutually exclusive */ /* UID and CPU are mutually exclusive */
if (target->uid_str && target->cpu_list) { if (target->uid_str && target->cpu_list) {
target->cpu_list = NULL; target->cpu_list = NULL;
if (ret == PERF_ERRNO_TARGET__SUCCESS) if (ret == TARGET_ERRNO__SUCCESS)
ret = PERF_ERRNO_TARGET__UID_OVERRIDE_CPU; ret = TARGET_ERRNO__UID_OVERRIDE_CPU;
} }
/* PID and SYSTEM are mutually exclusive */ /* PID and SYSTEM are mutually exclusive */
if (target->tid && target->system_wide) { if (target->tid && target->system_wide) {
target->system_wide = false; target->system_wide = false;
if (ret == PERF_ERRNO_TARGET__SUCCESS) if (ret == TARGET_ERRNO__SUCCESS)
ret = PERF_ERRNO_TARGET__PID_OVERRIDE_SYSTEM; ret = TARGET_ERRNO__PID_OVERRIDE_SYSTEM;
} }
/* UID and SYSTEM are mutually exclusive */ /* UID and SYSTEM are mutually exclusive */
if (target->uid_str && target->system_wide) { if (target->uid_str && target->system_wide) {
target->system_wide = false; target->system_wide = false;
if (ret == PERF_ERRNO_TARGET__SUCCESS) if (ret == TARGET_ERRNO__SUCCESS)
ret = PERF_ERRNO_TARGET__UID_OVERRIDE_SYSTEM; ret = TARGET_ERRNO__UID_OVERRIDE_SYSTEM;
} }
return ret; return ret;
} }
enum perf_target_errno perf_target__parse_uid(struct perf_target *target) enum target_errno target__parse_uid(struct target *target)
{ {
struct passwd pwd, *result; struct passwd pwd, *result;
char buf[1024]; char buf[1024];
...@@ -66,7 +66,7 @@ enum perf_target_errno perf_target__parse_uid(struct perf_target *target) ...@@ -66,7 +66,7 @@ enum perf_target_errno perf_target__parse_uid(struct perf_target *target)
target->uid = UINT_MAX; target->uid = UINT_MAX;
if (str == NULL) if (str == NULL)
return PERF_ERRNO_TARGET__SUCCESS; return TARGET_ERRNO__SUCCESS;
/* Try user name first */ /* Try user name first */
getpwnam_r(str, &pwd, buf, sizeof(buf), &result); getpwnam_r(str, &pwd, buf, sizeof(buf), &result);
...@@ -79,22 +79,22 @@ enum perf_target_errno perf_target__parse_uid(struct perf_target *target) ...@@ -79,22 +79,22 @@ enum perf_target_errno perf_target__parse_uid(struct perf_target *target)
int uid = strtol(str, &endptr, 10); int uid = strtol(str, &endptr, 10);
if (*endptr != '\0') if (*endptr != '\0')
return PERF_ERRNO_TARGET__INVALID_UID; return TARGET_ERRNO__INVALID_UID;
getpwuid_r(uid, &pwd, buf, sizeof(buf), &result); getpwuid_r(uid, &pwd, buf, sizeof(buf), &result);
if (result == NULL) if (result == NULL)
return PERF_ERRNO_TARGET__USER_NOT_FOUND; return TARGET_ERRNO__USER_NOT_FOUND;
} }
target->uid = result->pw_uid; target->uid = result->pw_uid;
return PERF_ERRNO_TARGET__SUCCESS; return TARGET_ERRNO__SUCCESS;
} }
/* /*
* This must have a same ordering as the enum perf_target_errno. * This must have a same ordering as the enum target_errno.
*/ */
static const char *perf_target__error_str[] = { static const char *target__error_str[] = {
"PID/TID switch overriding CPU", "PID/TID switch overriding CPU",
"PID/TID switch overriding UID", "PID/TID switch overriding UID",
"UID switch overriding CPU", "UID switch overriding CPU",
...@@ -104,7 +104,7 @@ static const char *perf_target__error_str[] = { ...@@ -104,7 +104,7 @@ static const char *perf_target__error_str[] = {
"Problems obtaining information for user %s", "Problems obtaining information for user %s",
}; };
int perf_target__strerror(struct perf_target *target, int errnum, int target__strerror(struct target *target, int errnum,
char *buf, size_t buflen) char *buf, size_t buflen)
{ {
int idx; int idx;
...@@ -124,21 +124,19 @@ int perf_target__strerror(struct perf_target *target, int errnum, ...@@ -124,21 +124,19 @@ int perf_target__strerror(struct perf_target *target, int errnum,
return 0; return 0;
} }
if (errnum < __PERF_ERRNO_TARGET__START || if (errnum < __TARGET_ERRNO__START || errnum >= __TARGET_ERRNO__END)
errnum >= __PERF_ERRNO_TARGET__END)
return -1; return -1;
idx = errnum - __PERF_ERRNO_TARGET__START; idx = errnum - __TARGET_ERRNO__START;
msg = perf_target__error_str[idx]; msg = target__error_str[idx];
switch (errnum) { switch (errnum) {
case PERF_ERRNO_TARGET__PID_OVERRIDE_CPU case TARGET_ERRNO__PID_OVERRIDE_CPU ... TARGET_ERRNO__UID_OVERRIDE_SYSTEM:
... PERF_ERRNO_TARGET__UID_OVERRIDE_SYSTEM:
snprintf(buf, buflen, "%s", msg); snprintf(buf, buflen, "%s", msg);
break; break;
case PERF_ERRNO_TARGET__INVALID_UID: case TARGET_ERRNO__INVALID_UID:
case PERF_ERRNO_TARGET__USER_NOT_FOUND: case TARGET_ERRNO__USER_NOT_FOUND:
snprintf(buf, buflen, msg, target->uid_str); snprintf(buf, buflen, msg, target->uid_str);
break; break;
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
#include <stdbool.h> #include <stdbool.h>
#include <sys/types.h> #include <sys/types.h>
struct perf_target { struct target {
const char *pid; const char *pid;
const char *tid; const char *tid;
const char *cpu_list; const char *cpu_list;
...@@ -14,8 +14,8 @@ struct perf_target { ...@@ -14,8 +14,8 @@ struct perf_target {
bool uses_mmap; bool uses_mmap;
}; };
enum perf_target_errno { enum target_errno {
PERF_ERRNO_TARGET__SUCCESS = 0, TARGET_ERRNO__SUCCESS = 0,
/* /*
* Choose an arbitrary negative big number not to clash with standard * Choose an arbitrary negative big number not to clash with standard
...@@ -24,42 +24,40 @@ enum perf_target_errno { ...@@ -24,42 +24,40 @@ enum perf_target_errno {
* *
* http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html * http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html
*/ */
__PERF_ERRNO_TARGET__START = -10000, __TARGET_ERRNO__START = -10000,
/* for target__validate() */
TARGET_ERRNO__PID_OVERRIDE_CPU = __TARGET_ERRNO__START,
TARGET_ERRNO__PID_OVERRIDE_UID,
TARGET_ERRNO__UID_OVERRIDE_CPU,
TARGET_ERRNO__PID_OVERRIDE_SYSTEM,
TARGET_ERRNO__UID_OVERRIDE_SYSTEM,
/* for perf_target__validate() */ /* for target__parse_uid() */
PERF_ERRNO_TARGET__PID_OVERRIDE_CPU = __PERF_ERRNO_TARGET__START, TARGET_ERRNO__INVALID_UID,
PERF_ERRNO_TARGET__PID_OVERRIDE_UID, TARGET_ERRNO__USER_NOT_FOUND,
PERF_ERRNO_TARGET__UID_OVERRIDE_CPU,
PERF_ERRNO_TARGET__PID_OVERRIDE_SYSTEM,
PERF_ERRNO_TARGET__UID_OVERRIDE_SYSTEM,
/* for perf_target__parse_uid() */ __TARGET_ERRNO__END,
PERF_ERRNO_TARGET__INVALID_UID,
PERF_ERRNO_TARGET__USER_NOT_FOUND,
__PERF_ERRNO_TARGET__END,
}; };
enum perf_target_errno perf_target__validate(struct perf_target *target); enum target_errno target__validate(struct target *target);
enum perf_target_errno perf_target__parse_uid(struct perf_target *target); enum target_errno target__parse_uid(struct target *target);
int perf_target__strerror(struct perf_target *target, int errnum, char *buf, int target__strerror(struct target *target, int errnum, char *buf, size_t buflen);
size_t buflen);
static inline bool perf_target__has_task(struct perf_target *target) static inline bool target__has_task(struct target *target)
{ {
return target->tid || target->pid || target->uid_str; return target->tid || target->pid || target->uid_str;
} }
static inline bool perf_target__has_cpu(struct perf_target *target) static inline bool target__has_cpu(struct target *target)
{ {
return target->system_wide || target->cpu_list; return target->system_wide || target->cpu_list;
} }
static inline bool perf_target__none(struct perf_target *target) static inline bool target__none(struct target *target)
{ {
return !perf_target__has_task(target) && !perf_target__has_cpu(target); return !target__has_task(target) && !target__has_cpu(target);
} }
#endif /* _PERF_TARGET_H */ #endif /* _PERF_TARGET_H */
...@@ -27,7 +27,7 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size) ...@@ -27,7 +27,7 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
float ksamples_per_sec; float ksamples_per_sec;
float esamples_percent; float esamples_percent;
struct perf_record_opts *opts = &top->record_opts; struct perf_record_opts *opts = &top->record_opts;
struct perf_target *target = &opts->target; struct target *target = &opts->target;
size_t ret = 0; size_t ret = 0;
if (top->samples) { if (top->samples) {
......