Commit 803ca418 authored by Ingo Molnar's avatar Ingo Molnar

Merge tag 'perf-core-for-mingo' of...

Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

User visible changes:

  - Fixes and improvements for supporting annotating ARM binaries, support ARM
    call and jump instructions, more work needed to have arch specific stuff
    separated into tools/perf/arch/*/annotate/ (Russell King)

  - Fix several 'perf test' entries broken by recent perf/core changes (Jiri Olsa)

Infrastructure changes:

  - Consolidate perf_ev{list,sel}__{enable,disable}() calls (Jiri Olsa)

  - Pass correct string to dso__adjust_kmod_long_name() (Wang Nan)
Signed-off-by: default avatarArnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: default avatarIngo Molnar <mingo@kernel.org>
parents f1ad4488 cfef25b8
...@@ -168,15 +168,25 @@ static int create_perf_stat_counter(struct perf_evsel *evsel) ...@@ -168,15 +168,25 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
attr->sample_period = 0; attr->sample_period = 0;
attr->sample_type = 0; attr->sample_type = 0;
if (target__has_cpu(&target)) /*
return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel)); * Disabling all counters initially, they will be enabled
* either manually by us or by kernel via enable_on_exec
if (!target__has_task(&target) && perf_evsel__is_group_leader(evsel)) { * set later.
*/
if (perf_evsel__is_group_leader(evsel)) {
attr->disabled = 1; attr->disabled = 1;
if (!initial_delay)
/*
* In case of initial_delay we enable tracee
* events manually.
*/
if (target__none(&target) && !initial_delay)
attr->enable_on_exec = 1; attr->enable_on_exec = 1;
} }
if (target__has_cpu(&target))
return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
return perf_evsel__open_per_thread(evsel, evsel_list->threads); return perf_evsel__open_per_thread(evsel, evsel_list->threads);
} }
...@@ -251,18 +261,18 @@ static void process_interval(void) ...@@ -251,18 +261,18 @@ static void process_interval(void)
print_counters(&rs, 0, NULL); print_counters(&rs, 0, NULL);
} }
static void handle_initial_delay(void) static void enable_counters(void)
{ {
struct perf_evsel *counter; if (initial_delay)
if (initial_delay) {
const int ncpus = cpu_map__nr(evsel_list->cpus),
nthreads = thread_map__nr(evsel_list->threads);
usleep(initial_delay * 1000); usleep(initial_delay * 1000);
evlist__for_each(evsel_list, counter)
perf_evsel__enable(counter, ncpus, nthreads); /*
} * We need to enable counters only if:
* - we don't have tracee (attaching to task or cpu)
* - we have initial delay configured
*/
if (!target__none(&target) || initial_delay)
perf_evlist__enable(evsel_list);
} }
static volatile int workload_exec_errno; static volatile int workload_exec_errno;
...@@ -359,7 +369,7 @@ static int __run_perf_stat(int argc, const char **argv) ...@@ -359,7 +369,7 @@ static int __run_perf_stat(int argc, const char **argv)
if (forks) { if (forks) {
perf_evlist__start_workload(evsel_list); perf_evlist__start_workload(evsel_list);
handle_initial_delay(); enable_counters();
if (interval) { if (interval) {
while (!waitpid(child_pid, &status, WNOHANG)) { while (!waitpid(child_pid, &status, WNOHANG)) {
...@@ -378,7 +388,7 @@ static int __run_perf_stat(int argc, const char **argv) ...@@ -378,7 +388,7 @@ static int __run_perf_stat(int argc, const char **argv)
if (WIFSIGNALED(status)) if (WIFSIGNALED(status))
psignal(WTERMSIG(status), argv[0]); psignal(WTERMSIG(status), argv[0]);
} else { } else {
handle_initial_delay(); enable_counters();
while (!done) { while (!done) {
nanosleep(&ts, NULL); nanosleep(&ts, NULL);
if (interval) if (interval)
......
...@@ -433,7 +433,6 @@ enum { ...@@ -433,7 +433,6 @@ enum {
static int do_test_code_reading(bool try_kcore) static int do_test_code_reading(bool try_kcore)
{ {
struct machines machines;
struct machine *machine; struct machine *machine;
struct thread *thread; struct thread *thread;
struct record_opts opts = { struct record_opts opts = {
...@@ -459,8 +458,7 @@ static int do_test_code_reading(bool try_kcore) ...@@ -459,8 +458,7 @@ static int do_test_code_reading(bool try_kcore)
pid = getpid(); pid = getpid();
machines__init(&machines); machine = machine__new_host();
machine = &machines.host;
ret = machine__create_kernel_maps(machine); ret = machine__create_kernel_maps(machine);
if (ret < 0) { if (ret < 0) {
...@@ -549,6 +547,13 @@ static int do_test_code_reading(bool try_kcore) ...@@ -549,6 +547,13 @@ static int do_test_code_reading(bool try_kcore)
if (ret < 0) { if (ret < 0) {
if (!excl_kernel) { if (!excl_kernel) {
excl_kernel = true; excl_kernel = true;
/*
* Both cpus and threads are now owned by evlist
* and will be freed by following perf_evlist__set_maps
* call. Getting a reference to keep them alive.
*/
cpu_map__get(cpus);
thread_map__get(threads);
perf_evlist__set_maps(evlist, NULL, NULL); perf_evlist__set_maps(evlist, NULL, NULL);
perf_evlist__delete(evlist); perf_evlist__delete(evlist);
evlist = NULL; evlist = NULL;
...@@ -594,9 +599,8 @@ static int do_test_code_reading(bool try_kcore) ...@@ -594,9 +599,8 @@ static int do_test_code_reading(bool try_kcore)
cpu_map__put(cpus); cpu_map__put(cpus);
thread_map__put(threads); thread_map__put(threads);
} }
machines__destroy_kernel_maps(&machines);
machine__delete_threads(machine); machine__delete_threads(machine);
machines__exit(&machines); machine__delete(machine);
return err; return err;
} }
......
...@@ -160,14 +160,11 @@ static int krava_1(struct thread *thread) ...@@ -160,14 +160,11 @@ static int krava_1(struct thread *thread)
int test__dwarf_unwind(int subtest __maybe_unused) int test__dwarf_unwind(int subtest __maybe_unused)
{ {
struct machines machines;
struct machine *machine; struct machine *machine;
struct thread *thread; struct thread *thread;
int err = -1; int err = -1;
machines__init(&machines); machine = machine__new_host();
machine = machines__find(&machines, HOST_KERNEL_ID);
if (!machine) { if (!machine) {
pr_err("Could not get machine\n"); pr_err("Could not get machine\n");
return -1; return -1;
...@@ -199,7 +196,6 @@ int test__dwarf_unwind(int subtest __maybe_unused) ...@@ -199,7 +196,6 @@ int test__dwarf_unwind(int subtest __maybe_unused)
out: out:
machine__delete_threads(machine); machine__delete_threads(machine);
machine__exit(machine); machine__delete(machine);
machines__exit(&machines);
return err; return err;
} }
...@@ -103,7 +103,8 @@ int test__perf_evsel__roundtrip_name_test(int subtest __maybe_unused) ...@@ -103,7 +103,8 @@ int test__perf_evsel__roundtrip_name_test(int subtest __maybe_unused)
if (err) if (err)
ret = err; ret = err;
err = perf_evsel__name_array_test(perf_evsel__sw_names); err = __perf_evsel__name_array_test(perf_evsel__sw_names,
PERF_COUNT_SW_DUMMY + 1);
if (err) if (err)
ret = err; ret = err;
......
...@@ -87,6 +87,11 @@ struct machine *setup_fake_machine(struct machines *machines) ...@@ -87,6 +87,11 @@ struct machine *setup_fake_machine(struct machines *machines)
return NULL; return NULL;
} }
if (machine__create_kernel_maps(machine)) {
pr_debug("Not enough memory for machine setup\n");
goto out;
}
for (i = 0; i < ARRAY_SIZE(fake_threads); i++) { for (i = 0; i < ARRAY_SIZE(fake_threads); i++) {
struct thread *thread; struct thread *thread;
......
...@@ -149,7 +149,6 @@ static int synth_process(struct machine *machine) ...@@ -149,7 +149,6 @@ static int synth_process(struct machine *machine)
static int mmap_events(synth_cb synth) static int mmap_events(synth_cb synth)
{ {
struct machines machines;
struct machine *machine; struct machine *machine;
int err, i; int err, i;
...@@ -162,8 +161,7 @@ static int mmap_events(synth_cb synth) ...@@ -162,8 +161,7 @@ static int mmap_events(synth_cb synth)
*/ */
TEST_ASSERT_VAL("failed to create threads", !threads_create()); TEST_ASSERT_VAL("failed to create threads", !threads_create());
machines__init(&machines); machine = machine__new_host();
machine = &machines.host;
dump_trace = verbose > 1 ? 1 : 0; dump_trace = verbose > 1 ? 1 : 0;
...@@ -203,7 +201,7 @@ static int mmap_events(synth_cb synth) ...@@ -203,7 +201,7 @@ static int mmap_events(synth_cb synth)
} }
machine__delete_threads(machine); machine__delete_threads(machine);
machines__exit(&machines); machine__delete(machine);
return err; return err;
} }
......
...@@ -65,6 +65,11 @@ static int call__parse(struct ins_operands *ops) ...@@ -65,6 +65,11 @@ static int call__parse(struct ins_operands *ops)
name++; name++;
#ifdef __arm__
if (strchr(name, '+'))
return -1;
#endif
tok = strchr(name, '>'); tok = strchr(name, '>');
if (tok == NULL) if (tok == NULL)
return -1; return -1;
...@@ -246,7 +251,11 @@ static int mov__parse(struct ins_operands *ops) ...@@ -246,7 +251,11 @@ static int mov__parse(struct ins_operands *ops)
return -1; return -1;
target = ++s; target = ++s;
#ifdef __arm__
comment = strchr(s, ';');
#else
comment = strchr(s, '#'); comment = strchr(s, '#');
#endif
if (comment != NULL) if (comment != NULL)
s = comment - 1; s = comment - 1;
...@@ -354,6 +363,20 @@ static struct ins instructions[] = { ...@@ -354,6 +363,20 @@ static struct ins instructions[] = {
{ .name = "addq", .ops = &mov_ops, }, { .name = "addq", .ops = &mov_ops, },
{ .name = "addw", .ops = &mov_ops, }, { .name = "addw", .ops = &mov_ops, },
{ .name = "and", .ops = &mov_ops, }, { .name = "and", .ops = &mov_ops, },
#ifdef __arm__
{ .name = "b", .ops = &jump_ops, }, // might also be a call
{ .name = "bcc", .ops = &jump_ops, },
{ .name = "bcs", .ops = &jump_ops, },
{ .name = "beq", .ops = &jump_ops, },
{ .name = "bge", .ops = &jump_ops, },
{ .name = "bgt", .ops = &jump_ops, },
{ .name = "bhi", .ops = &jump_ops, },
{ .name = "bl", .ops = &call_ops, },
{ .name = "blt", .ops = &jump_ops, },
{ .name = "bls", .ops = &jump_ops, },
{ .name = "blx", .ops = &call_ops, },
{ .name = "bne", .ops = &jump_ops, },
#endif
{ .name = "bts", .ops = &mov_ops, }, { .name = "bts", .ops = &mov_ops, },
{ .name = "call", .ops = &call_ops, }, { .name = "call", .ops = &call_ops, },
{ .name = "callq", .ops = &call_ops, }, { .name = "callq", .ops = &call_ops, },
......
...@@ -336,20 +336,12 @@ static int perf_evlist__nr_threads(struct perf_evlist *evlist, ...@@ -336,20 +336,12 @@ static int perf_evlist__nr_threads(struct perf_evlist *evlist,
void perf_evlist__disable(struct perf_evlist *evlist) void perf_evlist__disable(struct perf_evlist *evlist)
{ {
int cpu, thread;
struct perf_evsel *pos; struct perf_evsel *pos;
int nr_cpus = cpu_map__nr(evlist->cpus);
int nr_threads;
for (cpu = 0; cpu < nr_cpus; cpu++) {
evlist__for_each(evlist, pos) { evlist__for_each(evlist, pos) {
if (!perf_evsel__is_group_leader(pos) || !pos->fd) if (!perf_evsel__is_group_leader(pos) || !pos->fd)
continue; continue;
nr_threads = perf_evlist__nr_threads(evlist, pos); perf_evsel__disable(pos);
for (thread = 0; thread < nr_threads; thread++)
ioctl(FD(pos, cpu, thread),
PERF_EVENT_IOC_DISABLE, 0);
}
} }
evlist->enabled = false; evlist->enabled = false;
...@@ -357,20 +349,12 @@ void perf_evlist__disable(struct perf_evlist *evlist) ...@@ -357,20 +349,12 @@ void perf_evlist__disable(struct perf_evlist *evlist)
void perf_evlist__enable(struct perf_evlist *evlist) void perf_evlist__enable(struct perf_evlist *evlist)
{ {
int cpu, thread;
struct perf_evsel *pos; struct perf_evsel *pos;
int nr_cpus = cpu_map__nr(evlist->cpus);
int nr_threads;
for (cpu = 0; cpu < nr_cpus; cpu++) {
evlist__for_each(evlist, pos) { evlist__for_each(evlist, pos) {
if (!perf_evsel__is_group_leader(pos) || !pos->fd) if (!perf_evsel__is_group_leader(pos) || !pos->fd)
continue; continue;
nr_threads = perf_evlist__nr_threads(evlist, pos); perf_evsel__enable(pos);
for (thread = 0; thread < nr_threads; thread++)
ioctl(FD(pos, cpu, thread),
PERF_EVENT_IOC_ENABLE, 0);
}
} }
evlist->enabled = true; evlist->enabled = true;
......
...@@ -981,13 +981,26 @@ int perf_evsel__append_filter(struct perf_evsel *evsel, ...@@ -981,13 +981,26 @@ int perf_evsel__append_filter(struct perf_evsel *evsel,
return -1; return -1;
} }
int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads) int perf_evsel__enable(struct perf_evsel *evsel)
{ {
int nthreads = thread_map__nr(evsel->threads);
int ncpus = cpu_map__nr(evsel->cpus);
return perf_evsel__run_ioctl(evsel, ncpus, nthreads, return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
PERF_EVENT_IOC_ENABLE, PERF_EVENT_IOC_ENABLE,
0); 0);
} }
/*
 * Disable the counters behind @evsel on every cpu/thread it was
 * opened on, by issuing PERF_EVENT_IOC_DISABLE through
 * perf_evsel__run_ioctl(). Counterpart of perf_evsel__enable();
 * the cpu/thread dimensions are taken from the evsel's own maps
 * rather than passed in by the caller.
 * Return value is whatever perf_evsel__run_ioctl() reports —
 * presumably 0 on success, negative on ioctl failure (confirm
 * against perf_evsel__run_ioctl's contract).
 */
int perf_evsel__disable(struct perf_evsel *evsel)
{
int nthreads = thread_map__nr(evsel->threads);
int ncpus = cpu_map__nr(evsel->cpus);
return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
PERF_EVENT_IOC_DISABLE,
0);
}
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{ {
if (ncpus == 0 || nthreads == 0) if (ncpus == 0 || nthreads == 0)
......
...@@ -227,7 +227,8 @@ int perf_evsel__append_filter(struct perf_evsel *evsel, ...@@ -227,7 +227,8 @@ int perf_evsel__append_filter(struct perf_evsel *evsel,
const char *op, const char *filter); const char *op, const char *filter);
int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads, int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
const char *filter); const char *filter);
int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads); int perf_evsel__enable(struct perf_evsel *evsel);
int perf_evsel__disable(struct perf_evsel *evsel);
int perf_evsel__open_per_cpu(struct perf_evsel *evsel, int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
struct cpu_map *cpus); struct cpu_map *cpus);
......
...@@ -576,7 +576,7 @@ static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename) ...@@ -576,7 +576,7 @@ static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
if (!dup_filename) if (!dup_filename)
return; return;
dso__set_long_name(dso, filename, true); dso__set_long_name(dso, dup_filename, true);
} }
struct map *machine__findnew_module_map(struct machine *machine, u64 start, struct map *machine__findnew_module_map(struct machine *machine, u64 start,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment