Commit 1c748dc2 authored by Ingo Molnar's avatar Ingo Molnar

Merge tag 'perf-core-for-mingo' of...

Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

User visible changes:

  - Switch the default callchain output mode to 'graph,0.5,caller', to make it
    look like the default for other tools, reducing the learning curve for
    people used to 'caller' based viewing. (Arnaldo Carvalho de Melo)

  - Implement column based horizontal scrolling in the hists browser (top, report),
    making it possible to use the TUI for things like 'perf mem report' where
    there are many more columns than can fit in a terminal. (Arnaldo Carvalho de Melo)

  - Support sorting by symbol_iaddr with perf.data files produced by
    'perf mem record'. (Don Zickus)

  - Display DATA_SRC sample type bit, i.e. when running 'perf evlist -v' the
    "DATA_SRC" wasn't appearing when set, fix it to look like: (Jiri Olsa)

      cpu/mem-loads/pp: ...SNIP... sample_type: IP|TID|TIME|ADDR|CPU|PERIOD|DATA_SRC

  - Introduce the 'P' event modifier, meaning 'max precision level, please', i.e.:

     $ perf record -e cycles:P usleep 1

    Is now similar to:

     $ perf record usleep 1

    Useful, for instance, when specifying multiple events. (Jiri Olsa)

  - Make 'perf -v' and 'perf -h' work. (Jiri Olsa)

  - Fail properly when pattern matching fails to find a tracepoint, i.e.
    '-e non:existent' was being correctly handled, with a proper error message
    about that not being a valid event, but '-e non:existent*' wasn't,
    fix it. (Jiri Olsa)

Infrastructure changes:

  - Separate arch specific entries in 'perf test' and add an 'Intel CQM' one
    to be run on x86 only. (Matt Fleming)
Signed-off-by: default avatarArnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: default avatarIngo Molnar <mingo@kernel.org>
parents e3b0ac1b 27bf90bf
......@@ -69,7 +69,7 @@ static const char * const tracefs__known_mountpoints[] = {
struct fs {
const char *name;
const char * const *mounts;
char path[PATH_MAX + 1];
char path[PATH_MAX];
bool found;
long magic;
};
......
......@@ -12,9 +12,9 @@
#include "tracing_path.h"
char tracing_mnt[PATH_MAX + 1] = "/sys/kernel/debug";
char tracing_path[PATH_MAX + 1] = "/sys/kernel/debug/tracing";
char tracing_events_path[PATH_MAX + 1] = "/sys/kernel/debug/tracing/events";
char tracing_mnt[PATH_MAX] = "/sys/kernel/debug";
char tracing_path[PATH_MAX] = "/sys/kernel/debug/tracing";
char tracing_events_path[PATH_MAX] = "/sys/kernel/debug/tracing/events";
static void __tracing_path_set(const char *tracing, const char *mountpoint)
......
......@@ -30,6 +30,7 @@ counted. The following modifiers exist:
G - guest counting (in KVM guests)
H - host counting (not in KVM guests)
p - precise level
P - use maximum detected precise level
S - read sample value (PERF_SAMPLE_READ)
D - pin the event to the PMU
......
......@@ -192,7 +192,7 @@ OPTIONS
when available. Usually more convenient to use --branch-history
for this.
Default: fractal,0.5,callee,function.
Default: graph,0.5,caller
--children::
Accumulate callchain of children to parent entry so that they can
......
......@@ -27,6 +27,14 @@ OPTIONS
Setup buildid cache directory. It has higher priority than
buildid.dir config file option.
-v::
--version::
Display perf version.
-h::
--help::
Run perf help command.
DESCRIPTION
-----------
Performance counters for Linux are a new kernel-based subsystem
......
libperf-y += util/
libperf-$(CONFIG_DWARF_UNWIND) += tests/
libperf-y += tests/
#ifndef ARCH_TESTS_H
#define ARCH_TESTS_H
/* Tests */
/* x86-only 'perf test' entry points, collected in arch_tests[] below. */
int test__rdpmc(void);
int test__perf_time_to_tsc(void);
int test__insn_x86(void);
int test__intel_cqm_count_nmi_context(void);
/* The DWARF unwind test needs a sample and a thread; only declared when built. */
#ifdef HAVE_DWARF_UNWIND_SUPPORT
struct thread;
struct perf_sample;
int test__arch_unwind_sample(struct perf_sample *sample,
struct thread *thread);
#endif
/* NULL-func terminated table of arch tests, walked by the generic test driver. */
extern struct test arch_tests[];
#endif
libperf-y += regs_load.o
libperf-y += dwarf-unwind.o
libperf-$(CONFIG_DWARF_UNWIND) += regs_load.o
libperf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
libperf-y += arch-tests.o
libperf-y += rdpmc.o
libperf-y += perf-time-to-tsc.o
libperf-$(CONFIG_AUXTRACE) += insn-x86.o
libperf-y += intel-cqm.o
#include <string.h>
#include "tests/tests.h"
#include "arch-tests.h"
/*
 * x86 specific 'perf test' entries.  The table is terminated by an entry
 * whose .func is NULL, which is how the generic test iterator detects the
 * end of the list.
 */
struct test arch_tests[] = {
{
.desc = "x86 rdpmc test",
.func = test__rdpmc,
},
{
.desc = "Test converting perf time to TSC",
.func = test__perf_time_to_tsc,
},
/* Only built when DWARF unwinding support was detected at build time. */
#ifdef HAVE_DWARF_UNWIND_SUPPORT
{
.desc = "Test dwarf unwind",
.func = test__dwarf_unwind,
},
#endif
/* Instruction decoder test needs the auxtrace/Intel PT decoder code. */
#ifdef HAVE_AUXTRACE_SUPPORT
{
.desc = "Test x86 instruction decoder - new instructions",
.func = test__insn_x86,
},
#endif
{
.desc = "Test intel cqm nmi context read",
.func = test__intel_cqm_count_nmi_context,
},
/* Terminator: .func == NULL marks the end of the table. */
{
.func = NULL,
},
};
......@@ -5,6 +5,7 @@
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
#include "arch-tests.h"
#define STACK_SIZE 8192
......
#include <linux/types.h>
#include "debug.h"
#include "tests.h"
#include "tests/tests.h"
#include "arch-tests.h"
#include "intel-pt-decoder/insn.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
......
#include "tests/tests.h"
#include "perf.h"
#include "cloexec.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "arch-tests.h"
#include <sys/mman.h>
#include <string.h>
/*
 * Fork a child that busy-loops forever, generating the CPU cycles needed
 * for the sampled hardware counter in the parent to overflow and raise a
 * PMI.  Returns the child's pid in the parent; the child never returns on
 * its own — the caller kills it with SIGKILL when the test is done.
 */
static pid_t spawn(void)
{
pid_t pid;
pid = fork();
if (pid)
return pid;
/*
 * Child: spin forever.  NOTE(review): the sleep(5) and return 0 below
 * are unreachable dead code — the semicolon ends the while statement.
 */
while(1);
sleep(5);
return 0;
}
/*
* Create an event group that contains both a sampled hardware
* (cpu-cycles) and software (intel_cqm/llc_occupancy/) event. We then
* wait for the hardware perf counter to overflow and generate a PMI,
* which triggers an event read for both of the events in the group.
*
* Since reading Intel CQM event counters requires sending SMP IPIs, the
* CQM pmu needs to handle the above situation gracefully, and return
* the last read counter value to avoid triggering a WARN_ON_ONCE() in
* smp_call_function_many() caused by sending IPIs from NMI context.
*/
/*
 * 'perf test' entry point: build an event group pairing a sampling
 * hardware event (cpu-cycles, PERF_FORMAT_GROUP) with an Intel CQM
 * llc_occupancy event, so the PMI-driven group read exercises the CQM
 * pmu's read path from NMI context (see the block comment above).
 *
 * Returns TEST_OK on success, TEST_SKIP when the intel_cqm PMU is not
 * available (parse_events fails), TEST_FAIL otherwise.
 */
int test__intel_cqm_count_nmi_context(void)
{
struct perf_evlist *evlist = NULL;
struct perf_evsel *evsel = NULL;
struct perf_event_attr pe;
int i, fd[2], flag, ret;
size_t mmap_len;
void *event;
pid_t pid;
int err = TEST_FAIL;
flag = perf_event_open_cloexec_flag();
evlist = perf_evlist__new();
if (!evlist) {
pr_debug("perf_evlist__new failed\n");
return TEST_FAIL;
}
/* No intel_cqm PMU on this machine/kernel: skip rather than fail. */
ret = parse_events(evlist, "intel_cqm/llc_occupancy/", NULL);
if (ret) {
pr_debug("parse_events failed\n");
err = TEST_SKIP;
goto out;
}
evsel = perf_evlist__first(evlist);
if (!evsel) {
pr_debug("perf_evlist__first failed\n");
goto out;
}
/*
 * Group leader: a sampling cycles event whose PMI triggers a group
 * read (PERF_FORMAT_GROUP) of both events.
 */
memset(&pe, 0, sizeof(pe));
pe.size = sizeof(pe);
pe.type = PERF_TYPE_HARDWARE;
pe.config = PERF_COUNT_HW_CPU_CYCLES;
pe.read_format = PERF_FORMAT_GROUP;
pe.sample_period = 128;
pe.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_READ;
/* Child busy-loops to make the cycles counter overflow quickly. */
pid = spawn();
fd[0] = sys_perf_event_open(&pe, pid, -1, -1, flag);
if (fd[0] < 0) {
pr_debug("failed to open event\n");
goto out;
}
/* Open the CQM event as a member of the leader's group (fd[0]). */
memset(&pe, 0, sizeof(pe));
pe.size = sizeof(pe);
pe.type = evsel->attr.type;
pe.config = evsel->attr.config;
fd[1] = sys_perf_event_open(&pe, pid, -1, fd[0], flag);
if (fd[1] < 0) {
pr_debug("failed to open event\n");
goto out;
}
/*
 * Pick a power-of-two number of pages + 1 for the meta-data
 * page (struct perf_event_mmap_page). See tools/perf/design.txt.
 */
mmap_len = page_size * 65;
event = mmap(NULL, mmap_len, PROT_READ, MAP_SHARED, fd[0], 0);
if (event == (void *)(-1)) {
pr_debug("failed to mmap %d\n", errno);
goto out;
}
/* Let the child run so samples (and NMI-context reads) happen. */
sleep(1);
err = TEST_OK;
munmap(event, mmap_len);
for (i = 0; i < 2; i++)
close(fd[i]);
kill(pid, SIGKILL);
wait(NULL);
out:
perf_evlist__delete(evlist);
return err;
}
......@@ -9,7 +9,9 @@
#include "thread_map.h"
#include "cpumap.h"
#include "tsc.h"
#include "tests.h"
#include "tests/tests.h"
#include "arch-tests.h"
#define CHECK__(x) { \
while ((x) < 0) { \
......
......@@ -5,10 +5,9 @@
#include <linux/types.h>
#include "perf.h"
#include "debug.h"
#include "tests.h"
#include "tests/tests.h"
#include "cloexec.h"
#if defined(__x86_64__) || defined(__i386__)
#include "arch-tests.h"
static u64 rdpmc(unsigned int counter)
{
......@@ -173,5 +172,3 @@ int test__rdpmc(void)
return 0;
}
#endif
......@@ -633,7 +633,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
bool has_br_stack = false;
int branch_mode = -1;
bool branch_call_mode = false;
char callchain_default_opt[] = "fractal,0.5,callee";
char callchain_default_opt[] = "graph,0.5,caller";
const char * const report_usage[] = {
"perf report [<options>]",
NULL
......@@ -701,7 +701,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
"Only display entries with parent-match"),
OPT_CALLBACK_DEFAULT('g', "call-graph", &report, "output_type,min_percent[,print_limit],call_order[,branch]",
"Display callchains using output_type (graph, flat, fractal, or none) , min percent threshold, optional print limit, callchain order, key (function or address), add branches. "
"Default: fractal,0.5,callee,function", &report_parse_callchain_opt, callchain_default_opt),
"Default: graph,0.5,caller", &report_parse_callchain_opt, callchain_default_opt),
OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
"Accumulate callchains of children and show total overhead as well"),
OPT_INTEGER(0, "max-stack", &report.max_stack,
......
......@@ -161,6 +161,20 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
if (!strcmp(cmd, "--help") || !strcmp(cmd, "--version"))
break;
/*
* Shortcut for '-h' and '-v' options to invoke help
* and version command.
*/
if (!strcmp(cmd, "-h")) {
(*argv)[0] = "--help";
break;
}
if (!strcmp(cmd, "-v")) {
(*argv)[0] = "--version";
break;
}
/*
* Check remaining flags.
*/
......
......@@ -8,7 +8,6 @@ perf-y += openat-syscall-all-cpus.o
perf-y += openat-syscall-tp-fields.o
perf-y += mmap-basic.o
perf-y += perf-record.o
perf-y += rdpmc.o
perf-y += evsel-roundtrip-name.o
perf-y += evsel-tp-sched.o
perf-y += fdarray.o
......@@ -35,11 +34,6 @@ perf-y += thread-map.o
perf-y += llvm.o
perf-y += topology.o
perf-$(CONFIG_X86) += perf-time-to-tsc.o
ifdef CONFIG_AUXTRACE
perf-$(CONFIG_X86) += insn-x86.o
endif
ifeq ($(ARCH),$(filter $(ARCH),x86 arm arm64))
perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
endif
......
......@@ -14,10 +14,13 @@
#include "parse-options.h"
#include "symbol.h"
static struct test {
const char *desc;
int (*func)(void);
} tests[] = {
struct test __weak arch_tests[] = {
{
.func = NULL,
},
};
static struct test generic_tests[] = {
{
.desc = "vmlinux symtab matches kallsyms",
.func = test__vmlinux_matches_kallsyms,
......@@ -38,12 +41,6 @@ static struct test {
.desc = "parse events tests",
.func = test__parse_events,
},
#if defined(__x86_64__) || defined(__i386__)
{
.desc = "x86 rdpmc test",
.func = test__rdpmc,
},
#endif
{
.desc = "Validate PERF_RECORD_* events & perf_sample fields",
.func = test__PERF_RECORD,
......@@ -104,12 +101,6 @@ static struct test {
.desc = "Test software clock events have valid period values",
.func = test__sw_clock_freq,
},
#if defined(__x86_64__) || defined(__i386__)
{
.desc = "Test converting perf time to TSC",
.func = test__perf_time_to_tsc,
},
#endif
{
.desc = "Test object code reading",
.func = test__code_reading,
......@@ -126,14 +117,6 @@ static struct test {
.desc = "Test parsing with no sample_id_all bit set",
.func = test__parse_no_sample_id_all,
},
#if defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__)
#ifdef HAVE_DWARF_UNWIND_SUPPORT
{
.desc = "Test dwarf unwind",
.func = test__dwarf_unwind,
},
#endif
#endif
{
.desc = "Test filtering hist entries",
.func = test__hists_filter,
......@@ -178,14 +161,6 @@ static struct test {
.desc = "Test LLVM searching and compiling",
.func = test__llvm,
},
#ifdef HAVE_AUXTRACE_SUPPORT
#if defined(__x86_64__) || defined(__i386__)
{
.desc = "Test x86 instruction decoder - new instructions",
.func = test__insn_x86,
},
#endif
#endif
{
.desc = "Test topology in session",
.func = test_session_topology,
......@@ -195,6 +170,11 @@ static struct test {
},
};
/* All test tables: generic tests first, then the arch specific ones. */
static struct test *tests[] = {
generic_tests,
arch_tests,
};
static bool perf_test__matches(struct test *test, int curr, int argc, const char *argv[])
{
int i;
......@@ -249,22 +229,25 @@ static int run_test(struct test *test)
return err;
}
#define for_each_test(t) for (t = &tests[0]; t->func; t++)
/*
 * Walk every test in every table: j indexes tests[], t walks each table
 * until its NULL-func terminator entry.
 */
#define for_each_test(j, t) \
for (j = 0; j < ARRAY_SIZE(tests); j++) \
for (t = &tests[j][0]; t->func; t++)
static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
{
struct test *t;
unsigned int j;
int i = 0;
int width = 0;
for_each_test(t) {
for_each_test(j, t) {
int len = strlen(t->desc);
if (width < len)
width = len;
}
for_each_test(t) {
for_each_test(j, t) {
int curr = i++, err;
if (!perf_test__matches(t, curr, argc, argv))
......@@ -300,10 +283,11 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
static int perf_test__list(int argc, const char **argv)
{
unsigned int j;
struct test *t;
int i = 0;
for_each_test(t) {
for_each_test(j, t) {
if (argc > 1 && !strstr(t->desc, argv[1]))
continue;
......
......@@ -11,6 +11,10 @@
#include "thread.h"
#include "callchain.h"
#if defined (__x86_64__) || defined (__i386__)
#include "arch-tests.h"
#endif
/* For bsearch. We try to unwind functions in shared object. */
#include <stdlib.h>
......
......@@ -1259,6 +1259,17 @@ test__checkevent_breakpoint_len_rw_modifier(struct perf_evlist *evlist)
return test__checkevent_breakpoint_rw(evlist);
}
/*
 * Check parsing of the 'P' (max precise level) modifier: the
 * "task-clock:P,cycles" event string must produce two evsels, the first
 * being the software task-clock event.  Returns 0 on success; the
 * TEST_ASSERT_VAL macro returns an error from here on failure.
 */
static int test__checkevent_precise_max_modifier(struct perf_evlist *evlist)
{
struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->attr.type);
TEST_ASSERT_VAL("wrong config",
PERF_COUNT_SW_TASK_CLOCK == evsel->attr.config);
return 0;
}
static int count_tracepoints(void)
{
struct dirent *events_ent;
......@@ -1562,6 +1573,11 @@ static struct evlist_test test__events[] = {
.check = test__checkevent_exclude_idle_modifier_1,
.id = 46,
},
{
.name = "task-clock:P,cycles",
.check = test__checkevent_precise_max_modifier,
.id = 47,
},
};
static struct evlist_test test__events_pmu[] = {
......
......@@ -24,13 +24,17 @@ enum {
TEST_SKIP = -2,
};
struct test {
const char *desc;
int (*func)(void);
};
/* Tests */
int test__vmlinux_matches_kallsyms(void);
int test__openat_syscall_event(void);
int test__openat_syscall_event_on_all_cpus(void);
int test__basic_mmap(void);
int test__PERF_RECORD(void);
int test__rdpmc(void);
int test__perf_evsel__roundtrip_name_test(void);
int test__perf_evsel__tp_sched_test(void);
int test__syscall_openat_tp_fields(void);
......@@ -46,7 +50,6 @@ int test__bp_signal(void);
int test__bp_signal_overflow(void);
int test__task_exit(void);
int test__sw_clock_freq(void);
int test__perf_time_to_tsc(void);
int test__code_reading(void);
int test__sample_parsing(void);
int test__keep_tracking(void);
......@@ -63,10 +66,9 @@ int test__fdarray__add(void);
int test__kmod_path__parse(void);
int test__thread_map(void);
int test__llvm(void);
int test__insn_x86(void);
int test_session_topology(void);
#if defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__)
#if defined(__arm__) || defined(__aarch64__)
#ifdef HAVE_DWARF_UNWIND_SUPPORT
struct thread;
struct perf_sample;
......
......@@ -393,6 +393,7 @@ int ui_browser__run(struct ui_browser *browser, int delay_secs)
if (browser->use_navkeypressed && !browser->navkeypressed) {
if (key == K_DOWN || key == K_UP ||
(browser->columns && (key == K_LEFT || key == K_RIGHT)) ||
key == K_PGDN || key == K_PGUP ||
key == K_HOME || key == K_END ||
key == ' ') {
......@@ -421,6 +422,18 @@ int ui_browser__run(struct ui_browser *browser, int delay_secs)
browser->seek(browser, -1, SEEK_CUR);
}
break;
case K_RIGHT:
if (!browser->columns)
goto out;
if (browser->horiz_scroll < browser->columns - 1)
++browser->horiz_scroll;
break;
case K_LEFT:
if (!browser->columns)
goto out;
if (browser->horiz_scroll != 0)
--browser->horiz_scroll;
break;
case K_PGDN:
case ' ':
if (browser->top_idx + browser->rows > browser->nr_entries - 1)
......@@ -459,6 +472,7 @@ int ui_browser__run(struct ui_browser *browser, int delay_secs)
browser->seek(browser, -offset, SEEK_END);
break;
default:
out:
return key;
}
}
......
......@@ -14,7 +14,7 @@
struct ui_browser {
u64 index, top_idx;
void *top, *entries;
u16 y, x, width, height, rows;
u16 y, x, width, height, rows, columns, horiz_scroll;
int current_color;
void *priv;
const char *title;
......
......@@ -784,11 +784,12 @@ static int hist_browser__show_entry(struct hist_browser *browser,
.size = sizeof(s),
.ptr = &arg,
};
int column = 0;
hist_browser__gotorc(browser, row, 0);
perf_hpp__for_each_format(fmt) {
if (perf_hpp__should_skip(fmt))
if (perf_hpp__should_skip(fmt) || column++ < browser->b.horiz_scroll)
continue;
if (current_entry && browser->b.navkeypressed) {
......@@ -861,14 +862,16 @@ static int advance_hpp_check(struct perf_hpp *hpp, int inc)
return hpp->size <= 0;
}
static int hists__scnprintf_headers(char *buf, size_t size, struct hists *hists)
static int hists_browser__scnprintf_headers(struct hist_browser *browser, char *buf, size_t size)
{
struct hists *hists = browser->hists;
struct perf_hpp dummy_hpp = {
.buf = buf,
.size = size,
};
struct perf_hpp_fmt *fmt;
size_t ret = 0;
int column = 0;
if (symbol_conf.use_callchain) {
ret = scnprintf(buf, size, " ");
......@@ -877,7 +880,7 @@ static int hists__scnprintf_headers(char *buf, size_t size, struct hists *hists)
}
perf_hpp__for_each_format(fmt) {
if (perf_hpp__should_skip(fmt))
if (perf_hpp__should_skip(fmt) || column++ < browser->b.horiz_scroll)
continue;
ret = fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists));
......@@ -896,7 +899,7 @@ static void hist_browser__show_headers(struct hist_browser *browser)
{
char headers[1024];
hists__scnprintf_headers(headers, sizeof(headers), browser->hists);
hists_browser__scnprintf_headers(browser, headers, sizeof(headers));
ui_browser__gotorc(&browser->b, 0, 0);
ui_browser__set_color(&browser->b, HE_COLORSET_ROOT);
ui_browser__write_nstring(&browser->b, headers, browser->b.width + 1);
......@@ -1806,8 +1809,17 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
memset(options, 0, sizeof(options));
memset(actions, 0, sizeof(actions));
perf_hpp__for_each_format(fmt)
perf_hpp__for_each_format(fmt) {
perf_hpp__reset_width(fmt, hists);
/*
* This is done just once, and activates the horizontal scrolling
* code in the ui_browser code, it would be better to have a the
* counter in the perf_hpp code, but I couldn't find doing it here
* works, FIXME by setting this in hist_browser__new, for now, be
* clever 8-)
*/
++browser->b.columns;
}
if (symbol_conf.col_width_list_str)
perf_hpp__set_user_width(symbol_conf.col_width_list_str);
......
......@@ -122,7 +122,7 @@ struct annotated_source {
struct list_head source;
struct source_line *lines;
int nr_histograms;
int sizeof_sym_hist;
size_t sizeof_sym_hist;
struct cyc_hist *cycles_hist;
struct sym_hist histograms[0];
};
......
......@@ -205,7 +205,7 @@ void perf_evlist__set_leader(struct perf_evlist *evlist)
}
}
static void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
{
attr->precise_ip = 3;
......
......@@ -290,4 +290,6 @@ void perf_evlist__to_front(struct perf_evlist *evlist,
void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
struct perf_evsel *tracking_evsel);
void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr);
#endif /* __PERF_EVLIST_H */
......@@ -882,6 +882,9 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
attr->clockid = opts->clockid;
}
if (evsel->precise_max)
perf_event_attr__set_max_precise_ip(attr);
/*
* Apply event specific term settings,
* it overloads any global configuration.
......@@ -1178,7 +1181,7 @@ static void __p_sample_type(char *buf, size_t size, u64 value)
bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
bit_name(IDENTIFIER), bit_name(REGS_INTR),
bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC),
{ .name = NULL, }
};
#undef bit_name
......
......@@ -111,6 +111,7 @@ struct perf_evsel {
bool system_wide;
bool tracking;
bool per_pkg;
bool precise_max;
/* parse modifier helper */
int exclude_GH;
int nr_members;
......
......@@ -132,6 +132,18 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
symlen);
}
if (h->mem_info->iaddr.sym) {
symlen = (int)h->mem_info->iaddr.sym->namelen + 4
+ unresolved_col_width + 2;
hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
symlen);
} else {
symlen = unresolved_col_width + 4 + 2;
hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
symlen);
}
if (h->mem_info->daddr.map) {
symlen = dso__name_len(h->mem_info->daddr.map->dso);
hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
......@@ -143,6 +155,7 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
} else {
symlen = unresolved_col_width + 4 + 2;
hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
}
......
......@@ -49,6 +49,7 @@ enum hist_column {
HISTC_MEM_LVL,
HISTC_MEM_SNOOP,
HISTC_MEM_DCACHELINE,
HISTC_MEM_IADDR_SYMBOL,
HISTC_TRANSACTION,
HISTC_CYCLES,
HISTC_NR_COLS, /* Last entry */
......
......@@ -449,7 +449,7 @@ static int add_tracepoint_multi_event(struct list_head *list, int *idx,
char evt_path[MAXPATHLEN];
struct dirent *evt_ent;
DIR *evt_dir;
int ret = 0;
int ret = 0, found = 0;
snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name);
evt_dir = opendir(evt_path);
......@@ -468,10 +468,17 @@ static int add_tracepoint_multi_event(struct list_head *list, int *idx,
if (!strglobmatch(evt_ent->d_name, evt_name))
continue;
found++;
ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name,
err, head_config);
}
if (!found) {
tracepoint_error(err, ENOENT, sys_name, evt_name);
ret = -1;
}
closedir(evt_dir);
return ret;
}
......@@ -923,6 +930,7 @@ struct event_modifier {
int eG;
int eI;
int precise;
int precise_max;
int exclude_GH;
int sample_read;
int pinned;
......@@ -938,6 +946,7 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
int eG = evsel ? evsel->attr.exclude_guest : 0;
int eI = evsel ? evsel->attr.exclude_idle : 0;
int precise = evsel ? evsel->attr.precise_ip : 0;
int precise_max = 0;
int sample_read = 0;
int pinned = evsel ? evsel->attr.pinned : 0;
......@@ -974,6 +983,8 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
/* use of precise requires exclude_guest */
if (!exclude_GH)
eG = 1;
} else if (*str == 'P') {
precise_max = 1;
} else if (*str == 'S') {
sample_read = 1;
} else if (*str == 'D') {
......@@ -1004,6 +1015,7 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
mod->eG = eG;
mod->eI = eI;
mod->precise = precise;
mod->precise_max = precise_max;
mod->exclude_GH = exclude_GH;
mod->sample_read = sample_read;
mod->pinned = pinned;
......@@ -1020,7 +1032,7 @@ static int check_modifier(char *str)
char *p = str;
/* The sizeof includes 0 byte as well. */
if (strlen(str) > (sizeof("ukhGHpppSDI") - 1))
if (strlen(str) > (sizeof("ukhGHpppPSDI") - 1))
return -1;
while (*p) {
......@@ -1059,6 +1071,7 @@ int parse_events__modifier_event(struct list_head *list, char *str, bool add)
evsel->attr.exclude_idle = mod.eI;
evsel->exclude_GH = mod.exclude_GH;
evsel->sample_read = mod.sample_read;
evsel->precise_max = mod.precise_max;
if (perf_evsel__is_group_leader(evsel))
evsel->attr.pinned = mod.pinned;
......
......@@ -122,7 +122,7 @@ num_raw_hex [a-fA-F0-9]+
name [a-zA-Z_*?][a-zA-Z0-9_*?.]*
name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?.]*
/* If you add a modifier you need to update check_modifier() */
modifier_event [ukhpGHSDI]+
modifier_event [ukhpPGHSDI]+
modifier_bp [rwx]{1,3}
%%
......
......@@ -654,6 +654,35 @@ static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
width);
}
/*
 * Ordering comparator for the instruction address (iaddr) sort key.
 * Entries without mem_info compare as address 0.  Returns >0, 0 or <0
 * following the r-vs-l convention used by the other sort__*_cmp helpers.
 */
static int64_t
sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->iaddr.addr;
	if (right->mem_info)
		r = right->mem_info->iaddr.addr;

	/*
	 * Compare explicitly instead of returning (int64_t)(r - l): the
	 * unsigned subtraction wraps and yields the wrong sign whenever
	 * the two addresses are more than 2^63 apart (e.g. a kernel
	 * address vs. a user space address).
	 */
	if (r == l)
		return 0;
	return r > l ? 1 : -1;
}
/*
 * Format the instruction address (iaddr) column of a hist entry into bf:
 * resolve map/symbol when mem_info is present, otherwise address 0 is
 * printed unresolved.  Delegates the actual formatting to
 * _hist_entry__sym_snprintf() and returns its result.
 */
static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
uint64_t addr = 0;
struct map *map = NULL;
struct symbol *sym = NULL;
if (he->mem_info) {
addr = he->mem_info->iaddr.addr;
map = he->mem_info->iaddr.map;
sym = he->mem_info->iaddr.sym;
}
return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
width);
}
static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
......@@ -1077,6 +1106,13 @@ struct sort_entry sort_mem_daddr_sym = {
.se_width_idx = HISTC_MEM_DADDR_SYMBOL,
};
/*
 * Sort/display entry for the instruction address symbol column, selected
 * with the 'symbol_iaddr' sort key (see memory_sort_dimensions[]).
 */
struct sort_entry sort_mem_iaddr_sym = {
.se_header = "Code Symbol",
.se_cmp = sort__iaddr_cmp,
.se_snprintf = hist_entry__iaddr_snprintf,
.se_width_idx = HISTC_MEM_IADDR_SYMBOL,
};
struct sort_entry sort_mem_daddr_dso = {
.se_header = "Data Object",
.se_cmp = sort__dso_daddr_cmp,
......@@ -1299,6 +1335,7 @@ static struct sort_dimension bstack_sort_dimensions[] = {
static struct sort_dimension memory_sort_dimensions[] = {
DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
......
......@@ -201,6 +201,7 @@ enum sort_type {
SORT_MEM_LVL,
SORT_MEM_SNOOP,
SORT_MEM_DCACHELINE,
SORT_MEM_IADDR_SYMBOL,
};
/*
......
......@@ -17,9 +17,9 @@
#include "callchain.h"
struct callchain_param callchain_param = {
.mode = CHAIN_GRAPH_REL,
.mode = CHAIN_GRAPH_ABS,
.min_percent = 0.5,
.order = ORDER_CALLEE,
.order = ORDER_CALLER,
.key = CCKEY_FUNCTION
};
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment