Commit 0a3d23a2 authored by Ingo Molnar

Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

 * Do annotation using /proc/kcore and /proc/kallsyms, removing the
   need for a vmlinux file for kernel assembly annotation. This also improves
   this use case because vmlinux has just the initial kernel image, not
   what is actually in use after various code patching done by things like
   alternatives, etc. From Adrian Hunter.

 * Add various improvements and fixes to the "vmlinux matches kallsyms"
   'perf test' entry, related to the /proc/kcore annotation feature.

 * Add --initial-delay option to 'perf stat' to skip measuring during
   the startup phase, from Andi Kleen (a user-level sketch of the delayed
   enable follows this list).

 * Add perf kvm stat live mode that combines aspects of 'perf kvm stat' record
   and report, from David Ahern.

 * Add option to analyze specific VM in perf kvm stat report, from David Ahern.

 * Do not require /lib/modules/* on a guest, fix from Jason Wessel.

 * Group leader sampling, which allows just one event in a group to sample while
   the other events only have their values read, from Jiri Olsa (see the read
   layout sketch below).

 * Add support for a new modifier "D", which requests that the event, or group
   of events, be pinned to the PMU, from Michael Ellerman.

 * Fix segmentation fault on the gtk browser, from Namhyung Kim.
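
The kernel hunks below add a PERF_EVENT_IOC_ID ioctl so tooling can ask for an
event's sample ID directly, and the --initial-delay work opens counters disabled
and enables them only after the startup phase. The following is a minimal,
illustrative user-space sketch of both ideas; it is not part of this series and
assumes a kernel that already carries these patches (older kernels return ENOTTY
for the new ioctl, which is why the evlist code keeps a read()-based fallback):

	#include <linux/perf_event.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <string.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		struct perf_event_attr attr;
		unsigned long long id = 0, count = 0;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_CPU_CYCLES;
		attr.disabled = 1;	/* like 'perf stat -D': start disabled */

		/* perf_event_open() has no glibc wrapper, go through syscall(2) */
		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0) {
			perror("perf_event_open");
			return 1;
		}

		if (ioctl(fd, PERF_EVENT_IOC_ID, &id))	/* ENOTTY on old kernels */
			perror("PERF_EVENT_IOC_ID");

		usleep(500 * 1000);			/* skip the startup phase */
		ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);	/* then start counting    */
		usleep(100 * 1000);
		read(fd, &count, sizeof(count));

		printf("event id %llu counted %llu cycles\n", id, count);
		close(fd);
		return 0;
	}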
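With the new 'S' modifier (for example perf record -e '{cycles,cache-misses}:S',
the command the new attr test below exercises), only the group leader samples and
each PERF_SAMPLE_READ sample carries a PERF_FORMAT_GROUP | PERF_FORMAT_ID read
block for the whole group. A rough sketch of that record body, which the updated
perf_evsel__parse_sample() walks; the struct name here is illustrative only and
field presence depends on the read_format bits:

	#include <linux/types.h>

	/*
	 * PERF_SAMPLE_READ body when read_format has PERF_FORMAT_GROUP set;
	 * time_enabled/time_running appear only if the corresponding
	 * PERF_FORMAT_TOTAL_TIME_* bits are also set.  Mirrors the new
	 * struct sample_read added to tools/perf/util/event.h below.
	 */
	struct sample_read_group_body {
		__u64 nr;		/* number of events in the group        */
		__u64 time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED       */
		__u64 time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING       */
		struct {
			__u64 value;	/* counter value                        */
			__u64 id;	/* matches PERF_FORMAT_ID / IOC_ID      */
		} values[];		/* nr entries, group leader first       */
	};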
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 93786a5f cecb977e
@@ -321,6 +321,7 @@ struct perf_event_attr {
 #define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
 #define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
 #define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)
+#define PERF_EVENT_IOC_ID		_IOR('$', 7, u64 *)
 
 enum perf_event_ioc_flags {
 	PERF_IOC_FLAG_GROUP		= 1U << 0,
......
@@ -3568,6 +3568,15 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	case PERF_EVENT_IOC_PERIOD:
 		return perf_event_period(event, (u64 __user *)arg);
 
+	case PERF_EVENT_IOC_ID:
+	{
+		u64 id = primary_event_id(event);
+
+		if (copy_to_user((void __user *)arg, &id, sizeof(id)))
+			return -EFAULT;
+		return 0;
+	}
+
 	case PERF_EVENT_IOC_SET_OUTPUT:
 	{
 		int ret;
@@ -4379,7 +4388,8 @@ static void perf_output_read_group(struct perf_output_handle *handle,
 	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
 		n = 0;
 
-		if (sub != event)
+		if ((sub != event) &&
+		    (sub->state == PERF_EVENT_STATE_ACTIVE))
 			sub->pmu->read(sub);
 
 		values[n++] = perf_event_count(sub);
......
@@ -3,21 +3,6 @@ include ../../scripts/Makefile.include
 CC = $(CROSS_COMPILE)gcc
 AR = $(CROSS_COMPILE)ar
 
-# Makefiles suck: This macro sets a default value of $(2) for the
-# variable named by $(1), unless the variable has been set by
-# environment or command line. This is necessary for CC and AR
-# because make sets default values, so the simpler ?= approach
-# won't work as expected.
-define allow-override
-  $(if $(or $(findstring environment,$(origin $(1))),\
-            $(findstring command line,$(origin $(1)))),,\
-    $(eval $(1) = $(2)))
-endef
-
-# Allow setting CC and AR, or setting CROSS_COMPILE as a prefix.
-$(call allow-override,CC,$(CROSS_COMPILE)gcc)
-$(call allow-override,AR,$(CROSS_COMPILE)ar)
-
 # guard against environment variables
 LIB_H=
 LIB_OBJS=
......
@@ -29,6 +29,8 @@ counted. The following modifiers exist:
  G - guest counting (in KVM guests)
  H - host counting (not in KVM guests)
  p - precise level
+ S - read sample value (PERF_SAMPLE_READ)
+ D - pin the event to the PMU
 
 The 'p' modifier can be used for specifying how precise the instruction
 address should be. The 'p' modifier can be specified multiple times:
......
@@ -132,6 +132,11 @@ is a useful mode to detect imbalance between physical cores. To enable this mod
 use --per-core in addition to -a. (system-wide). The output includes the
 core number and the number of online logical processors on that physical processor.
 
+-D msecs::
+--initial-delay msecs::
+After starting the program, wait msecs before measuring. This is useful to
+filter out the startup phase of the program, which is often very different.
+
 EXAMPLES
 --------
......
@@ -392,6 +392,7 @@ LIB_OBJS += $(OUTPUT)tests/sw-clock.o
 ifeq ($(ARCH),x86)
 LIB_OBJS += $(OUTPUT)tests/perf-time-to-tsc.o
 endif
+LIB_OBJS += $(OUTPUT)tests/code-reading.o
 
 BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
 BUILTIN_OBJS += $(OUTPUT)builtin-bench.o
......
@@ -10,11 +10,11 @@
 
 u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc)
 {
-	u64 time, quot, rem;
+	u64 t, quot, rem;
 
-	time = ns - tc->time_zero;
-	quot = time / tc->time_mult;
-	rem  = time % tc->time_mult;
+	t = ns - tc->time_zero;
+	quot = t / tc->time_mult;
+	rem  = t % tc->time_mult;
 	return (quot << tc->time_shift) +
 	       (rem << tc->time_shift) / tc->time_mult;
 }
......
@@ -206,7 +206,7 @@ static int perf_event__inject_buildid(struct perf_tool *tool,
 	}
 
 	thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
-			      event->ip.ip, &al);
+			      event->ip.ip, &al, NULL);
 
 	if (al.map != NULL) {
 		if (!al.map->dso->hit) {
......
@@ -341,10 +341,10 @@ static void print_sample_addr(union perf_event *event,
 		return;
 
 	thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
-			      sample->addr, &al);
+			      sample->addr, &al, NULL);
 	if (!al.map)
 		thread__find_addr_map(thread, machine, cpumode, MAP__VARIABLE,
-				      sample->addr, &al);
+				      sample->addr, &al, NULL);
 
 	al.cpu = sample->cpu;
 	al.sym = NULL;
......
@@ -100,6 +100,7 @@ static const char *pre_cmd = NULL;
 static const char *post_cmd = NULL;
 static bool sync_run = false;
 static unsigned int interval = 0;
+static unsigned int initial_delay = 0;
 static bool forever = false;
 static struct timespec ref_time;
 static struct cpu_map *aggr_map;
@@ -254,7 +255,8 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
 	if (!perf_target__has_task(&target) &&
 	    perf_evsel__is_group_leader(evsel)) {
 		attr->disabled = 1;
-		attr->enable_on_exec = 1;
+		if (!initial_delay)
+			attr->enable_on_exec = 1;
 	}
 
 	return perf_evsel__open_per_thread(evsel, evsel_list->threads);
@@ -414,6 +416,22 @@ static void print_interval(void)
 		list_for_each_entry(counter, &evsel_list->entries, node)
 			print_counter_aggr(counter, prefix);
 	}
+
+	fflush(output);
+}
+
+static void handle_initial_delay(void)
+{
+	struct perf_evsel *counter;
+
+	if (initial_delay) {
+		const int ncpus = cpu_map__nr(evsel_list->cpus),
+			nthreads = thread_map__nr(evsel_list->threads);
+
+		usleep(initial_delay * 1000);
+		list_for_each_entry(counter, &evsel_list->entries, node)
+			perf_evsel__enable(counter, ncpus, nthreads);
+	}
 }
 
 static int __run_perf_stat(int argc, const char **argv)
@@ -486,6 +504,7 @@ static int __run_perf_stat(int argc, const char **argv)
 
 	if (forks) {
 		perf_evlist__start_workload(evsel_list);
+		handle_initial_delay();
 
 		if (interval) {
 			while (!waitpid(child_pid, &status, WNOHANG)) {
@@ -497,6 +516,7 @@ static int __run_perf_stat(int argc, const char **argv)
 		if (WIFSIGNALED(status))
 			psignal(WTERMSIG(status), argv[0]);
 	} else {
+		handle_initial_delay();
 		while (!done) {
 			nanosleep(&ts, NULL);
 			if (interval)
@@ -1419,6 +1439,8 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
 		     "aggregate counts per processor socket", AGGR_SOCKET),
 	OPT_SET_UINT(0, "per-core", &aggr_mode,
 		     "aggregate counts per physical processor core", AGGR_CORE),
+	OPT_UINTEGER('D', "delay", &initial_delay,
+		     "ms to wait before starting measurement after program start"),
 	OPT_END()
 	};
 	const char * const stat_usage[] = {
......
@@ -103,7 +103,8 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
 	/*
 	 * We can't annotate with just /proc/kallsyms
 	 */
-	if (map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS) {
+	if (map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
+	    !dso__is_kcore(map->dso)) {
 		pr_err("Can't annotate %s: No vmlinux file was found in the "
 		       "path\n", sym->name);
 		sleep(1);
@@ -238,8 +239,6 @@ static void perf_top__show_details(struct perf_top *top)
 	pthread_mutex_unlock(&notes->lock);
 }
 
-static const char CONSOLE_CLEAR[] = "";
-
 static struct hist_entry *perf_evsel__add_hist_entry(struct perf_evsel *evsel,
 						     struct addr_location *al,
 						     struct perf_sample *sample)
......
@@ -18,6 +18,7 @@ static struct syscall_fmt {
 } syscall_fmts[] = {
 	{ .name = "access",	.errmsg = true, },
 	{ .name = "arch_prctl",	.errmsg = true, .alias = "prctl", },
+	{ .name = "connect",	.errmsg = true, },
 	{ .name = "fstat",	.errmsg = true, .alias = "newfstat", },
 	{ .name = "fstatat",	.errmsg = true, .alias = "newfstatat", },
 	{ .name = "futex",	.errmsg = true, },
......
@@ -21,7 +21,7 @@ def main():
     evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
                        wakeup_events = 1, watermark = 1,
                        sample_id_all = 1,
-                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
+                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
     evsel.open(cpus = cpus, threads = threads);
     evlist = perf.evlist(cpus, threads)
     evlist.add(evsel)
......
[config]
command = record
args = -e '{cycles,cache-misses}:S' kill >/dev/null 2>&1
[event-1:base-record]
fd=1
group_fd=-1
sample_type=343
read_format=12
inherit=0
[event-2:base-record]
fd=2
group_fd=1
# cache-misses
type=0
config=3
# default | PERF_SAMPLE_READ
sample_type=343
# PERF_FORMAT_ID | PERF_FORMAT_GROUP
read_format=12
mmap=0
comm=0
enable_on_exec=0
disabled=0
# inherit is disabled for group sampling
inherit=0
# sampling disabled
sample_freq=0
sample_period=0
@@ -99,6 +99,10 @@ static struct test {
 		.func = test__perf_time_to_tsc,
 	},
 #endif
+	{
+		.desc = "Test object code reading",
+		.func = test__code_reading,
+	},
 	{
 		.func = NULL,
 	},
......
@@ -36,5 +36,6 @@ int test__bp_signal_overflow(void);
 int test__task_exit(void);
 int test__sw_clock_freq(void);
 int test__perf_time_to_tsc(void);
+int test__code_reading(void);
 
 #endif /* TESTS_H */
@@ -16,6 +16,8 @@ static int vmlinux_matches_kallsyms_filter(struct map *map __maybe_unused,
 	return 0;
 }
 
+#define UM(x) kallsyms_map->unmap_ip(kallsyms_map, (x))
+
 int test__vmlinux_matches_kallsyms(void)
 {
 	int err = -1;
@@ -25,6 +27,7 @@ int test__vmlinux_matches_kallsyms(void)
 	struct machine kallsyms, vmlinux;
 	enum map_type type = MAP__FUNCTION;
 	struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };
+	u64 mem_start, mem_end;
 
 	/*
 	 * Step 1:
@@ -73,7 +76,7 @@ int test__vmlinux_matches_kallsyms(void)
 		goto out;
 	}
 
-	ref_reloc_sym.addr = sym->start;
+	ref_reloc_sym.addr = UM(sym->start);
 
 	/*
 	 * Step 5:
@@ -123,10 +126,14 @@ int test__vmlinux_matches_kallsyms(void)
 		if (sym->start == sym->end)
 			continue;
 
-		first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL);
+		mem_start = vmlinux_map->unmap_ip(vmlinux_map, sym->start);
+		mem_end = vmlinux_map->unmap_ip(vmlinux_map, sym->end);
+
+		first_pair = machine__find_kernel_symbol(&kallsyms, type,
+							 mem_start, NULL, NULL);
 		pair = first_pair;
 
-		if (pair && pair->start == sym->start) {
+		if (pair && UM(pair->start) == mem_start) {
 next_pair:
 			if (strcmp(sym->name, pair->name) == 0) {
 				/*
@@ -138,10 +145,11 @@ int test__vmlinux_matches_kallsyms(void)
 				 * off the real size. More than that and we
 				 * _really_ have a problem.
 				 */
-				s64 skew = sym->end - pair->end;
+				s64 skew = mem_end - UM(pair->end);
 				if (llabs(skew) >= page_size)
 					pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
-						 sym->start, sym->name, sym->end, pair->end);
+						 mem_start, sym->name, mem_end,
+						 UM(pair->end));
 
 				/*
 				 * Do not count this as a failure, because we
@@ -159,7 +167,7 @@ int test__vmlinux_matches_kallsyms(void)
 				if (nnd) {
 					struct symbol *next = rb_entry(nnd, struct symbol, rb_node);
 
-					if (next->start == sym->start) {
+					if (UM(next->start) == mem_start) {
 						pair = next;
 						goto next_pair;
 					}
@@ -172,10 +180,11 @@ int test__vmlinux_matches_kallsyms(void)
 				}
 
 				pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
-					 sym->start, sym->name, pair->name);
+					 mem_start, sym->name, pair->name);
 			}
 		} else
-			pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);
+			pr_debug("%#" PRIx64 ": %s not on kallsyms\n",
+				 mem_start, sym->name);
 
 		err = -1;
 	}
@@ -208,16 +217,19 @@ int test__vmlinux_matches_kallsyms(void)
 	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
 		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
 
-		pair = map_groups__find(&kallsyms.kmaps, type, pos->start);
+		mem_start = vmlinux_map->unmap_ip(vmlinux_map, pos->start);
+		mem_end = vmlinux_map->unmap_ip(vmlinux_map, pos->end);
+
+		pair = map_groups__find(&kallsyms.kmaps, type, mem_start);
 		if (pair == NULL || pair->priv)
 			continue;
 
-		if (pair->start == pos->start) {
+		if (pair->start == mem_start) {
 			pair->priv = 1;
 			pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
 				pos->start, pos->end, pos->pgoff, pos->dso->name);
-			if (pos->pgoff != pair->pgoff || pos->end != pair->end)
-				pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
+			if (mem_end != pair->end)
+				pr_info(":\n*%" PRIx64 "-%" PRIx64 " %" PRIx64,
 					pair->start, pair->end, pair->pgoff);
 			pr_info(" %s\n", pair->dso->name);
 			pair->priv = 1;
......
@@ -428,6 +428,14 @@ static void annotate_browser__init_asm_mode(struct annotate_browser *browser)
 	browser->b.nr_entries = browser->nr_asm_entries;
 }
 
+#define SYM_TITLE_MAX_SIZE (PATH_MAX + 64)
+
+static int sym_title(struct symbol *sym, struct map *map, char *title,
+		     size_t sz)
+{
+	return snprintf(title, sz, "%s %s", sym->name, map->dso->long_name);
+}
+
 static bool annotate_browser__callq(struct annotate_browser *browser,
 				    struct perf_evsel *evsel,
 				    struct hist_browser_timer *hbt)
@@ -438,6 +446,7 @@ static bool annotate_browser__callq(struct annotate_browser *browser,
 	struct annotation *notes;
 	struct symbol *target;
 	u64 ip;
+	char title[SYM_TITLE_MAX_SIZE];
 
 	if (!ins__is_call(dl->ins))
 		return false;
@@ -461,7 +470,8 @@ static bool annotate_browser__callq(struct annotate_browser *browser,
 	pthread_mutex_unlock(&notes->lock);
 	symbol__tui_annotate(target, ms->map, evsel, hbt);
-	ui_browser__show_title(&browser->b, sym->name);
+	sym_title(sym, ms->map, title, sizeof(title));
+	ui_browser__show_title(&browser->b, title);
 	return true;
 }
 
@@ -495,7 +505,7 @@ static bool annotate_browser__jump(struct annotate_browser *browser)
 	dl = annotate_browser__find_offset(browser, dl->ops.target.offset, &idx);
 	if (dl == NULL) {
-		ui_helpline__puts("Invallid jump offset");
+		ui_helpline__puts("Invalid jump offset");
 		return true;
 	}
 
@@ -653,8 +663,10 @@ static int annotate_browser__run(struct annotate_browser *browser,
 	const char *help = "Press 'h' for help on key bindings";
 	int delay_secs = hbt ? hbt->refresh : 0;
 	int key;
+	char title[SYM_TITLE_MAX_SIZE];
 
-	if (ui_browser__show(&browser->b, sym->name, help) < 0)
+	sym_title(sym, ms->map, title, sizeof(title));
+	if (ui_browser__show(&browser->b, title, help) < 0)
 		return -1;
 
 	annotate_browser__calc_percent(browser, evsel);
@@ -720,7 +732,7 @@ static int annotate_browser__run(struct annotate_browser *browser,
 		"s             Toggle source code view\n"
 		"/             Search string\n"
 		"r             Run available scripts\n"
-		"?             Search previous string\n");
+		"?             Search string backwards\n");
 			continue;
 		case 'r':
 		{
......
@@ -109,8 +109,6 @@ __HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us)
 
 void perf_gtk__init_hpp(void)
 {
-	perf_hpp__column_enable(PERF_HPP__OVERHEAD);
-
 	perf_hpp__init();
 
 	perf_hpp__format[PERF_HPP__OVERHEAD].color =
......
@@ -110,10 +110,10 @@ static int jump__parse(struct ins_operands *ops)
 {
 	const char *s = strchr(ops->raw, '+');
 
-	ops->target.addr = strtoll(ops->raw, NULL, 16);
+	ops->target.addr = strtoull(ops->raw, NULL, 16);
 
 	if (s++ != NULL)
-		ops->target.offset = strtoll(s, NULL, 16);
+		ops->target.offset = strtoull(s, NULL, 16);
 	else
 		ops->target.offset = UINT64_MAX;
 
@@ -821,11 +821,55 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
 	if (dl == NULL)
 		return -1;
 
+	if (dl->ops.target.offset == UINT64_MAX)
+		dl->ops.target.offset = dl->ops.target.addr -
+					map__rip_2objdump(map, sym->start);
+
+	/*
+	 * kcore has no symbols, so add the call target name if it is on the
+	 * same map.
+	 */
+	if (dl->ins && ins__is_call(dl->ins) && !dl->ops.target.name) {
+		struct symbol *s;
+		u64 ip = dl->ops.target.addr;
+
+		if (ip >= map->start && ip <= map->end) {
+			ip = map->map_ip(map, ip);
+			s = map__find_symbol(map, ip, NULL);
+			if (s && s->start == ip)
+				dl->ops.target.name = strdup(s->name);
+		}
+	}
+
 	disasm__add(&notes->src->source, dl);
 
 	return 0;
 }
 
+static void delete_last_nop(struct symbol *sym)
+{
+	struct annotation *notes = symbol__annotation(sym);
+	struct list_head *list = &notes->src->source;
+	struct disasm_line *dl;
+
+	while (!list_empty(list)) {
+		dl = list_entry(list->prev, struct disasm_line, node);
+
+		if (dl->ins && dl->ins->ops) {
+			if (dl->ins->ops != &nop_ops)
+				return;
+		} else {
+			if (!strstr(dl->line, " nop ") &&
+			    !strstr(dl->line, " nopl ") &&
+			    !strstr(dl->line, " nopw "))
+				return;
+		}
+
+		list_del(&dl->node);
+		disasm_line__free(dl);
+	}
+}
+
 int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize)
 {
 	struct dso *dso = map->dso;
@@ -864,7 +908,8 @@ int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize)
 		free_filename = false;
 	}
 
-	if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS) {
+	if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
+	    !dso__is_kcore(dso)) {
 		char bf[BUILD_ID_SIZE * 2 + 16] = " with build id ";
 		char *build_id_msg = NULL;
 
@@ -898,7 +943,7 @@ int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize)
 	snprintf(command, sizeof(command),
 		 "%s %s%s --start-address=0x%016" PRIx64
 		 " --stop-address=0x%016" PRIx64
-		 " -d %s %s -C %s|grep -v %s|expand",
+		 " -d %s %s -C %s 2>/dev/null|grep -v %s|expand",
 		 objdump_path ? objdump_path : "objdump",
 		 disassembler_style ? "-M " : "",
 		 disassembler_style ? disassembler_style : "",
@@ -918,6 +963,13 @@ int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize)
 		if (symbol__parse_objdump_line(sym, map, file, privsize) < 0)
 			break;
 
+	/*
+	 * kallsyms does not have symbol sizes so there may a nop at the end.
+	 * Remove it.
+	 */
+	if (dso__is_kcore(dso))
+		delete_last_nop(sym);
+
 	pclose(file);
 
 out_free_filename:
 	if (free_filename)
......
@@ -33,7 +33,7 @@ int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused,
 	}
 
 	thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
-			      event->ip.ip, &al);
+			      event->ip.ip, &al, NULL);
 
 	if (al.map != NULL)
 		al.map->dso->hit = 1;
......
@@ -78,6 +78,8 @@ int dso__binary_type_file(struct dso *dso, enum dso_binary_type type,
 			 symbol_conf.symfs, build_id_hex, build_id_hex + 2);
 		break;
 
+	case DSO_BINARY_TYPE__VMLINUX:
+	case DSO_BINARY_TYPE__GUEST_VMLINUX:
 	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
 		snprintf(file, size, "%s%s",
 			 symbol_conf.symfs, dso->long_name);
@@ -93,11 +95,14 @@ int dso__binary_type_file(struct dso *dso, enum dso_binary_type type,
 			 dso->long_name);
 		break;
 
+	case DSO_BINARY_TYPE__KCORE:
+	case DSO_BINARY_TYPE__GUEST_KCORE:
+		snprintf(file, size, "%s", dso->long_name);
+		break;
+
 	default:
 	case DSO_BINARY_TYPE__KALLSYMS:
-	case DSO_BINARY_TYPE__VMLINUX:
 	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
-	case DSO_BINARY_TYPE__GUEST_VMLINUX:
 	case DSO_BINARY_TYPE__JAVA_JIT:
 	case DSO_BINARY_TYPE__NOT_FOUND:
 		ret = -1;
@@ -419,6 +424,7 @@ struct dso *dso__new(const char *name)
 		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
 		dso->data_type = DSO_BINARY_TYPE__NOT_FOUND;
 		dso->loaded = 0;
+		dso->rel = 0;
 		dso->sorted_by_name = 0;
 		dso->has_build_id = 0;
 		dso->kernel = DSO_TYPE_USER;
......
@@ -3,6 +3,7 @@
 
 #include <linux/types.h>
 #include <linux/rbtree.h>
+#include <stdbool.h>
 #include "types.h"
 #include "map.h"
 
@@ -20,6 +21,8 @@ enum dso_binary_type {
 	DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
 	DSO_BINARY_TYPE__GUEST_KMODULE,
 	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
+	DSO_BINARY_TYPE__KCORE,
+	DSO_BINARY_TYPE__GUEST_KCORE,
 	DSO_BINARY_TYPE__NOT_FOUND,
 };
 
@@ -84,6 +87,7 @@ struct dso {
 	u8 lname_alloc:1;
 	u8 sorted_by_name;
 	u8 loaded;
+	u8 rel;
 	u8 build_id[BUILD_ID_SIZE];
 	const char *short_name;
 	char *long_name;
@@ -146,4 +150,17 @@ size_t dso__fprintf_buildid(struct dso *dso, FILE *fp);
 size_t dso__fprintf_symbols_by_name(struct dso *dso,
 				    enum map_type type, FILE *fp);
 size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp);
+
+static inline bool dso__is_vmlinux(struct dso *dso)
+{
+	return dso->data_type == DSO_BINARY_TYPE__VMLINUX ||
+	       dso->data_type == DSO_BINARY_TYPE__GUEST_VMLINUX;
+}
+
+static inline bool dso__is_kcore(struct dso *dso)
+{
+	return dso->data_type == DSO_BINARY_TYPE__KCORE ||
+	       dso->data_type == DSO_BINARY_TYPE__GUEST_KCORE;
+}
+
 #endif /* __PERF_DSO */
@@ -592,9 +592,10 @@ int perf_event__process(struct perf_tool *tool __maybe_unused,
 void thread__find_addr_map(struct thread *self,
 			   struct machine *machine, u8 cpumode,
 			   enum map_type type, u64 addr,
-			   struct addr_location *al)
+			   struct addr_location *al, symbol_filter_t filter)
 {
 	struct map_groups *mg = &self->mg;
+	bool load_map = false;
 
 	al->thread = self;
 	al->addr = addr;
@@ -609,11 +610,13 @@ void thread__find_addr_map(struct thread *self,
 	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
 		al->level = 'k';
 		mg = &machine->kmaps;
+		load_map = true;
 	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
 		al->level = '.';
 	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
 		al->level = 'g';
 		mg = &machine->kmaps;
+		load_map = true;
 	} else {
 		/*
 		 * 'u' means guest os user space.
@@ -654,8 +657,15 @@ void thread__find_addr_map(struct thread *self,
 			mg = &machine->kmaps;
 			goto try_again;
 		}
-	} else
+	} else {
+		/*
+		 * Kernel maps might be changed when loading symbols so loading
+		 * must be done prior to using kernel maps.
+		 */
+		if (load_map)
+			map__load(al->map, filter);
 		al->addr = al->map->map_ip(al->map, al->addr);
+	}
 }
 
 void thread__find_addr_location(struct thread *thread, struct machine *machine,
@@ -663,7 +673,7 @@ void thread__find_addr_location(struct thread *thread, struct machine *machine,
 				struct addr_location *al,
 				symbol_filter_t filter)
 {
-	thread__find_addr_map(thread, machine, cpumode, type, addr, al);
+	thread__find_addr_map(thread, machine, cpumode, type, addr, al, filter);
 	if (al->map != NULL)
 		al->sym = map__find_symbol(al->map, al->addr, filter);
 	else
@@ -699,7 +709,7 @@ int perf_event__preprocess_sample(const union perf_event *event,
 		machine__create_kernel_maps(machine);
 
 	thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
-			      event->ip.ip, al);
+			      event->ip.ip, al, filter);
 	dump_printf(" ...... dso: %s\n",
 		    al->map ? al->map->dso->long_name :
 			al->level == 'H' ? "[hypervisor]" : "<not found>");
......
@@ -80,6 +80,23 @@ struct stack_dump {
 	char *data;
 };
 
+struct sample_read_value {
+	u64 value;
+	u64 id;
+};
+
+struct sample_read {
+	u64 time_enabled;
+	u64 time_running;
+	union {
+		struct {
+			u64 nr;
+			struct sample_read_value *values;
+		} group;
+		struct sample_read_value one;
+	};
+};
+
 struct perf_sample {
 	u64 ip;
 	u32 pid, tid;
@@ -97,6 +114,7 @@ struct perf_sample {
 	struct branch_stack *branch_stack;
 	struct regs_dump user_regs;
 	struct stack_dump user_stack;
+	struct sample_read read;
 };
 
 #define PERF_MEM_DATA_SRC_NONE \
......
@@ -302,6 +302,24 @@ static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
 {
 	u64 read_data[4] = { 0, };
 	int id_idx = 1; /* The first entry is the counter value */
+	u64 id;
+	int ret;
+
+	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
+	if (!ret)
+		goto add;
+
+	if (errno != ENOTTY)
+		return -1;
+
+	/* Legacy way to get event id.. All hail to old kernels! */
+
+	/*
+	 * This way does not work with group format read, so bail
+	 * out in that case.
+	 */
+	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
+		return -1;
 
 	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
 	    read(fd, &read_data, sizeof(read_data)) == -1)
@@ -312,25 +330,39 @@ static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
 	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
 		++id_idx;
 
-	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
+	id = read_data[id_idx];
+
+add:
+	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
 	return 0;
 }
 
-struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
+struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
 {
 	struct hlist_head *head;
 	struct perf_sample_id *sid;
 	int hash;
 
-	if (evlist->nr_entries == 1)
-		return perf_evlist__first(evlist);
-
 	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
 	head = &evlist->heads[hash];
 
 	hlist_for_each_entry(sid, head, node)
 		if (sid->id == id)
-			return sid->evsel;
+			return sid;
+
+	return NULL;
+}
+
+struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
+{
+	struct perf_sample_id *sid;
+
+	if (evlist->nr_entries == 1)
+		return perf_evlist__first(evlist);
+
+	sid = perf_evlist__id2sid(evlist, id);
+	if (sid)
+		return sid->evsel;
 
 	if (!perf_evlist__sample_id_all(evlist))
 		return perf_evlist__first(evlist);
@@ -662,6 +694,32 @@ u64 perf_evlist__sample_type(struct perf_evlist *evlist)
 	return first->attr.sample_type;
 }
 
+bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
+{
+	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
+	u64 read_format = first->attr.read_format;
+	u64 sample_type = first->attr.sample_type;
+
+	list_for_each_entry_continue(pos, &evlist->entries, node) {
+		if (read_format != pos->attr.read_format)
+			return false;
+	}
+
+	/* PERF_SAMPLE_READ imples PERF_FORMAT_ID. */
+	if ((sample_type & PERF_SAMPLE_READ) &&
+	    !(read_format & PERF_FORMAT_ID)) {
+		return false;
+	}
+
+	return true;
+}
+
+u64 perf_evlist__read_format(struct perf_evlist *evlist)
+{
+	struct perf_evsel *first = perf_evlist__first(evlist);
+	return first->attr.read_format;
+}
+
 u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
 {
 	struct perf_evsel *first = perf_evlist__first(evlist);
@@ -778,13 +836,6 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist,
 		close(go_pipe[1]);
 		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
 
-		/*
-		 * Do a dummy execvp to get the PLT entry resolved,
-		 * so we avoid the resolver overhead on the real
-		 * execvp call.
-		 */
-		execvp("", (char **)argv);
-
 		/*
 		 * Tell the parent we're ready to go
 		 */
......
@@ -78,6 +78,8 @@ void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
 
 struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
 
+struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);
+
 union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
 
 int perf_evlist__open(struct perf_evlist *evlist);
@@ -118,6 +120,7 @@ int perf_evlist__apply_filters(struct perf_evlist *evlist);
 void __perf_evlist__set_leader(struct list_head *list);
 void perf_evlist__set_leader(struct perf_evlist *evlist);
 
+u64 perf_evlist__read_format(struct perf_evlist *evlist);
 u64 perf_evlist__sample_type(struct perf_evlist *evlist);
 bool perf_evlist__sample_id_all(struct perf_evlist *evlist);
 u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist);
@@ -127,6 +130,7 @@ int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *even
 
 bool perf_evlist__valid_sample_type(struct perf_evlist *evlist);
 bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist);
+bool perf_evlist__valid_read_format(struct perf_evlist *evlist);
 
 void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
 				   struct list_head *list,
......
@@ -490,6 +490,7 @@ int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
 void perf_evsel__config(struct perf_evsel *evsel,
 			struct perf_record_opts *opts)
 {
+	struct perf_evsel *leader = evsel->leader;
 	struct perf_event_attr *attr = &evsel->attr;
 	int track = !evsel->idx; /* only the first counter needs these */
 
@@ -499,6 +500,25 @@ void perf_evsel__config(struct perf_evsel *evsel,
 	perf_evsel__set_sample_bit(evsel, IP);
 	perf_evsel__set_sample_bit(evsel, TID);
 
+	if (evsel->sample_read) {
+		perf_evsel__set_sample_bit(evsel, READ);
+
+		/*
+		 * We need ID even in case of single event, because
+		 * PERF_SAMPLE_READ process ID specific data.
+		 */
+		perf_evsel__set_sample_id(evsel);
+
+		/*
+		 * Apply group format only if we belong to group
+		 * with more than one members.
+		 */
+		if (leader->nr_members > 1) {
+			attr->read_format |= PERF_FORMAT_GROUP;
+			attr->inherit = 0;
+		}
+	}
+
 	/*
 	 * We default some events to a 1 default interval. But keep
 	 * it a weak assumption overridable by the user.
@@ -514,6 +534,15 @@ void perf_evsel__config(struct perf_evsel *evsel,
 		}
 	}
 
+	/*
+	 * Disable sampling for all group members other
+	 * than leader in case leader 'leads' the sampling.
+	 */
+	if ((leader != evsel) && leader->sample_read) {
+		attr->sample_freq   = 0;
+		attr->sample_period = 0;
+	}
+
 	if (opts->no_samples)
 		attr->sample_freq = 0;
 
@@ -605,15 +634,15 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
 	return evsel->fd != NULL ? 0 : -ENOMEM;
 }
 
-int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
-			   const char *filter)
+static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads,
+				 int ioc, void *arg)
 {
 	int cpu, thread;
 
 	for (cpu = 0; cpu < ncpus; cpu++) {
 		for (thread = 0; thread < nthreads; thread++) {
 			int fd = FD(evsel, cpu, thread),
-			    err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
+			    err = ioctl(fd, ioc, arg);
 
 			if (err)
 				return err;
@@ -623,6 +652,21 @@ int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
 	return 0;
 }
 
+int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
+			   const char *filter)
+{
+	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
+				     PERF_EVENT_IOC_SET_FILTER,
+				     (void *)filter);
+}
+
+int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
+				     PERF_EVENT_IOC_ENABLE,
+				     0);
+}
+
 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
 {
 	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
@@ -1096,8 +1140,34 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
 	}
 
 	if (type & PERF_SAMPLE_READ) {
-		fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
-		return -1;
+		u64 read_format = evsel->attr.read_format;
+
+		if (read_format & PERF_FORMAT_GROUP)
+			data->read.group.nr = *array;
+		else
+			data->read.one.value = *array;
+
+		array++;
+
+		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+			data->read.time_enabled = *array;
+			array++;
+		}
+
+		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+			data->read.time_running = *array;
+			array++;
+		}
+
+		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+		if (read_format & PERF_FORMAT_GROUP) {
+			data->read.group.values = (struct sample_read_value *) array;
+			array = (void *) array + data->read.group.nr *
+				sizeof(struct sample_read_value);
+		} else {
+			data->read.one.id = *array;
+			array++;
+		}
 	}
 
 	if (type & PERF_SAMPLE_CALLCHAIN) {
......
@@ -38,6 +38,9 @@ struct perf_sample_id {
 	struct hlist_node node;
 	u64 id;
 	struct perf_evsel *evsel;
+
+	/* Holds total ID period value for PERF_SAMPLE_READ processing. */
+	u64 period;
 };
 
 /** struct perf_evsel - event selector
@@ -76,6 +79,7 @@ struct perf_evsel {
 	/* parse modifier helper */
 	int exclude_GH;
 	int nr_members;
+	int sample_read;
 	struct perf_evsel *leader;
 	char *group_name;
 };
@@ -142,6 +146,7 @@ void perf_evsel__set_sample_id(struct perf_evsel *evsel);
 
 int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
 			   const char *filter);
+int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads);
 
 int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
 			     struct cpu_map *cpus);
......
@@ -628,10 +628,8 @@ int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
 	struct map *map = machine->vmlinux_maps[type];
 	int ret = dso__load_vmlinux_path(map->dso, map, filter);
 
-	if (ret > 0) {
+	if (ret > 0)
 		dso__set_loaded(map->dso, type);
-		map__reloc_vmlinux(map);
-	}
 
 	return ret;
 }
@@ -808,7 +806,10 @@ static int machine__create_modules(struct machine *machine)
 	free(line);
 	fclose(file);
 
-	return machine__set_modules_path(machine);
+	if (machine__set_modules_path(machine) < 0) {
+		pr_debug("Problems setting modules path maps, continuing anyway...\n");
+	}
+	return 0;
 
 out_delete_line:
 	free(line);
@@ -858,6 +859,18 @@ static void machine__set_kernel_mmap_len(struct machine *machine,
 	}
 }
 
+static bool machine__uses_kcore(struct machine *machine)
+{
+	struct dso *dso;
+
+	list_for_each_entry(dso, &machine->kernel_dsos, node) {
+		if (dso__is_kcore(dso))
+			return true;
+	}
+
+	return false;
+}
+
 static int machine__process_kernel_mmap_event(struct machine *machine,
 					      union perf_event *event)
 {
@@ -866,6 +879,10 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
 	enum dso_kernel_type kernel_type;
 	bool is_kernel_mmap;
 
+	/* If we have maps from kcore then we do not need or want any others */
+	if (machine__uses_kcore(machine))
+		return 0;
+
 	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
 	if (machine__is_host(machine))
 		kernel_type = DSO_TYPE_KERNEL;
......
@@ -182,12 +182,6 @@ int map__load(struct map *map, symbol_filter_t filter)
 #endif
 		return -1;
 	}
-	/*
-	 * Only applies to the kernel, as its symtabs aren't relative like the
-	 * module ones.
-	 */
-	if (map->dso->kernel)
-		map__reloc_vmlinux(map);
 
 	return 0;
 }
@@ -254,14 +248,18 @@ size_t map__fprintf_dsoname(struct map *map, FILE *fp)
 
 /*
  * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
- * map->dso->adjust_symbols==1 for ET_EXEC-like cases.
+ * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
+ * relative to section start.
  */
 u64 map__rip_2objdump(struct map *map, u64 rip)
 {
-	u64 addr = map->dso->adjust_symbols ?
-			map->unmap_ip(map, rip) :	/* RIP -> IP */
-			rip;
-	return addr;
+	if (!map->dso->adjust_symbols)
+		return rip;
+
+	if (map->dso->rel)
+		return rip - map->pgoff;
+
+	return map->unmap_ip(map, rip);
 }
 
 void map_groups__init(struct map_groups *mg)
@@ -513,35 +511,6 @@ int map_groups__clone(struct map_groups *mg,
 	return 0;
 }
 
-static u64 map__reloc_map_ip(struct map *map, u64 ip)
-{
-	return ip + (s64)map->pgoff;
-}
-
-static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
-{
-	return ip - (s64)map->pgoff;
-}
-
-void map__reloc_vmlinux(struct map *map)
-{
-	struct kmap *kmap = map__kmap(map);
-	s64 reloc;
-
-	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
-		return;
-
-	reloc = (kmap->ref_reloc_sym->unrelocated_addr -
-		 kmap->ref_reloc_sym->addr);
-
-	if (!reloc)
-		return;
-
-	map->map_ip = map__reloc_map_ip;
-	map->unmap_ip = map__reloc_unmap_ip;
-	map->pgoff = reloc;
-}
-
 void maps__insert(struct rb_root *maps, struct map *map)
 {
 	struct rb_node **p = &maps->rb_node;
@@ -586,3 +555,21 @@ struct map *maps__find(struct rb_root *maps, u64 ip)
 
 	return NULL;
 }
+
+struct map *maps__first(struct rb_root *maps)
+{
+	struct rb_node *first = rb_first(maps);
+
+	if (first)
+		return rb_entry(first, struct map, rb_node);
+	return NULL;
+}
+
+struct map *maps__next(struct map *map)
+{
+	struct rb_node *next = rb_next(&map->rb_node);
+
+	if (next)
+		return rb_entry(next, struct map, rb_node);
+	return NULL;
+}
@@ -112,6 +112,8 @@ size_t __map_groups__fprintf_maps(struct map_groups *mg,
 void maps__insert(struct rb_root *maps, struct map *map);
 void maps__remove(struct rb_root *maps, struct map *map);
 struct map *maps__find(struct rb_root *maps, u64 addr);
+struct map *maps__first(struct rb_root *maps);
+struct map *maps__next(struct map *map);
 void map_groups__init(struct map_groups *mg);
 void map_groups__exit(struct map_groups *mg);
 int map_groups__clone(struct map_groups *mg,
@@ -139,6 +141,17 @@ static inline struct map *map_groups__find(struct map_groups *mg,
 	return maps__find(&mg->maps[type], addr);
 }
 
+static inline struct map *map_groups__first(struct map_groups *mg,
+					    enum map_type type)
+{
+	return maps__first(&mg->maps[type]);
+}
+
+static inline struct map *map_groups__next(struct map *map)
+{
+	return maps__next(map);
+}
+
 struct symbol *map_groups__find_symbol(struct map_groups *mg,
 				       enum map_type type, u64 addr,
 				       struct map **mapp,
......
...@@ -687,6 +687,8 @@ struct event_modifier { ...@@ -687,6 +687,8 @@ struct event_modifier {
int eG; int eG;
int precise; int precise;
int exclude_GH; int exclude_GH;
int sample_read;
int pinned;
}; };
static int get_event_modifier(struct event_modifier *mod, char *str, static int get_event_modifier(struct event_modifier *mod, char *str,
...@@ -698,6 +700,8 @@ static int get_event_modifier(struct event_modifier *mod, char *str, ...@@ -698,6 +700,8 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
int eH = evsel ? evsel->attr.exclude_host : 0; int eH = evsel ? evsel->attr.exclude_host : 0;
int eG = evsel ? evsel->attr.exclude_guest : 0; int eG = evsel ? evsel->attr.exclude_guest : 0;
int precise = evsel ? evsel->attr.precise_ip : 0; int precise = evsel ? evsel->attr.precise_ip : 0;
int sample_read = 0;
int pinned = evsel ? evsel->attr.pinned : 0;
int exclude = eu | ek | eh; int exclude = eu | ek | eh;
int exclude_GH = evsel ? evsel->exclude_GH : 0; int exclude_GH = evsel ? evsel->exclude_GH : 0;
...@@ -730,6 +734,10 @@ static int get_event_modifier(struct event_modifier *mod, char *str, ...@@ -730,6 +734,10 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
/* use of precise requires exclude_guest */ /* use of precise requires exclude_guest */
if (!exclude_GH) if (!exclude_GH)
eG = 1; eG = 1;
} else if (*str == 'S') {
sample_read = 1;
} else if (*str == 'D') {
pinned = 1;
} else } else
break; break;
...@@ -756,6 +764,9 @@ static int get_event_modifier(struct event_modifier *mod, char *str, ...@@ -756,6 +764,9 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
mod->eG = eG; mod->eG = eG;
mod->precise = precise; mod->precise = precise;
mod->exclude_GH = exclude_GH; mod->exclude_GH = exclude_GH;
mod->sample_read = sample_read;
mod->pinned = pinned;
return 0;
}
@@ -768,7 +779,7 @@ static int check_modifier(char *str)
char *p = str;
/* The sizeof includes 0 byte as well. */
if (strlen(str) > (sizeof("ukhGHppp") - 1))
if (strlen(str) > (sizeof("ukhGHpppSD") - 1))
return -1;
while (*p) {
@@ -806,6 +817,10 @@ int parse_events__modifier_event(struct list_head *list, char *str, bool add)
evsel->attr.exclude_host = mod.eH;
evsel->attr.exclude_guest = mod.eG;
evsel->exclude_GH = mod.exclude_GH;
evsel->sample_read = mod.sample_read;
if (perf_evsel__is_group_leader(evsel))
evsel->attr.pinned = mod.pinned;
}
return 0;
...
@@ -82,7 +82,8 @@ num_hex 0x[a-fA-F0-9]+
num_raw_hex [a-fA-F0-9]+
name [a-zA-Z_*?][a-zA-Z0-9_*?]*
name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?]*
modifier_event [ukhpGH]+
/* If you add a modifier you need to update check_modifier() */
modifier_event [ukhpGHSD]+
modifier_bp [rwx]{1,3}
%%
...
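For context on the two new modifiers parsed above: 'S' sets evsel->sample_read so that each sample carries the group's counter values (see the PERF_SAMPLE_READ handling further down), and 'D' sets attr.pinned on the group leader. On the command line this should look something like -e '{cycles,instructions}:S' or -e 'cycles:D'. Below is a rough standalone sketch, not perf tool code, of the perf_event_attr configuration such a request boils down to; the event choice and numbers are made up, and enabling the group and mapping the ring buffer are omitted.

/* Sketch: a pinned group leader that samples and reads the whole group on
 * each sample, i.e. roughly what the "S" and "D" modifiers request.
 * Build: cc -o group_sketch group_sketch.c */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
			       int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr leader, sibling;
	int lfd, sfd;

	memset(&leader, 0, sizeof(leader));
	leader.type = PERF_TYPE_HARDWARE;
	leader.size = sizeof(leader);
	leader.config = PERF_COUNT_HW_CPU_CYCLES;	/* made-up event choice */
	leader.sample_period = 100000;
	leader.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_READ;	/* "S" */
	leader.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID;
	leader.pinned = 1;					/* "D" */
	leader.disabled = 1;

	memset(&sibling, 0, sizeof(sibling));
	sibling.type = PERF_TYPE_HARDWARE;
	sibling.size = sizeof(sibling);
	sibling.config = PERF_COUNT_HW_INSTRUCTIONS;

	lfd = sys_perf_event_open(&leader, 0, -1, -1, 0);
	if (lfd < 0) {
		perror("perf_event_open leader");
		return 1;
	}
	sfd = sys_perf_event_open(&sibling, 0, -1, lfd, 0);
	if (sfd < 0) {
		perror("perf_event_open sibling");
		return 1;
	}
	printf("group leader fd %d, sibling fd %d\n", lfd, sfd);
	close(sfd);
	close(lfd);
	return 0;
}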
@@ -71,6 +71,11 @@ static int perf_session__open(struct perf_session *self, bool force)
goto out_close;
}
if (!perf_evlist__valid_read_format(self->evlist)) {
pr_err("non matching read_format");
goto out_close;
}
self->size = input_stat.st_size;
return 0;
@@ -245,7 +250,7 @@ static int process_finished_round(struct perf_tool *tool,
union perf_event *event,
struct perf_session *session);
static void perf_tool__fill_defaults(struct perf_tool *tool)
void perf_tool__fill_defaults(struct perf_tool *tool)
{
if (tool->sample == NULL)
tool->sample = process_event_sample_stub;
@@ -490,7 +495,7 @@ static int perf_session_deliver_event(struct perf_session *session,
u64 file_offset);
static int flush_sample_queue(struct perf_session *s,
struct perf_tool *tool)
{
struct ordered_samples *os = &s->ordered_samples;
struct list_head *head = &os->samples;
@@ -638,7 +643,7 @@ static void __queue_event(struct sample_queue *new, struct perf_session *s)
#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct sample_queue))
static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
int perf_session_queue_event(struct perf_session *s, union perf_event *event,
struct perf_sample *sample, u64 file_offset)
{
struct ordered_samples *os = &s->ordered_samples;
@@ -749,6 +754,36 @@ static void perf_session__print_tstamp(struct perf_session *session,
printf("%" PRIu64 " ", sample->time);
}
static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
printf("... sample_read:\n");
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
printf("...... time enabled %016" PRIx64 "\n",
sample->read.time_enabled);
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
printf("...... time running %016" PRIx64 "\n",
sample->read.time_running);
if (read_format & PERF_FORMAT_GROUP) {
u64 i;
printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);
for (i = 0; i < sample->read.group.nr; i++) {
struct sample_read_value *value;
value = &sample->read.group.values[i];
printf("..... id %016" PRIx64
", value %016" PRIx64 "\n",
value->id, value->value);
}
} else
printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
sample->read.one.id, sample->read.one.value);
}
static void dump_event(struct perf_session *session, union perf_event *event,
u64 file_offset, struct perf_sample *sample)
{
@@ -798,6 +833,9 @@ static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
if (sample_type & PERF_SAMPLE_DATA_SRC)
printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);
if (sample_type & PERF_SAMPLE_READ)
sample_read__printf(sample, evsel->attr.read_format);
}
static struct machine *
@@ -822,6 +860,75 @@ static struct machine *
return &session->machines.host;
}
static int deliver_sample_value(struct perf_session *session,
struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct sample_read_value *v,
struct machine *machine)
{
struct perf_sample_id *sid;
sid = perf_evlist__id2sid(session->evlist, v->id);
if (sid) {
sample->id = v->id;
sample->period = v->value - sid->period;
sid->period = v->value;
}
if (!sid || sid->evsel == NULL) {
++session->stats.nr_unknown_id;
return 0;
}
return tool->sample(tool, event, sample, sid->evsel, machine);
}
static int deliver_sample_group(struct perf_session *session,
struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
int ret = -EINVAL;
u64 i;
for (i = 0; i < sample->read.group.nr; i++) {
ret = deliver_sample_value(session, tool, event, sample,
&sample->read.group.values[i],
machine);
if (ret)
break;
}
return ret;
}
static int
perf_session__deliver_sample(struct perf_session *session,
struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine)
{
/* We know evsel != NULL. */
u64 sample_type = evsel->attr.sample_type;
u64 read_format = evsel->attr.read_format;
/* Standard sample delivery. */
if (!(sample_type & PERF_SAMPLE_READ))
return tool->sample(tool, event, sample, evsel, machine);
/* For PERF_SAMPLE_READ we have either single or group mode. */
if (read_format & PERF_FORMAT_GROUP)
return deliver_sample_group(session, tool, event, sample,
machine);
else
return deliver_sample_value(session, tool, event, sample,
&sample->read.one, machine);
}
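As an aside on deliver_sample_value() above: sid->period caches the counter value seen in the previous sample, so each sample is charged the delta of the running count rather than the absolute value. A toy standalone illustration of that arithmetic, with made-up counter values (not perf code):

/* Period attribution under PERF_SAMPLE_READ: each sample carries the running
 * counter value, and the per-sample period is the delta since the previous
 * sample. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t running[] = { 1000000, 1250000, 1600000 };	/* made-up values */
	uint64_t prev = 0;

	for (int i = 0; i < 3; i++) {
		printf("sample %d: period = %llu\n", i,
		       (unsigned long long)(running[i] - prev));
		prev = running[i];
	}
	return 0;
}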
static int perf_session_deliver_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
@@ -864,7 +971,8 @@ static int perf_session_deliver_event(struct perf_session *session,
++session->stats.nr_unprocessable_samples;
return 0;
}
return tool->sample(tool, event, sample, evsel, machine);
return perf_session__deliver_sample(session, tool, event,
sample, evsel, machine);
case PERF_RECORD_MMAP:
return tool->mmap(tool, event, sample, machine);
case PERF_RECORD_COMM:
@@ -1411,8 +1519,13 @@ void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event,
printf("\t%16" PRIx64, node->ip);
if (print_sym) {
printf(" ");
symbol__fprintf_symname(node->sym, stdout);
if (print_symoffset) {
al.addr = node->ip;
symbol__fprintf_symname_offs(node->sym, &al, stdout);
} else
symbol__fprintf_symname(node->sym, stdout);
}
if (print_dso) {
printf(" (");
map__fprintf_dsoname(node->map, stdout);
...
@@ -56,6 +56,11 @@ int __perf_session__process_events(struct perf_session *self,
int perf_session__process_events(struct perf_session *self,
struct perf_tool *tool);
int perf_session_queue_event(struct perf_session *s, union perf_event *event,
struct perf_sample *sample, u64 file_offset);
void perf_tool__fill_defaults(struct perf_tool *tool);
int perf_session__resolve_callchain(struct perf_session *self, struct perf_evsel *evsel,
struct thread *thread,
struct ip_callchain *chain,
...
@@ -10,6 +10,12 @@ void update_stats(struct stats *stats, u64 val)
delta = val - stats->mean;
stats->mean += delta / stats->n;
stats->M2 += delta*(val - stats->mean);
if (val > stats->max)
stats->max = val;
if (val < stats->min)
stats->min = val;
}
double avg_stats(struct stats *stats)
...
@@ -6,6 +6,7 @@
struct stats
{
double n, mean, M2;
u64 max, min;
};
void update_stats(struct stats *stats, u64 val);
@@ -13,4 +14,12 @@ double avg_stats(struct stats *stats);
double stddev_stats(struct stats *stats);
double rel_stddev_stats(double stddev, double avg);
static inline void init_stats(struct stats *stats)
{
stats->n = 0.0;
stats->mean = 0.0;
stats->M2 = 0.0;
stats->min = (u64) -1;
stats->max = 0;
}
#endif
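The new min/max fields above ride along with the existing Welford-style running mean/variance update (the stats->n increment happens just before the hunk shown). Here is a self-contained sketch of the same bookkeeping with made-up sample values; note that perf's own stddev_stats() reporting may divide differently (build with -lm):

/* Online mean/variance (Welford) plus min/max tracking, mirroring
 * init_stats()/update_stats() above. */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

struct stats {
	double n, mean, M2;
	uint64_t max, min;
};

static void init_stats(struct stats *s)
{
	s->n = s->mean = s->M2 = 0.0;
	s->min = (uint64_t)-1;
	s->max = 0;
}

static void update_stats(struct stats *s, uint64_t val)
{
	double delta;

	s->n += 1.0;
	delta = val - s->mean;
	s->mean += delta / s->n;
	s->M2 += delta * (val - s->mean);

	if (val > s->max)
		s->max = val;
	if (val < s->min)
		s->min = val;
}

int main(void)
{
	struct stats s;
	uint64_t samples[] = { 10, 12, 9, 15, 11 };	/* made-up values */

	init_stats(&s);
	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		update_stats(&s, samples[i]);

	/* sample standard deviation from the accumulated M2 */
	printf("mean %.2f stddev %.2f min %llu max %llu\n",
	       s.mean, sqrt(s.M2 / (s.n - 1)),
	       (unsigned long long)s.min, (unsigned long long)s.max);
	return 0;
}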
@@ -599,11 +599,13 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
if (dso->kernel == DSO_TYPE_USER) {
GElf_Shdr shdr;
ss->adjust_symbols = (ehdr.e_type == ET_EXEC ||
ehdr.e_type == ET_REL ||
elf_section_by_name(elf, &ehdr, &shdr,
".gnu.prelink_undo",
NULL) != NULL);
} else {
ss->adjust_symbols = 0;
ss->adjust_symbols = ehdr.e_type == ET_EXEC ||
ehdr.e_type == ET_REL;
}
ss->name = strdup(name);
@@ -624,6 +626,37 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
return err;
}
/**
* ref_reloc_sym_not_found - has kernel relocation symbol been found.
* @kmap: kernel maps and relocation reference symbol
*
* This function returns %true if we are dealing with the kernel maps and the
* relocation reference symbol has not yet been found. Otherwise %false is
* returned.
*/
static bool ref_reloc_sym_not_found(struct kmap *kmap)
{
return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
!kmap->ref_reloc_sym->unrelocated_addr;
}
/**
* ref_reloc - kernel relocation offset.
* @kmap: kernel maps and relocation reference symbol
*
* This function returns the offset of kernel addresses as determined by using
* the relocation reference symbol i.e. if the kernel has not been relocated
* then the return value is zero.
*/
static u64 ref_reloc(struct kmap *kmap)
{
if (kmap && kmap->ref_reloc_sym &&
kmap->ref_reloc_sym->unrelocated_addr)
return kmap->ref_reloc_sym->addr -
kmap->ref_reloc_sym->unrelocated_addr;
return 0;
}
int dso__load_sym(struct dso *dso, struct map *map,
struct symsrc *syms_ss, struct symsrc *runtime_ss,
symbol_filter_t filter, int kmodule)
@@ -642,8 +675,17 @@ int dso__load_sym(struct dso *dso, struct map *map,
Elf_Scn *sec, *sec_strndx;
Elf *elf;
int nr = 0;
bool remap_kernel = false, adjust_kernel_syms = false;
dso->symtab_type = syms_ss->type;
dso->rel = syms_ss->ehdr.e_type == ET_REL;
/*
* Modules may already have symbols from kallsyms, but those symbols
* have the wrong values for the dso maps, so remove them.
*/
if (kmodule && syms_ss->symtab)
symbols__delete(&dso->symbols[map->type]);
if (!syms_ss->symtab) {
syms_ss->symtab = syms_ss->dynsym;
@@ -681,7 +723,31 @@ int dso__load_sym(struct dso *dso, struct map *map,
nr_syms = shdr.sh_size / shdr.sh_entsize;
memset(&sym, 0, sizeof(sym));
dso->adjust_symbols = runtime_ss->adjust_symbols;
/*
* The kernel relocation symbol is needed in advance in order to adjust
* kernel maps correctly.
*/
if (ref_reloc_sym_not_found(kmap)) {
elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
const char *elf_name = elf_sym__name(&sym, symstrs);
if (strcmp(elf_name, kmap->ref_reloc_sym->name))
continue;
kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
break;
}
}
dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
/*
* Initial kernel and module mappings do not map to the dso. For
* function mappings, flag the fixups.
*/
if (map->type == MAP__FUNCTION && (dso->kernel || kmodule)) {
remap_kernel = true;
adjust_kernel_syms = dso->adjust_symbols;
}
elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
struct symbol *f;
const char *elf_name = elf_sym__name(&sym, symstrs);
@@ -690,10 +756,6 @@ int dso__load_sym(struct dso *dso, struct map *map,
const char *section_name;
bool used_opd = false;
if (kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
strcmp(elf_name, kmap->ref_reloc_sym->name) == 0)
kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
if (!is_label && !elf_sym__is_a(&sym, map->type))
continue;
@@ -745,20 +807,55 @@ int dso__load_sym(struct dso *dso, struct map *map,
(sym.st_value & 1))
--sym.st_value;
if (dso->kernel != DSO_TYPE_USER || kmodule) {
if (dso->kernel || kmodule) {
char dso_name[PATH_MAX];
/* Adjust symbol to map to file offset */
if (adjust_kernel_syms)
sym.st_value -= shdr.sh_addr - shdr.sh_offset;
if (strcmp(section_name,
(curr_dso->short_name +
dso->short_name_len)) == 0)
goto new_symbol;
if (strcmp(section_name, ".text") == 0) {
/*
* The initial kernel mapping is based on
* kallsyms and identity maps. Overwrite it to
* map to the kernel dso.
*/
if (remap_kernel && dso->kernel) {
remap_kernel = false;
map->start = shdr.sh_addr +
ref_reloc(kmap);
map->end = map->start + shdr.sh_size;
map->pgoff = shdr.sh_offset;
map->map_ip = map__map_ip;
map->unmap_ip = map__unmap_ip;
/* Ensure maps are correctly ordered */
map_groups__remove(kmap->kmaps, map);
map_groups__insert(kmap->kmaps, map);
}
/*
* The initial module mapping is based on
* /proc/modules mapped to offset zero.
* Overwrite it to map to the module dso.
*/
if (remap_kernel && kmodule) {
remap_kernel = false;
map->pgoff = shdr.sh_offset;
}
curr_map = map;
curr_dso = dso;
goto new_symbol;
}
if (!kmap)
goto new_symbol;
snprintf(dso_name, sizeof(dso_name),
"%s%s", dso->short_name, section_name);
@@ -781,8 +878,16 @@ int dso__load_sym(struct dso *dso, struct map *map,
dso__delete(curr_dso);
goto out_elf_end;
}
curr_map->map_ip = identity__map_ip;
curr_map->unmap_ip = identity__map_ip;
if (adjust_kernel_syms) {
curr_map->start = shdr.sh_addr +
ref_reloc(kmap);
curr_map->end = curr_map->start +
shdr.sh_size;
curr_map->pgoff = shdr.sh_offset;
} else {
curr_map->map_ip = identity__map_ip;
curr_map->unmap_ip = identity__map_ip;
}
curr_dso->symtab_type = dso->symtab_type;
map_groups__insert(kmap->kmaps, curr_map);
dsos__add(&dso->node, curr_dso);
@@ -846,6 +951,57 @@ int dso__load_sym(struct dso *dso, struct map *map,
return err;
}
static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
{
GElf_Phdr phdr;
size_t i, phdrnum;
int err;
u64 sz;
if (elf_getphdrnum(elf, &phdrnum))
return -1;
for (i = 0; i < phdrnum; i++) {
if (gelf_getphdr(elf, i, &phdr) == NULL)
return -1;
if (phdr.p_type != PT_LOAD)
continue;
if (exe) {
if (!(phdr.p_flags & PF_X))
continue;
} else {
if (!(phdr.p_flags & PF_R))
continue;
}
sz = min(phdr.p_memsz, phdr.p_filesz);
if (!sz)
continue;
err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
if (err)
return err;
}
return 0;
}
int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
bool *is_64_bit)
{
int err;
Elf *elf;
elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
if (elf == NULL)
return -1;
if (is_64_bit)
*is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
err = elf_read_maps(elf, exe, mapfn, data);
elf_end(elf);
return err;
}
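file__read_maps()/elf_read_maps() above amount to a walk over the PT_LOAD program headers of an ELF image, keeping executable (or readable) segments and reporting their address, size and file offset. A rough standalone counterpart using libelf directly, printing executable segments only (link with -lelf; the output format is made up):

/* Walk PT_LOAD program headers of an ELF file and print executable segments.
 * Build: cc -o phdrs phdrs.c -lelf */
#include <err.h>
#include <fcntl.h>
#include <gelf.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	GElf_Phdr phdr;
	size_t i, phdrnum;
	Elf *elf;
	int fd;

	if (argc != 2)
		errx(1, "usage: %s <elf-file>", argv[0]);

	elf_version(EV_CURRENT);

	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		err(1, "open %s", argv[1]);

	elf = elf_begin(fd, ELF_C_READ, NULL);
	if (elf == NULL)
		errx(1, "elf_begin: %s", elf_errmsg(-1));

	printf("class: %s\n",
	       gelf_getclass(elf) == ELFCLASS64 ? "64-bit" : "32-bit");

	if (elf_getphdrnum(elf, &phdrnum))
		errx(1, "elf_getphdrnum: %s", elf_errmsg(-1));

	for (i = 0; i < phdrnum; i++) {
		if (gelf_getphdr(elf, i, &phdr) == NULL)
			errx(1, "gelf_getphdr: %s", elf_errmsg(-1));
		if (phdr.p_type != PT_LOAD || !(phdr.p_flags & PF_X))
			continue;	/* executable loadable segments only */
		printf("vaddr %#llx len %#llx file offset %#llx\n",
		       (unsigned long long)phdr.p_vaddr,
		       (unsigned long long)phdr.p_filesz,
		       (unsigned long long)phdr.p_offset);
	}

	elf_end(elf);
	close(fd);
	return 0;
}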
void symbol__elf_init(void)
{
elf_version(EV_CURRENT);
...
@@ -301,6 +301,13 @@ int dso__load_sym(struct dso *dso, struct map *map __maybe_unused,
return 0;
}
int file__read_maps(int fd __maybe_unused, bool exe __maybe_unused,
mapfn_t mapfn __maybe_unused, void *data __maybe_unused,
bool *is_64_bit __maybe_unused)
{
return -1;
}
void symbol__elf_init(void)
{
}
@@ -87,6 +87,7 @@ static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
{
s64 a;
s64 b;
size_t na, nb;
/* Prefer a symbol with non zero length */
a = syma->end - syma->start;
@@ -120,11 +121,21 @@ static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
else if (a > b)
return SYMBOL_B;
/* If all else fails, choose the symbol with the longest name */
/* Choose the symbol with the longest name */
if (strlen(syma->name) >= strlen(symb->name))
na = strlen(syma->name);
nb = strlen(symb->name);
if (na > nb)
return SYMBOL_A;
else
else if (na < nb)
return SYMBOL_B;
/* Avoid "SyS" kernel syscall aliases */
if (na >= 3 && !strncmp(syma->name, "SyS", 3))
return SYMBOL_B;
if (na >= 10 && !strncmp(syma->name, "compat_SyS", 10))
return SYMBOL_B;
return SYMBOL_A;
}
void symbols__fixup_duplicate(struct rb_root *symbols)
@@ -316,6 +327,16 @@ static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
return NULL;
}
static struct symbol *symbols__first(struct rb_root *symbols)
{
struct rb_node *n = rb_first(symbols);
if (n)
return rb_entry(n, struct symbol, rb_node);
return NULL;
}
struct symbol_name_rb_node {
struct rb_node rb_node;
struct symbol sym;
@@ -386,6 +407,11 @@ struct symbol *dso__find_symbol(struct dso *dso,
return symbols__find(&dso->symbols[type], addr);
}
struct symbol *dso__first_symbol(struct dso *dso, enum map_type type)
{
return symbols__first(&dso->symbols[type]);
}
struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
const char *name)
{
@@ -522,6 +548,53 @@ static int dso__load_all_kallsyms(struct dso *dso, const char *filename,
return kallsyms__parse(filename, &args, map__process_kallsym_symbol);
}
static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
symbol_filter_t filter)
{
struct map_groups *kmaps = map__kmap(map)->kmaps;
struct map *curr_map;
struct symbol *pos;
int count = 0, moved = 0;
struct rb_root *root = &dso->symbols[map->type];
struct rb_node *next = rb_first(root);
while (next) {
char *module;
pos = rb_entry(next, struct symbol, rb_node);
next = rb_next(&pos->rb_node);
module = strchr(pos->name, '\t');
if (module)
*module = '\0';
curr_map = map_groups__find(kmaps, map->type, pos->start);
if (!curr_map || (filter && filter(curr_map, pos))) {
rb_erase(&pos->rb_node, root);
symbol__delete(pos);
} else {
pos->start -= curr_map->start - curr_map->pgoff;
if (pos->end)
pos->end -= curr_map->start - curr_map->pgoff;
if (curr_map != map) {
rb_erase(&pos->rb_node, root);
symbols__insert(
&curr_map->dso->symbols[curr_map->type],
pos);
++moved;
} else {
++count;
}
}
}
/* Symbols have been adjusted */
dso->adjust_symbols = 1;
return count + moved;
}
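To make the address rewrite above concrete (made-up numbers): a kallsyms symbol at 0xffffffff81000123 that falls into a kcore map with start 0xffffffff81000000 and pgoff 0x1000000 is rebased to 0xffffffff81000123 - (0xffffffff81000000 - 0x1000000) = 0x1000123, i.e. the symbol value is shifted from its absolute kernel address onto the map's pgoff, consistent with dso->adjust_symbols being set to 1 at the end of the function.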
/*
* Split the symbols into maps, making sure there are no overlaps, i.e. the
* kernel range is broken in several maps, named [kernel].N, as we don't have
@@ -663,6 +736,161 @@ bool symbol__restricted_filename(const char *filename,
return restricted;
}
struct kcore_mapfn_data {
struct dso *dso;
enum map_type type;
struct list_head maps;
};
static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
{
struct kcore_mapfn_data *md = data;
struct map *map;
map = map__new2(start, md->dso, md->type);
if (map == NULL)
return -ENOMEM;
map->end = map->start + len;
map->pgoff = pgoff;
list_add(&map->node, &md->maps);
return 0;
}
/*
* If kallsyms is referenced by name then we look for kcore in the same
* directory.
*/
static bool kcore_filename_from_kallsyms_filename(char *kcore_filename,
const char *kallsyms_filename)
{
char *name;
strcpy(kcore_filename, kallsyms_filename);
name = strrchr(kcore_filename, '/');
if (!name)
return false;
if (!strcmp(name, "/kallsyms")) {
strcpy(name, "/kcore");
return true;
}
return false;
}
static int dso__load_kcore(struct dso *dso, struct map *map,
const char *kallsyms_filename)
{
struct map_groups *kmaps = map__kmap(map)->kmaps;
struct machine *machine = kmaps->machine;
struct kcore_mapfn_data md;
struct map *old_map, *new_map, *replacement_map = NULL;
bool is_64_bit;
int err, fd;
char kcore_filename[PATH_MAX];
struct symbol *sym;
/* This function requires that the map is the kernel map */
if (map != machine->vmlinux_maps[map->type])
return -EINVAL;
if (!kcore_filename_from_kallsyms_filename(kcore_filename,
kallsyms_filename))
return -EINVAL;
md.dso = dso;
md.type = map->type;
INIT_LIST_HEAD(&md.maps);
fd = open(kcore_filename, O_RDONLY);
if (fd < 0)
return -EINVAL;
/* Read new maps into temporary lists */
err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md,
&is_64_bit);
if (err)
goto out_err;
if (list_empty(&md.maps)) {
err = -EINVAL;
goto out_err;
}
/* Remove old maps */
old_map = map_groups__first(kmaps, map->type);
while (old_map) {
struct map *next = map_groups__next(old_map);
if (old_map != map)
map_groups__remove(kmaps, old_map);
old_map = next;
}
/* Find the kernel map using the first symbol */
sym = dso__first_symbol(dso, map->type);
list_for_each_entry(new_map, &md.maps, node) {
if (sym && sym->start >= new_map->start &&
sym->start < new_map->end) {
replacement_map = new_map;
break;
}
}
if (!replacement_map)
replacement_map = list_entry(md.maps.next, struct map, node);
/* Add new maps */
while (!list_empty(&md.maps)) {
new_map = list_entry(md.maps.next, struct map, node);
list_del(&new_map->node);
if (new_map == replacement_map) {
map->start = new_map->start;
map->end = new_map->end;
map->pgoff = new_map->pgoff;
map->map_ip = new_map->map_ip;
map->unmap_ip = new_map->unmap_ip;
map__delete(new_map);
/* Ensure maps are correctly ordered */
map_groups__remove(kmaps, map);
map_groups__insert(kmaps, map);
} else {
map_groups__insert(kmaps, new_map);
}
}
/*
* Set the data type and long name so that kcore can be read via
* dso__data_read_addr().
*/
if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
dso->data_type = DSO_BINARY_TYPE__GUEST_KCORE;
else
dso->data_type = DSO_BINARY_TYPE__KCORE;
dso__set_long_name(dso, strdup(kcore_filename));
close(fd);
if (map->type == MAP__FUNCTION)
pr_debug("Using %s for kernel object code\n", kcore_filename);
else
pr_debug("Using %s for kernel data\n", kcore_filename);
return 0;
out_err:
while (!list_empty(&md.maps)) {
map = list_entry(md.maps.next, struct map, node);
list_del(&map->node);
map__delete(map);
}
close(fd);
return -EINVAL;
}
int dso__load_kallsyms(struct dso *dso, const char *filename,
struct map *map, symbol_filter_t filter)
{
@@ -680,7 +908,10 @@ int dso__load_kallsyms(struct dso *dso, const char *filename,
else
dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
return dso__split_kallsyms(dso, map, filter);
if (!dso__load_kcore(dso, map, filename))
return dso__split_kallsyms_for_kcore(dso, map, filter);
else
return dso__split_kallsyms(dso, map, filter);
}
static int dso__load_perf_map(struct dso *dso, struct map *map,
@@ -843,10 +1074,15 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
if (!runtime_ss && syms_ss)
runtime_ss = syms_ss;
if (syms_ss)
ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, 0);
else
if (syms_ss) {
int km;
km = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, km);
} else {
ret = -1;
}
if (ret > 0) {
int nr_plt;
@@ -906,6 +1142,10 @@ int dso__load_vmlinux(struct dso *dso, struct map *map,
symsrc__destroy(&ss);
if (err > 0) {
if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
dso->data_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
else
dso->data_type = DSO_BINARY_TYPE__VMLINUX;
dso__set_long_name(dso, (char *)vmlinux);
dso__set_loaded(dso, map->type);
pr_debug("Using %s for symbols\n", symfs_vmlinux);
@@ -978,7 +1218,7 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map,
dso__set_long_name(dso,
strdup(symbol_conf.vmlinux_name));
dso->lname_alloc = 1;
goto out_fixup;
return err;
}
return err;
}
@@ -986,7 +1226,7 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map,
if (vmlinux_path != NULL) {
err = dso__load_vmlinux_path(dso, map, filter);
if (err > 0)
goto out_fixup;
return err;
}
/* do not try local files if a symfs was given */
@@ -1045,9 +1285,8 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map,
pr_debug("Using %s for symbols\n", kallsyms_filename);
free(kallsyms_allocated_filename);
if (err > 0) {
if (err > 0 && !dso__is_kcore(dso)) {
dso__set_long_name(dso, strdup("[kernel.kallsyms]"));
out_fixup:
map__fixup_start(map);
map__fixup_end(map);
}
@@ -1078,7 +1317,7 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
if (symbol_conf.default_guest_vmlinux_name != NULL) {
err = dso__load_vmlinux(dso, map,
symbol_conf.default_guest_vmlinux_name, filter);
goto out_try_fixup;
return err;
}
kallsyms_filename = symbol_conf.default_guest_kallsyms;
@@ -1092,13 +1331,9 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
err = dso__load_kallsyms(dso, kallsyms_filename, map, filter);
if (err > 0)
pr_debug("Using %s for symbols\n", kallsyms_filename);
if (err > 0 && !dso__is_kcore(dso)) {
out_try_fixup:
if (err > 0) {
machine__mmap_name(machine, path, sizeof(path));
dso__set_long_name(dso, strdup(path));
if (kallsyms_filename != NULL) {
machine__mmap_name(machine, path, sizeof(path));
dso__set_long_name(dso, strdup(path));
}
map__fixup_start(map);
map__fixup_end(map);
}
...
@@ -215,6 +215,7 @@ struct symbol *dso__find_symbol(struct dso *dso, enum map_type type,
u64 addr);
struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
const char *name);
struct symbol *dso__first_symbol(struct dso *dso, enum map_type type);
int filename__read_build_id(const char *filename, void *bf, size_t size);
int sysfs__read_build_id(const char *filename, void *bf, size_t size);
@@ -247,4 +248,8 @@ void symbols__fixup_duplicate(struct rb_root *symbols);
void symbols__fixup_end(struct rb_root *symbols);
void __map_groups__fixup_end(struct map_groups *mg, enum map_type type);
typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data);
int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
bool *is_64_bit);
#endif /* __PERF_SYMBOL */
@@ -41,7 +41,7 @@ static inline struct map *thread__find_map(struct thread *self,
void thread__find_addr_map(struct thread *thread, struct machine *machine,
u8 cpumode, enum map_type type, u64 addr,
struct addr_location *al);
struct addr_location *al, symbol_filter_t filter);
void thread__find_addr_location(struct thread *thread, struct machine *machine,
u8 cpumode, enum map_type type, u64 addr,
...
@@ -39,6 +39,8 @@ struct perf_top {
float min_percent;
};
#define CONSOLE_CLEAR ""
size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size);
void perf_top__reset_sample_counters(struct perf_top *top);
#endif /* __PERF_TOP_H */
@@ -272,7 +272,7 @@ static struct map *find_map(unw_word_t ip, struct unwind_info *ui)
struct addr_location al;
thread__find_addr_map(ui->thread, ui->machine, PERF_RECORD_MISC_USER,
MAP__FUNCTION, ip, &al);
MAP__FUNCTION, ip, &al, NULL);
return al.map;
}
@@ -349,7 +349,7 @@ static int access_dso_mem(struct unwind_info *ui, unw_word_t addr,
ssize_t size;
thread__find_addr_map(ui->thread, ui->machine, PERF_RECORD_MISC_USER,
MAP__FUNCTION, addr, &al);
MAP__FUNCTION, addr, &al, NULL);
if (!al.map) {
pr_debug("unwind: no map for %lx\n", (unsigned long)addr);
return -1;
...
@@ -328,3 +328,36 @@ void put_tracing_file(char *file)
{
free(file);
}
int parse_nsec_time(const char *str, u64 *ptime)
{
u64 time_sec, time_nsec;
char *end;
time_sec = strtoul(str, &end, 10);
if (*end != '.' && *end != '\0')
return -1;
if (*end == '.') {
int i;
char nsec_buf[10];
if (strlen(++end) > 9)
return -1;
strncpy(nsec_buf, end, 9);
nsec_buf[9] = '\0';
/* make it nsec precision */
for (i = strlen(nsec_buf); i < 9; i++)
nsec_buf[i] = '0';
time_nsec = strtoul(nsec_buf, &end, 10);
if (*end != '\0')
return -1;
} else
time_nsec = 0;
*ptime = time_sec * NSEC_PER_SEC + time_nsec;
return 0;
}
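A standalone sketch of the sec[.frac] parsing done by parse_nsec_time() above, with a couple of worked conversions; the helper name to_nsec() is made up and this is not linked against perf's util code:

/* "sec[.frac]" -> nanoseconds, with the fractional part right-padded to
 * nine digits, mirroring the helper above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NSEC_PER_SEC 1000000000ULL

static int to_nsec(const char *str, uint64_t *ptime)
{
	char *end;
	uint64_t sec = strtoull(str, &end, 10), nsec = 0;

	if (*end != '.' && *end != '\0')
		return -1;
	if (*end == '.') {
		char buf[10] = "000000000";	/* nine digits plus NUL */
		size_t len = strlen(++end);

		if (len > 9)
			return -1;
		memcpy(buf, end, len);		/* pad to nsec precision */
		nsec = strtoull(buf, &end, 10);
		if (*end != '\0')
			return -1;
	}
	*ptime = sec * NSEC_PER_SEC + nsec;
	return 0;
}

int main(void)
{
	uint64_t t;

	assert(to_nsec("5.123", &t) == 0 && t == 5123000000ULL);
	assert(to_nsec("2", &t) == 0 && t == 2000000000ULL);
	printf("ok\n");
	return 0;
}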
@@ -208,6 +208,8 @@ static inline int has_extension(const char *filename, const char *ext)
#define NSEC_PER_MSEC 1000000L
#endif
int parse_nsec_time(const char *str, u64 *ptime);
extern unsigned char sane_ctype[256];
#define GIT_SPACE 0x01
#define GIT_DIGIT 0x02
...