Commit 4bc6a58f authored by Ingo Molnar

Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes:

User visible changes:

  - By default use the most precise "cycles" hw counter available, i.e.
    when the user doesn't specify any event, it will try using cycles:ppp,
    cycles:pp, etc. (Arnaldo Carvalho de Melo)

  - Remove blank lines and headers when piping 'perf list' output, so that it
    can be sanely used with 'wc -l', etc. (Arnaldo Carvalho de Melo)

  - Amend documentation about max_stack and synthesized callchains. (Adrian Hunter)

  - Fix 'perf probe -l' for probes added to kernel module functions. (Masami Hiramatsu)

Build fixes:

  - Fix shadowed declarations that break the build on older distros. (Jiri Olsa)

  - Fix build break on powerpc due to sample_reg_masks. (Sukadev Bhattiprolu)

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parents 9c17dbc6 7f8d1ade
......@@ -31,9 +31,9 @@
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
static inline void * __must_check ERR_PTR(long error)
static inline void * __must_check ERR_PTR(long error_)
{
return (void *) error;
return (void *) error_;
}
static inline long __must_check PTR_ERR(__force const void *ptr)
......
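A hypothetical reproduction of the -Wshadow problem the error -> error_ rename above works around on older toolchains: with glibc's <error.h> in scope, old gcc versions warn that a parameter named "error" shadows the global error(3) declaration. The demo_* names and the shadow-demo.c file name are made up; the exact header/compiler combination on the affected distros is an assumption.

/* gcc -Wshadow -Werror -c shadow-demo.c fails on demo_err_ptr() with an
 * old enough gcc; the renamed variant builds cleanly. */
#include <error.h>

static inline void *demo_err_ptr(long error)		/* shadows error(3) */
{
	return (void *)error;
}

static inline void *demo_err_ptr_fixed(long error_)	/* no shadowing */
{
	return (void *)error_;
}

int main(void)
{
	return demo_err_ptr(-1) == demo_err_ptr_fixed(-1) ? 0 : 1;
}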
......@@ -2,6 +2,12 @@
#include <stdio.h>
#include <stdlib.h>
u8 kallsyms2elf_type(char type)
{
type = tolower(type);
return (type == 't' || type == 'w') ? STT_FUNC : STT_OBJECT;
}
int kallsyms__parse(const char *filename, void *arg,
int (*process_symbol)(void *arg, const char *name,
char type, u64 start))
......
......@@ -9,7 +9,7 @@
#define KSYM_NAME_LEN 256
#endif
static inline u8 kallsyms2elf_type(char type)
static inline u8 kallsyms2elf_binding(char type)
{
if (type == 'W')
return STB_WEAK;
......@@ -17,6 +17,8 @@ static inline u8 kallsyms2elf_type(char type)
return isupper(type) ? STB_GLOBAL : STB_LOCAL;
}
u8 kallsyms2elf_type(char type);
int kallsyms__parse(const char *filename, void *arg,
int (*process_symbol)(void *arg, const char *name,
char type, u64 start));
......
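Not part of the patch: a small self-contained demo of the distinction the two helpers above now encode, mapping a kallsyms type character either to an ELF symbol type (STT_*) or to a binding (STB_*). The demo_* prefixes are added only to keep this sketch separate from the real helpers.

#include <ctype.h>
#include <elf.h>
#include <stdio.h>

static int demo_kallsyms2elf_type(char type)
{
	type = tolower(type);
	return (type == 't' || type == 'w') ? STT_FUNC : STT_OBJECT;
}

static int demo_kallsyms2elf_binding(char type)
{
	if (type == 'W')
		return STB_WEAK;
	return isupper(type) ? STB_GLOBAL : STB_LOCAL;
}

int main(void)
{
	const char samples[] = { 'T', 't', 'W', 'D', 'b' };
	unsigned int i;

	for (i = 0; i < sizeof(samples); i++)
		printf("'%c' -> type %d, binding %d\n", samples[i],
		       demo_kallsyms2elf_type(samples[i]),
		       demo_kallsyms2elf_binding(samples[i]));
	return 0;
}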
......@@ -205,6 +205,8 @@ OPTIONS
beyond the specified depth will be ignored. This is a trade-off
between information loss and faster processing especially for
workloads that can have a very long callchain stack.
Note that when using the --itrace option the synthesized callchain size
will override this value if the synthesized callchain size is bigger.
Default: 127
......
......@@ -329,7 +329,7 @@ static int build_alloc_func_list(void)
return -EINVAL;
}
kernel_map = machine->vmlinux_maps[MAP__FUNCTION];
kernel_map = machine__kernel_map(machine);
if (map__load(kernel_map, NULL) < 0) {
pr_err("cannot load kernel map\n");
return -ENOENT;
......
......@@ -36,7 +36,7 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
setup_pager();
if (!raw_dump)
if (!raw_dump && pager_in_use())
printf("\nList of pre-defined events (to be used in -e):\n\n");
if (argc == 0) {
......
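Illustrative only, not code from the patch: the same "don't decorate piped output" idea as the pager_in_use() checks above, approximated with isatty(3) so the sketch stays self-contained (list_events() and its sample table are made up for the demo).

#include <stdio.h>
#include <unistd.h>

/* Only print headers and trailing blank lines when a human is watching,
 * so "demo | wc -l" counts nothing but the events themselves. */
static void list_events(const char *events[], int nr)
{
	int interactive = isatty(STDOUT_FILENO);
	int i;

	if (interactive)
		printf("\nList of pre-defined events (to be used in -e):\n\n");

	for (i = 0; i < nr; i++)
		printf("  %-50s [Hardware event]\n", events[i]);

	if (interactive)
		printf("\n");
}

int main(void)
{
	const char *events[] = { "cycles", "instructions" };

	list_events(events, 2);
	return 0;
}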
......@@ -387,7 +387,7 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
static void report__warn_kptr_restrict(const struct report *rep)
{
struct map *kernel_map = rep->session->machines.host.vmlinux_maps[MAP__FUNCTION];
struct map *kernel_map = machine__kernel_map(&rep->session->machines.host);
struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL;
if (kernel_map == NULL ||
......
......@@ -473,7 +473,7 @@ static int do_test_code_reading(bool try_kcore)
symbol_conf.kallsyms_name = "/proc/kallsyms";
/* Load kernel map */
map = machine->vmlinux_maps[MAP__FUNCTION];
map = machine__kernel_map(machine);
ret = map__load(map, NULL);
if (ret < 0) {
pr_debug("map__load failed\n");
......
......@@ -68,7 +68,7 @@ int test__vmlinux_matches_kallsyms(void)
* to see if the running kernel was relocated by checking if it has the
* same value in the vmlinux file we load.
*/
kallsyms_map = machine__kernel_map(&kallsyms, type);
kallsyms_map = machine__kernel_map(&kallsyms);
/*
* Step 5:
......@@ -80,7 +80,7 @@ int test__vmlinux_matches_kallsyms(void)
goto out;
}
vmlinux_map = machine__kernel_map(&vmlinux, type);
vmlinux_map = machine__kernel_map(&vmlinux);
/*
* Step 6:
......
......@@ -18,6 +18,7 @@ libperf-y += levenshtein.o
libperf-y += llvm-utils.o
libperf-y += parse-options.o
libperf-y += parse-events.o
libperf-y += perf_regs.o
libperf-y += path.o
libperf-y += rbtree.o
libperf-y += bitmap.o
......@@ -104,7 +105,6 @@ libperf-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o
libperf-y += scripting-engines/
libperf-$(CONFIG_PERF_REGS) += perf_regs.o
libperf-$(CONFIG_ZLIB) += zlib.o
libperf-$(CONFIG_LZMA) += lzma.o
......
......@@ -649,12 +649,12 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
size_t size;
const char *mmap_name;
char name_buff[PATH_MAX];
struct map *map;
struct map *map = machine__kernel_map(machine);
struct kmap *kmap;
int err;
union perf_event *event;
if (machine->vmlinux_maps[0] == NULL)
if (map == NULL)
return -1;
/*
......@@ -680,7 +680,6 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
}
map = machine->vmlinux_maps[MAP__FUNCTION];
kmap = map__kmap(map);
size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
"%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
......@@ -1008,7 +1007,7 @@ int perf_event__preprocess_sample(const union perf_event *event,
* it now.
*/
if (cpumode == PERF_RECORD_MISC_KERNEL &&
machine->vmlinux_maps[MAP__FUNCTION] == NULL)
machine__kernel_map(machine) == NULL)
machine__create_kernel_maps(machine);
thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->ip, al);
......
......@@ -205,6 +205,20 @@ void perf_evlist__set_leader(struct perf_evlist *evlist)
}
}
static void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
{
attr->precise_ip = 3;
while (attr->precise_ip != 0) {
int fd = sys_perf_event_open(attr, 0, -1, -1, 0);
if (fd != -1) {
close(fd);
break;
}
--attr->precise_ip;
}
}
int perf_evlist__add_default(struct perf_evlist *evlist)
{
struct perf_event_attr attr = {
......@@ -215,13 +229,15 @@ int perf_evlist__add_default(struct perf_evlist *evlist)
event_attr_init(&attr);
perf_event_attr__set_max_precise_ip(&attr);
evsel = perf_evsel__new(&attr);
if (evsel == NULL)
goto error;
/* use strdup() because free(evsel) assumes name is allocated */
evsel->name = strdup("cycles");
if (!evsel->name)
/* use asprintf() because free(evsel) assumes name is allocated */
if (asprintf(&evsel->name, "cycles%.*s",
attr.precise_ip ? attr.precise_ip + 1 : 0, ":ppp") < 0)
goto error_free;
perf_evlist__add(evlist, evsel);
......
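For illustration only, not part of the patch: a self-contained sketch of the probing strategy perf_event_attr__set_max_precise_ip() implements in the hunk above, with a local perf_event_open() wrapper (hypothetical name) standing in for perf's sys_perf_event_open(). The final printf() also shows the "%.*s" trick the asprintf() call relies on to render precise_ip as the :p/:pp/:ppp modifier.

/*
 * Minimal sketch: start at the deepest precise_ip and fall back until
 * the kernel accepts the attribute, then print the resulting event name.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	for (attr.precise_ip = 3; attr.precise_ip != 0; --attr.precise_ip) {
		int fd = perf_event_open(&attr, 0, -1, -1, 0);

		if (fd != -1) {
			close(fd);
			break;
		}
	}

	/* precise_ip 3/2/1/0 comes out as :ppp/:pp/:p/"" via "%.*s" */
	printf("cycles%.*s\n", attr.precise_ip ? attr.precise_ip + 1 : 0, ":ppp");
	return 0;
}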
......@@ -1268,7 +1268,7 @@ static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
if (ptss_ip)
*ptss_ip = 0;
map = machine__kernel_map(machine, MAP__FUNCTION);
map = machine__kernel_map(machine);
if (!map)
return 0;
......
......@@ -625,7 +625,7 @@ size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
int i;
size_t printed = 0;
struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;
struct dso *kdso = machine__kernel_map(machine)->dso;
if (kdso->has_build_id) {
char filename[PATH_MAX];
......@@ -741,6 +741,7 @@ int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
for (type = 0; type < MAP__NR_TYPES; ++type) {
struct kmap *kmap;
struct map *map;
machine->vmlinux_maps[type] = map__new2(start, kernel, type);
if (machine->vmlinux_maps[type] == NULL)
......@@ -749,13 +750,13 @@ int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
machine->vmlinux_maps[type]->map_ip =
machine->vmlinux_maps[type]->unmap_ip =
identity__map_ip;
kmap = map__kmap(machine->vmlinux_maps[type]);
map = __machine__kernel_map(machine, type);
kmap = map__kmap(map);
if (!kmap)
return -1;
kmap->kmaps = &machine->kmaps;
map_groups__insert(&machine->kmaps,
machine->vmlinux_maps[type]);
map_groups__insert(&machine->kmaps, map);
}
return 0;
......@@ -767,13 +768,13 @@ void machine__destroy_kernel_maps(struct machine *machine)
for (type = 0; type < MAP__NR_TYPES; ++type) {
struct kmap *kmap;
struct map *map = __machine__kernel_map(machine, type);
if (machine->vmlinux_maps[type] == NULL)
if (map == NULL)
continue;
kmap = map__kmap(machine->vmlinux_maps[type]);
map_groups__remove(&machine->kmaps,
machine->vmlinux_maps[type]);
kmap = map__kmap(map);
map_groups__remove(&machine->kmaps, map);
if (kmap && kmap->ref_reloc_sym) {
/*
* ref_reloc_sym is shared among all maps, so free just
......@@ -867,7 +868,7 @@ int machines__create_kernel_maps(struct machines *machines, pid_t pid)
int machine__load_kallsyms(struct machine *machine, const char *filename,
enum map_type type, symbol_filter_t filter)
{
struct map *map = machine->vmlinux_maps[type];
struct map *map = machine__kernel_map(machine);
int ret = dso__load_kallsyms(map->dso, filename, map, filter);
if (ret > 0) {
......@@ -886,7 +887,7 @@ int machine__load_kallsyms(struct machine *machine, const char *filename,
int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
symbol_filter_t filter)
{
struct map *map = machine->vmlinux_maps[type];
struct map *map = machine__kernel_map(machine);
int ret = dso__load_vmlinux_path(map->dso, map, filter);
if (ret > 0)
......@@ -1244,8 +1245,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
/*
* preload dso of guest kernel and modules
*/
dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
NULL);
dso__load(kernel, machine__kernel_map(machine), NULL);
}
}
return 0;
......@@ -1997,7 +1997,7 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
int machine__get_kernel_start(struct machine *machine)
{
struct map *map = machine__kernel_map(machine, MAP__FUNCTION);
struct map *map = machine__kernel_map(machine);
int err = 0;
/*
......
......@@ -48,11 +48,17 @@ struct machine {
};
static inline
struct map *machine__kernel_map(struct machine *machine, enum map_type type)
struct map *__machine__kernel_map(struct machine *machine, enum map_type type)
{
return machine->vmlinux_maps[type];
}
static inline
struct map *machine__kernel_map(struct machine *machine)
{
return __machine__kernel_map(machine, MAP__FUNCTION);
}
int machine__get_kernel_start(struct machine *machine);
static inline u64 machine__kernel_start(struct machine *machine)
......
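A minimal sketch of the accessor split introduced above, with the perf structures reduced to stand-in types so it compiles on its own: machine__kernel_map() serves the common MAP__FUNCTION case, while __machine__kernel_map() keeps the type-indexed access for callers that iterate over map types.

#include <stdio.h>

enum map_type { MAP__FUNCTION = 0, MAP__VARIABLE, MAP__NR_TYPES };

struct map { const char *name; };

struct machine { struct map *vmlinux_maps[MAP__NR_TYPES]; };

/* mirrors the two inline accessors added to machine.h, simplified */
static inline struct map *__machine__kernel_map(struct machine *machine,
						enum map_type type)
{
	return machine->vmlinux_maps[type];
}

static inline struct map *machine__kernel_map(struct machine *machine)
{
	return __machine__kernel_map(machine, MAP__FUNCTION);
}

int main(void)
{
	struct map fmap = { "[kernel.kallsyms]" };
	struct machine m = { .vmlinux_maps = { &fmap, NULL } };

	printf("%s\n", machine__kernel_map(&m)->name);
	return 0;
}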
......@@ -235,7 +235,7 @@ struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
*/
bool __map__is_kernel(const struct map *map)
{
return map->groups->machine->vmlinux_maps[map->type] == map;
return __machine__kernel_map(map->groups->machine, map->type) == map;
}
static void map__exit(struct map *map)
......@@ -553,13 +553,9 @@ struct symbol *map_groups__find_symbol(struct map_groups *mg,
return NULL;
}
struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
enum map_type type,
const char *name,
struct map **mapp,
symbol_filter_t filter)
struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
struct map **mapp, symbol_filter_t filter)
{
struct maps *maps = &mg->maps[type];
struct symbol *sym;
struct rb_node *nd;
......@@ -583,6 +579,17 @@ struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
return sym;
}
struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
enum map_type type,
const char *name,
struct map **mapp,
symbol_filter_t filter)
{
struct symbol *sym = maps__find_symbol_by_name(&mg->maps[type], name, mapp, filter);
return sym;
}
int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter)
{
if (ams->addr < ams->map->start || ams->addr >= ams->map->end) {
......
......@@ -190,6 +190,8 @@ void maps__remove(struct maps *maps, struct map *map);
struct map *maps__find(struct maps *maps, u64 addr);
struct map *maps__first(struct maps *maps);
struct map *map__next(struct map *map);
struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
struct map **mapp, symbol_filter_t filter);
void map_groups__init(struct map_groups *mg, struct machine *machine);
void map_groups__exit(struct map_groups *mg);
int map_groups__clone(struct map_groups *mg,
......
......@@ -389,7 +389,7 @@ int parse_events_add_cache(struct list_head *list, int *idx,
return add_event(list, idx, &attr, name, NULL);
}
static void tracepoint_error(struct parse_events_error *error, int err,
static void tracepoint_error(struct parse_events_error *e, int err,
char *sys, char *name)
{
char help[BUFSIZ];
......@@ -402,30 +402,30 @@ static void tracepoint_error(struct parse_events_error *error, int err,
switch (err) {
case EACCES:
error->str = strdup("can't access trace events");
e->str = strdup("can't access trace events");
break;
case ENOENT:
error->str = strdup("unknown tracepoint");
e->str = strdup("unknown tracepoint");
break;
default:
error->str = strdup("failed to add tracepoint");
e->str = strdup("failed to add tracepoint");
break;
}
tracing_path__strerror_open_tp(err, help, sizeof(help), sys, name);
error->help = strdup(help);
e->help = strdup(help);
}
static int add_tracepoint(struct list_head *list, int *idx,
char *sys_name, char *evt_name,
struct parse_events_error *error __maybe_unused,
struct parse_events_error *err,
struct list_head *head_config)
{
struct perf_evsel *evsel;
evsel = perf_evsel__newtp_idx(sys_name, evt_name, (*idx)++);
if (IS_ERR(evsel)) {
tracepoint_error(error, PTR_ERR(evsel), sys_name, evt_name);
tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name);
return PTR_ERR(evsel);
}
......@@ -443,7 +443,7 @@ static int add_tracepoint(struct list_head *list, int *idx,
static int add_tracepoint_multi_event(struct list_head *list, int *idx,
char *sys_name, char *evt_name,
struct parse_events_error *error,
struct parse_events_error *err,
struct list_head *head_config)
{
char evt_path[MAXPATHLEN];
......@@ -454,7 +454,7 @@ static int add_tracepoint_multi_event(struct list_head *list, int *idx,
snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name);
evt_dir = opendir(evt_path);
if (!evt_dir) {
tracepoint_error(error, errno, sys_name, evt_name);
tracepoint_error(err, errno, sys_name, evt_name);
return -1;
}
......@@ -469,7 +469,7 @@ static int add_tracepoint_multi_event(struct list_head *list, int *idx,
continue;
ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name,
error, head_config);
err, head_config);
}
closedir(evt_dir);
......@@ -478,19 +478,19 @@ static int add_tracepoint_multi_event(struct list_head *list, int *idx,
static int add_tracepoint_event(struct list_head *list, int *idx,
char *sys_name, char *evt_name,
struct parse_events_error *error,
struct parse_events_error *err,
struct list_head *head_config)
{
return strpbrk(evt_name, "*?") ?
add_tracepoint_multi_event(list, idx, sys_name, evt_name,
error, head_config) :
err, head_config) :
add_tracepoint(list, idx, sys_name, evt_name,
error, head_config);
err, head_config);
}
static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
char *sys_name, char *evt_name,
struct parse_events_error *error,
struct parse_events_error *err,
struct list_head *head_config)
{
struct dirent *events_ent;
......@@ -499,7 +499,7 @@ static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
events_dir = opendir(tracing_events_path);
if (!events_dir) {
tracepoint_error(error, errno, sys_name, evt_name);
tracepoint_error(err, errno, sys_name, evt_name);
return -1;
}
......@@ -515,7 +515,7 @@ static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
continue;
ret = add_tracepoint_event(list, idx, events_ent->d_name,
evt_name, error, head_config);
evt_name, err, head_config);
}
closedir(events_dir);
......@@ -767,23 +767,23 @@ do { \
int parse_events_add_tracepoint(struct list_head *list, int *idx,
char *sys, char *event,
struct parse_events_error *error,
struct parse_events_error *err,
struct list_head *head_config)
{
if (head_config) {
struct perf_event_attr attr;
if (config_attr(&attr, head_config, error,
if (config_attr(&attr, head_config, err,
config_term_tracepoint))
return -EINVAL;
}
if (strpbrk(sys, "*?"))
return add_tracepoint_multi_sys(list, idx, sys, event,
error, head_config);
err, head_config);
else
return add_tracepoint_event(list, idx, sys, event,
error, head_config);
err, head_config);
}
int parse_events_add_numeric(struct parse_events_evlist *data,
......@@ -1534,7 +1534,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
printf(" %-50s [%s]\n", evt_list[evt_i++],
event_type_descriptors[PERF_TYPE_TRACEPOINT]);
}
if (evt_num)
if (evt_num && pager_in_use())
printf("\n");
out_free:
......@@ -1690,7 +1690,7 @@ int print_hwcache_events(const char *event_glob, bool name_only)
printf(" %-50s [%s]\n", evt_list[evt_i++],
event_type_descriptors[PERF_TYPE_HW_CACHE]);
}
if (evt_num)
if (evt_num && pager_in_use())
printf("\n");
out_free:
......@@ -1763,7 +1763,7 @@ void print_symbol_events(const char *event_glob, unsigned type,
}
printf(" %-50s [%s]\n", evt_list[evt_i++], event_type_descriptors[type]);
}
if (evt_num)
if (evt_num && pager_in_use())
printf("\n");
out_free:
......@@ -1804,13 +1804,14 @@ void print_events(const char *event_glob, bool name_only)
printf(" %-50s [%s]\n",
"cpu/t1=v1[,t2=v2,t3 ...]/modifier",
event_type_descriptors[PERF_TYPE_RAW]);
printf(" (see 'man perf-list' on how to encode it)\n");
printf("\n");
if (pager_in_use())
printf(" (see 'man perf-list' on how to encode it)\n\n");
printf(" %-50s [%s]\n",
"mem:<addr>[/len][:access]",
event_type_descriptors[PERF_TYPE_BREAKPOINT]);
printf("\n");
if (pager_in_use())
printf("\n");
}
print_tracepoint_events(NULL, NULL, name_only);
......
......@@ -6,6 +6,7 @@ const struct sample_reg __weak sample_reg_masks[] = {
SMPL_REG_END
};
#ifdef HAVE_PERF_REGS_SUPPORT
int perf_reg_value(u64 *valp, struct regs_dump *regs, int id)
{
int i, idx = 0;
......@@ -29,3 +30,4 @@ int perf_reg_value(u64 *valp, struct regs_dump *regs, int id)
*valp = regs->cache_regs[id];
return 0;
}
#endif
......@@ -2,6 +2,7 @@
#define __PERF_REGS_H
#include <linux/types.h>
#include <linux/compiler.h>
struct regs_dump;
......
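Illustrative sketch, not from the patch: the __weak pattern behind sample_reg_masks that the Build and #ifdef changes above preserve on architectures without HAVE_PERF_REGS_SUPPORT. The struct layout here is a simplified stand-in; the point is that a weak default table links everywhere and an arch object file can override it with a strong definition.

#include <stdio.h>

struct sample_reg {
	const char *name;
	unsigned long mask;
};

#define SMPL_REG_END { .name = NULL }

/* weak default: empty table, overridden if an arch provides its own */
__attribute__((weak)) const struct sample_reg sample_reg_masks[] = {
	SMPL_REG_END,
};

int main(void)
{
	const struct sample_reg *r;

	for (r = sample_reg_masks; r->name; r++)
		printf("%s: %#lx\n", r->name, r->mask);
	return 0;
}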
......@@ -1026,7 +1026,7 @@ void print_pmu_events(const char *event_glob, bool name_only)
printf(" %-50s [Kernel PMU event]\n", aliases[j]);
printed++;
}
if (printed)
if (printed && pager_in_use())
printf("\n");
out_free:
for (j = 0; j < len; j++)
......
......@@ -126,17 +126,19 @@ static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void)
{
/* kmap->ref_reloc_sym should be set if host_machine is initialized */
struct kmap *kmap;
struct map *map = machine__kernel_map(host_machine);
if (map__load(host_machine->vmlinux_maps[MAP__FUNCTION], NULL) < 0)
if (map__load(map, NULL) < 0)
return NULL;
kmap = map__kmap(host_machine->vmlinux_maps[MAP__FUNCTION]);
kmap = map__kmap(map);
if (!kmap)
return NULL;
return kmap->ref_reloc_sym;
}
static u64 kernel_get_symbol_address_by_name(const char *name, bool reloc)
static int kernel_get_symbol_address_by_name(const char *name, u64 *addr,
bool reloc, bool reladdr)
{
struct ref_reloc_sym *reloc_sym;
struct symbol *sym;
......@@ -145,12 +147,14 @@ static u64 kernel_get_symbol_address_by_name(const char *name, bool reloc)
/* ref_reloc_sym is just a label. Need a special fix*/
reloc_sym = kernel_get_ref_reloc_sym();
if (reloc_sym && strcmp(name, reloc_sym->name) == 0)
return (reloc) ? reloc_sym->addr : reloc_sym->unrelocated_addr;
*addr = (reloc) ? reloc_sym->addr : reloc_sym->unrelocated_addr;
else {
sym = __find_kernel_function_by_name(name, &map);
if (sym)
return map->unmap_ip(map, sym->start) -
((reloc) ? 0 : map->reloc);
if (!sym)
return -ENOENT;
*addr = map->unmap_ip(map, sym->start) -
((reloc) ? 0 : map->reloc) -
((reladdr) ? map->start : 0);
}
return 0;
}
......@@ -244,12 +248,14 @@ static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs)
static bool kprobe_blacklist__listed(unsigned long address);
static bool kprobe_warn_out_range(const char *symbol, unsigned long address)
{
u64 etext_addr;
u64 etext_addr = 0;
int ret;
/* Get the address of _etext for checking non-probable text symbol */
etext_addr = kernel_get_symbol_address_by_name("_etext", false);
ret = kernel_get_symbol_address_by_name("_etext", &etext_addr,
false, false);
if (etext_addr != 0 && etext_addr < address)
if (ret == 0 && etext_addr < address)
pr_warning("%s is out of .text, skip it.\n", symbol);
else if (kprobe_blacklist__listed(address))
pr_warning("%s is blacklisted function, skip it.\n", symbol);
......@@ -281,7 +287,7 @@ static int kernel_get_module_dso(const char *module, struct dso **pdso)
return -ENOENT;
}
map = host_machine->vmlinux_maps[MAP__FUNCTION];
map = machine__kernel_map(host_machine);
dso = map->dso;
vmlinux_name = symbol_conf.vmlinux_name;
......@@ -435,19 +441,22 @@ static char *debuginfo_cache_path;
static struct debuginfo *debuginfo_cache__open(const char *module, bool silent)
{
if ((debuginfo_cache_path && !strcmp(debuginfo_cache_path, module)) ||
(!debuginfo_cache_path && !module && debuginfo_cache))
const char *path = module;
/* If the module is NULL, it should be the kernel. */
if (!module)
path = "kernel";
if (debuginfo_cache_path && !strcmp(debuginfo_cache_path, path))
goto out;
/* Copy module path */
free(debuginfo_cache_path);
if (module) {
debuginfo_cache_path = strdup(module);
if (!debuginfo_cache_path) {
debuginfo__delete(debuginfo_cache);
debuginfo_cache = NULL;
goto out;
}
debuginfo_cache_path = strdup(path);
if (!debuginfo_cache_path) {
debuginfo__delete(debuginfo_cache);
debuginfo_cache = NULL;
goto out;
}
debuginfo_cache = open_debuginfo(module, silent);
......@@ -516,8 +525,10 @@ static int find_perf_probe_point_from_dwarf(struct probe_trace_point *tp,
goto error;
addr += stext;
} else if (tp->symbol) {
addr = kernel_get_symbol_address_by_name(tp->symbol, false);
if (addr == 0)
/* If the module is given, this returns relative address */
ret = kernel_get_symbol_address_by_name(tp->symbol, &addr,
false, !!tp->module);
if (ret != 0)
goto error;
addr += tp->offset;
}
......@@ -1883,8 +1894,12 @@ static int find_perf_probe_point_from_map(struct probe_trace_point *tp,
goto out;
sym = map__find_symbol(map, addr, NULL);
} else {
if (tp->symbol)
addr = kernel_get_symbol_address_by_name(tp->symbol, true);
if (tp->symbol && !addr) {
ret = kernel_get_symbol_address_by_name(tp->symbol,
&addr, true, false);
if (ret < 0)
goto out;
}
if (addr) {
addr += tp->offset;
sym = __find_kernel_function(addr, &map);
......@@ -2288,36 +2303,41 @@ static int get_new_event_name(char *buf, size_t len, const char *base,
struct strlist *namelist, bool allow_suffix)
{
int i, ret;
char *p;
char *p, *nbase;
if (*base == '.')
base++;
nbase = strdup(base);
if (!nbase)
return -ENOMEM;
/* Cut off the dot suffixes (e.g. .const, .isra)*/
p = strchr(nbase, '.');
if (p && p != nbase)
*p = '\0';
/* Try no suffix */
ret = e_snprintf(buf, len, "%s", base);
/* Try no suffix number */
ret = e_snprintf(buf, len, "%s", nbase);
if (ret < 0) {
pr_debug("snprintf() failed: %d\n", ret);
return ret;
goto out;
}
/* Cut off the postfixes (e.g. .const, .isra)*/
p = strchr(buf, '.');
if (p && p != buf)
*p = '\0';
if (!strlist__has_entry(namelist, buf))
return 0;
goto out;
if (!allow_suffix) {
pr_warning("Error: event \"%s\" already exists. "
"(Use -f to force duplicates.)\n", base);
return -EEXIST;
"(Use -f to force duplicates.)\n", buf);
ret = -EEXIST;
goto out;
}
/* Try to add suffix */
for (i = 1; i < MAX_EVENT_INDEX; i++) {
ret = e_snprintf(buf, len, "%s_%d", base, i);
ret = e_snprintf(buf, len, "%s_%d", nbase, i);
if (ret < 0) {
pr_debug("snprintf() failed: %d\n", ret);
return ret;
goto out;
}
if (!strlist__has_entry(namelist, buf))
break;
......@@ -2327,6 +2347,8 @@ static int get_new_event_name(char *buf, size_t len, const char *base,
ret = -ERANGE;
}
out:
free(nbase);
return ret;
}
......
......@@ -70,6 +70,7 @@ static int debuginfo__init_offline_dwarf(struct debuginfo *dbg,
if (!dbg->dwfl)
goto error;
dwfl_report_begin(dbg->dwfl);
dbg->mod = dwfl_report_offline(dbg->dwfl, "", "", fd);
if (!dbg->mod)
goto error;
......@@ -78,6 +79,8 @@ static int debuginfo__init_offline_dwarf(struct debuginfo *dbg,
if (!dbg->dbg)
goto error;
dwfl_report_end(dbg->dwfl, NULL, NULL);
return 0;
error:
if (dbg->dwfl)
......@@ -591,6 +594,7 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf)
/* Convert subprogram DIE to trace point */
static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
Dwarf_Addr paddr, bool retprobe,
const char *function,
struct probe_trace_point *tp)
{
Dwarf_Addr eaddr, highaddr;
......@@ -634,8 +638,10 @@ static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
/* Return probe must be on the head of a subprogram */
if (retprobe) {
if (eaddr != paddr) {
pr_warning("Return probe must be on the head of"
" a real function.\n");
pr_warning("Failed to find \"%s%%return\",\n"
" because %s is an inlined function and"
" has no return point.\n", function,
function);
return -EINVAL;
}
tp->retprobe = true;
......@@ -1175,6 +1181,7 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
{
struct trace_event_finder *tf =
container_of(pf, struct trace_event_finder, pf);
struct perf_probe_point *pp = &pf->pev->point;
struct probe_trace_event *tev;
struct perf_probe_arg *args;
int ret, i;
......@@ -1189,7 +1196,7 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
/* Trace point should be converted from subprogram DIE */
ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr,
pf->pev->point.retprobe, &tev->point);
pp->retprobe, pp->function, &tev->point);
if (ret < 0)
return ret;
......@@ -1319,6 +1326,7 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
{
struct available_var_finder *af =
container_of(pf, struct available_var_finder, pf);
struct perf_probe_point *pp = &pf->pev->point;
struct variable_list *vl;
Dwarf_Die die_mem;
int ret;
......@@ -1332,7 +1340,7 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
/* Trace point should be converted from subprogram DIE */
ret = convert_to_trace_point(&pf->sp_die, af->mod, pf->addr,
pf->pev->point.retprobe, &vl->point);
pp->retprobe, pp->function, &vl->point);
if (ret < 0)
return ret;
......@@ -1399,6 +1407,41 @@ int debuginfo__find_available_vars_at(struct debuginfo *dbg,
return (ret < 0) ? ret : af.nvls;
}
/* For the kernel module, we need a special code to get a DIE */
static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs)
{
int n, i;
Elf32_Word shndx;
Elf_Scn *scn;
Elf *elf;
GElf_Shdr mem, *shdr;
const char *p;
elf = dwfl_module_getelf(dbg->mod, &dbg->bias);
if (!elf)
return -EINVAL;
/* Get the number of relocations */
n = dwfl_module_relocations(dbg->mod);
if (n < 0)
return -ENOENT;
/* Search the relocation related .text section */
for (i = 0; i < n; i++) {
p = dwfl_module_relocation_info(dbg->mod, i, &shndx);
if (strcmp(p, ".text") == 0) {
/* OK, get the section header */
scn = elf_getscn(elf, shndx);
if (!scn)
return -ENOENT;
shdr = gelf_getshdr(scn, &mem);
if (!shdr)
return -ENOENT;
*offs = shdr->sh_addr;
}
}
return 0;
}
/* Reverse search */
int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
struct perf_probe_point *ppt)
......@@ -1407,9 +1450,16 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
Dwarf_Addr _addr = 0, baseaddr = 0;
const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp;
int baseline = 0, lineno = 0, ret = 0;
bool reloc = false;
retry:
/* Find cu die */
if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) {
if (!reloc && debuginfo__get_text_offset(dbg, &baseaddr) == 0) {
addr += baseaddr;
reloc = true;
goto retry;
}
pr_warning("Failed to find debug information for address %lx\n",
addr);
ret = -EINVAL;
......
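For context, an illustrative standalone helper, not code from the patch: it prints the .text sh_addr of a module object using libelf only, i.e. the offset that debuginfo__get_text_offset() above feeds into the dwarf_addrdie() retry so that probes listed for kernel modules resolve correctly. The text-offset.c file name is made up; build with "cc -o text-offset text-offset.c -lelf".

#include <fcntl.h>
#include <gelf.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	Elf_Scn *scn = NULL;
	GElf_Shdr shdr;
	size_t shstrndx;
	Elf *elf;
	int fd;

	if (argc != 2 || elf_version(EV_CURRENT) == EV_NONE)
		return 1;

	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;

	elf = elf_begin(fd, ELF_C_READ, NULL);
	if (!elf || elf_getshdrstrndx(elf, &shstrndx))
		return 1;

	/* walk the section headers looking for .text */
	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		const char *name;

		if (!gelf_getshdr(scn, &shdr))
			continue;
		name = elf_strptr(elf, shstrndx, shdr.sh_name);
		if (name && !strcmp(name, ".text"))
			printf(".text sh_addr: %#llx\n",
			       (unsigned long long)shdr.sh_addr);
	}

	elf_end(elf);
	close(fd);
	return 0;
}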
......@@ -624,7 +624,7 @@ static int map__process_kallsym_symbol(void *arg, const char *name,
* symbols, setting length to 0, and rely on
* symbols__fixup_end() to fix it up.
*/
sym = symbol__new(start, 0, kallsyms2elf_type(type), name);
sym = symbol__new(start, 0, kallsyms2elf_binding(type), name);
if (sym == NULL)
return -ENOMEM;
/*
......