Commit 42c4fb77 authored by Ingo Molnar's avatar Ingo Molnar

Merge tag 'perf-core-for-mingo-20160530' of...

Merge tag 'perf-core-for-mingo-20160530' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

User visible/kernel ABI changes:

- Per event callchain limit: Recently we introduced a sysctl to tune the
  max-stack for all events for which callchains were requested:

  $ sysctl kernel.perf_event_max_stack
  kernel.perf_event_max_stack = 127

  Now this patch introduces a way to configure this per event, i.e. this
  becomes possible:

  $ perf record -e sched:*/max-stack=2/ -e block:*/max-stack=10/ -a

  allowing finer tuning of how much buffer space callchains use.

  This uses a u16 from the reserved space at the end, leaving another
  u16 for future use.

  There has been interest in even finer tuning, namely to control the
  max stack for kernel and userspace callchains separately. Further
  discussion is needed, we may for instance use the remaining u16 for
  that and when it is present, assume that the sample_max_stack introduced
  in this patch applies for the kernel, and the u16 left is used for
  limiting the userspace callchain. (Arnaldo Carvalho de Melo)

Infrastructure changes:

- Adopt get_main_thread from db-export.c (Andi Kleen)

- More prep work for backward ring buffer support (Wang Nan)

- Prep work for supporting SDT (Statically Defined Tracing)
  tracepoints (Masami Hiramatsu)

- Add arch/*/include/generated/ to .gitignore (Taeung Song)
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 71146051 01412261
...@@ -1076,7 +1076,7 @@ extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct ...@@ -1076,7 +1076,7 @@ extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct
extern struct perf_callchain_entry * extern struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
u32 max_stack, bool crosstask, bool add_mark); u32 max_stack, bool crosstask, bool add_mark);
extern int get_callchain_buffers(void); extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void); extern void put_callchain_buffers(void);
extern int sysctl_perf_event_max_stack; extern int sysctl_perf_event_max_stack;
......
...@@ -276,6 +276,9 @@ enum perf_event_read_format { ...@@ -276,6 +276,9 @@ enum perf_event_read_format {
/* /*
* Hardware event_id to monitor via a performance monitoring event: * Hardware event_id to monitor via a performance monitoring event:
*
* @sample_max_stack: Max number of frame pointers in a callchain,
* should be < /proc/sys/kernel/perf_event_max_stack
*/ */
struct perf_event_attr { struct perf_event_attr {
...@@ -385,7 +388,8 @@ struct perf_event_attr { ...@@ -385,7 +388,8 @@ struct perf_event_attr {
* Wakeup watermark for AUX area * Wakeup watermark for AUX area
*/ */
__u32 aux_watermark; __u32 aux_watermark;
__u32 __reserved_2; /* align to __u64 */ __u16 sample_max_stack;
__u16 __reserved_2; /* align to __u64 */
}; };
#define perf_flags(attr) (*(&(attr)->read_format + 1)) #define perf_flags(attr) (*(&(attr)->read_format + 1))
......
...@@ -99,7 +99,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr) ...@@ -99,7 +99,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
if (err) if (err)
goto free_smap; goto free_smap;
err = get_callchain_buffers(); err = get_callchain_buffers(sysctl_perf_event_max_stack);
if (err) if (err)
goto free_smap; goto free_smap;
......
...@@ -104,7 +104,7 @@ static int alloc_callchain_buffers(void) ...@@ -104,7 +104,7 @@ static int alloc_callchain_buffers(void)
return -ENOMEM; return -ENOMEM;
} }
int get_callchain_buffers(void) int get_callchain_buffers(int event_max_stack)
{ {
int err = 0; int err = 0;
int count; int count;
...@@ -121,6 +121,15 @@ int get_callchain_buffers(void) ...@@ -121,6 +121,15 @@ int get_callchain_buffers(void)
/* If the allocation failed, give up */ /* If the allocation failed, give up */
if (!callchain_cpus_entries) if (!callchain_cpus_entries)
err = -ENOMEM; err = -ENOMEM;
/*
* If requesting per event more than the global cap,
* return a different error to help userspace figure
* this out.
*
* And also do it here so that we have &callchain_mutex held.
*/
if (event_max_stack > sysctl_perf_event_max_stack)
err = -EOVERFLOW;
goto exit; goto exit;
} }
...@@ -174,11 +183,12 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs) ...@@ -174,11 +183,12 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
bool user = !event->attr.exclude_callchain_user; bool user = !event->attr.exclude_callchain_user;
/* Disallow cross-task user callchains. */ /* Disallow cross-task user callchains. */
bool crosstask = event->ctx->task && event->ctx->task != current; bool crosstask = event->ctx->task && event->ctx->task != current;
const u32 max_stack = event->attr.sample_max_stack;
if (!kernel && !user) if (!kernel && !user)
return NULL; return NULL;
return get_perf_callchain(regs, 0, kernel, user, sysctl_perf_event_max_stack, crosstask, true); return get_perf_callchain(regs, 0, kernel, user, max_stack, crosstask, true);
} }
struct perf_callchain_entry * struct perf_callchain_entry *
......
...@@ -8843,7 +8843,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, ...@@ -8843,7 +8843,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
if (!event->parent) { if (!event->parent) {
if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
err = get_callchain_buffers(); err = get_callchain_buffers(attr->sample_max_stack);
if (err) if (err)
goto err_addr_filters; goto err_addr_filters;
} }
...@@ -9165,6 +9165,9 @@ SYSCALL_DEFINE5(perf_event_open, ...@@ -9165,6 +9165,9 @@ SYSCALL_DEFINE5(perf_event_open,
return -EINVAL; return -EINVAL;
} }
if (!attr.sample_max_stack)
attr.sample_max_stack = sysctl_perf_event_max_stack;
/* /*
* In cgroup mode, the pid argument is used to pass the fd * In cgroup mode, the pid argument is used to pass the fd
* opened to the cgroup directory in cgroupfs. The cpu argument * opened to the cgroup directory in cgroupfs. The cpu argument
......
...@@ -85,7 +85,8 @@ int fdarray__add(struct fdarray *fda, int fd, short revents) ...@@ -85,7 +85,8 @@ int fdarray__add(struct fdarray *fda, int fd, short revents)
} }
int fdarray__filter(struct fdarray *fda, short revents, int fdarray__filter(struct fdarray *fda, short revents,
void (*entry_destructor)(struct fdarray *fda, int fd)) void (*entry_destructor)(struct fdarray *fda, int fd, void *arg),
void *arg)
{ {
int fd, nr = 0; int fd, nr = 0;
...@@ -95,7 +96,7 @@ int fdarray__filter(struct fdarray *fda, short revents, ...@@ -95,7 +96,7 @@ int fdarray__filter(struct fdarray *fda, short revents,
for (fd = 0; fd < fda->nr; ++fd) { for (fd = 0; fd < fda->nr; ++fd) {
if (fda->entries[fd].revents & revents) { if (fda->entries[fd].revents & revents) {
if (entry_destructor) if (entry_destructor)
entry_destructor(fda, fd); entry_destructor(fda, fd, arg);
continue; continue;
} }
......
...@@ -34,7 +34,8 @@ void fdarray__delete(struct fdarray *fda); ...@@ -34,7 +34,8 @@ void fdarray__delete(struct fdarray *fda);
int fdarray__add(struct fdarray *fda, int fd, short revents); int fdarray__add(struct fdarray *fda, int fd, short revents);
int fdarray__poll(struct fdarray *fda, int timeout); int fdarray__poll(struct fdarray *fda, int timeout);
int fdarray__filter(struct fdarray *fda, short revents, int fdarray__filter(struct fdarray *fda, short revents,
void (*entry_destructor)(struct fdarray *fda, int fd)); void (*entry_destructor)(struct fdarray *fda, int fd, void *arg),
void *arg);
int fdarray__grow(struct fdarray *fda, int extra); int fdarray__grow(struct fdarray *fda, int extra);
int fdarray__fprintf(struct fdarray *fda, FILE *fp); int fdarray__fprintf(struct fdarray *fda, FILE *fp);
......
...@@ -30,3 +30,4 @@ config.mak.autogen ...@@ -30,3 +30,4 @@ config.mak.autogen
*.pyo *.pyo
.config-detected .config-detected
util/intel-pt-decoder/inat-tables.c util/intel-pt-decoder/inat-tables.c
arch/*/include/generated/
...@@ -62,6 +62,8 @@ int perf_event__synth_time_conv(const struct perf_event_mmap_page *pc, ...@@ -62,6 +62,8 @@ int perf_event__synth_time_conv(const struct perf_event_mmap_page *pc,
struct perf_tsc_conversion tc; struct perf_tsc_conversion tc;
int err; int err;
if (!pc)
return 0;
err = perf_read_tsc_conversion(pc, &tc); err = perf_read_tsc_conversion(pc, &tc);
if (err == -EOPNOTSUPP) if (err == -EOPNOTSUPP)
return 0; return 0;
......
...@@ -655,6 +655,13 @@ perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused ...@@ -655,6 +655,13 @@ perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused
return 0; return 0;
} }
static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
{
if (rec->evlist && rec->evlist->mmap && rec->evlist->mmap[0].base)
return rec->evlist->mmap[0].base;
return NULL;
}
static int record__synthesize(struct record *rec) static int record__synthesize(struct record *rec)
{ {
struct perf_session *session = rec->session; struct perf_session *session = rec->session;
...@@ -692,7 +699,7 @@ static int record__synthesize(struct record *rec) ...@@ -692,7 +699,7 @@ static int record__synthesize(struct record *rec)
} }
} }
err = perf_event__synth_time_conv(rec->evlist->mmap[0].base, tool, err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
process_synthesized_event, machine); process_synthesized_event, machine);
if (err) if (err)
goto out; goto out;
......
...@@ -36,7 +36,7 @@ int test__fdarray__filter(int subtest __maybe_unused) ...@@ -36,7 +36,7 @@ int test__fdarray__filter(int subtest __maybe_unused)
} }
fdarray__init_revents(fda, POLLIN); fdarray__init_revents(fda, POLLIN);
nr_fds = fdarray__filter(fda, POLLHUP, NULL); nr_fds = fdarray__filter(fda, POLLHUP, NULL, NULL);
if (nr_fds != fda->nr_alloc) { if (nr_fds != fda->nr_alloc) {
pr_debug("\nfdarray__filter()=%d != %d shouldn't have filtered anything", pr_debug("\nfdarray__filter()=%d != %d shouldn't have filtered anything",
nr_fds, fda->nr_alloc); nr_fds, fda->nr_alloc);
...@@ -44,7 +44,7 @@ int test__fdarray__filter(int subtest __maybe_unused) ...@@ -44,7 +44,7 @@ int test__fdarray__filter(int subtest __maybe_unused)
} }
fdarray__init_revents(fda, POLLHUP); fdarray__init_revents(fda, POLLHUP);
nr_fds = fdarray__filter(fda, POLLHUP, NULL); nr_fds = fdarray__filter(fda, POLLHUP, NULL, NULL);
if (nr_fds != 0) { if (nr_fds != 0) {
pr_debug("\nfdarray__filter()=%d != %d, should have filtered all fds", pr_debug("\nfdarray__filter()=%d != %d, should have filtered all fds",
nr_fds, fda->nr_alloc); nr_fds, fda->nr_alloc);
...@@ -57,7 +57,7 @@ int test__fdarray__filter(int subtest __maybe_unused) ...@@ -57,7 +57,7 @@ int test__fdarray__filter(int subtest __maybe_unused)
pr_debug("\nfiltering all but fda->entries[2]:"); pr_debug("\nfiltering all but fda->entries[2]:");
fdarray__fprintf_prefix(fda, "before", stderr); fdarray__fprintf_prefix(fda, "before", stderr);
nr_fds = fdarray__filter(fda, POLLHUP, NULL); nr_fds = fdarray__filter(fda, POLLHUP, NULL, NULL);
fdarray__fprintf_prefix(fda, " after", stderr); fdarray__fprintf_prefix(fda, " after", stderr);
if (nr_fds != 1) { if (nr_fds != 1) {
pr_debug("\nfdarray__filter()=%d != 1, should have left just one event", nr_fds); pr_debug("\nfdarray__filter()=%d != 1, should have left just one event", nr_fds);
...@@ -78,7 +78,7 @@ int test__fdarray__filter(int subtest __maybe_unused) ...@@ -78,7 +78,7 @@ int test__fdarray__filter(int subtest __maybe_unused)
pr_debug("\nfiltering all but (fda->entries[0], fda->entries[3]):"); pr_debug("\nfiltering all but (fda->entries[0], fda->entries[3]):");
fdarray__fprintf_prefix(fda, "before", stderr); fdarray__fprintf_prefix(fda, "before", stderr);
nr_fds = fdarray__filter(fda, POLLHUP, NULL); nr_fds = fdarray__filter(fda, POLLHUP, NULL, NULL);
fdarray__fprintf_prefix(fda, " after", stderr); fdarray__fprintf_prefix(fda, " after", stderr);
if (nr_fds != 2) { if (nr_fds != 2) {
pr_debug("\nfdarray__filter()=%d != 2, should have left just two events", pr_debug("\nfdarray__filter()=%d != 2, should have left just two events",
......
...@@ -144,7 +144,32 @@ static int asnprintf(char **strp, size_t size, const char *fmt, ...) ...@@ -144,7 +144,32 @@ static int asnprintf(char **strp, size_t size, const char *fmt, ...)
return ret; return ret;
} }
static char *build_id__filename(const char *sbuild_id, char *bf, size_t size) char *build_id_cache__kallsyms_path(const char *sbuild_id, char *bf,
size_t size)
{
bool is_alloc = !!bf;
bool retry_old = true;
asnprintf(&bf, size, "%s/%s/%s/kallsyms",
buildid_dir, DSO__NAME_KALLSYMS, sbuild_id);
retry:
if (!access(bf, F_OK))
return bf;
if (is_alloc)
free(bf);
if (retry_old) {
/* Try old style kallsyms cache */
asnprintf(&bf, size, "%s/%s/%s",
buildid_dir, DSO__NAME_KALLSYMS, sbuild_id);
retry_old = false;
goto retry;
}
return NULL;
}
static char *build_id_cache__linkname(const char *sbuild_id, char *bf,
size_t size)
{ {
char *tmp = bf; char *tmp = bf;
int ret = asnprintf(&bf, size, "%s/.build-id/%.2s/%s", buildid_dir, int ret = asnprintf(&bf, size, "%s/.build-id/%.2s/%s", buildid_dir,
...@@ -154,23 +179,52 @@ static char *build_id__filename(const char *sbuild_id, char *bf, size_t size) ...@@ -154,23 +179,52 @@ static char *build_id__filename(const char *sbuild_id, char *bf, size_t size)
return bf; return bf;
} }
static const char *build_id_cache__basename(bool is_kallsyms, bool is_vdso)
{
return is_kallsyms ? "kallsyms" : (is_vdso ? "vdso" : "elf");
}
char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size) char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size)
{ {
char build_id_hex[SBUILD_ID_SIZE]; bool is_kallsyms = dso__is_kallsyms((struct dso *)dso);
bool is_vdso = dso__is_vdso((struct dso *)dso);
char sbuild_id[SBUILD_ID_SIZE];
char *linkname;
bool alloc = (bf == NULL);
int ret;
if (!dso->has_build_id) if (!dso->has_build_id)
return NULL; return NULL;
build_id__sprintf(dso->build_id, sizeof(dso->build_id), build_id_hex); build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
return build_id__filename(build_id_hex, bf, size); linkname = build_id_cache__linkname(sbuild_id, NULL, 0);
if (!linkname)
return NULL;
/* Check if old style build_id cache */
if (is_regular_file(linkname))
ret = asnprintf(&bf, size, "%s", linkname);
else
ret = asnprintf(&bf, size, "%s/%s", linkname,
build_id_cache__basename(is_kallsyms, is_vdso));
if (ret < 0 || (!alloc && size < (unsigned int)ret))
bf = NULL;
free(linkname);
return bf;
} }
bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size) bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size)
{ {
char *id_name, *ch; char *id_name = NULL, *ch;
struct stat sb; struct stat sb;
char sbuild_id[SBUILD_ID_SIZE];
if (!dso->has_build_id)
goto err;
id_name = dso__build_id_filename(dso, bf, size); build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
id_name = build_id_cache__linkname(sbuild_id, NULL, 0);
if (!id_name) if (!id_name)
goto err; goto err;
if (access(id_name, F_OK)) if (access(id_name, F_OK))
...@@ -194,18 +248,14 @@ bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size) ...@@ -194,18 +248,14 @@ bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size)
if (ch - 3 < bf) if (ch - 3 < bf)
goto err; goto err;
free(id_name);
return strncmp(".ko", ch - 3, 3) == 0; return strncmp(".ko", ch - 3, 3) == 0;
err: err:
/*
* If dso__build_id_filename work, get id_name again,
* because id_name points to bf and is broken.
*/
if (id_name)
id_name = dso__build_id_filename(dso, bf, size);
pr_err("Invalid build id: %s\n", id_name ? : pr_err("Invalid build id: %s\n", id_name ? :
dso->long_name ? : dso->long_name ? :
dso->short_name ? : dso->short_name ? :
"[unknown]"); "[unknown]");
free(id_name);
return false; return false;
} }
...@@ -341,7 +391,8 @@ void disable_buildid_cache(void) ...@@ -341,7 +391,8 @@ void disable_buildid_cache(void)
} }
static char *build_id_cache__dirname_from_path(const char *name, static char *build_id_cache__dirname_from_path(const char *name,
bool is_kallsyms, bool is_vdso) bool is_kallsyms, bool is_vdso,
const char *sbuild_id)
{ {
char *realname = (char *)name, *filename; char *realname = (char *)name, *filename;
bool slash = is_kallsyms || is_vdso; bool slash = is_kallsyms || is_vdso;
...@@ -352,8 +403,9 @@ static char *build_id_cache__dirname_from_path(const char *name, ...@@ -352,8 +403,9 @@ static char *build_id_cache__dirname_from_path(const char *name,
return NULL; return NULL;
} }
if (asprintf(&filename, "%s%s%s", buildid_dir, slash ? "/" : "", if (asprintf(&filename, "%s%s%s%s%s", buildid_dir, slash ? "/" : "",
is_vdso ? DSO__NAME_VDSO : realname) < 0) is_vdso ? DSO__NAME_VDSO : realname,
sbuild_id ? "/" : "", sbuild_id ?: "") < 0)
filename = NULL; filename = NULL;
if (!slash) if (!slash)
...@@ -368,7 +420,8 @@ int build_id_cache__list_build_ids(const char *pathname, ...@@ -368,7 +420,8 @@ int build_id_cache__list_build_ids(const char *pathname,
char *dir_name; char *dir_name;
int ret = 0; int ret = 0;
dir_name = build_id_cache__dirname_from_path(pathname, false, false); dir_name = build_id_cache__dirname_from_path(pathname, false, false,
NULL);
if (!dir_name) if (!dir_name)
return -ENOMEM; return -ENOMEM;
...@@ -385,7 +438,7 @@ int build_id_cache__add_s(const char *sbuild_id, const char *name, ...@@ -385,7 +438,7 @@ int build_id_cache__add_s(const char *sbuild_id, const char *name,
{ {
const size_t size = PATH_MAX; const size_t size = PATH_MAX;
char *realname = NULL, *filename = NULL, *dir_name = NULL, char *realname = NULL, *filename = NULL, *dir_name = NULL,
*linkname = zalloc(size), *targetname, *tmp; *linkname = zalloc(size), *tmp;
int err = -1; int err = -1;
if (!is_kallsyms) { if (!is_kallsyms) {
...@@ -394,14 +447,22 @@ int build_id_cache__add_s(const char *sbuild_id, const char *name, ...@@ -394,14 +447,22 @@ int build_id_cache__add_s(const char *sbuild_id, const char *name,
goto out_free; goto out_free;
} }
dir_name = build_id_cache__dirname_from_path(name, is_kallsyms, is_vdso); dir_name = build_id_cache__dirname_from_path(name, is_kallsyms,
is_vdso, sbuild_id);
if (!dir_name) if (!dir_name)
goto out_free; goto out_free;
/* Remove old style build-id cache */
if (is_regular_file(dir_name))
if (unlink(dir_name))
goto out_free;
if (mkdir_p(dir_name, 0755)) if (mkdir_p(dir_name, 0755))
goto out_free; goto out_free;
if (asprintf(&filename, "%s/%s", dir_name, sbuild_id) < 0) { /* Save the allocated buildid dirname */
if (asprintf(&filename, "%s/%s", dir_name,
build_id_cache__basename(is_kallsyms, is_vdso)) < 0) {
filename = NULL; filename = NULL;
goto out_free; goto out_free;
} }
...@@ -415,7 +476,7 @@ int build_id_cache__add_s(const char *sbuild_id, const char *name, ...@@ -415,7 +476,7 @@ int build_id_cache__add_s(const char *sbuild_id, const char *name,
goto out_free; goto out_free;
} }
if (!build_id__filename(sbuild_id, linkname, size)) if (!build_id_cache__linkname(sbuild_id, linkname, size))
goto out_free; goto out_free;
tmp = strrchr(linkname, '/'); tmp = strrchr(linkname, '/');
*tmp = '\0'; *tmp = '\0';
...@@ -424,10 +485,10 @@ int build_id_cache__add_s(const char *sbuild_id, const char *name, ...@@ -424,10 +485,10 @@ int build_id_cache__add_s(const char *sbuild_id, const char *name,
goto out_free; goto out_free;
*tmp = '/'; *tmp = '/';
targetname = filename + strlen(buildid_dir) - 5; tmp = dir_name + strlen(buildid_dir) - 5;
memcpy(targetname, "../..", 5); memcpy(tmp, "../..", 5);
if (symlink(targetname, linkname) == 0) if (symlink(tmp, linkname) == 0)
err = 0; err = 0;
out_free: out_free:
if (!is_kallsyms) if (!is_kallsyms)
...@@ -452,7 +513,7 @@ static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size, ...@@ -452,7 +513,7 @@ static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
bool build_id_cache__cached(const char *sbuild_id) bool build_id_cache__cached(const char *sbuild_id)
{ {
bool ret = false; bool ret = false;
char *filename = build_id__filename(sbuild_id, NULL, 0); char *filename = build_id_cache__linkname(sbuild_id, NULL, 0);
if (filename && !access(filename, F_OK)) if (filename && !access(filename, F_OK))
ret = true; ret = true;
...@@ -471,7 +532,7 @@ int build_id_cache__remove_s(const char *sbuild_id) ...@@ -471,7 +532,7 @@ int build_id_cache__remove_s(const char *sbuild_id)
if (filename == NULL || linkname == NULL) if (filename == NULL || linkname == NULL)
goto out_free; goto out_free;
if (!build_id__filename(sbuild_id, linkname, size)) if (!build_id_cache__linkname(sbuild_id, linkname, size))
goto out_free; goto out_free;
if (access(linkname, F_OK)) if (access(linkname, F_OK))
...@@ -489,7 +550,7 @@ int build_id_cache__remove_s(const char *sbuild_id) ...@@ -489,7 +550,7 @@ int build_id_cache__remove_s(const char *sbuild_id)
tmp = strrchr(linkname, '/') + 1; tmp = strrchr(linkname, '/') + 1;
snprintf(tmp, size - (tmp - linkname), "%s", filename); snprintf(tmp, size - (tmp - linkname), "%s", filename);
if (unlink(linkname)) if (rm_rf(linkname))
goto out_free; goto out_free;
err = 0; err = 0;
...@@ -501,7 +562,7 @@ int build_id_cache__remove_s(const char *sbuild_id) ...@@ -501,7 +562,7 @@ int build_id_cache__remove_s(const char *sbuild_id)
static int dso__cache_build_id(struct dso *dso, struct machine *machine) static int dso__cache_build_id(struct dso *dso, struct machine *machine)
{ {
bool is_kallsyms = dso->kernel && dso->long_name[0] != '/'; bool is_kallsyms = dso__is_kallsyms(dso);
bool is_vdso = dso__is_vdso(dso); bool is_vdso = dso__is_vdso(dso);
const char *name = dso->long_name; const char *name = dso->long_name;
char nm[PATH_MAX]; char nm[PATH_MAX];
......
...@@ -14,6 +14,8 @@ struct dso; ...@@ -14,6 +14,8 @@ struct dso;
int build_id__sprintf(const u8 *build_id, int len, char *bf); int build_id__sprintf(const u8 *build_id, int len, char *bf);
int sysfs__sprintf_build_id(const char *root_dir, char *sbuild_id); int sysfs__sprintf_build_id(const char *root_dir, char *sbuild_id);
int filename__sprintf_build_id(const char *pathname, char *sbuild_id); int filename__sprintf_build_id(const char *pathname, char *sbuild_id);
char *build_id_cache__kallsyms_path(const char *sbuild_id, char *bf,
size_t size);
char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size); char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size);
bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size); bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size);
......
...@@ -94,6 +94,7 @@ struct callchain_param { ...@@ -94,6 +94,7 @@ struct callchain_param {
enum perf_call_graph_mode record_mode; enum perf_call_graph_mode record_mode;
u32 dump_size; u32 dump_size;
enum chain_mode mode; enum chain_mode mode;
u16 max_stack;
u32 print_limit; u32 print_limit;
double min_percent; double min_percent;
sort_chain_func_t sort; sort_chain_func_t sort;
......
...@@ -233,17 +233,6 @@ int db_export__symbol(struct db_export *dbe, struct symbol *sym, ...@@ -233,17 +233,6 @@ int db_export__symbol(struct db_export *dbe, struct symbol *sym,
return 0; return 0;
} }
static struct thread *get_main_thread(struct machine *machine, struct thread *thread)
{
if (thread->pid_ == thread->tid)
return thread__get(thread);
if (thread->pid_ == -1)
return NULL;
return machine__find_thread(machine, thread->pid_, thread->pid_);
}
static int db_ids_from_al(struct db_export *dbe, struct addr_location *al, static int db_ids_from_al(struct db_export *dbe, struct addr_location *al,
u64 *dso_db_id, u64 *sym_db_id, u64 *offset) u64 *dso_db_id, u64 *sym_db_id, u64 *offset)
{ {
...@@ -382,7 +371,7 @@ int db_export__sample(struct db_export *dbe, union perf_event *event, ...@@ -382,7 +371,7 @@ int db_export__sample(struct db_export *dbe, union perf_event *event,
if (err) if (err)
return err; return err;
main_thread = get_main_thread(al->machine, thread); main_thread = thread__main_thread(al->machine, thread);
if (main_thread) if (main_thread)
comm = machine__thread_exec_comm(al->machine, main_thread); comm = machine__thread_exec_comm(al->machine, main_thread);
......
...@@ -349,6 +349,11 @@ static inline bool dso__is_kcore(struct dso *dso) ...@@ -349,6 +349,11 @@ static inline bool dso__is_kcore(struct dso *dso)
dso->binary_type == DSO_BINARY_TYPE__GUEST_KCORE; dso->binary_type == DSO_BINARY_TYPE__GUEST_KCORE;
} }
static inline bool dso__is_kallsyms(struct dso *dso)
{
return dso->kernel && dso->long_name[0] != '/';
}
void dso__free_a2l(struct dso *dso); void dso__free_a2l(struct dso *dso);
enum dso_type dso__type(struct dso *dso, struct machine *machine); enum dso_type dso__type(struct dso *dso, struct machine *machine);
......
...@@ -462,9 +462,9 @@ int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) ...@@ -462,9 +462,9 @@ int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
return 0; return 0;
} }
static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx) static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx, short revent)
{ {
int pos = fdarray__add(&evlist->pollfd, fd, POLLIN | POLLERR | POLLHUP); int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
/* /*
* Save the idx so that when we filter out fds POLLHUP'ed we can * Save the idx so that when we filter out fds POLLHUP'ed we can
* close the associated evlist->mmap[] entry. * close the associated evlist->mmap[] entry.
...@@ -480,10 +480,11 @@ static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx ...@@ -480,10 +480,11 @@ static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx
int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd) int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{ {
return __perf_evlist__add_pollfd(evlist, fd, -1); return __perf_evlist__add_pollfd(evlist, fd, -1, POLLIN);
} }
static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd) static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
void *arg __maybe_unused)
{ {
struct perf_evlist *evlist = container_of(fda, struct perf_evlist, pollfd); struct perf_evlist *evlist = container_of(fda, struct perf_evlist, pollfd);
...@@ -493,7 +494,7 @@ static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd) ...@@ -493,7 +494,7 @@ static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd)
int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask) int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{ {
return fdarray__filter(&evlist->pollfd, revents_and_mask, return fdarray__filter(&evlist->pollfd, revents_and_mask,
perf_evlist__munmap_filtered); perf_evlist__munmap_filtered, NULL);
} }
int perf_evlist__poll(struct perf_evlist *evlist, int timeout) int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
...@@ -777,7 +778,7 @@ perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start, ...@@ -777,7 +778,7 @@ perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
return event; return event;
} }
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
{ {
struct perf_mmap *md = &evlist->mmap[idx]; struct perf_mmap *md = &evlist->mmap[idx];
u64 head; u64 head;
...@@ -832,6 +833,13 @@ perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx) ...@@ -832,6 +833,13 @@ perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
return perf_mmap__read(md, false, start, end, &md->prev); return perf_mmap__read(md, false, start, end, &md->prev);
} }
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
if (!evlist->backward)
return perf_evlist__mmap_read_forward(evlist, idx);
return perf_evlist__mmap_read_backward(evlist, idx);
}
void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx) void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
{ {
struct perf_mmap *md = &evlist->mmap[idx]; struct perf_mmap *md = &evlist->mmap[idx];
...@@ -856,9 +864,11 @@ static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx) ...@@ -856,9 +864,11 @@ static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx) static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx)
{ {
BUG_ON(atomic_read(&evlist->mmap[idx].refcnt) == 0); struct perf_mmap *md = &evlist->mmap[idx];
if (atomic_dec_and_test(&evlist->mmap[idx].refcnt)) BUG_ON(md->base && atomic_read(&md->refcnt) == 0);
if (atomic_dec_and_test(&md->refcnt))
__perf_evlist__munmap(evlist, idx); __perf_evlist__munmap(evlist, idx);
} }
...@@ -983,15 +993,28 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx, ...@@ -983,15 +993,28 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
return 0; return 0;
} }
static bool
perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
struct perf_evsel *evsel)
{
if (evsel->overwrite)
return false;
return true;
}
static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx, static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
struct mmap_params *mp, int cpu, struct mmap_params *mp, int cpu,
int thread, int *output) int thread, int *output)
{ {
struct perf_evsel *evsel; struct perf_evsel *evsel;
int revent;
evlist__for_each(evlist, evsel) { evlist__for_each(evlist, evsel) {
int fd; int fd;
if (evsel->overwrite != (evlist->overwrite && evlist->backward))
continue;
if (evsel->system_wide && thread) if (evsel->system_wide && thread)
continue; continue;
...@@ -1008,6 +1031,8 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx, ...@@ -1008,6 +1031,8 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
perf_evlist__mmap_get(evlist, idx); perf_evlist__mmap_get(evlist, idx);
} }
revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;
/* /*
* The system_wide flag causes a selected event to be opened * The system_wide flag causes a selected event to be opened
* always without a pid. Consequently it will never get a * always without a pid. Consequently it will never get a
...@@ -1016,7 +1041,7 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx, ...@@ -1016,7 +1041,7 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
* Therefore don't add it for polling. * Therefore don't add it for polling.
*/ */
if (!evsel->system_wide && if (!evsel->system_wide &&
__perf_evlist__add_pollfd(evlist, fd, idx) < 0) { __perf_evlist__add_pollfd(evlist, fd, idx, revent) < 0) {
perf_evlist__mmap_put(evlist, idx); perf_evlist__mmap_put(evlist, idx);
return -1; return -1;
} }
......
...@@ -131,6 +131,8 @@ struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id); ...@@ -131,6 +131,8 @@ struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx); union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx);
union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist,
int idx);
union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist,
int idx); int idx);
void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx); void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx);
......
...@@ -572,6 +572,8 @@ void perf_evsel__config_callchain(struct perf_evsel *evsel, ...@@ -572,6 +572,8 @@ void perf_evsel__config_callchain(struct perf_evsel *evsel,
perf_evsel__set_sample_bit(evsel, CALLCHAIN); perf_evsel__set_sample_bit(evsel, CALLCHAIN);
attr->sample_max_stack = param->max_stack;
if (param->record_mode == CALLCHAIN_LBR) { if (param->record_mode == CALLCHAIN_LBR) {
if (!opts->branch_stack) { if (!opts->branch_stack) {
if (attr->exclude_user) { if (attr->exclude_user) {
...@@ -635,7 +637,8 @@ static void apply_config_terms(struct perf_evsel *evsel, ...@@ -635,7 +637,8 @@ static void apply_config_terms(struct perf_evsel *evsel,
struct perf_event_attr *attr = &evsel->attr; struct perf_event_attr *attr = &evsel->attr;
struct callchain_param param; struct callchain_param param;
u32 dump_size = 0; u32 dump_size = 0;
char *callgraph_buf = NULL; int max_stack = 0;
const char *callgraph_buf = NULL;
/* callgraph default */ /* callgraph default */
param.record_mode = callchain_param.record_mode; param.record_mode = callchain_param.record_mode;
...@@ -662,6 +665,9 @@ static void apply_config_terms(struct perf_evsel *evsel, ...@@ -662,6 +665,9 @@ static void apply_config_terms(struct perf_evsel *evsel,
case PERF_EVSEL__CONFIG_TERM_STACK_USER: case PERF_EVSEL__CONFIG_TERM_STACK_USER:
dump_size = term->val.stack_user; dump_size = term->val.stack_user;
break; break;
case PERF_EVSEL__CONFIG_TERM_MAX_STACK:
max_stack = term->val.max_stack;
break;
case PERF_EVSEL__CONFIG_TERM_INHERIT: case PERF_EVSEL__CONFIG_TERM_INHERIT:
/* /*
* attr->inherit should has already been set by * attr->inherit should has already been set by
...@@ -677,7 +683,12 @@ static void apply_config_terms(struct perf_evsel *evsel, ...@@ -677,7 +683,12 @@ static void apply_config_terms(struct perf_evsel *evsel,
} }
/* User explicitly set per-event callgraph, clear the old setting and reset. */ /* User explicitly set per-event callgraph, clear the old setting and reset. */
if ((callgraph_buf != NULL) || (dump_size > 0)) { if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
if (max_stack) {
param.max_stack = max_stack;
if (callgraph_buf == NULL)
callgraph_buf = "fp";
}
/* parse callgraph parameters */ /* parse callgraph parameters */
if (callgraph_buf != NULL) { if (callgraph_buf != NULL) {
...@@ -1329,6 +1340,7 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr, ...@@ -1329,6 +1340,7 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
PRINT_ATTRf(clockid, p_signed); PRINT_ATTRf(clockid, p_signed);
PRINT_ATTRf(sample_regs_intr, p_hex); PRINT_ATTRf(sample_regs_intr, p_hex);
PRINT_ATTRf(aux_watermark, p_unsigned); PRINT_ATTRf(aux_watermark, p_unsigned);
PRINT_ATTRf(sample_max_stack, p_unsigned);
return ret; return ret;
} }
......
...@@ -44,6 +44,7 @@ enum { ...@@ -44,6 +44,7 @@ enum {
PERF_EVSEL__CONFIG_TERM_CALLGRAPH, PERF_EVSEL__CONFIG_TERM_CALLGRAPH,
PERF_EVSEL__CONFIG_TERM_STACK_USER, PERF_EVSEL__CONFIG_TERM_STACK_USER,
PERF_EVSEL__CONFIG_TERM_INHERIT, PERF_EVSEL__CONFIG_TERM_INHERIT,
PERF_EVSEL__CONFIG_TERM_MAX_STACK,
PERF_EVSEL__CONFIG_TERM_MAX, PERF_EVSEL__CONFIG_TERM_MAX,
}; };
...@@ -56,6 +57,7 @@ struct perf_evsel_config_term { ...@@ -56,6 +57,7 @@ struct perf_evsel_config_term {
bool time; bool time;
char *callgraph; char *callgraph;
u64 stack_user; u64 stack_user;
int max_stack;
bool inherit; bool inherit;
} val; } val;
}; };
......
...@@ -900,6 +900,7 @@ static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = { ...@@ -900,6 +900,7 @@ static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
[PARSE_EVENTS__TERM_TYPE_STACKSIZE] = "stack-size", [PARSE_EVENTS__TERM_TYPE_STACKSIZE] = "stack-size",
[PARSE_EVENTS__TERM_TYPE_NOINHERIT] = "no-inherit", [PARSE_EVENTS__TERM_TYPE_NOINHERIT] = "no-inherit",
[PARSE_EVENTS__TERM_TYPE_INHERIT] = "inherit", [PARSE_EVENTS__TERM_TYPE_INHERIT] = "inherit",
[PARSE_EVENTS__TERM_TYPE_MAX_STACK] = "max-stack",
}; };
static bool config_term_shrinked; static bool config_term_shrinked;
...@@ -995,6 +996,9 @@ do { \ ...@@ -995,6 +996,9 @@ do { \
case PARSE_EVENTS__TERM_TYPE_NAME: case PARSE_EVENTS__TERM_TYPE_NAME:
CHECK_TYPE_VAL(STR); CHECK_TYPE_VAL(STR);
break; break;
case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
CHECK_TYPE_VAL(NUM);
break;
default: default:
err->str = strdup("unknown term"); err->str = strdup("unknown term");
err->idx = term->err_term; err->idx = term->err_term;
...@@ -1040,6 +1044,7 @@ static int config_term_tracepoint(struct perf_event_attr *attr, ...@@ -1040,6 +1044,7 @@ static int config_term_tracepoint(struct perf_event_attr *attr,
case PARSE_EVENTS__TERM_TYPE_STACKSIZE: case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
case PARSE_EVENTS__TERM_TYPE_INHERIT: case PARSE_EVENTS__TERM_TYPE_INHERIT:
case PARSE_EVENTS__TERM_TYPE_NOINHERIT: case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
return config_term_common(attr, term, err); return config_term_common(attr, term, err);
default: default:
if (err) { if (err) {
...@@ -1109,6 +1114,9 @@ do { \ ...@@ -1109,6 +1114,9 @@ do { \
case PARSE_EVENTS__TERM_TYPE_NOINHERIT: case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
ADD_CONFIG_TERM(INHERIT, inherit, term->val.num ? 0 : 1); ADD_CONFIG_TERM(INHERIT, inherit, term->val.num ? 0 : 1);
break; break;
case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
ADD_CONFIG_TERM(MAX_STACK, max_stack, term->val.num);
break;
default: default:
break; break;
} }
......
...@@ -68,6 +68,7 @@ enum { ...@@ -68,6 +68,7 @@ enum {
PARSE_EVENTS__TERM_TYPE_STACKSIZE, PARSE_EVENTS__TERM_TYPE_STACKSIZE,
PARSE_EVENTS__TERM_TYPE_NOINHERIT, PARSE_EVENTS__TERM_TYPE_NOINHERIT,
PARSE_EVENTS__TERM_TYPE_INHERIT, PARSE_EVENTS__TERM_TYPE_INHERIT,
PARSE_EVENTS__TERM_TYPE_MAX_STACK,
__PARSE_EVENTS__TERM_TYPE_NR, __PARSE_EVENTS__TERM_TYPE_NR,
}; };
......
...@@ -199,6 +199,7 @@ branch_type { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE ...@@ -199,6 +199,7 @@ branch_type { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE
time { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_TIME); } time { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_TIME); }
call-graph { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CALLGRAPH); } call-graph { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CALLGRAPH); }
stack-size { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_STACKSIZE); } stack-size { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_STACKSIZE); }
max-stack { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_MAX_STACK); }
inherit { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_INHERIT); } inherit { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_INHERIT); }
no-inherit { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NOINHERIT); } no-inherit { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NOINHERIT); }
, { return ','; } , { return ','; }
......
...@@ -593,6 +593,7 @@ do { \ ...@@ -593,6 +593,7 @@ do { \
if (bswap_safe(f, 0)) \ if (bswap_safe(f, 0)) \
attr->f = bswap_##sz(attr->f); \ attr->f = bswap_##sz(attr->f); \
} while(0) } while(0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32) #define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64) #define bswap_field_64(f) bswap_field(f, 64)
...@@ -608,6 +609,7 @@ do { \ ...@@ -608,6 +609,7 @@ do { \
bswap_field_64(sample_regs_user); bswap_field_64(sample_regs_user);
bswap_field_32(sample_stack_user); bswap_field_32(sample_stack_user);
bswap_field_32(aux_watermark); bswap_field_32(aux_watermark);
bswap_field_16(sample_max_stack);
/* /*
* After read_format are bitfields. Check read_format because * After read_format are bitfields. Check read_format because
......
...@@ -1641,6 +1641,20 @@ static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz) ...@@ -1641,6 +1641,20 @@ static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
return ret; return ret;
} }
/*
 * Report whether @file can actually be opened for reading by this
 * process.
 *
 * A real open(O_RDONLY) is attempted instead of access(R_OK): access()
 * tests against the real UID/GID only, while open() honors the
 * effective UID/GID and capabilities (e.g. reading /proc/kcore needs
 * CAP_SYS_RAWIO), so this matches what a subsequent read would see.
 */
static bool filename__readable(const char *file)
{
	int fd = open(file, O_RDONLY);

	if (fd >= 0) {
		close(fd);
		return true;
	}
	return false;
}
static char *dso__find_kallsyms(struct dso *dso, struct map *map) static char *dso__find_kallsyms(struct dso *dso, struct map *map)
{ {
u8 host_build_id[BUILD_ID_SIZE]; u8 host_build_id[BUILD_ID_SIZE];
...@@ -1660,58 +1674,43 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map) ...@@ -1660,58 +1674,43 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
sizeof(host_build_id)) == 0) sizeof(host_build_id)) == 0)
is_host = dso__build_id_equal(dso, host_build_id); is_host = dso__build_id_equal(dso, host_build_id);
build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id); /* Try a fast path for /proc/kallsyms if possible */
scnprintf(path, sizeof(path), "%s/%s/%s", buildid_dir,
DSO__NAME_KCORE, sbuild_id);
/* Use /proc/kallsyms if possible */
if (is_host) { if (is_host) {
DIR *d;
int fd;
/* If no cached kcore go with /proc/kallsyms */
d = opendir(path);
if (!d)
goto proc_kallsyms;
closedir(d);
/* /*
* Do not check the build-id cache, until we know we cannot use * Do not check the build-id cache, unless we know we cannot use
* /proc/kcore. * /proc/kcore or module maps don't match to /proc/kallsyms.
* To check readability of /proc/kcore, do not use access(R_OK)
* since /proc/kcore requires CAP_SYS_RAWIO to read and access
* can't check it.
*/ */
fd = open("/proc/kcore", O_RDONLY); if (filename__readable("/proc/kcore") &&
if (fd != -1) { !validate_kcore_addresses("/proc/kallsyms", map))
close(fd); goto proc_kallsyms;
/* If module maps match go with /proc/kallsyms */
if (!validate_kcore_addresses("/proc/kallsyms", map))
goto proc_kallsyms;
}
/* Find kallsyms in build-id cache with kcore */
if (!find_matching_kcore(map, path, sizeof(path)))
return strdup(path);
goto proc_kallsyms;
} }
build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
/* Find kallsyms in build-id cache with kcore */ /* Find kallsyms in build-id cache with kcore */
scnprintf(path, sizeof(path), "%s/%s/%s",
buildid_dir, DSO__NAME_KCORE, sbuild_id);
if (!find_matching_kcore(map, path, sizeof(path))) if (!find_matching_kcore(map, path, sizeof(path)))
return strdup(path); return strdup(path);
scnprintf(path, sizeof(path), "%s/%s/%s", /* Use current /proc/kallsyms if possible */
buildid_dir, DSO__NAME_KALLSYMS, sbuild_id); if (is_host) {
proc_kallsyms:
return strdup("/proc/kallsyms");
}
if (access(path, F_OK)) { /* Finally, find a cache of kallsyms */
if (!build_id_cache__kallsyms_path(sbuild_id, path, sizeof(path))) {
pr_err("No kallsyms or vmlinux with build-id %s was found\n", pr_err("No kallsyms or vmlinux with build-id %s was found\n",
sbuild_id); sbuild_id);
return NULL; return NULL;
} }
return strdup(path); return strdup(path);
proc_kallsyms:
return strdup("/proc/kallsyms");
} }
static int dso__load_kernel_sym(struct dso *dso, struct map *map, static int dso__load_kernel_sym(struct dso *dso, struct map *map,
......
...@@ -265,3 +265,14 @@ void thread__find_cpumode_addr_location(struct thread *thread, ...@@ -265,3 +265,14 @@ void thread__find_cpumode_addr_location(struct thread *thread,
break; break;
} }
} }
/*
 * Resolve the main (group-leader) thread of @thread's process.
 *
 * When @thread is itself the leader (pid_ == tid) a fresh reference to
 * it is returned via thread__get(); the pid_ == tid test deliberately
 * comes first so a thread with both ids unknown still resolves to
 * itself. Returns NULL when the process id is unknown (-1); otherwise
 * looks the leader up in @machine by (pid_, pid_).
 */
struct thread *thread__main_thread(struct machine *machine, struct thread *thread)
{
	if (thread->pid_ == thread->tid)
		return thread__get(thread);

	return thread->pid_ == -1 ? NULL :
	       machine__find_thread(machine, thread->pid_, thread->pid_);
}
...@@ -81,6 +81,8 @@ void thread__insert_map(struct thread *thread, struct map *map); ...@@ -81,6 +81,8 @@ void thread__insert_map(struct thread *thread, struct map *map);
int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp); int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp);
size_t thread__fprintf(struct thread *thread, FILE *fp); size_t thread__fprintf(struct thread *thread, FILE *fp);
struct thread *thread__main_thread(struct machine *machine, struct thread *thread);
void thread__find_addr_map(struct thread *thread, void thread__find_addr_map(struct thread *thread,
u8 cpumode, enum map_type type, u64 addr, u8 cpumode, enum map_type type, u64 addr,
struct addr_location *al); struct addr_location *al);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment