Commit 6632c4b4 authored by Ingo Molnar

Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

New features:

  - Add option in 'perf sched' to merge like comms to lat output (Josef Bacik)

  - Improve 'perf probe' error messages when not finding a
    suitable vmlinux (Masami Hiramatsu)

Infrastructure changes:

  - Use atomic.h for various pre-existing reference counts (Arnaldo Carvalho de Melo)

  - Leg work for refcounting 'struct map' (Arnaldo Carvalho de Melo)

  - Assign default value for some pointers (Martin Liška)

  - Improve setting of gcc debug option (Martin Liška)

  - Separate the tests and tools in installation (Nam T. Nguyen)

  - Reduce number of arguments of hist_entry_iter__add() (Namhyung Kim)

  - DSO data cache fixes (Namhyung Kim)
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents a82d24ed dddc7ee3
......@@ -464,7 +464,7 @@ check: $(OUTPUT)common-cmds.h
install-gtk:
install-bin: all install-gtk
install-tools: all install-gtk
$(call QUIET_INSTALL, binaries) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'; \
$(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)'; \
......@@ -502,12 +502,16 @@ endif
$(call QUIET_INSTALL, perf_completion-script) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d'; \
$(INSTALL) perf-completion.sh '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d/perf'
install-tests: all install-gtk
$(call QUIET_INSTALL, tests) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
$(INSTALL) tests/attr.py '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \
$(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'
install-bin: install-tools install-tests
install: install-bin try-install-man install-traceevent-plugins
install-python_ext:
......
......@@ -61,7 +61,7 @@ const char *const mips_triplets[] = {
static bool lookup_path(char *name)
{
bool found = false;
char *path, *tmp;
char *path, *tmp = NULL;
char buf[PATH_MAX];
char *env = getenv("PATH");
......
......@@ -139,6 +139,8 @@ static int process_sample_event(struct perf_tool *tool,
struct report *rep = container_of(tool, struct report, tool);
struct addr_location al;
struct hist_entry_iter iter = {
.evsel = evsel,
.sample = sample,
.hide_unresolved = rep->hide_unresolved,
.add_entry_cb = hist_iter__report_callback,
};
......@@ -168,8 +170,7 @@ static int process_sample_event(struct perf_tool *tool,
if (al.map != NULL)
al.map->dso->hit = 1;
ret = hist_entry_iter__add(&iter, &al, evsel, sample, rep->max_stack,
rep);
ret = hist_entry_iter__add(&iter, &al, rep->max_stack, rep);
if (ret < 0)
pr_debug("problem adding hist entry, skipping event\n");
out_put:
......
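The two dropped parameters now travel inside the iterator itself, which is the point of the hist_entry_iter__add() cleanup: every caller initializes .evsel and .sample once and passes a shorter argument list. Condensing the report hunks above into the new calling convention (a sketch, not additional code from the commit):

	struct hist_entry_iter iter = {
		.evsel           = evsel,	/* was the 3rd argument */
		.sample          = sample,	/* was the 4th argument */
		.hide_unresolved = rep->hide_unresolved,
		.add_entry_cb    = hist_iter__report_callback,
	};

	ret = hist_entry_iter__add(&iter, &al, rep->max_stack, rep);
	if (ret < 0)
		pr_debug("problem adding hist entry, skipping event\n");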
......@@ -95,6 +95,7 @@ struct work_atoms {
u64 total_lat;
u64 nb_atoms;
u64 total_runtime;
int num_merged;
};
typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
......@@ -168,9 +169,10 @@ struct perf_sched {
u64 all_runtime;
u64 all_count;
u64 cpu_last_switched[MAX_CPUS];
struct rb_root atom_root, sorted_atom_root;
struct rb_root atom_root, sorted_atom_root, merged_atom_root;
struct list_head sort_list, cmp_pid;
bool force;
bool skip_merge;
};
static u64 get_nsecs(void)
......@@ -1182,6 +1184,9 @@ static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_
sched->all_runtime += work_list->total_runtime;
sched->all_count += work_list->nb_atoms;
if (work_list->num_merged > 1)
ret = printf(" %s:(%d) ", thread__comm_str(work_list->thread), work_list->num_merged);
else
ret = printf(" %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid);
for (i = 0; i < 24 - ret; i++)
......@@ -1302,17 +1307,22 @@ static int sort_dimension__add(const char *tok, struct list_head *list)
static void perf_sched__sort_lat(struct perf_sched *sched)
{
struct rb_node *node;
struct rb_root *root = &sched->atom_root;
again:
for (;;) {
struct work_atoms *data;
node = rb_first(&sched->atom_root);
node = rb_first(root);
if (!node)
break;
rb_erase(node, &sched->atom_root);
rb_erase(node, root);
data = rb_entry(node, struct work_atoms, node);
__thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
}
if (root == &sched->atom_root) {
root = &sched->merged_atom_root;
goto again;
}
}
static int process_sched_wakeup_event(struct perf_tool *tool,
......@@ -1572,6 +1582,59 @@ static void print_bad_events(struct perf_sched *sched)
}
}
static void __merge_work_atoms(struct rb_root *root, struct work_atoms *data)
{
struct rb_node **new = &(root->rb_node), *parent = NULL;
struct work_atoms *this;
const char *comm = thread__comm_str(data->thread), *this_comm;
while (*new) {
int cmp;
this = container_of(*new, struct work_atoms, node);
parent = *new;
this_comm = thread__comm_str(this->thread);
cmp = strcmp(comm, this_comm);
if (cmp > 0) {
new = &((*new)->rb_left);
} else if (cmp < 0) {
new = &((*new)->rb_right);
} else {
this->num_merged++;
this->total_runtime += data->total_runtime;
this->nb_atoms += data->nb_atoms;
this->total_lat += data->total_lat;
list_splice(&data->work_list, &this->work_list);
if (this->max_lat < data->max_lat) {
this->max_lat = data->max_lat;
this->max_lat_at = data->max_lat_at;
}
zfree(&data);
return;
}
}
data->num_merged++;
rb_link_node(&data->node, parent, new);
rb_insert_color(&data->node, root);
}
static void perf_sched__merge_lat(struct perf_sched *sched)
{
struct work_atoms *data;
struct rb_node *node;
if (sched->skip_merge)
return;
while ((node = rb_first(&sched->atom_root))) {
rb_erase(node, &sched->atom_root);
data = rb_entry(node, struct work_atoms, node);
__merge_work_atoms(&sched->merged_atom_root, data);
}
}
static int perf_sched__lat(struct perf_sched *sched)
{
struct rb_node *next;
......@@ -1581,6 +1644,7 @@ static int perf_sched__lat(struct perf_sched *sched)
if (perf_sched__read_events(sched))
return -1;
perf_sched__merge_lat(sched);
perf_sched__sort_lat(sched);
printf("\n -----------------------------------------------------------------------------------------------------------------\n");
......@@ -1732,6 +1796,7 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
.profile_cpu = -1,
.next_shortname1 = 'A',
.next_shortname2 = '0',
.skip_merge = 0,
};
const struct option latency_options[] = {
OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
......@@ -1742,6 +1807,8 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
"CPU to profile on"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
OPT_BOOLEAN('p', "pids", &sched.skip_merge,
"latency stats per pid instead of per comm"),
OPT_END()
};
const struct option replay_options[] = {
......
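The heart of the new merging is __merge_work_atoms(): an ordinary rbtree insertion keyed on the comm string, except that a key collision folds the incoming node's counters into the resident node instead of creating a duplicate. A minimal sketch of that insert-or-merge shape, with a hypothetical struct acc standing in for work_atoms:

	#include <string.h>
	#include <linux/rbtree.h>

	struct acc {				/* hypothetical stand-in for work_atoms */
		struct rb_node node;
		const char *key;		/* comm string */
		unsigned long long sum;		/* counter folded on merge */
	};

	static struct acc *insert_or_merge(struct rb_root *root, struct acc *data)
	{
		struct rb_node **new = &root->rb_node, *parent = NULL;

		while (*new) {
			struct acc *this = rb_entry(*new, struct acc, node);
			int cmp = strcmp(data->key, this->key);

			parent = *new;
			if (cmp > 0)			/* same ordering as the diff */
				new = &(*new)->rb_left;
			else if (cmp < 0)
				new = &(*new)->rb_right;
			else {
				this->sum += data->sum;	/* merge, don't insert */
				return this;
			}
		}
		rb_link_node(&data->node, parent, new);
		rb_insert_color(&data->node, root);
		return data;
	}

The new 'perf sched latency -p/--pids' option simply sets sched.skip_merge, which makes perf_sched__merge_lat() a no-op and restores the old per-thread rows.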
......@@ -775,6 +775,8 @@ static void perf_event__process_sample(struct perf_tool *tool,
if (al.sym == NULL || !al.sym->ignore) {
struct hists *hists = evsel__hists(evsel);
struct hist_entry_iter iter = {
.evsel = evsel,
.sample = sample,
.add_entry_cb = hist_iter__top_callback,
};
......@@ -785,8 +787,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
pthread_mutex_lock(&hists->lock);
err = hist_entry_iter__add(&iter, &al, evsel, sample,
top->max_stack, top);
err = hist_entry_iter__add(&iter, &al, top->max_stack, top);
if (err < 0)
pr_err("Problem incrementing symbol period, skipping event\n");
......
......@@ -32,7 +32,7 @@ ifeq ($(ARCH),x86)
LIBUNWIND_LIBS = -lunwind -lunwind-x86_64
$(call detected,CONFIG_X86_64)
else
LIBUNWIND_LIBS = -lunwind -lunwind-x86
LIBUNWIND_LIBS = -lunwind-x86 -llzma -lunwind
endif
NO_PERF_REGS := 0
endif
......@@ -130,6 +130,8 @@ endif
ifeq ($(DEBUG),0)
CFLAGS += -O6
else
CFLAGS += $(call cc-option,-Og,-O0)
endif
ifdef PARSER_DEBUG
......
......@@ -177,3 +177,22 @@ $(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2)))
endef
_ge_attempt = $(if $(get-executable),$(get-executable),$(call _gea_err,$(2)))
_gea_err = $(if $(1),$(error Please set '$(1)' appropriately))
# try-run
# Usage: option = $(call try-run, $(CC)...-o "$$TMP",option-ok,otherwise)
# Exit code chooses option. "$$TMP" can be used as a temporary file and
# is automatically cleaned up.
try-run = $(shell set -e; \
TMP="$(TMPOUT).$$$$.tmp"; \
TMPO="$(TMPOUT).$$$$.o"; \
if ($(1)) >/dev/null 2>&1; \
then echo "$(2)"; \
else echo "$(3)"; \
fi; \
rm -f "$$TMP" "$$TMPO")
# cc-option
# Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
cc-option = $(call try-run,\
$(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
......@@ -99,6 +99,17 @@ struct test_data_offset offsets[] = {
},
};
/* move it from util/dso.c for compatibility */
static int dso__data_fd(struct dso *dso, struct machine *machine)
{
int fd = dso__data_get_fd(dso, machine);
if (fd >= 0)
dso__data_put_fd(dso);
return fd;
}
int test__dso_data(void)
{
struct machine machine;
......
......@@ -87,6 +87,8 @@ static int add_hist_entries(struct hists *hists, struct machine *machine)
},
};
struct hist_entry_iter iter = {
.evsel = evsel,
.sample = &sample,
.hide_unresolved = false,
};
......@@ -104,8 +106,8 @@ static int add_hist_entries(struct hists *hists, struct machine *machine)
&sample) < 0)
goto out;
if (hist_entry_iter__add(&iter, &al, evsel, &sample,
PERF_MAX_STACK_DEPTH, NULL) < 0) {
if (hist_entry_iter__add(&iter, &al, PERF_MAX_STACK_DEPTH,
NULL) < 0) {
addr_location__put(&al);
goto out;
}
......
......@@ -63,6 +63,8 @@ static int add_hist_entries(struct perf_evlist *evlist,
},
};
struct hist_entry_iter iter = {
.evsel = evsel,
.sample = &sample,
.ops = &hist_iter_normal,
.hide_unresolved = false,
};
......@@ -81,7 +83,7 @@ static int add_hist_entries(struct perf_evlist *evlist,
&sample) < 0)
goto out;
if (hist_entry_iter__add(&iter, &al, evsel, &sample,
if (hist_entry_iter__add(&iter, &al,
PERF_MAX_STACK_DEPTH, NULL) < 0) {
addr_location__put(&al);
goto out;
......
......@@ -57,6 +57,8 @@ static int add_hist_entries(struct hists *hists, struct machine *machine)
},
};
struct hist_entry_iter iter = {
.evsel = evsel,
.sample = &sample,
.ops = &hist_iter_normal,
.hide_unresolved = false,
};
......@@ -70,8 +72,8 @@ static int add_hist_entries(struct hists *hists, struct machine *machine)
&sample) < 0)
goto out;
if (hist_entry_iter__add(&iter, &al, evsel, &sample,
PERF_MAX_STACK_DEPTH, NULL) < 0) {
if (hist_entry_iter__add(&iter, &al, PERF_MAX_STACK_DEPTH,
NULL) < 0) {
addr_location__put(&al);
goto out;
}
......
......@@ -23,9 +23,10 @@ int test__vmlinux_matches_kallsyms(void)
int err = -1;
struct rb_node *nd;
struct symbol *sym;
struct map *kallsyms_map, *vmlinux_map;
struct map *kallsyms_map, *vmlinux_map, *map;
struct machine kallsyms, vmlinux;
enum map_type type = MAP__FUNCTION;
struct rb_root *maps = &vmlinux.kmaps.maps[type];
u64 mem_start, mem_end;
/*
......@@ -184,8 +185,8 @@ int test__vmlinux_matches_kallsyms(void)
pr_info("Maps only in vmlinux:\n");
for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
for (map = maps__first(maps); map; map = map__next(map)) {
struct map *pair;
/*
* If it is the kernel, kallsyms is always "[kernel.kallsyms]", while
* the kernel will have the path for the vmlinux file being used,
......@@ -193,22 +194,22 @@ int test__vmlinux_matches_kallsyms(void)
* both cases.
*/
pair = map_groups__find_by_name(&kallsyms.kmaps, type,
(pos->dso->kernel ?
pos->dso->short_name :
pos->dso->name));
(map->dso->kernel ?
map->dso->short_name :
map->dso->name));
if (pair)
pair->priv = 1;
else
map__fprintf(pos, stderr);
map__fprintf(map, stderr);
}
pr_info("Maps in vmlinux with a different name in kallsyms:\n");
for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
for (map = maps__first(maps); map; map = map__next(map)) {
struct map *pair;
mem_start = vmlinux_map->unmap_ip(vmlinux_map, pos->start);
mem_end = vmlinux_map->unmap_ip(vmlinux_map, pos->end);
mem_start = vmlinux_map->unmap_ip(vmlinux_map, map->start);
mem_end = vmlinux_map->unmap_ip(vmlinux_map, map->end);
pair = map_groups__find(&kallsyms.kmaps, type, mem_start);
if (pair == NULL || pair->priv)
......@@ -217,7 +218,7 @@ int test__vmlinux_matches_kallsyms(void)
if (pair->start == mem_start) {
pair->priv = 1;
pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
pos->start, pos->end, pos->pgoff, pos->dso->name);
map->start, map->end, map->pgoff, map->dso->name);
if (mem_end != pair->end)
pr_info(":\n*%" PRIx64 "-%" PRIx64 " %" PRIx64,
pair->start, pair->end, pair->pgoff);
......@@ -228,12 +229,11 @@ int test__vmlinux_matches_kallsyms(void)
pr_info("Maps only in kallsyms:\n");
for (nd = rb_first(&kallsyms.kmaps.maps[type]);
nd; nd = rb_next(nd)) {
struct map *pos = rb_entry(nd, struct map, rb_node);
maps = &kallsyms.kmaps.maps[type];
if (!pos->priv)
map__fprintf(pos, stderr);
for (map = maps__first(maps); map; map = map__next(map)) {
if (!map->priv)
map__fprintf(map, stderr);
}
out:
machine__exit(&kallsyms);
......
......@@ -2,24 +2,27 @@
#include "util.h"
#include <stdlib.h>
#include <stdio.h>
#include <linux/atomic.h>
struct comm_str {
char *str;
struct rb_node rb_node;
int ref;
atomic_t refcnt;
};
/* Should perhaps be moved to struct machine */
static struct rb_root comm_str_root;
static void comm_str__get(struct comm_str *cs)
static struct comm_str *comm_str__get(struct comm_str *cs)
{
cs->ref++;
if (cs)
atomic_inc(&cs->refcnt);
return cs;
}
static void comm_str__put(struct comm_str *cs)
{
if (!--cs->ref) {
if (cs && atomic_dec_and_test(&cs->refcnt)) {
rb_erase(&cs->rb_node, &comm_str_root);
zfree(&cs->str);
free(cs);
......@@ -40,6 +43,8 @@ static struct comm_str *comm_str__alloc(const char *str)
return NULL;
}
atomic_set(&cs->refcnt, 0);
return cs;
}
......
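This conversion follows the stock kernel refcounting idiom that the other atomic.h changes in this series use as well: a NULL-safe get() that returns its argument (convenient in assignments) and a put() that frees only when the last reference drops. A generic sketch with a hypothetical struct object:

	#include <linux/atomic.h>
	#include <stdlib.h>

	struct object {				/* hypothetical example type */
		atomic_t refcnt;
		/* payload ... */
	};

	static struct object *object__get(struct object *obj)
	{
		if (obj)
			atomic_inc(&obj->refcnt);
		return obj;			/* enables: dst = object__get(src); */
	}

	static void object__put(struct object *obj)
	{
		/* atomic_dec_and_test() is true only for the final reference */
		if (obj && atomic_dec_and_test(&obj->refcnt))
			free(obj);
	}

Note that comm_str__alloc() initializes the count to 0 rather than 1: a freshly allocated comm_str owns no reference until the first comm_str__get().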
......@@ -440,15 +440,7 @@ void dso__data_close(struct dso *dso)
pthread_mutex_unlock(&dso__data_open_lock);
}
/**
* dso__data_fd - Get dso's data file descriptor
* @dso: dso object
* @machine: machine object
*
* External interface to find dso's file, open it and
* returns file descriptor.
*/
int dso__data_fd(struct dso *dso, struct machine *machine)
static void try_to_open_dso(struct dso *dso, struct machine *machine)
{
enum dso_binary_type binary_type_data[] = {
DSO_BINARY_TYPE__BUILD_ID_CACHE,
......@@ -457,13 +449,8 @@ int dso__data_fd(struct dso *dso, struct machine *machine)
};
int i = 0;
if (dso->data.status == DSO_DATA_STATUS_ERROR)
return -1;
pthread_mutex_lock(&dso__data_open_lock);
if (dso->data.fd >= 0)
goto out;
return;
if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
dso->data.fd = open_dso(dso, machine);
......@@ -483,11 +470,38 @@ int dso__data_fd(struct dso *dso, struct machine *machine)
dso->data.status = DSO_DATA_STATUS_OK;
else
dso->data.status = DSO_DATA_STATUS_ERROR;
}
/**
* dso__data_get_fd - Get dso's data file descriptor
* @dso: dso object
* @machine: machine object
*
* External interface to find dso's file, open it and
* returns file descriptor. It should be paired with
* dso__data_put_fd() if it returns non-negative value.
*/
int dso__data_get_fd(struct dso *dso, struct machine *machine)
{
if (dso->data.status == DSO_DATA_STATUS_ERROR)
return -1;
if (pthread_mutex_lock(&dso__data_open_lock) < 0)
return -1;
try_to_open_dso(dso, machine);
if (dso->data.fd < 0)
pthread_mutex_unlock(&dso__data_open_lock);
return dso->data.fd;
}
void dso__data_put_fd(struct dso *dso __maybe_unused)
{
pthread_mutex_unlock(&dso__data_open_lock);
}
bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
{
u32 flag = 1 << by;
......@@ -609,14 +623,13 @@ dso_cache__read(struct dso *dso, struct machine *machine,
* dso->data.fd might be closed if other thread opened another
* file (dso) due to open file limit (RLIMIT_NOFILE).
*/
if (dso->data.fd < 0) {
dso->data.fd = open_dso(dso, machine);
try_to_open_dso(dso, machine);
if (dso->data.fd < 0) {
ret = -errno;
dso->data.status = DSO_DATA_STATUS_ERROR;
break;
}
}
cache_offset = offset & DSO__DATA_CACHE_MASK;
......@@ -702,20 +715,22 @@ static int data_file_size(struct dso *dso, struct machine *machine)
if (dso->data.file_size)
return 0;
if (dso->data.status == DSO_DATA_STATUS_ERROR)
return -1;
pthread_mutex_lock(&dso__data_open_lock);
/*
* dso->data.fd might be closed if other thread opened another
* file (dso) due to open file limit (RLIMIT_NOFILE).
*/
if (dso->data.fd < 0) {
dso->data.fd = open_dso(dso, machine);
try_to_open_dso(dso, machine);
if (dso->data.fd < 0) {
ret = -errno;
dso->data.status = DSO_DATA_STATUS_ERROR;
goto out;
}
}
if (fstat(dso->data.fd, &st) < 0) {
ret = -errno;
......@@ -740,12 +755,6 @@ static int data_file_size(struct dso *dso, struct machine *machine)
*/
off_t dso__data_size(struct dso *dso, struct machine *machine)
{
int fd;
fd = dso__data_fd(dso, machine);
if (fd < 0)
return fd;
if (data_file_size(dso, machine))
return -1;
......@@ -1200,12 +1209,15 @@ size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp)
enum dso_type dso__type(struct dso *dso, struct machine *machine)
{
int fd;
enum dso_type type = DSO__TYPE_UNKNOWN;
fd = dso__data_fd(dso, machine);
if (fd < 0)
return DSO__TYPE_UNKNOWN;
fd = dso__data_get_fd(dso, machine);
if (fd >= 0) {
type = dso__type_fd(fd);
dso__data_put_fd(dso);
}
return dso__type_fd(fd);
return type;
}
int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
......
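The subtle part of the split fd interface is its locking protocol: dso__data_get_fd() returns with dso__data_open_lock still held on success (it unlocks only on failure), and dso__data_put_fd() is what releases it. Holding the mutex across the whole use of the fd is what prevents another thread from closing it while recycling descriptors under RLIMIT_NOFILE pressure. A usage sketch:

	int fd = dso__data_get_fd(dso, machine);	/* lock held if fd >= 0 */
	if (fd >= 0) {
		/* fd is stable here, e.g.:
		 *   offset = elf_section_offset(fd, ".eh_frame_hdr");
		 */
		dso__data_put_fd(dso);			/* drops dso__data_open_lock */
	}
	/* fd must not be used past this point */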
......@@ -240,7 +240,8 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
/*
* The dso__data_* external interface provides following functions:
* dso__data_fd
* dso__data_get_fd
* dso__data_put_fd
* dso__data_close
* dso__data_size
* dso__data_read_offset
......@@ -257,8 +258,11 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
* The current usage of the dso__data_* interface is as follows:
*
* Get DSO's fd:
* int fd = dso__data_fd(dso, machine);
* int fd = dso__data_get_fd(dso, machine);
* if (fd >= 0) {
* USE 'fd' SOMEHOW
* dso__data_put_fd(dso);
* }
*
* Read DSO's data:
* n = dso__data_read_offset(dso_0, &machine, 0, buf, BUFSIZE);
......@@ -277,7 +281,8 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
*
* TODO
*/
int dso__data_fd(struct dso *dso, struct machine *machine);
int dso__data_get_fd(struct dso *dso, struct machine *machine);
void dso__data_put_fd(struct dso *dso __maybe_unused);
void dso__data_close(struct dso *dso);
off_t dso__data_size(struct dso *dso, struct machine *machine);
......
......@@ -329,8 +329,9 @@ int perf_event__synthesize_modules(struct perf_tool *tool,
struct machine *machine)
{
int rc = 0;
struct rb_node *nd;
struct map *pos;
struct map_groups *kmaps = &machine->kmaps;
struct rb_root *maps = &kmaps->maps[MAP__FUNCTION];
union perf_event *event = zalloc((sizeof(event->mmap) +
machine->id_hdr_size));
if (event == NULL) {
......@@ -350,10 +351,8 @@ int perf_event__synthesize_modules(struct perf_tool *tool,
else
event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
nd; nd = rb_next(nd)) {
for (pos = maps__first(maps); pos; pos = map__next(pos)) {
size_t size;
struct map *pos = rb_entry(nd, struct map, rb_node);
if (pos->dso->kernel)
continue;
......
......@@ -362,7 +362,7 @@ static u8 symbol__parent_filter(const struct symbol *parent)
return 0;
}
static struct hist_entry *add_hist_entry(struct hists *hists,
static struct hist_entry *hists__findnew_entry(struct hists *hists,
struct hist_entry *entry,
struct addr_location *al,
bool sample_self)
......@@ -468,7 +468,7 @@ struct hist_entry *__hists__add_entry(struct hists *hists,
.transaction = transaction,
};
return add_hist_entry(hists, &entry, al, sample_self);
return hists__findnew_entry(hists, &entry, al, sample_self);
}
static int
......@@ -548,9 +548,9 @@ iter_finish_mem_entry(struct hist_entry_iter *iter,
out:
/*
* We don't need to free iter->priv (mem_info) here since
* the mem info was either already freed in add_hist_entry() or
* passed to a new hist entry by hist_entry__new().
* We don't need to free iter->priv (mem_info) here since the mem info
* was either already freed in hists__findnew_entry() or passed to a
* new hist entry by hist_entry__new().
*/
iter->priv = NULL;
......@@ -851,19 +851,15 @@ const struct hist_iter_ops hist_iter_cumulative = {
};
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
struct perf_evsel *evsel, struct perf_sample *sample,
int max_stack_depth, void *arg)
{
int err, err2;
err = sample__resolve_callchain(sample, &iter->parent, evsel, al,
max_stack_depth);
err = sample__resolve_callchain(iter->sample, &iter->parent,
iter->evsel, al, max_stack_depth);
if (err)
return err;
iter->evsel = evsel;
iter->sample = sample;
err = iter->ops->prepare_entry(iter, al);
if (err)
goto out;
......
......@@ -111,7 +111,6 @@ struct hist_entry *__hists__add_entry(struct hists *hists,
u64 weight, u64 transaction,
bool sample_self);
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
struct perf_evsel *evsel, struct perf_sample *sample,
int max_stack_depth, void *arg);
int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right);
......
#ifndef __TOOLS_LINUX_PERF_RBTREE_H
#define __TOOLS_LINUX_PERF_RBTREE_H
#include <stdbool.h>
#include "../../../../include/linux/rbtree.h"
/*
* Handy for checking that we are not deleting an entry that is
* already in a list, found in block/{blk-throttle,cfq-iosched}.c,
* probably should be moved to lib/rbtree.c...
*/
static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
rb_erase(n, root);
RB_CLEAR_NODE(n);
}
#endif /* __TOOLS_LINUX_PERF_RBTREE_H */
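rb_erase_init() is the rbtree counterpart of list_del_init(): RB_CLEAR_NODE() marks the erased node as unlinked, so later code can assert membership cheaply, which is what the machine.c hunks below rely on and what BUG_ON(!RB_EMPTY_NODE(&thread->rb_node)) in thread__delete() checks. A small sketch of the invariant, using hypothetical helpers:

	/* nodes must start life with RB_CLEAR_NODE(), as thread__new() does */
	static void obj__unlink(struct rb_root *root, struct rb_node *n)
	{
		if (!RB_EMPTY_NODE(n))		/* still linked in a tree? */
			rb_erase_init(n, root);	/* erase and mark unlinked */
	}

	static void obj__delete(struct rb_node *n)
	{
		BUG_ON(!RB_EMPTY_NODE(n));	/* must be unlinked before freeing */
		/* free the containing object ... */
	}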
......@@ -333,7 +333,7 @@ static void machine__update_thread_pid(struct machine *machine,
if (!map_groups__empty(th->mg))
pr_err("Discarding thread maps for %d:%d\n",
th->pid_, th->tid);
map_groups__delete(th->mg);
map_groups__put(th->mg);
}
th->mg = map_groups__get(leader->mg);
......@@ -400,7 +400,7 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
* leader and that would screwed the rb tree.
*/
if (thread__init_map_groups(th, machine)) {
rb_erase(&th->rb_node, &machine->threads);
rb_erase_init(&th->rb_node, &machine->threads);
RB_CLEAR_NODE(&th->rb_node);
thread__delete(th);
return NULL;
......@@ -1314,7 +1314,7 @@ static void __machine__remove_thread(struct machine *machine, struct thread *th,
BUG_ON(atomic_read(&th->refcnt) == 0);
if (lock)
pthread_rwlock_wrlock(&machine->threads_lock);
rb_erase(&th->rb_node, &machine->threads);
rb_erase_init(&th->rb_node, &machine->threads);
RB_CLEAR_NODE(&th->rb_node);
/*
* Move it first to the dead_threads list, then drop the reference,
......
......@@ -498,28 +498,6 @@ void map_groups__put(struct map_groups *mg)
map_groups__delete(mg);
}
void map_groups__flush(struct map_groups *mg)
{
int type;
for (type = 0; type < MAP__NR_TYPES; type++) {
struct rb_root *root = &mg->maps[type];
struct rb_node *next = rb_first(root);
while (next) {
struct map *pos = rb_entry(next, struct map, rb_node);
next = rb_next(&pos->rb_node);
rb_erase(&pos->rb_node, root);
/*
* We may have references to this map, for
* instance in some hist_entry instances, so
* just move them to a separate list.
*/
list_add_tail(&pos->node, &mg->removed_maps[pos->type]);
}
}
}
struct symbol *map_groups__find_symbol(struct map_groups *mg,
enum map_type type, u64 addr,
struct map **mapp,
......@@ -710,9 +688,10 @@ int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
int map_groups__clone(struct map_groups *mg,
struct map_groups *parent, enum map_type type)
{
struct rb_node *nd;
for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
struct map *map = rb_entry(nd, struct map, rb_node);
struct map *map;
struct rb_root *maps = &parent->maps[type];
for (map = maps__first(maps); map; map = map__next(map)) {
struct map *new = map__clone(map);
if (new == NULL)
return -ENOMEM;
......@@ -775,7 +754,7 @@ struct map *maps__first(struct rb_root *maps)
return NULL;
}
struct map *maps__next(struct map *map)
struct map *map__next(struct map *map)
{
struct rb_node *next = rb_next(&map->rb_node);
......
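Several files in this commit convert raw rbtree walks into the typed accessors, which is what makes the maps__next() to map__next() rename worthwhile. The before/after idiom, condensed from the hunks:

	/* old: raw rbtree walk */
	struct rb_node *nd;
	for (nd = rb_first(maps); nd; nd = rb_next(nd)) {
		struct map *map = rb_entry(nd, struct map, rb_node);
		/* use map */
	}

	/* new: typed accessors hide the rb_entry() boilerplate */
	struct map *map;
	for (map = maps__first(maps); map; map = map__next(map)) {
		/* use map */
	}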
......@@ -166,7 +166,7 @@ void maps__insert(struct rb_root *maps, struct map *map);
void maps__remove(struct rb_root *maps, struct map *map);
struct map *maps__find(struct rb_root *maps, u64 addr);
struct map *maps__first(struct rb_root *maps);
struct map *maps__next(struct map *map);
struct map *map__next(struct map *map);
void map_groups__init(struct map_groups *mg, struct machine *machine);
void map_groups__exit(struct map_groups *mg);
int map_groups__clone(struct map_groups *mg,
......@@ -201,7 +201,7 @@ static inline struct map *map_groups__first(struct map_groups *mg,
static inline struct map *map_groups__next(struct map *map)
{
return maps__next(map);
return map__next(map);
}
struct symbol *map_groups__find_symbol(struct map_groups *mg,
......@@ -233,6 +233,4 @@ int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
struct map *map_groups__find_by_name(struct map_groups *mg,
enum map_type type, const char *name);
void map_groups__flush(struct map_groups *mg);
#endif /* __PERF_MAP_H */
......@@ -25,12 +25,6 @@
extern int parse_events_debug;
#endif
int parse_events_parse(void *data, void *scanner);
int parse_events_term__num(struct parse_events_term **term,
int type_term, char *config, u64 num,
YYLTYPE *loc_term, YYLTYPE *loc_val);
int parse_events_term__str(struct parse_events_term **term,
int type_term, char *config, char *str,
YYLTYPE *loc_term, YYLTYPE *loc_val);
static struct perf_pmu_event_symbol *perf_pmu_events_list;
/*
......@@ -1601,8 +1595,11 @@ static int new_term(struct parse_events_term **_term, int type_val,
int parse_events_term__num(struct parse_events_term **term,
int type_term, char *config, u64 num,
YYLTYPE *loc_term, YYLTYPE *loc_val)
void *loc_term_, void *loc_val_)
{
YYLTYPE *loc_term = loc_term_;
YYLTYPE *loc_val = loc_val_;
return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term,
config, NULL, num,
loc_term ? loc_term->first_column : 0,
......@@ -1611,8 +1608,11 @@ int parse_events_term__num(struct parse_events_term **term,
int parse_events_term__str(struct parse_events_term **term,
int type_term, char *config, char *str,
YYLTYPE *loc_term, YYLTYPE *loc_val)
void *loc_term_, void *loc_val_)
{
YYLTYPE *loc_term = loc_term_;
YYLTYPE *loc_val = loc_val_;
return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, type_term,
config, str, 0,
loc_term ? loc_term->first_column : 0,
......@@ -1659,6 +1659,8 @@ void parse_events_evlist_error(struct parse_events_evlist *data,
{
struct parse_events_error *err = data->error;
if (!err)
return;
err->idx = idx;
err->str = strdup(str);
WARN_ONCE(!err->str, "WARNING: failed to allocate error string");
......
......@@ -98,6 +98,12 @@ struct parse_events_terms {
};
int parse_events__is_hardcoded_term(struct parse_events_term *term);
int parse_events_term__num(struct parse_events_term **term,
int type_term, char *config, u64 num,
void *loc_term, void *loc_val);
int parse_events_term__str(struct parse_events_term **term,
int type_term, char *config, char *str,
void *loc_term, void *loc_val);
int parse_events_term__sym_hw(struct parse_events_term **term,
char *config, unsigned idx);
int parse_events_term__clone(struct parse_events_term **new,
......
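Declaring the location parameters as void * lets these prototypes live in parse-events.h without dragging in YYLTYPE, which exists only in bison-generated code; parse-events.c casts back to the concrete type immediately. The shape of the pattern, condensed from the hunks (the final new_term() argument is assumed to follow the first_column style visible above):

	/* parse-events.h: locations stay opaque */
	int parse_events_term__num(struct parse_events_term **term,
				   int type_term, char *config, u64 num,
				   void *loc_term, void *loc_val);

	/* parse-events.c: recover the bison type locally */
	int parse_events_term__num(struct parse_events_term **term,
				   int type_term, char *config, u64 num,
				   void *loc_term_, void *loc_val_)
	{
		YYLTYPE *loc_term = loc_term_;
		YYLTYPE *loc_val = loc_val_;

		return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term,
				config, NULL, num,
				loc_term ? loc_term->first_column : 0,
				loc_val ? loc_val->first_column : 0);
	}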
......@@ -389,8 +389,10 @@ PE_NAME ':' PE_NAME
if (parse_events_add_tracepoint(list, &data->idx, $1, $3)) {
struct parse_events_error *error = data->error;
if (error) {
error->idx = @1.first_column;
error->str = strdup("unknown tracepoint");
}
return -1;
}
$$ = list;
......
......@@ -442,6 +442,10 @@ static struct perf_pmu *pmu_lookup(const char *name)
LIST_HEAD(aliases);
__u32 type;
/* No support for intel_bts or intel_pt so disallow them */
if (!strcmp(name, "intel_bts") || !strcmp(name, "intel_pt"))
return NULL;
/*
* The pmu data we store & need consists of the pmu
* type value and format definitions. Load both right
......
......@@ -162,8 +162,9 @@ static u64 kernel_get_symbol_address_by_name(const char *name, bool reloc)
static struct map *kernel_get_module_map(const char *module)
{
struct rb_node *nd;
struct map_groups *grp = &host_machine->kmaps;
struct rb_root *maps = &grp->maps[MAP__FUNCTION];
struct map *pos;
/* A file path -- this is an offline module */
if (module && strchr(module, '/'))
......@@ -172,8 +173,7 @@ static struct map *kernel_get_module_map(const char *module)
if (!module)
module = "kernel";
for (nd = rb_first(&grp->maps[MAP__FUNCTION]); nd; nd = rb_next(nd)) {
struct map *pos = rb_entry(nd, struct map, rb_node);
for (pos = maps__first(maps); pos; pos = map__next(pos)) {
if (strncmp(pos->dso->short_name + 1, module,
pos->dso->short_name_len - 2) == 0) {
return pos;
......@@ -195,17 +195,17 @@ static void put_target_map(struct map *map, bool user)
{
if (map && user) {
/* Only the user map needs to be released */
dso__delete(map->dso);
map__delete(map);
}
}
static struct dso *kernel_get_module_dso(const char *module)
static int kernel_get_module_dso(const char *module, struct dso **pdso)
{
struct dso *dso;
struct map *map;
const char *vmlinux_name;
int ret = 0;
if (module) {
list_for_each_entry(dso, &host_machine->kernel_dsos.head,
......@@ -215,30 +215,21 @@ static struct dso *kernel_get_module_dso(const char *module)
goto found;
}
pr_debug("Failed to find module %s.\n", module);
return NULL;
return -ENOENT;
}
map = host_machine->vmlinux_maps[MAP__FUNCTION];
dso = map->dso;
vmlinux_name = symbol_conf.vmlinux_name;
if (vmlinux_name) {
if (dso__load_vmlinux(dso, map, vmlinux_name, false, NULL) <= 0)
return NULL;
} else {
if (dso__load_vmlinux_path(dso, map, NULL) <= 0) {
pr_debug("Failed to load kernel map.\n");
return NULL;
}
}
dso->load_errno = 0;
if (vmlinux_name)
ret = dso__load_vmlinux(dso, map, vmlinux_name, false, NULL);
else
ret = dso__load_vmlinux_path(dso, map, NULL);
found:
return dso;
}
const char *kernel_get_module_path(const char *module)
{
struct dso *dso = kernel_get_module_dso(module);
return (dso) ? dso->long_name : NULL;
*pdso = dso;
return ret;
}
static int convert_exec_to_group(const char *exec, char **result)
......@@ -390,16 +381,25 @@ static int get_alternative_line_range(struct debuginfo *dinfo,
static struct debuginfo *open_debuginfo(const char *module, bool silent)
{
const char *path = module;
struct debuginfo *ret;
char reason[STRERR_BUFSIZE];
struct debuginfo *ret = NULL;
struct dso *dso = NULL;
int err;
if (!module || !strchr(module, '/')) {
path = kernel_get_module_path(module);
if (!path) {
err = kernel_get_module_dso(module, &dso);
if (err < 0) {
if (!dso || dso->load_errno == 0) {
if (!strerror_r(-err, reason, STRERR_BUFSIZE))
strcpy(reason, "(unknown)");
} else
dso__strerror_load(dso, reason, STRERR_BUFSIZE);
if (!silent)
pr_err("Failed to find path of %s module.\n",
module ?: "kernel");
pr_err("Failed to find the path for %s: %s\n",
module ?: "kernel", reason);
return NULL;
}
path = dso->long_name;
}
ret = debuginfo__new(path);
if (!ret && !silent) {
......@@ -1791,7 +1791,6 @@ static int find_perf_probe_point_from_map(struct probe_trace_point *tp,
out:
if (map && !is_kprobe) {
dso__delete(map->dso);
map__delete(map);
}
......@@ -2812,13 +2811,14 @@ int del_perf_probe_events(struct strfilter *filter)
goto error;
ret2 = del_trace_probe_events(ufd, filter, unamelist);
if (ret2 < 0 && ret2 != -ENOENT)
if (ret2 < 0 && ret2 != -ENOENT) {
ret = ret2;
else if (ret == -ENOENT && ret2 == -ENOENT) {
goto error;
}
if (ret == -ENOENT && ret2 == -ENOENT)
pr_debug("\"%s\" does not hit any event.\n", str);
/* Note that this is silently ignored */
ret = 0;
}
error:
if (kfd >= 0) {
......@@ -2884,7 +2884,6 @@ int show_available_funcs(const char *target, struct strfilter *_filter,
dso__fprintf_symbols_by_name(map->dso, map->type, stdout);
end:
if (user) {
dso__delete(map->dso);
map__delete(map);
}
exit_symbol_maps();
......
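kernel_get_module_dso() now reports *why* the vmlinux lookup failed instead of returning a bare NULL: it returns an errno-style code, hands the dso back through an out-parameter, and leaves dso__load_vmlinux*() failures in dso->load_errno so that open_debuginfo() can print a concrete reason. The caller side, condensed from the hunk above:

	struct dso *dso = NULL;
	char reason[STRERR_BUFSIZE];
	int err = kernel_get_module_dso(module, &dso);

	if (err < 0) {
		if (!dso || dso->load_errno == 0) {
			/* GNU strerror_r() returns the string, NULL on failure */
			if (!strerror_r(-err, reason, STRERR_BUFSIZE))
				strcpy(reason, "(unknown)");
		} else
			dso__strerror_load(dso, reason, STRERR_BUFSIZE);
		pr_err("Failed to find the path for %s: %s\n",
		       module ?: "kernel", reason);
	}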
......@@ -131,9 +131,6 @@ extern void line_range__clear(struct line_range *lr);
/* Initialize line range */
extern int line_range__init(struct line_range *lr);
/* Internal use: Return kernel/module path */
extern const char *kernel_get_module_path(const char *module);
extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs);
extern int del_perf_probe_events(struct strfilter *filter);
extern int show_perf_probe_events(struct strfilter *filter);
......
......@@ -1182,7 +1182,7 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
return -1;
if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
readn(fd, &buf, hdr_sz) != (ssize_t)hdr_sz)
readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
return -1;
event = (union perf_event *)buf;
......@@ -1190,12 +1190,12 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
if (session->header.needs_swap)
perf_event_header__bswap(&event->header);
if (event->header.size < hdr_sz)
if (event->header.size < hdr_sz || event->header.size > buf_sz)
return -1;
rest = event->header.size - hdr_sz;
if (readn(fd, &buf, rest) != (ssize_t)rest)
if (readn(fd, buf, rest) != (ssize_t)rest)
return -1;
if (session->header.needs_swap)
......
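The peek fix addresses two bugs at once: '&buf' was passed where 'buf' (already a pointer) was meant, and nothing stopped a crafted header size from overrunning the caller's buffer. The fixed flow as a sketch (the advance of buf past the header is surrounding context not shown in the hunk):

	if (readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)	/* buf, not &buf */
		return -1;

	event = (union perf_event *)buf;
	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	/* reject sizes smaller than a header or larger than the buffer */
	if (event->header.size < hdr_sz || event->header.size > buf_sz)
		return -1;

	buf += hdr_sz;					/* past the header */
	rest = event->header.size - hdr_sz;
	if (readn(fd, buf, rest) != (ssize_t)rest)
		return -1;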
......@@ -202,18 +202,16 @@ void symbols__fixup_end(struct rb_root *symbols)
void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
{
struct map *prev, *curr;
struct rb_node *nd, *prevnd = rb_first(&mg->maps[type]);
struct rb_root *maps = &mg->maps[type];
struct map *next, *curr;
if (prevnd == NULL)
curr = maps__first(maps);
if (curr == NULL)
return;
curr = rb_entry(prevnd, struct map, rb_node);
for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
prev = curr;
curr = rb_entry(nd, struct map, rb_node);
prev->end = curr->start;
for (next = map__next(curr); next; next = map__next(curr)) {
curr->end = next->start;
curr = next;
}
/*
......@@ -400,7 +398,7 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
const char *name)
{
struct rb_node *n;
struct symbol_name_rb_node *s;
struct symbol_name_rb_node *s = NULL;
if (symbols == NULL)
return NULL;
......@@ -1522,11 +1520,10 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
struct map *map_groups__find_by_name(struct map_groups *mg,
enum map_type type, const char *name)
{
struct rb_node *nd;
for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
struct map *map = rb_entry(nd, struct map, rb_node);
struct rb_root *maps = &mg->maps[type];
struct map *map;
for (map = maps__first(maps); map; map = map__next(map)) {
if (map->dso && strcmp(map->dso->short_name, name) == 0)
return map;
}
......
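The rewritten __map_groups__fixup_end() walks adjacent map pairs and closes each map at the start of its successor, using the same maps__first()/map__next() accessors. The loop shape in isolation:

	struct map *curr = maps__first(maps);
	struct map *next;

	if (curr == NULL)
		return;

	/* each map ends where its successor begins */
	for (next = map__next(curr); next; next = map__next(curr)) {
		curr->end = next->start;
		curr = next;
	}
	/* the final map's end is fixed up after the loop (see above) */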
......@@ -54,7 +54,6 @@ struct thread *thread__new(pid_t pid, pid_t tid)
list_add(&comm->list, &thread->comm_list);
atomic_set(&thread->refcnt, 0);
INIT_LIST_HEAD(&thread->node);
RB_CLEAR_NODE(&thread->rb_node);
}
......@@ -70,7 +69,6 @@ void thread__delete(struct thread *thread)
struct comm *comm, *tmp;
BUG_ON(!RB_EMPTY_NODE(&thread->rb_node));
BUG_ON(!list_empty(&thread->node));
thread_stack__free(thread);
......
......@@ -173,7 +173,7 @@ void parse_ftrace_printk(struct pevent *pevent,
char *line;
char *next = NULL;
char *addr_str;
char *fmt;
char *fmt = NULL;
line = strtok_r(file, "\n", &next);
while (line) {
......
......@@ -269,13 +269,14 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
u64 offset = dso->data.eh_frame_hdr_offset;
if (offset == 0) {
fd = dso__data_fd(dso, machine);
fd = dso__data_get_fd(dso, machine);
if (fd < 0)
return -EINVAL;
/* Check the .eh_frame section for unwinding info */
offset = elf_section_offset(fd, ".eh_frame_hdr");
dso->data.eh_frame_hdr_offset = offset;
dso__data_put_fd(dso);
}
if (offset)
......@@ -294,13 +295,14 @@ static int read_unwind_spec_debug_frame(struct dso *dso,
u64 ofs = dso->data.debug_frame_offset;
if (ofs == 0) {
fd = dso__data_fd(dso, machine);
fd = dso__data_get_fd(dso, machine);
if (fd < 0)
return -EINVAL;
/* Check the .debug_frame section for unwinding info */
ofs = elf_section_offset(fd, ".debug_frame");
dso->data.debug_frame_offset = ofs;
dso__data_put_fd(dso);
}
*offset = ofs;
......@@ -353,10 +355,13 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
#ifndef NO_LIBUNWIND_DEBUG_FRAME
/* Check the .debug_frame section for unwinding info */
if (!read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) {
int fd = dso__data_fd(map->dso, ui->machine);
int fd = dso__data_get_fd(map->dso, ui->machine);
int is_exec = elf_is_exec(fd, map->dso->name);
unw_word_t base = is_exec ? 0 : map->start;
if (fd >= 0)
dso__data_put_fd(map->dso);
memset(&di, 0, sizeof(di));
if (dwarf_find_debug_frame(0, &di, ip, base, map->dso->name,
map->start, map->end))
......