Commit d380eaae authored by Ingo Molnar

Merge branch 'perf/core' of...

Merge branch 'perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux-2.6 into perf/core
parents dda99116 ef1d1af2
......@@ -73,6 +73,10 @@ OPTIONS
(Only for --vars) Show externally defined variables in addition to local
variables.
-F::
--funcs::
Show available functions in the given module or kernel.
-f::
--force::
Forcibly add events with an existing name.
......
......@@ -402,6 +402,7 @@ LIB_H += util/debug.h
LIB_H += util/debugfs.h
LIB_H += util/event.h
LIB_H += util/evsel.h
LIB_H += util/evlist.h
LIB_H += util/exec_cmd.h
LIB_H += util/types.h
LIB_H += util/levenshtein.h
......@@ -425,6 +426,7 @@ LIB_H += util/values.h
LIB_H += util/sort.h
LIB_H += util/hist.h
LIB_H += util/thread.h
LIB_H += util/thread_map.h
LIB_H += util/trace-event.h
LIB_H += util/probe-finder.h
LIB_H += util/probe-event.h
......@@ -440,6 +442,7 @@ LIB_OBJS += $(OUTPUT)util/ctype.o
LIB_OBJS += $(OUTPUT)util/debugfs.o
LIB_OBJS += $(OUTPUT)util/environment.o
LIB_OBJS += $(OUTPUT)util/event.o
LIB_OBJS += $(OUTPUT)util/evlist.o
LIB_OBJS += $(OUTPUT)util/evsel.o
LIB_OBJS += $(OUTPUT)util/exec_cmd.o
LIB_OBJS += $(OUTPUT)util/help.o
......@@ -469,6 +472,7 @@ LIB_OBJS += $(OUTPUT)util/map.o
LIB_OBJS += $(OUTPUT)util/pstack.o
LIB_OBJS += $(OUTPUT)util/session.o
LIB_OBJS += $(OUTPUT)util/thread.o
LIB_OBJS += $(OUTPUT)util/thread_map.o
LIB_OBJS += $(OUTPUT)util/trace-event-parse.o
LIB_OBJS += $(OUTPUT)util/trace-event-read.o
LIB_OBJS += $(OUTPUT)util/trace-event-info.o
......
......@@ -52,6 +52,7 @@ static struct {
bool show_lines;
bool show_vars;
bool show_ext_vars;
bool show_funcs;
bool mod_events;
int nevents;
struct perf_probe_event events[MAX_PROBES];
......@@ -221,6 +222,8 @@ static const struct option options[] = {
OPT__DRY_RUN(&probe_event_dry_run),
OPT_INTEGER('\0', "max-probes", &params.max_probe_points,
"Set how many probe points can be found for a probe."),
OPT_BOOLEAN('F', "funcs", &params.show_funcs,
"Show potential probe-able functions."),
OPT_END()
};
......@@ -246,7 +249,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
params.max_probe_points = MAX_PROBES;
if ((!params.nevents && !params.dellist && !params.list_events &&
!params.show_lines))
!params.show_lines && !params.show_funcs))
usage_with_options(probe_usage, options);
/*
......@@ -267,12 +270,36 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
pr_err(" Error: Don't use --list with --vars.\n");
usage_with_options(probe_usage, options);
}
if (params.show_funcs) {
pr_err(" Error: Don't use --list with --funcs.\n");
usage_with_options(probe_usage, options);
}
ret = show_perf_probe_events();
if (ret < 0)
pr_err(" Error: Failed to show event list. (%d)\n",
ret);
return ret;
}
if (params.show_funcs) {
if (params.nevents != 0 || params.dellist) {
pr_err(" Error: Don't use --funcs with"
" --add/--del.\n");
usage_with_options(probe_usage, options);
}
if (params.show_lines) {
pr_err(" Error: Don't use --funcs with --line.\n");
usage_with_options(probe_usage, options);
}
if (params.show_vars) {
pr_err(" Error: Don't use --funcs with --vars.\n");
usage_with_options(probe_usage, options);
}
ret = show_available_funcs(params.target_module);
if (ret < 0)
pr_err(" Error: Failed to show functions."
" (%d)\n", ret);
return ret;
}
#ifdef DWARF_SUPPORT
if (params.show_lines) {
......
......@@ -81,18 +81,17 @@ static int perf_session__add_hist_entry(struct perf_session *self,
struct addr_location *al,
struct sample_data *data)
{
struct map_symbol *syms = NULL;
struct symbol *parent = NULL;
int err = -ENOMEM;
int err = 0;
struct hist_entry *he;
struct hists *hists;
struct perf_event_attr *attr;
if ((sort__has_parent || symbol_conf.use_callchain) && data->callchain) {
syms = perf_session__resolve_callchain(self, al->thread,
err = perf_session__resolve_callchain(self, al->thread,
data->callchain, &parent);
if (syms == NULL)
return -ENOMEM;
if (err)
return err;
}
attr = perf_header__find_attr(data->id, &self->header);
......@@ -101,16 +100,17 @@ static int perf_session__add_hist_entry(struct perf_session *self,
else
hists = perf_session__hists_findnew(self, data->id, 0, 0);
if (hists == NULL)
goto out_free_syms;
return -ENOMEM;
he = __hists__add_entry(hists, al, parent, data->period);
if (he == NULL)
goto out_free_syms;
err = 0;
return -ENOMEM;
if (symbol_conf.use_callchain) {
err = callchain_append(he->callchain, data->callchain, syms,
err = callchain_append(he->callchain, &self->callchain_cursor,
data->period);
if (err)
goto out_free_syms;
return err;
}
/*
* Only in the newt browser we are doing integrated annotation,
......@@ -119,8 +119,7 @@ static int perf_session__add_hist_entry(struct perf_session *self,
*/
if (use_browser > 0)
err = hist_entry__inc_addr_samples(he, al->addr);
out_free_syms:
free(syms);
return err;
}
......@@ -222,7 +221,7 @@ static int perf_session__setup_sample_type(struct perf_session *self)
} else if (!dont_use_callchains && callchain_param.mode != CHAIN_NONE &&
!symbol_conf.use_callchain) {
symbol_conf.use_callchain = true;
if (register_callchain_param(&callchain_param) < 0) {
if (callchain_register_param(&callchain_param) < 0) {
fprintf(stderr, "Can't register callchain"
" params\n");
return -EINVAL;
......@@ -424,7 +423,7 @@ parse_callchain_opt(const struct option *opt __used, const char *arg,
if (tok2)
callchain_param.print_limit = strtod(tok2, &endptr);
setup:
if (register_callchain_param(&callchain_param) < 0) {
if (callchain_register_param(&callchain_param) < 0) {
fprintf(stderr, "Can't register callchain params\n");
return -1;
}
......
......@@ -43,11 +43,13 @@
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/header.h"
#include "util/cpumap.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include <sys/prctl.h>
#include <math.h>
......@@ -71,6 +73,8 @@ static struct perf_event_attr default_attrs[] = {
};
struct perf_evlist *evsel_list;
static bool system_wide = false;
static struct cpu_map *cpus;
static int run_idx = 0;
......@@ -166,7 +170,7 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
PERF_FORMAT_TOTAL_TIME_RUNNING;
if (system_wide)
return perf_evsel__open_per_cpu(evsel, cpus);
return perf_evsel__open_per_cpu(evsel, cpus, false, false);
attr->inherit = !no_inherit;
if (target_pid == -1 && target_tid == -1) {
......@@ -174,7 +178,7 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
attr->enable_on_exec = 1;
}
return perf_evsel__open_per_thread(evsel, threads);
return perf_evsel__open_per_thread(evsel, threads, false, false);
}
/*
......@@ -309,7 +313,7 @@ static int run_perf_stat(int argc __used, const char **argv)
close(child_ready_pipe[0]);
}
list_for_each_entry(counter, &evsel_list, node) {
list_for_each_entry(counter, &evsel_list->entries, node) {
if (create_perf_stat_counter(counter) < 0) {
if (errno == -EPERM || errno == -EACCES) {
error("You may not have permission to collect %sstats.\n"
......@@ -347,12 +351,12 @@ static int run_perf_stat(int argc __used, const char **argv)
update_stats(&walltime_nsecs_stats, t1 - t0);
if (no_aggr) {
list_for_each_entry(counter, &evsel_list, node) {
list_for_each_entry(counter, &evsel_list->entries, node) {
read_counter(counter);
perf_evsel__close_fd(counter, cpus->nr, 1);
}
} else {
list_for_each_entry(counter, &evsel_list, node) {
list_for_each_entry(counter, &evsel_list->entries, node) {
read_counter_aggr(counter);
perf_evsel__close_fd(counter, cpus->nr, threads->nr);
}
......@@ -555,10 +559,10 @@ static void print_stat(int argc, const char **argv)
}
if (no_aggr) {
list_for_each_entry(counter, &evsel_list, node)
list_for_each_entry(counter, &evsel_list->entries, node)
print_counter(counter);
} else {
list_for_each_entry(counter, &evsel_list, node)
list_for_each_entry(counter, &evsel_list->entries, node)
print_counter_aggr(counter);
}
......@@ -610,7 +614,7 @@ static int stat__set_big_num(const struct option *opt __used,
}
static const struct option options[] = {
OPT_CALLBACK('e', "event", NULL, "event",
OPT_CALLBACK('e', "event", &evsel_list, "event",
"event selector. use 'perf list' to list available events",
parse_events),
OPT_BOOLEAN('i', "no-inherit", &no_inherit,
......@@ -648,6 +652,10 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
setlocale(LC_ALL, "");
evsel_list = perf_evlist__new();
if (evsel_list == NULL)
return -ENOMEM;
argc = parse_options(argc, argv, options, stat_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
......@@ -679,17 +687,14 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
usage_with_options(stat_usage, options);
/* Set attrs and nr_counters if no event is selected and !null_run */
if (!null_run && !nr_counters) {
if (!null_run && !evsel_list->nr_entries) {
size_t c;
nr_counters = ARRAY_SIZE(default_attrs);
for (c = 0; c < ARRAY_SIZE(default_attrs); ++c) {
pos = perf_evsel__new(&default_attrs[c],
nr_counters);
pos = perf_evsel__new(&default_attrs[c], c);
if (pos == NULL)
goto out;
list_add(&pos->node, &evsel_list);
perf_evlist__add(evsel_list, pos);
}
}
......@@ -713,7 +718,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
return -1;
}
list_for_each_entry(pos, &evsel_list, node) {
list_for_each_entry(pos, &evsel_list->entries, node) {
if (perf_evsel__alloc_stat_priv(pos) < 0 ||
perf_evsel__alloc_counts(pos, cpus->nr) < 0 ||
perf_evsel__alloc_fd(pos, cpus->nr, threads->nr) < 0)
......@@ -741,9 +746,9 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
if (status != -1)
print_stat(argc, argv);
out_free_fd:
list_for_each_entry(pos, &evsel_list, node)
list_for_each_entry(pos, &evsel_list->entries, node)
perf_evsel__free_stat_priv(pos);
perf_evsel_list__delete();
perf_evlist__delete(evsel_list);
out:
thread_map__delete(threads);
threads = NULL;
......
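Note: the builtin-stat hunks above replace the old global evsel_list list head (and the nr_counters count) with a heap-allocated struct perf_evlist. A minimal sketch of the resulting flow, assuming only the evlist/evsel APIs introduced in this commit; error unwinding is trimmed and threads is set up elsewhere, as in builtin-stat.c:

    #include "util/evlist.h"
    #include "util/evsel.h"

    static int stat_sketch(struct thread_map *threads)
    {
        struct perf_evlist *evlist = perf_evlist__new();
        struct perf_evsel *counter;

        if (evlist == NULL)
            return -ENOMEM;
        /* one default PERF_COUNT_HW_CPU_CYCLES counter */
        if (perf_evlist__add_default(evlist))
            return -ENOMEM;

        list_for_each_entry(counter, &evlist->entries, node) {
            /* group = false, inherit = false */
            if (perf_evsel__open_per_thread(counter, threads, false, false) < 0)
                return -1;
        }

        /* ... run the workload, read and print counters ... */
        perf_evlist__delete(evlist);
        return 0;
    }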
......@@ -7,10 +7,11 @@
#include "util/cache.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/parse-options.h"
#include "util/session.h"
#include "util/parse-events.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/thread_map.h"
static long page_size;
......@@ -238,14 +239,14 @@ static int test__vmlinux_matches_kallsyms(void)
#include "util/evsel.h"
#include <sys/types.h>
static int trace_event__id(const char *event_name)
static int trace_event__id(const char *evname)
{
char *filename;
int err = -1, fd;
if (asprintf(&filename,
"/sys/kernel/debug/tracing/events/syscalls/%s/id",
event_name) < 0)
evname) < 0)
return -1;
fd = open(filename, O_RDONLY);
......@@ -289,7 +290,7 @@ static int test__open_syscall_event(void)
goto out_thread_map_delete;
}
if (perf_evsel__open_per_thread(evsel, threads) < 0) {
if (perf_evsel__open_per_thread(evsel, threads, false, false) < 0) {
pr_debug("failed to open counter: %s, "
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
strerror(errno));
......@@ -347,9 +348,9 @@ static int test__open_syscall_event_on_all_cpus(void)
}
cpus = cpu_map__new(NULL);
if (threads == NULL) {
pr_debug("thread_map__new\n");
return -1;
if (cpus == NULL) {
pr_debug("cpu_map__new\n");
goto out_thread_map_delete;
}
......@@ -364,7 +365,7 @@ static int test__open_syscall_event_on_all_cpus(void)
goto out_thread_map_delete;
}
if (perf_evsel__open(evsel, cpus, threads) < 0) {
if (perf_evsel__open(evsel, cpus, threads, false, false) < 0) {
pr_debug("failed to open counter: %s, "
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
strerror(errno));
......@@ -408,6 +409,8 @@ static int test__open_syscall_event_on_all_cpus(void)
goto out_close_fd;
}
err = 0;
for (cpu = 0; cpu < cpus->nr; ++cpu) {
unsigned int expected;
......@@ -416,18 +419,18 @@ static int test__open_syscall_event_on_all_cpus(void)
if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
pr_debug("perf_evsel__open_read_on_cpu\n");
goto out_close_fd;
err = -1;
break;
}
expected = nr_open_calls + cpu;
if (evsel->counts->cpu[cpu].val != expected) {
pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
goto out_close_fd;
err = -1;
}
}
err = 0;
out_close_fd:
perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
......@@ -437,6 +440,159 @@ static int test__open_syscall_event_on_all_cpus(void)
return err;
}
/*
* This test will generate random numbers of calls to some getpid syscalls,
* then establish an mmap for a group of events that are created to monitor
* the syscalls.
*
* It will receive the events using mmap, then use the PERF_SAMPLE_ID
* generated sample.id field to map each sample back to its respective
* perf_evsel instance.
*
* Then it checks if the number of syscalls reported as perf events by
* the kernel corresponds to the number of syscalls made.
*/
static int test__basic_mmap(void)
{
int err = -1;
event_t *event;
struct thread_map *threads;
struct cpu_map *cpus;
struct perf_evlist *evlist;
struct perf_event_attr attr = {
.type = PERF_TYPE_TRACEPOINT,
.read_format = PERF_FORMAT_ID,
.sample_type = PERF_SAMPLE_ID,
.watermark = 0,
};
cpu_set_t cpu_set;
const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
"getpgid", };
pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
(void*)getpgid };
#define nsyscalls ARRAY_SIZE(syscall_names)
int ids[nsyscalls];
unsigned int nr_events[nsyscalls],
expected_nr_events[nsyscalls], i, j;
struct perf_evsel *evsels[nsyscalls], *evsel;
for (i = 0; i < nsyscalls; ++i) {
char name[64];
snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
ids[i] = trace_event__id(name);
if (ids[i] < 0) {
pr_debug("Is debugfs mounted on /sys/kernel/debug?\n");
return -1;
}
nr_events[i] = 0;
expected_nr_events[i] = random() % 257;
}
threads = thread_map__new(-1, getpid());
if (threads == NULL) {
pr_debug("thread_map__new\n");
return -1;
}
cpus = cpu_map__new(NULL);
if (cpus == NULL) {
pr_debug("cpu_map__new\n");
goto out_free_threads;
}
CPU_ZERO(&cpu_set);
CPU_SET(cpus->map[0], &cpu_set);
if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
pr_debug("sched_setaffinity() failed on CPU %d: %s ",
cpus->map[0], strerror(errno));
goto out_free_cpus;
}
evlist = perf_evlist__new();
if (evlist == NULL) {
pr_debug("perf_evlist__new\n");
goto out_free_cpus;
}
/* anonymous union fields, can't be initialized above */
attr.wakeup_events = 1;
attr.sample_period = 1;
for (i = 0; i < nsyscalls; ++i) {
attr.config = ids[i];
evsels[i] = perf_evsel__new(&attr, i);
if (evsels[i] == NULL) {
pr_debug("perf_evsel__new\n");
goto out_free_evlist;
}
perf_evlist__add(evlist, evsels[i]);
if (perf_evsel__open(evsels[i], cpus, threads, false, false) < 0) {
pr_debug("failed to open counter: %s, "
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
strerror(errno));
goto out_close_fd;
}
}
if (perf_evlist__mmap(evlist, cpus, threads, 128, true) < 0) {
pr_debug("failed to mmap events: %d (%s)\n", errno,
strerror(errno));
goto out_close_fd;
}
for (i = 0; i < nsyscalls; ++i)
for (j = 0; j < expected_nr_events[i]; ++j) {
int foo = syscalls[i]();
++foo; /* use the result so the calls aren't optimized out */
}
while ((event = perf_evlist__read_on_cpu(evlist, 0)) != NULL) {
struct sample_data sample;
if (event->header.type != PERF_RECORD_SAMPLE) {
pr_debug("unexpected %s event\n",
event__get_event_name(event->header.type));
goto out_munmap;
}
event__parse_sample(event, attr.sample_type, false, &sample);
evsel = perf_evlist__id2evsel(evlist, sample.id);
if (evsel == NULL) {
pr_debug("event with id %" PRIu64
" doesn't map to an evsel\n", sample.id);
goto out_munmap;
}
nr_events[evsel->idx]++;
}
list_for_each_entry(evsel, &evlist->entries, node) {
if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
pr_debug("expected %d %s events, got %d\n",
expected_nr_events[evsel->idx],
event_name(evsel), nr_events[evsel->idx]);
goto out_munmap;
}
}
err = 0;
out_munmap:
perf_evlist__munmap(evlist, 1);
out_close_fd:
for (i = 0; i < nsyscalls; ++i)
perf_evsel__close_fd(evsels[i], 1, threads->nr);
out_free_evlist:
perf_evlist__delete(evlist);
out_free_cpus:
cpu_map__delete(cpus);
out_free_threads:
thread_map__delete(threads);
return err;
#undef nsyscalls
}
static struct test {
const char *desc;
int (*func)(void);
......@@ -453,6 +609,10 @@ static struct test {
.desc = "detect open syscall event on all cpus",
.func = test__open_syscall_event_on_all_cpus,
},
{
.desc = "read samples using the mmap interface",
.func = test__basic_mmap,
},
{
.func = NULL,
},
......
......@@ -94,6 +94,32 @@ void get_term_dimensions(struct winsize *ws);
#include "util/types.h"
#include <stdbool.h>
struct perf_mmap {
void *base;
int mask;
unsigned int prev;
};
static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm)
{
struct perf_event_mmap_page *pc = mm->base;
int head = pc->data_head;
rmb();
return head;
}
static inline void perf_mmap__write_tail(struct perf_mmap *md,
unsigned long tail)
{
struct perf_event_mmap_page *pc = md->base;
/*
* ensure all reads are done before we write the tail out.
*/
/* mb(); */
pc->data_tail = tail;
}
/*
* prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
* counters in the current task.
......
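Note: perf_mmap__read_head() pairs an rmb() with the kernel's data_head update, and perf_mmap__write_tail() reports the consumed position back through data_tail. A hedged sketch of the consumer loop these accessors support, mirroring perf_evlist__read_on_cpu() later in this commit; the drain_one helper and its page_size parameter are illustrative only, and the sketch assumes no event wraps the ring:

    static void drain_one(struct perf_mmap *md, unsigned int page_size)
    {
        unsigned int head = perf_mmap__read_head(md); /* rmb() inside */
        unsigned char *data = md->base + page_size;   /* ring follows the header page */

        while (md->prev != head) {
            event_t *event = (event_t *)&data[md->prev & md->mask];

            /* ... consume the event ... */
            md->prev += event->header.size;
        }
        perf_mmap__write_tail(md, md->prev); /* tell the kernel we caught up */
    }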
......@@ -16,7 +16,7 @@ enum chain_mode {
struct callchain_node {
struct callchain_node *parent;
struct list_head brothers;
struct list_head siblings;
struct list_head children;
struct list_head val;
struct rb_node rb_node; /* to sort nodes in an rbtree */
......@@ -49,9 +49,30 @@ struct callchain_list {
struct list_head list;
};
/*
* A callchain cursor is a singly linked list that
* lets one feed a callchain progressively.
* It keeps persistent allocated entries to minimize
* allocations.
*/
struct callchain_cursor_node {
u64 ip;
struct map *map;
struct symbol *sym;
struct callchain_cursor_node *next;
};
struct callchain_cursor {
u64 nr;
struct callchain_cursor_node *first;
struct callchain_cursor_node **last;
u64 pos;
struct callchain_cursor_node *curr;
};
static inline void callchain_init(struct callchain_root *root)
{
INIT_LIST_HEAD(&root->node.brothers);
INIT_LIST_HEAD(&root->node.siblings);
INIT_LIST_HEAD(&root->node.children);
INIT_LIST_HEAD(&root->node.val);
......@@ -61,15 +82,54 @@ static inline void callchain_init(struct callchain_root *root)
root->max_depth = 0;
}
static inline u64 cumul_hits(struct callchain_node *node)
static inline u64 callchain_cumul_hits(struct callchain_node *node)
{
return node->hit + node->children_hit;
}
int register_callchain_param(struct callchain_param *param);
int callchain_append(struct callchain_root *root, struct ip_callchain *chain,
struct map_symbol *syms, u64 period);
int callchain_merge(struct callchain_root *dst, struct callchain_root *src);
int callchain_register_param(struct callchain_param *param);
int callchain_append(struct callchain_root *root,
struct callchain_cursor *cursor,
u64 period);
int callchain_merge(struct callchain_cursor *cursor,
struct callchain_root *dst, struct callchain_root *src);
bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event);
/*
* Initialize a cursor before adding entries inside, but keep
* the previously allocated entries as a cache.
*/
static inline void callchain_cursor_reset(struct callchain_cursor *cursor)
{
cursor->nr = 0;
cursor->last = &cursor->first;
}
int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip,
struct map *map, struct symbol *sym);
/* Close a cursor writing session. Initialize for the reader */
static inline void callchain_cursor_commit(struct callchain_cursor *cursor)
{
cursor->curr = cursor->first;
cursor->pos = 0;
}
/* Cursor reading iteration helpers */
static inline struct callchain_cursor_node *
callchain_cursor_current(struct callchain_cursor *cursor)
{
if (cursor->pos == cursor->nr)
return NULL;
return cursor->curr;
}
static inline void callchain_cursor_advance(struct callchain_cursor *cursor)
{
cursor->curr = cursor->curr->next;
cursor->pos++;
}
#endif /* __PERF_CALLCHAIN_H */
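Note: the cursor API above splits callchain handling into a write phase and a read phase. A hedged sketch of both sides, with frame resolution elided; cursor, chain, ip, map, sym, node, i and err stand in for the caller's state, as in perf_session__resolve_callchain() below:

    /* writer side, once per sample */
    callchain_cursor_reset(cursor);
    for (i = 0; i < chain->nr; i++) {
        /* resolve chain->ips[i] into ip/map/sym, then: */
        err = callchain_cursor_append(cursor, ip, map, sym);
        if (err)
            return err;
    }

    /* reader side, e.g. inside callchain_append() */
    callchain_cursor_commit(cursor);
    while ((node = callchain_cursor_current(cursor)) != NULL) {
        /* ... use node->ip, node->map, node->sym ... */
        callchain_cursor_advance(cursor);
    }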
......@@ -177,3 +177,8 @@ struct cpu_map *cpu_map__dummy_new(void)
return cpus;
}
void cpu_map__delete(struct cpu_map *map)
{
free(map);
}
......@@ -8,6 +8,6 @@ struct cpu_map {
struct cpu_map *cpu_map__new(const char *cpu_list);
struct cpu_map *cpu_map__dummy_new(void);
void *cpu_map__delete(struct cpu_map *map);
void cpu_map__delete(struct cpu_map *map);
#endif /* __PERF_CPUMAP_H */
......@@ -826,128 +826,3 @@ int event__preprocess_sample(const event_t *self, struct perf_session *session,
al->filtered = true;
return 0;
}
static int event__parse_id_sample(const event_t *event,
struct perf_session *session,
struct sample_data *sample)
{
const u64 *array;
u64 type;
sample->cpu = sample->pid = sample->tid = -1;
sample->stream_id = sample->id = sample->time = -1ULL;
if (!session->sample_id_all)
return 0;
array = event->sample.array;
array += ((event->header.size -
sizeof(event->header)) / sizeof(u64)) - 1;
type = session->sample_type;
if (type & PERF_SAMPLE_CPU) {
u32 *p = (u32 *)array;
sample->cpu = *p;
array--;
}
if (type & PERF_SAMPLE_STREAM_ID) {
sample->stream_id = *array;
array--;
}
if (type & PERF_SAMPLE_ID) {
sample->id = *array;
array--;
}
if (type & PERF_SAMPLE_TIME) {
sample->time = *array;
array--;
}
if (type & PERF_SAMPLE_TID) {
u32 *p = (u32 *)array;
sample->pid = p[0];
sample->tid = p[1];
}
return 0;
}
int event__parse_sample(const event_t *event, struct perf_session *session,
struct sample_data *data)
{
const u64 *array;
u64 type;
if (event->header.type != PERF_RECORD_SAMPLE)
return event__parse_id_sample(event, session, data);
array = event->sample.array;
type = session->sample_type;
if (type & PERF_SAMPLE_IP) {
data->ip = event->ip.ip;
array++;
}
if (type & PERF_SAMPLE_TID) {
u32 *p = (u32 *)array;
data->pid = p[0];
data->tid = p[1];
array++;
}
if (type & PERF_SAMPLE_TIME) {
data->time = *array;
array++;
}
if (type & PERF_SAMPLE_ADDR) {
data->addr = *array;
array++;
}
data->id = -1ULL;
if (type & PERF_SAMPLE_ID) {
data->id = *array;
array++;
}
if (type & PERF_SAMPLE_STREAM_ID) {
data->stream_id = *array;
array++;
}
if (type & PERF_SAMPLE_CPU) {
u32 *p = (u32 *)array;
data->cpu = *p;
array++;
} else
data->cpu = -1;
if (type & PERF_SAMPLE_PERIOD) {
data->period = *array;
array++;
}
if (type & PERF_SAMPLE_READ) {
pr_debug("PERF_SAMPLE_READ is unsuported for now\n");
return -1;
}
if (type & PERF_SAMPLE_CALLCHAIN) {
data->callchain = (struct ip_callchain *)array;
array += 1 + data->callchain->nr;
}
if (type & PERF_SAMPLE_RAW) {
u32 *p = (u32 *)array;
data->raw_size = *p;
p++;
data->raw_data = p;
}
return 0;
}
......@@ -169,9 +169,10 @@ struct addr_location;
int event__preprocess_sample(const event_t *self, struct perf_session *session,
struct addr_location *al, struct sample_data *data,
symbol_filter_t filter);
int event__parse_sample(const event_t *event, struct perf_session *session,
struct sample_data *sample);
const char *event__get_event_name(unsigned int id);
int event__parse_sample(const event_t *event, u64 type, bool sample_id_all,
struct sample_data *sample);
#endif /* __PERF_RECORD_H */
#include <poll.h>
#include "evlist.h"
#include "evsel.h"
#include "util.h"
#include <linux/bitops.h>
#include <linux/hash.h>
void perf_evlist__init(struct perf_evlist *evlist)
{
int i;
for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
INIT_HLIST_HEAD(&evlist->heads[i]);
INIT_LIST_HEAD(&evlist->entries);
}
struct perf_evlist *perf_evlist__new(void)
{
struct perf_evlist *evlist = zalloc(sizeof(*evlist));
if (evlist != NULL)
perf_evlist__init(evlist);
return evlist;
}
static void perf_evlist__purge(struct perf_evlist *evlist)
{
struct perf_evsel *pos, *n;
list_for_each_entry_safe(pos, n, &evlist->entries, node) {
list_del_init(&pos->node);
perf_evsel__delete(pos);
}
evlist->nr_entries = 0;
}
void perf_evlist__exit(struct perf_evlist *evlist)
{
free(evlist->mmap);
free(evlist->pollfd);
evlist->mmap = NULL;
evlist->pollfd = NULL;
}
void perf_evlist__delete(struct perf_evlist *evlist)
{
perf_evlist__purge(evlist);
perf_evlist__exit(evlist);
free(evlist);
}
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
list_add_tail(&entry->node, &evlist->entries);
++evlist->nr_entries;
}
int perf_evlist__add_default(struct perf_evlist *evlist)
{
struct perf_event_attr attr = {
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
};
struct perf_evsel *evsel = perf_evsel__new(&attr, 0);
if (evsel == NULL)
return -ENOMEM;
perf_evlist__add(evlist, evsel);
return 0;
}
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist, int ncpus, int nthreads)
{
int nfds = ncpus * nthreads * evlist->nr_entries;
evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
return evlist->pollfd != NULL ? 0 : -ENOMEM;
}
void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
fcntl(fd, F_SETFL, O_NONBLOCK);
evlist->pollfd[evlist->nr_fds].fd = fd;
evlist->pollfd[evlist->nr_fds].events = POLLIN;
evlist->nr_fds++;
}
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
struct hlist_head *head;
struct hlist_node *pos;
struct perf_sample_id *sid;
int hash;
if (evlist->nr_entries == 1)
return list_entry(evlist->entries.next, struct perf_evsel, node);
hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
head = &evlist->heads[hash];
hlist_for_each_entry(sid, pos, head, node)
if (sid->id == id)
return sid->evsel;
return NULL;
}
event_t *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu)
{
/* XXX Move this to perf.c, making it generally available */
unsigned int page_size = sysconf(_SC_PAGE_SIZE);
struct perf_mmap *md = &evlist->mmap[cpu];
unsigned int head = perf_mmap__read_head(md);
unsigned int old = md->prev;
unsigned char *data = md->base + page_size;
event_t *event = NULL;
int diff;
/*
* If we're further behind than half the buffer, there's a chance
* the writer will bite our tail and mess up the samples under us.
*
* If we somehow ended up ahead of the head, we got messed up.
*
* In either case, truncate and restart at head.
*/
diff = head - old;
if (diff > md->mask / 2 || diff < 0) {
fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
/*
* head points to a known good entry, start there.
*/
old = head;
}
if (old != head) {
size_t size;
event = (event_t *)&data[old & md->mask];
size = event->header.size;
/*
* Event straddles the mmap boundary -- header should always
* be inside due to u64 alignment of output.
*/
if ((old & md->mask) + size != ((old + size) & md->mask)) {
unsigned int offset = old;
unsigned int len = min(sizeof(*event), size), cpy;
void *dst = &evlist->event_copy;
do {
cpy = min(md->mask + 1 - (offset & md->mask), len);
memcpy(dst, &data[offset & md->mask], cpy);
offset += cpy;
dst += cpy;
len -= cpy;
} while (len);
event = &evlist->event_copy;
}
old += size;
}
md->prev = old;
return event;
}
#ifndef __PERF_EVLIST_H
#define __PERF_EVLIST_H 1
#include <linux/list.h>
#include "../perf.h"
#include "event.h"
struct pollfd;
#define PERF_EVLIST__HLIST_BITS 8
#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
struct perf_evlist {
struct list_head entries;
struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
int nr_entries;
int nr_fds;
int mmap_len;
event_t event_copy;
struct perf_mmap *mmap;
struct pollfd *pollfd;
};
struct perf_evsel;
struct perf_evlist *perf_evlist__new(void);
void perf_evlist__init(struct perf_evlist *evlist);
void perf_evlist__exit(struct perf_evlist *evlist);
void perf_evlist__delete(struct perf_evlist *evlist);
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry);
int perf_evlist__add_default(struct perf_evlist *evlist);
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist, int ncpus, int nthreads);
void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
event_t *perf_evlist__read_on_cpu(struct perf_evlist *self, int cpu);
#endif /* __PERF_EVLIST_H */
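Note: tying the declarations above together, the mmap read path goes evlist -> per-cpu ring -> sample -> evsel. A condensed, hedged sketch in the order test__basic_mmap uses them; setup of evlist, cpus, threads and attr is elided:

    if (perf_evlist__mmap(evlist, cpus, threads, 128, true) < 0)
        return -1;

    while ((event = perf_evlist__read_on_cpu(evlist, 0)) != NULL) {
        struct sample_data sample;

        event__parse_sample(event, attr.sample_type, false, &sample);
        /* PERF_SAMPLE_ID maps the sample back to its evsel */
        evsel = perf_evlist__id2evsel(evlist, sample.id);
        /* ... account the sample to evsel ... */
    }

    perf_evlist__munmap(evlist, 1);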
......@@ -24,11 +24,24 @@ struct perf_counts {
struct perf_counts_values cpu[];
};
struct perf_evsel;
/*
* Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there is
* more than one entry in the evlist.
*/
struct perf_sample_id {
struct hlist_node node;
u64 id;
struct perf_evsel *evsel;
};
struct perf_evsel {
struct list_head node;
struct perf_event_attr attr;
char *filter;
struct xyarray *fd;
struct xyarray *id;
struct perf_counts *counts;
int idx;
void *priv;
......@@ -36,19 +49,31 @@ struct perf_evsel {
struct cpu_map;
struct thread_map;
struct perf_evlist;
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx);
void perf_evsel__init(struct perf_evsel *evsel,
struct perf_event_attr *attr, int idx);
void perf_evsel__exit(struct perf_evsel *evsel);
void perf_evsel__delete(struct perf_evsel *evsel);
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
int perf_evlist__alloc_mmap(struct perf_evlist *evlist, int ncpus);
void perf_evsel__free_fd(struct perf_evsel *evsel);
void perf_evsel__free_id(struct perf_evsel *evsel);
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus);
int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads);
int perf_evsel__open(struct perf_evsel *evsel,
struct cpu_map *cpus, struct thread_map *threads);
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
struct cpu_map *cpus, bool group, bool inherit);
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
struct thread_map *threads, bool group, bool inherit);
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
struct thread_map *threads, bool group, bool inherit);
int perf_evlist__mmap(struct perf_evlist *evlist, struct cpu_map *cpus,
struct thread_map *threads, int pages, bool overwrite);
void perf_evlist__munmap(struct perf_evlist *evlist, int ncpus);
#define perf_evsel__match(evsel, t, c) \
(evsel->attr.type == PERF_TYPE_##t && \
......
......@@ -8,6 +8,7 @@
#include <linux/list.h>
#include <linux/kernel.h>
#include "evlist.h"
#include "util.h"
#include "header.h"
#include "../perf.h"
......@@ -428,7 +429,8 @@ static bool perf_session__read_build_ids(struct perf_session *self, bool with_hi
return ret;
}
static int perf_header__adds_write(struct perf_header *self, int fd)
static int perf_header__adds_write(struct perf_header *self,
struct perf_evlist *evlist, int fd)
{
int nr_sections;
struct perf_session *session;
......@@ -463,7 +465,7 @@ static int perf_header__adds_write(struct perf_header *self, int fd)
/* Write trace info */
trace_sec->offset = lseek(fd, 0, SEEK_CUR);
read_tracing_data(fd, &evsel_list);
read_tracing_data(fd, &evlist->entries);
trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset;
}
......@@ -513,7 +515,8 @@ int perf_header__write_pipe(int fd)
return 0;
}
int perf_header__write(struct perf_header *self, int fd, bool at_exit)
int perf_header__write(struct perf_header *self, struct perf_evlist *evlist,
int fd, bool at_exit)
{
struct perf_file_header f_header;
struct perf_file_attr f_attr;
......@@ -566,7 +569,7 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit)
self->data_offset = lseek(fd, 0, SEEK_CUR);
if (at_exit) {
err = perf_header__adds_write(self, fd);
err = perf_header__adds_write(self, evlist, fd);
if (err < 0)
return err;
}
......@@ -1133,7 +1136,7 @@ int event__process_event_type(event_t *self,
return 0;
}
int event__synthesize_tracing_data(int fd, struct list_head *pattrs,
int event__synthesize_tracing_data(int fd, struct perf_evlist *evlist,
event__handler_t process,
struct perf_session *session __unused)
{
......@@ -1144,7 +1147,7 @@ int event__synthesize_tracing_data(int fd, struct list_head *pattrs,
memset(&ev, 0, sizeof(ev));
ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
size = read_tracing_data_size(fd, pattrs);
size = read_tracing_data_size(fd, &evlist->entries);
if (size <= 0)
return size;
aligned_size = ALIGN(size, sizeof(u64));
......@@ -1154,7 +1157,7 @@ int event__synthesize_tracing_data(int fd, struct list_head *pattrs,
process(&ev, NULL, session);
err = read_tracing_data(fd, pattrs);
err = read_tracing_data(fd, &evlist->entries);
write_padded(fd, NULL, 0, padding);
return aligned_size;
......
......@@ -65,8 +65,11 @@ struct perf_header {
int perf_header__init(struct perf_header *self);
void perf_header__exit(struct perf_header *self);
struct perf_evlist;
int perf_header__read(struct perf_session *session, int fd);
int perf_header__write(struct perf_header *self, int fd, bool at_exit);
int perf_header__write(struct perf_header *self, struct perf_evlist *evlist,
int fd, bool at_exit);
int perf_header__write_pipe(int fd);
int perf_header__add_attr(struct perf_header *self,
......@@ -113,7 +116,7 @@ int event__synthesize_event_types(event__handler_t process,
int event__process_event_type(event_t *self,
struct perf_session *session);
int event__synthesize_tracing_data(int fd, struct list_head *pattrs,
int event__synthesize_tracing_data(int fd, struct perf_evlist *evlist,
event__handler_t process,
struct perf_session *session);
int event__process_tracing_data(event_t *self,
......
......@@ -211,7 +211,9 @@ void hist_entry__free(struct hist_entry *he)
* collapse the histogram
*/
static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
static bool hists__collapse_insert_entry(struct hists *self,
struct rb_root *root,
struct hist_entry *he)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
......@@ -226,8 +228,11 @@ static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
if (!cmp) {
iter->period += he->period;
if (symbol_conf.use_callchain)
callchain_merge(iter->callchain, he->callchain);
if (symbol_conf.use_callchain) {
callchain_cursor_reset(&self->callchain_cursor);
callchain_merge(&self->callchain_cursor, iter->callchain,
he->callchain);
}
hist_entry__free(he);
return false;
}
......@@ -262,7 +267,7 @@ void hists__collapse_resort(struct hists *self)
next = rb_next(&n->rb_node);
rb_erase(&n->rb_node, &self->entries);
if (collapse__insert_entry(&tmp, n))
if (hists__collapse_insert_entry(self, &tmp, n))
hists__inc_nr_entries(self, n);
}
......@@ -425,7 +430,7 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
u64 cumul;
child = rb_entry(node, struct callchain_node, rb_node);
cumul = cumul_hits(child);
cumul = callchain_cumul_hits(child);
remaining -= cumul;
/*
......
......@@ -77,6 +77,8 @@ struct hists {
u64 event_stream;
u32 type;
u16 col_len[HISTC_NR_COLS];
/* Best would be to reuse the session callchain cursor */
struct callchain_cursor callchain_cursor;
};
struct hist_entry *__hists__add_entry(struct hists *self,
......
#include <linux/kernel.h>
#include "../../../../include/linux/list.h"
#ifndef PERF_LIST_H
......
#include "../../../include/linux/hw_breakpoint.h"
#include "util.h"
#include "../perf.h"
#include "evlist.h"
#include "evsel.h"
#include "parse-options.h"
#include "parse-events.h"
......@@ -11,10 +12,6 @@
#include "header.h"
#include "debugfs.h"
int nr_counters;
LIST_HEAD(evsel_list);
struct event_symbol {
u8 type;
u64 config;
......@@ -449,8 +446,8 @@ parse_single_tracepoint_event(char *sys_name,
/* sys + ':' + event + ':' + flags*/
#define MAX_EVOPT_LEN (MAX_EVENT_LENGTH * 2 + 2 + 128)
static enum event_result
parse_multiple_tracepoint_event(char *sys_name, const char *evt_exp,
char *flags)
parse_multiple_tracepoint_event(const struct option *opt, char *sys_name,
const char *evt_exp, char *flags)
{
char evt_path[MAXPATHLEN];
struct dirent *evt_ent;
......@@ -483,14 +480,15 @@ parse_multiple_tracepoint_event(char *sys_name, const char *evt_exp,
if (len < 0)
return EVT_FAILED;
if (parse_events(NULL, event_opt, 0))
if (parse_events(opt, event_opt, 0))
return EVT_FAILED;
}
return EVT_HANDLED_ALL;
}
static enum event_result parse_tracepoint_event(const char **strp,
static enum event_result
parse_tracepoint_event(const struct option *opt, const char **strp,
struct perf_event_attr *attr)
{
const char *evt_name;
......@@ -530,7 +528,7 @@ static enum event_result parse_tracepoint_event(const char **strp,
return EVT_FAILED;
if (strpbrk(evt_name, "*?")) {
*strp += strlen(sys_name) + evt_length + 1; /* 1 == the ':' */
return parse_multiple_tracepoint_event(sys_name, evt_name,
return parse_multiple_tracepoint_event(opt, sys_name, evt_name,
flags);
} else {
return parse_single_tracepoint_event(sys_name, evt_name,
......@@ -740,11 +738,12 @@ parse_event_modifier(const char **strp, struct perf_event_attr *attr)
* Symbolic names are (almost) exactly matched.
*/
static enum event_result
parse_event_symbols(const char **str, struct perf_event_attr *attr)
parse_event_symbols(const struct option *opt, const char **str,
struct perf_event_attr *attr)
{
enum event_result ret;
ret = parse_tracepoint_event(str, attr);
ret = parse_tracepoint_event(opt, str, attr);
if (ret != EVT_FAILED)
goto modifier;
......@@ -778,14 +777,15 @@ parse_event_symbols(const char **str, struct perf_event_attr *attr)
return ret;
}
int parse_events(const struct option *opt __used, const char *str, int unset __used)
int parse_events(const struct option *opt, const char *str, int unset __used)
{
struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
struct perf_event_attr attr;
enum event_result ret;
for (;;) {
memset(&attr, 0, sizeof(attr));
ret = parse_event_symbols(&str, &attr);
ret = parse_event_symbols(opt, &str, &attr);
if (ret == EVT_FAILED)
return -1;
......@@ -794,12 +794,10 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u
if (ret != EVT_HANDLED_ALL) {
struct perf_evsel *evsel;
evsel = perf_evsel__new(&attr,
nr_counters);
evsel = perf_evsel__new(&attr, evlist->nr_entries);
if (evsel == NULL)
return -1;
list_add_tail(&evsel->node, &evsel_list);
++nr_counters;
perf_evlist__add(evlist, evsel);
}
if (*str == 0)
......@@ -813,13 +811,14 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u
return 0;
}
int parse_filter(const struct option *opt __used, const char *str,
int parse_filter(const struct option *opt, const char *str,
int unset __used)
{
struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
struct perf_evsel *last = NULL;
if (!list_empty(&evsel_list))
last = list_entry(evsel_list.prev, struct perf_evsel, node);
if (evlist->nr_entries > 0)
last = list_entry(evlist->entries.prev, struct perf_evsel, node);
if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) {
fprintf(stderr,
......@@ -981,33 +980,3 @@ void print_events(void)
exit(129);
}
int perf_evsel_list__create_default(void)
{
struct perf_evsel *evsel;
struct perf_event_attr attr;
memset(&attr, 0, sizeof(attr));
attr.type = PERF_TYPE_HARDWARE;
attr.config = PERF_COUNT_HW_CPU_CYCLES;
evsel = perf_evsel__new(&attr, 0);
if (evsel == NULL)
return -ENOMEM;
list_add(&evsel->node, &evsel_list);
++nr_counters;
return 0;
}
void perf_evsel_list__delete(void)
{
struct perf_evsel *pos, *n;
list_for_each_entry_safe(pos, n, &evsel_list, node) {
list_del_init(&pos->node);
perf_evsel__delete(pos);
}
nr_counters = 0;
}
......@@ -9,11 +9,6 @@
struct list_head;
struct perf_evsel;
extern struct list_head evsel_list;
int perf_evsel_list__create_default(void);
void perf_evsel_list__delete(void);
struct option;
struct tracepoint_path {
......@@ -25,8 +20,6 @@ struct tracepoint_path {
extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
extern bool have_tracepoints(struct list_head *evlist);
extern int nr_counters;
const char *event_name(struct perf_evsel *event);
extern const char *__event_name(int type, u64 config);
......
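Note: with the evsel_list/nr_counters globals gone, parse_events() and parse_filter() reach their evlist through the option's value pointer. A hedged sketch of how a command wires this up, following builtin-stat.c above:

    static struct perf_evlist *evsel_list;

    static const struct option options[] = {
        OPT_CALLBACK('e', "event", &evsel_list, "event",
                     "event selector. use 'perf list' to list available events",
                     parse_events),
        OPT_END()
    };

    /* and inside parse_events()/parse_filter(): */
    struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;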
......@@ -31,6 +31,7 @@
#include <string.h>
#include <stdarg.h>
#include <limits.h>
#include <elf.h>
#undef _GNU_SOURCE
#include "util.h"
......@@ -111,7 +112,25 @@ static struct symbol *__find_kernel_function_by_name(const char *name,
NULL);
}
const char *kernel_get_module_path(const char *module)
static struct map *kernel_get_module_map(const char *module)
{
struct rb_node *nd;
struct map_groups *grp = &machine.kmaps;
if (!module)
module = "kernel";
for (nd = rb_first(&grp->maps[MAP__FUNCTION]); nd; nd = rb_next(nd)) {
struct map *pos = rb_entry(nd, struct map, rb_node);
if (strncmp(pos->dso->short_name + 1, module,
pos->dso->short_name_len - 2) == 0) {
return pos;
}
}
return NULL;
}
static struct dso *kernel_get_module_dso(const char *module)
{
struct dso *dso;
struct map *map;
......@@ -141,7 +160,13 @@ const char *kernel_get_module_path(const char *module)
}
}
found:
return dso->long_name;
return dso;
}
const char *kernel_get_module_path(const char *module)
{
struct dso *dso = kernel_get_module_dso(module);
return (dso) ? dso->long_name : NULL;
}
#ifdef DWARF_SUPPORT
......@@ -1913,3 +1938,42 @@ int del_perf_probe_events(struct strlist *dellist)
return ret;
}
/*
* If a symbol corresponds to a function with global binding, return 0.
* For all others, return 1.
*/
static int filter_non_global_functions(struct map *map __unused,
struct symbol *sym)
{
if (sym->binding != STB_GLOBAL)
return 1;
return 0;
}
int show_available_funcs(const char *module)
{
struct map *map;
int ret;
setup_pager();
ret = init_vmlinux();
if (ret < 0)
return ret;
map = kernel_get_module_map(module);
if (!map) {
pr_err("Failed to find %s map.\n", (module) ? : "kernel");
return -EINVAL;
}
if (map__load(map, filter_non_global_functions)) {
pr_err("Failed to load map.\n");
return -EINVAL;
}
if (!dso__sorted_by_name(map->dso, map->type))
dso__sort_by_name(map->dso, map->type);
dso__fprintf_symbols_by_name(map->dso, map->type, stdout);
return 0;
}
......@@ -127,6 +127,7 @@ extern int show_line_range(struct line_range *lr, const char *module);
extern int show_available_vars(struct perf_probe_event *pevs, int npevs,
int max_probe_points, const char *module,
bool externs);
extern int show_available_funcs(const char *module);
/* Maximum index number of event-name postfix */
......
......@@ -242,17 +242,16 @@ static bool symbol__match_parent_regex(struct symbol *sym)
return 0;
}
struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
int perf_session__resolve_callchain(struct perf_session *self,
struct thread *thread,
struct ip_callchain *chain,
struct symbol **parent)
{
u8 cpumode = PERF_RECORD_MISC_USER;
unsigned int i;
struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));
int err;
if (!syms)
return NULL;
callchain_cursor_reset(&self->callchain_cursor);
for (i = 0; i < chain->nr; i++) {
u64 ip = chain->ips[i];
......@@ -281,12 +280,15 @@ struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
*parent = al.sym;
if (!symbol_conf.use_callchain)
break;
syms[i].map = al.map;
syms[i].sym = al.sym;
}
err = callchain_cursor_append(&self->callchain_cursor,
ip, al.map, al.sym);
if (err)
return err;
}
return syms;
return 0;
}
static int process_event_synth_stub(event_t *event __used,
......@@ -494,7 +496,7 @@ static void flush_sample_queue(struct perf_session *s,
if (iter->timestamp > limit)
break;
event__parse_sample(iter->event, s, &sample);
perf_session__parse_sample(s, iter->event, &sample);
perf_session_deliver_event(s, iter->event, &sample, ops,
iter->file_offset);
......@@ -804,7 +806,7 @@ static int perf_session__process_event(struct perf_session *session,
/*
* For all kernel events we get the sample data
*/
event__parse_sample(event, session, &sample);
perf_session__parse_sample(session, event, &sample);
/* Preprocess sample records - precheck callchains */
if (perf_session__preprocess_sample(session, event, &sample))
......
......@@ -51,6 +51,7 @@ struct perf_session {
int cwdlen;
char *cwd;
struct ordered_samples ordered_samples;
struct callchain_cursor callchain_cursor;
char filename[0];
};
......@@ -94,7 +95,7 @@ int __perf_session__process_events(struct perf_session *self,
int perf_session__process_events(struct perf_session *self,
struct perf_event_ops *event_ops);
struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
int perf_session__resolve_callchain(struct perf_session *self,
struct thread *thread,
struct ip_callchain *chain,
struct symbol **parent);
......@@ -154,4 +155,13 @@ size_t perf_session__fprintf_nr_events(struct perf_session *self, FILE *fp)
{
return hists__fprintf_nr_events(&self->hists, fp);
}
static inline int perf_session__parse_sample(struct perf_session *session,
const event_t *event,
struct sample_data *sample)
{
return event__parse_sample(event, session->sample_type,
session->sample_id_all, sample);
}
#endif /* __PERF_SESSION_H */
......@@ -7,61 +7,6 @@
#include "util.h"
#include "debug.h"
/* Skip "." and ".." directories */
static int filter(const struct dirent *dir)
{
if (dir->d_name[0] == '.')
return 0;
else
return 1;
}
struct thread_map *thread_map__new_by_pid(pid_t pid)
{
struct thread_map *threads;
char name[256];
int items;
struct dirent **namelist = NULL;
int i;
sprintf(name, "/proc/%d/task", pid);
items = scandir(name, &namelist, filter, NULL);
if (items <= 0)
return NULL;
threads = malloc(sizeof(*threads) + sizeof(pid_t) * items);
if (threads != NULL) {
for (i = 0; i < items; i++)
threads->map[i] = atoi(namelist[i]->d_name);
threads->nr = items;
}
for (i=0; i<items; i++)
free(namelist[i]);
free(namelist);
return threads;
}
struct thread_map *thread_map__new_by_tid(pid_t tid)
{
struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t));
if (threads != NULL) {
threads->map[0] = tid;
threads->nr = 1;
}
return threads;
}
struct thread_map *thread_map__new(pid_t pid, pid_t tid)
{
if (pid != -1)
return thread_map__new_by_pid(pid);
return thread_map__new_by_tid(tid);
}
static struct thread *thread__new(pid_t pid)
{
struct thread *self = zalloc(sizeof(*self));
......
......@@ -18,24 +18,10 @@ struct thread {
int comm_len;
};
struct thread_map {
int nr;
int map[];
};
struct perf_session;
void thread__delete(struct thread *self);
struct thread_map *thread_map__new_by_pid(pid_t pid);
struct thread_map *thread_map__new_by_tid(pid_t tid);
struct thread_map *thread_map__new(pid_t pid, pid_t tid);
static inline void thread_map__delete(struct thread_map *threads)
{
free(threads);
}
int thread__set_comm(struct thread *self, const char *comm);
int thread__comm_len(struct thread *self);
struct thread *perf_session__findnew(struct perf_session *self, pid_t pid);
......
#include <dirent.h>
#include <stdlib.h>
#include <stdio.h>
#include "thread_map.h"
/* Skip "." and ".." directories */
static int filter(const struct dirent *dir)
{
if (dir->d_name[0] == '.')
return 0;
else
return 1;
}
struct thread_map *thread_map__new_by_pid(pid_t pid)
{
struct thread_map *threads;
char name[256];
int items;
struct dirent **namelist = NULL;
int i;
sprintf(name, "/proc/%d/task", pid);
items = scandir(name, &namelist, filter, NULL);
if (items <= 0)
return NULL;
threads = malloc(sizeof(*threads) + sizeof(pid_t) * items);
if (threads != NULL) {
for (i = 0; i < items; i++)
threads->map[i] = atoi(namelist[i]->d_name);
threads->nr = items;
}
for (i=0; i<items; i++)
free(namelist[i]);
free(namelist);
return threads;
}
struct thread_map *thread_map__new_by_tid(pid_t tid)
{
struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t));
if (threads != NULL) {
threads->map[0] = tid;
threads->nr = 1;
}
return threads;
}
struct thread_map *thread_map__new(pid_t pid, pid_t tid)
{
if (pid != -1)
return thread_map__new_by_pid(pid);
return thread_map__new_by_tid(tid);
}
void thread_map__delete(struct thread_map *threads)
{
free(threads);
}
#ifndef __PERF_THREAD_MAP_H
#define __PERF_THREAD_MAP_H
#include <sys/types.h>
struct thread_map {
int nr;
int map[];
};
struct thread_map *thread_map__new_by_pid(pid_t pid);
struct thread_map *thread_map__new_by_tid(pid_t tid);
struct thread_map *thread_map__new(pid_t pid, pid_t tid);
void thread_map__delete(struct thread_map *threads);
#endif /* __PERF_THREAD_MAP_H */
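Note: after the split, thread_map users need only this small header. A hedged sketch of typical use, monitoring the current process as the tests in this commit do:

    /* pid == -1 makes thread_map__new() fall back to the tid argument */
    struct thread_map *threads = thread_map__new(-1, getpid());

    if (threads == NULL)
        return -1;
    /* ... pass to perf_evsel__open_per_thread(), perf_evlist__mmap(), ... */
    thread_map__delete(threads);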
......@@ -377,7 +377,7 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *self,
while (node) {
struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
struct rb_node *next = rb_next(node);
u64 cumul = cumul_hits(child);
u64 cumul = callchain_cumul_hits(child);
struct callchain_list *chain;
char folded_sign = ' ';
int first = true;
......