Commit 322cd21f authored by Ingo Molnar

Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

Infrastructure changes:

 - Add gzip decompression support for kernel modules (Namhyung Kim)

 - More prep patches for Intel PT, including a thread stack and
   more stuff made available via the database export mechanism (Adrian Hunter)

 - Optimize checking that tracepoint events are defined in perf script perl/python (Jiri Olsa)

 - Do not free pevent when deleting tracepoint evsel (Jiri Olsa)

 - Fix build-id matching for vmlinux (Namhyung Kim)
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 05b2537e daa01794
......@@ -66,6 +66,9 @@ include config/utilities.mak
#
# Define NO_PERF_READ_VDSOX32 if you do not want to build perf-read-vdsox32
# for reading the x32 mode 32-bit compatibility VDSO in 64-bit mode
#
# Define NO_ZLIB if you do not want to support compressed kernel modules
ifeq ($(srctree),)
srctree := $(patsubst %/,%,$(dir $(shell pwd)))
......@@ -317,6 +320,7 @@ LIB_H += ui/util.h
LIB_H += ui/ui.h
LIB_H += util/data.h
LIB_H += util/kvm-stat.h
LIB_H += util/thread-stack.h
LIB_OBJS += $(OUTPUT)util/abspath.o
LIB_OBJS += $(OUTPUT)util/alias.o
......@@ -394,6 +398,7 @@ LIB_OBJS += $(OUTPUT)util/srcline.o
LIB_OBJS += $(OUTPUT)util/data.o
LIB_OBJS += $(OUTPUT)util/tsc.o
LIB_OBJS += $(OUTPUT)util/cloexec.o
LIB_OBJS += $(OUTPUT)util/thread-stack.o
LIB_OBJS += $(OUTPUT)ui/setup.o
LIB_OBJS += $(OUTPUT)ui/helpline.o
......@@ -582,6 +587,10 @@ ifndef NO_LIBNUMA
BUILTIN_OBJS += $(OUTPUT)bench/numa.o
endif
ifndef NO_ZLIB
LIB_OBJS += $(OUTPUT)util/zlib.o
endif
ifdef ASCIIDOC8
export ASCIIDOC8
endif
......
......@@ -200,6 +200,17 @@ static int process_buildids(struct record *rec)
if (size == 0)
return 0;
/*
* During this process, it'll load the kernel map and replace
* dso->long_name with the real pathname it found. In this case
* we prefer a vmlinux path like
* /lib/modules/3.16.4/build/vmlinux
*
* rather than the build-id path (in the debug directory), e.g.
* $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
*/
symbol_conf.ignore_vmlinux_buildid = true;
return __perf_session__process_events(session, start,
size - start,
size, &build_id__mark_dso_hit_ops);
......
......@@ -200,7 +200,8 @@ CORE_FEATURE_TESTS = \
libunwind \
stackprotector-all \
timerfd \
libdw-dwarf-unwind
libdw-dwarf-unwind \
zlib
LIB_FEATURE_TESTS = \
dwarf \
......@@ -214,7 +215,8 @@ LIB_FEATURE_TESTS = \
libpython \
libslang \
libunwind \
libdw-dwarf-unwind
libdw-dwarf-unwind \
zlib
VF_FEATURE_TESTS = \
backtrace \
......@@ -604,6 +606,15 @@ ifneq ($(filter -lbfd,$(EXTLIBS)),)
CFLAGS += -DHAVE_LIBBFD_SUPPORT
endif
ifndef NO_ZLIB
ifeq ($(feature-zlib), 1)
CFLAGS += -DHAVE_ZLIB_SUPPORT
EXTLIBS += -lz
else
NO_ZLIB := 1
endif
endif
ifndef NO_BACKTRACE
ifeq ($(feature-backtrace), 1)
CFLAGS += -DHAVE_BACKTRACE_SUPPORT
......
......@@ -29,7 +29,8 @@ FILES= \
test-timerfd.bin \
test-libdw-dwarf-unwind.bin \
test-compile-32.bin \
test-compile-x32.bin
test-compile-x32.bin \
test-zlib.bin
CC := $(CROSS_COMPILE)gcc -MD
PKG_CONFIG := $(CROSS_COMPILE)pkg-config
......@@ -41,7 +42,7 @@ BUILD = $(CC) $(CFLAGS) -o $(OUTPUT)$@ $(patsubst %.bin,%.c,$@) $(LDFLAGS)
###############################
test-all.bin:
$(BUILD) -Werror -fstack-protector-all -O2 -Werror -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -laudit -I/usr/include/slang -lslang $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null) $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl
$(BUILD) -Werror -fstack-protector-all -O2 -Werror -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -laudit -I/usr/include/slang -lslang $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null) $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz
test-hello.bin:
$(BUILD)
......@@ -139,6 +140,9 @@ test-compile-32.bin:
test-compile-x32.bin:
$(CC) -mx32 -o $(OUTPUT)$@ test-compile.c
test-zlib.bin:
$(BUILD) -lz
-include *.d
###############################
......
......@@ -93,6 +93,10 @@
# include "test-sync-compare-and-swap.c"
#undef main
#define main main_test_zlib
# include "test-zlib.c"
#undef main
int main(int argc, char *argv[])
{
main_test_libpython();
......@@ -116,6 +120,7 @@ int main(int argc, char *argv[])
main_test_stackprotector_all();
main_test_libdw_dwarf_unwind();
main_test_sync_compare_and_swap(argc, argv);
main_test_zlib();
return 0;
}
#include <zlib.h>
int main(void)
{
z_stream zs;
inflateInit(&zs);
return 0;
}
#!/bin/bash
# description: export perf data to a postgresql database
# args: [database name] [columns]
# args: [database name] [columns] [calls]
n_args=0
for i in "$@"
do
......@@ -9,11 +9,16 @@ do
fi
n_args=$(( $n_args + 1 ))
done
if [ "$n_args" -gt 2 ] ; then
echo "usage: export-to-postgresql-report [database name] [columns]"
if [ "$n_args" -gt 3 ] ; then
echo "usage: export-to-postgresql-report [database name] [columns] [calls]"
exit
fi
if [ "$n_args" -gt 1 ] ; then
if [ "$n_args" -gt 2 ] ; then
dbname=$1
columns=$2
calls=$3
shift 3
elif [ "$n_args" -gt 1 ] ; then
dbname=$1
columns=$2
shift 2
......@@ -21,4 +26,4 @@ elif [ "$n_args" -gt 0 ] ; then
dbname=$1
shift
fi
perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/export-to-postgresql.py $dbname $columns
perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/export-to-postgresql.py $dbname $columns $calls
......@@ -40,10 +40,12 @@ sys.path.append(os.environ['PERF_EXEC_PATH'] + \
#from Core import *
perf_db_export_mode = True
perf_db_export_calls = False
def usage():
print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>]"
print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>]"
print >> sys.stderr, "where: columns 'all' or 'branches'"
print >> sys.stderr, " calls 'calls' => create calls table"
raise Exception("Too few arguments")
if (len(sys.argv) < 2):
......@@ -61,6 +63,12 @@ if columns not in ("all", "branches"):
branches = (columns == "branches")
if (len(sys.argv) >= 4):
if (sys.argv[3] == "calls"):
perf_db_export_calls = True
else:
usage()
output_dir_name = os.getcwd() + "/" + dbname + "-perf-data"
os.mkdir(output_dir_name)
......@@ -123,6 +131,10 @@ do_query(query, 'CREATE TABLE symbols ('
'sym_end bigint,'
'binding integer,'
'name varchar(2048))')
do_query(query, 'CREATE TABLE branch_types ('
'id integer NOT NULL,'
'name varchar(80))')
if branches:
do_query(query, 'CREATE TABLE samples ('
'id bigint NOT NULL,'
......@@ -139,7 +151,9 @@ if branches:
'to_dso_id bigint,'
'to_symbol_id bigint,'
'to_sym_offset bigint,'
'to_ip bigint)')
'to_ip bigint,'
'branch_type integer,'
'in_tx boolean)')
else:
do_query(query, 'CREATE TABLE samples ('
'id bigint NOT NULL,'
......@@ -160,7 +174,28 @@ else:
'period bigint,'
'weight bigint,'
'transaction bigint,'
'data_src bigint)')
'data_src bigint,'
'branch_type integer,'
'in_tx boolean)')
if perf_db_export_calls:
do_query(query, 'CREATE TABLE call_paths ('
'id bigint NOT NULL,'
'parent_id bigint,'
'symbol_id bigint,'
'ip bigint)')
do_query(query, 'CREATE TABLE calls ('
'id bigint NOT NULL,'
'thread_id bigint,'
'comm_id bigint,'
'call_path_id bigint,'
'call_time bigint,'
'return_time bigint,'
'branch_count bigint,'
'call_id bigint,'
'return_id bigint,'
'parent_call_path_id bigint,'
'flags integer)')
do_query(query, 'CREATE VIEW samples_view AS '
'SELECT '
......@@ -178,7 +213,9 @@ do_query(query, 'CREATE VIEW samples_view AS '
'to_hex(to_ip) AS to_ip_hex,'
'(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,'
'to_sym_offset,'
'(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name'
'(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,'
'(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,'
'in_tx'
' FROM samples')
......@@ -234,7 +271,11 @@ comm_file = open_output_file("comm_table.bin")
comm_thread_file = open_output_file("comm_thread_table.bin")
dso_file = open_output_file("dso_table.bin")
symbol_file = open_output_file("symbol_table.bin")
branch_type_file = open_output_file("branch_type_table.bin")
sample_file = open_output_file("sample_table.bin")
if perf_db_export_calls:
call_path_file = open_output_file("call_path_table.bin")
call_file = open_output_file("call_table.bin")
def trace_begin():
print datetime.datetime.today(), "Writing to intermediate files..."
......@@ -245,6 +286,9 @@ def trace_begin():
comm_table(0, "unknown")
dso_table(0, 0, "unknown", "unknown", "")
symbol_table(0, 0, 0, 0, 0, "unknown")
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
if perf_db_export_calls:
call_path_table(0, 0, 0, 0)
unhandled_count = 0
......@@ -257,7 +301,11 @@ def trace_end():
copy_output_file(comm_thread_file, "comm_threads")
copy_output_file(dso_file, "dsos")
copy_output_file(symbol_file, "symbols")
copy_output_file(branch_type_file, "branch_types")
copy_output_file(sample_file, "samples")
if perf_db_export_calls:
copy_output_file(call_path_file, "call_paths")
copy_output_file(call_file, "calls")
print datetime.datetime.today(), "Removing intermediate files..."
remove_output_file(evsel_file)
......@@ -267,7 +315,11 @@ def trace_end():
remove_output_file(comm_thread_file)
remove_output_file(dso_file)
remove_output_file(symbol_file)
remove_output_file(branch_type_file)
remove_output_file(sample_file)
if perf_db_export_calls:
remove_output_file(call_path_file)
remove_output_file(call_file)
os.rmdir(output_dir_name)
print datetime.datetime.today(), "Adding primary keys"
do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
......@@ -277,7 +329,11 @@ def trace_end():
do_query(query, 'ALTER TABLE comm_threads ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE dsos ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE symbols ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE branch_types ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE samples ADD PRIMARY KEY (id)')
if perf_db_export_calls:
do_query(query, 'ALTER TABLE call_paths ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)')
print datetime.datetime.today(), "Adding foreign keys"
do_query(query, 'ALTER TABLE threads '
......@@ -299,6 +355,18 @@ def trace_end():
'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id),'
'ADD CONSTRAINT todsofk FOREIGN KEY (to_dso_id) REFERENCES dsos (id),'
'ADD CONSTRAINT tosymbolfk FOREIGN KEY (to_symbol_id) REFERENCES symbols (id)')
if perf_db_export_calls:
do_query(query, 'ALTER TABLE call_paths '
'ADD CONSTRAINT parentfk FOREIGN KEY (parent_id) REFERENCES call_paths (id),'
'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id)')
do_query(query, 'ALTER TABLE calls '
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT call_pathfk FOREIGN KEY (call_path_id) REFERENCES call_paths (id),'
'ADD CONSTRAINT callfk FOREIGN KEY (call_id) REFERENCES samples (id),'
'ADD CONSTRAINT returnfk FOREIGN KEY (return_id) REFERENCES samples (id),'
'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)')
do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
if (unhandled_count):
print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
......@@ -352,9 +420,25 @@ def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x
value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name)
symbol_file.write(value)
def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, *x):
def branch_type_table(branch_type, name, *x):
n = len(name)
fmt = "!hiii" + str(n) + "s"
value = struct.pack(fmt, 2, 4, branch_type, n, name)
branch_type_file.write(value)
def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, *x):
if branches:
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiq", 15, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip)
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiB", 17, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx)
else:
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiq", 19, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src)
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiB", 21, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx)
sample_file.write(value)
def call_path_table(cp_id, parent_id, symbol_id, ip, *x):
fmt = "!hiqiqiqiq"
value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip)
call_path_file.write(value)
def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, *x):
fmt = "!hiqiqiqiqiqiqiqiqiqiqii"
value = struct.pack(fmt, 11, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags)
call_file.write(value)
......@@ -15,6 +15,8 @@
#include "debug.h"
#include "session.h"
#include "tool.h"
#include "header.h"
#include "vdso.h"
int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused,
union perf_event *event,
......@@ -105,3 +107,335 @@ char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size)
build_id_hex, build_id_hex + 2);
return bf;
}
#define dsos__for_each_with_build_id(pos, head) \
list_for_each_entry(pos, head, node) \
if (!pos->has_build_id) \
continue; \
else
static int write_buildid(const char *name, size_t name_len, u8 *build_id,
pid_t pid, u16 misc, int fd)
{
int err;
struct build_id_event b;
size_t len;
len = name_len + 1;
len = PERF_ALIGN(len, NAME_ALIGN);
memset(&b, 0, sizeof(b));
memcpy(&b.build_id, build_id, BUILD_ID_SIZE);
b.pid = pid;
b.header.misc = misc;
b.header.size = sizeof(b) + len;
err = writen(fd, &b, sizeof(b));
if (err < 0)
return err;
return write_padded(fd, name, name_len + 1, len);
}
static int __dsos__write_buildid_table(struct list_head *head,
struct machine *machine,
pid_t pid, u16 misc, int fd)
{
char nm[PATH_MAX];
struct dso *pos;
dsos__for_each_with_build_id(pos, head) {
int err;
const char *name;
size_t name_len;
if (!pos->hit)
continue;
if (dso__is_vdso(pos)) {
name = pos->short_name;
name_len = pos->short_name_len + 1;
} else if (dso__is_kcore(pos)) {
machine__mmap_name(machine, nm, sizeof(nm));
name = nm;
name_len = strlen(nm) + 1;
} else {
name = pos->long_name;
name_len = pos->long_name_len + 1;
}
err = write_buildid(name, name_len, pos->build_id,
pid, misc, fd);
if (err)
return err;
}
return 0;
}
static int machine__write_buildid_table(struct machine *machine, int fd)
{
int err;
u16 kmisc = PERF_RECORD_MISC_KERNEL,
umisc = PERF_RECORD_MISC_USER;
if (!machine__is_host(machine)) {
kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
umisc = PERF_RECORD_MISC_GUEST_USER;
}
err = __dsos__write_buildid_table(&machine->kernel_dsos.head, machine,
machine->pid, kmisc, fd);
if (err == 0)
err = __dsos__write_buildid_table(&machine->user_dsos.head,
machine, machine->pid, umisc,
fd);
return err;
}
int perf_session__write_buildid_table(struct perf_session *session, int fd)
{
struct rb_node *nd;
int err = machine__write_buildid_table(&session->machines.host, fd);
if (err)
return err;
for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
err = machine__write_buildid_table(pos, fd);
if (err)
break;
}
return err;
}
static int __dsos__hit_all(struct list_head *head)
{
struct dso *pos;
list_for_each_entry(pos, head, node)
pos->hit = true;
return 0;
}
static int machine__hit_all_dsos(struct machine *machine)
{
int err;
err = __dsos__hit_all(&machine->kernel_dsos.head);
if (err)
return err;
return __dsos__hit_all(&machine->user_dsos.head);
}
int dsos__hit_all(struct perf_session *session)
{
struct rb_node *nd;
int err;
err = machine__hit_all_dsos(&session->machines.host);
if (err)
return err;
for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
err = machine__hit_all_dsos(pos);
if (err)
return err;
}
return 0;
}
int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
const char *name, bool is_kallsyms, bool is_vdso)
{
const size_t size = PATH_MAX;
char *realname, *filename = zalloc(size),
*linkname = zalloc(size), *targetname;
int len, err = -1;
bool slash = is_kallsyms || is_vdso;
if (is_kallsyms) {
if (symbol_conf.kptr_restrict) {
pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
err = 0;
goto out_free;
}
realname = (char *) name;
} else
realname = realpath(name, NULL);
if (realname == NULL || filename == NULL || linkname == NULL)
goto out_free;
len = scnprintf(filename, size, "%s%s%s",
debugdir, slash ? "/" : "",
is_vdso ? DSO__NAME_VDSO : realname);
if (mkdir_p(filename, 0755))
goto out_free;
snprintf(filename + len, size - len, "/%s", sbuild_id);
if (access(filename, F_OK)) {
if (is_kallsyms) {
if (copyfile("/proc/kallsyms", filename))
goto out_free;
} else if (link(realname, filename) && copyfile(name, filename))
goto out_free;
}
len = scnprintf(linkname, size, "%s/.build-id/%.2s",
debugdir, sbuild_id);
if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
goto out_free;
snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
targetname = filename + strlen(debugdir) - 5;
memcpy(targetname, "../..", 5);
if (symlink(targetname, linkname) == 0)
err = 0;
out_free:
if (!is_kallsyms)
free(realname);
free(filename);
free(linkname);
return err;
}
static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
const char *name, const char *debugdir,
bool is_kallsyms, bool is_vdso)
{
char sbuild_id[BUILD_ID_SIZE * 2 + 1];
build_id__sprintf(build_id, build_id_size, sbuild_id);
return build_id_cache__add_s(sbuild_id, debugdir, name,
is_kallsyms, is_vdso);
}
int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
{
const size_t size = PATH_MAX;
char *filename = zalloc(size),
*linkname = zalloc(size);
int err = -1;
if (filename == NULL || linkname == NULL)
goto out_free;
snprintf(linkname, size, "%s/.build-id/%.2s/%s",
debugdir, sbuild_id, sbuild_id + 2);
if (access(linkname, F_OK))
goto out_free;
if (readlink(linkname, filename, size - 1) < 0)
goto out_free;
if (unlink(linkname))
goto out_free;
/*
* Since the link is relative, we must make it absolute:
*/
snprintf(linkname, size, "%s/.build-id/%.2s/%s",
debugdir, sbuild_id, filename);
if (unlink(linkname))
goto out_free;
err = 0;
out_free:
free(filename);
free(linkname);
return err;
}
static int dso__cache_build_id(struct dso *dso, struct machine *machine,
const char *debugdir)
{
bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
bool is_vdso = dso__is_vdso(dso);
const char *name = dso->long_name;
char nm[PATH_MAX];
if (dso__is_kcore(dso)) {
is_kallsyms = true;
machine__mmap_name(machine, nm, sizeof(nm));
name = nm;
}
return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), name,
debugdir, is_kallsyms, is_vdso);
}
static int __dsos__cache_build_ids(struct list_head *head,
struct machine *machine, const char *debugdir)
{
struct dso *pos;
int err = 0;
dsos__for_each_with_build_id(pos, head)
if (dso__cache_build_id(pos, machine, debugdir))
err = -1;
return err;
}
static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
{
int ret = __dsos__cache_build_ids(&machine->kernel_dsos.head, machine,
debugdir);
ret |= __dsos__cache_build_ids(&machine->user_dsos.head, machine,
debugdir);
return ret;
}
int perf_session__cache_build_ids(struct perf_session *session)
{
struct rb_node *nd;
int ret;
char debugdir[PATH_MAX];
snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);
if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
return -1;
ret = machine__cache_build_ids(&session->machines.host, debugdir);
for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret |= machine__cache_build_ids(pos, debugdir);
}
return ret ? -1 : 0;
}
static bool machine__read_build_ids(struct machine *machine, bool with_hits)
{
bool ret;
ret = __dsos__read_build_ids(&machine->kernel_dsos.head, with_hits);
ret |= __dsos__read_build_ids(&machine->user_dsos.head, with_hits);
return ret;
}
bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
{
struct rb_node *nd;
bool ret = machine__read_build_ids(&session->machines.host, with_hits);
for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret |= machine__read_build_ids(pos, with_hits);
}
return ret;
}
......@@ -15,4 +15,15 @@ char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size);
int build_id__mark_dso_hit(struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample, struct perf_evsel *evsel,
struct machine *machine);
int dsos__hit_all(struct perf_session *session);
bool perf_session__read_build_ids(struct perf_session *session, bool with_hits);
int perf_session__write_buildid_table(struct perf_session *session, int fd);
int perf_session__cache_build_ids(struct perf_session *session);
int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
const char *name, bool is_kallsyms, bool is_vdso);
int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir);
#endif
......@@ -21,16 +21,76 @@
#include "comm.h"
#include "symbol.h"
#include "event.h"
#include "util.h"
#include "thread-stack.h"
#include "db-export.h"
struct deferred_export {
struct list_head node;
struct comm *comm;
};
static int db_export__deferred(struct db_export *dbe)
{
struct deferred_export *de;
int err;
while (!list_empty(&dbe->deferred)) {
de = list_entry(dbe->deferred.next, struct deferred_export,
node);
err = dbe->export_comm(dbe, de->comm);
list_del(&de->node);
free(de);
if (err)
return err;
}
return 0;
}
static void db_export__free_deferred(struct db_export *dbe)
{
struct deferred_export *de;
while (!list_empty(&dbe->deferred)) {
de = list_entry(dbe->deferred.next, struct deferred_export,
node);
list_del(&de->node);
free(de);
}
}
static int db_export__defer_comm(struct db_export *dbe, struct comm *comm)
{
struct deferred_export *de;
de = zalloc(sizeof(struct deferred_export));
if (!de)
return -ENOMEM;
de->comm = comm;
list_add_tail(&de->node, &dbe->deferred);
return 0;
}
int db_export__init(struct db_export *dbe)
{
memset(dbe, 0, sizeof(struct db_export));
INIT_LIST_HEAD(&dbe->deferred);
return 0;
}
void db_export__exit(struct db_export *dbe __maybe_unused)
int db_export__flush(struct db_export *dbe)
{
return db_export__deferred(dbe);
}
void db_export__exit(struct db_export *dbe)
{
db_export__free_deferred(dbe);
call_return_processor__free(dbe->crp);
dbe->crp = NULL;
}
int db_export__evsel(struct db_export *dbe, struct perf_evsel *evsel)
......@@ -112,7 +172,10 @@ int db_export__comm(struct db_export *dbe, struct comm *comm,
comm->db_id = ++dbe->comm_last_db_id;
if (dbe->export_comm) {
err = dbe->export_comm(dbe, comm);
if (main_thread->comm_set)
err = dbe->export_comm(dbe, comm);
else
err = db_export__defer_comm(dbe, comm);
if (err)
return err;
}
......@@ -208,6 +271,15 @@ static int db_ids_from_al(struct db_export *dbe, struct addr_location *al,
return 0;
}
int db_export__branch_type(struct db_export *dbe, u32 branch_type,
const char *name)
{
if (dbe->export_branch_type)
return dbe->export_branch_type(dbe, branch_type, name);
return 0;
}
int db_export__sample(struct db_export *dbe, union perf_event *event,
struct perf_sample *sample, struct perf_evsel *evsel,
struct thread *thread, struct addr_location *al)
......@@ -261,6 +333,13 @@ int db_export__sample(struct db_export *dbe, union perf_event *event,
&es.addr_sym_db_id, &es.addr_offset);
if (err)
return err;
if (dbe->crp) {
err = thread_stack__process(thread, comm, sample, al,
&addr_al, es.db_id,
dbe->crp);
if (err)
return err;
}
}
if (dbe->export_sample)
......@@ -268,3 +347,82 @@ int db_export__sample(struct db_export *dbe, union perf_event *event,
return 0;
}
static struct {
u32 branch_type;
const char *name;
} branch_types[] = {
{0, "no branch"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL, "call"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN, "return"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CONDITIONAL, "conditional jump"},
{PERF_IP_FLAG_BRANCH, "unconditional jump"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_INTERRUPT,
"software interrupt"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN | PERF_IP_FLAG_INTERRUPT,
"return from interrupt"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_SYSCALLRET,
"system call"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN | PERF_IP_FLAG_SYSCALLRET,
"return from system call"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_ASYNC, "asynchronous branch"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
PERF_IP_FLAG_INTERRUPT, "hardware interrupt"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT, "transaction abort"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_BEGIN, "trace begin"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_END, "trace end"},
{0, NULL}
};
int db_export__branch_types(struct db_export *dbe)
{
int i, err = 0;
for (i = 0; branch_types[i].name ; i++) {
err = db_export__branch_type(dbe, branch_types[i].branch_type,
branch_types[i].name);
if (err)
break;
}
return err;
}
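For illustration only, a minimal sketch (not part of this patch) of how a sample's flags word is reduced to the branch_type id and in_tx flag that end up in the samples table; branch_classify() is a hypothetical helper, and the masking mirrors what the python export path does with PERF_BRANCH_MASK and PERF_IP_FLAG_IN_TX (added elsewhere in this series in tools/perf/perf.h).
/* Hypothetical helper, illustrative only */
static void branch_classify(u64 flags, u32 *branch_type, bool *in_tx)
{
	/* keep only the bits that identify the branch type */
	*branch_type = (u32)(flags & PERF_BRANCH_MASK);
	/* transaction state is exported as a separate boolean column */
	*in_tx = (flags & PERF_IP_FLAG_IN_TX) != 0;
}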
int db_export__call_path(struct db_export *dbe, struct call_path *cp)
{
int err;
if (cp->db_id)
return 0;
if (cp->parent) {
err = db_export__call_path(dbe, cp->parent);
if (err)
return err;
}
cp->db_id = ++dbe->call_path_last_db_id;
if (dbe->export_call_path)
return dbe->export_call_path(dbe, cp);
return 0;
}
int db_export__call_return(struct db_export *dbe, struct call_return *cr)
{
int err;
if (cr->db_id)
return 0;
err = db_export__call_path(dbe, cr->cp);
if (err)
return err;
cr->db_id = ++dbe->call_return_last_db_id;
if (dbe->export_call_return)
return dbe->export_call_return(dbe, cr);
return 0;
}
......@@ -17,6 +17,7 @@
#define __PERF_DB_EXPORT_H
#include <linux/types.h>
#include <linux/list.h>
struct perf_evsel;
struct machine;
......@@ -25,6 +26,9 @@ struct comm;
struct dso;
struct perf_sample;
struct addr_location;
struct call_return_processor;
struct call_path;
struct call_return;
struct export_sample {
union perf_event *event;
......@@ -54,7 +58,13 @@ struct db_export {
struct machine *machine);
int (*export_symbol)(struct db_export *dbe, struct symbol *sym,
struct dso *dso);
int (*export_branch_type)(struct db_export *dbe, u32 branch_type,
const char *name);
int (*export_sample)(struct db_export *dbe, struct export_sample *es);
int (*export_call_path)(struct db_export *dbe, struct call_path *cp);
int (*export_call_return)(struct db_export *dbe,
struct call_return *cr);
struct call_return_processor *crp;
u64 evsel_last_db_id;
u64 machine_last_db_id;
u64 thread_last_db_id;
......@@ -63,9 +73,13 @@ struct db_export {
u64 dso_last_db_id;
u64 symbol_last_db_id;
u64 sample_last_db_id;
u64 call_path_last_db_id;
u64 call_return_last_db_id;
struct list_head deferred;
};
int db_export__init(struct db_export *dbe);
int db_export__flush(struct db_export *dbe);
void db_export__exit(struct db_export *dbe);
int db_export__evsel(struct db_export *dbe, struct perf_evsel *evsel);
int db_export__machine(struct db_export *dbe, struct machine *machine);
......@@ -79,8 +93,15 @@ int db_export__dso(struct db_export *dbe, struct dso *dso,
struct machine *machine);
int db_export__symbol(struct db_export *dbe, struct symbol *sym,
struct dso *dso);
int db_export__branch_type(struct db_export *dbe, u32 branch_type,
const char *name);
int db_export__sample(struct db_export *dbe, union perf_event *event,
struct perf_sample *sample, struct perf_evsel *evsel,
struct thread *thread, struct addr_location *al);
int db_export__branch_types(struct db_export *dbe);
int db_export__call_path(struct db_export *dbe, struct call_path *cp);
int db_export__call_return(struct db_export *dbe, struct call_return *cr);
#endif
......@@ -21,8 +21,10 @@ char dso__symtab_origin(const struct dso *dso)
[DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b',
[DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd',
[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K',
[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP] = 'm',
[DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g',
[DSO_BINARY_TYPE__GUEST_KMODULE] = 'G',
[DSO_BINARY_TYPE__GUEST_KMODULE_COMP] = 'M',
[DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V',
};
......@@ -112,11 +114,13 @@ int dso__read_binary_type_filename(const struct dso *dso,
break;
case DSO_BINARY_TYPE__GUEST_KMODULE:
case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
path__join3(filename, size, symbol_conf.symfs,
root_dir, dso->long_name);
break;
case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
__symbol__join_symfs(filename, size, dso->long_name);
break;
......@@ -137,6 +141,73 @@ int dso__read_binary_type_filename(const struct dso *dso,
return ret;
}
static const struct {
const char *fmt;
int (*decompress)(const char *input, int output);
} compressions[] = {
#ifdef HAVE_ZLIB_SUPPORT
{ "gz", gzip_decompress_to_file },
#endif
{ NULL, NULL },
};
bool is_supported_compression(const char *ext)
{
unsigned i;
for (i = 0; compressions[i].fmt; i++) {
if (!strcmp(ext, compressions[i].fmt))
return true;
}
return false;
}
bool is_kmodule_extension(const char *ext)
{
if (strncmp(ext, "ko", 2))
return false;
if (ext[2] == '\0' || (ext[2] == '.' && is_supported_compression(ext+3)))
return true;
return false;
}
bool is_kernel_module(const char *pathname, bool *compressed)
{
const char *ext = strrchr(pathname, '.');
if (ext == NULL)
return false;
if (is_supported_compression(ext + 1)) {
if (compressed)
*compressed = true;
ext -= 3;
} else if (compressed)
*compressed = false;
return is_kmodule_extension(ext + 1);
}
bool decompress_to_file(const char *ext, const char *filename, int output_fd)
{
unsigned i;
for (i = 0; compressions[i].fmt; i++) {
if (!strcmp(ext, compressions[i].fmt))
return !compressions[i].decompress(filename,
output_fd);
}
return false;
}
bool dso__needs_decompress(struct dso *dso)
{
return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
}
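For illustration only, a minimal sketch (not part of this patch) of how these helpers fit together for a compressed module; the path in the comment and the function name are made up for the example.
#include <string.h>	/* strrchr */

/* Illustrative only: decompress e.g. ".../nf_nat.ko.gz" into an
 * already-open temporary file descriptor. */
static int module_decompress_example(const char *path, int tmp_fd)
{
	bool compressed = false;
	const char *ext;

	if (!is_kernel_module(path, &compressed) || !compressed)
		return -1;	/* not a module, or a plain .ko */

	ext = strrchr(path, '.');	/* points at ".gz" */
	if (!ext || !is_supported_compression(ext + 1))
		return -1;

	/* dispatches to gzip_decompress_to_file() for "gz" */
	return decompress_to_file(ext + 1, path, tmp_fd) ? 0 : -1;
}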
/*
* Global list of open DSOs and the counter.
*/
......
......@@ -22,7 +22,9 @@ enum dso_binary_type {
DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
DSO_BINARY_TYPE__GUEST_KMODULE,
DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
DSO_BINARY_TYPE__KCORE,
DSO_BINARY_TYPE__GUEST_KCORE,
DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
......@@ -185,6 +187,11 @@ int dso__kernel_module_get_build_id(struct dso *dso, const char *root_dir);
char dso__symtab_origin(const struct dso *dso);
int dso__read_binary_type_filename(const struct dso *dso, enum dso_binary_type type,
char *root_dir, char *filename, size_t size);
bool is_supported_compression(const char *ext);
bool is_kmodule_extension(const char *ext);
bool is_kernel_module(const char *pathname, bool *compressed);
bool decompress_to_file(const char *ext, const char *filename, int output_fd);
bool dso__needs_decompress(struct dso *dso);
/*
* The dso__data_* external interface provides following functions:
......
......@@ -143,6 +143,32 @@ struct branch_stack {
struct branch_entry entries[0];
};
enum {
PERF_IP_FLAG_BRANCH = 1ULL << 0,
PERF_IP_FLAG_CALL = 1ULL << 1,
PERF_IP_FLAG_RETURN = 1ULL << 2,
PERF_IP_FLAG_CONDITIONAL = 1ULL << 3,
PERF_IP_FLAG_SYSCALLRET = 1ULL << 4,
PERF_IP_FLAG_ASYNC = 1ULL << 5,
PERF_IP_FLAG_INTERRUPT = 1ULL << 6,
PERF_IP_FLAG_TX_ABORT = 1ULL << 7,
PERF_IP_FLAG_TRACE_BEGIN = 1ULL << 8,
PERF_IP_FLAG_TRACE_END = 1ULL << 9,
PERF_IP_FLAG_IN_TX = 1ULL << 10,
};
#define PERF_BRANCH_MASK (\
PERF_IP_FLAG_BRANCH |\
PERF_IP_FLAG_CALL |\
PERF_IP_FLAG_RETURN |\
PERF_IP_FLAG_CONDITIONAL |\
PERF_IP_FLAG_SYSCALLRET |\
PERF_IP_FLAG_ASYNC |\
PERF_IP_FLAG_INTERRUPT |\
PERF_IP_FLAG_TX_ABORT |\
PERF_IP_FLAG_TRACE_BEGIN |\
PERF_IP_FLAG_TRACE_END)
struct perf_sample {
u64 ip;
u32 pid, tid;
......
......@@ -853,8 +853,6 @@ void perf_evsel__exit(struct perf_evsel *evsel)
perf_evsel__free_id(evsel);
close_cgroup(evsel->cgrp);
zfree(&evsel->group_name);
if (evsel->tp_format)
pevent_free_format(evsel->tp_format);
zfree(&evsel->name);
perf_evsel__object.fini(evsel);
}
......
......@@ -79,10 +79,7 @@ static int do_write(int fd, const void *buf, size_t size)
return 0;
}
#define NAME_ALIGN 64
static int write_padded(int fd, const void *bf, size_t count,
size_t count_aligned)
int write_padded(int fd, const void *bf, size_t count, size_t count_aligned)
{
static const char zero_buf[NAME_ALIGN];
int err = do_write(fd, bf, count);
......@@ -171,340 +168,6 @@ perf_header__set_cmdline(int argc, const char **argv)
return 0;
}
#define dsos__for_each_with_build_id(pos, head) \
list_for_each_entry(pos, head, node) \
if (!pos->has_build_id) \
continue; \
else
static int write_buildid(const char *name, size_t name_len, u8 *build_id,
pid_t pid, u16 misc, int fd)
{
int err;
struct build_id_event b;
size_t len;
len = name_len + 1;
len = PERF_ALIGN(len, NAME_ALIGN);
memset(&b, 0, sizeof(b));
memcpy(&b.build_id, build_id, BUILD_ID_SIZE);
b.pid = pid;
b.header.misc = misc;
b.header.size = sizeof(b) + len;
err = do_write(fd, &b, sizeof(b));
if (err < 0)
return err;
return write_padded(fd, name, name_len + 1, len);
}
static int __dsos__hit_all(struct list_head *head)
{
struct dso *pos;
list_for_each_entry(pos, head, node)
pos->hit = true;
return 0;
}
static int machine__hit_all_dsos(struct machine *machine)
{
int err;
err = __dsos__hit_all(&machine->kernel_dsos.head);
if (err)
return err;
return __dsos__hit_all(&machine->user_dsos.head);
}
int dsos__hit_all(struct perf_session *session)
{
struct rb_node *nd;
int err;
err = machine__hit_all_dsos(&session->machines.host);
if (err)
return err;
for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
err = machine__hit_all_dsos(pos);
if (err)
return err;
}
return 0;
}
static int __dsos__write_buildid_table(struct list_head *head,
struct machine *machine,
pid_t pid, u16 misc, int fd)
{
char nm[PATH_MAX];
struct dso *pos;
dsos__for_each_with_build_id(pos, head) {
int err;
const char *name;
size_t name_len;
if (!pos->hit)
continue;
if (dso__is_vdso(pos)) {
name = pos->short_name;
name_len = pos->short_name_len + 1;
} else if (dso__is_kcore(pos)) {
machine__mmap_name(machine, nm, sizeof(nm));
name = nm;
name_len = strlen(nm) + 1;
} else {
name = pos->long_name;
name_len = pos->long_name_len + 1;
}
err = write_buildid(name, name_len, pos->build_id,
pid, misc, fd);
if (err)
return err;
}
return 0;
}
static int machine__write_buildid_table(struct machine *machine, int fd)
{
int err;
u16 kmisc = PERF_RECORD_MISC_KERNEL,
umisc = PERF_RECORD_MISC_USER;
if (!machine__is_host(machine)) {
kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
umisc = PERF_RECORD_MISC_GUEST_USER;
}
err = __dsos__write_buildid_table(&machine->kernel_dsos.head, machine,
machine->pid, kmisc, fd);
if (err == 0)
err = __dsos__write_buildid_table(&machine->user_dsos.head,
machine, machine->pid, umisc,
fd);
return err;
}
static int dsos__write_buildid_table(struct perf_header *header, int fd)
{
struct perf_session *session = container_of(header,
struct perf_session, header);
struct rb_node *nd;
int err = machine__write_buildid_table(&session->machines.host, fd);
if (err)
return err;
for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
err = machine__write_buildid_table(pos, fd);
if (err)
break;
}
return err;
}
int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
const char *name, bool is_kallsyms, bool is_vdso)
{
const size_t size = PATH_MAX;
char *realname, *filename = zalloc(size),
*linkname = zalloc(size), *targetname;
int len, err = -1;
bool slash = is_kallsyms || is_vdso;
if (is_kallsyms) {
if (symbol_conf.kptr_restrict) {
pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
err = 0;
goto out_free;
}
realname = (char *) name;
} else
realname = realpath(name, NULL);
if (realname == NULL || filename == NULL || linkname == NULL)
goto out_free;
len = scnprintf(filename, size, "%s%s%s",
debugdir, slash ? "/" : "",
is_vdso ? DSO__NAME_VDSO : realname);
if (mkdir_p(filename, 0755))
goto out_free;
snprintf(filename + len, size - len, "/%s", sbuild_id);
if (access(filename, F_OK)) {
if (is_kallsyms) {
if (copyfile("/proc/kallsyms", filename))
goto out_free;
} else if (link(realname, filename) && copyfile(name, filename))
goto out_free;
}
len = scnprintf(linkname, size, "%s/.build-id/%.2s",
debugdir, sbuild_id);
if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
goto out_free;
snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
targetname = filename + strlen(debugdir) - 5;
memcpy(targetname, "../..", 5);
if (symlink(targetname, linkname) == 0)
err = 0;
out_free:
if (!is_kallsyms)
free(realname);
free(filename);
free(linkname);
return err;
}
static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
const char *name, const char *debugdir,
bool is_kallsyms, bool is_vdso)
{
char sbuild_id[BUILD_ID_SIZE * 2 + 1];
build_id__sprintf(build_id, build_id_size, sbuild_id);
return build_id_cache__add_s(sbuild_id, debugdir, name,
is_kallsyms, is_vdso);
}
int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
{
const size_t size = PATH_MAX;
char *filename = zalloc(size),
*linkname = zalloc(size);
int err = -1;
if (filename == NULL || linkname == NULL)
goto out_free;
snprintf(linkname, size, "%s/.build-id/%.2s/%s",
debugdir, sbuild_id, sbuild_id + 2);
if (access(linkname, F_OK))
goto out_free;
if (readlink(linkname, filename, size - 1) < 0)
goto out_free;
if (unlink(linkname))
goto out_free;
/*
* Since the link is relative, we must make it absolute:
*/
snprintf(linkname, size, "%s/.build-id/%.2s/%s",
debugdir, sbuild_id, filename);
if (unlink(linkname))
goto out_free;
err = 0;
out_free:
free(filename);
free(linkname);
return err;
}
static int dso__cache_build_id(struct dso *dso, struct machine *machine,
const char *debugdir)
{
bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
bool is_vdso = dso__is_vdso(dso);
const char *name = dso->long_name;
char nm[PATH_MAX];
if (dso__is_kcore(dso)) {
is_kallsyms = true;
machine__mmap_name(machine, nm, sizeof(nm));
name = nm;
}
return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), name,
debugdir, is_kallsyms, is_vdso);
}
static int __dsos__cache_build_ids(struct list_head *head,
struct machine *machine, const char *debugdir)
{
struct dso *pos;
int err = 0;
dsos__for_each_with_build_id(pos, head)
if (dso__cache_build_id(pos, machine, debugdir))
err = -1;
return err;
}
static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
{
int ret = __dsos__cache_build_ids(&machine->kernel_dsos.head, machine,
debugdir);
ret |= __dsos__cache_build_ids(&machine->user_dsos.head, machine,
debugdir);
return ret;
}
static int perf_session__cache_build_ids(struct perf_session *session)
{
struct rb_node *nd;
int ret;
char debugdir[PATH_MAX];
snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);
if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
return -1;
ret = machine__cache_build_ids(&session->machines.host, debugdir);
for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret |= machine__cache_build_ids(pos, debugdir);
}
return ret ? -1 : 0;
}
static bool machine__read_build_ids(struct machine *machine, bool with_hits)
{
bool ret;
ret = __dsos__read_build_ids(&machine->kernel_dsos.head, with_hits);
ret |= __dsos__read_build_ids(&machine->user_dsos.head, with_hits);
return ret;
}
static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
{
struct rb_node *nd;
bool ret = machine__read_build_ids(&session->machines.host, with_hits);
for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret |= machine__read_build_ids(pos, with_hits);
}
return ret;
}
static int write_tracing_data(int fd, struct perf_header *h __maybe_unused,
struct perf_evlist *evlist)
{
......@@ -523,7 +186,7 @@ static int write_build_id(int fd, struct perf_header *h,
if (!perf_session__read_build_ids(session, true))
return -1;
err = dsos__write_buildid_table(h, fd);
err = perf_session__write_buildid_table(session, fd);
if (err < 0) {
pr_debug("failed to write buildid table\n");
return err;
......@@ -1606,7 +1269,7 @@ static int __event_process_build_id(struct build_id_event *bev,
dso__set_build_id(dso, &bev->build_id);
if (filename[0] == '[')
if (!is_kernel_module(filename, NULL))
dso->kernel = dso_type;
build_id__sprintf(dso->build_id, sizeof(dso->build_id),
......
......@@ -122,10 +122,6 @@ int perf_header__process_sections(struct perf_header *header, int fd,
int perf_header__fprintf_info(struct perf_session *s, FILE *fp, bool full);
int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
const char *name, bool is_kallsyms, bool is_vdso);
int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir);
int perf_event__synthesize_attr(struct perf_tool *tool,
struct perf_event_attr *attr, u32 ids, u64 *id,
perf_event__handler_t process);
......@@ -151,7 +147,9 @@ int perf_event__process_build_id(struct perf_tool *tool,
struct perf_session *session);
bool is_perf_magic(u64 magic);
int dsos__hit_all(struct perf_session *session);
#define NAME_ALIGN 64
int write_padded(int fd, const void *bf, size_t count, size_t count_aligned);
/*
* arch specific callback
......
......@@ -46,4 +46,21 @@ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
__bitmap_or(dst, src1, src2, nbits);
}
/**
* test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*/
static inline int test_and_set_bit(int nr, unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
unsigned long old;
old = *p;
*p = old | mask;
return (old & mask) != 0;
}
#endif /* _PERF_BITOPS_H */
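As a usage sketch (not part of this patch), the define-once guard pattern the perl/python script engines adopt with this helper after including <linux/bitmap.h>; MAX_ID and the callback are hypothetical.
/* Illustrative only: the first call for a given id does the work,
 * subsequent calls find the bit already set and return immediately. */
#define MAX_ID 4096			/* example bound */
static DECLARE_BITMAP(ids_defined, MAX_ID);

static void define_once(int id, void (*do_define)(int))
{
	if (!test_and_set_bit(id, ids_defined))
		do_define(id);
}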
......@@ -15,6 +15,8 @@
#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
#define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
#define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE)
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
#define for_each_set_bit(bit, addr, size) \
for ((bit) = find_first_bit((addr), (size)); \
......
......@@ -464,6 +464,7 @@ struct map *machine__new_module(struct machine *machine, u64 start,
{
struct map *map;
struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);
bool compressed;
if (dso == NULL)
return NULL;
......@@ -476,6 +477,11 @@ struct map *machine__new_module(struct machine *machine, u64 start,
dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
else
dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
/* _KMODULE_COMP should be next to _KMODULE */
if (is_kernel_module(filename, &compressed) && compressed)
dso->symtab_type++;
map_groups__insert(&machine->kmaps, map);
return map;
}
......@@ -861,8 +867,14 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg,
struct map *map;
char *long_name;
if (dot == NULL || strcmp(dot, ".ko"))
if (dot == NULL)
continue;
/* On some systems, modules are compressed like .ko.gz */
if (is_supported_compression(dot + 1) &&
is_kmodule_extension(dot - 2))
dot -= 3;
snprintf(dso_name, sizeof(dso_name), "[%.*s]",
(int)(dot - dent->d_name), dent->d_name);
......@@ -1044,6 +1056,11 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
dot = strrchr(name, '.');
if (dot == NULL)
goto out_problem;
/* On some systems, modules are compressed like .ko.gz */
if (is_supported_compression(dot + 1))
dot -= 3;
if (!is_kmodule_extension(dot + 1))
goto out_problem;
snprintf(short_module_name, sizeof(short_module_name),
"[%.*s]", (int)(dot - name), name);
strxfrchar(short_module_name, '-', '_');
......@@ -1068,8 +1085,20 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
* Should be there already, from the build-id table in
* the header.
*/
struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
kmmap_prefix);
struct dso *kernel = NULL;
struct dso *dso;
list_for_each_entry(dso, &machine->kernel_dsos.head, node) {
if (is_kernel_module(dso->long_name, NULL))
continue;
kernel = dso;
break;
}
if (kernel == NULL)
kernel = __dsos__findnew(&machine->kernel_dsos,
kmmap_prefix);
if (kernel == NULL)
goto out_problem;
......@@ -1077,6 +1106,9 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
if (__machine__create_kernel_maps(machine, kernel) < 0)
goto out_problem;
if (strstr(dso->long_name, "vmlinux"))
dso__set_short_name(dso, "[kernel.vmlinux]", false);
machine__set_kernel_mmap_len(machine, event);
/*
......
......@@ -24,6 +24,7 @@
#include <string.h>
#include <ctype.h>
#include <errno.h>
#include <linux/bitmap.h>
#include "../util.h"
#include <EXTERN.h>
......@@ -57,7 +58,7 @@ INTERP my_perl;
#define FTRACE_MAX_EVENT \
((1 << (sizeof(unsigned short) * 8)) - 1)
struct event_format *events[FTRACE_MAX_EVENT];
static DECLARE_BITMAP(events_defined, FTRACE_MAX_EVENT);
extern struct scripting_context *scripting_context;
......@@ -238,35 +239,15 @@ static void define_event_symbols(struct event_format *event,
define_event_symbols(event, ev_name, args->next);
}
static inline struct event_format *find_cache_event(struct perf_evsel *evsel)
{
static char ev_name[256];
struct event_format *event;
int type = evsel->attr.config;
if (events[type])
return events[type];
events[type] = event = evsel->tp_format;
if (!event)
return NULL;
sprintf(ev_name, "%s::%s", event->system, event->name);
define_event_symbols(event, ev_name, event->print_fmt.args);
return event;
}
static void perl_process_tracepoint(struct perf_sample *sample,
struct perf_evsel *evsel,
struct thread *thread)
{
struct event_format *event = evsel->tp_format;
struct format_field *field;
static char handler[256];
unsigned long long val;
unsigned long s, ns;
struct event_format *event;
int pid;
int cpu = sample->cpu;
void *data = sample->raw_data;
......@@ -278,7 +259,6 @@ static void perl_process_tracepoint(struct perf_sample *sample,
if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
return;
event = find_cache_event(evsel);
if (!event)
die("ug! no event found for type %" PRIu64, (u64)evsel->attr.config);
......@@ -286,6 +266,9 @@ static void perl_process_tracepoint(struct perf_sample *sample,
sprintf(handler, "%s::%s", event->system, event->name);
if (!test_and_set_bit(event->id, events_defined))
define_event_symbols(event, handler, event->print_fmt.args);
s = nsecs / NSECS_PER_SEC;
ns = nsecs - s * NSECS_PER_SEC;
......
......@@ -26,6 +26,7 @@
#include <string.h>
#include <stdbool.h>
#include <errno.h>
#include <linux/bitmap.h>
#include "../../perf.h"
#include "../debug.h"
......@@ -37,6 +38,7 @@
#include "../comm.h"
#include "../machine.h"
#include "../db-export.h"
#include "../thread-stack.h"
#include "../trace-event.h"
#include "../machine.h"
......@@ -45,7 +47,7 @@ PyMODINIT_FUNC initperf_trace_context(void);
#define FTRACE_MAX_EVENT \
((1 << (sizeof(unsigned short) * 8)) - 1)
struct event_format *events[FTRACE_MAX_EVENT];
static DECLARE_BITMAP(events_defined, FTRACE_MAX_EVENT);
#define MAX_FIELDS 64
#define N_COMMON_FIELDS 7
......@@ -66,7 +68,10 @@ struct tables {
PyObject *comm_thread_handler;
PyObject *dso_handler;
PyObject *symbol_handler;
PyObject *branch_type_handler;
PyObject *sample_handler;
PyObject *call_path_handler;
PyObject *call_return_handler;
bool db_export_mode;
};
......@@ -251,31 +256,6 @@ static void define_event_symbols(struct event_format *event,
define_event_symbols(event, ev_name, args->next);
}
static inline struct event_format *find_cache_event(struct perf_evsel *evsel)
{
static char ev_name[256];
struct event_format *event;
int type = evsel->attr.config;
/*
* XXX: Do we really need to cache this since now we have evsel->tp_format
* cached already? Need to re-read this "cache" routine that as well calls
* define_event_symbols() :-\
*/
if (events[type])
return events[type];
events[type] = event = evsel->tp_format;
if (!event)
return NULL;
sprintf(ev_name, "%s__%s", event->system, event->name);
define_event_symbols(event, ev_name, event->print_fmt.args);
return event;
}
static PyObject *get_field_numeric_entry(struct event_format *event,
struct format_field *field, void *data)
{
......@@ -399,12 +379,12 @@ static void python_process_tracepoint(struct perf_sample *sample,
struct thread *thread,
struct addr_location *al)
{
struct event_format *event = evsel->tp_format;
PyObject *handler, *context, *t, *obj, *callchain;
PyObject *dict = NULL;
static char handler_name[256];
struct format_field *field;
unsigned long s, ns;
struct event_format *event;
unsigned n = 0;
int pid;
int cpu = sample->cpu;
......@@ -416,7 +396,6 @@ static void python_process_tracepoint(struct perf_sample *sample,
if (!t)
Py_FatalError("couldn't create Python tuple");
event = find_cache_event(evsel);
if (!event)
die("ug! no event found for type %d", (int)evsel->attr.config);
......@@ -424,6 +403,9 @@ static void python_process_tracepoint(struct perf_sample *sample,
sprintf(handler_name, "%s__%s", event->system, event->name);
if (!test_and_set_bit(event->id, events_defined))
define_event_symbols(event, handler_name, event->print_fmt.args);
handler = get_handler(handler_name);
if (!handler) {
dict = PyDict_New();
......@@ -664,13 +646,31 @@ static int python_export_symbol(struct db_export *dbe, struct symbol *sym,
return 0;
}
static int python_export_branch_type(struct db_export *dbe, u32 branch_type,
const char *name)
{
struct tables *tables = container_of(dbe, struct tables, dbe);
PyObject *t;
t = tuple_new(2);
tuple_set_s32(t, 0, branch_type);
tuple_set_string(t, 1, name);
call_object(tables->branch_type_handler, t, "branch_type_table");
Py_DECREF(t);
return 0;
}
static int python_export_sample(struct db_export *dbe,
struct export_sample *es)
{
struct tables *tables = container_of(dbe, struct tables, dbe);
PyObject *t;
t = tuple_new(19);
t = tuple_new(21);
tuple_set_u64(t, 0, es->db_id);
tuple_set_u64(t, 1, es->evsel->db_id);
......@@ -691,6 +691,8 @@ static int python_export_sample(struct db_export *dbe,
tuple_set_u64(t, 16, es->sample->weight);
tuple_set_u64(t, 17, es->sample->transaction);
tuple_set_u64(t, 18, es->sample->data_src);
tuple_set_s32(t, 19, es->sample->flags & PERF_BRANCH_MASK);
tuple_set_s32(t, 20, !!(es->sample->flags & PERF_IP_FLAG_IN_TX));
call_object(tables->sample_handler, t, "sample_table");
......@@ -699,6 +701,64 @@ static int python_export_sample(struct db_export *dbe,
return 0;
}
static int python_export_call_path(struct db_export *dbe, struct call_path *cp)
{
struct tables *tables = container_of(dbe, struct tables, dbe);
PyObject *t;
u64 parent_db_id, sym_db_id;
parent_db_id = cp->parent ? cp->parent->db_id : 0;
sym_db_id = cp->sym ? *(u64 *)symbol__priv(cp->sym) : 0;
t = tuple_new(4);
tuple_set_u64(t, 0, cp->db_id);
tuple_set_u64(t, 1, parent_db_id);
tuple_set_u64(t, 2, sym_db_id);
tuple_set_u64(t, 3, cp->ip);
call_object(tables->call_path_handler, t, "call_path_table");
Py_DECREF(t);
return 0;
}
static int python_export_call_return(struct db_export *dbe,
struct call_return *cr)
{
struct tables *tables = container_of(dbe, struct tables, dbe);
u64 comm_db_id = cr->comm ? cr->comm->db_id : 0;
PyObject *t;
t = tuple_new(11);
tuple_set_u64(t, 0, cr->db_id);
tuple_set_u64(t, 1, cr->thread->db_id);
tuple_set_u64(t, 2, comm_db_id);
tuple_set_u64(t, 3, cr->cp->db_id);
tuple_set_u64(t, 4, cr->call_time);
tuple_set_u64(t, 5, cr->return_time);
tuple_set_u64(t, 6, cr->branch_count);
tuple_set_u64(t, 7, cr->call_ref);
tuple_set_u64(t, 8, cr->return_ref);
tuple_set_u64(t, 9, cr->cp->parent->db_id);
tuple_set_s32(t, 10, cr->flags);
call_object(tables->call_return_handler, t, "call_return_table");
Py_DECREF(t);
return 0;
}
static int python_process_call_return(struct call_return *cr, void *data)
{
struct db_export *dbe = data;
return db_export__call_return(dbe, cr);
}
static void python_process_general_event(struct perf_sample *sample,
struct perf_evsel *evsel,
struct thread *thread,
......@@ -831,7 +891,9 @@ static int run_start_sub(void)
static void set_table_handlers(struct tables *tables)
{
const char *perf_db_export_mode = "perf_db_export_mode";
PyObject *db_export_mode;
const char *perf_db_export_calls = "perf_db_export_calls";
PyObject *db_export_mode, *db_export_calls;
bool export_calls = false;
int ret;
memset(tables, 0, sizeof(struct tables));
......@@ -848,6 +910,23 @@ static void set_table_handlers(struct tables *tables)
if (!ret)
return;
tables->dbe.crp = NULL;
db_export_calls = PyDict_GetItemString(main_dict, perf_db_export_calls);
if (db_export_calls) {
ret = PyObject_IsTrue(db_export_calls);
if (ret == -1)
handler_call_die(perf_db_export_calls);
export_calls = !!ret;
}
if (export_calls) {
tables->dbe.crp =
call_return_processor__new(python_process_call_return,
&tables->dbe);
if (!tables->dbe.crp)
Py_FatalError("failed to create calls processor");
}
tables->db_export_mode = true;
/*
* Reserve per symbol space for symbol->db_id via symbol__priv()
......@@ -861,7 +940,10 @@ static void set_table_handlers(struct tables *tables)
SET_TABLE_HANDLER(comm_thread);
SET_TABLE_HANDLER(dso);
SET_TABLE_HANDLER(symbol);
SET_TABLE_HANDLER(branch_type);
SET_TABLE_HANDLER(sample);
SET_TABLE_HANDLER(call_path);
SET_TABLE_HANDLER(call_return);
}
/*
......@@ -910,6 +992,12 @@ static int python_start_script(const char *script, int argc, const char **argv)
set_table_handlers(tables);
if (tables->db_export_mode) {
err = db_export__branch_types(&tables->dbe);
if (err)
goto error;
}
return err;
error:
Py_Finalize();
......@@ -920,7 +1008,9 @@ static int python_start_script(const char *script, int argc, const char **argv)
static int python_flush_script(void)
{
return 0;
struct tables *tables = &tables_global;
return db_export__flush(&tables->dbe);
}
/*
......
......@@ -546,6 +546,35 @@ static int dso__swap_init(struct dso *dso, unsigned char eidata)
return 0;
}
static int decompress_kmodule(struct dso *dso, const char *name,
enum dso_binary_type type)
{
int fd;
const char *ext = strrchr(name, '.');
char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";
if ((type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP) ||
type != dso->symtab_type)
return -1;
if (!ext || !is_supported_compression(ext + 1))
return -1;
fd = mkstemp(tmpbuf);
if (fd < 0)
return -1;
if (!decompress_to_file(ext + 1, name, fd)) {
close(fd);
fd = -1;
}
unlink(tmpbuf);
return fd;
}
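For illustration only, a minimal sketch (not part of this patch) of the anonymous temp-file idiom used above: the pathname is unlinked immediately, so the decompressed image lives only as long as the returned descriptor stays open.
#include <stdlib.h>	/* mkstemp */
#include <unistd.h>	/* unlink */

/* Illustrative only */
static int anon_tmpfile(void)
{
	char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";	/* same template as above */
	int fd = mkstemp(tmpbuf);

	if (fd < 0)
		return -1;
	unlink(tmpbuf);		/* name is gone; the inode survives until close(fd) */
	return fd;
}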
bool symsrc__possibly_runtime(struct symsrc *ss)
{
return ss->dynsym || ss->opdsec;
......@@ -571,7 +600,11 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
Elf *elf;
int fd;
fd = open(name, O_RDONLY);
if (dso__needs_decompress(dso))
fd = decompress_kmodule(dso, name, type);
else
fd = open(name, O_RDONLY);
if (fd < 0)
return -1;
......
......@@ -51,7 +51,9 @@ static enum dso_binary_type binary_type_symtab[] = {
DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
DSO_BINARY_TYPE__GUEST_KMODULE,
DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
DSO_BINARY_TYPE__NOT_FOUND,
};
......@@ -1300,7 +1302,9 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
return dso->kernel == DSO_TYPE_GUEST_KERNEL;
case DSO_BINARY_TYPE__GUEST_KMODULE:
case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
/*
* kernel modules know their symtab type - it's set when
* creating a module dso in machine__new_module().
......@@ -1368,7 +1372,9 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
return -1;
kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
/*
* Iterate over candidate debug images.
......@@ -1505,12 +1511,10 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map,
symbol_filter_t filter)
{
int i, err = 0;
char *filename;
char *filename = NULL;
pr_debug("Looking at the vmlinux_path (%d entries long)\n",
vmlinux_path__nr_entries + 1);
filename = dso__build_id_filename(dso, NULL, 0);
if (!symbol_conf.ignore_vmlinux_buildid)
filename = dso__build_id_filename(dso, NULL, 0);
if (filename != NULL) {
err = dso__load_vmlinux(dso, map, filename, true, filter);
if (err > 0)
......@@ -1518,6 +1522,9 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map,
free(filename);
}
pr_debug("Looking at the vmlinux_path (%d entries long)\n",
vmlinux_path__nr_entries + 1);
for (i = 0; i < vmlinux_path__nr_entries; ++i) {
err = dso__load_vmlinux(dso, map, vmlinux_path[i], false, filter);
if (err > 0)
......
......@@ -105,6 +105,7 @@ struct symbol_conf {
unsigned short nr_events;
bool try_vmlinux_path,
ignore_vmlinux,
ignore_vmlinux_buildid,
show_kernel_path,
use_modules,
sort_by_name,
......
/*
* thread-stack.h: Synthesize a thread's stack using call / return events
* Copyright (c) 2014, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
#ifndef __PERF_THREAD_STACK_H
#define __PERF_THREAD_STACK_H
#include <sys/types.h>
#include <linux/types.h>
#include <linux/rbtree.h>
struct thread;
struct comm;
struct ip_callchain;
struct symbol;
struct dso;
struct call_return_processor;
struct comm;
struct perf_sample;
struct addr_location;
/*
* Call/Return flags.
*
* CALL_RETURN_NO_CALL: 'return' but no matching 'call'
* CALL_RETURN_NO_RETURN: 'call' but no matching 'return'
*/
enum {
CALL_RETURN_NO_CALL = 1 << 0,
CALL_RETURN_NO_RETURN = 1 << 1,
};
/**
* struct call_return - paired call/return information.
* @thread: thread in which call/return occurred
* @comm: comm in which call/return occurred
* @cp: call path
* @call_time: timestamp of call (if known)
* @return_time: timestamp of return (if known)
* @branch_count: number of branches seen between call and return
* @call_ref: external reference to 'call' sample (e.g. db_id)
* @return_ref: external reference to 'return' sample (e.g. db_id)
* @db_id: id used for db-export
* @flags: Call/Return flags
*/
struct call_return {
struct thread *thread;
struct comm *comm;
struct call_path *cp;
u64 call_time;
u64 return_time;
u64 branch_count;
u64 call_ref;
u64 return_ref;
u64 db_id;
u32 flags;
};
/**
* struct call_path - node in list of calls leading to a function call.
* @parent: call path to the parent function call
* @sym: symbol of function called
* @ip: only if sym is null, the ip of the function
* @db_id: id used for db-export
 * @in_kernel: whether function is in the kernel
* @rb_node: node in parent's tree of called functions
* @children: tree of call paths of functions called
*
* In combination with the call_return structure, the call_path structure
 * defines a context-sensitive call-graph.
*/
struct call_path {
struct call_path *parent;
struct symbol *sym;
u64 ip;
u64 db_id;
bool in_kernel;
struct rb_node rb_node;
struct rb_root children;
};
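Together, call_return and call_path place every exported call in a context-sensitive call graph: each call_return references a call_path node, and following @parent links from that node recovers the full caller context. A minimal sketch of walking that chain (not part of the commit; the helper name and output format are illustrative, and it assumes struct symbol exposes ->name as elsewhere in perf):

#include <stdio.h>

/* Print a call_path as "caller -> ... -> callee", callers first. */
static void print_call_chain(const struct call_path *cp)
{
	if (!cp)
		return;
	print_call_chain(cp->parent);		/* render callers before the callee */
	if (cp->parent)
		printf(" -> ");
	if (cp->sym)
		printf("%s", cp->sym->name);
	else
		printf("%#llx", (unsigned long long)cp->ip);	/* no symbol: raw ip */
}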
int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
u64 to_ip, u16 insn_len, u64 trace_nr);
void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr);
void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
size_t sz, u64 ip);
void thread_stack__free(struct thread *thread);
struct call_return_processor *
call_return_processor__new(int (*process)(struct call_return *cr, void *data),
void *data);
void call_return_processor__free(struct call_return_processor *crp);
int thread_stack__process(struct thread *thread, struct comm *comm,
struct perf_sample *sample,
struct addr_location *from_al,
struct addr_location *to_al, u64 ref,
struct call_return_processor *crp);
#endif
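A rough usage outline for this API (hedged: the counting callback, the cookie, and the wrapper functions below are illustrative stand-ins, not how the commit's db-export code is wired up): register one callback with call_return_processor__new(), feed each sample through thread_stack__process() while the processor is alive, and free it at teardown.

#include "thread-stack.h"

/* Illustrative callback: count matched call/return pairs. */
static int count_call_return(struct call_return *cr, void *data)
{
	u64 *nr = data;				/* caller-supplied cookie */

	if (!(cr->flags & CALL_RETURN_NO_CALL))
		(*nr)++;			/* only pairs that saw a real 'call' */
	return 0;
}

static u64 nr_paired_calls;
static struct call_return_processor *crp;

static int calls_setup(void)
{
	crp = call_return_processor__new(count_call_return, &nr_paired_calls);
	return crp ? 0 : -1;
}

/* Called once per sample between setup and teardown. */
static int calls_sample(struct thread *thread, struct comm *comm,
			struct perf_sample *sample,
			struct addr_location *from_al,
			struct addr_location *to_al, u64 ref)
{
	return thread_stack__process(thread, comm, sample, from_al, to_al,
				     ref, crp);
}

static void calls_exit(void)
{
	call_return_processor__free(crp);
}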
......@@ -4,6 +4,7 @@
#include <string.h>
#include "session.h"
#include "thread.h"
#include "thread-stack.h"
#include "util.h"
#include "debug.h"
#include "comm.h"
......@@ -66,6 +67,8 @@ void thread__delete(struct thread *thread)
{
struct comm *comm, *tmp;
thread_stack__free(thread);
if (thread->mg) {
map_groups__put(thread->mg);
thread->mg = NULL;
......
......@@ -8,6 +8,8 @@
#include "symbol.h"
#include <strlist.h>
struct thread_stack;
struct thread {
union {
struct rb_node rb_node;
......@@ -26,6 +28,7 @@ struct thread {
u64 db_id;
void *priv;
struct thread_stack *ts;
};
struct machine;
......
......@@ -351,4 +351,9 @@ void mem_bswap_32(void *src, int byte_size);
const char *get_filename_for_perf_kvm(void);
bool find_process(const char *name);
#ifdef HAVE_ZLIB_SUPPORT
int gzip_decompress_to_file(const char *input, int output_fd);
#endif
#endif /* GIT_COMPAT_UTIL_H */
#include <stdio.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <zlib.h>
#include "util/util.h"
#include "util/debug.h"
#define CHUNK_SIZE 16384
int gzip_decompress_to_file(const char *input, int output_fd)
{
int ret = Z_STREAM_ERROR;
int input_fd;
void *ptr;
int len;
struct stat stbuf;
unsigned char buf[CHUNK_SIZE];
z_stream zs = {
.zalloc = Z_NULL,
.zfree = Z_NULL,
.opaque = Z_NULL,
.avail_in = 0,
.next_in = Z_NULL,
};
input_fd = open(input, O_RDONLY);
if (input_fd < 0)
return -1;
if (fstat(input_fd, &stbuf) < 0)
goto out_close;
ptr = mmap(NULL, stbuf.st_size, PROT_READ, MAP_PRIVATE, input_fd, 0);
if (ptr == MAP_FAILED)
goto out_close;
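	/* A windowBits of 16 + MAX_WBITS tells zlib to expect a gzip header
	 * rather than a raw zlib stream. */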
if (inflateInit2(&zs, 16 + MAX_WBITS) != Z_OK)
goto out_unmap;
zs.next_in = ptr;
zs.avail_in = stbuf.st_size;
do {
zs.next_out = buf;
zs.avail_out = CHUNK_SIZE;
ret = inflate(&zs, Z_NO_FLUSH);
switch (ret) {
case Z_NEED_DICT:
ret = Z_DATA_ERROR;
/* fall through */
case Z_DATA_ERROR:
case Z_MEM_ERROR:
goto out;
default:
break;
}
len = CHUNK_SIZE - zs.avail_out;
if (writen(output_fd, buf, len) != len) {
ret = Z_DATA_ERROR;
goto out;
}
} while (ret != Z_STREAM_END);
out:
inflateEnd(&zs);
out_unmap:
munmap(ptr, stbuf.st_size);
out_close:
close(input_fd);
return ret == Z_STREAM_END ? 0 : -1;
}
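For illustration only (not part of the commit): a caller that wants a file descriptor for the decompressed data rather than a named file can pair gzip_decompress_to_file() with the same mkstemp()/unlink() pattern used by decompress_kmodule() above, and stub the helper out when perf is built with NO_ZLIB. The helper name and temp-file template are made up for this sketch.

#include <stdlib.h>
#include <unistd.h>

#ifdef HAVE_ZLIB_SUPPORT
/* Decompress a gzipped file into an anonymous temp file; return its fd or -1. */
static int open_decompressed(const char *gz_path)
{
	char tmp[] = "/tmp/perf-gz-XXXXXX";	/* illustrative template */
	int fd = mkstemp(tmp);

	if (fd < 0)
		return -1;
	if (gzip_decompress_to_file(gz_path, fd)) {
		close(fd);
		fd = -1;
	}
	unlink(tmp);	/* the name goes away; the open fd keeps the data */
	return fd;
}
#else
static int open_decompressed(const char *gz_path)
{
	(void)gz_path;	/* built with NO_ZLIB: compressed modules unsupported */
	return -1;
}
#endif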