Commit 0a119538 authored by Ingo Molnar

Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Conflicts:
	tools/Makefile

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

. Honor parallel jobs, fix from Borislav Petkov

. Introduce tools/lib/lk library, initially just removing duplication
  between tools/perf and tools/vm, from Borislav Petkov.

. Fix build on non-glibc systems due to libio.h absence, from Cody P Schafer.

. Remove some perf_session and tracing dead code, from David Ahern.

. Introduce perf stat --repeat forever, from Frederik Deweerdt.

. Add perf test entries for checking --cpu in record and stat, from Jiri Olsa.

. Add perf test entries for checking breakpoint overflow signal handler issues,
  from Jiri Olsa.

. Add perf test entry for checking the number of EXIT events, from Namhyung Kim.

. Simplify some perf_evlist methods to allow 'stat' to share code with
  'record' and 'trace'.

. Remove dead code related to libtraceevent integration, from Namhyung Kim.

. Event group view for 'annotate' in --stdio, --tui and --gtk, from Namhyung Kim.
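For the two user-visible changes above, usage looks roughly like this (a sketch:
'sleep 1' and './workload' are placeholder workloads, and the '{...}' brace
syntax assumes event-group parsing is available in this perf version):

    # keep re-running the workload and print stats after each run (0 = forever)
    perf stat -r 0 -- sleep 1

    # record an event group, then annotate with one percentage column per event
    perf record -e '{cycles,instructions}' -- ./workload
    perf annotate --group --stdio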

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
[ resolved the trivial merge conflict with upstream ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 3bf23917 bc96b361
......@@ -1331,11 +1331,11 @@ kernelversion:
# Clear a bunch of variables before executing the submake
tools/: FORCE
$(Q)mkdir -p $(objtree)/tools
$(Q)$(MAKE) LDFLAGS= MAKEFLAGS= O=$(objtree) subdir=tools -C $(src)/tools/
$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(objtree) subdir=tools -C $(src)/tools/
tools/%: FORCE
$(Q)mkdir -p $(objtree)/tools
$(Q)$(MAKE) LDFLAGS= MAKEFLAGS= O=$(objtree) subdir=tools -C $(src)/tools/ $*
$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(objtree) subdir=tools -C $(src)/tools/ $*
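# In the two tools/ recipes above, $(filter --j% -j,$(MAKEFLAGS)) keeps only the
# -j/--jobserver words of MAKEFLAGS, so the tools sub-make honors parallel jobs
# while the rest of the inherited make flags are still cleared.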
# Single targets
# ---------------------------------------------------------------------------
......
......@@ -34,7 +34,13 @@ help:
cpupower: FORCE
$(call descend,power/$@)
cgroup firewire lguest perf usb virtio vm: FORCE
cgroup firewire lguest usb virtio vm: FORCE
$(call descend,$@)
liblk: FORCE
$(call descend,lib/lk)
perf: liblk FORCE
$(call descend,$@)
selftests: FORCE
......@@ -62,7 +68,13 @@ install: cgroup_install cpupower_install firewire_install lguest_install \
cpupower_clean:
$(call descend,power/cpupower,clean)
cgroup_clean firewire_clean lguest_clean perf_clean usb_clean virtio_clean vm_clean:
cgroup_clean firewire_clean lguest_clean usb_clean virtio_clean vm_clean:
$(call descend,$(@:_clean=),clean)
liblk_clean:
$(call descend,lib/lk,clean)
perf_clean: liblk_clean
$(call descend,$(@:_clean=),clean)
selftests_clean:
......
include ../../scripts/Makefile.include
# guard against environment variables
LIB_H=
LIB_OBJS=
LIB_H += debugfs.h
LIB_OBJS += $(OUTPUT)debugfs.o
LIBFILE = liblk.a
CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) -fPIC
EXTLIBS = -lpthread -lrt -lelf -lm
ALL_CFLAGS = $(CFLAGS) $(BASIC_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
ALL_LDFLAGS = $(LDFLAGS)
RM = rm -f
$(LIBFILE): $(LIB_OBJS)
$(QUIET_AR)$(RM) $@ && $(AR) rcs $(OUTPUT)$@ $(LIB_OBJS)
$(LIB_OBJS): $(LIB_H)
$(OUTPUT)%.o: %.c
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
$(OUTPUT)%.s: %.c
$(QUIET_CC)$(CC) -S $(ALL_CFLAGS) $<
$(OUTPUT)%.o: %.S
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
clean:
$(RM) $(LIB_OBJS) $(LIBFILE)
.PHONY: clean
#include "util.h"
#include "debugfs.h"
#include "cache.h"
#include <linux/kernel.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <sys/vfs.h>
#include <sys/mount.h>
#include <linux/magic.h>
#include <linux/kernel.h>
#include "debugfs.h"
static int debugfs_premounted;
char debugfs_mountpoint[PATH_MAX + 1] = "/sys/kernel/debug";
char tracing_events_path[PATH_MAX + 1] = "/sys/kernel/debug/tracing/events";
static const char *debugfs_known_mountpoints[] = {
static const char * const debugfs_known_mountpoints[] = {
"/sys/kernel/debug/",
"/debug/",
0,
};
static int debugfs_found;
static bool debugfs_found;
/* find the path to the mounted debugfs */
const char *debugfs_find_mountpoint(void)
{
const char **ptr;
const char * const *ptr;
char type[100];
FILE *fp;
if (debugfs_found)
return (const char *) debugfs_mountpoint;
return (const char *)debugfs_mountpoint;
ptr = debugfs_known_mountpoints;
while (*ptr) {
if (debugfs_valid_mountpoint(*ptr) == 0) {
debugfs_found = 1;
debugfs_found = true;
strcpy(debugfs_mountpoint, *ptr);
return debugfs_mountpoint;
}
......@@ -52,7 +55,7 @@ const char *debugfs_find_mountpoint(void)
if (strcmp(type, "debugfs") != 0)
return NULL;
debugfs_found = 1;
debugfs_found = true;
return debugfs_mountpoint;
}
......@@ -71,21 +74,12 @@ int debugfs_valid_mountpoint(const char *debugfs)
return 0;
}
static void debugfs_set_tracing_events_path(const char *mountpoint)
{
snprintf(tracing_events_path, sizeof(tracing_events_path), "%s/%s",
mountpoint, "tracing/events");
}
/* mount the debugfs somewhere if it's not mounted */
char *debugfs_mount(const char *mountpoint)
{
/* see if it's already mounted */
if (debugfs_find_mountpoint()) {
debugfs_premounted = 1;
if (debugfs_find_mountpoint())
goto out;
}
/* if not mounted and no argument */
if (mountpoint == NULL) {
......@@ -100,15 +94,8 @@ char *debugfs_mount(const char *mountpoint)
return NULL;
/* save the mountpoint */
debugfs_found = 1;
debugfs_found = true;
strncpy(debugfs_mountpoint, mountpoint, sizeof(debugfs_mountpoint));
out:
debugfs_set_tracing_events_path(debugfs_mountpoint);
return debugfs_mountpoint;
}
void debugfs_set_path(const char *mountpoint)
{
snprintf(debugfs_mountpoint, sizeof(debugfs_mountpoint), "%s", mountpoint);
debugfs_set_tracing_events_path(mountpoint);
}
#ifndef __LK_DEBUGFS_H__
#define __LK_DEBUGFS_H__
#define _STR(x) #x
#define STR(x) _STR(x)
/*
* On most systems <limits.h> would have given us this, but not on some systems
* (e.g. GNU/Hurd).
*/
#ifndef PATH_MAX
#define PATH_MAX 4096
#endif
#ifndef DEBUGFS_MAGIC
#define DEBUGFS_MAGIC 0x64626720
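/* 0x64626720 is ASCII "dbg ", the debugfs filesystem magic compared against statfs() f_type */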
#endif
#ifndef PERF_DEBUGFS_ENVIRONMENT
#define PERF_DEBUGFS_ENVIRONMENT "PERF_DEBUGFS_DIR"
#endif
const char *debugfs_find_mountpoint(void);
int debugfs_valid_mountpoint(const char *debugfs);
char *debugfs_mount(const char *mountpoint);
extern char debugfs_mountpoint[];
#endif /* __LK_DEBUGFS_H__ */
......@@ -93,6 +93,9 @@ OPTIONS
--skip-missing::
Skip symbols that cannot be annotated.
--group::
Show event group information together
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-report[1]
......@@ -52,7 +52,7 @@ OPTIONS
-r::
--repeat=<n>::
repeat command and print average + stddev (max: 100)
repeat command and print average + stddev (max: 100). 0 means forever.
-B::
--big-num::
......
tools/perf
tools/scripts
tools/lib/traceevent
tools/lib/lk
include/linux/const.h
include/linux/perf_event.h
include/linux/rbtree.h
......
......@@ -215,6 +215,7 @@ BASIC_CFLAGS = \
-Iutil \
-I. \
-I$(TRACE_EVENT_DIR) \
-I../lib/ \
-D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
BASIC_LDFLAGS =
......@@ -240,19 +241,28 @@ SCRIPT_SH += perf-archive.sh
grep-libs = $(filter -l%,$(1))
strip-libs = $(filter-out -l%,$(1))
LK_DIR = ../lib/lk/
TRACE_EVENT_DIR = ../lib/traceevent/
LK_PATH=$(LK_DIR)
ifneq ($(OUTPUT),)
TE_PATH=$(OUTPUT)
ifneq ($(subdir),)
LK_PATH=$(OUTPUT)$(LK_DIR)
else
LK_PATH=$(OUTPUT)
endif
else
TE_PATH=$(TRACE_EVENT_DIR)
endif
LIBTRACEEVENT = $(TE_PATH)libtraceevent.a
TE_LIB := -L$(TE_PATH) -ltraceevent
export LIBTRACEEVENT
LIBLK = $(LK_PATH)liblk.a
export LIBLK
# python extension build directories
PYTHON_EXTBUILD := $(OUTPUT)python_ext_build/
PYTHON_EXTBUILD_LIB := $(PYTHON_EXTBUILD)lib/
......@@ -355,7 +365,6 @@ LIB_H += util/cache.h
LIB_H += util/callchain.h
LIB_H += util/build-id.h
LIB_H += util/debug.h
LIB_H += util/debugfs.h
LIB_H += util/sysfs.h
LIB_H += util/pmu.h
LIB_H += util/event.h
......@@ -416,7 +425,6 @@ LIB_OBJS += $(OUTPUT)util/annotate.o
LIB_OBJS += $(OUTPUT)util/build-id.o
LIB_OBJS += $(OUTPUT)util/config.o
LIB_OBJS += $(OUTPUT)util/ctype.o
LIB_OBJS += $(OUTPUT)util/debugfs.o
LIB_OBJS += $(OUTPUT)util/sysfs.o
LIB_OBJS += $(OUTPUT)util/pmu.o
LIB_OBJS += $(OUTPUT)util/environment.o
......@@ -503,6 +511,10 @@ LIB_OBJS += $(OUTPUT)tests/evsel-tp-sched.o
LIB_OBJS += $(OUTPUT)tests/pmu.o
LIB_OBJS += $(OUTPUT)tests/hists_link.o
LIB_OBJS += $(OUTPUT)tests/python-use.o
LIB_OBJS += $(OUTPUT)tests/bp_signal.o
LIB_OBJS += $(OUTPUT)tests/bp_signal_overflow.o
LIB_OBJS += $(OUTPUT)tests/task-exit.o
LIB_OBJS += $(OUTPUT)tests/sw-clock.o
BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
BUILTIN_OBJS += $(OUTPUT)builtin-bench.o
......@@ -536,7 +548,7 @@ BUILTIN_OBJS += $(OUTPUT)builtin-kvm.o
BUILTIN_OBJS += $(OUTPUT)builtin-inject.o
BUILTIN_OBJS += $(OUTPUT)tests/builtin-test.o
PERFLIBS = $(LIB_FILE) $(LIBTRACEEVENT)
PERFLIBS = $(LIB_FILE) $(LIBLK) $(LIBTRACEEVENT)
#
# Platform specific tweaks
......@@ -1051,6 +1063,18 @@ $(LIBTRACEEVENT):
$(LIBTRACEEVENT)-clean:
$(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) clean
# if subdir is set, we've been called from above so target has been built
# already
$(LIBLK):
ifeq ($(subdir),)
$(QUIET_SUBDIR0)$(LK_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) liblk.a
endif
$(LIBLK)-clean:
ifeq ($(subdir),)
$(QUIET_SUBDIR0)$(LK_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) clean
endif
help:
@echo 'Perf make targets:'
@echo ' doc - make *all* documentation (see below)'
......@@ -1171,7 +1195,7 @@ $(INSTALL_DOC_TARGETS):
### Cleaning rules
clean: $(LIBTRACEEVENT)-clean
clean: $(LIBTRACEEVENT)-clean $(LIBLK)-clean
$(RM) $(LIB_OBJS) $(BUILTIN_OBJS) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf.o $(LANG_BINDINGS)
$(RM) $(ALL_PROGRAMS) perf
$(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope*
......@@ -1181,6 +1205,6 @@ clean: $(LIBTRACEEVENT)-clean
$(RM) $(OUTPUT)util/*-flex*
$(python-clean)
.PHONY: all install clean strip $(LIBTRACEEVENT)
.PHONY: all install clean strip $(LIBTRACEEVENT) $(LIBLK)
.PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell
.PHONY: .FORCE-PERF-VERSION-FILE TAGS tags cscope .FORCE-PERF-CFLAGS
......@@ -8,10 +8,7 @@
* published by the Free Software Foundation.
*/
#include <stdlib.h>
#ifndef __UCLIBC__
#include <libio.h>
#endif
#include <stddef.h>
#include <dwarf-regs.h>
struct pt_regs_dwarfnum {
......
......@@ -9,10 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
#include <stdlib.h>
#ifndef __UCLIBC__
#include <libio.h>
#endif
#include <stddef.h>
#include <dwarf-regs.h>
......
......@@ -6,7 +6,7 @@
*
*/
#include <libio.h>
#include <stddef.h>
#include <dwarf-regs.h>
#define NUM_GPRS 16
......
......@@ -19,7 +19,7 @@
*
*/
#include <libio.h>
#include <stddef.h>
#include <dwarf-regs.h>
/*
......
......@@ -9,7 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
#include <libio.h>
#include <stddef.h>
#include <dwarf-regs.h>
#define SPARC_MAX_REGS 96
......
......@@ -20,7 +20,7 @@
*
*/
#include <libio.h>
#include <stddef.h>
#include <dwarf-regs.h>
/*
......
......@@ -109,14 +109,16 @@ static int process_sample_event(struct perf_tool *tool,
return 0;
}
static int hist_entry__tty_annotate(struct hist_entry *he, int evidx,
static int hist_entry__tty_annotate(struct hist_entry *he,
struct perf_evsel *evsel,
struct perf_annotate *ann)
{
return symbol__tty_annotate(he->ms.sym, he->ms.map, evidx,
return symbol__tty_annotate(he->ms.sym, he->ms.map, evsel,
ann->print_line, ann->full_paths, 0, 0);
}
static void hists__find_annotations(struct hists *self, int evidx,
static void hists__find_annotations(struct hists *self,
struct perf_evsel *evsel,
struct perf_annotate *ann)
{
struct rb_node *nd = rb_first(&self->entries), *next;
......@@ -142,14 +144,14 @@ static void hists__find_annotations(struct hists *self, int evidx,
if (use_browser == 2) {
int ret;
ret = hist_entry__gtk_annotate(he, evidx, NULL);
ret = hist_entry__gtk_annotate(he, evsel, NULL);
if (!ret || !ann->skip_missing)
return;
/* skip missing symbols */
nd = rb_next(nd);
} else if (use_browser == 1) {
key = hist_entry__tui_annotate(he, evidx, NULL);
key = hist_entry__tui_annotate(he, evsel, NULL);
switch (key) {
case -1:
if (!ann->skip_missing)
......@@ -168,7 +170,7 @@ static void hists__find_annotations(struct hists *self, int evidx,
if (next != NULL)
nd = next;
} else {
hist_entry__tty_annotate(he, evidx, ann);
hist_entry__tty_annotate(he, evsel, ann);
nd = rb_next(nd);
/*
* Since we have a hist_entry per IP for the same
......@@ -230,7 +232,12 @@ static int __cmd_annotate(struct perf_annotate *ann)
total_nr_samples += nr_samples;
hists__collapse_resort(hists);
hists__output_resort(hists);
hists__find_annotations(hists, pos->idx, ann);
if (symbol_conf.event_group &&
!perf_evsel__is_group_leader(pos))
continue;
hists__find_annotations(hists, pos, ann);
}
}
......@@ -312,6 +319,8 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
"Specify disassembler style (e.g. -M intel for intel syntax)"),
OPT_STRING(0, "objdump", &objdump_path, "path",
"objdump binary to use for disassembly and annotations"),
OPT_BOOLEAN(0, "group", &symbol_conf.event_group,
"Show event group information together"),
OPT_END()
};
......
......@@ -12,7 +12,7 @@
#include "util/parse-options.h"
#include "util/trace-event.h"
#include "util/debug.h"
#include "util/debugfs.h"
#include <lk/debugfs.h>
#include "util/tool.h"
#include "util/stat.h"
......
......@@ -37,7 +37,7 @@
#include "util/strfilter.h"
#include "util/symbol.h"
#include "util/debug.h"
#include "util/debugfs.h"
#include <lk/debugfs.h>
#include "util/parse-options.h"
#include "util/probe-finder.h"
#include "util/probe-event.h"
......
......@@ -474,7 +474,9 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
}
if (forks) {
err = perf_evlist__prepare_workload(evsel_list, opts, argv);
err = perf_evlist__prepare_workload(evsel_list, &opts->target,
argv, opts->pipe_output,
true);
if (err < 0) {
pr_err("Couldn't run the workload!\n");
goto out_delete_session;
......@@ -964,7 +966,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
struct perf_record *rec = &record;
char errbuf[BUFSIZ];
evsel_list = perf_evlist__new(NULL, NULL);
evsel_list = perf_evlist__new();
if (evsel_list == NULL)
return -ENOMEM;
......@@ -1026,7 +1028,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
ui__error("%s", errbuf);
err = -saved_errno;
goto out_free_fd;
goto out_symbol_exit;
}
err = -ENOMEM;
......@@ -1057,6 +1059,9 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
}
err = __cmd_record(&record, argc, argv);
perf_evlist__munmap(evsel_list);
perf_evlist__close(evsel_list);
out_free_fd:
perf_evlist__delete_maps(evsel_list);
out_symbol_exit:
......
......@@ -13,7 +13,6 @@
#include "util/annotate.h"
#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
#include "util/callchain.h"
......@@ -314,7 +313,7 @@ static size_t hists__fprintf_nr_sample_events(struct hists *self,
char buf[512];
size_t size = sizeof(buf);
if (symbol_conf.event_group && evsel->nr_members > 1) {
if (perf_evsel__is_group_event(evsel)) {
struct perf_evsel *pos;
perf_evsel__group_desc(evsel, buf, size);
......
......@@ -94,6 +94,7 @@ static const char *pre_cmd = NULL;
static const char *post_cmd = NULL;
static bool sync_run = false;
static unsigned int interval = 0;
static bool forever = false;
static struct timespec ref_time;
static struct cpu_map *sock_map;
......@@ -125,6 +126,11 @@ static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel)
return perf_evsel__cpus(evsel)->nr;
}
static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
{
memset(evsel->priv, 0, sizeof(struct perf_stat));
}
static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
{
evsel->priv = zalloc(sizeof(struct perf_stat));
......@@ -160,6 +166,35 @@ static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
evsel->prev_raw_counts = NULL;
}
static void perf_evlist__free_stats(struct perf_evlist *evlist)
{
struct perf_evsel *evsel;
list_for_each_entry(evsel, &evlist->entries, node) {
perf_evsel__free_stat_priv(evsel);
perf_evsel__free_counts(evsel);
perf_evsel__free_prev_raw_counts(evsel);
}
}
static int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw)
{
struct perf_evsel *evsel;
list_for_each_entry(evsel, &evlist->entries, node) {
if (perf_evsel__alloc_stat_priv(evsel) < 0 ||
perf_evsel__alloc_counts(evsel, perf_evsel__nr_cpus(evsel)) < 0 ||
(alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel) < 0))
goto out_free;
}
return 0;
out_free:
perf_evlist__free_stats(evlist);
return -1;
}
static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
static struct stats runtime_cycles_stats[MAX_NR_CPUS];
static struct stats runtime_stalled_cycles_front_stats[MAX_NR_CPUS];
......@@ -173,6 +208,29 @@ static struct stats runtime_itlb_cache_stats[MAX_NR_CPUS];
static struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS];
static struct stats walltime_nsecs_stats;
static void perf_stat__reset_stats(struct perf_evlist *evlist)
{
struct perf_evsel *evsel;
list_for_each_entry(evsel, &evlist->entries, node) {
perf_evsel__reset_stat_priv(evsel);
perf_evsel__reset_counts(evsel, perf_evsel__nr_cpus(evsel));
}
memset(runtime_nsecs_stats, 0, sizeof(runtime_nsecs_stats));
memset(runtime_cycles_stats, 0, sizeof(runtime_cycles_stats));
memset(runtime_stalled_cycles_front_stats, 0, sizeof(runtime_stalled_cycles_front_stats));
memset(runtime_stalled_cycles_back_stats, 0, sizeof(runtime_stalled_cycles_back_stats));
memset(runtime_branches_stats, 0, sizeof(runtime_branches_stats));
memset(runtime_cacherefs_stats, 0, sizeof(runtime_cacherefs_stats));
memset(runtime_l1_dcache_stats, 0, sizeof(runtime_l1_dcache_stats));
memset(runtime_l1_icache_stats, 0, sizeof(runtime_l1_icache_stats));
memset(runtime_ll_cache_stats, 0, sizeof(runtime_ll_cache_stats));
memset(runtime_itlb_cache_stats, 0, sizeof(runtime_itlb_cache_stats));
memset(runtime_dtlb_cache_stats, 0, sizeof(runtime_dtlb_cache_stats));
memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
}
static int create_perf_stat_counter(struct perf_evsel *evsel)
{
struct perf_event_attr *attr = &evsel->attr;
......@@ -249,7 +307,7 @@ static int read_counter_aggr(struct perf_evsel *counter)
int i;
if (__perf_evsel__read(counter, perf_evsel__nr_cpus(counter),
evsel_list->threads->nr, scale) < 0)
thread_map__nr(evsel_list->threads), scale) < 0)
return -1;
for (i = 0; i < 3; i++)
......@@ -337,16 +395,14 @@ static void print_interval(void)
}
}
static int __run_perf_stat(int argc __maybe_unused, const char **argv)
static int __run_perf_stat(int argc, const char **argv)
{
char msg[512];
unsigned long long t0, t1;
struct perf_evsel *counter;
struct timespec ts;
int status = 0;
int child_ready_pipe[2], go_pipe[2];
const bool forks = (argc > 0);
char buf;
if (interval) {
ts.tv_sec = interval / 1000;
......@@ -362,55 +418,12 @@ static int __run_perf_stat(int argc __maybe_unused, const char **argv)
return -1;
}
if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
perror("failed to create pipes");
return -1;
}
if (forks) {
if ((child_pid = fork()) < 0)
perror("failed to fork");
if (!child_pid) {
close(child_ready_pipe[0]);
close(go_pipe[1]);
fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
/*
* Do a dummy execvp to get the PLT entry resolved,
* so we avoid the resolver overhead on the real
* execvp call.
*/
execvp("", (char **)argv);
/*
* Tell the parent we're ready to go
*/
close(child_ready_pipe[1]);
/*
* Wait until the parent tells us to go.
*/
if (read(go_pipe[0], &buf, 1) == -1)
perror("unable to read pipe");
execvp(argv[0], (char **)argv);
perror(argv[0]);
exit(-1);
if (perf_evlist__prepare_workload(evsel_list, &target, argv,
false, false) < 0) {
perror("failed to prepare workload");
return -1;
}
if (perf_target__none(&target))
evsel_list->threads->map[0] = child_pid;
/*
* Wait for the child to be ready to exec.
*/
close(child_ready_pipe[1]);
close(go_pipe[0]);
if (read(child_ready_pipe[0], &buf, 1) == -1)
perror("unable to read pipe");
close(child_ready_pipe[0]);
}
if (group)
......@@ -457,7 +470,8 @@ static int __run_perf_stat(int argc __maybe_unused, const char **argv)
clock_gettime(CLOCK_MONOTONIC, &ref_time);
if (forks) {
close(go_pipe[1]);
perf_evlist__start_workload(evsel_list);
if (interval) {
while (!waitpid(child_pid, &status, WNOHANG)) {
nanosleep(&ts, NULL);
......@@ -488,7 +502,7 @@ static int __run_perf_stat(int argc __maybe_unused, const char **argv)
list_for_each_entry(counter, &evsel_list->entries, node) {
read_counter_aggr(counter);
perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter),
evsel_list->threads->nr);
thread_map__nr(evsel_list->threads));
}
}
......@@ -1296,7 +1310,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_INTEGER('r', "repeat", &run_count,
"repeat command and print average + stddev (max: 100)"),
"repeat command and print average + stddev (max: 100, forever: 0)"),
OPT_BOOLEAN('n', "null", &null_run,
"null run - dont start any counters"),
OPT_INCR('d', "detailed", &detailed_run,
......@@ -1330,13 +1344,12 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
"perf stat [<options>] [<command>]",
NULL
};
struct perf_evsel *pos;
int status = -ENOMEM, run_idx;
const char *mode;
setlocale(LC_ALL, "");
evsel_list = perf_evlist__new(NULL, NULL);
evsel_list = perf_evlist__new();
if (evsel_list == NULL)
return -ENOMEM;
......@@ -1399,8 +1412,12 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
if (!argc && !perf_target__has_task(&target))
usage_with_options(stat_usage, options);
if (run_count <= 0)
if (run_count < 0) {
usage_with_options(stat_usage, options);
} else if (run_count == 0) {
forever = true;
run_count = 1;
}
/* no_aggr, cgroup are for system-wide only */
if ((no_aggr || nr_cgroups) && !perf_target__has_cpu(&target)) {
......@@ -1438,17 +1455,8 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
return -1;
}
list_for_each_entry(pos, &evsel_list->entries, node) {
if (perf_evsel__alloc_stat_priv(pos) < 0 ||
perf_evsel__alloc_counts(pos, perf_evsel__nr_cpus(pos)) < 0)
goto out_free_fd;
}
if (interval) {
list_for_each_entry(pos, &evsel_list->entries, node) {
if (perf_evsel__alloc_prev_raw_counts(pos) < 0)
goto out_free_fd;
}
}
if (perf_evlist__alloc_stats(evsel_list, interval))
goto out_free_maps;
/*
* We dont want to block the signals - that would cause
......@@ -1457,28 +1465,30 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
* task, but being ignored by perf stat itself:
*/
atexit(sig_atexit);
signal(SIGINT, skip_signal);
if (!forever)
signal(SIGINT, skip_signal);
signal(SIGCHLD, skip_signal);
signal(SIGALRM, skip_signal);
signal(SIGABRT, skip_signal);
status = 0;
for (run_idx = 0; run_idx < run_count; run_idx++) {
for (run_idx = 0; forever || run_idx < run_count; run_idx++) {
if (run_count != 1 && verbose)
fprintf(output, "[ perf stat: executing run #%d ... ]\n",
run_idx + 1);
status = run_perf_stat(argc, argv);
if (forever && status != -1) {
print_stat(argc, argv);
perf_stat__reset_stats(evsel_list);
}
}
if (status != -1 && !interval)
if (!forever && status != -1 && !interval)
print_stat(argc, argv);
out_free_fd:
list_for_each_entry(pos, &evsel_list->entries, node) {
perf_evsel__free_stat_priv(pos);
perf_evsel__free_counts(pos);
perf_evsel__free_prev_raw_counts(pos);
}
perf_evlist__free_stats(evsel_list);
out_free_maps:
perf_evlist__delete_maps(evsel_list);
out:
perf_evlist__delete(evsel_list);
......
......@@ -231,7 +231,7 @@ static void perf_top__show_details(struct perf_top *top)
printf("Showing %s for %s\n", perf_evsel__name(top->sym_evsel), symbol->name);
printf(" Events Pcnt (>=%d%%)\n", top->sym_pcnt_filter);
more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel->idx,
more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel,
0, top->sym_pcnt_filter, top->print_entries, 4);
if (top->zero)
symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx);
......@@ -1116,7 +1116,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
NULL
};
top.evlist = perf_evlist__new(NULL, NULL);
top.evlist = perf_evlist__new();
if (top.evlist == NULL)
return -ENOMEM;
......
......@@ -419,7 +419,7 @@ static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evs
static int trace__run(struct trace *trace, int argc, const char **argv)
{
struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
struct perf_evlist *evlist = perf_evlist__new();
struct perf_evsel *evsel;
int err = -1, i;
unsigned long before;
......@@ -452,7 +452,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
err = trace__symbols_init(trace, evlist);
if (err < 0) {
printf("Problems initializing symbol libraries!\n");
goto out_delete_evlist;
goto out_delete_maps;
}
perf_evlist__config(evlist, &trace->opts);
......@@ -461,23 +461,24 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
signal(SIGINT, sig_handler);
if (forks) {
err = perf_evlist__prepare_workload(evlist, &trace->opts, argv);
err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
argv, false, false);
if (err < 0) {
printf("Couldn't run the workload!\n");
goto out_delete_evlist;
goto out_delete_maps;
}
}
err = perf_evlist__open(evlist);
if (err < 0) {
printf("Couldn't create the events: %s\n", strerror(errno));
goto out_delete_evlist;
goto out_delete_maps;
}
err = perf_evlist__mmap(evlist, UINT_MAX, false);
if (err < 0) {
printf("Couldn't mmap the events: %s\n", strerror(errno));
goto out_delete_evlist;
goto out_close_evlist;
}
perf_evlist__enable(evlist);
......@@ -526,13 +527,6 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
continue;
}
if (sample.raw_data == NULL) {
printf("%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
perf_evsel__name(evsel), sample.tid,
sample.cpu, sample.raw_size);
continue;
}
handler = evsel->handler.func;
handler(trace, evsel, &sample);
}
......@@ -540,7 +534,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (trace->nr_events == before) {
if (done)
goto out_delete_evlist;
goto out_unmap_evlist;
poll(evlist->pollfd, evlist->nr_fds, -1);
}
......@@ -550,6 +544,12 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
goto again;
out_unmap_evlist:
perf_evlist__munmap(evlist);
out_close_evlist:
perf_evlist__close(evlist);
out_delete_maps:
perf_evlist__delete_maps(evlist);
out_delete_evlist:
perf_evlist__delete(evlist);
out:
......
......@@ -10,17 +10,17 @@ perf-buildid-list mainporcelain common
perf-diff mainporcelain common
perf-evlist mainporcelain common
perf-inject mainporcelain common
perf-kmem mainporcelain common
perf-kvm mainporcelain common
perf-list mainporcelain common
perf-sched mainporcelain common
perf-lock mainporcelain common
perf-probe mainporcelain full
perf-record mainporcelain common
perf-report mainporcelain common
perf-sched mainporcelain common
perf-script mainporcelain common
perf-stat mainporcelain common
perf-test mainporcelain common
perf-timechart mainporcelain common
perf-top mainporcelain common
perf-trace mainporcelain common
perf-script mainporcelain common
perf-probe mainporcelain full
perf-kmem mainporcelain common
perf-lock mainporcelain common
perf-kvm mainporcelain common
perf-test mainporcelain common
......@@ -13,7 +13,7 @@
#include "util/quote.h"
#include "util/run-command.h"
#include "util/parse-events.h"
#include "util/debugfs.h"
#include <lk/debugfs.h>
#include <pthread.h>
const char perf_usage_string[] =
......@@ -193,13 +193,13 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
fprintf(stderr, "No directory given for --debugfs-dir.\n");
usage(perf_usage_string);
}
debugfs_set_path((*argv)[1]);
perf_debugfs_set_path((*argv)[1]);
if (envchanged)
*envchanged = 1;
(*argv)++;
(*argc)--;
} else if (!prefixcmp(cmd, CMD_DEBUGFS_DIR)) {
debugfs_set_path(cmd + strlen(CMD_DEBUGFS_DIR));
perf_debugfs_set_path(cmd + strlen(CMD_DEBUGFS_DIR));
fprintf(stderr, "dir: %s\n", debugfs_mountpoint);
if (envchanged)
*envchanged = 1;
......@@ -461,7 +461,7 @@ int main(int argc, const char **argv)
if (!cmd)
cmd = "perf-help";
/* get debugfs mount point from /proc/mounts */
debugfs_mount(NULL);
perf_debugfs_mount(NULL);
/*
* "perf-xxxx" is the same as "perf xxxx", but we obviously:
*
......
......@@ -147,10 +147,15 @@ void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
static int run_dir(const char *d, const char *perf)
{
char v[] = "-vvvvv";
int vcnt = min(verbose, (int) sizeof(v) - 1);
char cmd[3*PATH_MAX];
snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %s",
d, d, perf, verbose ? "-v" : "");
if (verbose)
vcnt++;
snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s",
d, d, perf, vcnt, v);
return system(cmd);
}
......
......@@ -24,6 +24,7 @@ class Unsup(Exception):
class Event(dict):
terms = [
'cpu',
'flags',
'type',
'size',
......@@ -121,7 +122,7 @@ class Test(object):
parser = ConfigParser.SafeConfigParser()
parser.read(path)
log.debug("running '%s'" % path)
log.warning("running '%s'" % path)
self.path = path
self.test_dir = options.test_dir
......@@ -172,7 +173,7 @@ class Test(object):
self.perf, self.command, tempdir, self.args)
ret = os.WEXITSTATUS(os.system(cmd))
log.warning(" running '%s' ret %d " % (cmd, ret))
log.info(" '%s' ret %d " % (cmd, ret))
if ret != int(self.ret):
raise Unsup(self)
......
......@@ -2,6 +2,7 @@
fd=1
group_fd=-1
flags=0
cpu=*
type=0|1
size=96
config=0
......
......@@ -2,6 +2,7 @@
fd=1
group_fd=-1
flags=0
cpu=*
type=0
size=96
config=0
......
[config]
command = record
args = -C 0 kill >/dev/null 2>&1
[event:base-record]
cpu=0
# no enable on exec for CPU attached
enable_on_exec=0
# PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_PERIOD
# + PERF_SAMPLE_CPU added by -C 0
sample_type=391
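# sanity check, assuming the usual perf_event.h sample_type bits
# (IP=0x1, TID=0x2, TIME=0x4, CPU=0x80, PERIOD=0x100):
# 0x1 + 0x2 + 0x4 + 0x100 + 0x80 = 0x187 = 391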
[config]
command = stat
args = -e cycles -C 0 kill >/dev/null 2>&1
ret = 1
[event:base-stat]
# events are enabled by default when attached to cpu
disabled=0
enable_on_exec=0
/*
* Inspired by breakpoint overflow test done by
* Vince Weaver <vincent.weaver@maine.edu> for perf_event_tests
* (git://github.com/deater/perf_event_tests)
*/
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <sys/ioctl.h>
#include <time.h>
#include <fcntl.h>
#include <signal.h>
#include <sys/mman.h>
#include <linux/compiler.h>
#include <linux/hw_breakpoint.h>
#include "tests.h"
#include "debug.h"
#include "perf.h"
static int fd1;
static int fd2;
static int overflows;
__attribute__ ((noinline))
static int test_function(void)
{
return time(NULL);
}
static void sig_handler(int signum __maybe_unused,
siginfo_t *oh __maybe_unused,
void *uc __maybe_unused)
{
overflows++;
if (overflows > 10) {
/*
* This should be executed only once during
* this test, if we are here for the 10th
* time, consider this the recursive issue.
*
* We can get out of here by disabling events,
* so no new SIGIO is delivered.
*/
ioctl(fd1, PERF_EVENT_IOC_DISABLE, 0);
ioctl(fd2, PERF_EVENT_IOC_DISABLE, 0);
}
}
static int bp_event(void *fn, int setup_signal)
{
struct perf_event_attr pe;
int fd;
memset(&pe, 0, sizeof(struct perf_event_attr));
pe.type = PERF_TYPE_BREAKPOINT;
pe.size = sizeof(struct perf_event_attr);
pe.config = 0;
pe.bp_type = HW_BREAKPOINT_X;
pe.bp_addr = (unsigned long) fn;
pe.bp_len = sizeof(long);
pe.sample_period = 1;
pe.sample_type = PERF_SAMPLE_IP;
pe.wakeup_events = 1;
pe.disabled = 1;
pe.exclude_kernel = 1;
pe.exclude_hv = 1;
fd = sys_perf_event_open(&pe, 0, -1, -1, 0);
if (fd < 0) {
pr_debug("failed opening event %llx\n", pe.config);
return TEST_FAIL;
}
if (setup_signal) {
fcntl(fd, F_SETFL, O_RDWR|O_NONBLOCK|O_ASYNC);
fcntl(fd, F_SETSIG, SIGIO);
fcntl(fd, F_SETOWN, getpid());
}
ioctl(fd, PERF_EVENT_IOC_RESET, 0);
return fd;
}
static long long bp_count(int fd)
{
long long count;
int ret;
ret = read(fd, &count, sizeof(long long));
if (ret != sizeof(long long)) {
pr_debug("failed to read: %d\n", ret);
return TEST_FAIL;
}
return count;
}
int test__bp_signal(void)
{
struct sigaction sa;
long long count1, count2;
/* setup SIGIO signal handler */
memset(&sa, 0, sizeof(struct sigaction));
sa.sa_sigaction = (void *) sig_handler;
sa.sa_flags = SA_SIGINFO;
if (sigaction(SIGIO, &sa, NULL) < 0) {
pr_debug("failed setting up signal handler\n");
return TEST_FAIL;
}
/*
* We create following events:
*
* fd1 - breakpoint event on test_function with SIGIO
* signal configured. We should get signal
* notification each time the breakpoint is hit
*
* fd2 - breakpoint event on sig_handler without SIGIO
* configured.
*
* Following processing should happen:
* - execute test_function
* - fd1 event breakpoint hit -> count1 == 1
* - SIGIO is delivered -> overflows == 1
* - fd2 event breakpoint hit -> count2 == 1
*
* The test case checks for the following error conditions:
* - we get stuck in the signal handler because of the debug
*   exception being triggered recursively due to
*   the wrong RF EFLAG management
*
* - we never trigger the sig_handler breakpoint due
*   to the wrong RF EFLAG management
*
*/
fd1 = bp_event(test_function, 1);
fd2 = bp_event(sig_handler, 0);
ioctl(fd1, PERF_EVENT_IOC_ENABLE, 0);
ioctl(fd2, PERF_EVENT_IOC_ENABLE, 0);
/*
* Kick off the test by triggering 'fd1'
* breakpoint.
*/
test_function();
ioctl(fd1, PERF_EVENT_IOC_DISABLE, 0);
ioctl(fd2, PERF_EVENT_IOC_DISABLE, 0);
count1 = bp_count(fd1);
count2 = bp_count(fd2);
close(fd1);
close(fd2);
pr_debug("count1 %lld, count2 %lld, overflow %d\n",
count1, count2, overflows);
if (count1 != 1) {
if (count1 == 11)
pr_debug("failed: RF EFLAG recursion issue detected\n");
else
pr_debug("failed: wrong count for bp1 %lld\n", count1);
}
if (overflows != 1)
pr_debug("failed: wrong overflow hit\n");
if (count2 != 1)
pr_debug("failed: wrong count for bp2\n");
return count1 == 1 && overflows == 1 && count2 == 1 ?
TEST_OK : TEST_FAIL;
}
/*
* Originally done by Vince Weaver <vincent.weaver@maine.edu> for
* perf_event_tests (git://github.com/deater/perf_event_tests)
*/
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <sys/ioctl.h>
#include <time.h>
#include <fcntl.h>
#include <signal.h>
#include <sys/mman.h>
#include <linux/compiler.h>
#include <linux/hw_breakpoint.h>
#include "tests.h"
#include "debug.h"
#include "perf.h"
static int overflows;
__attribute__ ((noinline))
static int test_function(void)
{
return time(NULL);
}
static void sig_handler(int signum __maybe_unused,
siginfo_t *oh __maybe_unused,
void *uc __maybe_unused)
{
overflows++;
}
static long long bp_count(int fd)
{
long long count;
int ret;
ret = read(fd, &count, sizeof(long long));
if (ret != sizeof(long long)) {
pr_debug("failed to read: %d\n", ret);
return TEST_FAIL;
}
return count;
}
#define EXECUTIONS 10000
#define THRESHOLD 100
int test__bp_signal_overflow(void)
{
struct perf_event_attr pe;
struct sigaction sa;
long long count;
int fd, i, fails = 0;
/* setup SIGIO signal handler */
memset(&sa, 0, sizeof(struct sigaction));
sa.sa_sigaction = (void *) sig_handler;
sa.sa_flags = SA_SIGINFO;
if (sigaction(SIGIO, &sa, NULL) < 0) {
pr_debug("failed setting up signal handler\n");
return TEST_FAIL;
}
memset(&pe, 0, sizeof(struct perf_event_attr));
pe.type = PERF_TYPE_BREAKPOINT;
pe.size = sizeof(struct perf_event_attr);
pe.config = 0;
pe.bp_type = HW_BREAKPOINT_X;
pe.bp_addr = (unsigned long) test_function;
pe.bp_len = sizeof(long);
pe.sample_period = THRESHOLD;
pe.sample_type = PERF_SAMPLE_IP;
pe.wakeup_events = 1;
pe.disabled = 1;
pe.exclude_kernel = 1;
pe.exclude_hv = 1;
fd = sys_perf_event_open(&pe, 0, -1, -1, 0);
if (fd < 0) {
pr_debug("failed opening event %llx\n", pe.config);
return TEST_FAIL;
}
fcntl(fd, F_SETFL, O_RDWR|O_NONBLOCK|O_ASYNC);
fcntl(fd, F_SETSIG, SIGIO);
fcntl(fd, F_SETOWN, getpid());
ioctl(fd, PERF_EVENT_IOC_RESET, 0);
ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
for (i = 0; i < EXECUTIONS; i++)
test_function();
ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
count = bp_count(fd);
close(fd);
pr_debug("count %lld, overflow %d\n",
count, overflows);
if (count != EXECUTIONS) {
pr_debug("\tWrong number of executions %lld != %d\n",
count, EXECUTIONS);
fails++;
}
if (overflows != EXECUTIONS / THRESHOLD) {
pr_debug("\tWrong number of overflows %d != %d\n",
overflows, EXECUTIONS / THRESHOLD);
fails++;
}
return fails ? TEST_FAIL : TEST_OK;
}
......@@ -77,6 +77,22 @@ static struct test {
.desc = "Try 'use perf' in python, checking link problems",
.func = test__python_use,
},
{
.desc = "Test breakpoint overflow signal handler",
.func = test__bp_signal,
},
{
.desc = "Test breakpoint overflow sampling",
.func = test__bp_signal_overflow,
},
{
.desc = "Test number of exit event of a simple workload",
.func = test__task_exit,
},
{
.desc = "Test software clock events have valid period values",
.func = test__sw_clock_freq,
},
{
.func = NULL,
},
......
......@@ -8,7 +8,7 @@ static int perf_evsel__roundtrip_cache_name_test(void)
char name[128];
int type, op, err = 0, ret = 0, i, idx;
struct perf_evsel *evsel;
struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
struct perf_evlist *evlist = perf_evlist__new();
if (evlist == NULL)
return -ENOMEM;
......@@ -64,7 +64,7 @@ static int __perf_evsel__name_array_test(const char *names[], int nr_names)
{
int i, err;
struct perf_evsel *evsel;
struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
struct perf_evlist *evlist = perf_evlist__new();
if (evlist == NULL)
return -ENOMEM;
......
......@@ -436,7 +436,7 @@ int test__hists_link(void)
struct machines machines;
struct machine *machine = NULL;
struct perf_evsel *evsel, *first;
struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
struct perf_evlist *evlist = perf_evlist__new();
if (evlist == NULL)
return -ENOMEM;
......
......@@ -53,12 +53,14 @@ int test__basic_mmap(void)
goto out_free_cpus;
}
evlist = perf_evlist__new(cpus, threads);
evlist = perf_evlist__new();
if (evlist == NULL) {
pr_debug("perf_evlist__new\n");
goto out_free_cpus;
}
perf_evlist__set_maps(evlist, cpus, threads);
for (i = 0; i < nsyscalls; ++i) {
char name[64];
......
......@@ -18,7 +18,7 @@ int test__syscall_open_tp_fields(void)
};
const char *filename = "/etc/passwd";
int flags = O_RDONLY | O_DIRECTORY;
struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
struct perf_evlist *evlist = perf_evlist__new();
struct perf_evsel *evsel;
int err = -1, i, nr_events = 0, nr_polls = 0;
......@@ -48,13 +48,13 @@ int test__syscall_open_tp_fields(void)
err = perf_evlist__open(evlist);
if (err < 0) {
pr_debug("perf_evlist__open: %s\n", strerror(errno));
goto out_delete_evlist;
goto out_delete_maps;
}
err = perf_evlist__mmap(evlist, UINT_MAX, false);
if (err < 0) {
pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
goto out_delete_evlist;
goto out_close_evlist;
}
perf_evlist__enable(evlist);
......@@ -110,6 +110,10 @@ int test__syscall_open_tp_fields(void)
err = 0;
out_munmap:
perf_evlist__munmap(evlist);
out_close_evlist:
perf_evlist__close(evlist);
out_delete_maps:
perf_evlist__delete_maps(evlist);
out_delete_evlist:
perf_evlist__delete(evlist);
out:
......
......@@ -3,7 +3,7 @@
#include "evsel.h"
#include "evlist.h"
#include "sysfs.h"
#include "debugfs.h"
#include <lk/debugfs.h>
#include "tests.h"
#include <linux/hw_breakpoint.h>
......@@ -1218,7 +1218,7 @@ static int test_event(struct evlist_test *e)
struct perf_evlist *evlist;
int ret;
evlist = perf_evlist__new(NULL, NULL);
evlist = perf_evlist__new();
if (evlist == NULL)
return -ENOMEM;
......
......@@ -45,7 +45,7 @@ int test__PERF_RECORD(void)
};
cpu_set_t cpu_mask;
size_t cpu_mask_size = sizeof(cpu_mask);
struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
struct perf_evlist *evlist = perf_evlist__new();
struct perf_evsel *evsel;
struct perf_sample sample;
const char *cmd = "sleep";
......@@ -93,7 +93,8 @@ int test__PERF_RECORD(void)
* so that we have time to open the evlist (calling sys_perf_event_open
* on all the fds) and then mmap them.
*/
err = perf_evlist__prepare_workload(evlist, &opts, argv);
err = perf_evlist__prepare_workload(evlist, &opts.target, argv,
false, false);
if (err < 0) {
pr_debug("Couldn't run the workload!\n");
goto out_delete_maps;
......@@ -142,7 +143,7 @@ int test__PERF_RECORD(void)
err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
if (err < 0) {
pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
goto out_delete_maps;
goto out_close_evlist;
}
/*
......@@ -305,6 +306,8 @@ int test__PERF_RECORD(void)
}
out_err:
perf_evlist__munmap(evlist);
out_close_evlist:
perf_evlist__close(evlist);
out_delete_maps:
perf_evlist__delete_maps(evlist);
out_delete_evlist:
......
#include <unistd.h>
#include <stdlib.h>
#include <signal.h>
#include <sys/mman.h>
#include "tests.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#define NR_LOOPS 1000000
/*
* This test will open software clock events (cpu-clock, task-clock)
* then check that their frequency -> period conversion has no artifact
* of the period being forcefully set to 1.
*/
static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
{
int i, err = -1;
volatile int tmp = 0;
u64 total_periods = 0;
int nr_samples = 0;
union perf_event *event;
struct perf_evsel *evsel;
struct perf_evlist *evlist;
struct perf_event_attr attr = {
.type = PERF_TYPE_SOFTWARE,
.config = clock_id,
.sample_type = PERF_SAMPLE_PERIOD,
.exclude_kernel = 1,
.disabled = 1,
.freq = 1,
};
attr.sample_freq = 10000;
evlist = perf_evlist__new();
if (evlist == NULL) {
pr_debug("perf_evlist__new\n");
return -1;
}
evsel = perf_evsel__new(&attr, 0);
if (evsel == NULL) {
pr_debug("perf_evsel__new\n");
goto out_free_evlist;
}
perf_evlist__add(evlist, evsel);
evlist->cpus = cpu_map__dummy_new();
evlist->threads = thread_map__new_by_tid(getpid());
if (!evlist->cpus || !evlist->threads) {
err = -ENOMEM;
pr_debug("Not enough memory to create thread/cpu maps\n");
goto out_delete_maps;
}
perf_evlist__open(evlist);
err = perf_evlist__mmap(evlist, 128, true);
if (err < 0) {
pr_debug("failed to mmap event: %d (%s)\n", errno,
strerror(errno));
goto out_close_evlist;
}
perf_evlist__enable(evlist);
/* collect samples */
for (i = 0; i < NR_LOOPS; i++)
tmp++;
perf_evlist__disable(evlist);
while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
struct perf_sample sample;
if (event->header.type != PERF_RECORD_SAMPLE)
continue;
err = perf_evlist__parse_sample(evlist, event, &sample);
if (err < 0) {
pr_debug("Error during parse sample\n");
goto out_unmap_evlist;
}
total_periods += sample.period;
nr_samples++;
}
if ((u64) nr_samples == total_periods) {
pr_debug("All (%d) samples have period value of 1!\n",
nr_samples);
err = -1;
}
out_unmap_evlist:
perf_evlist__munmap(evlist);
out_close_evlist:
perf_evlist__close(evlist);
out_delete_maps:
perf_evlist__delete_maps(evlist);
out_free_evlist:
perf_evlist__delete(evlist);
return err;
}
int test__sw_clock_freq(void)
{
int ret;
ret = __test__sw_clock_freq(PERF_COUNT_SW_CPU_CLOCK);
if (!ret)
ret = __test__sw_clock_freq(PERF_COUNT_SW_TASK_CLOCK);
return ret;
}
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "cpumap.h"
#include "tests.h"
#include <signal.h>
static int exited;
static int nr_exit;
static void sig_handler(int sig)
{
exited = 1;
if (sig == SIGUSR1)
nr_exit = -1;
}
/*
* This test will start a workload that does nothing, then check
* that the number of exit events reported by the kernel is 1,
* i.e. that the kernel returns the correct number of events.
*/
int test__task_exit(void)
{
int err = -1;
union perf_event *event;
struct perf_evsel *evsel;
struct perf_evlist *evlist;
struct perf_target target = {
.uid = UINT_MAX,
.uses_mmap = true,
};
const char *argv[] = { "true", NULL };
signal(SIGCHLD, sig_handler);
signal(SIGUSR1, sig_handler);
evlist = perf_evlist__new();
if (evlist == NULL) {
pr_debug("perf_evlist__new\n");
return -1;
}
/*
* We need at least one evsel in the evlist, use the default
* one: "cycles".
*/
err = perf_evlist__add_default(evlist);
if (err < 0) {
pr_debug("Not enough memory to create evsel\n");
goto out_free_evlist;
}
/*
* Create maps of threads and cpus to monitor. In this case
* we start with all threads and cpus (-1, -1) but then in
* perf_evlist__prepare_workload we'll fill in the only thread
* we're monitoring, the one forked there.
*/
evlist->cpus = cpu_map__dummy_new();
evlist->threads = thread_map__new_by_tid(-1);
if (!evlist->cpus || !evlist->threads) {
err = -ENOMEM;
pr_debug("Not enough memory to create thread/cpu maps\n");
goto out_delete_maps;
}
err = perf_evlist__prepare_workload(evlist, &target, argv, false, true);
if (err < 0) {
pr_debug("Couldn't run the workload!\n");
goto out_delete_maps;
}
evsel = perf_evlist__first(evlist);
evsel->attr.task = 1;
evsel->attr.sample_freq = 0;
evsel->attr.inherit = 0;
evsel->attr.watermark = 0;
evsel->attr.wakeup_events = 1;
evsel->attr.exclude_kernel = 1;
err = perf_evlist__open(evlist);
if (err < 0) {
pr_debug("Couldn't open the evlist: %s\n", strerror(-err));
goto out_delete_maps;
}
if (perf_evlist__mmap(evlist, 128, true) < 0) {
pr_debug("failed to mmap events: %d (%s)\n", errno,
strerror(errno));
goto out_close_evlist;
}
perf_evlist__start_workload(evlist);
retry:
while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
if (event->header.type != PERF_RECORD_EXIT)
continue;
nr_exit++;
}
if (!exited || !nr_exit) {
poll(evlist->pollfd, evlist->nr_fds, -1);
goto retry;
}
if (nr_exit != 1) {
pr_debug("received %d EXIT records\n", nr_exit);
err = -1;
}
perf_evlist__munmap(evlist);
out_close_evlist:
perf_evlist__close(evlist);
out_delete_maps:
perf_evlist__delete_maps(evlist);
out_free_evlist:
perf_evlist__delete(evlist);
return err;
}
......@@ -23,5 +23,9 @@ int test__dso_data(void);
int test__parse_events(void);
int test__hists_link(void);
int test__python_use(void);
int test__bp_signal(void);
int test__bp_signal_overflow(void);
int test__task_exit(void);
int test__sw_clock_freq(void);
#endif /* TESTS_H */
......@@ -8,15 +8,20 @@
#include "../../util/hist.h"
#include "../../util/sort.h"
#include "../../util/symbol.h"
#include "../../util/evsel.h"
#include <pthread.h>
#include <newt.h>
struct browser_disasm_line {
struct rb_node rb_node;
double percent;
u32 idx;
int idx_asm;
int jump_sources;
/*
* actual length of this array is saved in the nr_events field
* of the struct annotate_browser
*/
double percent[1];
};
static struct annotate_browser_opt {
......@@ -33,8 +38,9 @@ struct annotate_browser {
struct ui_browser b;
struct rb_root entries;
struct rb_node *curr_hot;
struct disasm_line *selection;
struct disasm_line *selection;
struct disasm_line **offsets;
int nr_events;
u64 start;
int nr_asm_entries;
int nr_entries;
......@@ -94,14 +100,24 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
(!current_entry || (browser->use_navkeypressed &&
!browser->navkeypressed)));
int width = browser->width, printed;
int i, pcnt_width = 7 * ab->nr_events;
double percent_max = 0.0;
char bf[256];
if (dl->offset != -1 && bdl->percent != 0.0) {
ui_browser__set_percent_color(browser, bdl->percent, current_entry);
slsmg_printf("%6.2f ", bdl->percent);
for (i = 0; i < ab->nr_events; i++) {
if (bdl->percent[i] > percent_max)
percent_max = bdl->percent[i];
}
if (dl->offset != -1 && percent_max != 0.0) {
for (i = 0; i < ab->nr_events; i++) {
ui_browser__set_percent_color(browser, bdl->percent[i],
current_entry);
slsmg_printf("%6.2f ", bdl->percent[i]);
}
} else {
ui_browser__set_percent_color(browser, 0, current_entry);
slsmg_write_nstring(" ", 7);
slsmg_write_nstring(" ", pcnt_width);
}
SLsmg_write_char(' ');
......@@ -111,12 +127,12 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
width += 1;
if (!*dl->line)
slsmg_write_nstring(" ", width - 7);
slsmg_write_nstring(" ", width - pcnt_width);
else if (dl->offset == -1) {
printed = scnprintf(bf, sizeof(bf), "%*s ",
ab->addr_width, " ");
slsmg_write_nstring(bf, printed);
slsmg_write_nstring(dl->line, width - printed - 6);
slsmg_write_nstring(dl->line, width - printed - pcnt_width + 1);
} else {
u64 addr = dl->offset;
int color = -1;
......@@ -175,7 +191,7 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
}
disasm_line__scnprintf(dl, bf, sizeof(bf), !annotate_browser__opts.use_offset);
slsmg_write_nstring(bf, width - 10 - printed);
slsmg_write_nstring(bf, width - pcnt_width - 3 - printed);
}
if (current_entry)
......@@ -200,6 +216,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
unsigned int from, to;
struct map_symbol *ms = ab->b.priv;
struct symbol *sym = ms->sym;
u8 pcnt_width = 7;
/* PLT symbols contain external offsets */
if (strstr(sym->name, "@plt"))
......@@ -223,57 +240,44 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
to = (u64)btarget->idx;
}
pcnt_width *= ab->nr_events;
ui_browser__set_color(browser, HE_COLORSET_CODE);
__ui_browser__line_arrow(browser, 9 + ab->addr_width, from, to);
__ui_browser__line_arrow(browser, pcnt_width + 2 + ab->addr_width,
from, to);
}
static unsigned int annotate_browser__refresh(struct ui_browser *browser)
{
struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
int ret = ui_browser__list_head_refresh(browser);
int pcnt_width;
pcnt_width = 7 * ab->nr_events;
if (annotate_browser__opts.jump_arrows)
annotate_browser__draw_current_jump(browser);
ui_browser__set_color(browser, HE_COLORSET_NORMAL);
__ui_browser__vline(browser, 7, 0, browser->height - 1);
__ui_browser__vline(browser, pcnt_width, 0, browser->height - 1);
return ret;
}
static double disasm_line__calc_percent(struct disasm_line *dl, struct symbol *sym, int evidx)
static int disasm__cmp(struct browser_disasm_line *a,
struct browser_disasm_line *b, int nr_pcnt)
{
double percent = 0.0;
if (dl->offset != -1) {
int len = sym->end - sym->start;
unsigned int hits = 0;
struct annotation *notes = symbol__annotation(sym);
struct source_line *src_line = notes->src->lines;
struct sym_hist *h = annotation__histogram(notes, evidx);
s64 offset = dl->offset;
struct disasm_line *next;
int i;
next = disasm__get_next_ip_line(&notes->src->source, dl);
while (offset < (s64)len &&
(next == NULL || offset < next->offset)) {
if (src_line) {
percent += src_line[offset].percent;
} else
hits += h->addr[offset];
++offset;
}
/*
* If the percentage wasn't already calculated in
* symbol__get_source_line, do it now:
*/
if (src_line == NULL && h->sum)
percent = 100.0 * hits / h->sum;
for (i = 0; i < nr_pcnt; i++) {
if (a->percent[i] == b->percent[i])
continue;
return a->percent[i] < b->percent[i];
}
return percent;
return 0;
}
static void disasm_rb_tree__insert(struct rb_root *root, struct browser_disasm_line *bdl)
static void disasm_rb_tree__insert(struct rb_root *root, struct browser_disasm_line *bdl,
int nr_events)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
......@@ -282,7 +286,8 @@ static void disasm_rb_tree__insert(struct rb_root *root, struct browser_disasm_l
while (*p != NULL) {
parent = *p;
l = rb_entry(parent, struct browser_disasm_line, rb_node);
if (bdl->percent < l->percent)
if (disasm__cmp(bdl, l, nr_events))
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
......@@ -331,12 +336,13 @@ static void annotate_browser__set_rb_top(struct annotate_browser *browser,
}
static void annotate_browser__calc_percent(struct annotate_browser *browser,
int evidx)
struct perf_evsel *evsel)
{
struct map_symbol *ms = browser->b.priv;
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
struct disasm_line *pos;
struct disasm_line *pos, *next;
s64 len = symbol__size(sym);
browser->entries = RB_ROOT;
......@@ -344,12 +350,34 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser,
list_for_each_entry(pos, &notes->src->source, node) {
struct browser_disasm_line *bpos = disasm_line__browser(pos);
bpos->percent = disasm_line__calc_percent(pos, sym, evidx);
if (bpos->percent < 0.01) {
const char *path = NULL;
double max_percent = 0.0;
int i;
if (pos->offset == -1) {
RB_CLEAR_NODE(&bpos->rb_node);
continue;
}
next = disasm__get_next_ip_line(&notes->src->source, pos);
for (i = 0; i < browser->nr_events; i++) {
bpos->percent[i] = disasm__calc_percent(notes,
evsel->idx + i,
pos->offset,
next ? next->offset : len,
&path);
if (max_percent < bpos->percent[i])
max_percent = bpos->percent[i];
}
if (max_percent < 0.01) {
RB_CLEAR_NODE(&bpos->rb_node);
continue;
}
disasm_rb_tree__insert(&browser->entries, bpos);
disasm_rb_tree__insert(&browser->entries, bpos,
browser->nr_events);
}
pthread_mutex_unlock(&notes->lock);
......@@ -401,7 +429,8 @@ static void annotate_browser__init_asm_mode(struct annotate_browser *browser)
browser->b.nr_entries = browser->nr_asm_entries;
}
static bool annotate_browser__callq(struct annotate_browser *browser, int evidx,
static bool annotate_browser__callq(struct annotate_browser *browser,
struct perf_evsel *evsel,
struct hist_browser_timer *hbt)
{
struct map_symbol *ms = browser->b.priv;
......@@ -432,7 +461,7 @@ static bool annotate_browser__callq(struct annotate_browser *browser, int evidx,
}
pthread_mutex_unlock(&notes->lock);
symbol__tui_annotate(target, ms->map, evidx, hbt);
symbol__tui_annotate(target, ms->map, evsel, hbt);
ui_browser__show_title(&browser->b, sym->name);
return true;
}
......@@ -615,7 +644,8 @@ static void annotate_browser__update_addr_width(struct annotate_browser *browser
browser->addr_width += browser->jumps_width + 1;
}
static int annotate_browser__run(struct annotate_browser *browser, int evidx,
static int annotate_browser__run(struct annotate_browser *browser,
struct perf_evsel *evsel,
struct hist_browser_timer *hbt)
{
struct rb_node *nd = NULL;
......@@ -628,7 +658,7 @@ static int annotate_browser__run(struct annotate_browser *browser, int evidx,
if (ui_browser__show(&browser->b, sym->name, help) < 0)
return -1;
annotate_browser__calc_percent(browser, evidx);
annotate_browser__calc_percent(browser, evsel);
if (browser->curr_hot) {
annotate_browser__set_rb_top(browser, browser->curr_hot);
......@@ -641,7 +671,7 @@ static int annotate_browser__run(struct annotate_browser *browser, int evidx,
key = ui_browser__run(&browser->b, delay_secs);
if (delay_secs != 0) {
annotate_browser__calc_percent(browser, evidx);
annotate_browser__calc_percent(browser, evsel);
/*
* Current line focus got out of the list of most active
* lines, NULL it so that if TAB|UNTAB is pressed, we
......@@ -657,7 +687,7 @@ static int annotate_browser__run(struct annotate_browser *browser, int evidx,
hbt->timer(hbt->arg);
if (delay_secs != 0)
symbol__annotate_decay_histogram(sym, evidx);
symbol__annotate_decay_histogram(sym, evsel->idx);
continue;
case K_TAB:
if (nd != NULL) {
......@@ -754,7 +784,7 @@ static int annotate_browser__run(struct annotate_browser *browser, int evidx,
goto show_sup_ins;
goto out;
} else if (!(annotate_browser__jump(browser) ||
annotate_browser__callq(browser, evidx, hbt))) {
annotate_browser__callq(browser, evsel, hbt))) {
show_sup_ins:
ui_helpline__puts("Actions are only available for 'callq', 'retq' & jump instructions.");
}
......@@ -776,10 +806,10 @@ static int annotate_browser__run(struct annotate_browser *browser, int evidx,
return key;
}
int hist_entry__tui_annotate(struct hist_entry *he, int evidx,
int hist_entry__tui_annotate(struct hist_entry *he, struct perf_evsel *evsel,
struct hist_browser_timer *hbt)
{
return symbol__tui_annotate(he->ms.sym, he->ms.map, evidx, hbt);
return symbol__tui_annotate(he->ms.sym, he->ms.map, evsel, hbt);
}
static void annotate_browser__mark_jump_targets(struct annotate_browser *browser,
......@@ -826,7 +856,8 @@ static inline int width_jumps(int n)
return 1;
}
int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
int symbol__tui_annotate(struct symbol *sym, struct map *map,
struct perf_evsel *evsel,
struct hist_browser_timer *hbt)
{
struct disasm_line *pos, *n;
......@@ -847,6 +878,8 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
},
};
int ret = -1;
int nr_pcnt = 1;
size_t sizeof_bdl = sizeof(struct browser_disasm_line);
if (sym == NULL)
return -1;
......@@ -862,7 +895,12 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
return -1;
}
if (symbol__annotate(sym, map, sizeof(struct browser_disasm_line)) < 0) {
if (perf_evsel__is_group_event(evsel)) {
nr_pcnt = evsel->nr_members;
sizeof_bdl += sizeof(double) * (nr_pcnt - 1);
}
if (symbol__annotate(sym, map, sizeof_bdl) < 0) {
ui__error("%s", ui_helpline__last_msg);
goto out_free_offsets;
}
......@@ -900,6 +938,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
browser.addr_width = browser.target_width = browser.min_addr_width = hex_width(size);
browser.max_addr_width = hex_width(sym->end);
browser.jumps_width = width_jumps(browser.max_jump_sources);
browser.nr_events = nr_pcnt;
browser.b.nr_entries = browser.nr_entries;
browser.b.entries = &notes->src->source,
browser.b.width += 18; /* Percentage */
......@@ -909,7 +948,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
annotate_browser__update_addr_width(&browser);
ret = annotate_browser__run(&browser, evidx, hbt);
ret = annotate_browser__run(&browser, evsel, hbt);
list_for_each_entry_safe(pos, n, &notes->src->source, node) {
list_del(&pos->node);
disasm_line__free(pos);
......
......@@ -1193,7 +1193,7 @@ static int hists__browser_title(struct hists *hists, char *bf, size_t size,
char buf[512];
size_t buflen = sizeof(buf);
if (symbol_conf.event_group && evsel->nr_members > 1) {
if (perf_evsel__is_group_event(evsel)) {
struct perf_evsel *pos;
perf_evsel__group_desc(evsel, buf, buflen);
......@@ -1599,7 +1599,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
* Don't let this be freed, say, by hists__decay_entry.
*/
he->used = true;
err = hist_entry__tui_annotate(he, evsel->idx, hbt);
err = hist_entry__tui_annotate(he, evsel, hbt);
he->used = false;
/*
* offer option to annotate the other branch source or target
......@@ -1709,7 +1709,7 @@ static void perf_evsel_menu__write(struct ui_browser *browser,
ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
HE_COLORSET_NORMAL);
if (symbol_conf.event_group && evsel->nr_members > 1) {
if (perf_evsel__is_group_event(evsel)) {
struct perf_evsel *pos;
ev_name = perf_evsel__group_name(evsel);
......
#include "gtk.h"
#include "util/debug.h"
#include "util/annotate.h"
#include "util/evsel.h"
#include "ui/helpline.h"
......@@ -32,7 +33,7 @@ static int perf_gtk__get_percent(char *buf, size_t size, struct symbol *sym,
return 0;
symhist = annotation__histogram(symbol__annotation(sym), evidx);
if (!symhist->addr[dl->offset])
if (!symbol_conf.event_group && !symhist->addr[dl->offset])
return 0;
percent = 100.0 * symhist->addr[dl->offset] / symhist->sum;
......@@ -85,7 +86,7 @@ static int perf_gtk__get_line(char *buf, size_t size, struct disasm_line *dl)
}
static int perf_gtk__annotate_symbol(GtkWidget *window, struct symbol *sym,
struct map *map, int evidx,
struct map *map, struct perf_evsel *evsel,
struct hist_browser_timer *hbt __maybe_unused)
{
struct disasm_line *pos, *n;
......@@ -118,10 +119,24 @@ static int perf_gtk__annotate_symbol(GtkWidget *window, struct symbol *sym,
list_for_each_entry(pos, &notes->src->source, node) {
GtkTreeIter iter;
int ret = 0;
gtk_list_store_append(store, &iter);
if (perf_gtk__get_percent(s, sizeof(s), sym, pos, evidx))
if (perf_evsel__is_group_event(evsel)) {
for (i = 0; i < evsel->nr_members; i++) {
ret += perf_gtk__get_percent(s + ret,
sizeof(s) - ret,
sym, pos,
evsel->idx + i);
ret += scnprintf(s + ret, sizeof(s) - ret, " ");
}
} else {
ret = perf_gtk__get_percent(s, sizeof(s), sym, pos,
evsel->idx);
}
if (ret)
gtk_list_store_set(store, &iter, ANN_COL__PERCENT, s, -1);
if (perf_gtk__get_offset(s, sizeof(s), sym, map, pos))
gtk_list_store_set(store, &iter, ANN_COL__OFFSET, s, -1);
......@@ -139,7 +154,8 @@ static int perf_gtk__annotate_symbol(GtkWidget *window, struct symbol *sym,
return 0;
}
int symbol__gtk_annotate(struct symbol *sym, struct map *map, int evidx,
int symbol__gtk_annotate(struct symbol *sym, struct map *map,
struct perf_evsel *evsel,
struct hist_browser_timer *hbt)
{
GtkWidget *window;
......@@ -206,7 +222,7 @@ int symbol__gtk_annotate(struct symbol *sym, struct map *map, int evidx,
gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window,
tab_label);
perf_gtk__annotate_symbol(scrolled_window, sym, map, evidx, hbt);
perf_gtk__annotate_symbol(scrolled_window, sym, map, evsel, hbt);
return 0;
}
......
......@@ -32,21 +32,18 @@ static int __hpp__color_fmt(struct perf_hpp *hpp, struct hist_entry *he,
int ret;
double percent = 0.0;
struct hists *hists = he->hists;
struct perf_evsel *evsel = hists_to_evsel(hists);
if (hists->stats.total_period)
percent = 100.0 * get_field(he) / hists->stats.total_period;
ret = __percent_color_snprintf(hpp->buf, hpp->size, percent);
if (symbol_conf.event_group) {
if (perf_evsel__is_group_event(evsel)) {
int prev_idx, idx_delta;
struct perf_evsel *evsel = hists_to_evsel(hists);
struct hist_entry *pair;
int nr_members = evsel->nr_members;
if (nr_members <= 1)
return ret;
prev_idx = perf_evsel__group_idx(evsel);
list_for_each_entry(pair, &he->pairs.head, pairs.node) {
......
......@@ -16,6 +16,7 @@ static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
{
int ret;
struct hists *hists = he->hists;
struct perf_evsel *evsel = hists_to_evsel(hists);
if (fmt_percent) {
double percent = 0.0;
......@@ -28,15 +29,11 @@ static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
} else
ret = print_fn(hpp->buf, hpp->size, fmt, get_field(he));
if (symbol_conf.event_group) {
if (perf_evsel__is_group_event(evsel)) {
int prev_idx, idx_delta;
struct perf_evsel *evsel = hists_to_evsel(hists);
struct hist_entry *pair;
int nr_members = evsel->nr_members;
if (nr_members <= 1)
return ret;
prev_idx = perf_evsel__group_idx(evsel);
list_for_each_entry(pair, &he->pairs.head, pairs.node) {
......
......@@ -14,6 +14,7 @@
#include "symbol.h"
#include "debug.h"
#include "annotate.h"
#include "evsel.h"
#include <pthread.h>
#include <linux/bitops.h>
......@@ -602,8 +603,42 @@ struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disa
return NULL;
}
double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset,
s64 end, const char **path)
{
struct source_line *src_line = notes->src->lines;
double percent = 0.0;
if (src_line) {
size_t sizeof_src_line = sizeof(*src_line) +
sizeof(src_line->p) * (src_line->nr_pcnt - 1);
while (offset < end) {
src_line = (void *)notes->src->lines +
(sizeof_src_line * offset);
if (*path == NULL)
*path = src_line->path;
percent += src_line->p[evidx].percent;
offset++;
}
} else {
struct sym_hist *h = annotation__histogram(notes, evidx);
unsigned int hits = 0;
while (offset < end)
hits += h->addr[offset++];
if (h->sum)
percent = 100.0 * hits / h->sum;
}
return percent;
}
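With per-event percentages, notes->src->lines is now a flat array of variable-sized struct source_line entries, so it can no longer be indexed as an ordinary C array; disasm__calc_percent() above steps through it by byte stride instead. The helper below is a minimal sketch of that same stride arithmetic — not part of the patch, and the name src_line_at is hypothetical — assuming the perf-internal struct source_line layout introduced later in this diff.

/* Sketch only: index the variable-sized source_line table by byte stride,
 * exactly as disasm__calc_percent() does above. */
static struct source_line *src_line_at(struct source_line *lines, s64 idx)
{
	size_t stride = sizeof(*lines) +
			sizeof(lines->p) * (lines->nr_pcnt - 1);

	return (void *)lines + stride * idx;
}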
static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 start,
int evidx, u64 len, int min_pcnt, int printed,
struct perf_evsel *evsel, u64 len, int min_pcnt, int printed,
int max_lines, struct disasm_line *queue)
{
static const char *prev_line;
......@@ -611,34 +646,37 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st
if (dl->offset != -1) {
const char *path = NULL;
unsigned int hits = 0;
double percent = 0.0;
double percent, max_percent = 0.0;
double *ppercents = &percent;
int i, nr_percent = 1;
const char *color;
struct annotation *notes = symbol__annotation(sym);
struct source_line *src_line = notes->src->lines;
struct sym_hist *h = annotation__histogram(notes, evidx);
s64 offset = dl->offset;
const u64 addr = start + offset;
struct disasm_line *next;
next = disasm__get_next_ip_line(&notes->src->source, dl);
while (offset < (s64)len &&
(next == NULL || offset < next->offset)) {
if (src_line) {
if (path == NULL)
path = src_line[offset].path;
percent += src_line[offset].percent;
} else
hits += h->addr[offset];
++offset;
if (perf_evsel__is_group_event(evsel)) {
nr_percent = evsel->nr_members;
ppercents = calloc(nr_percent, sizeof(double));
if (ppercents == NULL)
return -1;
}
if (src_line == NULL && h->sum)
percent = 100.0 * hits / h->sum;
for (i = 0; i < nr_percent; i++) {
percent = disasm__calc_percent(notes,
notes->src->lines ? i : evsel->idx + i,
offset,
next ? next->offset : (s64) len,
&path);
ppercents[i] = percent;
if (percent > max_percent)
max_percent = percent;
}
if (percent < min_pcnt)
if (max_percent < min_pcnt)
return -1;
if (max_lines && printed >= max_lines)
......@@ -648,12 +686,12 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st
list_for_each_entry_from(queue, &notes->src->source, node) {
if (queue == dl)
break;
disasm_line__print(queue, sym, start, evidx, len,
disasm_line__print(queue, sym, start, evsel, len,
0, 0, 1, NULL);
}
}
color = get_percent_color(percent);
color = get_percent_color(max_percent);
/*
* Also color the filename and line if needed, with
......@@ -669,25 +707,59 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st
}
}
color_fprintf(stdout, color, " %7.2f", percent);
for (i = 0; i < nr_percent; i++) {
percent = ppercents[i];
color = get_percent_color(percent);
color_fprintf(stdout, color, " %7.2f", percent);
}
printf(" : ");
color_fprintf(stdout, PERF_COLOR_MAGENTA, " %" PRIx64 ":", addr);
color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", dl->line);
if (ppercents != &percent)
free(ppercents);
} else if (max_lines && printed >= max_lines)
return 1;
else {
int width = 8;
if (queue)
return -1;
if (perf_evsel__is_group_event(evsel))
width *= evsel->nr_members;
if (!*dl->line)
printf(" :\n");
printf(" %*s:\n", width, " ");
else
printf(" : %s\n", dl->line);
printf(" %*s: %s\n", width, " ", dl->line);
}
return 0;
}
/*
* symbol__parse_objdump_line() parses objdump output (with -d --no-show-raw)
* which looks like the following
*
* 0000000000415500 <_init>:
* 415500: sub $0x8,%rsp
* 415504: mov 0x2f5ad5(%rip),%rax # 70afe0 <_DYNAMIC+0x2f8>
* 41550b: test %rax,%rax
* 41550e: je 415515 <_init+0x15>
* 415510: callq 416e70 <__gmon_start__@plt>
* 415515: add $0x8,%rsp
* 415519: retq
*
* it will be parsed and saved into struct disasm_line as
* <offset> <name> <ops.raw>
*
* The offset will be a relative offset from the start of the symbol and -1
* means that it's not a disassembly line so should be treated differently.
* The ops.raw part will be parsed further according to type of the instruction.
*/
static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
FILE *file, size_t privsize)
{
......@@ -858,7 +930,7 @@ static void insert_source_line(struct rb_root *root, struct source_line *src_lin
struct source_line *iter;
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
int ret;
int i, ret;
while (*p != NULL) {
parent = *p;
......@@ -866,7 +938,8 @@ static void insert_source_line(struct rb_root *root, struct source_line *src_lin
ret = strcmp(iter->path, src_line->path);
if (ret == 0) {
iter->percent_sum += src_line->percent;
for (i = 0; i < src_line->nr_pcnt; i++)
iter->p[i].percent_sum += src_line->p[i].percent;
return;
}
......@@ -876,12 +949,26 @@ static void insert_source_line(struct rb_root *root, struct source_line *src_lin
p = &(*p)->rb_right;
}
src_line->percent_sum = src_line->percent;
for (i = 0; i < src_line->nr_pcnt; i++)
src_line->p[i].percent_sum = src_line->p[i].percent;
rb_link_node(&src_line->node, parent, p);
rb_insert_color(&src_line->node, root);
}
static int cmp_source_line(struct source_line *a, struct source_line *b)
{
int i;
for (i = 0; i < a->nr_pcnt; i++) {
if (a->p[i].percent_sum == b->p[i].percent_sum)
continue;
return a->p[i].percent_sum > b->p[i].percent_sum;
}
return 0;
}
static void __resort_source_line(struct rb_root *root, struct source_line *src_line)
{
struct source_line *iter;
......@@ -892,7 +979,7 @@ static void __resort_source_line(struct rb_root *root, struct source_line *src_l
parent = *p;
iter = rb_entry(parent, struct source_line, node);
if (src_line->percent_sum > iter->percent_sum)
if (cmp_source_line(src_line, iter))
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
......@@ -924,32 +1011,52 @@ static void symbol__free_source_line(struct symbol *sym, int len)
{
struct annotation *notes = symbol__annotation(sym);
struct source_line *src_line = notes->src->lines;
size_t sizeof_src_line;
int i;
for (i = 0; i < len; i++)
free(src_line[i].path);
sizeof_src_line = sizeof(*src_line) +
(sizeof(src_line->p) * (src_line->nr_pcnt - 1));
for (i = 0; i < len; i++) {
free(src_line->path);
src_line = (void *)src_line + sizeof_src_line;
}
free(src_line);
free(notes->src->lines);
notes->src->lines = NULL;
}
/* Get the filename:line for the colored entries */
static int symbol__get_source_line(struct symbol *sym, struct map *map,
int evidx, struct rb_root *root, int len,
struct perf_evsel *evsel,
struct rb_root *root, int len,
const char *filename)
{
u64 start;
int i;
int i, k;
int evidx = evsel->idx;
char cmd[PATH_MAX * 2];
struct source_line *src_line;
struct annotation *notes = symbol__annotation(sym);
struct sym_hist *h = annotation__histogram(notes, evidx);
struct rb_root tmp_root = RB_ROOT;
int nr_pcnt = 1;
u64 h_sum = h->sum;
size_t sizeof_src_line = sizeof(struct source_line);
if (perf_evsel__is_group_event(evsel)) {
for (i = 1; i < evsel->nr_members; i++) {
h = annotation__histogram(notes, evidx + i);
h_sum += h->sum;
}
nr_pcnt = evsel->nr_members;
sizeof_src_line += (nr_pcnt - 1) * sizeof(src_line->p);
}
if (!h->sum)
if (!h_sum)
return 0;
src_line = notes->src->lines = calloc(len, sizeof(struct source_line));
src_line = notes->src->lines = calloc(len, sizeof_src_line);
if (!notes->src->lines)
return -1;
......@@ -960,29 +1067,41 @@ static int symbol__get_source_line(struct symbol *sym, struct map *map,
size_t line_len;
u64 offset;
FILE *fp;
double percent_max = 0.0;
src_line[i].percent = 100.0 * h->addr[i] / h->sum;
if (src_line[i].percent <= 0.5)
continue;
src_line->nr_pcnt = nr_pcnt;
for (k = 0; k < nr_pcnt; k++) {
h = annotation__histogram(notes, evidx + k);
src_line->p[k].percent = 100.0 * h->addr[i] / h->sum;
if (src_line->p[k].percent > percent_max)
percent_max = src_line->p[k].percent;
}
if (percent_max <= 0.5)
goto next;
offset = start + i;
sprintf(cmd, "addr2line -e %s %016" PRIx64, filename, offset);
fp = popen(cmd, "r");
if (!fp)
continue;
goto next;
if (getline(&path, &line_len, fp) < 0 || !line_len)
goto next;
goto next_close;
src_line[i].path = malloc(sizeof(char) * line_len + 1);
if (!src_line[i].path)
goto next;
src_line->path = malloc(sizeof(char) * line_len + 1);
if (!src_line->path)
goto next_close;
strcpy(src_line[i].path, path);
insert_source_line(&tmp_root, &src_line[i]);
strcpy(src_line->path, path);
insert_source_line(&tmp_root, src_line);
next:
next_close:
pclose(fp);
next:
src_line = (void *)src_line + sizeof_src_line;
}
resort_source_line(root, &tmp_root);
......@@ -1004,24 +1123,33 @@ static void print_summary(struct rb_root *root, const char *filename)
node = rb_first(root);
while (node) {
double percent;
double percent, percent_max = 0.0;
const char *color;
char *path;
int i;
src_line = rb_entry(node, struct source_line, node);
percent = src_line->percent_sum;
color = get_percent_color(percent);
for (i = 0; i < src_line->nr_pcnt; i++) {
percent = src_line->p[i].percent_sum;
color = get_percent_color(percent);
color_fprintf(stdout, color, " %7.2f", percent);
if (percent > percent_max)
percent_max = percent;
}
path = src_line->path;
color = get_percent_color(percent_max);
color_fprintf(stdout, color, " %s", path);
color_fprintf(stdout, color, " %7.2f %s", percent, path);
node = rb_next(node);
}
}
static void symbol__annotate_hits(struct symbol *sym, int evidx)
static void symbol__annotate_hits(struct symbol *sym, struct perf_evsel *evsel)
{
struct annotation *notes = symbol__annotation(sym);
struct sym_hist *h = annotation__histogram(notes, evidx);
struct sym_hist *h = annotation__histogram(notes, evsel->idx);
u64 len = symbol__size(sym), offset;
for (offset = 0; offset < len; ++offset)
......@@ -1031,9 +1159,9 @@ static void symbol__annotate_hits(struct symbol *sym, int evidx)
printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->sum", h->sum);
}
int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
bool full_paths, int min_pcnt, int max_lines,
int context)
int symbol__annotate_printf(struct symbol *sym, struct map *map,
struct perf_evsel *evsel, bool full_paths,
int min_pcnt, int max_lines, int context)
{
struct dso *dso = map->dso;
char *filename;
......@@ -1044,6 +1172,8 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
int printed = 2, queue_len = 0;
int more = 0;
u64 len;
int width = 8;
int namelen;
filename = strdup(dso->long_name);
if (!filename)
......@@ -1055,12 +1185,18 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
d_filename = basename(filename);
len = symbol__size(sym);
namelen = strlen(d_filename);
if (perf_evsel__is_group_event(evsel))
width *= evsel->nr_members;
printf(" Percent | Source code & Disassembly of %s\n", d_filename);
printf("------------------------------------------------\n");
printf(" %-*.*s| Source code & Disassembly of %s\n",
width, width, "Percent", d_filename);
printf("-%-*.*s-------------------------------------\n",
width+namelen, width+namelen, graph_dotted_line);
if (verbose)
symbol__annotate_hits(sym, evidx);
symbol__annotate_hits(sym, evsel);
list_for_each_entry(pos, &notes->src->source, node) {
if (context && queue == NULL) {
......@@ -1068,7 +1204,7 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
queue_len = 0;
}
switch (disasm_line__print(pos, sym, start, evidx, len,
switch (disasm_line__print(pos, sym, start, evsel, len,
min_pcnt, printed, max_lines,
queue)) {
case 0:
......@@ -1163,9 +1299,9 @@ size_t disasm__fprintf(struct list_head *head, FILE *fp)
return printed;
}
int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx,
bool print_lines, bool full_paths, int min_pcnt,
int max_lines)
int symbol__tty_annotate(struct symbol *sym, struct map *map,
struct perf_evsel *evsel, bool print_lines,
bool full_paths, int min_pcnt, int max_lines)
{
struct dso *dso = map->dso;
const char *filename = dso->long_name;
......@@ -1178,12 +1314,12 @@ int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx,
len = symbol__size(sym);
if (print_lines) {
symbol__get_source_line(sym, map, evidx, &source_line,
symbol__get_source_line(sym, map, evsel, &source_line,
len, filename);
print_summary(&source_line, filename);
}
symbol__annotate_printf(sym, map, evidx, full_paths,
symbol__annotate_printf(sym, map, evsel, full_paths,
min_pcnt, max_lines, 0);
if (print_lines)
symbol__free_source_line(sym, len);
......
......@@ -50,6 +50,8 @@ bool ins__is_jump(const struct ins *ins);
bool ins__is_call(const struct ins *ins);
int ins__scnprintf(struct ins *ins, char *bf, size_t size, struct ins_operands *ops);
struct annotation;
struct disasm_line {
struct list_head node;
s64 offset;
......@@ -68,17 +70,24 @@ void disasm_line__free(struct disasm_line *dl);
struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos);
int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw);
size_t disasm__fprintf(struct list_head *head, FILE *fp);
double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset,
s64 end, const char **path);
struct sym_hist {
u64 sum;
u64 addr[0];
};
struct source_line {
struct rb_node node;
struct source_line_percent {
double percent;
double percent_sum;
};
struct source_line {
struct rb_node node;
char *path;
int nr_pcnt;
struct source_line_percent p[1];
};
/** struct annotated_source - symbols with hits have this attached in their annotation
......@@ -130,47 +139,49 @@ void symbol__annotate_zero_histograms(struct symbol *sym);
int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize);
int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym);
int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
bool full_paths, int min_pcnt, int max_lines,
int context);
int symbol__annotate_printf(struct symbol *sym, struct map *map,
struct perf_evsel *evsel, bool full_paths,
int min_pcnt, int max_lines, int context);
void symbol__annotate_zero_histogram(struct symbol *sym, int evidx);
void symbol__annotate_decay_histogram(struct symbol *sym, int evidx);
void disasm__purge(struct list_head *head);
int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx,
bool print_lines, bool full_paths, int min_pcnt,
int max_lines);
int symbol__tty_annotate(struct symbol *sym, struct map *map,
struct perf_evsel *evsel, bool print_lines,
bool full_paths, int min_pcnt, int max_lines);
#ifdef NEWT_SUPPORT
int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
int symbol__tui_annotate(struct symbol *sym, struct map *map,
struct perf_evsel *evsel,
struct hist_browser_timer *hbt);
#else
static inline int symbol__tui_annotate(struct symbol *sym __maybe_unused,
struct map *map __maybe_unused,
int evidx __maybe_unused,
struct hist_browser_timer *hbt
__maybe_unused)
struct map *map __maybe_unused,
struct perf_evsel *evsel __maybe_unused,
struct hist_browser_timer *hbt
__maybe_unused)
{
return 0;
}
#endif
#ifdef GTK2_SUPPORT
int symbol__gtk_annotate(struct symbol *sym, struct map *map, int evidx,
int symbol__gtk_annotate(struct symbol *sym, struct map *map,
struct perf_evsel *evsel,
struct hist_browser_timer *hbt);
static inline int hist_entry__gtk_annotate(struct hist_entry *he, int evidx,
static inline int hist_entry__gtk_annotate(struct hist_entry *he,
struct perf_evsel *evsel,
struct hist_browser_timer *hbt)
{
return symbol__gtk_annotate(he->ms.sym, he->ms.map, evidx, hbt);
return symbol__gtk_annotate(he->ms.sym, he->ms.map, evsel, hbt);
}
void perf_gtk__show_annotations(void);
#else
static inline int hist_entry__gtk_annotate(struct hist_entry *he __maybe_unused,
int evidx __maybe_unused,
struct hist_browser_timer *hbt
__maybe_unused)
struct perf_evsel *evsel __maybe_unused,
struct hist_browser_timer *hbt __maybe_unused)
{
return 0;
}
......
#ifndef __DEBUGFS_H__
#define __DEBUGFS_H__
const char *debugfs_find_mountpoint(void);
int debugfs_valid_mountpoint(const char *debugfs);
char *debugfs_mount(const char *mountpoint);
void debugfs_set_path(const char *mountpoint);
extern char debugfs_mountpoint[];
extern char tracing_events_path[];
#endif /* __DEBUGFS_H__ */
......@@ -7,7 +7,7 @@
* Released under the GPL v2. (and only v2, not any later version)
*/
#include "util.h"
#include "debugfs.h"
#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
......@@ -38,13 +38,12 @@ void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
evlist->workload.pid = -1;
}
struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
struct thread_map *threads)
struct perf_evlist *perf_evlist__new(void)
{
struct perf_evlist *evlist = zalloc(sizeof(*evlist));
if (evlist != NULL)
perf_evlist__init(evlist, cpus, threads);
perf_evlist__init(evlist, NULL, NULL);
return evlist;
}
......@@ -228,12 +227,14 @@ void perf_evlist__disable(struct perf_evlist *evlist)
{
int cpu, thread;
struct perf_evsel *pos;
int nr_cpus = cpu_map__nr(evlist->cpus);
int nr_threads = thread_map__nr(evlist->threads);
for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
for (cpu = 0; cpu < nr_cpus; cpu++) {
list_for_each_entry(pos, &evlist->entries, node) {
if (!perf_evsel__is_group_leader(pos))
continue;
for (thread = 0; thread < evlist->threads->nr; thread++)
for (thread = 0; thread < nr_threads; thread++)
ioctl(FD(pos, cpu, thread),
PERF_EVENT_IOC_DISABLE, 0);
}
......@@ -244,12 +245,14 @@ void perf_evlist__enable(struct perf_evlist *evlist)
{
int cpu, thread;
struct perf_evsel *pos;
int nr_cpus = cpu_map__nr(evlist->cpus);
int nr_threads = thread_map__nr(evlist->threads);
for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
for (cpu = 0; cpu < nr_cpus; cpu++) {
list_for_each_entry(pos, &evlist->entries, node) {
if (!perf_evsel__is_group_leader(pos))
continue;
for (thread = 0; thread < evlist->threads->nr; thread++)
for (thread = 0; thread < nr_threads; thread++)
ioctl(FD(pos, cpu, thread),
PERF_EVENT_IOC_ENABLE, 0);
}
......@@ -258,7 +261,9 @@ void perf_evlist__enable(struct perf_evlist *evlist)
static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
int nfds = cpu_map__nr(evlist->cpus) * evlist->threads->nr * evlist->nr_entries;
int nr_cpus = cpu_map__nr(evlist->cpus);
int nr_threads = thread_map__nr(evlist->threads);
int nfds = nr_cpus * nr_threads * evlist->nr_entries;
evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
return evlist->pollfd != NULL ? 0 : -ENOMEM;
}
......@@ -417,7 +422,7 @@ static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
if (cpu_map__all(evlist->cpus))
evlist->nr_mmaps = evlist->threads->nr;
evlist->nr_mmaps = thread_map__nr(evlist->threads);
evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
return evlist->mmap != NULL ? 0 : -ENOMEM;
}
......@@ -442,11 +447,13 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int m
{
struct perf_evsel *evsel;
int cpu, thread;
int nr_cpus = cpu_map__nr(evlist->cpus);
int nr_threads = thread_map__nr(evlist->threads);
for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
for (cpu = 0; cpu < nr_cpus; cpu++) {
int output = -1;
for (thread = 0; thread < evlist->threads->nr; thread++) {
for (thread = 0; thread < nr_threads; thread++) {
list_for_each_entry(evsel, &evlist->entries, node) {
int fd = FD(evsel, cpu, thread);
......@@ -470,7 +477,7 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int m
return 0;
out_unmap:
for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
for (cpu = 0; cpu < nr_cpus; cpu++) {
if (evlist->mmap[cpu].base != NULL) {
munmap(evlist->mmap[cpu].base, evlist->mmap_len);
evlist->mmap[cpu].base = NULL;
......@@ -483,8 +490,9 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, in
{
struct perf_evsel *evsel;
int thread;
int nr_threads = thread_map__nr(evlist->threads);
for (thread = 0; thread < evlist->threads->nr; thread++) {
for (thread = 0; thread < nr_threads; thread++) {
int output = -1;
list_for_each_entry(evsel, &evlist->entries, node) {
......@@ -509,7 +517,7 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, in
return 0;
out_unmap:
for (thread = 0; thread < evlist->threads->nr; thread++) {
for (thread = 0; thread < nr_threads; thread++) {
if (evlist->mmap[thread].base != NULL) {
munmap(evlist->mmap[thread].base, evlist->mmap_len);
evlist->mmap[thread].base = NULL;
......@@ -610,7 +618,7 @@ int perf_evlist__apply_filters(struct perf_evlist *evlist)
struct perf_evsel *evsel;
int err = 0;
const int ncpus = cpu_map__nr(evlist->cpus),
nthreads = evlist->threads->nr;
nthreads = thread_map__nr(evlist->threads);
list_for_each_entry(evsel, &evlist->entries, node) {
if (evsel->filter == NULL)
......@@ -629,7 +637,7 @@ int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
struct perf_evsel *evsel;
int err = 0;
const int ncpus = cpu_map__nr(evlist->cpus),
nthreads = evlist->threads->nr;
nthreads = thread_map__nr(evlist->threads);
list_for_each_entry(evsel, &evlist->entries, node) {
err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
......@@ -712,10 +720,20 @@ void perf_evlist__set_selected(struct perf_evlist *evlist,
evlist->selected = evsel;
}
void perf_evlist__close(struct perf_evlist *evlist)
{
struct perf_evsel *evsel;
int ncpus = cpu_map__nr(evlist->cpus);
int nthreads = thread_map__nr(evlist->threads);
list_for_each_entry_reverse(evsel, &evlist->entries, node)
perf_evsel__close(evsel, ncpus, nthreads);
}
int perf_evlist__open(struct perf_evlist *evlist)
{
struct perf_evsel *evsel;
int err, ncpus, nthreads;
int err;
list_for_each_entry(evsel, &evlist->entries, node) {
err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
......@@ -725,19 +743,15 @@ int perf_evlist__open(struct perf_evlist *evlist)
return 0;
out_err:
ncpus = evlist->cpus ? evlist->cpus->nr : 1;
nthreads = evlist->threads ? evlist->threads->nr : 1;
list_for_each_entry_reverse(evsel, &evlist->entries, node)
perf_evsel__close(evsel, ncpus, nthreads);
perf_evlist__close(evlist);
errno = -err;
return err;
}
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
struct perf_record_opts *opts,
const char *argv[])
struct perf_target *target,
const char *argv[], bool pipe_output,
bool want_signal)
{
int child_ready_pipe[2], go_pipe[2];
char bf;
......@@ -759,7 +773,7 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist,
}
if (!evlist->workload.pid) {
if (opts->pipe_output)
if (pipe_output)
dup2(2, 1);
close(child_ready_pipe[0]);
......@@ -787,11 +801,12 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist,
execvp(argv[0], (char **)argv);
perror(argv[0]);
kill(getppid(), SIGUSR1);
if (want_signal)
kill(getppid(), SIGUSR1);
exit(-1);
}
if (perf_target__none(&opts->target))
if (perf_target__none(target))
evlist->threads->map[0] = evlist->workload.pid;
close(child_ready_pipe[1]);
......
......@@ -49,8 +49,7 @@ struct perf_evsel_str_handler {
void *handler;
};
struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
struct thread_map *threads);
struct perf_evlist *perf_evlist__new(void);
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
struct thread_map *threads);
void perf_evlist__exit(struct perf_evlist *evlist);
......@@ -82,13 +81,15 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
int perf_evlist__open(struct perf_evlist *evlist);
void perf_evlist__close(struct perf_evlist *evlist);
void perf_evlist__config(struct perf_evlist *evlist,
struct perf_record_opts *opts);
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
struct perf_record_opts *opts,
const char *argv[]);
struct perf_target *target,
const char *argv[], bool pipe_output,
bool want_signal);
int perf_evlist__start_workload(struct perf_evlist *evlist);
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
......
......@@ -10,7 +10,7 @@
#include <byteswap.h>
#include <linux/bitops.h>
#include "asm/bug.h"
#include "debugfs.h"
#include <lk/debugfs.h>
#include "event-parse.h"
#include "evsel.h"
#include "evlist.h"
......@@ -633,6 +633,12 @@ int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
return 0;
}
void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus)
{
memset(evsel->counts, 0, (sizeof(*evsel->counts) +
(ncpus * sizeof(struct perf_counts_values))));
}
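perf_evsel__reset_counts() zeroes the aggregated counts and the trailing per-cpu values array in a single memset, which lets callers reuse the allocation between measurement rounds. Below is a minimal sketch, not from the patch, of clearing every event in an evlist this way; it only uses helpers visible elsewhere in this diff, and the function name evlist__reset_all_counts is hypothetical.

/* Sketch only: reset the counts of every evsel in an evlist, e.g. between
 * repeated measurement rounds, reusing the buffers allocated by
 * perf_evsel__alloc_counts(). */
static void evlist__reset_all_counts(struct perf_evlist *evlist, int ncpus)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node)
		perf_evsel__reset_counts(evsel, ncpus);
}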
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
evsel->counts = zalloc((sizeof(*evsel->counts) +
......@@ -673,9 +679,8 @@ void perf_evsel__free_counts(struct perf_evsel *evsel)
void perf_evsel__exit(struct perf_evsel *evsel)
{
assert(list_empty(&evsel->node));
xyarray__delete(evsel->fd);
xyarray__delete(evsel->sample_id);
free(evsel->id);
perf_evsel__free_fd(evsel);
perf_evsel__free_id(evsel);
}
void perf_evsel__delete(struct perf_evsel *evsel)
......
......@@ -9,6 +9,7 @@
#include "xyarray.h"
#include "cgroup.h"
#include "hist.h"
#include "symbol.h"
struct perf_counts_values {
union {
......@@ -120,6 +121,7 @@ int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size);
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus);
void perf_evsel__free_fd(struct perf_evsel *evsel);
void perf_evsel__free_id(struct perf_evsel *evsel);
void perf_evsel__free_counts(struct perf_evsel *evsel);
......@@ -246,11 +248,34 @@ static inline struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel)
return list_entry(evsel->node.next, struct perf_evsel, node);
}
/**
* perf_evsel__is_group_leader - Return whether given evsel is a leader event
*
* @evsel - evsel selector to be tested
*
* Return %true if @evsel is a group leader or a stand-alone event
*/
static inline bool perf_evsel__is_group_leader(const struct perf_evsel *evsel)
{
return evsel->leader == evsel;
}
/**
* perf_evsel__is_group_event - Return whether given evsel is a group event
*
* @evsel - evsel selector to be tested
*
* Return %true iff event group view is enabled and @evsel is an actual group
* leader which has other members in the group
*/
static inline bool perf_evsel__is_group_event(struct perf_evsel *evsel)
{
if (!symbol_conf.event_group)
return false;
return perf_evsel__is_group_leader(evsel) && evsel->nr_members > 1;
}
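This predicate is what the annotation and histogram code in this series keys off when deciding whether to print one percentage column or one per group member. A minimal sketch of that calling pattern follows; it is not part of the patch and the helper name nr_percent_slots is hypothetical.

/* Sketch only: size per-event storage by the number of group members when
 * group view is active, otherwise fall back to a single slot -- the same
 * decision symbol__tui_annotate() and disasm_line__print() make above. */
static int nr_percent_slots(struct perf_evsel *evsel)
{
	return perf_evsel__is_group_event(evsel) ? evsel->nr_members : 1;
}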
struct perf_attr_details {
bool freq;
bool verbose;
......
......@@ -2789,7 +2789,7 @@ int perf_session__read_header(struct perf_session *session, int fd)
u64 f_id;
int nr_attrs, nr_ids, i, j;
session->evlist = perf_evlist__new(NULL, NULL);
session->evlist = perf_evlist__new();
if (session->evlist == NULL)
return -ENOMEM;
......@@ -2940,7 +2940,7 @@ int perf_event__process_attr(union perf_event *event,
struct perf_evlist *evlist = *pevlist;
if (evlist == NULL) {
*pevlist = evlist = perf_evlist__new(NULL, NULL);
*pevlist = evlist = perf_evlist__new();
if (evlist == NULL)
return -ENOMEM;
}
......
......@@ -177,7 +177,7 @@ struct hist_browser_timer {
#ifdef NEWT_SUPPORT
#include "../ui/keysyms.h"
int hist_entry__tui_annotate(struct hist_entry *he, int evidx,
int hist_entry__tui_annotate(struct hist_entry *he, struct perf_evsel *evsel,
struct hist_browser_timer *hbt);
int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
......@@ -196,7 +196,8 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused,
static inline int hist_entry__tui_annotate(struct hist_entry *self
__maybe_unused,
int evidx __maybe_unused,
struct perf_evsel *evsel
__maybe_unused,
struct hist_browser_timer *hbt
__maybe_unused)
{
......
......@@ -1003,6 +1003,17 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
return 0;
}
static void machine__remove_thread(struct machine *machine, struct thread *th)
{
machine->last_match = NULL;
rb_erase(&th->rb_node, &machine->threads);
/*
* We may have references to this thread, for instance in some hist_entry
* instances, so just move them to a separate list.
*/
list_add_tail(&th->node, &machine->dead_threads);
}
int machine__process_exit_event(struct machine *machine, union perf_event *event)
{
struct thread *thread = machine__find_thread(machine, event->fork.tid);
......@@ -1039,17 +1050,6 @@ int machine__process_event(struct machine *machine, union perf_event *event)
return ret;
}
void machine__remove_thread(struct machine *machine, struct thread *th)
{
machine->last_match = NULL;
rb_erase(&th->rb_node, &machine->threads);
/*
* We may have references to this thread, for instance in some hist_entry
* instances, so just move them to a separate list.
*/
list_add_tail(&th->node, &machine->dead_threads);
}
static bool symbol__match_parent_regex(struct symbol *sym)
{
if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
......
......@@ -97,7 +97,6 @@ static inline bool machine__is_host(struct machine *machine)
}
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid);
void machine__remove_thread(struct machine *machine, struct thread *th);
size_t machine__fprintf(struct machine *machine, FILE *fp);
......
......@@ -10,7 +10,7 @@
#include "symbol.h"
#include "cache.h"
#include "header.h"
#include "debugfs.h"
#include <lk/debugfs.h>
#include "parse-events-bison.h"
#define YY_EXTRA_TYPE int
#include "parse-events-flex.h"
......
......@@ -40,7 +40,7 @@
#include "color.h"
#include "symbol.h"
#include "thread.h"
#include "debugfs.h"
#include <lk/debugfs.h>
#include "trace-event.h" /* For __maybe_unused */
#include "probe-event.h"
#include "probe-finder.h"
......
......@@ -15,7 +15,6 @@ util/thread_map.c
util/util.c
util/xyarray.c
util/cgroup.c
util/debugfs.c
util/rblist.c
util/strlist.c
util/sysfs.c
......
......@@ -1365,18 +1365,6 @@ size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
return machine__fprintf(&session->machines.host, fp);
}
void perf_session__remove_thread(struct perf_session *session,
struct thread *th)
{
/*
* FIXME: This one makes no sense, we need to remove the thread from
* the machine it belongs to, perf_session can have many machines, so
* doing it always on ->machines.host is wrong. Fix when auditing all
* the 'perf kvm' code.
*/
machine__remove_thread(&session->machines.host, th);
}
struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
unsigned int type)
{
......
......@@ -72,7 +72,6 @@ void perf_event__attr_swap(struct perf_event_attr *attr);
int perf_session__create_kernel_maps(struct perf_session *self);
void perf_session__set_id_hdr_size(struct perf_session *session);
void perf_session__remove_thread(struct perf_session *self, struct thread *th);
static inline
struct machine *perf_session__find_machine(struct perf_session *self, pid_t pid)
......
......@@ -24,6 +24,7 @@ cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
liblk = getenv('LIBLK')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
......@@ -32,7 +33,7 @@ perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
extra_objects = [libtraceevent],
extra_objects = [libtraceevent, liblk],
)
setup(name='perf',
......
......@@ -21,4 +21,9 @@ void thread_map__delete(struct thread_map *threads);
size_t thread_map__fprintf(struct thread_map *threads, FILE *fp);
static inline int thread_map__nr(struct thread_map *threads)
{
return threads ? threads->nr : 1;
}
#endif /* __PERF_THREAD_MAP_H */
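thread_map__nr() returns 1 for a NULL map, so per-thread loops still run exactly once when no thread map was created (e.g. system-wide sessions); that is what lets the evlist.c hunks above drop their explicit NULL handling. A minimal sketch mirroring perf_evlist__alloc_pollfd() — not from the patch, helper name evlist__nr_fds hypothetical:

/* Sketch only: compute how many fds an evlist needs without special-casing
 * a missing cpu or thread map -- both helpers tolerate NULL. */
static int evlist__nr_fds(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	return nr_cpus * nr_threads * evlist->nr_entries;
}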
......@@ -38,35 +38,14 @@
#include "../perf.h"
#include "trace-event.h"
#include "debugfs.h"
#include <lk/debugfs.h>
#include "evsel.h"
#define VERSION "0.5"
#define TRACE_CTRL "tracing_on"
#define TRACE "trace"
#define AVAILABLE "available_tracers"
#define CURRENT "current_tracer"
#define ITER_CTRL "trace_options"
#define MAX_LATENCY "tracing_max_latency"
unsigned int page_size;
static const char *output_file = "trace.info";
static int output_fd;
struct event_list {
struct event_list *next;
const char *event;
};
struct events {
struct events *sibling;
struct events *children;
struct events *next;
char *name;
};
static void *malloc_or_die(unsigned int size)
{
......@@ -80,7 +59,7 @@ static void *malloc_or_die(unsigned int size)
static const char *find_debugfs(void)
{
const char *path = debugfs_mount(NULL);
const char *path = perf_debugfs_mount(NULL);
if (!path)
die("Your kernel not support debugfs filesystem");
......@@ -131,17 +110,10 @@ static void put_tracing_file(char *file)
free(file);
}
static ssize_t calc_data_size;
static ssize_t write_or_die(const void *buf, size_t len)
{
int ret;
if (calc_data_size) {
calc_data_size += len;
return len;
}
ret = write(output_fd, buf, len);
if (ret < 0)
die("writing to '%s'", output_file);
......@@ -457,7 +429,6 @@ static void tracing_data_header(void)
write_or_die(buf, 1);
/* save page_size */
page_size = sysconf(_SC_PAGESIZE);
write_or_die(&page_size, 4);
}
......
......@@ -183,43 +183,6 @@ void event_format__print(struct event_format *event,
trace_seq_do_printf(&s);
}
void print_trace_event(struct pevent *pevent, int cpu, void *data, int size)
{
int type = trace_parse_common_type(pevent, data);
struct event_format *event = pevent_find_event(pevent, type);
if (!event) {
warning("ug! no event found for type %d", type);
return;
}
event_format__print(event, cpu, data, size);
}
void print_event(struct pevent *pevent, int cpu, void *data, int size,
unsigned long long nsecs, char *comm)
{
struct pevent_record record;
struct trace_seq s;
int pid;
pevent->latency_format = latency_format;
record.ts = nsecs;
record.cpu = cpu;
record.size = size;
record.data = data;
pid = pevent_data_pid(pevent, &record);
if (!pevent_pid_is_registered(pevent, pid))
pevent_register_comm(pevent, comm, pid);
trace_seq_init(&s);
pevent_print_event(pevent, &s, &record);
trace_seq_do_printf(&s);
printf("\n");
}
void parse_proc_kallsyms(struct pevent *pevent,
char *file, unsigned int size __maybe_unused)
{
......
......@@ -41,8 +41,6 @@
static int input_fd;
static int read_page;
int file_bigendian;
int host_bigendian;
static int long_size;
......@@ -287,205 +285,6 @@ static void read_event_files(struct pevent *pevent)
}
}
struct cpu_data {
unsigned long long offset;
unsigned long long size;
unsigned long long timestamp;
struct pevent_record *next;
char *page;
int cpu;
int index;
int page_size;
};
static struct cpu_data *cpu_data;
static void update_cpu_data_index(int cpu)
{
cpu_data[cpu].offset += page_size;
cpu_data[cpu].size -= page_size;
cpu_data[cpu].index = 0;
}
static void get_next_page(int cpu)
{
off_t save_seek;
off_t ret;
if (!cpu_data[cpu].page)
return;
if (read_page) {
if (cpu_data[cpu].size <= page_size) {
free(cpu_data[cpu].page);
cpu_data[cpu].page = NULL;
return;
}
update_cpu_data_index(cpu);
/* other parts of the code may expect the pointer to not move */
save_seek = lseek(input_fd, 0, SEEK_CUR);
ret = lseek(input_fd, cpu_data[cpu].offset, SEEK_SET);
if (ret == (off_t)-1)
die("failed to lseek");
ret = read(input_fd, cpu_data[cpu].page, page_size);
if (ret < 0)
die("failed to read page");
/* reset the file pointer back */
lseek(input_fd, save_seek, SEEK_SET);
return;
}
munmap(cpu_data[cpu].page, page_size);
cpu_data[cpu].page = NULL;
if (cpu_data[cpu].size <= page_size)
return;
update_cpu_data_index(cpu);
cpu_data[cpu].page = mmap(NULL, page_size, PROT_READ, MAP_PRIVATE,
input_fd, cpu_data[cpu].offset);
if (cpu_data[cpu].page == MAP_FAILED)
die("failed to mmap cpu %d at offset 0x%llx",
cpu, cpu_data[cpu].offset);
}
static unsigned int type_len4host(unsigned int type_len_ts)
{
if (file_bigendian)
return (type_len_ts >> 27) & ((1 << 5) - 1);
else
return type_len_ts & ((1 << 5) - 1);
}
static unsigned int ts4host(unsigned int type_len_ts)
{
if (file_bigendian)
return type_len_ts & ((1 << 27) - 1);
else
return type_len_ts >> 5;
}
static int calc_index(void *ptr, int cpu)
{
return (unsigned long)ptr - (unsigned long)cpu_data[cpu].page;
}
struct pevent_record *trace_peek_data(struct pevent *pevent, int cpu)
{
struct pevent_record *data;
void *page = cpu_data[cpu].page;
int idx = cpu_data[cpu].index;
void *ptr = page + idx;
unsigned long long extend;
unsigned int type_len_ts;
unsigned int type_len;
unsigned int delta;
unsigned int length = 0;
if (cpu_data[cpu].next)
return cpu_data[cpu].next;
if (!page)
return NULL;
if (!idx) {
/* FIXME: handle header page */
if (header_page_ts_size != 8)
die("expected a long long type for timestamp");
cpu_data[cpu].timestamp = data2host8(pevent, ptr);
ptr += 8;
switch (header_page_size_size) {
case 4:
cpu_data[cpu].page_size = data2host4(pevent, ptr);
ptr += 4;
break;
case 8:
cpu_data[cpu].page_size = data2host8(pevent, ptr);
ptr += 8;
break;
default:
die("bad long size");
}
ptr = cpu_data[cpu].page + header_page_data_offset;
}
read_again:
idx = calc_index(ptr, cpu);
if (idx >= cpu_data[cpu].page_size) {
get_next_page(cpu);
return trace_peek_data(pevent, cpu);
}
type_len_ts = data2host4(pevent, ptr);
ptr += 4;
type_len = type_len4host(type_len_ts);
delta = ts4host(type_len_ts);
switch (type_len) {
case RINGBUF_TYPE_PADDING:
if (!delta)
die("error, hit unexpected end of page");
length = data2host4(pevent, ptr);
ptr += 4;
length *= 4;
ptr += length;
goto read_again;
case RINGBUF_TYPE_TIME_EXTEND:
extend = data2host4(pevent, ptr);
ptr += 4;
extend <<= TS_SHIFT;
extend += delta;
cpu_data[cpu].timestamp += extend;
goto read_again;
case RINGBUF_TYPE_TIME_STAMP:
ptr += 12;
break;
case 0:
length = data2host4(pevent, ptr);
ptr += 4;
die("here! length=%d", length);
break;
default:
length = type_len * 4;
break;
}
cpu_data[cpu].timestamp += delta;
data = malloc_or_die(sizeof(*data));
memset(data, 0, sizeof(*data));
data->ts = cpu_data[cpu].timestamp;
data->size = length;
data->data = ptr;
ptr += length;
cpu_data[cpu].index = calc_index(ptr, cpu);
cpu_data[cpu].next = data;
return data;
}
struct pevent_record *trace_read_data(struct pevent *pevent, int cpu)
{
struct pevent_record *data;
data = trace_peek_data(pevent, cpu);
cpu_data[cpu].next = NULL;
return data;
}
ssize_t trace_report(int fd, struct pevent **ppevent, bool __repipe)
{
char buf[BUFSIZ];
......
......@@ -30,13 +30,9 @@ enum {
int bigendian(void);
struct pevent *read_trace_init(int file_bigendian, int host_bigendian);
void print_trace_event(struct pevent *pevent, int cpu, void *data, int size);
void event_format__print(struct event_format *event,
int cpu, void *data, int size);
void print_event(struct pevent *pevent, int cpu, void *data, int size,
unsigned long long nsecs, char *comm);
int parse_ftrace_file(struct pevent *pevent, char *buf, unsigned long size);
int parse_event_file(struct pevent *pevent,
char *buf, unsigned long size, char *sys);
......
......@@ -17,6 +17,8 @@ bool test_attr__enabled;
bool perf_host = true;
bool perf_guest = false;
char tracing_events_path[PATH_MAX + 1] = "/sys/kernel/debug/tracing/events";
void event_attr_init(struct perf_event_attr *attr)
{
if (!perf_host)
......@@ -242,3 +244,28 @@ void get_term_dimensions(struct winsize *ws)
ws->ws_row = 25;
ws->ws_col = 80;
}
static void set_tracing_events_path(const char *mountpoint)
{
snprintf(tracing_events_path, sizeof(tracing_events_path), "%s/%s",
mountpoint, "tracing/events");
}
const char *perf_debugfs_mount(const char *mountpoint)
{
const char *mnt;
mnt = debugfs_mount(mountpoint);
if (!mnt)
return NULL;
set_tracing_events_path(mnt);
return mnt;
}
void perf_debugfs_set_path(const char *mntpt)
{
snprintf(debugfs_mountpoint, strlen(debugfs_mountpoint), "%s", mntpt);
set_tracing_events_path(mntpt);
}
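perf_debugfs_mount() both locates (or mounts) debugfs and refreshes tracing_events_path as a side effect, which is why callers in this series no longer build debugfs paths by hand. A minimal sketch of the intended start-up pattern, not from the patch; it assumes <stdio.h> plus the perf util headers, and the function name is hypothetical.

/* Sketch only: mount/locate debugfs once at start-up; tracing_events_path
 * is updated by the call and can then be used to build event file paths. */
static int show_tracing_events_dir(void)
{
	if (perf_debugfs_mount(NULL) == NULL)
		return -1;	/* debugfs unavailable */

	printf("%s\n", tracing_events_path);
	return 0;
}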
......@@ -73,10 +73,14 @@
#include <linux/magic.h>
#include "types.h"
#include <sys/ttydefaults.h>
#include <lk/debugfs.h>
extern const char *graph_line;
extern const char *graph_dotted_line;
extern char buildid_dir[];
extern char tracing_events_path[];
extern void perf_debugfs_set_path(const char *mountpoint);
const char *perf_debugfs_mount(const char *mountpoint);
/* On most systems <limits.h> would have given us this, but
* not on some systems (e.g. GNU/Hurd).
......@@ -274,5 +278,4 @@ extern unsigned int page_size;
struct winsize;
void get_term_dimensions(struct winsize *ws);
#endif
#endif /* GIT_COMPAT_UTIL_H */
ifneq ($(O),)
ifeq ($(origin O), command line)
dummy := $(if $(shell test -d $(O) || echo $(O)),$(error O=$(O) does not exist),)
ABSOLUTE_O := $(shell cd $(O) ; pwd)
......@@ -7,9 +8,10 @@ ifeq ($(objtree),)
objtree := $(O)
endif
endif
endif
ifneq ($(OUTPUT),)
# check that the output directory actually exists
ifneq ($(OUTPUT),)
OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
endif
......@@ -70,7 +72,7 @@ ifndef V
QUIET_BISON = @echo ' ' BISON $@;
descend = \
@echo ' ' DESCEND $(1); \
+@echo ' ' DESCEND $(1); \
mkdir -p $(OUTPUT)$(1) && \
$(MAKE) $(COMMAND_O) subdir=$(if $(subdir),$(subdir)/$(1),$(1)) $(PRINT_DIR) -C $(1) $(2)
endif
......
# Makefile for vm tools
#
TARGETS=page-types slabinfo
LK_DIR = ../lib/lk
LIBLK = $(LK_DIR)/liblk.a
CC = $(CROSS_COMPILE)gcc
CFLAGS = -Wall -Wextra
CFLAGS = -Wall -Wextra -I../lib/
LDFLAGS = $(LIBLK)
$(TARGETS): liblk
liblk:
make -C $(LK_DIR)
all: page-types slabinfo
%: %.c
$(CC) $(CFLAGS) -o $@ $^
$(CC) $(CFLAGS) -o $@ $< $(LDFLAGS)
clean:
$(RM) page-types slabinfo
make -C ../lib/lk clean
......@@ -36,7 +36,7 @@
#include <sys/statfs.h>
#include "../../include/uapi/linux/magic.h"
#include "../../include/uapi/linux/kernel-page-flags.h"
#include <lk/debugfs.h>
#ifndef MAX_PATH
# define MAX_PATH 256
......@@ -178,7 +178,7 @@ static int kpageflags_fd;
static int opt_hwpoison;
static int opt_unpoison;
static char hwpoison_debug_fs[MAX_PATH+1];
static char *hwpoison_debug_fs;
static int hwpoison_inject_fd;
static int hwpoison_forget_fd;
......@@ -458,81 +458,6 @@ static uint64_t kpageflags_flags(uint64_t flags)
return flags;
}
/* verify that a mountpoint is actually a debugfs instance */
static int debugfs_valid_mountpoint(const char *debugfs)
{
struct statfs st_fs;
if (statfs(debugfs, &st_fs) < 0)
return -ENOENT;
else if (st_fs.f_type != (long) DEBUGFS_MAGIC)
return -ENOENT;
return 0;
}
/* find the path to the mounted debugfs */
static const char *debugfs_find_mountpoint(void)
{
const char *const *ptr;
char type[100];
FILE *fp;
ptr = debugfs_known_mountpoints;
while (*ptr) {
if (debugfs_valid_mountpoint(*ptr) == 0) {
strcpy(hwpoison_debug_fs, *ptr);
return hwpoison_debug_fs;
}
ptr++;
}
/* give up and parse /proc/mounts */
fp = fopen("/proc/mounts", "r");
if (fp == NULL)
perror("Can't open /proc/mounts for read");
while (fscanf(fp, "%*s %"
STR(MAX_PATH)
"s %99s %*s %*d %*d\n",
hwpoison_debug_fs, type) == 2) {
if (strcmp(type, "debugfs") == 0)
break;
}
fclose(fp);
if (strcmp(type, "debugfs") != 0)
return NULL;
return hwpoison_debug_fs;
}
/* mount the debugfs somewhere if it's not mounted */
static void debugfs_mount(void)
{
const char *const *ptr;
/* see if it's already mounted */
if (debugfs_find_mountpoint())
return;
ptr = debugfs_known_mountpoints;
while (*ptr) {
if (mount(NULL, *ptr, "debugfs", 0, NULL) == 0) {
/* save the mountpoint */
strcpy(hwpoison_debug_fs, *ptr);
break;
}
ptr++;
}
if (*ptr == NULL) {
perror("mount debugfs");
exit(EXIT_FAILURE);
}
}
/*
* page actions
*/
......@@ -541,7 +466,11 @@ static void prepare_hwpoison_fd(void)
{
char buf[MAX_PATH + 1];
debugfs_mount();
hwpoison_debug_fs = debugfs_mount(NULL);
if (!hwpoison_debug_fs) {
perror("mount debugfs");
exit(EXIT_FAILURE);
}
if (opt_hwpoison && !hwpoison_inject_fd) {
snprintf(buf, MAX_PATH, "%s/hwpoison/corrupt-pfn",
......