Commit 21f77d23 authored by Ingo Molnar

Merge tag 'perf-core-for-mingo-20160516' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

User visible changes:

- Honour the kernel.perf_event_max_stack knob more precisely by not counting
  PERF_CONTEXT_{KERNEL,USER} markers when deciding when to stop adding entries
  to the perf_sample->ip_callchain[] array (Arnaldo Carvalho de Melo); see the
  first sketch after this list

- Fix indentation of 'stalled-cycles-backend' in 'perf stat' (Namhyung Kim)

- Update runtime using 'cpu-clock' event in 'perf stat' (Namhyung Kim)

- Use 'cpu-clock' for cpu targets in 'perf stat' (Namhyung Kim)

- Avoid fractional digits for integer scales in 'perf stat' (Andi Kleen); see
  the second sketch after this list

- Store the vdso buildid unconditionally: vdso shows up in callchains, and
  callchains are not checked when creating the build-id table, so we ended up
  unable to resolve VDSO symbols when doing analysis on a different machine
  than the one where recording was done, possibly even one of a different
  arch (e.g. arm -> x86_64) (He Kuang)
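
As a purely illustrative aside, this is what the counting change means for a
sampled chain: PERF_CONTEXT_* markers live in the same ip[] array as the real
frames, but only the real frames now count against kernel.perf_event_max_stack,
while the markers are budgeted separately. A standalone C mock (the addresses
are invented; the PERF_CONTEXT_* values mirror the uapi header):

    /* Standalone mock of a sampled callchain; not kernel code. */
    #include <stdio.h>
    #include <stdint.h>

    #define PERF_CONTEXT_KERNEL ((uint64_t)-128)
    #define PERF_CONTEXT_USER   ((uint64_t)-512)
    #define PERF_CONTEXT_MAX    ((uint64_t)-4095)

    int main(void)
    {
        /* A chain as recorded: context markers interleaved with IPs. */
        uint64_t ips[] = {
            PERF_CONTEXT_KERNEL, 0xffffffff8100a001ULL, 0xffffffff8100b002ULL,
            PERF_CONTEXT_USER,   0x00007f0000001000ULL, 0x00007f0000002000ULL,
        };
        int nr = (int)(sizeof(ips) / sizeof(ips[0]));
        int entries = 0, contexts = 0;

        for (int i = 0; i < nr; i++) {
            if (ips[i] >= PERF_CONTEXT_MAX) /* a marker, not a frame */
                contexts++;
            else
                entries++;
        }
        /* Only 'entries' is limited by kernel.perf_event_max_stack;
         * 'contexts' by kernel.perf_event_max_contexts_per_stack. */
        printf("%d real frames, %d context markers\n", entries, contexts);
        return 0;
    }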
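
And for the integer-scale fix: the old test was 'sc != 1.0', the new one asks
whether the scale has any fractional part at all. A minimal sketch of just
that predicate (standalone, not the perf code itself; link with -lm):

    #include <math.h>
    #include <stdio.h>

    /* Integer scales get "%18.0f", fractional ones keep "%18.2f". */
    static const char *fmt_for_scale(double sc)
    {
        return floor(sc) != sc ? "%18.2f%s\n" : "%18.0f%s\n";
    }

    int main(void)
    {
        printf(fmt_for_scale(1.0),   123456.0, " insn"); /* no decimals  */
        printf(fmt_for_scale(100.0), 123456.0, " insn"); /* no decimals  */
        printf(fmt_for_scale(0.5),   123456.0, " insn"); /* two decimals */
        return 0;
    }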

Infrastructure changes:

- Generalize max_stack sysctl handler, will be used for configuring
  multiple kernel knobs related to callchains (Arnaldo Carvalho de Melo)
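
The generalized handler dispatches on table->data instead of naming one
global, so a single handler can serve any int knob guarded by the callchain
mutex. A simplified user-space model of the pattern (the struct and handler
below are stand-ins, not the kernel's ctl_table API):

    #include <stdio.h>

    /* Minimal stand-in for the kernel's sysctl table entry. */
    struct ctl_table {
        const char *procname;
        int *data;              /* which knob this entry controls */
    };

    static int max_stack = 127;
    static int max_contexts_per_stack = 8;

    /* One handler for both knobs: it never names a specific global.
     * (The kernel version also fails with -EBUSY while events exist.) */
    static void handler_write(struct ctl_table *table, int new_value)
    {
        int *value = table->data;
        *value = new_value;
    }

    int main(void)
    {
        struct ctl_table t1 = { "perf_event_max_stack", &max_stack };
        struct ctl_table t2 = { "perf_event_max_contexts_per_stack",
                                &max_contexts_per_stack };

        handler_write(&t1, 64);
        handler_write(&t2, 4);
        printf("%s=%d, %s=%d\n", t1.procname, max_stack,
               t2.procname, max_contexts_per_stack);
        return 0;
    }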

Cleanups:

- Introduce DSO__NAME_KALLSYMS and DSO__NAME_KCORE, to stop using
  open coded strings (Masami Hiramatsu)

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents b0a434fb a29d5c9b
@@ -61,6 +61,7 @@ show up in /proc/sys/kernel:
 - perf_cpu_time_max_percent
 - perf_event_paranoid
 - perf_event_max_stack
+- perf_event_max_contexts_per_stack
 - pid_max
 - powersave-nap               [ PPC only ]
 - printk
@@ -668,6 +669,19 @@ The default value is 127.
 ==============================================================
 
+perf_event_max_contexts_per_stack:
+
+Controls maximum number of stack frame context entries for
+(attr.sample_type & PERF_SAMPLE_CALLCHAIN) configured events, for
+instance, when using 'perf record -g' or 'perf trace --call-graph fp'.
+
+This can only be done when no events are in use that have callchains
+enabled, otherwise writing to this file will return -EBUSY.
+
+The default value is 8.
+
+==============================================================
+
 pid_max:
 
 PID allocation wrap value. When the kernel's next PID value
......
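For illustration, a hypothetical user-space program poking the knob documented
above and reporting the -EBUSY case (the /proc path is the real one; the
program itself is only a sketch and needs root to write):

    #include <errno.h>
    #include <stdio.h>

    int main(void)
    {
        const char *path =
            "/proc/sys/kernel/perf_event_max_contexts_per_stack";
        FILE *f = fopen(path, "w");

        if (!f) {
            perror("fopen");
            return 1;
        }
        fprintf(f, "4\n");
        if (fclose(f) == EOF) {         /* the flush surfaces the error */
            if (errno == EBUSY)
                fprintf(stderr, "-EBUSY: callchain events in use\n");
            else
                perror("write");
            return 1;
        }
        puts("knob updated");
        return 0;
    }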
@@ -48,7 +48,7 @@ struct arc_callchain_trace {
 static int callchain_trace(unsigned int addr, void *data)
 {
     struct arc_callchain_trace *ctrl = data;
-    struct perf_callchain_entry *entry = ctrl->perf_stuff;
+    struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;
 
     perf_callchain_store(entry, addr);
 
     if (ctrl->depth++ < 3)
@@ -58,7 +58,7 @@ static int callchain_trace(unsigned int addr, void *data)
 }
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
     struct arc_callchain_trace ctrl = {
         .depth = 0,
@@ -69,7 +69,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
     /*
      * User stack can't be unwound trivially with kernel dwarf unwinder
......
@@ -31,7 +31,7 @@ struct frame_tail {
  */
 static struct frame_tail __user *
 user_backtrace(struct frame_tail __user *tail,
-               struct perf_callchain_entry *entry)
+               struct perf_callchain_entry_ctx *entry)
 {
     struct frame_tail buftail;
     unsigned long err;
@@ -59,7 +59,7 @@ user_backtrace(struct frame_tail __user *tail,
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
     struct frame_tail __user *tail;
@@ -75,7 +75,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
     tail = (struct frame_tail __user *)regs->ARM_fp - 1;
 
-    while ((entry->nr < sysctl_perf_event_max_stack) &&
+    while ((entry->nr < entry->max_stack) &&
            tail && !((unsigned long)tail & 0x3))
         tail = user_backtrace(tail, entry);
 }
@@ -89,13 +89,13 @@ static int
 callchain_trace(struct stackframe *fr,
                 void *data)
 {
-    struct perf_callchain_entry *entry = data;
+    struct perf_callchain_entry_ctx *entry = data;
     perf_callchain_store(entry, fr->pc);
     return 0;
 }
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
     struct stackframe fr;
......
@@ -31,7 +31,7 @@ struct frame_tail {
  */
 static struct frame_tail __user *
 user_backtrace(struct frame_tail __user *tail,
-               struct perf_callchain_entry *entry)
+               struct perf_callchain_entry_ctx *entry)
 {
     struct frame_tail buftail;
     unsigned long err;
@@ -76,7 +76,7 @@ struct compat_frame_tail {
 static struct compat_frame_tail __user *
 compat_user_backtrace(struct compat_frame_tail __user *tail,
-                      struct perf_callchain_entry *entry)
+                      struct perf_callchain_entry_ctx *entry)
 {
     struct compat_frame_tail buftail;
     unsigned long err;
@@ -106,7 +106,7 @@ compat_user_backtrace(struct compat_frame_tail __user *tail,
 }
 #endif /* CONFIG_COMPAT */
 
-void perf_callchain_user(struct perf_callchain_entry *entry,
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                          struct pt_regs *regs)
 {
     if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
@@ -122,7 +122,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
         tail = (struct frame_tail __user *)regs->regs[29];
 
-        while (entry->nr < sysctl_perf_event_max_stack &&
+        while (entry->nr < entry->max_stack &&
                tail && !((unsigned long)tail & 0xf))
             tail = user_backtrace(tail, entry);
     } else {
@@ -132,7 +132,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
         tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
 
-        while ((entry->nr < sysctl_perf_event_max_stack) &&
+        while ((entry->nr < entry->max_stack) &&
                tail && !((unsigned long)tail & 0x3))
             tail = compat_user_backtrace(tail, entry);
 #endif
@@ -146,12 +146,12 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
  */
 static int callchain_trace(struct stackframe *frame, void *data)
 {
-    struct perf_callchain_entry *entry = data;
+    struct perf_callchain_entry_ctx *entry = data;
     perf_callchain_store(entry, frame->pc);
     return 0;
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                            struct pt_regs *regs)
 {
     struct stackframe frame;
......
@@ -29,7 +29,7 @@ static bool is_valid_call(unsigned long calladdr)
 static struct metag_frame __user *
 user_backtrace(struct metag_frame __user *user_frame,
-               struct perf_callchain_entry *entry)
+               struct perf_callchain_entry_ctx *entry)
 {
     struct metag_frame frame;
     unsigned long calladdr;
@@ -56,7 +56,7 @@ user_backtrace(struct metag_frame __user *user_frame,
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
     unsigned long sp = regs->ctx.AX[0].U0;
     struct metag_frame __user *frame;
@@ -65,7 +65,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
     --frame;
 
-    while ((entry->nr < sysctl_perf_event_max_stack) && frame)
+    while ((entry->nr < entry->max_stack) && frame)
         frame = user_backtrace(frame, entry);
 }
@@ -78,13 +78,13 @@ static int
 callchain_trace(struct stackframe *fr,
                 void *data)
 {
-    struct perf_callchain_entry *entry = data;
+    struct perf_callchain_entry_ctx *entry = data;
     perf_callchain_store(entry, fr->pc);
     return 0;
 }
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
     struct stackframe fr;
......
@@ -25,8 +25,8 @@
  * the user stack callchains, we will add it here.
  */
-static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
+static void save_raw_perf_callchain(struct perf_callchain_entry_ctx *entry,
                                     unsigned long reg29)
 {
     unsigned long *sp = (unsigned long *)reg29;
     unsigned long addr;
@@ -35,14 +35,14 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
         addr = *sp++;
         if (__kernel_text_address(addr)) {
             perf_callchain_store(entry, addr);
-            if (entry->nr >= sysctl_perf_event_max_stack)
+            if (entry->nr >= entry->max_stack)
                 break;
         }
     }
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                            struct pt_regs *regs)
 {
     unsigned long sp = regs->regs[29];
 #ifdef CONFIG_KALLSYMS
@@ -59,7 +59,7 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
     }
     do {
         perf_callchain_store(entry, pc);
-        if (entry->nr >= sysctl_perf_event_max_stack)
+        if (entry->nr >= entry->max_stack)
             break;
         pc = unwind_stack(current, &sp, pc, &ra);
     } while (pc);
......
@@ -47,7 +47,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
 }
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
     unsigned long sp, next_sp;
     unsigned long next_ip;
@@ -76,7 +76,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
             next_ip = regs->nip;
             lr = regs->link;
             level = 0;
-            perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
+            perf_callchain_store_context(entry, PERF_CONTEXT_KERNEL);
         } else {
             if (level == 0)
@@ -232,7 +232,7 @@ static int sane_signal_64_frame(unsigned long sp)
         puc == (unsigned long) &sf->uc;
 }
 
-static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
                                    struct pt_regs *regs)
 {
     unsigned long sp, next_sp;
@@ -247,7 +247,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
     sp = regs->gpr[1];
     perf_callchain_store(entry, next_ip);
 
-    while (entry->nr < sysctl_perf_event_max_stack) {
+    while (entry->nr < entry->max_stack) {
         fp = (unsigned long __user *) sp;
         if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
             return;
@@ -274,7 +274,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
                 read_user_stack_64(&uregs[PT_R1], &sp))
                 return;
             level = 0;
-            perf_callchain_store(entry, PERF_CONTEXT_USER);
+            perf_callchain_store_context(entry, PERF_CONTEXT_USER);
             perf_callchain_store(entry, next_ip);
             continue;
         }
@@ -319,7 +319,7 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
     return rc;
 }
 
-static inline void perf_callchain_user_64(struct perf_callchain_entry *entry,
+static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
                                           struct pt_regs *regs)
 {
 }
@@ -439,7 +439,7 @@ static unsigned int __user *signal_frame_32_regs(unsigned int sp,
     return mctx->mc_gregs;
 }
 
-static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
                                    struct pt_regs *regs)
 {
     unsigned int sp, next_sp;
@@ -453,7 +453,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
     sp = regs->gpr[1];
     perf_callchain_store(entry, next_ip);
 
-    while (entry->nr < sysctl_perf_event_max_stack) {
+    while (entry->nr < entry->max_stack) {
         fp = (unsigned int __user *) (unsigned long) sp;
         if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
             return;
@@ -473,7 +473,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
                 read_user_stack_32(&uregs[PT_R1], &sp))
                 return;
             level = 0;
-            perf_callchain_store(entry, PERF_CONTEXT_USER);
+            perf_callchain_store_context(entry, PERF_CONTEXT_USER);
             perf_callchain_store(entry, next_ip);
             continue;
         }
@@ -487,7 +487,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
     if (current_is_64bit())
         perf_callchain_user_64(entry, regs);
......
@@ -224,13 +224,13 @@ arch_initcall(service_level_perf_register);
 static int __perf_callchain_kernel(void *data, unsigned long address)
 {
-    struct perf_callchain_entry *entry = data;
+    struct perf_callchain_entry_ctx *entry = data;
 
     perf_callchain_store(entry, address);
     return 0;
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                            struct pt_regs *regs)
 {
     if (user_mode(regs))
......
@@ -21,7 +21,7 @@ static int callchain_stack(void *data, char *name)
 static void callchain_address(void *data, unsigned long addr, int reliable)
 {
-    struct perf_callchain_entry *entry = data;
+    struct perf_callchain_entry_ctx *entry = data;
 
     if (reliable)
         perf_callchain_store(entry, addr);
@@ -33,7 +33,7 @@ static const struct stacktrace_ops callchain_ops = {
 };
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
     perf_callchain_store(entry, regs->pc);
......
@@ -1711,7 +1711,7 @@ static int __init init_hw_perf_events(void)
 }
 pure_initcall(init_hw_perf_events);
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                            struct pt_regs *regs)
 {
     unsigned long ksp, fp;
@@ -1756,7 +1756,7 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
             }
         }
 #endif
-    } while (entry->nr < sysctl_perf_event_max_stack);
+    } while (entry->nr < entry->max_stack);
 }
 
 static inline int
@@ -1769,7 +1769,7 @@ valid_user_frame(const void __user *fp, unsigned long size)
     return (__range_not_ok(fp, size, TASK_SIZE) == 0);
 }
 
-static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
                                    struct pt_regs *regs)
 {
     unsigned long ufp;
@@ -1790,10 +1790,10 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
         pc = sf.callers_pc;
         ufp = (unsigned long)sf.fp + STACK_BIAS;
         perf_callchain_store(entry, pc);
-    } while (entry->nr < sysctl_perf_event_max_stack);
+    } while (entry->nr < entry->max_stack);
 }
 
-static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
                                    struct pt_regs *regs)
 {
     unsigned long ufp;
@@ -1822,11 +1822,11 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
             ufp = (unsigned long)sf.fp;
         }
         perf_callchain_store(entry, pc);
-    } while (entry->nr < sysctl_perf_event_max_stack);
+    } while (entry->nr < entry->max_stack);
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
     u64 saved_fault_address = current_thread_info()->fault_address;
     u8 saved_fault_code = get_thread_fault_code();
......
@@ -941,7 +941,7 @@ arch_initcall(init_hw_perf_events);
 /*
  * Tile specific backtracing code for perf_events.
  */
-static inline void perf_callchain(struct perf_callchain_entry *entry,
+static inline void perf_callchain(struct perf_callchain_entry_ctx *entry,
                                   struct pt_regs *regs)
 {
     struct KBacktraceIterator kbt;
@@ -992,13 +992,13 @@ static inline void perf_callchain(struct perf_callchain_entry *entry,
     }
 }
 
-void perf_callchain_user(struct perf_callchain_entry *entry,
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                          struct pt_regs *regs)
 {
     perf_callchain(entry, regs);
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                            struct pt_regs *regs)
 {
     perf_callchain(entry, regs);
......
@@ -2202,7 +2202,7 @@ static int backtrace_stack(void *data, char *name)
 static int backtrace_address(void *data, unsigned long addr, int reliable)
 {
-    struct perf_callchain_entry *entry = data;
+    struct perf_callchain_entry_ctx *entry = data;
 
     return perf_callchain_store(entry, addr);
 }
@@ -2214,7 +2214,7 @@ static const struct stacktrace_ops backtrace_ops = {
 };
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
     if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
         /* TODO: We don't support guest os callchain now */
@@ -2268,7 +2268,7 @@ static unsigned long get_segment_base(unsigned int segment)
 #include <asm/compat.h>
 
 static inline int
-perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
 {
     /* 32-bit process in 64-bit kernel. */
     unsigned long ss_base, cs_base;
@@ -2283,7 +2283,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
     fp = compat_ptr(ss_base + regs->bp);
     pagefault_disable();
-    while (entry->nr < sysctl_perf_event_max_stack) {
+    while (entry->nr < entry->max_stack) {
         unsigned long bytes;
         frame.next_frame = 0;
         frame.return_address = 0;
@@ -2309,14 +2309,14 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 }
 #else
 static inline int
-perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
 {
     return 0;
 }
 #endif
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
     struct stack_frame frame;
     const void __user *fp;
@@ -2343,7 +2343,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
         return;
 
     pagefault_disable();
-    while (entry->nr < sysctl_perf_event_max_stack) {
+    while (entry->nr < entry->max_stack) {
         unsigned long bytes;
         frame.next_frame = NULL;
         frame.return_address = 0;
......
@@ -323,23 +323,23 @@ static void xtensa_pmu_read(struct perf_event *event)
 static int callchain_trace(struct stackframe *frame, void *data)
 {
-    struct perf_callchain_entry *entry = data;
+    struct perf_callchain_entry_ctx *entry = data;
 
     perf_callchain_store(entry, frame->pc);
     return 0;
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                            struct pt_regs *regs)
 {
-    xtensa_backtrace_kernel(regs, sysctl_perf_event_max_stack,
+    xtensa_backtrace_kernel(regs, entry->max_stack,
                             callchain_trace, NULL, entry);
 }
 
-void perf_callchain_user(struct perf_callchain_entry *entry,
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                          struct pt_regs *regs)
 {
-    xtensa_backtrace_user(regs, sysctl_perf_event_max_stack,
+    xtensa_backtrace_user(regs, entry->max_stack,
                           callchain_trace, entry);
 }
......
@@ -61,6 +61,14 @@ struct perf_callchain_entry {
     __u64 ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
 };
 
+struct perf_callchain_entry_ctx {
+    struct perf_callchain_entry *entry;
+    u32 max_stack;
+    u32 nr;
+    short contexts;
+    bool contexts_maxed;
+};
+
 struct perf_raw_record {
     u32 size;
     void *data;
@@ -1063,20 +1071,36 @@ extern void perf_event_fork(struct task_struct *tsk);
 /* Callchains */
 DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
 
-extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
-extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
+extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
+extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
 extern struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
-                   bool crosstask, bool add_mark);
+                   u32 max_stack, bool crosstask, bool add_mark);
 extern int get_callchain_buffers(void);
 extern void put_callchain_buffers(void);
 
 extern int sysctl_perf_event_max_stack;
+extern int sysctl_perf_event_max_contexts_per_stack;
+
+static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
+{
+    if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
+        struct perf_callchain_entry *entry = ctx->entry;
+        entry->ip[entry->nr++] = ip;
+        ++ctx->contexts;
+        return 0;
+    } else {
+        ctx->contexts_maxed = true;
+        return -1; /* no more room, stop walking the stack */
+    }
+}
 
-static inline int perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
 {
-    if (entry->nr < sysctl_perf_event_max_stack) {
+    if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
+        struct perf_callchain_entry *entry = ctx->entry;
         entry->ip[entry->nr++] = ip;
+        ++ctx->nr;
         return 0;
     } else {
         return -1; /* no more room, stop walking the stack */
......
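A self-contained user-space model of the two store helpers above; the
fixed-size array and the max_contexts field stand in for the kernel's per-CPU
buffers and the sysctl:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct entry { uint64_t ip[16]; uint32_t nr; };

    struct ctx {
        struct entry *entry;
        uint32_t max_stack, nr;
        short contexts;
        bool contexts_maxed;
        short max_contexts;     /* stands in for the sysctl */
    };

    /* Markers are budgeted by max_contexts only. */
    static int store_context(struct ctx *c, uint64_t ip)
    {
        if (c->contexts >= c->max_contexts) {
            c->contexts_maxed = true;
            return -1;
        }
        c->entry->ip[c->entry->nr++] = ip;
        c->contexts++;
        return 0;
    }

    /* Real frames are budgeted by max_stack, and also stop once the
     * context budget has been blown. */
    static int store(struct ctx *c, uint64_t ip)
    {
        if (c->nr >= c->max_stack || c->contexts_maxed)
            return -1;
        c->entry->ip[c->entry->nr++] = ip;
        c->nr++;
        return 0;
    }

    int main(void)
    {
        struct entry e = { .nr = 0 };
        struct ctx c = { .entry = &e, .max_stack = 2, .max_contexts = 8 };

        store_context(&c, (uint64_t)-128);  /* PERF_CONTEXT_KERNEL */
        store(&c, 0xf001);
        store(&c, 0xf002);
        store(&c, 0xf003);                  /* rejected: nr == max_stack */
        printf("%u words stored, %u counted frames\n",
               (unsigned)e.nr, (unsigned)c.nr);
        return 0;
    }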
@@ -862,6 +862,7 @@ enum perf_event_type {
 };
 
 #define PERF_MAX_STACK_DEPTH 127
+#define PERF_MAX_CONTEXTS_PER_STACK 8
 
 enum perf_callchain_context {
     PERF_CONTEXT_HV = (__u64)-32,
......
@@ -136,7 +136,8 @@ static u64 bpf_get_stackid(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
                BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
         return -EINVAL;
 
-    trace = get_perf_callchain(regs, init_nr, kernel, user, false, false);
+    trace = get_perf_callchain(regs, init_nr, kernel, user,
+                               sysctl_perf_event_max_stack, false, false);
 
     if (unlikely(!trace))
         /* couldn't fetch the stack trace */
......
@@ -19,11 +19,13 @@ struct callchain_cpus_entries {
 };
 
 int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;
+int sysctl_perf_event_max_contexts_per_stack __read_mostly = PERF_MAX_CONTEXTS_PER_STACK;
 
 static inline size_t perf_callchain_entry__sizeof(void)
 {
     return (sizeof(struct perf_callchain_entry) +
-            sizeof(__u64) * sysctl_perf_event_max_stack);
+            sizeof(__u64) * (sysctl_perf_event_max_stack +
+                             sysctl_perf_event_max_contexts_per_stack));
 }
 
 static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
@@ -32,12 +34,12 @@ static DEFINE_MUTEX(callchain_mutex);
 static struct callchain_cpus_entries *callchain_cpus_entries;
 
-__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
+__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                                   struct pt_regs *regs)
 {
 }
 
-__weak void perf_callchain_user(struct perf_callchain_entry *entry,
+__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                                 struct pt_regs *regs)
 {
 }
@@ -176,14 +178,15 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
     if (!kernel && !user)
         return NULL;
 
-    return get_perf_callchain(regs, 0, kernel, user, crosstask, true);
+    return get_perf_callchain(regs, 0, kernel, user, sysctl_perf_event_max_stack, crosstask, true);
 }
 
 struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
-                   bool crosstask, bool add_mark)
+                   u32 max_stack, bool crosstask, bool add_mark)
 {
     struct perf_callchain_entry *entry;
+    struct perf_callchain_entry_ctx ctx;
     int rctx;
 
     entry = get_callchain_entry(&rctx);
@@ -193,12 +196,16 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
     if (!entry)
         goto exit_put;
 
-    entry->nr = init_nr;
+    ctx.entry = entry;
+    ctx.max_stack = max_stack;
+    ctx.nr = entry->nr = init_nr;
+    ctx.contexts = 0;
+    ctx.contexts_maxed = false;
 
     if (kernel && !user_mode(regs)) {
         if (add_mark)
-            perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
-        perf_callchain_kernel(entry, regs);
+            perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
+        perf_callchain_kernel(&ctx, regs);
     }
 
     if (user) {
@@ -214,8 +221,8 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
                 goto exit_put;
 
             if (add_mark)
-                perf_callchain_store(entry, PERF_CONTEXT_USER);
-            perf_callchain_user(entry, regs);
+                perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);
+            perf_callchain_user(&ctx, regs);
         }
     }
@@ -225,10 +232,15 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
     return entry;
 }
 
+/*
+ * Used for sysctl_perf_event_max_stack and
+ * sysctl_perf_event_max_contexts_per_stack.
+ */
 int perf_event_max_stack_handler(struct ctl_table *table, int write,
                                  void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-    int new_value = sysctl_perf_event_max_stack, ret;
+    int *value = table->data;
+    int new_value = *value, ret;
     struct ctl_table new_table = *table;
 
     new_table.data = &new_value;
@@ -240,7 +252,7 @@ int perf_event_max_stack_handler(struct ctl_table *table, int write,
     if (atomic_read(&nr_callchain_events))
         ret = -EBUSY;
     else
-        sysctl_perf_event_max_stack = new_value;
+        *value = new_value;
 
     mutex_unlock(&callchain_mutex);
......
@@ -1149,13 +1149,22 @@ static struct ctl_table kern_table[] = {
     },
     {
         .procname = "perf_event_max_stack",
-        .data = NULL, /* filled in by handler */
+        .data = &sysctl_perf_event_max_stack,
         .maxlen = sizeof(sysctl_perf_event_max_stack),
         .mode = 0644,
         .proc_handler = perf_event_max_stack_handler,
         .extra1 = &zero,
         .extra2 = &six_hundred_forty_kb,
     },
+    {
+        .procname = "perf_event_max_contexts_per_stack",
+        .data = &sysctl_perf_event_max_contexts_per_stack,
+        .maxlen = sizeof(sysctl_perf_event_max_contexts_per_stack),
+        .mode = 0644,
+        .proc_handler = perf_event_max_stack_handler,
+        .extra1 = &zero,
+        .extra2 = &one_thousand,
+    },
 #endif
 #ifdef CONFIG_KMEMCHECK
     {
......
@@ -119,8 +119,8 @@ static int build_id_cache__add_kcore(const char *filename, bool force)
     if (build_id_cache__kcore_buildid(from_dir, sbuildid) < 0)
         return -1;
 
-    scnprintf(to_dir, sizeof(to_dir), "%s/[kernel.kcore]/%s",
-              buildid_dir, sbuildid);
+    scnprintf(to_dir, sizeof(to_dir), "%s/%s/%s",
+              buildid_dir, DSO__NAME_KCORE, sbuildid);
 
     if (!force &&
         !build_id_cache__kcore_existing(from_dir, to_dir, sizeof(to_dir))) {
@@ -131,8 +131,8 @@ static int build_id_cache__add_kcore(const char *filename, bool force)
     if (build_id_cache__kcore_dir(dir, sizeof(dir)))
         return -1;
 
-    scnprintf(to_dir, sizeof(to_dir), "%s/[kernel.kcore]/%s/%s",
-              buildid_dir, sbuildid, dir);
+    scnprintf(to_dir, sizeof(to_dir), "%s/%s/%s/%s",
+              buildid_dir, DSO__NAME_KCORE, sbuildid, dir);
 
     if (mkdir_p(to_dir, 0755))
         return -1;
......
@@ -66,6 +66,7 @@
 #include <stdlib.h>
 #include <sys/prctl.h>
 #include <locale.h>
+#include <math.h>
 
 #define DEFAULT_SEPARATOR " "
 #define CNTR_NOT_SUPPORTED "<not supported>"
@@ -991,12 +992,12 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
     const char *fmt;
 
     if (csv_output) {
-        fmt = sc != 1.0 ? "%.2f%s" : "%.0f%s";
+        fmt = floor(sc) != sc ? "%.2f%s" : "%.0f%s";
     } else {
         if (big_num)
-            fmt = sc != 1.0 ? "%'18.2f%s" : "%'18.0f%s";
+            fmt = floor(sc) != sc ? "%'18.2f%s" : "%'18.0f%s";
         else
-            fmt = sc != 1.0 ? "%18.2f%s" : "%18.0f%s";
+            fmt = floor(sc) != sc ? "%18.2f%s" : "%18.0f%s";
     }
 
     aggr_printout(evsel, id, nr);
@@ -1909,6 +1910,9 @@ static int add_default_attributes(void)
     }
 
     if (!evsel_list->nr_entries) {
+        if (target__has_cpu(&target))
+            default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
+
         if (perf_evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
             return -1;
         if (pmu_have_event("cpu", "stalled-cycles-frontend")) {
@@ -2000,7 +2004,7 @@ static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event,
                                     struct perf_session *session)
 {
-    struct stat_round_event *round = &event->stat_round;
+    struct stat_round_event *stat_round = &event->stat_round;
     struct perf_evsel *counter;
     struct timespec tsh, *ts = NULL;
     const char **argv = session->header.env.cmdline_argv;
@@ -2009,12 +2013,12 @@ static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
     evlist__for_each(evsel_list, counter)
         perf_stat_process_counter(&stat_config, counter);
 
-    if (round->type == PERF_STAT_ROUND_TYPE__FINAL)
-        update_stats(&walltime_nsecs_stats, round->time);
+    if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
+        update_stats(&walltime_nsecs_stats, stat_round->time);
 
-    if (stat_config.interval && round->time) {
-        tsh.tv_sec = round->time / NSECS_PER_SEC;
-        tsh.tv_nsec = round->time % NSECS_PER_SEC;
+    if (stat_config.interval && stat_round->time) {
+        tsh.tv_sec = stat_round->time / NSECS_PER_SEC;
+        tsh.tv_nsec = stat_round->time % NSECS_PER_SEC;
         ts = &tsh;
     }
......
@@ -549,6 +549,9 @@ int main(int argc, const char **argv)
     if (sysctl__read_int("kernel/perf_event_max_stack", &value) == 0)
         sysctl_perf_event_max_stack = value;
 
+    if (sysctl__read_int("kernel/perf_event_max_contexts_per_stack", &value) == 0)
+        sysctl_perf_event_max_contexts_per_stack = value;
+
     cmd = extract_argv0_path(argv[0]);
     if (!cmd)
         cmd = "perf-help";
......
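tools/perf mirrors the kernel values at startup via sysctl__read_int(); a
minimal stand-alone re-implementation, only to show the name-to-path mapping
(this is a sketch, not perf's actual helper):

    #include <stdio.h>

    static int read_int_sysctl(const char *name, int *value)
    {
        char path[256];
        FILE *f;

        /* "kernel/perf_event_max_stack" -> "/proc/sys/kernel/..." */
        snprintf(path, sizeof(path), "/proc/sys/%s", name);
        f = fopen(path, "r");
        if (!f)
            return -1;
        if (fscanf(f, "%d", value) != 1) {
            fclose(f);
            return -1;
        }
        fclose(f);
        return 0;
    }

    int main(void)
    {
        int value;

        if (read_int_sysctl("kernel/perf_event_max_contexts_per_stack",
                            &value) == 0)
            printf("max contexts per stack: %d\n", value);
        return 0;
    }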
@@ -1122,7 +1122,7 @@ int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize)
     } else if (dso__is_kcore(dso)) {
         goto fallback;
     } else if (readlink(symfs_filename, command, sizeof(command)) < 0 ||
-               strstr(command, "[kernel.kallsyms]") ||
+               strstr(command, DSO__NAME_KALLSYMS) ||
                access(symfs_filename, R_OK)) {
         free(filename);
 fallback:
......
@@ -256,7 +256,7 @@ static int machine__write_buildid_table(struct machine *machine, int fd)
         size_t name_len;
         bool in_kernel = false;
 
-        if (!pos->hit)
+        if (!pos->hit && !dso__is_vdso(pos))
             continue;
 
         if (dso__is_vdso(pos)) {
......
@@ -7,6 +7,7 @@
 #include "auxtrace.h"
 #include "util.h"
 #include "debug.h"
+#include "vdso.h"
 
 char dso__symtab_origin(const struct dso *dso)
 {
@@ -1169,7 +1170,7 @@ bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
     struct dso *pos;
 
     list_for_each_entry(pos, head, node) {
-        if (with_hits && !pos->hit)
+        if (with_hits && !pos->hit && !dso__is_vdso(pos))
             continue;
         if (pos->has_build_id) {
             have_build_id = true;
......
@@ -709,7 +709,7 @@ static struct dso *machine__get_kernel(struct machine *machine)
     if (machine__is_host(machine)) {
         vmlinux_name = symbol_conf.vmlinux_name;
         if (!vmlinux_name)
-            vmlinux_name = "[kernel.kallsyms]";
+            vmlinux_name = DSO__NAME_KALLSYMS;
 
         kernel = machine__findnew_kernel(machine, vmlinux_name,
                                          "[kernel]", DSO_TYPE_KERNEL);
@@ -1811,9 +1811,9 @@ static int thread__resolve_callchain_sample(struct thread *thread,
 {
     struct branch_stack *branch = sample->branch_stack;
     struct ip_callchain *chain = sample->callchain;
-    int chain_nr = min(max_stack, (int)chain->nr);
+    int chain_nr = chain->nr;
     u8 cpumode = PERF_RECORD_MISC_USER;
-    int i, j, err;
+    int i, j, err, nr_entries, nr_contexts;
     int skip_idx = -1;
     int first_call = 0;
@@ -1828,7 +1828,7 @@ static int thread__resolve_callchain_sample(struct thread *thread,
      * Based on DWARF debug information, some architectures skip
      * a callchain entry saved by the kernel.
      */
-    if (chain->nr < sysctl_perf_event_max_stack)
+    if (chain_nr < sysctl_perf_event_max_stack)
         skip_idx = arch_skip_callchain_idx(thread, chain);
 
     /*
@@ -1889,12 +1889,8 @@ static int thread__resolve_callchain_sample(struct thread *thread,
     }
 
 check_calls:
-    if (chain->nr > sysctl_perf_event_max_stack && (int)chain->nr > max_stack) {
-        pr_warning("corrupted callchain. skipping...\n");
-        return 0;
-    }
-
-    for (i = first_call; i < chain_nr; i++) {
+    for (i = first_call, nr_entries = 0, nr_contexts = 0;
+         i < chain_nr && nr_entries < max_stack; i++) {
         u64 ip;
 
         if (callchain_param.order == ORDER_CALLEE)
@@ -1908,6 +1904,14 @@ static int thread__resolve_callchain_sample(struct thread *thread,
 #endif
         ip = chain->ips[j];
 
+        if (ip >= PERF_CONTEXT_MAX) {
+            if (++nr_contexts > sysctl_perf_event_max_contexts_per_stack)
+                goto out_corrupted_callchain;
+        } else {
+            if (++nr_entries > sysctl_perf_event_max_stack)
+                goto out_corrupted_callchain;
+        }
+
         err = add_callchain_ip(thread, cursor, parent, root_al, &cpumode, ip);
 
         if (err)
@@ -1915,6 +1919,10 @@ static int thread__resolve_callchain_sample(struct thread *thread,
     }
 
     return 0;
+
+out_corrupted_callchain:
+    pr_warning("corrupted callchain. skipping...\n");
+    return 0;
 }
 
 static int unwind_entry(struct unwind_entry *entry, void *arg)
......
@@ -94,7 +94,8 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
 {
     int ctx = evsel_context(counter);
 
-    if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
+    if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK) ||
+        perf_evsel__match(counter, SOFTWARE, SW_CPU_CLOCK))
         update_stats(&runtime_nsecs_stats[cpu], count[0]);
     else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
         update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
@@ -188,7 +189,7 @@ static void print_stalled_cycles_backend(int cpu,
 
     color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);
 
-    out->print_metric(out->ctx, color, "%6.2f%%", "backend cycles idle", ratio);
+    out->print_metric(out->ctx, color, "%7.2f%%", "backend cycles idle", ratio);
 }
 
 static void print_branch_misses(int cpu,
@@ -444,7 +445,8 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
         ratio = total / avg;
 
         print_metric(ctxp, NULL, "%8.0f", "cycles / elision", ratio);
-    } else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) {
+    } else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK) ||
+               perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK)) {
         if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
             print_metric(ctxp, NULL, "%8.3f", "CPUs utilized",
                          avg / ratio);
......
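With cpu-clock now feeding runtime_nsecs_stats too, the "CPUs utilized"
metric is simply counter nanoseconds divided by wall-clock nanoseconds; a toy
computation with invented numbers:

    #include <stdio.h>

    int main(void)
    {
        double counter_nsecs  = 8.0e9;  /* invented: 8s of cpu-clock  */
        double walltime_nsecs = 2.0e9;  /* invented: 2s of wall clock */

        /* matches the avg / ratio computation in the hunk above */
        printf("%8.3f CPUs utilized\n", counter_nsecs / walltime_nsecs);
        return 0;
    }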
@@ -1662,8 +1662,8 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
 
     build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
 
-    scnprintf(path, sizeof(path), "%s/[kernel.kcore]/%s", buildid_dir,
-              sbuild_id);
+    scnprintf(path, sizeof(path), "%s/%s/%s", buildid_dir,
+              DSO__NAME_KCORE, sbuild_id);
 
     /* Use /proc/kallsyms if possible */
     if (is_host) {
@@ -1699,8 +1699,8 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
     if (!find_matching_kcore(map, path, sizeof(path)))
         return strdup(path);
 
-    scnprintf(path, sizeof(path), "%s/[kernel.kallsyms]/%s",
-              buildid_dir, sbuild_id);
+    scnprintf(path, sizeof(path), "%s/%s/%s",
+              buildid_dir, DSO__NAME_KALLSYMS, sbuild_id);
 
     if (access(path, F_OK)) {
         pr_err("No kallsyms or vmlinux with build-id %s was found\n",
@@ -1769,7 +1769,7 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map,
     if (err > 0 && !dso__is_kcore(dso)) {
         dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
-        dso__set_long_name(dso, "[kernel.kallsyms]", false);
+        dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
         map__fixup_start(map);
         map__fixup_end(map);
     }
......
@@ -44,6 +44,9 @@ Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
 #define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */
 #endif
 
+#define DSO__NAME_KALLSYMS "[kernel.kallsyms]"
+#define DSO__NAME_KCORE "[kernel.kcore]"
+
 /** struct symbol - symtab entry
  *
  * @ignore - resolvable but tools ignore it (e.g. idle routines)
......
@@ -33,7 +33,8 @@ struct callchain_param callchain_param = {
 
 unsigned int page_size;
 int cacheline_size;
-unsigned int sysctl_perf_event_max_stack = PERF_MAX_STACK_DEPTH;
+int sysctl_perf_event_max_stack = PERF_MAX_STACK_DEPTH;
+int sysctl_perf_event_max_contexts_per_stack = PERF_MAX_CONTEXTS_PER_STACK;
 
 bool test_attr__enabled;
......
@@ -261,7 +261,8 @@ void sighandler_dump_stack(int sig);
 extern unsigned int page_size;
 extern int cacheline_size;
-extern unsigned int sysctl_perf_event_max_stack;
+extern int sysctl_perf_event_max_stack;
+extern int sysctl_perf_event_max_contexts_per_stack;
 
 struct parse_tag {
     char tag;
......