Commit f8ef15d6 authored by Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Ingo Molnar.

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86: Add Intel IvyBridge event scheduling constraints
  ftrace: Call ftrace cleanup module notifier after all other notifiers
  tracing/syscalls: Allow archs to ignore tracing compat syscalls
parents 6515925b ff1fb5f6
arch/x86/include/asm/ftrace.h
@@ -72,4 +72,28 @@ int ftrace_int3_handler(struct pt_regs *regs);
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
+
+#if !defined(__ASSEMBLY__) && !defined(COMPILE_OFFSETS)
+
+#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION)
+#include <asm/compat.h>
+
+/*
+ * Because ia32 syscalls do not map to x86_64 syscall numbers,
+ * this screws up the trace output when tracing an ia32 task.
+ * Instead of reporting bogus syscalls, just do not trace them.
+ *
+ * If the user really wants these, then they should use the
+ * raw syscall tracepoints with filtering.
+ */
+#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1
+static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
+{
+	if (is_compat_task())
+		return true;
+	return false;
+}
+#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */
+#endif /* !__ASSEMBLY__ && !COMPILE_OFFSETS */
+
 #endif /* _ASM_X86_FTRACE_H */
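The mismatch the comment above describes is easy to see in the tables themselves: on x86_64 syscall number 3 is close(), while in the ia32 ABI number 3 is read(), so decoding an ia32 task's syscall number against the 64-bit table names the wrong call. A minimal userspace sketch of the clash (the table fragment matches the real tables; the program itself is only an illustration, not kernel code):

#include <stdio.h>

int main(void)
{
	/* First entries of the x86_64 syscall table. */
	static const char * const x86_64_names[] = {
		"read", "write", "open", "close",
	};
	/* __NR_read in the ia32 ABI is 3, not 0. */
	int ia32_read_nr = 3;

	/* Decoding the ia32 number against the 64-bit table is bogus: */
	printf("ia32 read (nr %d) would be reported as \"%s\"\n",
	       ia32_read_nr, x86_64_names[ia32_read_nr]);
	return 0;
}

Compiled and run, this reports an ia32 read() as "close", which is exactly the bogus output the patch suppresses.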
arch/x86/include/asm/thread_info.h
@@ -20,7 +20,6 @@
 struct task_struct;
 struct exec_domain;
 #include <asm/processor.h>
-#include <asm/ftrace.h>
 #include <linux/atomic.h>

 struct thread_info {
...
arch/x86/kernel/cpu/perf_event_intel.c
@@ -107,6 +107,27 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
 	EVENT_CONSTRAINT_END
 };

+static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
+{
+	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
+	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
+	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
+	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
+	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
+	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
+	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
+	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
+	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
+	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
+	INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
+	INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
+	INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
+	INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
+	EVENT_CONSTRAINT_END
+};
+
 static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
 {
 	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
@@ -2095,7 +2116,7 @@ __init int intel_pmu_init(void)

 		intel_pmu_lbr_init_snb();

-		x86_pmu.event_constraints = intel_snb_event_constraints;
+		x86_pmu.event_constraints = intel_ivb_event_constraints;
 		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
 		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
 		x86_pmu.extra_regs = intel_snb_extra_regs;
...
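For readers decoding the constraint table above: the first macro argument packs the event select in bits 0-7 and the unit mask in bits 8-15, and the second argument is a bitmask of the general-purpose counters the event is allowed to use, so 0x4 pins an event to counter 2 while 0xf allows counters 0-3. A standalone sketch of that decoding, using our own helper name rather than any kernel API:

#include <stdio.h>

/* Decode the (code, counter-mask) pairs used above: event select in
 * bits 0-7 of the code, unit mask (umask) in bits 8-15; cntmask is a
 * bitmask of the general-purpose counters the event may run on. */
static void decode_constraint(unsigned int code, unsigned int cntmask)
{
	unsigned int event = code & 0xff;
	unsigned int umask = (code >> 8) & 0xff;

	printf("event=0x%02x umask=0x%02x counters:", event, umask);
	for (int i = 0; i < 4; i++)
		if (cntmask & (1u << i))
			printf(" %d", i);
	printf("\n");
}

int main(void)
{
	decode_constraint(0x0148, 0x4); /* L1D_PEND_MISS.PENDING: counter 2 only */
	decode_constraint(0x01c0, 0x2); /* INST_RETIRED.PREC_DIST: counter 1 only */
	return 0;
}

The point of the table is that some IvyBridge events only count correctly on a subset of counters, and the mask tells the perf event scheduler which subset that is.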
kernel/trace/ftrace.c
@@ -3996,37 +3996,51 @@ static void ftrace_init_module(struct module *mod,
 	ftrace_process_locs(mod, start, end);
 }
-static int ftrace_module_notify(struct notifier_block *self,
-				unsigned long val, void *data)
+static int ftrace_module_notify_enter(struct notifier_block *self,
+				      unsigned long val, void *data)
 {
 	struct module *mod = data;

-	switch (val) {
-	case MODULE_STATE_COMING:
+	if (val == MODULE_STATE_COMING)
 		ftrace_init_module(mod, mod->ftrace_callsites,
 				   mod->ftrace_callsites +
 				   mod->num_ftrace_callsites);
-		break;
-	case MODULE_STATE_GOING:
+	return 0;
+}
+
+static int ftrace_module_notify_exit(struct notifier_block *self,
+				     unsigned long val, void *data)
+{
+	struct module *mod = data;
+
+	if (val == MODULE_STATE_GOING)
 		ftrace_release_mod(mod);
-		break;
-	}

 	return 0;
 }
 #else
-static int ftrace_module_notify(struct notifier_block *self,
-				unsigned long val, void *data)
+static int ftrace_module_notify_enter(struct notifier_block *self,
+				      unsigned long val, void *data)
+{
+	return 0;
+}
+
+static int ftrace_module_notify_exit(struct notifier_block *self,
+				     unsigned long val, void *data)
 {
 	return 0;
 }
 #endif /* CONFIG_MODULES */
-struct notifier_block ftrace_module_nb = {
-	.notifier_call = ftrace_module_notify,
+struct notifier_block ftrace_module_enter_nb = {
+	.notifier_call = ftrace_module_notify_enter,
 	.priority = INT_MAX,	/* Run before anything that can use kprobes */
 };
+
+struct notifier_block ftrace_module_exit_nb = {
+	.notifier_call = ftrace_module_notify_exit,
+	.priority = INT_MIN,	/* Run after anything that can remove kprobes */
+};

 extern unsigned long __start_mcount_loc[];
 extern unsigned long __stop_mcount_loc[];
@@ -4058,9 +4072,13 @@ void __init ftrace_init(void)
 				  __start_mcount_loc,
 				  __stop_mcount_loc);
ret = register_module_notifier(&ftrace_module_nb); ret = register_module_notifier(&ftrace_module_enter_nb);
if (ret)
pr_warning("Failed to register trace ftrace module enter notifier\n");
ret = register_module_notifier(&ftrace_module_exit_nb);
if (ret) if (ret)
pr_warning("Failed to register trace ftrace module notifier\n"); pr_warning("Failed to register trace ftrace module exit notifier\n");

 	set_ftrace_early_filters();
...
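The reason for splitting the single notifier into two is ordering: a notifier chain invokes callbacks from highest to lowest priority, so the enter notifier at INT_MAX initializes ftrace records before any kprobes user sees MODULE_STATE_COMING, while the exit notifier at INT_MIN frees them only after every kprobes user has handled MODULE_STATE_GOING. A toy model of that ordering (a plain sorted array, not the kernel's notifier implementation):

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

struct toy_notifier {
	int priority;
	const char *name;
};

/* Sort descending by priority, the order a notifier chain calls in. */
static int by_desc_priority(const void *a, const void *b)
{
	const struct toy_notifier *na = a, *nb = b;
	return (nb->priority > na->priority) - (nb->priority < na->priority);
}

int main(void)
{
	/* The same three parties as in the patch above. */
	struct toy_notifier chain[] = {
		{ 0,       "kprobes user"        },
		{ INT_MIN, "ftrace module exit"  },
		{ INT_MAX, "ftrace module enter" },
	};

	qsort(chain, 3, sizeof(chain[0]), by_desc_priority);
	for (int i = 0; i < 3; i++)
		printf("%d: %s\n", i, chain[i].name);
	return 0;
}

With the old single notifier, cleanup necessarily ran at the same highest priority as initialization, i.e. before kprobes users had let go of the module's ftrace records; the split lets the two halves bracket everyone else.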
kernel/trace/trace_syscalls.c
 #include <trace/syscall.h>
 #include <trace/events/syscalls.h>
+#include <linux/syscalls.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
@@ -47,6 +48,38 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name
 }
 #endif
+#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
+/*
+ * Some architectures that allow for 32bit applications
+ * to run on a 64bit kernel do not map the syscalls for
+ * the 32bit tasks the same as they do for 64bit tasks.
+ *
+ *     *cough*x86*cough*
+ *
+ * In such a case, instead of reporting the wrong syscalls,
+ * simply ignore them.
+ *
+ * For an arch to ignore the compat syscalls it needs to
+ * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as
+ * define the function arch_trace_is_compat_syscall() to let
+ * the tracing system know that it should ignore it.
+ */
+static int
+trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
+{
+	if (unlikely(arch_trace_is_compat_syscall(regs)))
+		return -1;
+
+	return syscall_get_nr(task, regs);
+}
+#else
+static inline int
+trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
+{
+	return syscall_get_nr(task, regs);
+}
+#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */
+
 static __init struct syscall_metadata *
 find_syscall_meta(unsigned long syscall)
 {
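All four tracepoint handlers below funnel through trace_get_syscall_nr() and treat a negative return as "do not trace", so the compat check lives in exactly one place. A tiny userspace sketch of that gating (stand-in globals replace the current task and pt_regs; nothing here is kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for kernel state. */
static bool task_is_compat;	/* what arch_trace_is_compat_syscall() checks */
static int raw_nr = 3;		/* what syscall_get_nr() would return */

static int demo_trace_get_syscall_nr(void)
{
	if (task_is_compat)
		return -1;	/* wrong table: report nothing, not bogus names */
	return raw_nr;
}

int main(void)
{
	for (int compat = 0; compat <= 1; compat++) {
		task_is_compat = compat;
		int nr = demo_trace_get_syscall_nr();

		/* The same early return every handler below uses. */
		if (nr < 0)
			printf("compat task: syscall ignored\n");
		else
			printf("native task: tracing syscall %d\n", nr);
	}
	return 0;
}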
@@ -276,10 +309,10 @@ static void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
 	struct syscall_metadata *sys_data;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
-	int size;
 	int syscall_nr;
+	int size;

-	syscall_nr = syscall_get_nr(current, regs);
+	syscall_nr = trace_get_syscall_nr(current, regs);
 	if (syscall_nr < 0)
 		return;
 	if (!test_bit(syscall_nr, enabled_enter_syscalls))
@@ -313,7 +346,7 @@ static void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
 	struct ring_buffer *buffer;
 	int syscall_nr;

-	syscall_nr = syscall_get_nr(current, regs);
+	syscall_nr = trace_get_syscall_nr(current, regs);
 	if (syscall_nr < 0)
 		return;
 	if (!test_bit(syscall_nr, enabled_exit_syscalls))
@@ -502,7 +535,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
 	int rctx;
 	int size;

-	syscall_nr = syscall_get_nr(current, regs);
+	syscall_nr = trace_get_syscall_nr(current, regs);
 	if (syscall_nr < 0)
 		return;
 	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
@@ -578,7 +611,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
 	int rctx;
 	int size;

-	syscall_nr = syscall_get_nr(current, regs);
+	syscall_nr = trace_get_syscall_nr(current, regs);
 	if (syscall_nr < 0)
 		return;
 	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
...