Commit 9b3e7c9b authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Ingo Molnar:
 "Misc fixlets from all around the place"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86/uncore: Fix IVT/SNB-EP uncore CBOX NID filter table
  perf/x86: Correctly use FEATURE_PDCM
  perf, nmi: Fix unknown NMI warning
  perf trace: Fix ioctl 'request' beautifier build problems on !(i386 || x86_64) arches
  perf trace: Add fallback definition of EFD_SEMAPHORE
  perf list: Fix checking for supported events on older kernels
  perf tools: Handle PERF_RECORD_HEADER_EVENT_TYPE properly
  perf probe: Do not add offset twice to uprobe address
  perf/x86: Fix Userspace RDPMC switch
  perf/x86/intel/p6: Add userspace RDPMC quirk for PPro
parents 0f0ca143 a9d3f94e
...@@ -1521,6 +1521,8 @@ static int __init init_hw_perf_events(void) ...@@ -1521,6 +1521,8 @@ static int __init init_hw_perf_events(void)
pr_cont("%s PMU driver.\n", x86_pmu.name); pr_cont("%s PMU driver.\n", x86_pmu.name);
x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next) for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
quirk->func(); quirk->func();
...@@ -1534,7 +1536,6 @@ static int __init init_hw_perf_events(void) ...@@ -1534,7 +1536,6 @@ static int __init init_hw_perf_events(void)
__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1, __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
0, x86_pmu.num_counters, 0, 0); 0, x86_pmu.num_counters, 0, 0);
x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
x86_pmu_format_group.attrs = x86_pmu.format_attrs; x86_pmu_format_group.attrs = x86_pmu.format_attrs;
if (x86_pmu.event_attrs) if (x86_pmu.event_attrs)
...@@ -1820,9 +1821,12 @@ static ssize_t set_attr_rdpmc(struct device *cdev, ...@@ -1820,9 +1821,12 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
if (ret) if (ret)
return ret; return ret;
if (x86_pmu.attr_rdpmc_broken)
return -ENOTSUPP;
if (!!val != !!x86_pmu.attr_rdpmc) { if (!!val != !!x86_pmu.attr_rdpmc) {
x86_pmu.attr_rdpmc = !!val; x86_pmu.attr_rdpmc = !!val;
smp_call_function(change_rdpmc, (void *)val, 1); on_each_cpu(change_rdpmc, (void *)val, 1);
} }
return count; return count;
......
...@@ -409,6 +409,7 @@ struct x86_pmu { ...@@ -409,6 +409,7 @@ struct x86_pmu {
/* /*
* sysfs attrs * sysfs attrs
*/ */
int attr_rdpmc_broken;
int attr_rdpmc; int attr_rdpmc;
struct attribute **format_attrs; struct attribute **format_attrs;
struct attribute **event_attrs; struct attribute **event_attrs;
......
...@@ -1361,10 +1361,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) ...@@ -1361,10 +1361,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
intel_pmu_disable_all(); intel_pmu_disable_all();
handled = intel_pmu_drain_bts_buffer(); handled = intel_pmu_drain_bts_buffer();
status = intel_pmu_get_status(); status = intel_pmu_get_status();
if (!status) { if (!status)
intel_pmu_enable_all(0); goto done;
return handled;
}
loops = 0; loops = 0;
again: again:
...@@ -2310,10 +2308,7 @@ __init int intel_pmu_init(void) ...@@ -2310,10 +2308,7 @@ __init int intel_pmu_init(void)
if (version > 1) if (version > 1)
x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3); x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
/* if (boot_cpu_has(X86_FEATURE_PDCM)) {
* v2 and above have a perf capabilities MSR
*/
if (version > 1) {
u64 capabilities; u64 capabilities;
rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities); rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
......
...@@ -501,8 +501,11 @@ static struct extra_reg snbep_uncore_cbox_extra_regs[] = { ...@@ -501,8 +501,11 @@ static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
SNBEP_CBO_PMON_CTL_TID_EN, 0x1), SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6), SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8), SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8), SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
...@@ -1178,10 +1181,15 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = { ...@@ -1178,10 +1181,15 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
SNBEP_CBO_PMON_CTL_TID_EN, 0x1), SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2), SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc), SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10), SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10), SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10), SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
......
...@@ -231,31 +231,49 @@ static __initconst const struct x86_pmu p6_pmu = { ...@@ -231,31 +231,49 @@ static __initconst const struct x86_pmu p6_pmu = {
}; };
static __init void p6_pmu_rdpmc_quirk(void)
{
if (boot_cpu_data.x86_mask < 9) {
/*
* PPro erratum 26; fixed in stepping 9 and above.
*/
pr_warn("Userspace RDPMC support disabled due to a CPU erratum\n");
x86_pmu.attr_rdpmc_broken = 1;
x86_pmu.attr_rdpmc = 0;
}
}
__init int p6_pmu_init(void) __init int p6_pmu_init(void)
{ {
x86_pmu = p6_pmu;
switch (boot_cpu_data.x86_model) { switch (boot_cpu_data.x86_model) {
case 1: case 1: /* Pentium Pro */
case 3: /* Pentium Pro */ x86_add_quirk(p6_pmu_rdpmc_quirk);
case 5: break;
case 6: /* Pentium II */
case 7: case 3: /* Pentium II - Klamath */
case 8: case 5: /* Pentium II - Deschutes */
case 11: /* Pentium III */ case 6: /* Pentium II - Mendocino */
case 9: break;
case 13:
/* Pentium M */ case 7: /* Pentium III - Katmai */
case 8: /* Pentium III - Coppermine */
case 10: /* Pentium III Xeon */
case 11: /* Pentium III - Tualatin */
break;
case 9: /* Pentium M - Banias */
case 13: /* Pentium M - Dothan */
break; break;
default: default:
pr_cont("unsupported p6 CPU model %d ", pr_cont("unsupported p6 CPU model %d ", boot_cpu_data.x86_model);
boot_cpu_data.x86_model);
return -ENODEV; return -ENODEV;
} }
x86_pmu = p6_pmu;
memcpy(hw_cache_event_ids, p6_hw_cache_event_ids, memcpy(hw_cache_event_ids, p6_hw_cache_event_ids,
sizeof(hw_cache_event_ids)); sizeof(hw_cache_event_ids));
return 0; return 0;
} }
...@@ -37,6 +37,10 @@ ...@@ -37,6 +37,10 @@
# define MADV_UNMERGEABLE 13 # define MADV_UNMERGEABLE 13
#endif #endif
#ifndef EFD_SEMAPHORE
# define EFD_SEMAPHORE 1
#endif
struct tp_field { struct tp_field {
int offset; int offset;
union { union {
...@@ -279,6 +283,11 @@ static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size, ...@@ -279,6 +283,11 @@ static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
#define SCA_STRARRAY syscall_arg__scnprintf_strarray #define SCA_STRARRAY syscall_arg__scnprintf_strarray
#if defined(__i386__) || defined(__x86_64__)
/*
* FIXME: Make this available to all arches as soon as the ioctl beautifier
* gets rewritten to support all arches.
*/
static size_t syscall_arg__scnprintf_strhexarray(char *bf, size_t size, static size_t syscall_arg__scnprintf_strhexarray(char *bf, size_t size,
struct syscall_arg *arg) struct syscall_arg *arg)
{ {
...@@ -286,6 +295,7 @@ static size_t syscall_arg__scnprintf_strhexarray(char *bf, size_t size, ...@@ -286,6 +295,7 @@ static size_t syscall_arg__scnprintf_strhexarray(char *bf, size_t size,
} }
#define SCA_STRHEXARRAY syscall_arg__scnprintf_strhexarray #define SCA_STRHEXARRAY syscall_arg__scnprintf_strhexarray
#endif /* defined(__i386__) || defined(__x86_64__) */
static size_t syscall_arg__scnprintf_fd(char *bf, size_t size, static size_t syscall_arg__scnprintf_fd(char *bf, size_t size,
struct syscall_arg *arg); struct syscall_arg *arg);
...@@ -839,6 +849,10 @@ static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscal ...@@ -839,6 +849,10 @@ static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscal
#define SCA_SIGNUM syscall_arg__scnprintf_signum #define SCA_SIGNUM syscall_arg__scnprintf_signum
#if defined(__i386__) || defined(__x86_64__)
/*
* FIXME: Make this available to all arches.
*/
#define TCGETS 0x5401 #define TCGETS 0x5401
static const char *tioctls[] = { static const char *tioctls[] = {
...@@ -860,6 +874,7 @@ static const char *tioctls[] = { ...@@ -860,6 +874,7 @@ static const char *tioctls[] = {
}; };
static DEFINE_STRARRAY_OFFSET(tioctls, 0x5401); static DEFINE_STRARRAY_OFFSET(tioctls, 0x5401);
#endif /* defined(__i386__) || defined(__x86_64__) */
#define STRARRAY(arg, name, array) \ #define STRARRAY(arg, name, array) \
.arg_scnprintf = { [arg] = SCA_STRARRAY, }, \ .arg_scnprintf = { [arg] = SCA_STRARRAY, }, \
...@@ -941,9 +956,16 @@ static struct syscall_fmt { ...@@ -941,9 +956,16 @@ static struct syscall_fmt {
{ .name = "getrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), }, { .name = "getrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
{ .name = "ioctl", .errmsg = true, { .name = "ioctl", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */ .arg_scnprintf = { [0] = SCA_FD, /* fd */
#if defined(__i386__) || defined(__x86_64__)
/*
* FIXME: Make this available to all arches.
*/
[1] = SCA_STRHEXARRAY, /* cmd */ [1] = SCA_STRHEXARRAY, /* cmd */
[2] = SCA_HEX, /* arg */ }, [2] = SCA_HEX, /* arg */ },
.arg_parm = { [1] = &strarray__tioctls, /* cmd */ }, }, .arg_parm = { [1] = &strarray__tioctls, /* cmd */ }, },
#else
[2] = SCA_HEX, /* arg */ }, },
#endif
{ .name = "kill", .errmsg = true, { .name = "kill", .errmsg = true,
.arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, }, .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
{ .name = "linkat", .errmsg = true, { .name = "linkat", .errmsg = true,
......
...@@ -1091,12 +1091,12 @@ int is_valid_tracepoint(const char *event_string) ...@@ -1091,12 +1091,12 @@ int is_valid_tracepoint(const char *event_string)
static bool is_event_supported(u8 type, unsigned config) static bool is_event_supported(u8 type, unsigned config)
{ {
bool ret = true; bool ret = true;
int open_return;
struct perf_evsel *evsel; struct perf_evsel *evsel;
struct perf_event_attr attr = { struct perf_event_attr attr = {
.type = type, .type = type,
.config = config, .config = config,
.disabled = 1, .disabled = 1,
.exclude_kernel = 1,
}; };
struct { struct {
struct thread_map map; struct thread_map map;
...@@ -1108,7 +1108,20 @@ static bool is_event_supported(u8 type, unsigned config) ...@@ -1108,7 +1108,20 @@ static bool is_event_supported(u8 type, unsigned config)
evsel = perf_evsel__new(&attr); evsel = perf_evsel__new(&attr);
if (evsel) { if (evsel) {
open_return = perf_evsel__open(evsel, NULL, &tmap.map);
ret = open_return >= 0;
if (open_return == -EACCES) {
/*
* This happens if the paranoid value
* /proc/sys/kernel/perf_event_paranoid is set to 2
* Re-run with exclude_kernel set; we don't do that
* by default as some ARM machines do not support it.
*
*/
evsel->attr.exclude_kernel = 1;
ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0; ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0;
}
perf_evsel__delete(evsel); perf_evsel__delete(evsel);
} }
......
...@@ -336,8 +336,8 @@ static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs, ...@@ -336,8 +336,8 @@ static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
return ret; return ret;
for (i = 0; i < ntevs && ret >= 0; i++) { for (i = 0; i < ntevs && ret >= 0; i++) {
		/* point.address is the address of point.symbol + point.offset */
offset = tevs[i].point.address - stext; offset = tevs[i].point.address - stext;
offset += tevs[i].point.offset;
tevs[i].point.offset = 0; tevs[i].point.offset = 0;
zfree(&tevs[i].point.symbol); zfree(&tevs[i].point.symbol);
ret = e_snprintf(buf, 32, "0x%lx", offset); ret = e_snprintf(buf, 32, "0x%lx", offset);
......
...@@ -1008,6 +1008,12 @@ static int perf_session__process_user_event(struct perf_session *session, union ...@@ -1008,6 +1008,12 @@ static int perf_session__process_user_event(struct perf_session *session, union
if (err == 0) if (err == 0)
perf_session__set_id_hdr_size(session); perf_session__set_id_hdr_size(session);
return err; return err;
case PERF_RECORD_HEADER_EVENT_TYPE:
/*
		 * Deprecated, but we need to handle it for the sake
		 * of old data files created in pipe mode.
*/
return 0;
case PERF_RECORD_HEADER_TRACING_DATA: case PERF_RECORD_HEADER_TRACING_DATA:
/* setup for reading amidst mmap */ /* setup for reading amidst mmap */
lseek(fd, file_offset, SEEK_SET); lseek(fd, file_offset, SEEK_SET);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment