Commit 19b3340c authored by Peter Zijlstra, committed by Ingo Molnar

perf/x86: Fix MSR PMU driver

Currently we only update the sysfs event files per available MSR; we
don't actually disallow creating unlisted events.

Rework things such that the detection, sysfs listing and event
creation are better coordinated.

Sadly it appears it's impossible to probe R/O MSRs under virt. This
means we have to keep the full model table to avoid listing all MSRs
all the time.
Tested-by: Kan Liang <kan.liang@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Andy Lutomirski <luto@amacapital.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 3d325bf0
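[Editorial note] The user-visible effect of the new "if (!msr[cfg].attr)" check in msr_event_init() below is that perf_event_open() now returns -EINVAL for events the probe did not list, instead of letting them count a possibly bogus MSR. A minimal userspace sketch, not part of the commit: it assumes the standard perf sysfs ABI (the dynamic PMU type is read from /sys/bus/event_source/devices/msr/type) and uses config 0x04 per the driver's "event=0x04" string for smi.

/* Sketch: open the msr PMU's "smi" event via perf_event_open(2).
 * Needs sufficient perf privileges; counts CPU-wide on CPU 0. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	FILE *f;
	int type, fd;

	/* Dynamic PMUs get their type id assigned at registration. */
	f = fopen("/sys/bus/event_source/devices/msr/type", "r");
	if (!f || fscanf(f, "%d", &type) != 1)
		return 1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0x04;	/* smi; an unlisted config now gets -EINVAL */

	/* pid = -1, cpu = 0: CPU-wide counter; the msr PMU cannot sample. */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");	/* EINVAL if smi wasn't probed */
		return 1;
	}

	sleep(1);
	read(fd, &count, sizeof(count));	/* default read_format: bare u64 */
	printf("smi count: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}

On a model without SMI_COUNT support (or under virt, where the probe fails), the open now fails cleanly rather than silently creating an unlisted event.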
@@ -10,17 +10,63 @@ enum perf_msr_id {
 	PERF_MSR_EVENT_MAX,
 };
 
+bool test_aperfmperf(int idx)
+{
+	return boot_cpu_has(X86_FEATURE_APERFMPERF);
+}
+
+bool test_intel(int idx)
+{
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+	    boot_cpu_data.x86 != 6)
+		return false;
+
+	switch (boot_cpu_data.x86_model) {
+	case 30: /* 45nm Nehalem    */
+	case 26: /* 45nm Nehalem-EP */
+	case 46: /* 45nm Nehalem-EX */
+
+	case 37: /* 32nm Westmere    */
+	case 44: /* 32nm Westmere-EP */
+	case 47: /* 32nm Westmere-EX */
+
+	case 42: /* 32nm SandyBridge         */
+	case 45: /* 32nm SandyBridge-E/EN/EP */
+
+	case 58: /* 22nm IvyBridge       */
+	case 62: /* 22nm IvyBridge-EP/EX */
+
+	case 60: /* 22nm Haswell Core */
+	case 63: /* 22nm Haswell Server */
+	case 69: /* 22nm Haswell ULT */
+	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
+
+	case 61: /* 14nm Broadwell Core-M */
+	case 86: /* 14nm Broadwell Xeon D */
+	case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
+	case 79: /* 14nm Broadwell Server */
+
+	case 55: /* 22nm Atom "Silvermont"                */
+	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
+	case 76: /* 14nm Atom "Airmont"                   */
+		if (idx == PERF_MSR_SMI)
+			return true;
+		break;
+
+	case 78: /* 14nm Skylake Mobile */
+	case 94: /* 14nm Skylake Desktop */
+		if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
+			return true;
+		break;
+	}
+
+	return false;
+}
+
 struct perf_msr {
-	int id;
 	u64 msr;
+	struct perf_pmu_events_attr *attr;
+	bool (*test)(int idx);
 };
 
-static struct perf_msr msr[] = {
-	{ PERF_MSR_TSC, 0 },
-	{ PERF_MSR_APERF, MSR_IA32_APERF },
-	{ PERF_MSR_MPERF, MSR_IA32_MPERF },
-	{ PERF_MSR_PPERF, MSR_PPERF },
-	{ PERF_MSR_SMI, MSR_SMI_COUNT },
-};
-
 PMU_EVENT_ATTR_STRING(tsc, evattr_tsc, "event=0x00");
@@ -29,8 +75,16 @@ PMU_EVENT_ATTR_STRING(mperf, evattr_mperf, "event=0x02");
 PMU_EVENT_ATTR_STRING(pperf, evattr_pperf, "event=0x03");
 PMU_EVENT_ATTR_STRING(smi, evattr_smi, "event=0x04");
 
+static struct perf_msr msr[] = {
+	[PERF_MSR_TSC]   = { 0,              &evattr_tsc,   NULL,            },
+	[PERF_MSR_APERF] = { MSR_IA32_APERF, &evattr_aperf, test_aperfmperf, },
+	[PERF_MSR_MPERF] = { MSR_IA32_MPERF, &evattr_mperf, test_aperfmperf, },
+	[PERF_MSR_PPERF] = { MSR_PPERF,      &evattr_pperf, test_intel,      },
+	[PERF_MSR_SMI]   = { MSR_SMI_COUNT,  &evattr_smi,   test_intel,      },
+};
+
 static struct attribute *events_attrs[PERF_MSR_EVENT_MAX + 1] = {
-	&evattr_tsc.attr.attr,
+	NULL,
 };
 
 static struct attribute_group events_attr_group = {
@@ -74,6 +128,9 @@ static int msr_event_init(struct perf_event *event)
 	    event->attr.sample_period) /* no sampling */
 		return -EINVAL;
 
+	if (!msr[cfg].attr)
+		return -EINVAL;
+
 	event->hw.idx = -1;
 	event->hw.event_base = msr[cfg].msr;
 	event->hw.config = cfg;
@@ -151,89 +208,32 @@ static struct pmu pmu_msr = {
 	.capabilities = PERF_PMU_CAP_NO_INTERRUPT,
 };
 
-static int __init intel_msr_init(int idx)
-{
-	if (boot_cpu_data.x86 != 6)
-		return 0;
-
-	switch (boot_cpu_data.x86_model) {
-	case 30: /* 45nm Nehalem    */
-	case 26: /* 45nm Nehalem-EP */
-	case 46: /* 45nm Nehalem-EX */
-
-	case 37: /* 32nm Westmere    */
-	case 44: /* 32nm Westmere-EP */
-	case 47: /* 32nm Westmere-EX */
-
-	case 42: /* 32nm SandyBridge         */
-	case 45: /* 32nm SandyBridge-E/EN/EP */
-
-	case 58: /* 22nm IvyBridge       */
-	case 62: /* 22nm IvyBridge-EP/EX */
-
-	case 60: /* 22nm Haswell Core */
-	case 63: /* 22nm Haswell Server */
-	case 69: /* 22nm Haswell ULT */
-	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
-
-	case 61: /* 14nm Broadwell Core-M */
-	case 86: /* 14nm Broadwell Xeon D */
-	case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
-	case 79: /* 14nm Broadwell Server */
-		events_attrs[idx++] = &evattr_smi.attr.attr;
-		break;
-
-	case 78: /* 14nm Skylake Mobile */
-	case 94: /* 14nm Skylake Desktop */
-		events_attrs[idx++] = &evattr_pperf.attr.attr;
-		events_attrs[idx++] = &evattr_smi.attr.attr;
-		break;
-
-	case 55: /* 22nm Atom "Silvermont" */
-	case 76: /* 14nm Atom "Airmont" */
-	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
-		events_attrs[idx++] = &evattr_smi.attr.attr;
-		break;
-	}
-
-	events_attrs[idx] = NULL;
-
-	return 0;
-}
-
-static int __init amd_msr_init(int idx)
-{
-	return 0;
-}
-
 static int __init msr_init(void)
 {
-	int err;
-	int idx = 1;
+	int i, j = 0;
 
-	if (boot_cpu_has(X86_FEATURE_APERFMPERF)) {
-		events_attrs[idx++] = &evattr_aperf.attr.attr;
-		events_attrs[idx++] = &evattr_mperf.attr.attr;
-		events_attrs[idx] = NULL;
+	if (!boot_cpu_has(X86_FEATURE_TSC)) {
+		pr_cont("no MSR PMU driver.\n");
+		return 0;
 	}
 
-	switch (boot_cpu_data.x86_vendor) {
-	case X86_VENDOR_INTEL:
-		err = intel_msr_init(idx);
-		break;
-
-	case X86_VENDOR_AMD:
-		err = amd_msr_init(idx);
-		break;
-
-	default:
-		err = -ENOTSUPP;
-	}
-
-	if (err != 0) {
-		pr_cont("no msr PMU driver.\n");
-		return 0;
+	/* Probe the MSRs. */
+	for (i = PERF_MSR_TSC + 1; i < PERF_MSR_EVENT_MAX; i++) {
+		u64 val;
+
+		/*
+		 * Virt sucks arse; you cannot tell if a R/O MSR is present :/
+		 */
+		if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
+			msr[i].attr = NULL;
 	}
 
+	/* List remaining MSRs in the sysfs attrs. */
+	for (i = 0; i < PERF_MSR_EVENT_MAX; i++) {
+		if (msr[i].attr)
+			events_attrs[j++] = &msr[i].attr->attr.attr;
+	}
+	events_attrs[j] = NULL;
+
 	perf_pmu_register(&pmu_msr, "msr", -1);
...
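[Editorial note] The coordinated sysfs listing can be verified the same way: after this patch, only events whose MSR probed successfully show up under the PMU's events directory. A small sketch, illustrative only, assuming the standard perf sysfs ABI:

/* Sketch: enumerate the msr PMU's exported events from sysfs. Only
 * MSRs that passed the probe in msr_init() are expected to appear. */
#include <dirent.h>
#include <stdio.h>

int main(void)
{
	const char *dir = "/sys/bus/event_source/devices/msr/events";
	struct dirent *de;
	DIR *d = opendir(dir);

	if (!d) {
		perror("opendir");	/* no msr PMU registered */
		return 1;
	}
	while ((de = readdir(d)) != NULL) {
		if (de->d_name[0] != '.')
			printf("msr/%s/\n", de->d_name);	/* e.g. msr/tsc/ */
	}
	closedir(d);
	return 0;
}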