Commit f967140d authored by Kim Phillips, committed by Borislav Petkov

perf/amd/uncore: Replace manual sampling check with CAP_NO_INTERRUPT flag

Setting the PERF_PMU_CAP_NO_INTERRUPT flag enables the generic sampling
check in kernel/events/core.c::perf_event_open(), which rejects sampling
events on this PMU with the more appropriate -EOPNOTSUPP instead of the
driver's -EINVAL.
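
For reference, the core-side check this enables looks roughly like the
following (a paraphrased sketch of the v5.6-era kernel/events/core.c and
include/linux/perf_event.h, not the verbatim source):

  /* include/linux/perf_event.h: a sampling event is one with a period */
  static inline bool is_sampling_event(struct perf_event *event)
  {
          return event->attr.sample_period != 0;
  }

  /*
   * kernel/events/core.c::perf_event_open(), paraphrased: a PMU that
   * declares PERF_PMU_CAP_NO_INTERRUPT has no working counter overflow
   * interrupt, so sampling events are refused up front.
   */
  if (is_sampling_event(event) &&
      (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT))
          return -EOPNOTSUPP;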

BEFORE:

  $ sudo perf record -a -e instructions,l3_request_g1.caching_l3_cache_accesses true
  Error:
  The sys_perf_event_open() syscall returned with 22 (Invalid argument) for event (l3_request_g1.caching_l3_cache_accesses).
  /bin/dmesg | grep -i perf may provide additional information.

With nothing relevant in dmesg.

AFTER:

  $ sudo perf record -a -e instructions,l3_request_g1.caching_l3_cache_accesses true
  Error:
  l3_request_g1.caching_l3_cache_accesses: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'
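
As the new error message suggests, counting (non-sampling) use of the
same event via 'perf stat' is still expected to work:

  $ sudo perf stat -a -e l3_request_g1.caching_l3_cache_accesses true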

Fixes: c43ca509 ("perf/x86/amd: Add support for AMD NB and L2I "uncore" counters")
Signed-off-by: Kim Phillips <kim.phillips@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20200311191323.13124-1-kim.phillips@amd.com
parent 798048f8
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -190,15 +190,12 @@ static int amd_uncore_event_init(struct perf_event *event)
 
 	/*
 	 * NB and Last level cache counters (MSRs) are shared across all cores
-	 * that share the same NB / Last level cache. Interrupts can be directed
-	 * to a single target core, however, event counts generated by processes
-	 * running on other cores cannot be masked out. So we do not support
-	 * sampling and per-thread events.
+	 * that share the same NB / Last level cache. On family 16h and below,
+	 * Interrupts can be directed to a single target core, however, event
+	 * counts generated by processes running on other cores cannot be masked
+	 * out. So we do not support sampling and per-thread events via
+	 * CAP_NO_INTERRUPT, and we do not enable counter overflow interrupts:
 	 */
-	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
-		return -EINVAL;
-
-	/* and we do not enable counter overflow interrupts */
 	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
 	hwc->idx = -1;
 
@@ -306,7 +303,7 @@ static struct pmu amd_nb_pmu = {
 	.start		= amd_uncore_start,
 	.stop		= amd_uncore_stop,
 	.read		= amd_uncore_read,
-	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
+	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
 };
 
 static struct pmu amd_llc_pmu = {
@@ -317,7 +314,7 @@ static struct pmu amd_llc_pmu = {
 	.start		= amd_uncore_start,
 	.stop		= amd_uncore_stop,
 	.read		= amd_uncore_read,
-	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
+	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
 };
 
 static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)