Commit d7cbbe49 authored by Natarajan, Janakarajan's avatar Natarajan, Janakarajan Committed by Ingo Molnar

perf/x86/amd/uncore: Set ThreadMask and SliceMask for L3 Cache perf events

In Family 17h, some L3 Cache Performance events require the ThreadMask
and SliceMask to be set. For other events, these fields do not affect
the count either way.

Set ThreadMask and SliceMask to 0xFF and 0xF respectively.
Signed-off-by: Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Suravee <Suravee.Suthikulpanit@amd.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Link: http://lkml.kernel.org/r/Message-ID:
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 9d92cfea
...@@ -36,6 +36,7 @@ ...@@ -36,6 +36,7 @@
static int num_counters_llc; static int num_counters_llc;
static int num_counters_nb; static int num_counters_nb;
/* True when the L3 PMU requires ThreadMask/SliceMask in the event config (Family 17h). */
static bool l3_mask;
static HLIST_HEAD(uncore_unused_list); static HLIST_HEAD(uncore_unused_list);
...@@ -209,6 +210,13 @@ static int amd_uncore_event_init(struct perf_event *event) ...@@ -209,6 +210,13 @@ static int amd_uncore_event_init(struct perf_event *event)
hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB; hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
hwc->idx = -1; hwc->idx = -1;
/*
* SliceMask and ThreadMask need to be set for certain L3 events in
* Family 17h. For other events, the two fields do not affect the count.
*/
if (l3_mask)
hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);
if (event->cpu < 0) if (event->cpu < 0)
return -EINVAL; return -EINVAL;
...@@ -525,6 +533,7 @@ static int __init amd_uncore_init(void) ...@@ -525,6 +533,7 @@ static int __init amd_uncore_init(void)
amd_llc_pmu.name = "amd_l3"; amd_llc_pmu.name = "amd_l3";
format_attr_event_df.show = &event_show_df; format_attr_event_df.show = &event_show_df;
format_attr_event_l3.show = &event_show_l3; format_attr_event_l3.show = &event_show_l3;
l3_mask = true;
} else { } else {
num_counters_nb = NUM_COUNTERS_NB; num_counters_nb = NUM_COUNTERS_NB;
num_counters_llc = NUM_COUNTERS_L2; num_counters_llc = NUM_COUNTERS_L2;
...@@ -532,6 +541,7 @@ static int __init amd_uncore_init(void) ...@@ -532,6 +541,7 @@ static int __init amd_uncore_init(void)
amd_llc_pmu.name = "amd_l2"; amd_llc_pmu.name = "amd_l2";
format_attr_event_df = format_attr_event; format_attr_event_df = format_attr_event;
format_attr_event_l3 = format_attr_event; format_attr_event_l3 = format_attr_event;
l3_mask = false;
} }
amd_nb_pmu.attr_groups = amd_uncore_attr_groups_df; amd_nb_pmu.attr_groups = amd_uncore_attr_groups_df;
......
...@@ -46,6 +46,14 @@ ...@@ -46,6 +46,14 @@
#define INTEL_ARCH_EVENT_MASK \ #define INTEL_ARCH_EVENT_MASK \
(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT) (ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)
/*
 * AMD Family 17h L3 PMC event select fields:
 * SliceMask occupies bits 51:48; 0xF selects all four L3 slices.
 */
#define AMD64_L3_SLICE_SHIFT 48
#define AMD64_L3_SLICE_MASK \
((0xFULL) << AMD64_L3_SLICE_SHIFT)
/* ThreadMask occupies bits 63:56; 0xFF selects all CPU threads of the CCX. */
#define AMD64_L3_THREAD_SHIFT 56
#define AMD64_L3_THREAD_MASK \
((0xFFULL) << AMD64_L3_THREAD_SHIFT)
#define X86_RAW_EVENT_MASK \ #define X86_RAW_EVENT_MASK \
(ARCH_PERFMON_EVENTSEL_EVENT | \ (ARCH_PERFMON_EVENTSEL_EVENT | \
ARCH_PERFMON_EVENTSEL_UMASK | \ ARCH_PERFMON_EVENTSEL_UMASK | \
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment