Commit 27348f38 authored by Peter Zijlstra, committed by Ingo Molnar

perf/x86/cqm: Factor out some common code

Having the same code twice (and once quite ugly) is fragile.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent e7ee3e8c
@@ -463,6 +463,14 @@ static bool is_mbm_event(int e)
 	return (e >= QOS_MBM_TOTAL_EVENT_ID && e <= QOS_MBM_LOCAL_EVENT_ID);
 }
 
+static void cqm_mask_call(struct rmid_read *rr)
+{
+	if (is_mbm_event(rr->evt_type))
+		on_each_cpu_mask(&cqm_cpumask, __intel_mbm_event_count, rr, 1);
+	else
+		on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count, rr, 1);
+}
+
 /*
  * Exchange the RMID of a group of events.
  */
@@ -479,18 +487,12 @@ static u32 intel_cqm_xchg_rmid(struct perf_event *group, u32 rmid)
 	 */
 	if (__rmid_valid(old_rmid) && !__rmid_valid(rmid)) {
 		struct rmid_read rr = {
-			.value = ATOMIC64_INIT(0),
 			.rmid = old_rmid,
+			.evt_type = group->attr.config,
+			.value = ATOMIC64_INIT(0),
 		};
 
-		if (is_mbm_event(group->attr.config)) {
-			rr.evt_type = group->attr.config;
-			on_each_cpu_mask(&cqm_cpumask, __intel_mbm_event_count,
-					 &rr, 1);
-		} else {
-			on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count,
-					 &rr, 1);
-		}
+		cqm_mask_call(&rr);
 		local64_set(&group->count, atomic64_read(&rr.value));
 	}
 
@@ -1180,6 +1182,7 @@ static u64 intel_cqm_event_count(struct perf_event *event)
 {
 	unsigned long flags;
 	struct rmid_read rr = {
+		.evt_type = event->attr.config,
 		.value = ATOMIC64_INIT(0),
 	};
 
@@ -1229,12 +1232,7 @@ static u64 intel_cqm_event_count(struct perf_event *event)
 	if (!__rmid_valid(rr.rmid))
 		goto out;
 
-	if (is_mbm_event(event->attr.config)) {
-		rr.evt_type = event->attr.config;
-		on_each_cpu_mask(&cqm_cpumask, __intel_mbm_event_count, &rr, 1);
-	} else {
-		on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count, &rr, 1);
-	}
+	cqm_mask_call(&rr);
 
 	raw_spin_lock_irqsave(&cache_lock, flags);
 	if (event->hw.cqm_rmid == rr.rmid)