Commit b8c4d1a8 authored by Kan Liang, committed by Peter Zijlstra

perf/x86/intel: Factor out intel_pmu_check_num_counters

Each Hybrid PMU has to check its own number of counters and mask fixed
counters before registration.

The intel_pmu_check_num_counters() helper will be reused later to check the
number of counters for each hybrid PMU.
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Link: https://lkml.kernel.org/r/1618237865-33448-12-git-send-email-kan.liang@linux.intel.com
parent 183af736
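For context, the intent is for the helper factored out below to be called once per hybrid PMU when the hybrid PMUs are registered, keeping the counter clipping and intel_ctrl construction in one place. A minimal sketch of that reuse, assuming hypothetical per-PMU state (the x86_hybrid_pmu struct, hybrid_pmu[] array, and num_hybrid_pmus field are illustrative names, not part of this patch):

	/*
	 * Illustrative sketch only: validate each hybrid PMU's counters
	 * with the factored-out helper. Field names are assumed.
	 */
	int i;

	for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
		struct x86_hybrid_pmu *pmu = &x86_pmu.hybrid_pmu[i];

		intel_pmu_check_num_counters(&pmu->num_counters,
					     &pmu->num_counters_fixed,
					     &pmu->intel_ctrl,
					     (u64)fixed_mask);
	}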
@@ -5064,6 +5064,26 @@ static const struct attribute_group *attr_update[] = {
 static struct attribute *empty_attrs;
+static void intel_pmu_check_num_counters(int *num_counters,
+					 int *num_counters_fixed,
+					 u64 *intel_ctrl, u64 fixed_mask)
+{
+	if (*num_counters > INTEL_PMC_MAX_GENERIC) {
+		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
+		     *num_counters, INTEL_PMC_MAX_GENERIC);
+		*num_counters = INTEL_PMC_MAX_GENERIC;
+	}
+	*intel_ctrl = (1ULL << *num_counters) - 1;
+
+	if (*num_counters_fixed > INTEL_PMC_MAX_FIXED) {
+		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
+		     *num_counters_fixed, INTEL_PMC_MAX_FIXED);
+		*num_counters_fixed = INTEL_PMC_MAX_FIXED;
+	}
+
+	*intel_ctrl |= fixed_mask << INTEL_PMC_IDX_FIXED;
+}
+
 __init int intel_pmu_init(void)
 {
 	struct attribute **extra_skl_attr = &empty_attrs;
@@ -5703,20 +5723,10 @@ __init int intel_pmu_init(void)
 	x86_pmu.attr_update = attr_update;
-	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
-		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
-		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
-		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
-	}
-	x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;
-
-	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
-		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
-		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
-		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
-	}
-
-	x86_pmu.intel_ctrl |= (u64)fixed_mask << INTEL_PMC_IDX_FIXED;
+	intel_pmu_check_num_counters(&x86_pmu.num_counters,
+				     &x86_pmu.num_counters_fixed,
+				     &x86_pmu.intel_ctrl,
+				     (u64)fixed_mask);
 	/* AnyThread may be deprecated on arch perfmon v5 or later */
 	if (x86_pmu.intel_cap.anythread_deprecated)
...