Commit 722e42e4 authored by Kan Liang, committed by Peter Zijlstra

perf/x86: Support counter mask

The current perf code assumes that both the GP and fixed counters are
contiguous, but that is not guaranteed on newer Intel platforms or in a
virtualization environment.

Use a counter mask instead of the number of counters for both the GP and
the fixed counters. For the other architectures and older platforms that
don't enumerate a counter mask, use GENMASK_ULL(num_counter - 1, 0) as a
drop-in replacement. There is no functional change for them.

The interface to KVM is not changed; the number of counters is still
passed to KVM. It can be updated separately later.
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Reviewed-by: Ian Rogers <irogers@google.com>
Link: https://lkml.kernel.org/r/20240626143545.480761-3-kan.liang@linux.intel.com
parent a23eb2fc
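
As a rough userspace sketch of the conversion described above (illustrative only, not part of the patch; the six-counter value is hypothetical), a contiguous counter count maps onto a mask like this:

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's GENMASK_ULL() macro. */
#define GENMASK_ULL(h, l) \
    ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
    /* Hypothetical legacy platform: 6 contiguous GP counters. */
    int num_counter = 6;

    /* The replacement applied for platforms without a counter mask. */
    uint64_t cntr_mask64 = GENMASK_ULL(num_counter - 1, 0);

    /* Prints 0x3f: one bit per counter, so nothing changes functionally. */
    printf("cntr_mask64 = %#llx\n", (unsigned long long)cntr_mask64);
    return 0;
}
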
@@ -432,7 +432,7 @@ static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
      * be removed on one CPU at a time AND PMU is disabled
      * when we come here
      */
-    for (i = 0; i < x86_pmu.num_counters; i++) {
+    for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
         struct perf_event *tmp = event;
         if (try_cmpxchg(nb->owners + i, &tmp, NULL))

@@ -501,7 +501,7 @@ __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *ev
      * because of successive calls to x86_schedule_events() from
      * hw_perf_group_sched_in() without hw_perf_enable()
      */
-    for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
+    for_each_set_bit(idx, c->idxmsk, x86_pmu_max_num_counters(NULL)) {
         if (new == -1 || hwc->idx == idx)
             /* assign free slot, prefer hwc->idx */
             old = cmpxchg(nb->owners + idx, NULL, event);

@@ -544,7 +544,7 @@ static struct amd_nb *amd_alloc_nb(int cpu)
     /*
      * initialize all possible NB constraints
      */
-    for (i = 0; i < x86_pmu.num_counters; i++) {
+    for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
         __set_bit(i, nb->event_constraints[i].idxmsk);
         nb->event_constraints[i].weight = 1;
     }

@@ -737,7 +737,7 @@ static void amd_pmu_check_overflow(void)
      * counters are always enabled when this function is called and
      * ARCH_PERFMON_EVENTSEL_INT is always set.
      */
-    for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+    for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
         if (!test_bit(idx, cpuc->active_mask))
             continue;

@@ -757,7 +757,7 @@ static void amd_pmu_enable_all(int added)
     amd_brs_enable_all();
-    for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+    for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
         /* only activate events which are marked as active */
         if (!test_bit(idx, cpuc->active_mask))
             continue;

@@ -980,7 +980,7 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
     /* Clear any reserved bits set by buggy microcode */
     status &= amd_pmu_global_cntr_mask;
-    for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+    for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
         if (!test_bit(idx, cpuc->active_mask))
             continue;

@@ -1315,7 +1315,7 @@ static __initconst const struct x86_pmu amd_pmu = {
     .addr_offset = amd_pmu_addr_offset,
     .event_map = amd_pmu_event_map,
     .max_events = ARRAY_SIZE(amd_perfmon_event_map),
-    .num_counters = AMD64_NUM_COUNTERS,
+    .cntr_mask64 = GENMASK_ULL(AMD64_NUM_COUNTERS - 1, 0),
     .add = amd_pmu_add_event,
     .del = amd_pmu_del_event,
     .cntval_bits = 48,

@@ -1414,7 +1414,7 @@ static int __init amd_core_pmu_init(void)
      */
     x86_pmu.eventsel = MSR_F15H_PERF_CTL;
     x86_pmu.perfctr = MSR_F15H_PERF_CTR;
-    x86_pmu.num_counters = AMD64_NUM_COUNTERS_CORE;
+    x86_pmu.cntr_mask64 = GENMASK_ULL(AMD64_NUM_COUNTERS_CORE - 1, 0);
     /* Check for Performance Monitoring v2 support */
     if (boot_cpu_has(X86_FEATURE_PERFMON_V2)) {

@@ -1424,9 +1424,9 @@ static int __init amd_core_pmu_init(void)
         x86_pmu.version = 2;
         /* Find the number of available Core PMCs */
-        x86_pmu.num_counters = ebx.split.num_core_pmc;
-        amd_pmu_global_cntr_mask = (1ULL << x86_pmu.num_counters) - 1;
+        x86_pmu.cntr_mask64 = GENMASK_ULL(ebx.split.num_core_pmc - 1, 0);
+        amd_pmu_global_cntr_mask = x86_pmu.cntr_mask64;
         /* Update PMC handling functions */
         x86_pmu.enable_all = amd_pmu_v2_enable_all;

@@ -1454,12 +1454,12 @@ static int __init amd_core_pmu_init(void)
      * even numbered counter that has a consecutive adjacent odd
      * numbered counter following it.
      */
-    for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
+    for (i = 0; i < x86_pmu_max_num_counters(NULL) - 1; i += 2)
         even_ctr_mask |= BIT_ULL(i);
     pair_constraint = (struct event_constraint)
         __EVENT_CONSTRAINT(0, even_ctr_mask, 0,
-                           x86_pmu.num_counters / 2, 0,
+                           x86_pmu_max_num_counters(NULL) / 2, 0,
                            PERF_X86_EVENT_PAIR);
     x86_pmu.get_event_constraints = amd_get_event_constraints_f17h;
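
The loop conversions in the hunks above carry the point of the patch: for_each_set_bit() over cntr_mask tolerates holes in the counter index space, while the old 0..num_counters-1 loops silently assume contiguity. A minimal sketch of the difference, written as plain C against a hypothetical sparse mask (counters 0, 1, 3 and 4 exist, counter 2 does not):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t cntr_mask = 0x1b;                              /* bits 0, 1, 3, 4 set */
    int num_counters = __builtin_popcountll(cntr_mask);     /* 4 */
    int idx;

    /* Old style: walks 0..3, touches nonexistent counter 2, misses counter 4. */
    printf("old loop:");
    for (idx = 0; idx < num_counters; idx++)
        printf(" %d", idx);
    printf("\n");

    /* New style: mirrors for_each_set_bit(idx, cntr_mask, X86_PMC_IDX_MAX). */
    printf("new loop:");
    for (idx = 0; idx < 64; idx++)
        if (cntr_mask & (1ULL << idx))
            printf(" %d", idx);                             /* 0 1 3 4 */
    printf("\n");
    return 0;
}
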
@@ -1138,7 +1138,6 @@ static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
 {
     struct debug_store *ds = cpuc->ds;
     int max_pebs_events = intel_pmu_max_num_pebs(cpuc->pmu);
-    int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
     u64 threshold;
     int reserved;

@@ -1146,7 +1145,7 @@ static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
         return;
     if (x86_pmu.flags & PMU_FL_PEBS_ALL)
-        reserved = max_pebs_events + num_counters_fixed;
+        reserved = max_pebs_events + x86_pmu_max_num_counters_fixed(cpuc->pmu);
     else
         reserved = max_pebs_events;

@@ -2172,8 +2171,8 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d
     mask = x86_pmu.pebs_events_mask;
     size = max_pebs_events;
     if (x86_pmu.flags & PMU_FL_PEBS_ALL) {
-        mask |= ((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;
-        size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
+        mask |= x86_pmu.fixed_cntr_mask64 << INTEL_PMC_IDX_FIXED;
+        size = INTEL_PMC_IDX_FIXED + x86_pmu_max_num_counters_fixed(NULL);
     }
     if (unlikely(base >= top)) {

@@ -2269,11 +2268,10 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d
 {
     short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
-    int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
     struct debug_store *ds = cpuc->ds;
     struct perf_event *event;
     void *base, *at, *top;
-    int bit, size;
+    int bit;
     u64 mask;
     if (!x86_pmu.pebs_active)

@@ -2285,11 +2283,10 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d
     ds->pebs_index = ds->pebs_buffer_base;
     mask = hybrid(cpuc->pmu, pebs_events_mask) |
-           (((1ULL << num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED);
-    size = INTEL_PMC_IDX_FIXED + num_counters_fixed;
+           (hybrid(cpuc->pmu, fixed_cntr_mask64) << INTEL_PMC_IDX_FIXED);
     if (unlikely(base >= top)) {
-        intel_pmu_pebs_event_update_no_drain(cpuc, size);
+        intel_pmu_pebs_event_update_no_drain(cpuc, X86_PMC_IDX_MAX);
         return;
     }

@@ -2299,11 +2296,11 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d
         pebs_status = get_pebs_status(at) & cpuc->pebs_enabled;
         pebs_status &= mask;
-        for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
+        for_each_set_bit(bit, (unsigned long *)&pebs_status, X86_PMC_IDX_MAX)
             counts[bit]++;
     }
-    for_each_set_bit(bit, (unsigned long *)&mask, size) {
+    for_each_set_bit(bit, (unsigned long *)&mask, X86_PMC_IDX_MAX) {
         if (counts[bit] == 0)
             continue;
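
In the PEBS drain paths above, the fixed counters occupy the index space starting at INTEL_PMC_IDX_FIXED (bit 32), so the fixed counter mask can simply be shifted into place rather than rebuilt from a count. A standalone sketch with hypothetical mask values (8 PEBS-capable GP counters, 4 fixed counters):

#include <stdio.h>
#include <stdint.h>

#define INTEL_PMC_IDX_FIXED 32    /* same value as the kernel definition */

int main(void)
{
    uint64_t pebs_events_mask  = 0xff;    /* hypothetical: GP counters 0-7 */
    uint64_t fixed_cntr_mask64 = 0xf;     /* hypothetical: fixed counters 0-3 */

    /* Mirrors the new mask computation in the drain functions. */
    uint64_t mask = pebs_events_mask |
                    (fixed_cntr_mask64 << INTEL_PMC_IDX_FIXED);

    /* Prints 0xf000000ff: GP bits low, fixed bits at 32 and up. */
    printf("combined PEBS mask = %#llx\n", (unsigned long long)mask);
    return 0;
}
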
@@ -303,7 +303,7 @@ static const struct x86_pmu knc_pmu __initconst = {
     .apic = 1,
     .max_period = (1ULL << 39) - 1,
     .version = 0,
-    .num_counters = 2,
+    .cntr_mask64 = 0x3,
     .cntval_bits = 40,
     .cntval_mask = (1ULL << 40) - 1,
     .get_event_constraints = x86_get_event_constraints,
@@ -919,7 +919,7 @@ static void p4_pmu_disable_all(void)
     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
     int idx;
-    for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+    for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
         struct perf_event *event = cpuc->events[idx];
         if (!test_bit(idx, cpuc->active_mask))
             continue;

@@ -998,7 +998,7 @@ static void p4_pmu_enable_all(int added)
     struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
     int idx;
-    for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+    for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
         struct perf_event *event = cpuc->events[idx];
         if (!test_bit(idx, cpuc->active_mask))
             continue;

@@ -1040,7 +1040,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
     cpuc = this_cpu_ptr(&cpu_hw_events);
-    for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+    for_each_set_bit(idx, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
         int overflow;
         if (!test_bit(idx, cpuc->active_mask)) {

@@ -1353,7 +1353,7 @@ static __initconst const struct x86_pmu p4_pmu = {
      * though leave it restricted at moment assuming
      * HT is on
      */
-    .num_counters = ARCH_P4_MAX_CCCR,
+    .cntr_mask64 = GENMASK_ULL(ARCH_P4_MAX_CCCR - 1, 0),
     .apic = 1,
     .cntval_bits = ARCH_P4_CNTRVAL_BITS,
     .cntval_mask = ARCH_P4_CNTRVAL_MASK,

@@ -1395,7 +1395,7 @@ __init int p4_pmu_init(void)
      *
      * Solve this by zero'ing out the registers to mimic a reset.
      */
-    for (i = 0; i < x86_pmu.num_counters; i++) {
+    for_each_set_bit(i, x86_pmu.cntr_mask, X86_PMC_IDX_MAX) {
         reg = x86_pmu_config_addr(i);
         wrmsrl_safe(reg, 0ULL);
     }
@@ -214,7 +214,7 @@ static __initconst const struct x86_pmu p6_pmu = {
     .apic = 1,
     .max_period = (1ULL << 31) - 1,
     .version = 0,
-    .num_counters = 2,
+    .cntr_mask64 = 0x3,
     /*
      * Events have 40 bits implemented. However they are designed such
      * that bits [32-39] are sign extensions of bit 31. As such the
@@ -685,8 +685,14 @@ struct x86_hybrid_pmu {
     union perf_capabilities intel_cap;
     u64 intel_ctrl;
     u64 pebs_events_mask;
-    int num_counters;
-    int num_counters_fixed;
+    union {
+        u64 cntr_mask64;
+        unsigned long cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+    };
+    union {
+        u64 fixed_cntr_mask64;
+        unsigned long fixed_cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+    };
     struct event_constraint unconstrained;
     u64 hw_cache_event_ids

@@ -774,8 +780,14 @@ struct x86_pmu {
     int (*rdpmc_index)(int index);
     u64 (*event_map)(int);
     int max_events;
-    int num_counters;
-    int num_counters_fixed;
+    union {
+        u64 cntr_mask64;
+        unsigned long cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+    };
+    union {
+        u64 fixed_cntr_mask64;
+        unsigned long fixed_cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+    };
     int cntval_bits;
     u64 cntval_mask;
     union {

@@ -1125,8 +1137,8 @@ static inline int x86_pmu_rdpmc_index(int index)
     return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
 }
-bool check_hw_exists(struct pmu *pmu, int num_counters,
-                     int num_counters_fixed);
+bool check_hw_exists(struct pmu *pmu, unsigned long *cntr_mask,
+                     unsigned long *fixed_cntr_mask);
 int x86_add_exclusive(unsigned int what);

@@ -1197,8 +1209,27 @@ void x86_pmu_enable_event(struct perf_event *event);
 int x86_pmu_handle_irq(struct pt_regs *regs);
-void x86_pmu_show_pmu_cap(int num_counters, int num_counters_fixed,
-                          u64 intel_ctrl);
+void x86_pmu_show_pmu_cap(struct pmu *pmu);
+
+static inline int x86_pmu_num_counters(struct pmu *pmu)
+{
+    return hweight64(hybrid(pmu, cntr_mask64));
+}
+
+static inline int x86_pmu_max_num_counters(struct pmu *pmu)
+{
+    return fls64(hybrid(pmu, cntr_mask64));
+}
+
+static inline int x86_pmu_num_counters_fixed(struct pmu *pmu)
+{
+    return hweight64(hybrid(pmu, fixed_cntr_mask64));
+}
+
+static inline int x86_pmu_max_num_counters_fixed(struct pmu *pmu)
+{
+    return fls64(hybrid(pmu, fixed_cntr_mask64));
+}
 extern struct event_constraint emptyconstraint;
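
The new inline helpers above separate the two readings of "number of counters": hweight64() reports how many counters exist, while fls64() reports one past the highest counter index, which is the right bound for sizing loops and constraints. With a contiguous mask the two agree; with a sparse mask they diverge, as in this sketch (userspace stand-ins for the kernel helpers, mask value hypothetical):

#include <stdio.h>
#include <stdint.h>

static int hweight64(uint64_t w) { return __builtin_popcountll(w); }
static int fls64(uint64_t w)     { return w ? 64 - __builtin_clzll(w) : 0; }

int main(void)
{
    uint64_t cntr_mask64 = 0x23;    /* counters 0, 1 and 5 are available */

    printf("x86_pmu_num_counters     -> %d\n", hweight64(cntr_mask64)); /* 3 */
    printf("x86_pmu_max_num_counters -> %d\n", fls64(cntr_mask64));     /* 6 */
    return 0;
}
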
@@ -530,13 +530,13 @@ __init int zhaoxin_pmu_init(void)
     pr_info("Version check pass!\n");
     x86_pmu.version = version;
-    x86_pmu.num_counters = eax.split.num_counters;
+    x86_pmu.cntr_mask64 = GENMASK_ULL(eax.split.num_counters - 1, 0);
     x86_pmu.cntval_bits = eax.split.bit_width;
     x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
     x86_pmu.events_maskl = ebx.full;
     x86_pmu.events_mask_len = eax.split.mask_length;
-    x86_pmu.num_counters_fixed = edx.split.num_counters_fixed;
+    x86_pmu.fixed_cntr_mask64 = GENMASK_ULL(edx.split.num_counters_fixed - 1, 0);
     x86_add_quirk(zhaoxin_arch_events_quirk);
     switch (boot_cpu_data.x86) {

@@ -604,13 +604,13 @@ __init int zhaoxin_pmu_init(void)
         return -ENODEV;
     }
-    x86_pmu.intel_ctrl = (1 << (x86_pmu.num_counters)) - 1;
-    x86_pmu.intel_ctrl |= ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
+    x86_pmu.intel_ctrl = x86_pmu.cntr_mask64;
+    x86_pmu.intel_ctrl |= x86_pmu.fixed_cntr_mask64 << INTEL_PMC_IDX_FIXED;
     if (x86_pmu.event_constraints) {
         for_each_event_constraint(c, x86_pmu.event_constraints) {
-            c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
-            c->weight += x86_pmu.num_counters;
+            c->idxmsk64 |= x86_pmu.cntr_mask64;
+            c->weight += x86_pmu_num_counters(NULL);
         }
     }