Commit 41bf4989 authored by Robert Richter, committed by Ingo Molnar

perf, x86: Calculate perfctr msr addresses in helper functions

This patch adds helper functions to calculate perfctr MSR addresses.
We need this to later add support for AMD family 15h CPUs, for which
the algorithm that generates the perfctr MSR addresses has to change.
Signed-off-by: Robert Richter <robert.richter@amd.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1296664860-10886-3-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent d45dd923
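The point of funneling the MSR address computation through x86_pmu_config_addr() and x86_pmu_event_addr() is that a later change to the address scheme only has to touch one place. Below is a minimal, illustrative sketch (not part of this commit) of how the helpers might be extended for a CPU whose control and counter MSRs are interleaved, as on family 15h; the feature flag and the stride of two used here are assumptions for illustration only:

static inline unsigned int x86_pmu_addr_offset(int index)
{
	/*
	 * Hypothetical example: on CPUs with interleaved core counter
	 * MSRs (assumed here to be indicated by X86_FEATURE_PERFCTR_CORE),
	 * successive counters are two MSRs apart instead of one.
	 */
	if (boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
		return index << 1;
	return index;
}

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + x86_pmu_addr_offset(index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + x86_pmu_addr_offset(index);
}

Because every call site converted by this patch goes through the helpers rather than open-coding x86_pmu.eventsel + i or x86_pmu.perfctr + i, such a change would not require touching the callers again.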
@@ -321,6 +321,16 @@ x86_perf_event_update(struct perf_event *event)
 	return new_raw_count;
 }
 
+static inline unsigned int x86_pmu_config_addr(int index)
+{
+	return x86_pmu.eventsel + index;
+}
+
+static inline unsigned int x86_pmu_event_addr(int index)
+{
+	return x86_pmu.perfctr + index;
+}
+
 static atomic_t active_events;
 static DEFINE_MUTEX(pmc_reserve_mutex);
@@ -331,12 +341,12 @@ static bool reserve_pmc_hardware(void)
 	int i;
 
 	for (i = 0; i < x86_pmu.num_counters; i++) {
-		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
+		if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
 			goto perfctr_fail;
 	}
 
 	for (i = 0; i < x86_pmu.num_counters; i++) {
-		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
+		if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
 			goto eventsel_fail;
 	}
@@ -344,13 +354,13 @@ static bool reserve_pmc_hardware(void)
 eventsel_fail:
 	for (i--; i >= 0; i--)
-		release_evntsel_nmi(x86_pmu.eventsel + i);
+		release_evntsel_nmi(x86_pmu_config_addr(i));
 
 	i = x86_pmu.num_counters;
 
 perfctr_fail:
 	for (i--; i >= 0; i--)
-		release_perfctr_nmi(x86_pmu.perfctr + i);
+		release_perfctr_nmi(x86_pmu_event_addr(i));
 
 	return false;
 }
@@ -360,8 +370,8 @@ static void release_pmc_hardware(void)
 	int i;
 
 	for (i = 0; i < x86_pmu.num_counters; i++) {
-		release_perfctr_nmi(x86_pmu.perfctr + i);
-		release_evntsel_nmi(x86_pmu.eventsel + i);
+		release_perfctr_nmi(x86_pmu_event_addr(i));
+		release_evntsel_nmi(x86_pmu_config_addr(i));
 	}
 }
@@ -382,7 +392,7 @@ static bool check_hw_exists(void)
 	 * complain and bail.
 	 */
 	for (i = 0; i < x86_pmu.num_counters; i++) {
-		reg = x86_pmu.eventsel + i;
+		reg = x86_pmu_config_addr(i);
 		ret = rdmsrl_safe(reg, &val);
 		if (ret)
 			goto msr_fail;
@@ -407,8 +417,8 @@ static bool check_hw_exists(void)
 	 * that don't trap on the MSR access and always return 0s.
 	 */
 	val = 0xabcdUL;
-	ret = checking_wrmsrl(x86_pmu.perfctr, val);
-	ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new);
+	ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
+	ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
 	if (ret || val != val_new)
 		goto msr_fail;
@@ -617,11 +627,11 @@ static void x86_pmu_disable_all(void)
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
 
-		rdmsrl(x86_pmu.eventsel + idx, val);
+		rdmsrl(x86_pmu_config_addr(idx), val);
 		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
 			continue;
 		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
-		wrmsrl(x86_pmu.eventsel + idx, val);
+		wrmsrl(x86_pmu_config_addr(idx), val);
 	}
 }
@@ -1110,8 +1120,8 @@ void perf_event_print_debug(void)
 	pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
-		rdmsrl(x86_pmu.perfctr + idx, pmc_count);
+		rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
+		rdmsrl(x86_pmu_event_addr(idx), pmc_count);
 
 		prev_left = per_cpu(pmc_prev_left[idx], cpu);
...
@@ -691,8 +691,8 @@ static void intel_pmu_reset(void)
 	printk("clearing PMU state on CPU#%d\n", smp_processor_id());
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-		checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
-		checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
+		checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
+		checking_wrmsrl(x86_pmu_event_addr(idx), 0ull);
 	}
 
 	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
 		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
...