Commit 7175f059 authored by Jan Glauber, committed by Will Deacon

arm64: perf: Enable PMCR long cycle counter bit

With the long cycle counter bit (LC) disabled, the cycle counter does not
work on the ThunderX SoC (ThunderX only implements AArch64).
Also, according to the documentation, LC == 0 is deprecated.

To keep the code simple, the patch does not introduce 64-bit wide counter
functions. Instead, writing the cycle counter always sets the upper
32 bits, so overflow interrupts are generated as before.
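
As a purely illustrative, stand-alone C sketch (hypothetical values, not part
of the patch): with LC set the PMU signals overflow when the 64-bit
PMCCNTR_EL0 wraps, so presetting the upper 32 bits to all-ones makes the
64-bit counter wrap after exactly the same number of cycles as the old
32-bit programming would.

    /* Hypothetical stand-alone sketch, not kernel code. */
    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t period  = 1000;               /* example sampling period */
        uint32_t start32 = (uint32_t)-period;  /* 32-bit value perf would program */
        uint64_t start64 = 0xffffffff00000000ULL | start32;

        /* Cycles until the 64-bit counter wraps and the interrupt fires. */
        uint64_t cycles = 0 - start64;
        printf("overflow after %" PRIu64 " cycles\n", cycles); /* prints 1000 */
        return 0;
    }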

Original patch from Andrew Pinksi <Andrew.Pinksi@caviumnetworks.com>
Signed-off-by: Jan Glauber <jglauber@cavium.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent d0aa2bff
@@ -405,6 +405,7 @@ static const struct attribute_group *armv8_pmuv3_attr_groups[] = {
 #define ARMV8_PMCR_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
 #define ARMV8_PMCR_X		(1 << 4) /* Export to ETM */
 #define ARMV8_PMCR_DP		(1 << 5) /* Disable CCNT if non-invasive debug*/
+#define ARMV8_PMCR_LC		(1 << 6) /* Overflow on 64 bit cycle counter */
 #define ARMV8_PMCR_N_SHIFT	11	 /* Number of counters supported */
 #define ARMV8_PMCR_N_MASK	0x1f
 #define ARMV8_PMCR_MASK		0x3f	 /* Mask for writable bits */
@@ -494,9 +495,16 @@ static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
 	if (!armv8pmu_counter_valid(cpu_pmu, idx))
 		pr_err("CPU%u writing wrong counter %d\n",
 			smp_processor_id(), idx);
-	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
-		asm volatile("msr pmccntr_el0, %0" :: "r" (value));
-	else if (armv8pmu_select_counter(idx) == idx)
+	else if (idx == ARMV8_IDX_CYCLE_COUNTER) {
+		/*
+		 * Set the upper 32bits as this is a 64bit counter but we only
+		 * count using the lower 32bits and we want an interrupt when
+		 * it overflows.
+		 */
+		u64 value64 = 0xffffffff00000000ULL | value;
+
+		asm volatile("msr pmccntr_el0, %0" :: "r" (value64));
+	} else if (armv8pmu_select_counter(idx) == idx)
 		asm volatile("msr pmxevcntr_el0, %0" :: "r" (value));
 }
@@ -768,8 +776,11 @@ static void armv8pmu_reset(void *info)
 		armv8pmu_disable_intens(idx);
 	}
 
-	/* Initialize & Reset PMNC: C and P bits. */
-	armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);
+	/*
+	 * Initialize & Reset PMNC. Request overflow interrupt for
+	 * 64 bit cycle counter but cheat in armv8pmu_write_counter().
+	 */
+	armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C | ARMV8_PMCR_LC);
 }
 
 static int armv8_pmuv3_map_event(struct perf_event *event)