Commit e8c68abb authored by Palmer Dabbelt

riscv_pmu_sbi: add support for PMU variant on T-Head C9xx cores

The PMU on T-Head C9xx cores is quite similar to the SSCOFPMF extension
but not completely identical, so this series adds a T-Head PMU errata
that handles the differences.

* 'riscv-pmu' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/palmer/linux:
  drivers/perf: riscv_pmu_sbi: add support for PMU variant on T-Head C9xx cores
  RISC-V: Cache SBI vendor values
parents d233ab3c 65e9fb08
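
In driver terms, the differences come down to two numbers: which local interrupt signals counter overflow, and which CSR holds the per-counter overflow bits. The following is an illustrative user-space sketch (not kernel code) of the probe decision made in pmu_sbi_setup_irqs() below, reduced to a pure function. The values of THEAD_VENDOR_ID (0x5b7), RV_IRQ_PMU (13) and CSR_SSCOUNTOVF (0xda0) are assumptions taken from kernel headers; only the two THEAD_C9XX_* values appear in this diff.

#include <stdbool.h>
#include <stdio.h>

#define THEAD_VENDOR_ID           0x5b7   /* assumption: from vendorid_list.h */
#define RV_IRQ_PMU                13      /* assumption: from asm/csr.h */
#define CSR_SSCOUNTOVF            0xda0   /* assumption: from asm/csr.h */
#define THEAD_C9XX_RV_IRQ_PMU     17      /* from this diff */
#define THEAD_C9XX_CSR_SCOUNTEROF 0x5c5   /* from this diff */

struct pmu_irq_choice {
        bool use_irq;
        unsigned int irq_num;   /* local interrupt to enable in CSR_IE */
        unsigned int ovf_csr;   /* CSR holding per-counter overflow bits */
};

static struct pmu_irq_choice probe_pmu_irq(bool has_sscofpmf,
                                           unsigned long mvendorid,
                                           unsigned long marchid,
                                           unsigned long mimpid)
{
        struct pmu_irq_choice c = { false, 0, 0 };

        if (has_sscofpmf) {
                /* Standard path: the Sscofpmf extension is present. */
                c = (struct pmu_irq_choice){ true, RV_IRQ_PMU, CSR_SSCOUNTOVF };
        } else if (mvendorid == THEAD_VENDOR_ID && marchid == 0 && mimpid == 0) {
                /* T-Head C9xx errata path: same scheme, non-standard numbers. */
                c = (struct pmu_irq_choice){ true, THEAD_C9XX_RV_IRQ_PMU,
                                             THEAD_C9XX_CSR_SCOUNTEROF };
        }
        return c;
}

int main(void)
{
        /* C9xx cores report marchid == mimpid == 0 with T-Head's vendor ID. */
        struct pmu_irq_choice c = probe_pmu_irq(false, THEAD_VENDOR_ID, 0, 0);

        printf("use_irq=%d irq=%u ovf_csr=0x%x\n", c.use_irq, c.irq_num, c.ovf_csr);
        return 0;
}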
@@ -66,4 +66,17 @@ config ERRATA_THEAD_CMO

 	  If you don't know what to do here, say "Y".

+config ERRATA_THEAD_PMU
+	bool "Apply T-Head PMU errata"
+	depends on ERRATA_THEAD && RISCV_PMU_SBI
+	default y
+	help
+	  The T-Head C9xx cores implement a PMU overflow extension very
+	  similar to the core SSCOFPMF extension.
+
+	  This will apply the overflow errata to handle the non-standard
+	  behaviour via the regular SBI PMU driver and interface.
+
+	  If you don't know what to do here, say "Y".
+
 endmenu # "CPU errata selection"
@@ -47,6 +47,22 @@ static bool errata_probe_cmo(unsigned int stage,
 	return true;
 }

+static bool errata_probe_pmu(unsigned int stage,
+			     unsigned long arch_id, unsigned long impid)
+{
+	if (!IS_ENABLED(CONFIG_ERRATA_THEAD_PMU))
+		return false;
+
+	/* target-c9xx cores report arch_id and impid as 0 */
+	if (arch_id != 0 || impid != 0)
+		return false;
+
+	if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
+		return false;
+
+	return true;
+}
+
 static u32 thead_errata_probe(unsigned int stage,
 			      unsigned long archid, unsigned long impid)
 {
@@ -58,6 +74,9 @@ static u32 thead_errata_probe(unsigned int stage,
 	if (errata_probe_cmo(stage, archid, impid))
 		cpu_req_errata |= BIT(ERRATA_THEAD_CMO);

+	if (errata_probe_pmu(stage, archid, impid))
+		cpu_req_errata |= BIT(ERRATA_THEAD_PMU);
+
 	return cpu_req_errata;
 }
...
@@ -6,6 +6,7 @@
 #define ASM_ERRATA_LIST_H

 #include <asm/alternative.h>
+#include <asm/csr.h>
 #include <asm/vendorid_list.h>

 #ifdef CONFIG_ERRATA_SIFIVE
@@ -17,7 +18,8 @@
 #ifdef CONFIG_ERRATA_THEAD
 #define ERRATA_THEAD_PBMT 0
 #define ERRATA_THEAD_CMO 1
-#define ERRATA_THEAD_NUMBER 2
+#define ERRATA_THEAD_PMU 2
+#define ERRATA_THEAD_NUMBER 3
 #endif

 #define CPUFEATURE_SVPBMT 0
@@ -142,6 +144,18 @@ asm volatile(ALTERNATIVE_2(				\
 	    "r"((unsigned long)(_start) + (_size))		\
 	: "a0")

+#define THEAD_C9XX_RV_IRQ_PMU			17
+#define THEAD_C9XX_CSR_SCOUNTEROF		0x5c5
+
+#define ALT_SBI_PMU_OVERFLOW(__ovl)				\
+asm volatile(ALTERNATIVE(					\
+	"csrr %0, " __stringify(CSR_SSCOUNTOVF),		\
+	"csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF),	\
+	THEAD_VENDOR_ID, ERRATA_THEAD_PMU,			\
+	CONFIG_ERRATA_THEAD_PMU)				\
+	: "=r" (__ovl) :					\
+	: "memory")
+
 #endif /* __ASSEMBLY__ */

 #endif
@@ -327,4 +327,9 @@ int sbi_err_map_linux_errno(int err);
 static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1; }
 static inline void sbi_init(void) {}
 #endif /* CONFIG_RISCV_SBI */
+
+unsigned long riscv_cached_mvendorid(unsigned int cpu_id);
+unsigned long riscv_cached_marchid(unsigned int cpu_id);
+unsigned long riscv_cached_mimpid(unsigned int cpu_id);
+
 #endif /* _ASM_RISCV_SBI_H */
@@ -70,8 +70,6 @@ int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid)
 	return -1;
 }

-#ifdef CONFIG_PROC_FS
-
 struct riscv_cpuinfo {
 	unsigned long mvendorid;
 	unsigned long marchid;
@@ -79,6 +77,30 @@ struct riscv_cpuinfo {
 };
 static DEFINE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);

+unsigned long riscv_cached_mvendorid(unsigned int cpu_id)
+{
+	struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);
+
+	return ci->mvendorid;
+}
+EXPORT_SYMBOL(riscv_cached_mvendorid);
+
+unsigned long riscv_cached_marchid(unsigned int cpu_id)
+{
+	struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);
+
+	return ci->marchid;
+}
+EXPORT_SYMBOL(riscv_cached_marchid);
+
+unsigned long riscv_cached_mimpid(unsigned int cpu_id)
+{
+	struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);
+
+	return ci->mimpid;
+}
+EXPORT_SYMBOL(riscv_cached_mimpid);
+
 static int riscv_cpuinfo_starting(unsigned int cpu)
 {
 	struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);
@@ -113,7 +135,9 @@ static int __init riscv_cpuinfo_init(void)

 	return 0;
 }
-device_initcall(riscv_cpuinfo_init);
+arch_initcall(riscv_cpuinfo_init);
+
+#ifdef CONFIG_PROC_FS

 #define __RISCV_ISA_EXT_DATA(UPROP, EXTID)	\
 	{					\
...
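
The cpu.c change above exists because mvendorid, marchid and mimpid are M-mode CSRs that Linux obtains through SBI calls; since the values never change at runtime, each hart reads them once at bring-up and later callers (such as the PMU driver below) hit the per-CPU cache. Promoting riscv_cpuinfo_init() from device_initcall to arch_initcall makes the cache available before device probe time. A minimal user-space model of the pattern, assuming illustrative stand-ins NR_CPUS and firmware_read_mvendorid() that are not kernel symbols:

#include <stdio.h>

#define NR_CPUS 4

/* Per-CPU cache, filled once while each CPU is brought online. */
static unsigned long cached_mvendorid_tbl[NR_CPUS];

/* Stand-in for the SBI query that is only issued once per hart. */
static unsigned long firmware_read_mvendorid(unsigned int cpu)
{
        (void)cpu;
        return 0x5b7;           /* pretend every hart is a T-Head core */
}

/* Analogue of riscv_cpuinfo_starting(): runs as the CPU comes up. */
static void cpuinfo_starting(unsigned int cpu)
{
        cached_mvendorid_tbl[cpu] = firmware_read_mvendorid(cpu);
}

/* Analogue of riscv_cached_mvendorid(): later callers read memory only. */
static unsigned long cached_mvendorid(unsigned int cpu)
{
        return cached_mvendorid_tbl[cpu];
}

int main(void)
{
        for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
                cpuinfo_starting(cpu);

        printf("cpu0 mvendorid = 0x%lx\n", cached_mvendorid(0));
        return 0;
}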
@@ -20,6 +20,7 @@
 #include <linux/cpu_pm.h>
 #include <linux/sched/clock.h>

+#include <asm/errata_list.h>
 #include <asm/sbi.h>
 #include <asm/hwcap.h>

@@ -47,6 +48,8 @@ static const struct attribute_group *riscv_pmu_attr_groups[] = {
  * per_cpu in case of harts with different pmu counters
  */
 static union sbi_pmu_ctr_info *pmu_ctr_list;
+static bool riscv_pmu_use_irq;
+static unsigned int riscv_pmu_irq_num;
 static unsigned int riscv_pmu_irq;

 struct sbi_pmu_event_data {
@@ -580,7 +583,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
 	fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS);
 	event = cpu_hw_evt->events[fidx];
 	if (!event) {
-		csr_clear(CSR_SIP, SIP_LCOFIP);
+		csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
 		return IRQ_NONE;
 	}

@@ -588,13 +591,13 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
 	pmu_sbi_stop_hw_ctrs(pmu);

 	/* Overflow status register should only be read after counter are stopped */
-	overflow = csr_read(CSR_SSCOUNTOVF);
+	ALT_SBI_PMU_OVERFLOW(overflow);

 	/*
 	 * Overflow interrupt pending bit should only be cleared after stopping
 	 * all the counters to avoid any race condition.
 	 */
-	csr_clear(CSR_SIP, SIP_LCOFIP);
+	csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));

 	/* No overflow bit is set */
 	if (!overflow)
@@ -661,10 +664,10 @@ static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
 	/* Stop all the counters so that they can be enabled from perf */
 	pmu_sbi_stop_all(pmu);

-	if (riscv_isa_extension_available(NULL, SSCOFPMF)) {
+	if (riscv_pmu_use_irq) {
 		cpu_hw_evt->irq = riscv_pmu_irq;
-		csr_clear(CSR_IP, BIT(RV_IRQ_PMU));
-		csr_set(CSR_IE, BIT(RV_IRQ_PMU));
+		csr_clear(CSR_IP, BIT(riscv_pmu_irq_num));
+		csr_set(CSR_IE, BIT(riscv_pmu_irq_num));
 		enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE);
 	}

@@ -673,9 +676,9 @@ static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
 static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node)
 {
-	if (riscv_isa_extension_available(NULL, SSCOFPMF)) {
+	if (riscv_pmu_use_irq) {
 		disable_percpu_irq(riscv_pmu_irq);
-		csr_clear(CSR_IE, BIT(RV_IRQ_PMU));
+		csr_clear(CSR_IE, BIT(riscv_pmu_irq_num));
 	}

 	/* Disable all counters access for user mode now */
@@ -691,7 +694,18 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde
 	struct device_node *cpu, *child;
 	struct irq_domain *domain = NULL;

-	if (!riscv_isa_extension_available(NULL, SSCOFPMF))
+	if (riscv_isa_extension_available(NULL, SSCOFPMF)) {
+		riscv_pmu_irq_num = RV_IRQ_PMU;
+		riscv_pmu_use_irq = true;
+	} else if (IS_ENABLED(CONFIG_ERRATA_THEAD_PMU) &&
+		   riscv_cached_mvendorid(0) == THEAD_VENDOR_ID &&
+		   riscv_cached_marchid(0) == 0 &&
+		   riscv_cached_mimpid(0) == 0) {
+		riscv_pmu_irq_num = THEAD_C9XX_RV_IRQ_PMU;
+		riscv_pmu_use_irq = true;
+	}
+
+	if (!riscv_pmu_use_irq)
 		return -EOPNOTSUPP;

 	for_each_of_cpu_node(cpu) {
@@ -713,7 +727,7 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde
 		return -ENODEV;
 	}

-	riscv_pmu_irq = irq_create_mapping(domain, RV_IRQ_PMU);
+	riscv_pmu_irq = irq_create_mapping(domain, riscv_pmu_irq_num);
 	if (!riscv_pmu_irq) {
 		pr_err("Failed to map PMU interrupt for node\n");
 		return -ENODEV;
...
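
The single functional change in the hot path is ALT_SBI_PMU_OVERFLOW(): the alternatives framework patches the csrr of CSR_SSCOUNTOVF into a csrr of THEAD_C9XX_CSR_SCOUNTEROF at boot, when the vendor ID matches THEAD_VENDOR_ID and thead_errata_probe() has set the ERRATA_THEAD_PMU bit, so the interrupt handler pays no runtime branch. A rough user-space analogy of that mechanism, with a once-at-init function pointer standing in for instruction patching; read_sscountovf/read_scounterof are illustrative names, not kernel symbols:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the two csrr encodings ALTERNATIVE() patches between. */
static unsigned long read_sscountovf(void)
{
        return 0x0;     /* would be: csrr %0, 0xda0 (CSR_SSCOUNTOVF) */
}

static unsigned long read_scounterof(void)
{
        return 0x0;     /* would be: csrr %0, 0x5c5 (THEAD_C9XX_CSR_SCOUNTEROF) */
}

/* Selected once, like apply_alternatives() rewriting the kernel text. */
static unsigned long (*read_overflow)(void) = read_sscountovf;

static void apply_pmu_alternative(bool is_thead_c9xx)
{
        if (is_thead_c9xx)
                read_overflow = read_scounterof;
}

int main(void)
{
        apply_pmu_alternative(true);    /* "boot-time" decision... */
        printf("overflow = 0x%lx\n", read_overflow());  /* ...branch-free use */
        return 0;
}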