Commit 1537bf26 authored by Sergey Matyukevich, committed by Palmer Dabbelt

perf: RISC-V: exclude invalid pmu counters from SBI calls

SBI firmware may not provide information for some counters in response
to SBI_EXT_PMU_COUNTER_GET_INFO call. Exclude such counters from the
subsequent SBI requests. For this purpose use global mask to keep track
of fully specified counters.
Signed-off-by: Sergey Matyukevich <sergey.matyukevich@syntacore.com>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Link: https://lore.kernel.org/r/20220830155306.301714-3-geomatsi@gmail.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
parent 82c75dca
...@@ -14,7 +14,6 @@ ...@@ -14,7 +14,6 @@
#define RISCV_PMU_LEGACY_CYCLE 0 #define RISCV_PMU_LEGACY_CYCLE 0
#define RISCV_PMU_LEGACY_INSTRET 1 #define RISCV_PMU_LEGACY_INSTRET 1
#define RISCV_PMU_LEGACY_NUM_CTR 2
static bool pmu_init_done; static bool pmu_init_done;
...@@ -83,7 +82,8 @@ static void pmu_legacy_init(struct riscv_pmu *pmu) ...@@ -83,7 +82,8 @@ static void pmu_legacy_init(struct riscv_pmu *pmu)
{ {
pr_info("Legacy PMU implementation is available\n"); pr_info("Legacy PMU implementation is available\n");
pmu->num_counters = RISCV_PMU_LEGACY_NUM_CTR; pmu->cmask = BIT(RISCV_PMU_LEGACY_CYCLE) |
BIT(RISCV_PMU_LEGACY_INSTRET);
pmu->ctr_start = pmu_legacy_ctr_start; pmu->ctr_start = pmu_legacy_ctr_start;
pmu->ctr_stop = NULL; pmu->ctr_stop = NULL;
pmu->event_map = pmu_legacy_event_map; pmu->event_map = pmu_legacy_event_map;
......
...@@ -271,7 +271,6 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event) ...@@ -271,7 +271,6 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
struct sbiret ret; struct sbiret ret;
int idx; int idx;
uint64_t cbase = 0; uint64_t cbase = 0;
uint64_t cmask = GENMASK_ULL(rvpmu->num_counters - 1, 0);
unsigned long cflags = 0; unsigned long cflags = 0;
if (event->attr.exclude_kernel) if (event->attr.exclude_kernel)
...@@ -281,11 +280,12 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event) ...@@ -281,11 +280,12 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
/* retrieve the available counter index */ /* retrieve the available counter index */
#if defined(CONFIG_32BIT) #if defined(CONFIG_32BIT)
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask, ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
cflags, hwc->event_base, hwc->config, hwc->config >> 32); rvpmu->cmask, cflags, hwc->event_base, hwc->config,
hwc->config >> 32);
#else #else
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask, ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
cflags, hwc->event_base, hwc->config, 0); rvpmu->cmask, cflags, hwc->event_base, hwc->config, 0);
#endif #endif
if (ret.error) { if (ret.error) {
pr_debug("Not able to find a counter for event %lx config %llx\n", pr_debug("Not able to find a counter for event %lx config %llx\n",
...@@ -294,7 +294,7 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event) ...@@ -294,7 +294,7 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
} }
idx = ret.value; idx = ret.value;
if (idx >= rvpmu->num_counters || !pmu_ctr_list[idx].value) if (!test_bit(idx, &rvpmu->cmask) || !pmu_ctr_list[idx].value)
return -ENOENT; return -ENOENT;
/* Additional sanity check for the counter id */ /* Additional sanity check for the counter id */
...@@ -463,7 +463,7 @@ static int pmu_sbi_find_num_ctrs(void) ...@@ -463,7 +463,7 @@ static int pmu_sbi_find_num_ctrs(void)
return sbi_err_map_linux_errno(ret.error); return sbi_err_map_linux_errno(ret.error);
} }
static int pmu_sbi_get_ctrinfo(int nctr) static int pmu_sbi_get_ctrinfo(int nctr, unsigned long *mask)
{ {
struct sbiret ret; struct sbiret ret;
int i, num_hw_ctr = 0, num_fw_ctr = 0; int i, num_hw_ctr = 0, num_fw_ctr = 0;
...@@ -478,6 +478,9 @@ static int pmu_sbi_get_ctrinfo(int nctr) ...@@ -478,6 +478,9 @@ static int pmu_sbi_get_ctrinfo(int nctr)
if (ret.error) if (ret.error)
/* The logical counter ids are not expected to be contiguous */ /* The logical counter ids are not expected to be contiguous */
continue; continue;
*mask |= BIT(i);
cinfo.value = ret.value; cinfo.value = ret.value;
if (cinfo.type == SBI_PMU_CTR_TYPE_FW) if (cinfo.type == SBI_PMU_CTR_TYPE_FW)
num_fw_ctr++; num_fw_ctr++;
...@@ -498,7 +501,7 @@ static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu) ...@@ -498,7 +501,7 @@ static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
* which may include counters that are not enabled yet. * which may include counters that are not enabled yet.
*/ */
sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
0, GENMASK_ULL(pmu->num_counters - 1, 0), 0, 0, 0, 0); 0, pmu->cmask, 0, 0, 0, 0);
} }
static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu) static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
...@@ -788,8 +791,9 @@ static void riscv_pmu_destroy(struct riscv_pmu *pmu) ...@@ -788,8 +791,9 @@ static void riscv_pmu_destroy(struct riscv_pmu *pmu)
static int pmu_sbi_device_probe(struct platform_device *pdev) static int pmu_sbi_device_probe(struct platform_device *pdev)
{ {
struct riscv_pmu *pmu = NULL; struct riscv_pmu *pmu = NULL;
int num_counters; unsigned long cmask = 0;
int ret = -ENODEV; int ret = -ENODEV;
int num_counters;
pr_info("SBI PMU extension is available\n"); pr_info("SBI PMU extension is available\n");
pmu = riscv_pmu_alloc(); pmu = riscv_pmu_alloc();
...@@ -803,7 +807,7 @@ static int pmu_sbi_device_probe(struct platform_device *pdev) ...@@ -803,7 +807,7 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
} }
/* cache all the information about counters now */ /* cache all the information about counters now */
if (pmu_sbi_get_ctrinfo(num_counters)) if (pmu_sbi_get_ctrinfo(num_counters, &cmask))
goto out_free; goto out_free;
ret = pmu_sbi_setup_irqs(pmu, pdev); ret = pmu_sbi_setup_irqs(pmu, pdev);
...@@ -812,8 +816,9 @@ static int pmu_sbi_device_probe(struct platform_device *pdev) ...@@ -812,8 +816,9 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE; pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
} }
pmu->pmu.attr_groups = riscv_pmu_attr_groups; pmu->pmu.attr_groups = riscv_pmu_attr_groups;
pmu->num_counters = num_counters; pmu->cmask = cmask;
pmu->ctr_start = pmu_sbi_ctr_start; pmu->ctr_start = pmu_sbi_ctr_start;
pmu->ctr_stop = pmu_sbi_ctr_stop; pmu->ctr_stop = pmu_sbi_ctr_stop;
pmu->event_map = pmu_sbi_event_map; pmu->event_map = pmu_sbi_event_map;
......
...@@ -45,7 +45,7 @@ struct riscv_pmu { ...@@ -45,7 +45,7 @@ struct riscv_pmu {
irqreturn_t (*handle_irq)(int irq_num, void *dev); irqreturn_t (*handle_irq)(int irq_num, void *dev);
int num_counters; unsigned long cmask;
u64 (*ctr_read)(struct perf_event *event); u64 (*ctr_read)(struct perf_event *event);
int (*ctr_get_idx)(struct perf_event *event); int (*ctr_get_idx)(struct perf_event *event);
int (*ctr_get_width)(int idx); int (*ctr_get_width)(int idx);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment