Commit d8602f8b authored by Catalin Marinas

Merge remote-tracking branch 'arm64/for-next/perf' into for-next/core

* arm64/for-next/perf:
  perf/imx_ddr: Add system PMU identifier for userspace
  bindings: perf: imx-ddr: add compatible string
  arm64: Fix build failure when HARDLOCKUP_DETECTOR_PERF is enabled
  arm64: Enable perf events based hard lockup detector
  perf/imx_ddr: Add stop event counters support for i.MX8MP
  perf/smmuv3: Support sysfs identifier file
  drivers/perf: hisi: Add identifier sysfs file
  perf: remove duplicate check on fwnode
  driver/perf: Add PMU driver for the ARM DMC-620 memory controller
parents ba4259a6 881b0520
Documentation/devicetree/bindings/perf/fsl-imx-ddr.yaml:
@@ -15,6 +15,9 @@ properties:
       - enum:
           - fsl,imx8-ddr-pmu
           - fsl,imx8m-ddr-pmu
+          - fsl,imx8mq-ddr-pmu
+          - fsl,imx8mm-ddr-pmu
+          - fsl,imx8mn-ddr-pmu
           - fsl,imx8mp-ddr-pmu
       - items:
           - enum:
...
arch/arm64/Kconfig:
@@ -170,6 +170,8 @@ config ARM64
 	select HAVE_NMI
 	select HAVE_PATA_PLATFORM
 	select HAVE_PERF_EVENTS
+	select HAVE_PERF_EVENTS_NMI if ARM64_PSEUDO_NMI && HW_PERF_EVENTS
+	select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_REGS_AND_STACK_ACCESS_API
...
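A practical note on these two selects: the perf-based hard lockup detector only becomes usable when the PMU interrupt is actually delivered as a pseudo-NMI, which on GICv3 systems also requires booting with irqchip.gicv3_pseudo_nmi=1 on the kernel command line; without it the PMU interrupt remains a regular IRQ and the hard lockup detector is not armed.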
arch/arm64/kernel/perf_event.c:
@@ -23,6 +23,8 @@
 #include <linux/platform_device.h>
 #include <linux/sched_clock.h>
 #include <linux/smp.h>
+#include <linux/nmi.h>
+#include <linux/cpufreq.h>

 /* ARMv8 Cortex-A53 specific event types. */
 #define ARMV8_A53_PERFCTR_PREF_LINEFILL	0xC2
@@ -1248,10 +1250,21 @@ static struct platform_driver armv8_pmu_driver = {
 static int __init armv8_pmu_driver_init(void)
 {
+	int ret;
+
 	if (acpi_disabled)
-		return platform_driver_register(&armv8_pmu_driver);
+		ret = platform_driver_register(&armv8_pmu_driver);
 	else
-		return arm_pmu_acpi_probe(armv8_pmuv3_init);
+		ret = arm_pmu_acpi_probe(armv8_pmuv3_init);
+
+	/*
+	 * Try to re-initialize lockup detector after PMU init in
+	 * case PMU events are triggered via NMIs.
+	 */
+	if (ret == 0 && arm_pmu_irq_is_nmi())
+		lockup_detector_init();
+
+	return ret;
 }
 device_initcall(armv8_pmu_driver_init)
@@ -1309,3 +1322,27 @@ void arch_perf_update_userpage(struct perf_event *event,
 	userpg->cap_user_time_zero = 1;
 	userpg->cap_user_time_short = 1;
 }
+
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
+/*
+ * Safe maximum CPU frequency in case a particular platform doesn't implement
+ * a cpufreq driver. The architecture doesn't put any restrictions on the
+ * maximum frequency, but 5 GHz seems to be a safe maximum given the available
+ * Arm CPUs on the market, which are clocked well below 5 GHz. On the other
+ * hand, we can't make it much higher, as it would lead to a large hard-lockup
+ * detection timeout on parts which are running slower (e.g. 1 GHz on
+ * Developerbox) and don't possess a cpufreq driver.
+ */
+#define SAFE_MAX_CPU_FREQ	5000000000UL		// 5 GHz
+
+u64 hw_nmi_get_sample_period(int watchdog_thresh)
+{
+	unsigned int cpu = smp_processor_id();
+	unsigned long max_cpu_freq;
+
+	max_cpu_freq = cpufreq_get_hw_max_freq(cpu) * 1000UL;
+	if (!max_cpu_freq)
+		max_cpu_freq = SAFE_MAX_CPU_FREQ;
+
+	return (u64)max_cpu_freq * watchdog_thresh;
+}
+#endif
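To make the arithmetic concrete: cpufreq_get_hw_max_freq() reports kHz (hence the multiply by 1000), and the value returned is a period in CPU cycles. A minimal worked sketch, with illustrative numbers not taken from the patch:

	/* Hypothetical 2 GHz part, default watchdog_thresh of 10 seconds */
	unsigned long max_cpu_freq = 2000000UL * 1000UL; /* 2,000,000 kHz -> 2 GHz */
	u64 period = (u64)max_cpu_freq * 10;             /* 2e10 cycles, ~10 s */

A part with no cpufreq driver falls back to SAFE_MAX_CPU_FREQ, so a 1 GHz Developerbox consumes its "10 second" period in roughly 50 s of wall time: a long but safe timeout, exactly the trade-off the comment above describes.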
drivers/perf/Kconfig:
@@ -130,6 +130,13 @@ config ARM_SPE_PMU
 	  Extension, which provides periodic sampling of operations in
 	  the CPU pipeline and reports this via the perf AUX interface.

+config ARM_DMC620_PMU
+	tristate "Enable PMU support for the ARM DMC-620 memory controller"
+	depends on (ARM64 && ACPI) || COMPILE_TEST
+	help
+	  Support for PMU events monitoring on the ARM DMC-620 memory
+	  controller.
+
 source "drivers/perf/hisilicon/Kconfig"

 endmenu

drivers/perf/Makefile:
@@ -13,3 +13,4 @@ obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
 obj-$(CONFIG_THUNDERX2_PMU) += thunderx2_pmu.o
 obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
 obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o
+obj-$(CONFIG_ARM_DMC620_PMU) += arm_dmc620_pmu.o

drivers/perf/arm_dmc620_pmu.c (new file):
// SPDX-License-Identifier: GPL-2.0-only
/*
* ARM DMC-620 memory controller PMU driver
*
* Copyright (C) 2020 Ampere Computing LLC.
*/
#define DMC620_PMUNAME "arm_dmc620"
#define DMC620_DRVNAME DMC620_PMUNAME "_pmu"
#define pr_fmt(fmt) DMC620_DRVNAME ": " fmt
#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/refcount.h>
#define DMC620_PA_SHIFT 12
#define DMC620_CNT_INIT 0x80000000
#define DMC620_CNT_MAX_PERIOD 0xffffffff
#define DMC620_PMU_CLKDIV2_MAX_COUNTERS 8
#define DMC620_PMU_CLK_MAX_COUNTERS 2
#define DMC620_PMU_MAX_COUNTERS \
(DMC620_PMU_CLKDIV2_MAX_COUNTERS + DMC620_PMU_CLK_MAX_COUNTERS)
/*
* The PMU registers start at 0xA00 in the DMC-620 memory map, and these
* offsets are relative to that base.
*
* Each counter has a group of control/value registers, and the
* DMC620_PMU_COUNTERn offsets are within a counter group.
*
* The counter register groups start at 0xA10.
*/
#define DMC620_PMU_OVERFLOW_STATUS_CLKDIV2 0x8
#define DMC620_PMU_OVERFLOW_STATUS_CLKDIV2_MASK \
(DMC620_PMU_CLKDIV2_MAX_COUNTERS - 1)
#define DMC620_PMU_OVERFLOW_STATUS_CLK 0xC
#define DMC620_PMU_OVERFLOW_STATUS_CLK_MASK \
(DMC620_PMU_CLK_MAX_COUNTERS - 1)
#define DMC620_PMU_COUNTERS_BASE 0x10
#define DMC620_PMU_COUNTERn_MASK_31_00 0x0
#define DMC620_PMU_COUNTERn_MASK_63_32 0x4
#define DMC620_PMU_COUNTERn_MATCH_31_00 0x8
#define DMC620_PMU_COUNTERn_MATCH_63_32 0xC
#define DMC620_PMU_COUNTERn_CONTROL 0x10
#define DMC620_PMU_COUNTERn_CONTROL_ENABLE BIT(0)
#define DMC620_PMU_COUNTERn_CONTROL_INVERT BIT(1)
#define DMC620_PMU_COUNTERn_CONTROL_EVENT_MUX GENMASK(6, 2)
#define DMC620_PMU_COUNTERn_CONTROL_INCR_MUX GENMASK(8, 7)
#define DMC620_PMU_COUNTERn_VALUE 0x20
/* Offset of the registers for a given counter, relative to 0xA00 */
#define DMC620_PMU_COUNTERn_OFFSET(n) \
(DMC620_PMU_COUNTERS_BASE + 0x28 * (n))
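/*
 * The macro arithmetic above, written out for orientation (offsets are
 * relative to the 0xA00 PMU base; a sketch, not an exhaustive map):
 *   counter 0 group at 0x10: MASK at 0x10/0x14, MATCH at 0x18/0x1c,
 *                            CONTROL at 0x20, VALUE at 0x30
 *   counter 1 group at 0x38, ..., counter 9 (last clk counter) at 0x178
 */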
static LIST_HEAD(dmc620_pmu_irqs);
static DEFINE_MUTEX(dmc620_pmu_irqs_lock);
struct dmc620_pmu_irq {
struct hlist_node node;
struct list_head pmus_node;
struct list_head irqs_node;
refcount_t refcount;
unsigned int irq_num;
unsigned int cpu;
};
struct dmc620_pmu {
struct pmu pmu;
void __iomem *base;
struct dmc620_pmu_irq *irq;
struct list_head pmus_node;
/*
* We put all clkdiv2 and clk counters in the same array.
* The first DMC620_PMU_CLKDIV2_MAX_COUNTERS bits belong to
* clkdiv2 counters, and the last DMC620_PMU_CLK_MAX_COUNTERS
* belong to clk counters.
*/
DECLARE_BITMAP(used_mask, DMC620_PMU_MAX_COUNTERS);
struct perf_event *events[DMC620_PMU_MAX_COUNTERS];
};
#define to_dmc620_pmu(p) (container_of(p, struct dmc620_pmu, pmu))
static int cpuhp_state_num;
struct dmc620_pmu_event_attr {
struct device_attribute attr;
u8 clkdiv2;
u8 eventid;
};
static ssize_t
dmc620_pmu_event_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct dmc620_pmu_event_attr *eattr;
eattr = container_of(attr, typeof(*eattr), attr);
return sprintf(page, "event=0x%x,clkdiv2=0x%x\n", eattr->eventid, eattr->clkdiv2);
}
#define DMC620_PMU_EVENT_ATTR(_name, _eventid, _clkdiv2) \
(&((struct dmc620_pmu_event_attr[]) {{ \
.attr = __ATTR(_name, 0444, dmc620_pmu_event_show, NULL), \
.clkdiv2 = _clkdiv2, \
.eventid = _eventid, \
}})[0].attr.attr)
static struct attribute *dmc620_pmu_events_attrs[] = {
/* clkdiv2 events list */
DMC620_PMU_EVENT_ATTR(clkdiv2_cycle_count, 0x0, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_allocate, 0x1, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_queue_depth, 0x2, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_waiting_for_wr_data, 0x3, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_read_backlog, 0x4, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_waiting_for_mi, 0x5, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_hazard_resolution, 0x6, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_enqueue, 0x7, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_arbitrate, 0x8, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_lrank_turnaround_activate, 0x9, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_prank_turnaround_activate, 0xa, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_read_depth, 0xb, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_write_depth, 0xc, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_highigh_qos_depth, 0xd, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_high_qos_depth, 0xe, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_medium_qos_depth, 0xf, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_low_qos_depth, 0x10, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_activate, 0x11, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_rdwr, 0x12, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_refresh, 0x13, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_training_request, 0x14, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_t_mac_tracker, 0x15, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_bk_fsm_tracker, 0x16, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_bk_open_tracker, 0x17, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_ranks_in_pwr_down, 0x18, 1),
DMC620_PMU_EVENT_ATTR(clkdiv2_ranks_in_sref, 0x19, 1),
/* clk events list */
DMC620_PMU_EVENT_ATTR(clk_cycle_count, 0x0, 0),
DMC620_PMU_EVENT_ATTR(clk_request, 0x1, 0),
DMC620_PMU_EVENT_ATTR(clk_upload_stall, 0x2, 0),
NULL,
};
static struct attribute_group dmc620_pmu_events_attr_group = {
.name = "events",
.attrs = dmc620_pmu_events_attrs,
};
/* User ABI */
#define ATTR_CFG_FLD_mask_CFG config
#define ATTR_CFG_FLD_mask_LO 0
#define ATTR_CFG_FLD_mask_HI 44
#define ATTR_CFG_FLD_match_CFG config1
#define ATTR_CFG_FLD_match_LO 0
#define ATTR_CFG_FLD_match_HI 44
#define ATTR_CFG_FLD_invert_CFG config2
#define ATTR_CFG_FLD_invert_LO 0
#define ATTR_CFG_FLD_invert_HI 0
#define ATTR_CFG_FLD_incr_CFG config2
#define ATTR_CFG_FLD_incr_LO 1
#define ATTR_CFG_FLD_incr_HI 2
#define ATTR_CFG_FLD_event_CFG config2
#define ATTR_CFG_FLD_event_LO 3
#define ATTR_CFG_FLD_event_HI 8
#define ATTR_CFG_FLD_clkdiv2_CFG config2
#define ATTR_CFG_FLD_clkdiv2_LO 9
#define ATTR_CFG_FLD_clkdiv2_HI 9
#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
(lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi
#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
__GEN_PMU_FORMAT_ATTR(cfg, lo, hi)
#define GEN_PMU_FORMAT_ATTR(name) \
PMU_FORMAT_ATTR(name, \
_GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG, \
ATTR_CFG_FLD_##name##_LO, \
ATTR_CFG_FLD_##name##_HI))
#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi) \
((((attr)->cfg) >> lo) & GENMASK_ULL(hi - lo, 0))
#define ATTR_CFG_GET_FLD(attr, name) \
_ATTR_CFG_GET_FLD(attr, \
ATTR_CFG_FLD_##name##_CFG, \
ATTR_CFG_FLD_##name##_LO, \
ATTR_CFG_FLD_##name##_HI)
GEN_PMU_FORMAT_ATTR(mask);
GEN_PMU_FORMAT_ATTR(match);
GEN_PMU_FORMAT_ATTR(invert);
GEN_PMU_FORMAT_ATTR(incr);
GEN_PMU_FORMAT_ATTR(event);
GEN_PMU_FORMAT_ATTR(clkdiv2);
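/*
 * A worked example of this encoding (values illustrative): selecting
 * clkdiv2_allocate (eventid 0x1, clkdiv2 1) from the event table above
 * means perf packs config2 = (0x1 << 3) | (1 << 9) = 0x208, with invert
 * in bit 0 and incr in bits 1-2 left at zero; the 45-bit mask and match
 * qualifiers travel separately in config and config1.
 */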
static struct attribute *dmc620_pmu_formats_attrs[] = {
&format_attr_mask.attr,
&format_attr_match.attr,
&format_attr_invert.attr,
&format_attr_incr.attr,
&format_attr_event.attr,
&format_attr_clkdiv2.attr,
NULL,
};
static struct attribute_group dmc620_pmu_format_attr_group = {
.name = "format",
.attrs = dmc620_pmu_formats_attrs,
};
static const struct attribute_group *dmc620_pmu_attr_groups[] = {
&dmc620_pmu_events_attr_group,
&dmc620_pmu_format_attr_group,
NULL,
};
static inline
u32 dmc620_pmu_creg_read(struct dmc620_pmu *dmc620_pmu,
unsigned int idx, unsigned int reg)
{
return readl(dmc620_pmu->base + DMC620_PMU_COUNTERn_OFFSET(idx) + reg);
}
static inline
void dmc620_pmu_creg_write(struct dmc620_pmu *dmc620_pmu,
unsigned int idx, unsigned int reg, u32 val)
{
writel(val, dmc620_pmu->base + DMC620_PMU_COUNTERn_OFFSET(idx) + reg);
}
static
unsigned int dmc620_event_to_counter_control(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
unsigned int reg = 0;
reg |= FIELD_PREP(DMC620_PMU_COUNTERn_CONTROL_INVERT,
ATTR_CFG_GET_FLD(attr, invert));
reg |= FIELD_PREP(DMC620_PMU_COUNTERn_CONTROL_EVENT_MUX,
ATTR_CFG_GET_FLD(attr, event));
reg |= FIELD_PREP(DMC620_PMU_COUNTERn_CONTROL_INCR_MUX,
ATTR_CFG_GET_FLD(attr, incr));
return reg;
}
static int dmc620_get_event_idx(struct perf_event *event)
{
struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
int idx, start_idx, end_idx;
if (ATTR_CFG_GET_FLD(&event->attr, clkdiv2)) {
start_idx = 0;
end_idx = DMC620_PMU_CLKDIV2_MAX_COUNTERS;
} else {
start_idx = DMC620_PMU_CLKDIV2_MAX_COUNTERS;
end_idx = DMC620_PMU_MAX_COUNTERS;
}
for (idx = start_idx; idx < end_idx; ++idx) {
if (!test_and_set_bit(idx, dmc620_pmu->used_mask))
return idx;
}
/* The counters are all in use. */
return -EAGAIN;
}
static inline
u64 dmc620_pmu_read_counter(struct perf_event *event)
{
struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
return dmc620_pmu_creg_read(dmc620_pmu,
event->hw.idx, DMC620_PMU_COUNTERn_VALUE);
}
static void dmc620_pmu_event_update(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
u64 delta, prev_count, new_count;
do {
/* We may also be called from the irq handler */
prev_count = local64_read(&hwc->prev_count);
new_count = dmc620_pmu_read_counter(event);
} while (local64_cmpxchg(&hwc->prev_count,
prev_count, new_count) != prev_count);
delta = (new_count - prev_count) & DMC620_CNT_MAX_PERIOD;
local64_add(delta, &event->count);
}
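/*
 * A worked example of the wraparound handling (values illustrative):
 * the counters are 32 bits wide, so with prev_count = 0xfffffff0 and a
 * post-wrap new_count = 0x10, the computation
 * (0x10 - 0xfffffff0) & DMC620_CNT_MAX_PERIOD = 0x20 accumulates 32
 * events despite the wrap. Counters start from DMC620_CNT_INIT
 * (0x80000000), leaving half the range before an overflow interrupt.
 */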
static void dmc620_pmu_event_set_period(struct perf_event *event)
{
struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
local64_set(&event->hw.prev_count, DMC620_CNT_INIT);
dmc620_pmu_creg_write(dmc620_pmu,
event->hw.idx, DMC620_PMU_COUNTERn_VALUE, DMC620_CNT_INIT);
}
static void dmc620_pmu_enable_counter(struct perf_event *event)
{
struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
u32 reg;
reg = dmc620_event_to_counter_control(event) | DMC620_PMU_COUNTERn_CONTROL_ENABLE;
dmc620_pmu_creg_write(dmc620_pmu,
event->hw.idx, DMC620_PMU_COUNTERn_CONTROL, reg);
}
static void dmc620_pmu_disable_counter(struct perf_event *event)
{
struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
dmc620_pmu_creg_write(dmc620_pmu,
event->hw.idx, DMC620_PMU_COUNTERn_CONTROL, 0);
}
static irqreturn_t dmc620_pmu_handle_irq(int irq_num, void *data)
{
struct dmc620_pmu_irq *irq = data;
struct dmc620_pmu *dmc620_pmu;
irqreturn_t ret = IRQ_NONE;
rcu_read_lock();
list_for_each_entry_rcu(dmc620_pmu, &irq->pmus_node, pmus_node) {
unsigned long status;
struct perf_event *event;
unsigned int idx;
/*
* HW doesn't provide a control to atomically disable all counters.
* To prevent a race condition (an overflow happening while the status
* register is being cleared), disable all events before continuing.
*/
for (idx = 0; idx < DMC620_PMU_MAX_COUNTERS; idx++) {
event = dmc620_pmu->events[idx];
if (!event)
continue;
dmc620_pmu_disable_counter(event);
}
status = readl(dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);
status |= (readl(dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLK) <<
DMC620_PMU_CLKDIV2_MAX_COUNTERS);
if (status) {
for_each_set_bit(idx, &status,
DMC620_PMU_MAX_COUNTERS) {
event = dmc620_pmu->events[idx];
if (WARN_ON_ONCE(!event))
continue;
dmc620_pmu_event_update(event);
dmc620_pmu_event_set_period(event);
}
if (status & DMC620_PMU_OVERFLOW_STATUS_CLKDIV2_MASK)
writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);
if ((status >> DMC620_PMU_CLKDIV2_MAX_COUNTERS) &
DMC620_PMU_OVERFLOW_STATUS_CLK_MASK)
writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLK);
}
for (idx = 0; idx < DMC620_PMU_MAX_COUNTERS; idx++) {
event = dmc620_pmu->events[idx];
if (!event)
continue;
if (!(event->hw.state & PERF_HES_STOPPED))
dmc620_pmu_enable_counter(event);
}
ret = IRQ_HANDLED;
}
rcu_read_unlock();
return ret;
}
static struct dmc620_pmu_irq *__dmc620_pmu_get_irq(int irq_num)
{
struct dmc620_pmu_irq *irq;
int ret;
list_for_each_entry(irq, &dmc620_pmu_irqs, irqs_node)
if (irq->irq_num == irq_num && refcount_inc_not_zero(&irq->refcount))
return irq;
irq = kzalloc(sizeof(*irq), GFP_KERNEL);
if (!irq)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&irq->pmus_node);
/* Pick one CPU to be the preferred one to use */
irq->cpu = raw_smp_processor_id();
refcount_set(&irq->refcount, 1);
ret = request_irq(irq_num, dmc620_pmu_handle_irq,
IRQF_NOBALANCING | IRQF_NO_THREAD,
"dmc620-pmu", irq);
if (ret)
goto out_free_aff;
ret = irq_set_affinity_hint(irq_num, cpumask_of(irq->cpu));
if (ret)
goto out_free_irq;
ret = cpuhp_state_add_instance_nocalls(cpuhp_state_num, &irq->node);
if (ret)
goto out_free_irq;
irq->irq_num = irq_num;
list_add(&irq->irqs_node, &dmc620_pmu_irqs);
return irq;
out_free_irq:
free_irq(irq_num, irq);
out_free_aff:
kfree(irq);
return ERR_PTR(ret);
}
static int dmc620_pmu_get_irq(struct dmc620_pmu *dmc620_pmu, int irq_num)
{
struct dmc620_pmu_irq *irq;
mutex_lock(&dmc620_pmu_irqs_lock);
irq = __dmc620_pmu_get_irq(irq_num);
mutex_unlock(&dmc620_pmu_irqs_lock);
if (IS_ERR(irq))
return PTR_ERR(irq);
dmc620_pmu->irq = irq;
mutex_lock(&dmc620_pmu_irqs_lock);
list_add_rcu(&dmc620_pmu->pmus_node, &irq->pmus_node);
mutex_unlock(&dmc620_pmu_irqs_lock);
return 0;
}
static void dmc620_pmu_put_irq(struct dmc620_pmu *dmc620_pmu)
{
struct dmc620_pmu_irq *irq = dmc620_pmu->irq;
mutex_lock(&dmc620_pmu_irqs_lock);
list_del_rcu(&dmc620_pmu->pmus_node);
if (!refcount_dec_and_test(&irq->refcount)) {
mutex_unlock(&dmc620_pmu_irqs_lock);
return;
}
list_del(&irq->irqs_node);
mutex_unlock(&dmc620_pmu_irqs_lock);
WARN_ON(irq_set_affinity_hint(irq->irq_num, NULL));
free_irq(irq->irq_num, irq);
cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &irq->node);
kfree(irq);
}
static int dmc620_pmu_event_init(struct perf_event *event)
{
struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
struct perf_event *sibling;
if (event->attr.type != event->pmu->type)
return -ENOENT;
/*
* DMC-620 PMUs are shared across all CPUs, and cannot
* support task-bound or sampling events.
*/
if (is_sampling_event(event) ||
event->attach_state & PERF_ATTACH_TASK) {
dev_dbg(dmc620_pmu->pmu.dev,
"Can't support per-task counters\n");
return -EOPNOTSUPP;
}
/*
* Many perf core operations (eg. events rotation) operate on a
* single CPU context. This is obvious for CPU PMUs, where one
* expects the same sets of events being observed on all CPUs,
* but can lead to issues for off-core PMUs, where each
* event could be theoretically assigned to a different CPU. To
* mitigate this, we enforce CPU assignment to one, selected
* processor.
*/
event->cpu = dmc620_pmu->irq->cpu;
if (event->cpu < 0)
return -EINVAL;
/*
* We can't atomically disable all HW counters, so only one hardware
* event is allowed per group, although software events are acceptable.
*/
if (event->group_leader != event &&
!is_software_event(event->group_leader))
return -EINVAL;
for_each_sibling_event(sibling, event->group_leader) {
if (sibling != event &&
!is_software_event(sibling))
return -EINVAL;
}
hwc->idx = -1;
return 0;
}
static void dmc620_pmu_read(struct perf_event *event)
{
dmc620_pmu_event_update(event);
}
static void dmc620_pmu_start(struct perf_event *event, int flags)
{
event->hw.state = 0;
dmc620_pmu_event_set_period(event);
dmc620_pmu_enable_counter(event);
}
static void dmc620_pmu_stop(struct perf_event *event, int flags)
{
if (event->hw.state & PERF_HES_STOPPED)
return;
dmc620_pmu_disable_counter(event);
dmc620_pmu_event_update(event);
event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
static int dmc620_pmu_add(struct perf_event *event, int flags)
{
struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
int idx;
u64 reg;
idx = dmc620_get_event_idx(event);
if (idx < 0)
return idx;
hwc->idx = idx;
dmc620_pmu->events[idx] = event;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
reg = ATTR_CFG_GET_FLD(attr, mask);
dmc620_pmu_creg_write(dmc620_pmu,
idx, DMC620_PMU_COUNTERn_MASK_31_00, lower_32_bits(reg));
dmc620_pmu_creg_write(dmc620_pmu,
idx, DMC620_PMU_COUNTERn_MASK_63_32, upper_32_bits(reg));
reg = ATTR_CFG_GET_FLD(attr, match);
dmc620_pmu_creg_write(dmc620_pmu,
idx, DMC620_PMU_COUNTERn_MATCH_31_00, lower_32_bits(reg));
dmc620_pmu_creg_write(dmc620_pmu,
idx, DMC620_PMU_COUNTERn_MATCH_63_32, upper_32_bits(reg));
if (flags & PERF_EF_START)
dmc620_pmu_start(event, PERF_EF_RELOAD);
perf_event_update_userpage(event);
return 0;
}
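/*
 * Example usage from userspace, assuming a hypothetical instance named
 * after a controller at physical address 0x10008000 (the name format
 * comes from the probe path below); the mask/match values are
 * illustrative, not canonical:
 *
 *   perf stat -a -e arm_dmc620_10008000/clkdiv2_allocate,mask=0x1f0,match=0x041/
 *
 * The mask and match fields end up in the MASK/MATCH registers
 * programmed here in dmc620_pmu_add().
 */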
static void dmc620_pmu_del(struct perf_event *event, int flags)
{
struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
dmc620_pmu_stop(event, PERF_EF_UPDATE);
dmc620_pmu->events[idx] = NULL;
clear_bit(idx, dmc620_pmu->used_mask);
perf_event_update_userpage(event);
}
static int dmc620_pmu_cpu_teardown(unsigned int cpu,
struct hlist_node *node)
{
struct dmc620_pmu_irq *irq;
struct dmc620_pmu *dmc620_pmu;
unsigned int target;
irq = hlist_entry_safe(node, struct dmc620_pmu_irq, node);
if (cpu != irq->cpu)
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
return 0;
/* We're only reading, but this isn't the place to be involving RCU */
mutex_lock(&dmc620_pmu_irqs_lock);
list_for_each_entry(dmc620_pmu, &irq->pmus_node, pmus_node)
perf_pmu_migrate_context(&dmc620_pmu->pmu, irq->cpu, target);
mutex_unlock(&dmc620_pmu_irqs_lock);
WARN_ON(irq_set_affinity_hint(irq->irq_num, cpumask_of(target)));
irq->cpu = target;
return 0;
}
static int dmc620_pmu_device_probe(struct platform_device *pdev)
{
struct dmc620_pmu *dmc620_pmu;
struct resource *res;
char *name;
int irq_num;
int i, ret;
dmc620_pmu = devm_kzalloc(&pdev->dev,
sizeof(struct dmc620_pmu), GFP_KERNEL);
if (!dmc620_pmu)
return -ENOMEM;
platform_set_drvdata(pdev, dmc620_pmu);
dmc620_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
.task_ctx_nr = perf_invalid_context,
.event_init = dmc620_pmu_event_init,
.add = dmc620_pmu_add,
.del = dmc620_pmu_del,
.start = dmc620_pmu_start,
.stop = dmc620_pmu_stop,
.read = dmc620_pmu_read,
.attr_groups = dmc620_pmu_attr_groups,
};
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dmc620_pmu->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(dmc620_pmu->base))
return PTR_ERR(dmc620_pmu->base);
/* Make sure device is reset before enabling interrupt */
for (i = 0; i < DMC620_PMU_MAX_COUNTERS; i++)
dmc620_pmu_creg_write(dmc620_pmu, i, DMC620_PMU_COUNTERn_CONTROL, 0);
writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);
writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLK);
irq_num = platform_get_irq(pdev, 0);
if (irq_num < 0)
return irq_num;
ret = dmc620_pmu_get_irq(dmc620_pmu, irq_num);
if (ret)
return ret;
name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"%s_%llx", DMC620_PMUNAME,
(u64)(res->start >> DMC620_PA_SHIFT));
	if (!name) {
		dev_err(&pdev->dev,
			"Create name failed, PMU @%pa\n", &res->start);
		ret = -ENOMEM;
		goto out_teardown_dev;
	}
ret = perf_pmu_register(&dmc620_pmu->pmu, name, -1);
if (ret)
goto out_teardown_dev;
return 0;
out_teardown_dev:
dmc620_pmu_put_irq(dmc620_pmu);
synchronize_rcu();
return ret;
}
static int dmc620_pmu_device_remove(struct platform_device *pdev)
{
struct dmc620_pmu *dmc620_pmu = platform_get_drvdata(pdev);
dmc620_pmu_put_irq(dmc620_pmu);
/* perf will synchronise RCU before devres can free dmc620_pmu */
perf_pmu_unregister(&dmc620_pmu->pmu);
return 0;
}
static const struct acpi_device_id dmc620_acpi_match[] = {
{ "ARMHD620", 0},
{},
};
MODULE_DEVICE_TABLE(acpi, dmc620_acpi_match);
static struct platform_driver dmc620_pmu_driver = {
.driver = {
.name = DMC620_DRVNAME,
.acpi_match_table = dmc620_acpi_match,
},
.probe = dmc620_pmu_device_probe,
.remove = dmc620_pmu_device_remove,
};
static int __init dmc620_pmu_init(void)
{
cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
DMC620_DRVNAME,
NULL,
dmc620_pmu_cpu_teardown);
if (cpuhp_state_num < 0)
return cpuhp_state_num;
return platform_driver_register(&dmc620_pmu_driver);
}
static void __exit dmc620_pmu_exit(void)
{
platform_driver_unregister(&dmc620_pmu_driver);
cpuhp_remove_multi_state(cpuhp_state_num);
}
module_init(dmc620_pmu_init);
module_exit(dmc620_pmu_exit);
MODULE_DESCRIPTION("Perf driver for the ARM DMC-620 memory controller");
MODULE_AUTHOR("Tuan Phan <tuanphan@os.amperecomputing.com>");
MODULE_LICENSE("GPL v2");
drivers/perf/arm_dsu_pmu.c:
@@ -716,9 +716,6 @@ static int dsu_pmu_device_probe(struct platform_device *pdev)
 	if (IS_ERR(dsu_pmu))
 		return PTR_ERR(dsu_pmu);

-	if (IS_ERR_OR_NULL(fwnode))
-		return -ENOENT;
-
 	if (is_of_node(fwnode))
 		rc = dsu_pmu_dt_get_cpus(&pdev->dev, &dsu_pmu->associated_cpus);
 	else if (is_acpi_device_node(fwnode))
...
drivers/perf/arm_pmu.c:
@@ -726,6 +726,11 @@ static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
 	return per_cpu(hw_events->irq, cpu);
 }

+bool arm_pmu_irq_is_nmi(void)
+{
+	return has_nmi;
+}
+
 /*
  * PMU hardware loses all context when a CPU goes offline.
  * When a CPU is hotplugged back in, since some hardware registers are
...
drivers/perf/arm_smmuv3_pmu.c:
@@ -74,6 +74,7 @@
 #define SMMU_PMCG_CFGR_NCTR		GENMASK(5, 0)
 #define SMMU_PMCG_CR			0xE04
 #define SMMU_PMCG_CR_ENABLE		BIT(0)
+#define SMMU_PMCG_IIDR			0xE08
 #define SMMU_PMCG_CEID0			0xE20
 #define SMMU_PMCG_CEID1			0xE28
 #define SMMU_PMCG_IRQ_CTRL		0xE50
@@ -112,6 +113,7 @@ struct smmu_pmu {
 	void __iomem *reloc_base;
 	u64 counter_mask;
 	u32 options;
+	u32 iidr;
 	bool global_filter;
 };
@@ -552,6 +554,40 @@ static struct attribute_group smmu_pmu_events_group = {
 	.is_visible = smmu_pmu_event_is_visible,
 };

+static ssize_t smmu_pmu_identifier_attr_show(struct device *dev,
+					     struct device_attribute *attr,
+					     char *page)
+{
+	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
+
+	return snprintf(page, PAGE_SIZE, "0x%08x\n", smmu_pmu->iidr);
+}
+
+static umode_t smmu_pmu_identifier_attr_visible(struct kobject *kobj,
+						struct attribute *attr,
+						int n)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
+
+	if (!smmu_pmu->iidr)
+		return 0;
+	return attr->mode;
+}
+
+static struct device_attribute smmu_pmu_identifier_attr =
+	__ATTR(identifier, 0444, smmu_pmu_identifier_attr_show, NULL);
+
+static struct attribute *smmu_pmu_identifier_attrs[] = {
+	&smmu_pmu_identifier_attr.attr,
+	NULL
+};
+
+static struct attribute_group smmu_pmu_identifier_group = {
+	.attrs = smmu_pmu_identifier_attrs,
+	.is_visible = smmu_pmu_identifier_attr_visible,
+};
+
 /* Formats */
 PMU_FORMAT_ATTR(event, "config:0-15");
 PMU_FORMAT_ATTR(filter_stream_id, "config1:0-31");
@@ -575,6 +611,7 @@ static const struct attribute_group *smmu_pmu_attr_grps[] = {
 	&smmu_pmu_cpumask_group,
 	&smmu_pmu_events_group,
 	&smmu_pmu_format_group,
+	&smmu_pmu_identifier_group,
 	NULL
 };
@@ -795,6 +832,8 @@ static int smmu_pmu_probe(struct platform_device *pdev)
 		return err;
 	}

+	smmu_pmu->iidr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_IIDR);
+
 	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "smmuv3_pmcg_%llx",
 			      (res_0->start) >> SMMU_PMCG_PA_SHIFT);
 	if (!name) {
...
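Once registered, the new attribute appears alongside the PMU's other sysfs files, so userspace (including perf tooling) can read the raw IIDR from /sys/bus/event_source/devices/smmuv3_pmcg_<base>/identifier; the same "identifier" convention is reused by the i.MX DDR and HiSilicon uncore drivers below.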
drivers/perf/fsl_imx8_ddr_perf.c:
@@ -50,6 +50,7 @@ static DEFINE_IDA(ddr_ida);

 struct fsl_ddr_devtype_data {
 	unsigned int quirks; /* quirks needed for different DDR Perf core */
+	const char *identifier; /* system PMU identifier for userspace */
 };

 static const struct fsl_ddr_devtype_data imx8_devtype_data;
@@ -58,13 +59,32 @@ static const struct fsl_ddr_devtype_data imx8m_devtype_data = {
 	.quirks = DDR_CAP_AXI_ID_FILTER,
 };

+static const struct fsl_ddr_devtype_data imx8mq_devtype_data = {
+	.quirks = DDR_CAP_AXI_ID_FILTER,
+	.identifier = "i.MX8MQ",
+};
+
+static const struct fsl_ddr_devtype_data imx8mm_devtype_data = {
+	.quirks = DDR_CAP_AXI_ID_FILTER,
+	.identifier = "i.MX8MM",
+};
+
+static const struct fsl_ddr_devtype_data imx8mn_devtype_data = {
+	.quirks = DDR_CAP_AXI_ID_FILTER,
+	.identifier = "i.MX8MN",
+};
+
 static const struct fsl_ddr_devtype_data imx8mp_devtype_data = {
 	.quirks = DDR_CAP_AXI_ID_FILTER_ENHANCED,
+	.identifier = "i.MX8MP",
 };

 static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
 	{ .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
 	{ .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
+	{ .compatible = "fsl,imx8mq-ddr-pmu", .data = &imx8mq_devtype_data},
+	{ .compatible = "fsl,imx8mm-ddr-pmu", .data = &imx8mm_devtype_data},
+	{ .compatible = "fsl,imx8mn-ddr-pmu", .data = &imx8mn_devtype_data},
 	{ .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data},
 	{ /* sentinel */ }
 };
@@ -84,6 +104,40 @@ struct ddr_pmu {
 	int id;
 };

+static ssize_t ddr_perf_identifier_show(struct device *dev,
+					struct device_attribute *attr,
+					char *page)
+{
+	struct ddr_pmu *pmu = dev_get_drvdata(dev);
+
+	return sprintf(page, "%s\n", pmu->devtype_data->identifier);
+}
+
+static umode_t ddr_perf_identifier_attr_visible(struct kobject *kobj,
+						struct attribute *attr,
+						int n)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct ddr_pmu *pmu = dev_get_drvdata(dev);
+
+	if (!pmu->devtype_data->identifier)
+		return 0;
+	return attr->mode;
+};
+
+static struct device_attribute ddr_perf_identifier_attr =
+	__ATTR(identifier, 0444, ddr_perf_identifier_show, NULL);
+
+static struct attribute *ddr_perf_identifier_attrs[] = {
+	&ddr_perf_identifier_attr.attr,
+	NULL,
+};
+
+static struct attribute_group ddr_perf_identifier_attr_group = {
+	.attrs = ddr_perf_identifier_attrs,
+	.is_visible = ddr_perf_identifier_attr_visible,
+};
+
 enum ddr_perf_filter_capabilities {
 	PERF_CAP_AXI_ID_FILTER = 0,
 	PERF_CAP_AXI_ID_FILTER_ENHANCED,
@@ -237,6 +291,7 @@ static const struct attribute_group *attr_groups[] = {
 	&ddr_perf_format_attr_group,
 	&ddr_perf_cpumask_attr_group,
 	&ddr_perf_filter_cap_attr_group,
+	&ddr_perf_identifier_attr_group,
 	NULL,
 };
@@ -361,25 +416,6 @@ static int ddr_perf_event_init(struct perf_event *event)
 	return 0;
 }

-static void ddr_perf_event_update(struct perf_event *event)
-{
-	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
-	struct hw_perf_event *hwc = &event->hw;
-	u64 delta, prev_raw_count, new_raw_count;
-	int counter = hwc->idx;
-
-	do {
-		prev_raw_count = local64_read(&hwc->prev_count);
-		new_raw_count = ddr_perf_read_counter(pmu, counter);
-	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
-			new_raw_count) != prev_raw_count);
-
-	delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;
-
-	local64_add(delta, &event->count);
-}
-
 static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
 				    int counter, bool enable)
 {
@@ -404,6 +440,56 @@ static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
 	}
 }

+static bool ddr_perf_counter_overflow(struct ddr_pmu *pmu, int counter)
+{
+	int val;
+
+	val = readl_relaxed(pmu->base + counter * 4 + COUNTER_CNTL);
+
+	return val & CNTL_OVER;
+}
+
+static void ddr_perf_counter_clear(struct ddr_pmu *pmu, int counter)
+{
+	u8 reg = counter * 4 + COUNTER_CNTL;
+	int val;
+
+	val = readl_relaxed(pmu->base + reg);
+	val &= ~CNTL_CLEAR;
+	writel(val, pmu->base + reg);
+
+	val |= CNTL_CLEAR;
+	writel(val, pmu->base + reg);
+}
+
+static void ddr_perf_event_update(struct perf_event *event)
+{
+	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	u64 new_raw_count;
+	int counter = hwc->idx;
+	int ret;
+
+	new_raw_count = ddr_perf_read_counter(pmu, counter);
+	local64_add(new_raw_count, &event->count);
+
+	/*
+	 * On legacy SoCs, an event counter keeps counting after it
+	 * overflows, so it doesn't need to be cleared. On newer SoCs,
+	 * an event counter stops counting when it overflows and must
+	 * be cleared before it will count again.
+	 */
+	if (counter != EVENT_CYCLES_COUNTER) {
+		ret = ddr_perf_counter_overflow(pmu, counter);
+		if (ret)
+			dev_warn_ratelimited(pmu->dev,
+					     "events lost due to counter overflow (config 0x%llx)\n",
+					     event->attr.config);
+	}
+
+	/* clear counter every time for both cycle counter and event counter */
+	ddr_perf_counter_clear(pmu, counter);
+}
+
 static void ddr_perf_event_start(struct perf_event *event, int flags)
 {
 	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
@@ -537,7 +623,7 @@ static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
 {
 	int i;
 	struct ddr_pmu *pmu = (struct ddr_pmu *) p;
-	struct perf_event *event, *cycle_event = NULL;
+	struct perf_event *event;

 	/* all counter will stop if cycle counter disabled */
 	ddr_perf_counter_enable(pmu,
@@ -547,7 +633,9 @@ static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
 	/*
 	 * When the cycle counter overflows, all counters are stopped,
 	 * and an IRQ is raised. If any other counter overflows, it
-	 * continues counting, and no IRQ is raised.
+	 * continues counting, and no IRQ is raised. But on newer SoCs,
+	 * such as i.MX8MP, an event counter stops on overflow, so we use
+	 * the cycle counter to service event counters before counts are lost.
 	 *
 	 * Cycles occur at least 4 times as often as other events, so we
 	 * can update all events on a cycle counter overflow and not
@@ -562,17 +650,12 @@ static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
 		event = pmu->events[i];

 		ddr_perf_event_update(event);
-
-		if (event->hw.idx == EVENT_CYCLES_COUNTER)
-			cycle_event = event;
 	}

 	ddr_perf_counter_enable(pmu,
 				EVENT_CYCLES_ID,
 				EVENT_CYCLES_COUNTER,
 				true);

-	if (cycle_event)
-		ddr_perf_event_update(cycle_event);
-
 	return IRQ_HANDLED;
 }
...
drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c:
@@ -33,6 +33,7 @@
 #define DDRC_INT_MASK		0x6c8
 #define DDRC_INT_STATUS		0x6cc
 #define DDRC_INT_CLEAR		0x6d0
+#define DDRC_VERSION		0x710

 /* DDRC has 8-counters */
 #define DDRC_NR_COUNTERS	0x8
@@ -267,6 +268,8 @@ static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
 		return PTR_ERR(ddrc_pmu->base);
 	}

+	ddrc_pmu->identifier = readl(ddrc_pmu->base + DDRC_VERSION);
+
 	return 0;
 }
@@ -308,10 +311,23 @@ static const struct attribute_group hisi_ddrc_pmu_cpumask_attr_group = {
 	.attrs = hisi_ddrc_pmu_cpumask_attrs,
 };

+static struct device_attribute hisi_ddrc_pmu_identifier_attr =
+	__ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
+
+static struct attribute *hisi_ddrc_pmu_identifier_attrs[] = {
+	&hisi_ddrc_pmu_identifier_attr.attr,
+	NULL
+};
+
+static struct attribute_group hisi_ddrc_pmu_identifier_group = {
+	.attrs = hisi_ddrc_pmu_identifier_attrs,
+};
+
 static const struct attribute_group *hisi_ddrc_pmu_attr_groups[] = {
 	&hisi_ddrc_pmu_format_group,
 	&hisi_ddrc_pmu_events_group,
 	&hisi_ddrc_pmu_cpumask_attr_group,
+	&hisi_ddrc_pmu_identifier_group,
 	NULL,
 };
...
drivers/perf/hisilicon/hisi_uncore_hha_pmu.c:
@@ -23,6 +23,7 @@
 #define HHA_INT_MASK		0x0804
 #define HHA_INT_STATUS		0x0808
 #define HHA_INT_CLEAR		0x080C
+#define HHA_VERSION		0x1cf0
 #define HHA_PERF_CTRL		0x1E00
 #define HHA_EVENT_CTRL		0x1E04
 #define HHA_EVENT_TYPE0		0x1E80
@@ -261,6 +262,8 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev,
 		return PTR_ERR(hha_pmu->base);
 	}

+	hha_pmu->identifier = readl(hha_pmu->base + HHA_VERSION);
+
 	return 0;
 }
@@ -320,10 +323,23 @@ static const struct attribute_group hisi_hha_pmu_cpumask_attr_group = {
 	.attrs = hisi_hha_pmu_cpumask_attrs,
 };

+static struct device_attribute hisi_hha_pmu_identifier_attr =
+	__ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
+
+static struct attribute *hisi_hha_pmu_identifier_attrs[] = {
+	&hisi_hha_pmu_identifier_attr.attr,
+	NULL
+};
+
+static struct attribute_group hisi_hha_pmu_identifier_group = {
+	.attrs = hisi_hha_pmu_identifier_attrs,
+};
+
 static const struct attribute_group *hisi_hha_pmu_attr_groups[] = {
 	&hisi_hha_pmu_format_group,
 	&hisi_hha_pmu_events_group,
 	&hisi_hha_pmu_cpumask_attr_group,
+	&hisi_hha_pmu_identifier_group,
 	NULL,
 };
...
drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c:
@@ -25,6 +25,7 @@
 #define L3C_INT_STATUS		0x0808
 #define L3C_INT_CLEAR		0x080c
 #define L3C_EVENT_CTRL		0x1c00
+#define L3C_VERSION		0x1cf0
 #define L3C_EVENT_TYPE0		0x1d00
 /*
  * Each counter is 48-bits and [48:63] are reserved
@@ -264,6 +265,8 @@ static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
 		return PTR_ERR(l3c_pmu->base);
 	}

+	l3c_pmu->identifier = readl(l3c_pmu->base + L3C_VERSION);
+
 	return 0;
 }
@@ -310,10 +313,23 @@ static const struct attribute_group hisi_l3c_pmu_cpumask_attr_group = {
 	.attrs = hisi_l3c_pmu_cpumask_attrs,
 };

+static struct device_attribute hisi_l3c_pmu_identifier_attr =
+	__ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
+
+static struct attribute *hisi_l3c_pmu_identifier_attrs[] = {
+	&hisi_l3c_pmu_identifier_attr.attr,
+	NULL
+};
+
+static struct attribute_group hisi_l3c_pmu_identifier_group = {
+	.attrs = hisi_l3c_pmu_identifier_attrs,
+};
+
 static const struct attribute_group *hisi_l3c_pmu_attr_groups[] = {
 	&hisi_l3c_pmu_format_group,
 	&hisi_l3c_pmu_events_group,
 	&hisi_l3c_pmu_cpumask_attr_group,
+	&hisi_l3c_pmu_identifier_group,
 	NULL,
 };
...
drivers/perf/hisilicon/hisi_uncore_pmu.c:
@@ -119,6 +119,16 @@ int hisi_uncore_pmu_get_event_idx(struct perf_event *event)
 }
 EXPORT_SYMBOL_GPL(hisi_uncore_pmu_get_event_idx);

+ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
+					     struct device_attribute *attr,
+					     char *page)
+{
+	struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
+
+	return snprintf(page, PAGE_SIZE, "0x%08x\n", hisi_pmu->identifier);
+}
+EXPORT_SYMBOL_GPL(hisi_uncore_pmu_identifier_attr_show);
+
 static void hisi_uncore_pmu_clear_event_idx(struct hisi_pmu *hisi_pmu, int idx)
 {
 	if (!hisi_uncore_pmu_counter_valid(hisi_pmu, idx)) {
...
drivers/perf/hisilicon/hisi_uncore_pmu.h:
@@ -75,6 +75,7 @@ struct hisi_pmu {
 	int counter_bits;
 	/* check event code range */
 	int check_event;
+	u32 identifier;
 };

 int hisi_uncore_pmu_counter_valid(struct hisi_pmu *hisi_pmu, int idx);
@@ -97,4 +98,10 @@ ssize_t hisi_cpumask_sysfs_show(struct device *dev,
 				struct device_attribute *attr, char *buf);
 int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node);
 int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node);
+
+ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
+					     struct device_attribute *attr,
+					     char *page);
+
 #endif /* __HISI_UNCORE_PMU_H__ */
include/linux/perf/arm_pmu.h:
@@ -163,6 +163,8 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
 static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
 #endif

+bool arm_pmu_irq_is_nmi(void);
+
 /* Internal functions only for core arm_pmu code */
 struct arm_pmu *armpmu_alloc(void);
 struct arm_pmu *armpmu_alloc_atomic(void);
...