Commit d8602f8b authored by Catalin Marinas's avatar Catalin Marinas

Merge remote-tracking branch 'arm64/for-next/perf' into for-next/core

* arm64/for-next/perf:
  perf/imx_ddr: Add system PMU identifier for userspace
  bindings: perf: imx-ddr: add compatible string
  arm64: Fix build failure when HARDLOCKUP_DETECTOR_PERF is enabled
  arm64: Enable perf events based hard lockup detector
  perf/imx_ddr: Add stop event counters support for i.MX8MP
  perf/smmuv3: Support sysfs identifier file
  drivers/perf: hisi: Add identifier sysfs file
  perf: remove duplicate check on fwnode
  driver/perf: Add PMU driver for the ARM DMC-620 memory controller
parents ba4259a6 881b0520
...@@ -15,6 +15,9 @@ properties: ...@@ -15,6 +15,9 @@ properties:
- enum: - enum:
- fsl,imx8-ddr-pmu - fsl,imx8-ddr-pmu
- fsl,imx8m-ddr-pmu - fsl,imx8m-ddr-pmu
- fsl,imx8mq-ddr-pmu
- fsl,imx8mm-ddr-pmu
- fsl,imx8mn-ddr-pmu
- fsl,imx8mp-ddr-pmu - fsl,imx8mp-ddr-pmu
- items: - items:
- enum: - enum:
......
...@@ -170,6 +170,8 @@ config ARM64 ...@@ -170,6 +170,8 @@ config ARM64
select HAVE_NMI select HAVE_NMI
select HAVE_PATA_PLATFORM select HAVE_PATA_PLATFORM
select HAVE_PERF_EVENTS select HAVE_PERF_EVENTS
select HAVE_PERF_EVENTS_NMI if ARM64_PSEUDO_NMI && HW_PERF_EVENTS
select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI
select HAVE_PERF_REGS select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_REGS_AND_STACK_ACCESS_API
......
...@@ -23,6 +23,8 @@ ...@@ -23,6 +23,8 @@
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/sched_clock.h> #include <linux/sched_clock.h>
#include <linux/smp.h> #include <linux/smp.h>
#include <linux/nmi.h>
#include <linux/cpufreq.h>
/* ARMv8 Cortex-A53 specific event types. */ /* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2 #define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2
...@@ -1248,10 +1250,21 @@ static struct platform_driver armv8_pmu_driver = { ...@@ -1248,10 +1250,21 @@ static struct platform_driver armv8_pmu_driver = {
static int __init armv8_pmu_driver_init(void) static int __init armv8_pmu_driver_init(void)
{ {
int ret;
if (acpi_disabled) if (acpi_disabled)
return platform_driver_register(&armv8_pmu_driver); ret = platform_driver_register(&armv8_pmu_driver);
else else
return arm_pmu_acpi_probe(armv8_pmuv3_init); ret = arm_pmu_acpi_probe(armv8_pmuv3_init);
/*
* Try to re-initialize lockup detector after PMU init in
* case PMU events are triggered via NMIs.
*/
if (ret == 0 && arm_pmu_irq_is_nmi())
lockup_detector_init();
return ret;
} }
device_initcall(armv8_pmu_driver_init) device_initcall(armv8_pmu_driver_init)
...@@ -1309,3 +1322,27 @@ void arch_perf_update_userpage(struct perf_event *event, ...@@ -1309,3 +1322,27 @@ void arch_perf_update_userpage(struct perf_event *event,
userpg->cap_user_time_zero = 1; userpg->cap_user_time_zero = 1;
userpg->cap_user_time_short = 1; userpg->cap_user_time_short = 1;
} }
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
/*
 * Safe maximum CPU frequency in case a particular platform doesn't implement
 * a cpufreq driver. Although the architecture doesn't put any restrictions on
 * the maximum frequency, 5 GHz seems to be a safe maximum given that the
 * available Arm CPUs on the market are clocked well below 5 GHz. On the other
 * hand, we can't make it much higher as it would lead to a large hard-lockup
 * detection timeout on parts which run slower (e.g. 1 GHz on Developerbox)
 * and don't possess a cpufreq driver.
 */
#define SAFE_MAX_CPU_FREQ 5000000000UL // 5 GHz
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
unsigned int cpu = smp_processor_id();
unsigned long max_cpu_freq;
max_cpu_freq = cpufreq_get_hw_max_freq(cpu) * 1000UL;
if (!max_cpu_freq)
max_cpu_freq = SAFE_MAX_CPU_FREQ;
return (u64)max_cpu_freq * watchdog_thresh;
}
#endif
...@@ -130,6 +130,13 @@ config ARM_SPE_PMU ...@@ -130,6 +130,13 @@ config ARM_SPE_PMU
Extension, which provides periodic sampling of operations in Extension, which provides periodic sampling of operations in
the CPU pipeline and reports this via the perf AUX interface. the CPU pipeline and reports this via the perf AUX interface.
config ARM_DMC620_PMU
tristate "Enable PMU support for the ARM DMC-620 memory controller"
depends on (ARM64 && ACPI) || COMPILE_TEST
help
Support for PMU events monitoring on the ARM DMC-620 memory
controller.
source "drivers/perf/hisilicon/Kconfig" source "drivers/perf/hisilicon/Kconfig"
endmenu endmenu
...@@ -13,3 +13,4 @@ obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o ...@@ -13,3 +13,4 @@ obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
obj-$(CONFIG_THUNDERX2_PMU) += thunderx2_pmu.o obj-$(CONFIG_THUNDERX2_PMU) += thunderx2_pmu.o
obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o
obj-$(CONFIG_ARM_DMC620_PMU) += arm_dmc620_pmu.o
This diff is collapsed.
...@@ -716,9 +716,6 @@ static int dsu_pmu_device_probe(struct platform_device *pdev) ...@@ -716,9 +716,6 @@ static int dsu_pmu_device_probe(struct platform_device *pdev)
if (IS_ERR(dsu_pmu)) if (IS_ERR(dsu_pmu))
return PTR_ERR(dsu_pmu); return PTR_ERR(dsu_pmu);
if (IS_ERR_OR_NULL(fwnode))
return -ENOENT;
if (is_of_node(fwnode)) if (is_of_node(fwnode))
rc = dsu_pmu_dt_get_cpus(&pdev->dev, &dsu_pmu->associated_cpus); rc = dsu_pmu_dt_get_cpus(&pdev->dev, &dsu_pmu->associated_cpus);
else if (is_acpi_device_node(fwnode)) else if (is_acpi_device_node(fwnode))
......
...@@ -726,6 +726,11 @@ static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu) ...@@ -726,6 +726,11 @@ static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
return per_cpu(hw_events->irq, cpu); return per_cpu(hw_events->irq, cpu);
} }
bool arm_pmu_irq_is_nmi(void)
{
return has_nmi;
}
/* /*
* PMU hardware loses all context when a CPU goes offline. * PMU hardware loses all context when a CPU goes offline.
* When a CPU is hotplugged back in, since some hardware registers are * When a CPU is hotplugged back in, since some hardware registers are
......
...@@ -74,6 +74,7 @@ ...@@ -74,6 +74,7 @@
#define SMMU_PMCG_CFGR_NCTR GENMASK(5, 0) #define SMMU_PMCG_CFGR_NCTR GENMASK(5, 0)
#define SMMU_PMCG_CR 0xE04 #define SMMU_PMCG_CR 0xE04
#define SMMU_PMCG_CR_ENABLE BIT(0) #define SMMU_PMCG_CR_ENABLE BIT(0)
#define SMMU_PMCG_IIDR 0xE08
#define SMMU_PMCG_CEID0 0xE20 #define SMMU_PMCG_CEID0 0xE20
#define SMMU_PMCG_CEID1 0xE28 #define SMMU_PMCG_CEID1 0xE28
#define SMMU_PMCG_IRQ_CTRL 0xE50 #define SMMU_PMCG_IRQ_CTRL 0xE50
...@@ -112,6 +113,7 @@ struct smmu_pmu { ...@@ -112,6 +113,7 @@ struct smmu_pmu {
void __iomem *reloc_base; void __iomem *reloc_base;
u64 counter_mask; u64 counter_mask;
u32 options; u32 options;
u32 iidr;
bool global_filter; bool global_filter;
}; };
...@@ -552,6 +554,40 @@ static struct attribute_group smmu_pmu_events_group = { ...@@ -552,6 +554,40 @@ static struct attribute_group smmu_pmu_events_group = {
.is_visible = smmu_pmu_event_is_visible, .is_visible = smmu_pmu_event_is_visible,
}; };
static ssize_t smmu_pmu_identifier_attr_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
return snprintf(page, PAGE_SIZE, "0x%08x\n", smmu_pmu->iidr);
}
static umode_t smmu_pmu_identifier_attr_visible(struct kobject *kobj,
struct attribute *attr,
int n)
{
struct device *dev = kobj_to_dev(kobj);
struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
if (!smmu_pmu->iidr)
return 0;
return attr->mode;
}
static struct device_attribute smmu_pmu_identifier_attr =
__ATTR(identifier, 0444, smmu_pmu_identifier_attr_show, NULL);
static struct attribute *smmu_pmu_identifier_attrs[] = {
&smmu_pmu_identifier_attr.attr,
NULL
};
static struct attribute_group smmu_pmu_identifier_group = {
.attrs = smmu_pmu_identifier_attrs,
.is_visible = smmu_pmu_identifier_attr_visible,
};
/* Formats */ /* Formats */
PMU_FORMAT_ATTR(event, "config:0-15"); PMU_FORMAT_ATTR(event, "config:0-15");
PMU_FORMAT_ATTR(filter_stream_id, "config1:0-31"); PMU_FORMAT_ATTR(filter_stream_id, "config1:0-31");
...@@ -575,6 +611,7 @@ static const struct attribute_group *smmu_pmu_attr_grps[] = { ...@@ -575,6 +611,7 @@ static const struct attribute_group *smmu_pmu_attr_grps[] = {
&smmu_pmu_cpumask_group, &smmu_pmu_cpumask_group,
&smmu_pmu_events_group, &smmu_pmu_events_group,
&smmu_pmu_format_group, &smmu_pmu_format_group,
&smmu_pmu_identifier_group,
NULL NULL
}; };
...@@ -795,6 +832,8 @@ static int smmu_pmu_probe(struct platform_device *pdev) ...@@ -795,6 +832,8 @@ static int smmu_pmu_probe(struct platform_device *pdev)
return err; return err;
} }
smmu_pmu->iidr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_IIDR);
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "smmuv3_pmcg_%llx", name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "smmuv3_pmcg_%llx",
(res_0->start) >> SMMU_PMCG_PA_SHIFT); (res_0->start) >> SMMU_PMCG_PA_SHIFT);
if (!name) { if (!name) {
......
...@@ -50,6 +50,7 @@ static DEFINE_IDA(ddr_ida); ...@@ -50,6 +50,7 @@ static DEFINE_IDA(ddr_ida);
struct fsl_ddr_devtype_data { struct fsl_ddr_devtype_data {
unsigned int quirks; /* quirks needed for different DDR Perf core */ unsigned int quirks; /* quirks needed for different DDR Perf core */
const char *identifier; /* system PMU identifier for userspace */
}; };
static const struct fsl_ddr_devtype_data imx8_devtype_data; static const struct fsl_ddr_devtype_data imx8_devtype_data;
...@@ -58,13 +59,32 @@ static const struct fsl_ddr_devtype_data imx8m_devtype_data = { ...@@ -58,13 +59,32 @@ static const struct fsl_ddr_devtype_data imx8m_devtype_data = {
.quirks = DDR_CAP_AXI_ID_FILTER, .quirks = DDR_CAP_AXI_ID_FILTER,
}; };
static const struct fsl_ddr_devtype_data imx8mq_devtype_data = {
.quirks = DDR_CAP_AXI_ID_FILTER,
.identifier = "i.MX8MQ",
};
static const struct fsl_ddr_devtype_data imx8mm_devtype_data = {
.quirks = DDR_CAP_AXI_ID_FILTER,
.identifier = "i.MX8MM",
};
static const struct fsl_ddr_devtype_data imx8mn_devtype_data = {
.quirks = DDR_CAP_AXI_ID_FILTER,
.identifier = "i.MX8MN",
};
static const struct fsl_ddr_devtype_data imx8mp_devtype_data = { static const struct fsl_ddr_devtype_data imx8mp_devtype_data = {
.quirks = DDR_CAP_AXI_ID_FILTER_ENHANCED, .quirks = DDR_CAP_AXI_ID_FILTER_ENHANCED,
.identifier = "i.MX8MP",
}; };
static const struct of_device_id imx_ddr_pmu_dt_ids[] = { static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
{ .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data}, { .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
{ .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data}, { .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
{ .compatible = "fsl,imx8mq-ddr-pmu", .data = &imx8mq_devtype_data},
{ .compatible = "fsl,imx8mm-ddr-pmu", .data = &imx8mm_devtype_data},
{ .compatible = "fsl,imx8mn-ddr-pmu", .data = &imx8mn_devtype_data},
{ .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data}, { .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data},
{ /* sentinel */ } { /* sentinel */ }
}; };
...@@ -84,6 +104,40 @@ struct ddr_pmu { ...@@ -84,6 +104,40 @@ struct ddr_pmu {
int id; int id;
}; };
static ssize_t ddr_perf_identifier_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
struct ddr_pmu *pmu = dev_get_drvdata(dev);
return sprintf(page, "%s\n", pmu->devtype_data->identifier);
}
static umode_t ddr_perf_identifier_attr_visible(struct kobject *kobj,
struct attribute *attr,
int n)
{
struct device *dev = kobj_to_dev(kobj);
struct ddr_pmu *pmu = dev_get_drvdata(dev);
if (!pmu->devtype_data->identifier)
return 0;
return attr->mode;
};
static struct device_attribute ddr_perf_identifier_attr =
__ATTR(identifier, 0444, ddr_perf_identifier_show, NULL);
static struct attribute *ddr_perf_identifier_attrs[] = {
&ddr_perf_identifier_attr.attr,
NULL,
};
static struct attribute_group ddr_perf_identifier_attr_group = {
.attrs = ddr_perf_identifier_attrs,
.is_visible = ddr_perf_identifier_attr_visible,
};
enum ddr_perf_filter_capabilities { enum ddr_perf_filter_capabilities {
PERF_CAP_AXI_ID_FILTER = 0, PERF_CAP_AXI_ID_FILTER = 0,
PERF_CAP_AXI_ID_FILTER_ENHANCED, PERF_CAP_AXI_ID_FILTER_ENHANCED,
...@@ -237,6 +291,7 @@ static const struct attribute_group *attr_groups[] = { ...@@ -237,6 +291,7 @@ static const struct attribute_group *attr_groups[] = {
&ddr_perf_format_attr_group, &ddr_perf_format_attr_group,
&ddr_perf_cpumask_attr_group, &ddr_perf_cpumask_attr_group,
&ddr_perf_filter_cap_attr_group, &ddr_perf_filter_cap_attr_group,
&ddr_perf_identifier_attr_group,
NULL, NULL,
}; };
...@@ -361,25 +416,6 @@ static int ddr_perf_event_init(struct perf_event *event) ...@@ -361,25 +416,6 @@ static int ddr_perf_event_init(struct perf_event *event)
return 0; return 0;
} }
static void ddr_perf_event_update(struct perf_event *event)
{
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
u64 delta, prev_raw_count, new_raw_count;
int counter = hwc->idx;
do {
prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count = ddr_perf_read_counter(pmu, counter);
} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count);
delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;
local64_add(delta, &event->count);
}
static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config, static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
int counter, bool enable) int counter, bool enable)
{ {
...@@ -404,6 +440,56 @@ static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config, ...@@ -404,6 +440,56 @@ static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
} }
} }
static bool ddr_perf_counter_overflow(struct ddr_pmu *pmu, int counter)
{
int val;
val = readl_relaxed(pmu->base + counter * 4 + COUNTER_CNTL);
return val & CNTL_OVER;
}
static void ddr_perf_counter_clear(struct ddr_pmu *pmu, int counter)
{
u8 reg = counter * 4 + COUNTER_CNTL;
int val;
val = readl_relaxed(pmu->base + reg);
val &= ~CNTL_CLEAR;
writel(val, pmu->base + reg);
val |= CNTL_CLEAR;
writel(val, pmu->base + reg);
}
static void ddr_perf_event_update(struct perf_event *event)
{
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
u64 new_raw_count;
int counter = hwc->idx;
int ret;
new_raw_count = ddr_perf_read_counter(pmu, counter);
local64_add(new_raw_count, &event->count);
/*
 * For legacy SoCs: the event counter continues counting after an
 * overflow, so there is no need to clear the counter.
 * For new SoCs: the event counter stops counting on overflow, so the
 * counter must be cleared to let it count again.
 */
if (counter != EVENT_CYCLES_COUNTER) {
ret = ddr_perf_counter_overflow(pmu, counter);
if (ret)
dev_warn_ratelimited(pmu->dev, "events lost due to counter overflow (config 0x%llx)\n",
event->attr.config);
}
/* clear counter every time for both cycle counter and event counter */
ddr_perf_counter_clear(pmu, counter);
}
static void ddr_perf_event_start(struct perf_event *event, int flags) static void ddr_perf_event_start(struct perf_event *event, int flags)
{ {
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
...@@ -537,7 +623,7 @@ static irqreturn_t ddr_perf_irq_handler(int irq, void *p) ...@@ -537,7 +623,7 @@ static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
{ {
int i; int i;
struct ddr_pmu *pmu = (struct ddr_pmu *) p; struct ddr_pmu *pmu = (struct ddr_pmu *) p;
struct perf_event *event, *cycle_event = NULL; struct perf_event *event;
/* all counter will stop if cycle counter disabled */ /* all counter will stop if cycle counter disabled */
ddr_perf_counter_enable(pmu, ddr_perf_counter_enable(pmu,
...@@ -547,7 +633,9 @@ static irqreturn_t ddr_perf_irq_handler(int irq, void *p) ...@@ -547,7 +633,9 @@ static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
/* /*
* When the cycle counter overflows, all counters are stopped, * When the cycle counter overflows, all counters are stopped,
* and an IRQ is raised. If any other counter overflows, it * and an IRQ is raised. If any other counter overflows, it
* continues counting, and no IRQ is raised. * continues counting, and no IRQ is raised. But for new SoCs,
* such as i.MX8MP, the event counter stops counting on overflow, so
* we need to use the cycle counter to catch event counter overflow.
* *
* Cycles occur at least 4 times as often as other events, so we * Cycles occur at least 4 times as often as other events, so we
* can update all events on a cycle counter overflow and not * can update all events on a cycle counter overflow and not
...@@ -562,17 +650,12 @@ static irqreturn_t ddr_perf_irq_handler(int irq, void *p) ...@@ -562,17 +650,12 @@ static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
event = pmu->events[i]; event = pmu->events[i];
ddr_perf_event_update(event); ddr_perf_event_update(event);
if (event->hw.idx == EVENT_CYCLES_COUNTER)
cycle_event = event;
} }
ddr_perf_counter_enable(pmu, ddr_perf_counter_enable(pmu,
EVENT_CYCLES_ID, EVENT_CYCLES_ID,
EVENT_CYCLES_COUNTER, EVENT_CYCLES_COUNTER,
true); true);
if (cycle_event)
ddr_perf_event_update(cycle_event);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
......
...@@ -33,6 +33,7 @@ ...@@ -33,6 +33,7 @@
#define DDRC_INT_MASK 0x6c8 #define DDRC_INT_MASK 0x6c8
#define DDRC_INT_STATUS 0x6cc #define DDRC_INT_STATUS 0x6cc
#define DDRC_INT_CLEAR 0x6d0 #define DDRC_INT_CLEAR 0x6d0
#define DDRC_VERSION 0x710
/* DDRC has 8-counters */ /* DDRC has 8-counters */
#define DDRC_NR_COUNTERS 0x8 #define DDRC_NR_COUNTERS 0x8
...@@ -267,6 +268,8 @@ static int hisi_ddrc_pmu_init_data(struct platform_device *pdev, ...@@ -267,6 +268,8 @@ static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
return PTR_ERR(ddrc_pmu->base); return PTR_ERR(ddrc_pmu->base);
} }
ddrc_pmu->identifier = readl(ddrc_pmu->base + DDRC_VERSION);
return 0; return 0;
} }
...@@ -308,10 +311,23 @@ static const struct attribute_group hisi_ddrc_pmu_cpumask_attr_group = { ...@@ -308,10 +311,23 @@ static const struct attribute_group hisi_ddrc_pmu_cpumask_attr_group = {
.attrs = hisi_ddrc_pmu_cpumask_attrs, .attrs = hisi_ddrc_pmu_cpumask_attrs,
}; };
static struct device_attribute hisi_ddrc_pmu_identifier_attr =
__ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
static struct attribute *hisi_ddrc_pmu_identifier_attrs[] = {
&hisi_ddrc_pmu_identifier_attr.attr,
NULL
};
static struct attribute_group hisi_ddrc_pmu_identifier_group = {
.attrs = hisi_ddrc_pmu_identifier_attrs,
};
static const struct attribute_group *hisi_ddrc_pmu_attr_groups[] = { static const struct attribute_group *hisi_ddrc_pmu_attr_groups[] = {
&hisi_ddrc_pmu_format_group, &hisi_ddrc_pmu_format_group,
&hisi_ddrc_pmu_events_group, &hisi_ddrc_pmu_events_group,
&hisi_ddrc_pmu_cpumask_attr_group, &hisi_ddrc_pmu_cpumask_attr_group,
&hisi_ddrc_pmu_identifier_group,
NULL, NULL,
}; };
......
...@@ -23,6 +23,7 @@ ...@@ -23,6 +23,7 @@
#define HHA_INT_MASK 0x0804 #define HHA_INT_MASK 0x0804
#define HHA_INT_STATUS 0x0808 #define HHA_INT_STATUS 0x0808
#define HHA_INT_CLEAR 0x080C #define HHA_INT_CLEAR 0x080C
#define HHA_VERSION 0x1cf0
#define HHA_PERF_CTRL 0x1E00 #define HHA_PERF_CTRL 0x1E00
#define HHA_EVENT_CTRL 0x1E04 #define HHA_EVENT_CTRL 0x1E04
#define HHA_EVENT_TYPE0 0x1E80 #define HHA_EVENT_TYPE0 0x1E80
...@@ -261,6 +262,8 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev, ...@@ -261,6 +262,8 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev,
return PTR_ERR(hha_pmu->base); return PTR_ERR(hha_pmu->base);
} }
hha_pmu->identifier = readl(hha_pmu->base + HHA_VERSION);
return 0; return 0;
} }
...@@ -320,10 +323,23 @@ static const struct attribute_group hisi_hha_pmu_cpumask_attr_group = { ...@@ -320,10 +323,23 @@ static const struct attribute_group hisi_hha_pmu_cpumask_attr_group = {
.attrs = hisi_hha_pmu_cpumask_attrs, .attrs = hisi_hha_pmu_cpumask_attrs,
}; };
static struct device_attribute hisi_hha_pmu_identifier_attr =
__ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
static struct attribute *hisi_hha_pmu_identifier_attrs[] = {
&hisi_hha_pmu_identifier_attr.attr,
NULL
};
static struct attribute_group hisi_hha_pmu_identifier_group = {
.attrs = hisi_hha_pmu_identifier_attrs,
};
static const struct attribute_group *hisi_hha_pmu_attr_groups[] = { static const struct attribute_group *hisi_hha_pmu_attr_groups[] = {
&hisi_hha_pmu_format_group, &hisi_hha_pmu_format_group,
&hisi_hha_pmu_events_group, &hisi_hha_pmu_events_group,
&hisi_hha_pmu_cpumask_attr_group, &hisi_hha_pmu_cpumask_attr_group,
&hisi_hha_pmu_identifier_group,
NULL, NULL,
}; };
......
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
#define L3C_INT_STATUS 0x0808 #define L3C_INT_STATUS 0x0808
#define L3C_INT_CLEAR 0x080c #define L3C_INT_CLEAR 0x080c
#define L3C_EVENT_CTRL 0x1c00 #define L3C_EVENT_CTRL 0x1c00
#define L3C_VERSION 0x1cf0
#define L3C_EVENT_TYPE0 0x1d00 #define L3C_EVENT_TYPE0 0x1d00
/* /*
* Each counter is 48-bits and [48:63] are reserved * Each counter is 48-bits and [48:63] are reserved
...@@ -264,6 +265,8 @@ static int hisi_l3c_pmu_init_data(struct platform_device *pdev, ...@@ -264,6 +265,8 @@ static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
return PTR_ERR(l3c_pmu->base); return PTR_ERR(l3c_pmu->base);
} }
l3c_pmu->identifier = readl(l3c_pmu->base + L3C_VERSION);
return 0; return 0;
} }
...@@ -310,10 +313,23 @@ static const struct attribute_group hisi_l3c_pmu_cpumask_attr_group = { ...@@ -310,10 +313,23 @@ static const struct attribute_group hisi_l3c_pmu_cpumask_attr_group = {
.attrs = hisi_l3c_pmu_cpumask_attrs, .attrs = hisi_l3c_pmu_cpumask_attrs,
}; };
static struct device_attribute hisi_l3c_pmu_identifier_attr =
__ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
static struct attribute *hisi_l3c_pmu_identifier_attrs[] = {
&hisi_l3c_pmu_identifier_attr.attr,
NULL
};
static struct attribute_group hisi_l3c_pmu_identifier_group = {
.attrs = hisi_l3c_pmu_identifier_attrs,
};
static const struct attribute_group *hisi_l3c_pmu_attr_groups[] = { static const struct attribute_group *hisi_l3c_pmu_attr_groups[] = {
&hisi_l3c_pmu_format_group, &hisi_l3c_pmu_format_group,
&hisi_l3c_pmu_events_group, &hisi_l3c_pmu_events_group,
&hisi_l3c_pmu_cpumask_attr_group, &hisi_l3c_pmu_cpumask_attr_group,
&hisi_l3c_pmu_identifier_group,
NULL, NULL,
}; };
......
...@@ -119,6 +119,16 @@ int hisi_uncore_pmu_get_event_idx(struct perf_event *event) ...@@ -119,6 +119,16 @@ int hisi_uncore_pmu_get_event_idx(struct perf_event *event)
} }
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_get_event_idx); EXPORT_SYMBOL_GPL(hisi_uncore_pmu_get_event_idx);
ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
return snprintf(page, PAGE_SIZE, "0x%08x\n", hisi_pmu->identifier);
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_identifier_attr_show);
static void hisi_uncore_pmu_clear_event_idx(struct hisi_pmu *hisi_pmu, int idx) static void hisi_uncore_pmu_clear_event_idx(struct hisi_pmu *hisi_pmu, int idx)
{ {
if (!hisi_uncore_pmu_counter_valid(hisi_pmu, idx)) { if (!hisi_uncore_pmu_counter_valid(hisi_pmu, idx)) {
......
...@@ -75,6 +75,7 @@ struct hisi_pmu { ...@@ -75,6 +75,7 @@ struct hisi_pmu {
int counter_bits; int counter_bits;
/* check event code range */ /* check event code range */
int check_event; int check_event;
u32 identifier;
}; };
int hisi_uncore_pmu_counter_valid(struct hisi_pmu *hisi_pmu, int idx); int hisi_uncore_pmu_counter_valid(struct hisi_pmu *hisi_pmu, int idx);
...@@ -97,4 +98,10 @@ ssize_t hisi_cpumask_sysfs_show(struct device *dev, ...@@ -97,4 +98,10 @@ ssize_t hisi_cpumask_sysfs_show(struct device *dev,
struct device_attribute *attr, char *buf); struct device_attribute *attr, char *buf);
int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node); int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node);
int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node); int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node);
ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
struct device_attribute *attr,
char *page);
#endif /* __HISI_UNCORE_PMU_H__ */ #endif /* __HISI_UNCORE_PMU_H__ */
...@@ -163,6 +163,8 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn); ...@@ -163,6 +163,8 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; } static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
#endif #endif
bool arm_pmu_irq_is_nmi(void);
/* Internal functions only for core arm_pmu code */ /* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void); struct arm_pmu *armpmu_alloc(void);
struct arm_pmu *armpmu_alloc_atomic(void); struct arm_pmu *armpmu_alloc_atomic(void);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment