Commit 9651f00e authored by Will Deacon

Merge branch 'for-next/perf' into for-next/core

* for-next/perf: (24 commits)
  KVM: arm64: Ensure CPU PMU probes before pKVM host de-privilege
  drivers/perf: hisi: add NULL check for name
  drivers/perf: hisi: Remove redundant initialized of pmu->name
  perf/arm-cmn: Fix port detection for CMN-700
  arm64: pmuv3: dynamically map PERF_COUNT_HW_BRANCH_INSTRUCTIONS
  perf/arm-cmn: Validate cycles events fully
  Revert "ARM: mach-virt: Select PMUv3 driver by default"
  drivers/perf: apple_m1: Add Apple M2 support
  dt-bindings: arm-pmu: Add PMU compatible strings for Apple M2 cores
  perf: arm_cspmu: Fix variable dereference warning
  perf/amlogic: Fix config1/config2 parsing issue
  drivers/perf: Use devm_platform_get_and_ioremap_resource()
  kbuild, drivers/perf: remove MODULE_LICENSE in non-modules
  perf: qcom: Use devm_platform_get_and_ioremap_resource()
  perf: arm: Use devm_platform_get_and_ioremap_resource()
  perf/arm-cmn: Move overlapping wp_combine field
  ARM: mach-virt: Select PMUv3 driver by default
  ARM: perf: Allow the use of the PMUv3 driver on 32bit ARM
  ARM: Make CONFIG_CPU_V7 valid for 32bit ARMv8 implementations
  perf: pmuv3: Change GENMASK to GENMASK_ULL
  ...
parents 1bb31cc7 87727ba2
@@ -20,6 +20,8 @@ properties:
items:
- enum:
- apm,potenza-pmu
- apple,avalanche-pmu
- apple,blizzard-pmu
- apple,firestorm-pmu
- apple,icestorm-pmu
- arm,armv8-pmuv3 # Only for s/w models
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2012 ARM Ltd.
*/
#ifndef __ASM_PMUV3_H
#define __ASM_PMUV3_H
#include <asm/cp15.h>
#include <asm/cputype.h>
#define PMCCNTR __ACCESS_CP15_64(0, c9)
#define PMCR __ACCESS_CP15(c9, 0, c12, 0)
#define PMCNTENSET __ACCESS_CP15(c9, 0, c12, 1)
#define PMCNTENCLR __ACCESS_CP15(c9, 0, c12, 2)
#define PMOVSR __ACCESS_CP15(c9, 0, c12, 3)
#define PMSELR __ACCESS_CP15(c9, 0, c12, 5)
#define PMCEID0 __ACCESS_CP15(c9, 0, c12, 6)
#define PMCEID1 __ACCESS_CP15(c9, 0, c12, 7)
#define PMXEVTYPER __ACCESS_CP15(c9, 0, c13, 1)
#define PMXEVCNTR __ACCESS_CP15(c9, 0, c13, 2)
#define PMUSERENR __ACCESS_CP15(c9, 0, c14, 0)
#define PMINTENSET __ACCESS_CP15(c9, 0, c14, 1)
#define PMINTENCLR __ACCESS_CP15(c9, 0, c14, 2)
#define PMMIR __ACCESS_CP15(c9, 0, c14, 6)
#define PMCCFILTR __ACCESS_CP15(c14, 0, c15, 7)
#define PMEVCNTR0 __ACCESS_CP15(c14, 0, c8, 0)
#define PMEVCNTR1 __ACCESS_CP15(c14, 0, c8, 1)
#define PMEVCNTR2 __ACCESS_CP15(c14, 0, c8, 2)
#define PMEVCNTR3 __ACCESS_CP15(c14, 0, c8, 3)
#define PMEVCNTR4 __ACCESS_CP15(c14, 0, c8, 4)
#define PMEVCNTR5 __ACCESS_CP15(c14, 0, c8, 5)
#define PMEVCNTR6 __ACCESS_CP15(c14, 0, c8, 6)
#define PMEVCNTR7 __ACCESS_CP15(c14, 0, c8, 7)
#define PMEVCNTR8 __ACCESS_CP15(c14, 0, c9, 0)
#define PMEVCNTR9 __ACCESS_CP15(c14, 0, c9, 1)
#define PMEVCNTR10 __ACCESS_CP15(c14, 0, c9, 2)
#define PMEVCNTR11 __ACCESS_CP15(c14, 0, c9, 3)
#define PMEVCNTR12 __ACCESS_CP15(c14, 0, c9, 4)
#define PMEVCNTR13 __ACCESS_CP15(c14, 0, c9, 5)
#define PMEVCNTR14 __ACCESS_CP15(c14, 0, c9, 6)
#define PMEVCNTR15 __ACCESS_CP15(c14, 0, c9, 7)
#define PMEVCNTR16 __ACCESS_CP15(c14, 0, c10, 0)
#define PMEVCNTR17 __ACCESS_CP15(c14, 0, c10, 1)
#define PMEVCNTR18 __ACCESS_CP15(c14, 0, c10, 2)
#define PMEVCNTR19 __ACCESS_CP15(c14, 0, c10, 3)
#define PMEVCNTR20 __ACCESS_CP15(c14, 0, c10, 4)
#define PMEVCNTR21 __ACCESS_CP15(c14, 0, c10, 5)
#define PMEVCNTR22 __ACCESS_CP15(c14, 0, c10, 6)
#define PMEVCNTR23 __ACCESS_CP15(c14, 0, c10, 7)
#define PMEVCNTR24 __ACCESS_CP15(c14, 0, c11, 0)
#define PMEVCNTR25 __ACCESS_CP15(c14, 0, c11, 1)
#define PMEVCNTR26 __ACCESS_CP15(c14, 0, c11, 2)
#define PMEVCNTR27 __ACCESS_CP15(c14, 0, c11, 3)
#define PMEVCNTR28 __ACCESS_CP15(c14, 0, c11, 4)
#define PMEVCNTR29 __ACCESS_CP15(c14, 0, c11, 5)
#define PMEVCNTR30 __ACCESS_CP15(c14, 0, c11, 6)
#define PMEVTYPER0 __ACCESS_CP15(c14, 0, c12, 0)
#define PMEVTYPER1 __ACCESS_CP15(c14, 0, c12, 1)
#define PMEVTYPER2 __ACCESS_CP15(c14, 0, c12, 2)
#define PMEVTYPER3 __ACCESS_CP15(c14, 0, c12, 3)
#define PMEVTYPER4 __ACCESS_CP15(c14, 0, c12, 4)
#define PMEVTYPER5 __ACCESS_CP15(c14, 0, c12, 5)
#define PMEVTYPER6 __ACCESS_CP15(c14, 0, c12, 6)
#define PMEVTYPER7 __ACCESS_CP15(c14, 0, c12, 7)
#define PMEVTYPER8 __ACCESS_CP15(c14, 0, c13, 0)
#define PMEVTYPER9 __ACCESS_CP15(c14, 0, c13, 1)
#define PMEVTYPER10 __ACCESS_CP15(c14, 0, c13, 2)
#define PMEVTYPER11 __ACCESS_CP15(c14, 0, c13, 3)
#define PMEVTYPER12 __ACCESS_CP15(c14, 0, c13, 4)
#define PMEVTYPER13 __ACCESS_CP15(c14, 0, c13, 5)
#define PMEVTYPER14 __ACCESS_CP15(c14, 0, c13, 6)
#define PMEVTYPER15 __ACCESS_CP15(c14, 0, c13, 7)
#define PMEVTYPER16 __ACCESS_CP15(c14, 0, c14, 0)
#define PMEVTYPER17 __ACCESS_CP15(c14, 0, c14, 1)
#define PMEVTYPER18 __ACCESS_CP15(c14, 0, c14, 2)
#define PMEVTYPER19 __ACCESS_CP15(c14, 0, c14, 3)
#define PMEVTYPER20 __ACCESS_CP15(c14, 0, c14, 4)
#define PMEVTYPER21 __ACCESS_CP15(c14, 0, c14, 5)
#define PMEVTYPER22 __ACCESS_CP15(c14, 0, c14, 6)
#define PMEVTYPER23 __ACCESS_CP15(c14, 0, c14, 7)
#define PMEVTYPER24 __ACCESS_CP15(c14, 0, c15, 0)
#define PMEVTYPER25 __ACCESS_CP15(c14, 0, c15, 1)
#define PMEVTYPER26 __ACCESS_CP15(c14, 0, c15, 2)
#define PMEVTYPER27 __ACCESS_CP15(c14, 0, c15, 3)
#define PMEVTYPER28 __ACCESS_CP15(c14, 0, c15, 4)
#define PMEVTYPER29 __ACCESS_CP15(c14, 0, c15, 5)
#define PMEVTYPER30 __ACCESS_CP15(c14, 0, c15, 6)
#define RETURN_READ_PMEVCNTRN(n) \
return read_sysreg(PMEVCNTR##n)
static unsigned long read_pmevcntrn(int n)
{
PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
return 0;
}
#define WRITE_PMEVCNTRN(n) \
write_sysreg(val, PMEVCNTR##n)
static void write_pmevcntrn(int n, unsigned long val)
{
PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
}
#define WRITE_PMEVTYPERN(n) \
write_sysreg(val, PMEVTYPER##n)
static void write_pmevtypern(int n, unsigned long val)
{
PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
}
static inline unsigned long read_pmmir(void)
{
return read_sysreg(PMMIR);
}
static inline u32 read_pmuver(void)
{
/* PMUVers is not a signed field */
u32 dfr0 = read_cpuid_ext(CPUID_EXT_DFR0);
return (dfr0 >> 24) & 0xf;
}
static inline void write_pmcr(u32 val)
{
write_sysreg(val, PMCR);
}
static inline u32 read_pmcr(void)
{
return read_sysreg(PMCR);
}
static inline void write_pmselr(u32 val)
{
write_sysreg(val, PMSELR);
}
static inline void write_pmccntr(u64 val)
{
write_sysreg(val, PMCCNTR);
}
static inline u64 read_pmccntr(void)
{
return read_sysreg(PMCCNTR);
}
static inline void write_pmxevcntr(u32 val)
{
write_sysreg(val, PMXEVCNTR);
}
static inline u32 read_pmxevcntr(void)
{
return read_sysreg(PMXEVCNTR);
}
static inline void write_pmxevtyper(u32 val)
{
write_sysreg(val, PMXEVTYPER);
}
static inline void write_pmcntenset(u32 val)
{
write_sysreg(val, PMCNTENSET);
}
static inline void write_pmcntenclr(u32 val)
{
write_sysreg(val, PMCNTENCLR);
}
static inline void write_pmintenset(u32 val)
{
write_sysreg(val, PMINTENSET);
}
static inline void write_pmintenclr(u32 val)
{
write_sysreg(val, PMINTENCLR);
}
static inline void write_pmccfiltr(u32 val)
{
write_sysreg(val, PMCCFILTR);
}
static inline void write_pmovsclr(u32 val)
{
write_sysreg(val, PMOVSR);
}
static inline u32 read_pmovsclr(void)
{
return read_sysreg(PMOVSR);
}
static inline void write_pmuserenr(u32 val)
{
write_sysreg(val, PMUSERENR);
}
static inline u32 read_pmceid0(void)
{
return read_sysreg(PMCEID0);
}
static inline u32 read_pmceid1(void)
{
return read_sysreg(PMCEID1);
}
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
return false;
}
/* PMU Version in DFR Register */
#define ARMV8_PMU_DFR_VER_NI 0
#define ARMV8_PMU_DFR_VER_V3P4 0x5
#define ARMV8_PMU_DFR_VER_V3P5 0x6
#define ARMV8_PMU_DFR_VER_IMP_DEF 0xF
static inline bool pmuv3_implemented(int pmuver)
{
return !(pmuver == ARMV8_PMU_DFR_VER_IMP_DEF ||
pmuver == ARMV8_PMU_DFR_VER_NI);
}
static inline bool is_pmuv3p4(int pmuver)
{
return pmuver >= ARMV8_PMU_DFR_VER_V3P4;
}
static inline bool is_pmuv3p5(int pmuver)
{
return pmuver >= ARMV8_PMU_DFR_VER_V3P5;
}
#endif
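
Both of the new asm/arm_pmuv3.h headers rely on a PMEVN_SWITCH() helper to turn a run-time counter index into the matching fixed-name register access. For reference, a sketch of that helper as defined in include/linux/perf/arm_pmuv3.h (abridged here; the real macro spells out all 31 cases):

#define PMEVN_CASE(n, case_macro) \
	case n: case_macro(n); break

#define PMEVN_SWITCH(x, case_macro)				\
	do {							\
		switch (x) {					\
		PMEVN_CASE(0, case_macro);			\
		PMEVN_CASE(1, case_macro);			\
		/* ... cases 2-29 elided ... */			\
		PMEVN_CASE(30, case_macro);			\
		default: WARN(1, "Invalid PMEV* index\n");	\
		}						\
	} while (0)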
@@ -403,7 +403,7 @@ config CPU_V6K
select CPU_THUMB_CAPABLE
select CPU_TLB_V6 if MMU
# ARMv7
# ARMv7 and ARMv8 architectures
config CPU_V7
bool
select CPU_32v6K
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2012 ARM Ltd.
*/
#ifndef __ASM_PMUV3_H
#define __ASM_PMUV3_H
#include <linux/kvm_host.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>
#define RETURN_READ_PMEVCNTRN(n) \
return read_sysreg(pmevcntr##n##_el0)
static unsigned long read_pmevcntrn(int n)
{
PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
return 0;
}
#define WRITE_PMEVCNTRN(n) \
write_sysreg(val, pmevcntr##n##_el0)
static void write_pmevcntrn(int n, unsigned long val)
{
PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
}
#define WRITE_PMEVTYPERN(n) \
write_sysreg(val, pmevtyper##n##_el0)
static void write_pmevtypern(int n, unsigned long val)
{
PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
}
static inline unsigned long read_pmmir(void)
{
return read_cpuid(PMMIR_EL1);
}
static inline u32 read_pmuver(void)
{
u64 dfr0 = read_sysreg(id_aa64dfr0_el1);
return cpuid_feature_extract_unsigned_field(dfr0,
ID_AA64DFR0_EL1_PMUVer_SHIFT);
}
static inline void write_pmcr(u32 val)
{
write_sysreg(val, pmcr_el0);
}
static inline u32 read_pmcr(void)
{
return read_sysreg(pmcr_el0);
}
static inline void write_pmselr(u32 val)
{
write_sysreg(val, pmselr_el0);
}
static inline void write_pmccntr(u64 val)
{
write_sysreg(val, pmccntr_el0);
}
static inline u64 read_pmccntr(void)
{
return read_sysreg(pmccntr_el0);
}
static inline void write_pmxevcntr(u32 val)
{
write_sysreg(val, pmxevcntr_el0);
}
static inline u32 read_pmxevcntr(void)
{
return read_sysreg(pmxevcntr_el0);
}
static inline void write_pmxevtyper(u32 val)
{
write_sysreg(val, pmxevtyper_el0);
}
static inline void write_pmcntenset(u32 val)
{
write_sysreg(val, pmcntenset_el0);
}
static inline void write_pmcntenclr(u32 val)
{
write_sysreg(val, pmcntenclr_el0);
}
static inline void write_pmintenset(u32 val)
{
write_sysreg(val, pmintenset_el1);
}
static inline void write_pmintenclr(u32 val)
{
write_sysreg(val, pmintenclr_el1);
}
static inline void write_pmccfiltr(u32 val)
{
write_sysreg(val, pmccfiltr_el0);
}
static inline void write_pmovsclr(u32 val)
{
write_sysreg(val, pmovsclr_el0);
}
static inline u32 read_pmovsclr(void)
{
return read_sysreg(pmovsclr_el0);
}
static inline void write_pmuserenr(u32 val)
{
write_sysreg(val, pmuserenr_el0);
}
static inline u32 read_pmceid0(void)
{
return read_sysreg(pmceid0_el0);
}
static inline u32 read_pmceid1(void)
{
return read_sysreg(pmceid1_el0);
}
static inline bool pmuv3_implemented(int pmuver)
{
return !(pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF ||
pmuver == ID_AA64DFR0_EL1_PMUVer_NI);
}
static inline bool is_pmuv3p4(int pmuver)
{
return pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4;
}
static inline bool is_pmuv3p5(int pmuver)
{
return pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5;
}
#endif
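
The arm64 header above and the 32-bit one earlier deliberately expose the same accessor API; that is what lets the PMUv3 driver move out of arch/arm64 and build for both architectures. A minimal sketch of the intended usage, simplified from the shared driver (drivers/perf/arm_pmuv3.c):

static inline u32 armv8pmu_pmcr_read(void)
{
	return read_pmcr();
}

static inline void armv8pmu_pmcr_write(u32 val)
{
	val &= ARMV8_PMU_PMCR_MASK;	/* keep only architected PMCR bits */
	isb();				/* context-synchronise before the update */
	write_pmcr(val);
}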
@@ -45,7 +45,6 @@ obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_CPU_PM) += sleep.o suspend.o
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
@@ -16,7 +16,6 @@
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kmemleak.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
@@ -46,7 +45,6 @@
#include <kvm/arm_psci.h>
static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;
DEFINE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);
DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
@@ -2105,41 +2103,6 @@ static int __init init_hyp_mode(void)
return err;
}
static void __init _kvm_host_prot_finalize(void *arg)
{
int *err = arg;
if (WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize)))
WRITE_ONCE(*err, -EINVAL);
}
static int __init pkvm_drop_host_privileges(void)
{
int ret = 0;
/*
* Flip the static key upfront as that may no longer be possible
* once the host stage 2 is installed.
*/
static_branch_enable(&kvm_protected_mode_initialized);
on_each_cpu(_kvm_host_prot_finalize, &ret, 1);
return ret;
}
static int __init finalize_hyp_mode(void)
{
if (!is_protected_kvm_enabled())
return 0;
/*
* Exclude HYP sections from kmemleak so that they don't get peeked
* at, which would end badly once inaccessible.
*/
kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
kmemleak_free_part_phys(hyp_mem_base, hyp_mem_size);
return pkvm_drop_host_privileges();
}
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
{
struct kvm_vcpu *vcpu;
@@ -2257,14 +2220,6 @@ static __init int kvm_arm_init(void)
if (err)
goto out_hyp;
if (!in_hyp_mode) {
err = finalize_hyp_mode();
if (err) {
kvm_err("Failed to finalize Hyp protection\n");
goto out_subs;
}
}
if (is_protected_kvm_enabled()) {
kvm_info("Protected nVHE mode initialized successfully\n");
} else if (in_hyp_mode) {
@@ -4,6 +4,8 @@
* Author: Quentin Perret <qperret@google.com>
*/
#include <linux/init.h>
#include <linux/kmemleak.h>
#include <linux/kvm_host.h>
#include <linux/memblock.h>
#include <linux/mutex.h>
@@ -13,6 +15,8 @@
#include "hyp_constants.h"
DEFINE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);
static struct memblock_region *hyp_memory = kvm_nvhe_sym(hyp_memory);
static unsigned int *hyp_memblock_nr_ptr = &kvm_nvhe_sym(hyp_memblock_nr);
@@ -213,3 +217,46 @@ int pkvm_init_host_vm(struct kvm *host_kvm)
mutex_init(&host_kvm->lock);
return 0;
}
static void __init _kvm_host_prot_finalize(void *arg)
{
int *err = arg;
if (WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize)))
WRITE_ONCE(*err, -EINVAL);
}
static int __init pkvm_drop_host_privileges(void)
{
int ret = 0;
/*
* Flip the static key upfront as that may no longer be possible
* once the host stage 2 is installed.
*/
static_branch_enable(&kvm_protected_mode_initialized);
on_each_cpu(_kvm_host_prot_finalize, &ret, 1);
return ret;
}
static int __init finalize_pkvm(void)
{
int ret;
if (!is_protected_kvm_enabled())
return 0;
/*
* Exclude HYP sections from kmemleak so that they don't get peeked
* at, which would end badly once inaccessible.
*/
kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
kmemleak_free_part_phys(hyp_mem_base, hyp_mem_size);
ret = pkvm_drop_host_privileges();
if (ret)
pr_err("Failed to finalize Hyp protection: %d\n", ret);
return ret;
}
device_initcall_sync(finalize_pkvm);
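
The move to device_initcall_sync() is the fix from "KVM: arm64: Ensure CPU PMU probes before pKVM host de-privilege": kvm_arm_init() and the PMU drivers are both plain device-level initcalls, so finalizing host de-privilege from kvm_arm_init() could run before the PMU had probed. The _sync level is guaranteed to run only after all ordinary device_initcall()s; for reference, from include/linux/init.h:

#define device_initcall(fn)		__define_initcall(fn, 6)
#define device_initcall_sync(fn)	__define_initcall(fn, 6s)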
@@ -100,6 +100,16 @@ config ARM_SMMU_V3_PMU
through the SMMU and allow the resulting information to be filtered
based on the Stream ID of the corresponding master.
config ARM_PMUV3
depends on HW_PERF_EVENTS && ((ARM && CPU_V7) || ARM64)
bool "ARM PMUv3 support" if !ARM64
default ARM64
help
Say y if you want to use the ARM performance monitor unit (PMU)
version 3. The PMUv3 is the CPU performance monitors on ARMv8
(aarch32 and aarch64) systems that implement the PMUv3
architecture.
config ARM_DSU_PMU
tristate "ARM DynamIQ Shared Unit (DSU) PMU"
depends on ARM64
@@ -5,6 +5,7 @@ obj-$(CONFIG_ARM_CMN) += arm-cmn.o
obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o
obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
obj-$(CONFIG_ARM_PMUV3) += arm_pmuv3.o
obj-$(CONFIG_ARM_SMMU_V3_PMU) += arm_smmuv3_pmu.o
obj-$(CONFIG_FSL_IMX8_DDR_PMU) += fsl_imx8_ddr_perf.o
obj-$(CONFIG_HISI_PMU) += hisilicon/
@@ -656,8 +656,7 @@ static int ali_drw_pmu_probe(struct platform_device *pdev)
drw_pmu->dev = &pdev->dev;
platform_set_drvdata(pdev, drw_pmu);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
drw_pmu->cfg_base = devm_ioremap_resource(&pdev->dev, res);
drw_pmu->cfg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(drw_pmu->cfg_base))
return PTR_ERR(drw_pmu->cfg_base);
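This hunk, and the dmc620 and qcom ones further down, fold the platform_get_resource() + devm_ioremap_resource() pair into a single call. The helper is declared in include/linux/platform_device.h:

/*
 * Returns the remapped address or an ERR_PTR() on failure; if @res is
 * non-NULL it also hands back the looked-up struct resource.
 */
void __iomem *
devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
				       unsigned int index,
				       struct resource **res);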
@@ -156,10 +156,14 @@ static int meson_ddr_perf_event_add(struct perf_event *event, int flags)
u64 config2 = event->attr.config2;
int i;
for_each_set_bit(i, (const unsigned long *)&config1, sizeof(config1))
for_each_set_bit(i,
(const unsigned long *)&config1,
BITS_PER_TYPE(config1))
meson_ddr_set_axi_filter(event, i);
for_each_set_bit(i, (const unsigned long *)&config2, sizeof(config2))
for_each_set_bit(i,
(const unsigned long *)&config2,
BITS_PER_TYPE(config2))
meson_ddr_set_axi_filter(event, i + 64);
if (flags & PERF_EF_START)
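The amlogic change above is a real bug fix: for_each_set_bit() takes its size argument in bits, but sizeof(config1) is 8 (bytes), so only filter bits 0-7 were ever walked. BITS_PER_TYPE() yields the intended 64:

/* include/linux/bits.h */
#define BITS_PER_TYPE(type)	(sizeof(type) * BITS_PER_BYTE)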
@@ -559,7 +559,21 @@ static int m1_pmu_fire_init(struct arm_pmu *cpu_pmu)
return m1_pmu_init(cpu_pmu);
}
static int m2_pmu_avalanche_init(struct arm_pmu *cpu_pmu)
{
cpu_pmu->name = "apple_avalanche_pmu";
return m1_pmu_init(cpu_pmu);
}
static int m2_pmu_blizzard_init(struct arm_pmu *cpu_pmu)
{
cpu_pmu->name = "apple_blizzard_pmu";
return m1_pmu_init(cpu_pmu);
}
static const struct of_device_id m1_pmu_of_device_ids[] = {
{ .compatible = "apple,avalanche-pmu", .data = m2_pmu_avalanche_init, },
{ .compatible = "apple,blizzard-pmu", .data = m2_pmu_blizzard_init, },
{ .compatible = "apple,icestorm-pmu", .data = m1_pmu_ice_init, },
{ .compatible = "apple,firestorm-pmu", .data = m1_pmu_fire_init, },
{ },
@@ -581,4 +595,3 @@ static struct platform_driver m1_pmu_driver = {
};
module_platform_driver(m1_pmu_driver);
MODULE_LICENSE("GPL v2");
@@ -57,14 +57,12 @@
#define CMN_INFO_REQ_VC_NUM GENMASK_ULL(1, 0)
/* XPs also have some local topology info which has uses too */
#define CMN_MXP__CONNECT_INFO_P0 0x0008
#define CMN_MXP__CONNECT_INFO_P1 0x0010
#define CMN_MXP__CONNECT_INFO_P2 0x0028
#define CMN_MXP__CONNECT_INFO_P3 0x0030
#define CMN_MXP__CONNECT_INFO_P4 0x0038
#define CMN_MXP__CONNECT_INFO_P5 0x0040
#define CMN_MXP__CONNECT_INFO(p) (0x0008 + 8 * (p))
#define CMN__CONNECT_INFO_DEVICE_TYPE GENMASK_ULL(4, 0)
#define CMN_MAX_PORTS 6
#define CI700_CONNECT_INFO_P2_5_OFFSET 0x10
/* PMU registers occupy the 3rd 4KB page of each node's region */
#define CMN_PMU_OFFSET 0x2000
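
CMN_MXP__CONNECT_INFO(p) assumes the per-port connect-info registers sit at a contiguous 8-byte stride, which holds on CMN-700. The fixed defines it replaces encoded the CI-700 layout, where the mesh_port_connect_info registers occupy the gap above port 1, hence the retained 0x10 fixup for ports 2-5. Checking the arithmetic against the old values:

/* CMN-700: p = 2 -> 0x0008 + 8*2        = 0x0018  (contiguous)  */
/* CI-700:  p = 2 -> 0x0008 + 8*2 + 0x10 = 0x0028  (old ..._P2)  */
/* CI-700:  p = 5 -> 0x0008 + 8*5 + 0x10 = 0x0040  (old ..._P5)  */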
@@ -166,7 +164,7 @@
#define CMN_EVENT_BYNODEID(event) FIELD_GET(CMN_CONFIG_BYNODEID, (event)->attr.config)
#define CMN_EVENT_NODEID(event) FIELD_GET(CMN_CONFIG_NODEID, (event)->attr.config)
#define CMN_CONFIG_WP_COMBINE GENMASK_ULL(27, 24)
#define CMN_CONFIG_WP_COMBINE GENMASK_ULL(30, 27)
#define CMN_CONFIG_WP_DEV_SEL GENMASK_ULL(50, 48)
#define CMN_CONFIG_WP_CHN_SEL GENMASK_ULL(55, 51)
/* Note that we don't yet support the tertiary match group on newer IPs */
@@ -396,6 +394,25 @@ static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn,
return NULL;
}
static u32 arm_cmn_device_connect_info(const struct arm_cmn *cmn,
const struct arm_cmn_node *xp, int port)
{
int offset = CMN_MXP__CONNECT_INFO(port);
if (port >= 2) {
if (cmn->model & (CMN600 | CMN650))
return 0;
/*
* CI-700 may have extra ports, but still has the
* mesh_port_connect_info registers in the way.
*/
if (cmn->model == CI700)
offset += CI700_CONNECT_INFO_P2_5_OFFSET;
}
return readl_relaxed(xp->pmu_base - CMN_PMU_OFFSET + offset);
}
static struct dentry *arm_cmn_debugfs;
#ifdef CONFIG_DEBUG_FS
@@ -469,7 +486,7 @@ static int arm_cmn_map_show(struct seq_file *s, void *data)
y = cmn->mesh_y;
while (y--) {
int xp_base = cmn->mesh_x * y;
u8 port[6][CMN_MAX_DIMENSION];
u8 port[CMN_MAX_PORTS][CMN_MAX_DIMENSION];
for (x = 0; x < cmn->mesh_x; x++)
seq_puts(s, "--------+");
@@ -477,14 +494,9 @@
seq_printf(s, "\n%d |", y);
for (x = 0; x < cmn->mesh_x; x++) {
struct arm_cmn_node *xp = cmn->xps + xp_base + x;
void __iomem *base = xp->pmu_base - CMN_PMU_OFFSET;
port[0][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P0);
port[1][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P1);
port[2][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P2);
port[3][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P3);
port[4][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P4);
port[5][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P5);
for (p = 0; p < CMN_MAX_PORTS; p++)
port[p][x] = arm_cmn_device_connect_info(cmn, xp, p);
seq_printf(s, " XP #%-2d |", xp_base + x);
}
@@ -1546,7 +1558,7 @@ static int arm_cmn_event_init(struct perf_event *event)
type = CMN_EVENT_TYPE(event);
/* DTC events (i.e. cycles) already have everything they need */
if (type == CMN_TYPE_DTC)
return 0;
return arm_cmn_validate_group(cmn, event);
eventid = CMN_EVENT_EVENTID(event);
/* For watchpoints we need the actual XP node here */
@@ -2083,18 +2095,9 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
* from this, since in that case we will see at least one XP
* with port 2 connected, for the HN-D.
*/
if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P0))
xp_ports |= BIT(0);
if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P1))
xp_ports |= BIT(1);
if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P2))
xp_ports |= BIT(2);
if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P3))
xp_ports |= BIT(3);
if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P4))
xp_ports |= BIT(4);
if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P5))
xp_ports |= BIT(5);
for (int p = 0; p < CMN_MAX_PORTS; p++)
if (arm_cmn_device_connect_info(cmn, xp, p))
xp_ports |= BIT(p);
if (cmn->multi_dtm && (xp_ports & 0xc))
arm_cmn_init_dtm(dtm++, xp, 1);
@@ -1078,12 +1078,14 @@ static int arm_cspmu_request_irq(struct arm_cspmu *cspmu)
static inline int arm_cspmu_find_cpu_container(int cpu, u32 container_uid)
{
u32 acpi_uid;
struct device *cpu_dev = get_cpu_device(cpu);
struct acpi_device *acpi_dev = ACPI_COMPANION(cpu_dev);
struct device *cpu_dev;
struct acpi_device *acpi_dev;
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev)
return -ENODEV;
acpi_dev = ACPI_COMPANION(cpu_dev);
while (acpi_dev) {
if (!strcmp(acpi_device_hid(acpi_dev),
ACPI_PROCESSOR_CONTAINER_HID) &&
@@ -655,8 +655,7 @@ static int dmc620_pmu_device_probe(struct platform_device *pdev)
.attr_groups = dmc620_pmu_attr_groups,
};
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dmc620_pmu->base = devm_ioremap_resource(&pdev->dev, res);
dmc620_pmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(dmc620_pmu->base))
return PTR_ERR(dmc620_pmu->base);
@@ -316,7 +316,7 @@ static int hisi_cpa_pmu_probe(struct platform_device *pdev)
if (!name)
return -ENOMEM;
hisi_pmu_init(cpa_pmu, name, THIS_MODULE);
hisi_pmu_init(cpa_pmu, THIS_MODULE);
/* Power Management should be disabled before using CPA PMU. */
hisi_cpa_pmu_disable_pm(cpa_pmu);
@@ -499,13 +499,6 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
&ddrc_pmu->node);
if (ret) {
dev_err(&pdev->dev, "Error %d registering hotplug;\n", ret);
return ret;
}
if (ddrc_pmu->identifier >= HISI_PMU_V2)
name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"hisi_sccl%u_ddrc%u_%u",
@@ -516,7 +509,17 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
"hisi_sccl%u_ddrc%u", ddrc_pmu->sccl_id,
ddrc_pmu->index_id);
hisi_pmu_init(ddrc_pmu, name, THIS_MODULE);
if (!name)
return -ENOMEM;
ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
&ddrc_pmu->node);
if (ret) {
dev_err(&pdev->dev, "Error %d registering hotplug;\n", ret);
return ret;
}
hisi_pmu_init(ddrc_pmu, THIS_MODULE);
ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1);
if (ret) {
@@ -510,6 +510,11 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u",
hha_pmu->sccl_id, hha_pmu->index_id);
if (!name)
return -ENOMEM;
ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
&hha_pmu->node);
if (ret) {
@@ -517,9 +522,7 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
return ret;
}
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u",
hha_pmu->sccl_id, hha_pmu->index_id);
hisi_pmu_init(hha_pmu, name, THIS_MODULE);
hisi_pmu_init(hha_pmu, THIS_MODULE);
ret = perf_pmu_register(&hha_pmu->pmu, name, -1);
if (ret) {
@@ -544,6 +544,11 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u",
l3c_pmu->sccl_id, l3c_pmu->ccl_id);
if (!name)
return -ENOMEM;
ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
&l3c_pmu->node);
if (ret) {
@@ -551,13 +556,7 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
return ret;
}
/*
* CCL_ID is used to identify the L3C in the same SCCL which was
* used _UID by mistake.
*/
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u",
l3c_pmu->sccl_id, l3c_pmu->ccl_id);
hisi_pmu_init(l3c_pmu, name, THIS_MODULE);
hisi_pmu_init(l3c_pmu, THIS_MODULE);
ret = perf_pmu_register(&l3c_pmu->pmu, name, -1);
if (ret) {
@@ -412,7 +412,7 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev)
return ret;
}
hisi_pmu_init(pa_pmu, name, THIS_MODULE);
hisi_pmu_init(pa_pmu, THIS_MODULE);
ret = perf_pmu_register(&pa_pmu->pmu, name, -1);
if (ret) {
dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret);
@@ -531,12 +531,10 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_offline_cpu);
void hisi_pmu_init(struct hisi_pmu *hisi_pmu, const char *name,
struct module *module)
void hisi_pmu_init(struct hisi_pmu *hisi_pmu, struct module *module)
{
struct pmu *pmu = &hisi_pmu->pmu;
pmu->name = name;
pmu->module = module;
pmu->task_ctx_nr = perf_invalid_context;
pmu->event_init = hisi_uncore_pmu_event_init;
@@ -121,6 +121,5 @@ ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
struct platform_device *pdev);
void hisi_pmu_init(struct hisi_pmu *hisi_pmu, const char *name,
struct module *module);
void hisi_pmu_init(struct hisi_pmu *hisi_pmu, struct module *module);
#endif /* __HISI_UNCORE_PMU_H__ */
@@ -445,7 +445,7 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev)
return ret;
}
hisi_pmu_init(sllc_pmu, name, THIS_MODULE);
hisi_pmu_init(sllc_pmu, THIS_MODULE);
ret = perf_pmu_register(&sllc_pmu->pmu, name, -1);
if (ret) {
@@ -763,8 +763,7 @@ static int qcom_l3_cache_pmu_probe(struct platform_device *pdev)
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
memrc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
l3pmu->regs = devm_ioremap_resource(&pdev->dev, memrc);
l3pmu->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &memrc);
if (IS_ERR(l3pmu->regs))
return PTR_ERR(l3pmu->regs);
@@ -8,7 +8,7 @@
#define __ASM_ARM_KVM_PMU_H
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include <linux/perf/arm_pmuv3.h>
#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1)