Commit 4a2b88eb authored by Linus Torvalds

Merge tag 'perf-core-2021-08-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 perf event updates from Ingo Molnar:

 - Add support for Intel Sapphire Rapids server CPU uncore events

 - Allow the AMD uncore driver to be built as a module

 - Misc cleanups and fixes

* tag 'perf-core-2021-08-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  perf/x86/amd/ibs: Add bitfield definitions in new <asm/amd-ibs.h> header
  perf/amd/uncore: Allow the driver to be built as a module
  x86/cpu: Add get_llc_id() helper function
  perf/amd/uncore: Clean up header use, use <linux/ include paths instead of <asm/
  perf/amd/uncore: Simplify code, use free_percpu()'s built-in check for NULL
  perf/hw_breakpoint: Replace deprecated CPU-hotplug functions
  perf/x86/intel: Replace deprecated CPU-hotplug functions
  perf/x86: Remove unused assignment to pointer 'e'
  perf/x86/intel/uncore: Fix IIO cleanup mapping procedure for SNR/ICX
  perf/x86/intel/uncore: Support IMC free-running counters on Sapphire Rapids server
  perf/x86/intel/uncore: Support IIO free-running counters on Sapphire Rapids server
  perf/x86/intel/uncore: Factor out snr_uncore_mmio_map()
  perf/x86/intel/uncore: Add alias PMU name
  perf/x86/intel/uncore: Add Sapphire Rapids server MDF support
  perf/x86/intel/uncore: Add Sapphire Rapids server M3UPI support
  perf/x86/intel/uncore: Add Sapphire Rapids server UPI support
  perf/x86/intel/uncore: Add Sapphire Rapids server M2M support
  perf/x86/intel/uncore: Add Sapphire Rapids server IMC support
  perf/x86/intel/uncore: Add Sapphire Rapids server PCU support
  perf/x86/intel/uncore: Add Sapphire Rapids server M2PCIe support
  ...
parents 5d3c0db4 6a371baf
What:		/sys/bus/event_source/devices/uncore_*/alias
Date:		June 2021
KernelVersion:	5.15
Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description:	Read-only. An attribute to describe the alias name of
		the uncore PMU if an alias exists on some platforms.
		The 'perf(1)' tool should treat both names the same.
		They both can be used to access the uncore PMU.

		Example:

		$ cat /sys/devices/uncore_cha_2/alias
		uncore_type_0_2
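		Both names accept the same event syntax; a hedged
		usage sketch (the event encoding below is illustrative
		only, not taken from this patch set):

		$ perf stat -e uncore_cha_2/event=0x1/ -a -- sleep 1
		$ perf stat -e uncore_type_0_2/event=0x1/ -a -- sleep 1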
@@ -34,4 +34,14 @@ config PERF_EVENTS_AMD_POWER
 	  (CPUID Fn8000_0007_EDX[12]) interface to calculate the
 	  average power consumption on Family 15h processors.

+config PERF_EVENTS_AMD_UNCORE
+	tristate "AMD Uncore performance events"
+	depends on PERF_EVENTS && CPU_SUP_AMD
+	default y
+	help
+	  Include support for AMD uncore performance events for use with
+	  e.g., perf stat -e amd_l3/.../,amd_df/.../.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called 'amd-uncore'.
 endmenu
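A minimal usage sketch for the new tristate option, assuming it was built as
a module (PERF_EVENTS_AMD_UNCORE=m); the event encoding below is illustrative
only, and modprobe treats '-' and '_' in module names interchangeably:

$ modprobe amd_uncore
$ perf stat -e amd_df/event=0x7,umask=0x0/ -a -- sleep 1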
 # SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_CPU_SUP_AMD) += core.o uncore.o
+obj-$(CONFIG_CPU_SUP_AMD) += core.o
 obj-$(CONFIG_PERF_EVENTS_AMD_POWER) += power.o
 obj-$(CONFIG_X86_LOCAL_APIC) += ibs.o
+obj-$(CONFIG_PERF_EVENTS_AMD_UNCORE) += amd-uncore.o
+amd-uncore-objs := uncore.o
 ifdef CONFIG_AMD_IOMMU
 obj-$(CONFIG_CPU_SUP_AMD) += iommu.o
 endif
@@ -26,6 +26,7 @@ static u32 ibs_caps;
 #include <linux/hardirq.h>

 #include <asm/nmi.h>
+#include <asm/amd-ibs.h>

 #define IBS_FETCH_CONFIG_MASK (IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
 #define IBS_OP_CONFIG_MASK    IBS_OP_MAX_CNT
@@ -100,15 +101,6 @@ struct perf_ibs {
     u64 (*get_count)(u64 config);
 };

-struct perf_ibs_data {
-    u32 size;
-    union {
-        u32 data[0]; /* data buffer starts here */
-        u32 caps;
-    };
-    u64 regs[MSR_AMD64_IBS_REG_COUNT_MAX];
-};
-
 static int
 perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *hw_period)
 {
@@ -329,11 +321,14 @@ static int perf_ibs_set_period(struct perf_ibs *perf_ibs,

 static u64 get_ibs_fetch_count(u64 config)
 {
-    return (config & IBS_FETCH_CNT) >> 12;
+    union ibs_fetch_ctl fetch_ctl = (union ibs_fetch_ctl)config;
+
+    return fetch_ctl.fetch_cnt << 4;
 }

 static u64 get_ibs_op_count(u64 config)
 {
+    union ibs_op_ctl op_ctl = (union ibs_op_ctl)config;
     u64 count = 0;

     /*
@@ -341,12 +336,12 @@ static u64 get_ibs_op_count(u64 config)
      * and the lower 7 bits of CurCnt are randomized.
      * Otherwise CurCnt has the full 27-bit current counter value.
      */
-    if (config & IBS_OP_VAL) {
-        count = (config & IBS_OP_MAX_CNT) << 4;
+    if (op_ctl.op_val) {
+        count = op_ctl.opmaxcnt << 4;
         if (ibs_caps & IBS_CAPS_OPCNTEXT)
-            count += config & IBS_OP_MAX_CNT_EXT_MASK;
+            count += op_ctl.opmaxcnt_ext << 20;
     } else if (ibs_caps & IBS_CAPS_RDWROPCNT) {
-        count = (config & IBS_OP_CUR_CNT) >> 32;
+        count = op_ctl.opcurcnt;
     }

     return count;
...
@@ -12,11 +12,11 @@
 #include <linux/init.h>
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
+#include <linux/cpufeature.h>
+#include <linux/smp.h>

-#include <asm/cpufeature.h>
 #include <asm/perf_event.h>
 #include <asm/msr.h>
-#include <asm/smp.h>

 #define NUM_COUNTERS_NB 4
 #define NUM_COUNTERS_L2 4
@@ -347,6 +347,7 @@ static struct pmu amd_nb_pmu = {
     .stop         = amd_uncore_stop,
     .read         = amd_uncore_read,
     .capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
+    .module       = THIS_MODULE,
 };

 static struct pmu amd_llc_pmu = {
@@ -360,6 +361,7 @@ static struct pmu amd_llc_pmu = {
     .stop         = amd_uncore_stop,
     .read         = amd_uncore_read,
     .capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
+    .module       = THIS_MODULE,
 };

 static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
@@ -452,7 +454,7 @@ static int amd_uncore_cpu_starting(unsigned int cpu)

     if (amd_uncore_llc) {
         uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
-        uncore->id = per_cpu(cpu_llc_id, cpu);
+        uncore->id = get_llc_id(cpu);

         uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
         *per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
@@ -659,12 +661,34 @@ static int __init amd_uncore_init(void)
 fail_llc:
     if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
         perf_pmu_unregister(&amd_nb_pmu);
-    if (amd_uncore_llc)
-        free_percpu(amd_uncore_llc);
+    free_percpu(amd_uncore_llc);
 fail_nb:
-    if (amd_uncore_nb)
-        free_percpu(amd_uncore_nb);
+    free_percpu(amd_uncore_nb);

     return ret;
 }
-device_initcall(amd_uncore_init);
+
+static void __exit amd_uncore_exit(void)
+{
+    cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE);
+    cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
+    cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
+
+    if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) {
+        perf_pmu_unregister(&amd_llc_pmu);
+        free_percpu(amd_uncore_llc);
+        amd_uncore_llc = NULL;
+    }
+
+    if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
+        perf_pmu_unregister(&amd_nb_pmu);
+        free_percpu(amd_uncore_nb);
+        amd_uncore_nb = NULL;
+    }
+}
+
+module_init(amd_uncore_init);
+module_exit(amd_uncore_exit);
+
+MODULE_DESCRIPTION("AMD Uncore Driver");
+MODULE_LICENSE("GPL v2");
@@ -1087,10 +1087,8 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
      * validate an event group (assign == NULL)
      */
     if (!unsched && assign) {
-        for (i = 0; i < n; i++) {
-            e = cpuc->event_list[i];
+        for (i = 0; i < n; i++)
             static_call_cond(x86_pmu_commit_scheduling)(cpuc, i, assign[i]);
-        }
     } else {
         for (i = n0; i < n; i++) {
             e = cpuc->event_list[i];
...
@@ -5032,9 +5032,9 @@ static ssize_t freeze_on_smi_store(struct device *cdev,

     x86_pmu.attr_freeze_on_smi = val;

-    get_online_cpus();
+    cpus_read_lock();
     on_each_cpu(flip_smm_bit, &val, 1);
-    put_online_cpus();
+    cpus_read_unlock();
 done:
     mutex_unlock(&freeze_on_smi_mutex);
@@ -5077,9 +5077,9 @@ static ssize_t set_sysctl_tfa(struct device *cdev,

     allow_tsx_force_abort = val;

-    get_online_cpus();
+    cpus_read_lock();
     on_each_cpu(update_tfa_sched, NULL, 1);
-    put_online_cpus();
+    cpus_read_unlock();

     return count;
 }
...
@@ -1708,7 +1708,7 @@ static __init int pt_init(void)
     if (!boot_cpu_has(X86_FEATURE_INTEL_PT))
         return -ENODEV;

-    get_online_cpus();
+    cpus_read_lock();
     for_each_online_cpu(cpu) {
         u64 ctl;

@@ -1716,7 +1716,7 @@ static __init int pt_init(void)
         if (!ret && (ctl & RTIT_CTL_TRACEEN))
             prior_warn++;
     }
-    put_online_cpus();
+    cpus_read_unlock();

     if (prior_warn) {
         x86_add_exclusive(x86_lbr_exclusive_pt);
...
@@ -842,6 +842,18 @@ static const struct attribute_group uncore_pmu_attr_group = {
     .attrs = uncore_pmu_attrs,
 };

+void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu)
+{
+    struct intel_uncore_type *type = pmu->type;
+
+    if (type->num_boxes == 1)
+        sprintf(pmu_name, "uncore_type_%u", type->type_id);
+    else {
+        sprintf(pmu_name, "uncore_type_%u_%d",
+            type->type_id, type->box_ids[pmu->pmu_idx]);
+    }
+}
+
 static void uncore_get_pmu_name(struct intel_uncore_pmu *pmu)
 {
     struct intel_uncore_type *type = pmu->type;
@@ -851,12 +863,7 @@ static void uncore_get_pmu_name(struct intel_uncore_pmu *pmu)
      * Use uncore_type_&typeid_&boxid as name.
      */
     if (!type->name) {
-        if (type->num_boxes == 1)
-            sprintf(pmu->name, "uncore_type_%u", type->type_id);
-        else {
-            sprintf(pmu->name, "uncore_type_%u_%d",
-                type->type_id, type->box_ids[pmu->pmu_idx]);
-        }
+        uncore_get_alias_name(pmu->name, pmu);
         return;
     }
@@ -865,9 +872,13 @@ static void uncore_get_pmu_name(struct intel_uncore_pmu *pmu)
             sprintf(pmu->name, "uncore_%s", type->name);
         else
             sprintf(pmu->name, "uncore");
-    } else
-        sprintf(pmu->name, "uncore_%s_%d", type->name, pmu->pmu_idx);
+    } else {
+        /*
+         * Use the box ID from the discovery table if applicable.
+         */
+        sprintf(pmu->name, "uncore_%s_%d", type->name,
+            type->box_ids ? type->box_ids[pmu->pmu_idx] : pmu->pmu_idx);
+    }
 }

 static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
@@ -1663,6 +1674,7 @@ struct intel_uncore_init_fun {
     void (*cpu_init)(void);
     int  (*pci_init)(void);
     void (*mmio_init)(void);
+    bool use_discovery;
 };

 static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
@@ -1765,6 +1777,13 @@ static const struct intel_uncore_init_fun snr_uncore_init __initconst = {
     .mmio_init = snr_uncore_mmio_init,
 };

+static const struct intel_uncore_init_fun spr_uncore_init __initconst = {
+    .cpu_init = spr_uncore_cpu_init,
+    .pci_init = spr_uncore_pci_init,
+    .mmio_init = spr_uncore_mmio_init,
+    .use_discovery = true,
+};
+
 static const struct intel_uncore_init_fun generic_uncore_init __initconst = {
     .cpu_init = intel_uncore_generic_uncore_cpu_init,
     .pci_init = intel_uncore_generic_uncore_pci_init,
@@ -1809,6 +1828,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
     X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE,        &rkl_uncore_init),
     X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,         &adl_uncore_init),
     X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,       &adl_uncore_init),
+    X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,  &spr_uncore_init),
     X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,    &snr_uncore_init),
     {},
 };
@@ -1832,8 +1852,13 @@ static int __init intel_uncore_init(void)
             uncore_init = (struct intel_uncore_init_fun *)&generic_uncore_init;
         else
             return -ENODEV;
-    } else
+    } else {
         uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
+        if (uncore_no_discover && uncore_init->use_discovery)
+            return -ENODEV;
+        if (uncore_init->use_discovery && !intel_uncore_has_discovery_tables())
+            return -ENODEV;
+    }

     if (uncore_init->pci_init) {
         pret = uncore_init->pci_init();
...
@@ -561,6 +561,7 @@ struct event_constraint *
 uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
 void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
 u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);
+void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu);

 extern struct intel_uncore_type *empty_uncore[];
 extern struct intel_uncore_type **uncore_msr_uncores;
@@ -608,6 +609,9 @@ void snr_uncore_mmio_init(void);
 int icx_uncore_pci_init(void);
 void icx_uncore_cpu_init(void);
 void icx_uncore_mmio_init(void);
+int spr_uncore_pci_init(void);
+void spr_uncore_cpu_init(void);
+void spr_uncore_mmio_init(void);

 /* uncore_nhmex.c */
 void nhmex_uncore_cpu_init(void);
@@ -337,17 +337,17 @@ static const struct attribute_group generic_uncore_format_group = {
     .attrs = generic_uncore_formats_attr,
 };

-static void intel_generic_uncore_msr_init_box(struct intel_uncore_box *box)
+void intel_generic_uncore_msr_init_box(struct intel_uncore_box *box)
 {
     wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_INT);
 }

-static void intel_generic_uncore_msr_disable_box(struct intel_uncore_box *box)
+void intel_generic_uncore_msr_disable_box(struct intel_uncore_box *box)
 {
     wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ);
 }

-static void intel_generic_uncore_msr_enable_box(struct intel_uncore_box *box)
+void intel_generic_uncore_msr_enable_box(struct intel_uncore_box *box)
 {
     wrmsrl(uncore_msr_box_ctl(box), 0);
 }
@@ -377,7 +377,7 @@ static struct intel_uncore_ops generic_uncore_msr_ops = {
     .read_counter = uncore_msr_read_counter,
 };

-static void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box)
+void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box)
 {
     struct pci_dev *pdev = box->pci_dev;
     int box_ctl = uncore_pci_box_ctl(box);
@@ -386,7 +386,7 @@ static void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box)
     pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_INT);
 }

-static void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box)
+void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box)
 {
     struct pci_dev *pdev = box->pci_dev;
     int box_ctl = uncore_pci_box_ctl(box);
@@ -394,7 +394,7 @@ static void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box)
     pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_FRZ);
 }

-static void intel_generic_uncore_pci_enable_box(struct intel_uncore_box *box)
+void intel_generic_uncore_pci_enable_box(struct intel_uncore_box *box)
 {
     struct pci_dev *pdev = box->pci_dev;
     int box_ctl = uncore_pci_box_ctl(box);
@@ -411,8 +411,8 @@ static void intel_generic_uncore_pci_enable_event(struct intel_uncore_box *box,
     pci_write_config_dword(pdev, hwc->config_base, hwc->config);
 }

-static void intel_generic_uncore_pci_disable_event(struct intel_uncore_box *box,
-                                                   struct perf_event *event)
+void intel_generic_uncore_pci_disable_event(struct intel_uncore_box *box,
+                                            struct perf_event *event)
 {
     struct pci_dev *pdev = box->pci_dev;
     struct hw_perf_event *hwc = &event->hw;
@@ -420,8 +420,8 @@ static void intel_generic_uncore_pci_disable_event(struct intel_uncore_box *box,
     pci_write_config_dword(pdev, hwc->config_base, 0);
 }

-static u64 intel_generic_uncore_pci_read_counter(struct intel_uncore_box *box,
-                                                 struct perf_event *event)
+u64 intel_generic_uncore_pci_read_counter(struct intel_uncore_box *box,
+                                          struct perf_event *event)
 {
     struct pci_dev *pdev = box->pci_dev;
     struct hw_perf_event *hwc = &event->hw;
@@ -454,7 +454,7 @@ static unsigned int generic_uncore_mmio_box_ctl(struct intel_uncore_box *box)
     return type->box_ctls[box->dieid] + type->mmio_offsets[box->pmu->pmu_idx];
 }

-static void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box)
+void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box)
 {
     unsigned int box_ctl = generic_uncore_mmio_box_ctl(box);
     struct intel_uncore_type *type = box->pmu->type;
@@ -478,7 +478,7 @@ static void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box)
     writel(GENERIC_PMON_BOX_CTL_INT, box->io_addr);
 }

-static void intel_generic_uncore_mmio_disable_box(struct intel_uncore_box *box)
+void intel_generic_uncore_mmio_disable_box(struct intel_uncore_box *box)
 {
     if (!box->io_addr)
         return;
@@ -486,7 +486,7 @@ static void intel_generic_uncore_mmio_disable_box(struct intel_uncore_box *box)
     writel(GENERIC_PMON_BOX_CTL_FRZ, box->io_addr);
 }

-static void intel_generic_uncore_mmio_enable_box(struct intel_uncore_box *box)
+void intel_generic_uncore_mmio_enable_box(struct intel_uncore_box *box)
 {
     if (!box->io_addr)
         return;
@@ -505,8 +505,8 @@ static void intel_generic_uncore_mmio_enable_event(struct intel_uncore_box *box,
     writel(hwc->config, box->io_addr + hwc->config_base);
 }

-static void intel_generic_uncore_mmio_disable_event(struct intel_uncore_box *box,
-                                                    struct perf_event *event)
+void intel_generic_uncore_mmio_disable_event(struct intel_uncore_box *box,
+                                             struct perf_event *event)
 {
     struct hw_perf_event *hwc = &event->hw;
@@ -568,8 +568,8 @@ static bool uncore_update_uncore_type(enum uncore_access_type type_id,
     return true;
 }

-static struct intel_uncore_type **
-intel_uncore_generic_init_uncores(enum uncore_access_type type_id)
+struct intel_uncore_type **
+intel_uncore_generic_init_uncores(enum uncore_access_type type_id, int num_extra)
 {
     struct intel_uncore_discovery_type *type;
     struct intel_uncore_type **uncores;
@@ -577,7 +577,7 @@ intel_uncore_generic_init_uncores(enum uncore_access_type type_id)
     struct rb_node *node;
     int i = 0;

-    uncores = kcalloc(num_discovered_types[type_id] + 1,
+    uncores = kcalloc(num_discovered_types[type_id] + num_extra + 1,
               sizeof(struct intel_uncore_type *), GFP_KERNEL);
     if (!uncores)
         return empty_uncore;
@@ -606,17 +606,17 @@ intel_uncore_generic_init_uncores(enum uncore_access_type type_id)

 void intel_uncore_generic_uncore_cpu_init(void)
 {
-    uncore_msr_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MSR);
+    uncore_msr_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MSR, 0);
 }

 int intel_uncore_generic_uncore_pci_init(void)
 {
-    uncore_pci_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_PCI);
+    uncore_pci_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_PCI, 0);
     return 0;
 }

 void intel_uncore_generic_uncore_mmio_init(void)
 {
-    uncore_mmio_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MMIO);
+    uncore_mmio_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MMIO, 0);
 }
@@ -129,3 +129,24 @@ void intel_uncore_clear_discovery_tables(void);
 void intel_uncore_generic_uncore_cpu_init(void);
 int intel_uncore_generic_uncore_pci_init(void);
 void intel_uncore_generic_uncore_mmio_init(void);
+
+void intel_generic_uncore_msr_init_box(struct intel_uncore_box *box);
+void intel_generic_uncore_msr_disable_box(struct intel_uncore_box *box);
+void intel_generic_uncore_msr_enable_box(struct intel_uncore_box *box);
+
+void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box);
+void intel_generic_uncore_mmio_disable_box(struct intel_uncore_box *box);
+void intel_generic_uncore_mmio_enable_box(struct intel_uncore_box *box);
+void intel_generic_uncore_mmio_disable_event(struct intel_uncore_box *box,
+                                             struct perf_event *event);
+
+void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box);
+void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box);
+void intel_generic_uncore_pci_enable_box(struct intel_uncore_box *box);
+void intel_generic_uncore_pci_disable_event(struct intel_uncore_box *box,
+                                            struct perf_event *event);
+u64 intel_generic_uncore_pci_read_counter(struct intel_uncore_box *box,
+                                          struct perf_event *event);
+
+struct intel_uncore_type **
+intel_uncore_generic_init_uncores(enum uncore_access_type type_id, int num_extra);
 // SPDX-License-Identifier: GPL-2.0
 /* SandyBridge-EP/IvyTown uncore support */
 #include "uncore.h"
+#include "uncore_discovery.h"

 /* SNB-EP pci bus to socket mapping */
 #define SNBEP_CPUNODEID 0x40
@@ -454,6 +455,17 @@
 #define ICX_NUMBER_IMC_CHN 2
 #define ICX_IMC_MEM_STRIDE  0x4

+/* SPR */
+#define SPR_RAW_EVENT_MASK_EXT 0xffffff
+
+/* SPR CHA */
+#define SPR_CHA_PMON_CTL_TID_EN     (1 << 16)
+#define SPR_CHA_PMON_EVENT_MASK     (SNBEP_PMON_RAW_EVENT_MASK | \
+                                     SPR_CHA_PMON_CTL_TID_EN)
+#define SPR_CHA_PMON_BOX_FILTER_TID 0x3ff
+
+#define SPR_C0_MSR_PMON_BOX_FILTER0 0x200e
+
 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
@@ -466,6 +478,7 @@ DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
 DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
+DEFINE_UNCORE_FORMAT_ATTR(tid_en2, tid_en, "config:16");
 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
 DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
@@ -3838,26 +3851,32 @@ pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
     return ret;
 }

-static int skx_iio_set_mapping(struct intel_uncore_type *type)
-{
-    return pmu_iio_set_mapping(type, &skx_iio_mapping_group);
-}
-
-static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
+static void
+pmu_iio_cleanup_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
 {
-    struct attribute **attr = skx_iio_mapping_group.attrs;
+    struct attribute **attr = ag->attrs;

     if (!attr)
         return;

     for (; *attr; attr++)
         kfree((*attr)->name);
-    kfree(attr_to_ext_attr(*skx_iio_mapping_group.attrs));
-    kfree(skx_iio_mapping_group.attrs);
-    skx_iio_mapping_group.attrs = NULL;
+    kfree(attr_to_ext_attr(*ag->attrs));
+    kfree(ag->attrs);
+    ag->attrs = NULL;
     kfree(type->topology);
 }

+static int skx_iio_set_mapping(struct intel_uncore_type *type)
+{
+    return pmu_iio_set_mapping(type, &skx_iio_mapping_group);
+}
+
+static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
+{
+    pmu_iio_cleanup_mapping(type, &skx_iio_mapping_group);
+}
+
 static struct intel_uncore_type skx_uncore_iio = {
     .name         = "iio",
     .num_counters = 4,
@@ -4501,6 +4520,11 @@ static int snr_iio_set_mapping(struct intel_uncore_type *type)
     return pmu_iio_set_mapping(type, &snr_iio_mapping_group);
 }

+static void snr_iio_cleanup_mapping(struct intel_uncore_type *type)
+{
+    pmu_iio_cleanup_mapping(type, &snr_iio_mapping_group);
+}
+
 static struct intel_uncore_type snr_uncore_iio = {
     .name         = "iio",
     .num_counters = 4,
@@ -4517,7 +4541,7 @@ static struct intel_uncore_type snr_uncore_iio = {
     .attr_update     = snr_iio_attr_update,
     .get_topology    = snr_iio_get_topology,
     .set_mapping     = snr_iio_set_mapping,
-    .cleanup_mapping = skx_iio_cleanup_mapping,
+    .cleanup_mapping = snr_iio_cleanup_mapping,
 };

 static struct intel_uncore_type snr_uncore_irp = {
@@ -4783,13 +4807,15 @@ int snr_uncore_pci_init(void)
     return 0;
 }

-static struct pci_dev *snr_uncore_get_mc_dev(int id)
+#define SNR_MC_DEVICE_ID 0x3451
+
+static struct pci_dev *snr_uncore_get_mc_dev(unsigned int device, int id)
 {
     struct pci_dev *mc_dev = NULL;
     int pkg;

     while (1) {
-        mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3451, mc_dev);
+        mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, mc_dev);
         if (!mc_dev)
             break;
         pkg = uncore_pcibus_to_dieid(mc_dev->bus);
@@ -4799,16 +4825,17 @@ static struct pci_dev *snr_uncore_get_mc_dev(int id)
     return mc_dev;
 }

-static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
-                                       unsigned int box_ctl, int mem_offset)
+static int snr_uncore_mmio_map(struct intel_uncore_box *box,
+                               unsigned int box_ctl, int mem_offset,
+                               unsigned int device)
 {
-    struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
+    struct pci_dev *pdev = snr_uncore_get_mc_dev(device, box->dieid);
     struct intel_uncore_type *type = box->pmu->type;
     resource_size_t addr;
     u32 pci_dword;

     if (!pdev)
-        return;
+        return -ENODEV;

     pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
     addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
@@ -4821,16 +4848,25 @@ static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
     box->io_addr = ioremap(addr, type->mmio_map_size);
     if (!box->io_addr) {
         pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
-        return;
+        return -EINVAL;
     }

-    writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
+    return 0;
+}
+
+static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
+                                       unsigned int box_ctl, int mem_offset,
+                                       unsigned int device)
+{
+    if (!snr_uncore_mmio_map(box, box_ctl, mem_offset, device))
+        writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
 }

 static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
 {
     __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
-                               SNR_IMC_MMIO_MEM0_OFFSET);
+                               SNR_IMC_MMIO_MEM0_OFFSET,
+                               SNR_MC_DEVICE_ID);
 }

 static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
@@ -5092,6 +5128,11 @@ static int icx_iio_set_mapping(struct intel_uncore_type *type)
     return pmu_iio_set_mapping(type, &icx_iio_mapping_group);
 }

+static void icx_iio_cleanup_mapping(struct intel_uncore_type *type)
+{
+    pmu_iio_cleanup_mapping(type, &icx_iio_mapping_group);
+}
+
 static struct intel_uncore_type icx_uncore_iio = {
     .name         = "iio",
     .num_counters = 4,
@@ -5109,7 +5150,7 @@ static struct intel_uncore_type icx_uncore_iio = {
     .attr_update     = icx_iio_attr_update,
     .get_topology    = icx_iio_get_topology,
     .set_mapping     = icx_iio_set_mapping,
-    .cleanup_mapping = skx_iio_cleanup_mapping,
+    .cleanup_mapping = icx_iio_cleanup_mapping,
 };

 static struct intel_uncore_type icx_uncore_irp = {
@@ -5405,7 +5446,8 @@ static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
     int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
                      SNR_IMC_MMIO_MEM0_OFFSET;

-    __snr_uncore_mmio_init_box(box, box_ctl, mem_offset);
+    __snr_uncore_mmio_init_box(box, box_ctl, mem_offset,
+                               SNR_MC_DEVICE_ID);
 }

 static struct intel_uncore_ops icx_uncore_mmio_ops = {
@@ -5475,7 +5517,8 @@ static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
     int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
                      SNR_IMC_MMIO_MEM0_OFFSET;

-    __snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box), mem_offset);
+    snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
+                        mem_offset, SNR_MC_DEVICE_ID);
 }

 static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
@@ -5509,3 +5552,497 @@ void icx_uncore_mmio_init(void)
 }

 /* end of ICX uncore support */
/* SPR uncore support */

static void spr_uncore_msr_enable_event(struct intel_uncore_box *box,
                                        struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;
    struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

    if (reg1->idx != EXTRA_REG_NONE)
        wrmsrl(reg1->reg, reg1->config);

    wrmsrl(hwc->config_base, hwc->config);
}

static void spr_uncore_msr_disable_event(struct intel_uncore_box *box,
                                         struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;
    struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

    if (reg1->idx != EXTRA_REG_NONE)
        wrmsrl(reg1->reg, 0);

    wrmsrl(hwc->config_base, 0);
}

static int spr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
    struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
    bool tie_en = !!(event->hw.config & SPR_CHA_PMON_CTL_TID_EN);
    struct intel_uncore_type *type = box->pmu->type;

    if (tie_en) {
        reg1->reg = SPR_C0_MSR_PMON_BOX_FILTER0 +
                    HSWEP_CBO_MSR_OFFSET * type->box_ids[box->pmu->pmu_idx];
        reg1->config = event->attr.config1 & SPR_CHA_PMON_BOX_FILTER_TID;
        reg1->idx = 0;
    }

    return 0;
}

static struct intel_uncore_ops spr_uncore_chabox_ops = {
    .init_box       = intel_generic_uncore_msr_init_box,
    .disable_box    = intel_generic_uncore_msr_disable_box,
    .enable_box     = intel_generic_uncore_msr_enable_box,
    .disable_event  = spr_uncore_msr_disable_event,
    .enable_event   = spr_uncore_msr_enable_event,
    .read_counter   = uncore_msr_read_counter,
    .hw_config      = spr_cha_hw_config,
    .get_constraint = uncore_get_constraint,
    .put_constraint = uncore_put_constraint,
};

static struct attribute *spr_uncore_cha_formats_attr[] = {
    &format_attr_event.attr,
    &format_attr_umask_ext4.attr,
    &format_attr_tid_en2.attr,
    &format_attr_edge.attr,
    &format_attr_inv.attr,
    &format_attr_thresh8.attr,
    &format_attr_filter_tid5.attr,
    NULL,
};

static const struct attribute_group spr_uncore_chabox_format_group = {
    .name = "format",
    .attrs = spr_uncore_cha_formats_attr,
};

static ssize_t alias_show(struct device *dev,
                          struct device_attribute *attr,
                          char *buf)
{
    struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
    char pmu_name[UNCORE_PMU_NAME_LEN];

    uncore_get_alias_name(pmu_name, pmu);
    return sysfs_emit(buf, "%s\n", pmu_name);
}

static DEVICE_ATTR_RO(alias);

static struct attribute *uncore_alias_attrs[] = {
    &dev_attr_alias.attr,
    NULL
};

ATTRIBUTE_GROUPS(uncore_alias);

static struct intel_uncore_type spr_uncore_chabox = {
    .name            = "cha",
    .event_mask      = SPR_CHA_PMON_EVENT_MASK,
    .event_mask_ext  = SPR_RAW_EVENT_MASK_EXT,
    .num_shared_regs = 1,
    .ops             = &spr_uncore_chabox_ops,
    .format_group    = &spr_uncore_chabox_format_group,
    .attr_update     = uncore_alias_groups,
};

static struct intel_uncore_type spr_uncore_iio = {
    .name           = "iio",
    .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
    .event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
    .format_group   = &snr_uncore_iio_format_group,
    .attr_update    = uncore_alias_groups,
};

static struct attribute *spr_uncore_raw_formats_attr[] = {
    &format_attr_event.attr,
    &format_attr_umask_ext4.attr,
    &format_attr_edge.attr,
    &format_attr_inv.attr,
    &format_attr_thresh8.attr,
    NULL,
};

static const struct attribute_group spr_uncore_raw_format_group = {
    .name = "format",
    .attrs = spr_uncore_raw_formats_attr,
};

#define SPR_UNCORE_COMMON_FORMAT()                          \
    .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,            \
    .event_mask_ext = SPR_RAW_EVENT_MASK_EXT,               \
    .format_group   = &spr_uncore_raw_format_group,         \
    .attr_update    = uncore_alias_groups

static struct intel_uncore_type spr_uncore_irp = {
    SPR_UNCORE_COMMON_FORMAT(),
    .name = "irp",
};

static struct intel_uncore_type spr_uncore_m2pcie = {
    SPR_UNCORE_COMMON_FORMAT(),
    .name = "m2pcie",
};

static struct intel_uncore_type spr_uncore_pcu = {
    .name        = "pcu",
    .attr_update = uncore_alias_groups,
};

static void spr_uncore_mmio_enable_event(struct intel_uncore_box *box,
                                         struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;

    if (!box->io_addr)
        return;

    if (uncore_pmc_fixed(hwc->idx))
        writel(SNBEP_PMON_CTL_EN, box->io_addr + hwc->config_base);
    else
        writel(hwc->config, box->io_addr + hwc->config_base);
}

static struct intel_uncore_ops spr_uncore_mmio_ops = {
    .init_box      = intel_generic_uncore_mmio_init_box,
    .exit_box      = uncore_mmio_exit_box,
    .disable_box   = intel_generic_uncore_mmio_disable_box,
    .enable_box    = intel_generic_uncore_mmio_enable_box,
    .disable_event = intel_generic_uncore_mmio_disable_event,
    .enable_event  = spr_uncore_mmio_enable_event,
    .read_counter  = uncore_mmio_read_counter,
};

static struct intel_uncore_type spr_uncore_imc = {
    SPR_UNCORE_COMMON_FORMAT(),
    .name           = "imc",
    .fixed_ctr_bits = 48,
    .fixed_ctr      = SNR_IMC_MMIO_PMON_FIXED_CTR,
    .fixed_ctl      = SNR_IMC_MMIO_PMON_FIXED_CTL,
    .ops            = &spr_uncore_mmio_ops,
};

static void spr_uncore_pci_enable_event(struct intel_uncore_box *box,
                                        struct perf_event *event)
{
    struct pci_dev *pdev = box->pci_dev;
    struct hw_perf_event *hwc = &event->hw;

    pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
    pci_write_config_dword(pdev, hwc->config_base, (u32)hwc->config);
}

static struct intel_uncore_ops spr_uncore_pci_ops = {
    .init_box      = intel_generic_uncore_pci_init_box,
    .disable_box   = intel_generic_uncore_pci_disable_box,
    .enable_box    = intel_generic_uncore_pci_enable_box,
    .disable_event = intel_generic_uncore_pci_disable_event,
    .enable_event  = spr_uncore_pci_enable_event,
    .read_counter  = intel_generic_uncore_pci_read_counter,
};

#define SPR_UNCORE_PCI_COMMON_FORMAT()      \
    SPR_UNCORE_COMMON_FORMAT(),             \
    .ops = &spr_uncore_pci_ops

static struct intel_uncore_type spr_uncore_m2m = {
    SPR_UNCORE_PCI_COMMON_FORMAT(),
    .name = "m2m",
};

static struct intel_uncore_type spr_uncore_upi = {
    SPR_UNCORE_PCI_COMMON_FORMAT(),
    .name = "upi",
};

static struct intel_uncore_type spr_uncore_m3upi = {
    SPR_UNCORE_PCI_COMMON_FORMAT(),
    .name = "m3upi",
};

static struct intel_uncore_type spr_uncore_mdf = {
    SPR_UNCORE_COMMON_FORMAT(),
    .name = "mdf",
};

#define UNCORE_SPR_NUM_UNCORE_TYPES 12
#define UNCORE_SPR_IIO              1
#define UNCORE_SPR_IMC              6

static struct intel_uncore_type *spr_uncores[UNCORE_SPR_NUM_UNCORE_TYPES] = {
    &spr_uncore_chabox,
    &spr_uncore_iio,
    &spr_uncore_irp,
    &spr_uncore_m2pcie,
    &spr_uncore_pcu,
    NULL,
    &spr_uncore_imc,
    &spr_uncore_m2m,
    &spr_uncore_upi,
    &spr_uncore_m3upi,
    NULL,
    &spr_uncore_mdf,
};

enum perf_uncore_spr_iio_freerunning_type_id {
    SPR_IIO_MSR_IOCLK,
    SPR_IIO_MSR_BW_IN,
    SPR_IIO_MSR_BW_OUT,

    SPR_IIO_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters spr_iio_freerunning[] = {
    [SPR_IIO_MSR_IOCLK]  = { 0x340e, 0x1, 0x10, 1, 48 },
    [SPR_IIO_MSR_BW_IN]  = { 0x3800, 0x1, 0x10, 8, 48 },
    [SPR_IIO_MSR_BW_OUT] = { 0x3808, 0x1, 0x10, 8, 48 },
};

static struct uncore_event_desc spr_uncore_iio_freerunning_events[] = {
    /* Free-Running IIO CLOCKS Counter */
    INTEL_UNCORE_EVENT_DESC(ioclk,             "event=0xff,umask=0x10"),
    /* Free-Running IIO BANDWIDTH IN Counters */
    INTEL_UNCORE_EVENT_DESC(bw_in_port0,       "event=0xff,umask=0x20"),
    INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
    INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,  "MiB"),
    INTEL_UNCORE_EVENT_DESC(bw_in_port1,       "event=0xff,umask=0x21"),
    INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
    INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,  "MiB"),
    INTEL_UNCORE_EVENT_DESC(bw_in_port2,       "event=0xff,umask=0x22"),
    INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
    INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,  "MiB"),
    INTEL_UNCORE_EVENT_DESC(bw_in_port3,       "event=0xff,umask=0x23"),
    INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
    INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,  "MiB"),
    INTEL_UNCORE_EVENT_DESC(bw_in_port4,       "event=0xff,umask=0x24"),
    INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
    INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,  "MiB"),
    INTEL_UNCORE_EVENT_DESC(bw_in_port5,       "event=0xff,umask=0x25"),
    INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
    INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,  "MiB"),
    INTEL_UNCORE_EVENT_DESC(bw_in_port6,       "event=0xff,umask=0x26"),
    INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
    INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,  "MiB"),
    INTEL_UNCORE_EVENT_DESC(bw_in_port7,       "event=0xff,umask=0x27"),
    INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
    INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,  "MiB"),
    /* Free-Running IIO BANDWIDTH OUT Counters */
    INTEL_UNCORE_EVENT_DESC(bw_out_port0,       "event=0xff,umask=0x30"),
    INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale, "3.814697266e-6"),
    INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,  "MiB"),
    INTEL_UNCORE_EVENT_DESC(bw_out_port1,       "event=0xff,umask=0x31"),
    INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale, "3.814697266e-6"),
    INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,  "MiB"),
    INTEL_UNCORE_EVENT_DESC(bw_out_port2,       "event=0xff,umask=0x32"),
    INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale, "3.814697266e-6"),
    INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,  "MiB"),
    INTEL_UNCORE_EVENT_DESC(bw_out_port3,       "event=0xff,umask=0x33"),
    INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale, "3.814697266e-6"),
    INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,  "MiB"),
    INTEL_UNCORE_EVENT_DESC(bw_out_port4,       "event=0xff,umask=0x34"),
    INTEL_UNCORE_EVENT_DESC(bw_out_port4.scale, "3.814697266e-6"),
    INTEL_UNCORE_EVENT_DESC(bw_out_port4.unit,  "MiB"),
    INTEL_UNCORE_EVENT_DESC(bw_out_port5,       "event=0xff,umask=0x35"),
    INTEL_UNCORE_EVENT_DESC(bw_out_port5.scale, "3.814697266e-6"),
    INTEL_UNCORE_EVENT_DESC(bw_out_port5.unit,  "MiB"),
    INTEL_UNCORE_EVENT_DESC(bw_out_port6,       "event=0xff,umask=0x36"),
    INTEL_UNCORE_EVENT_DESC(bw_out_port6.scale, "3.814697266e-6"),
    INTEL_UNCORE_EVENT_DESC(bw_out_port6.unit,  "MiB"),
    INTEL_UNCORE_EVENT_DESC(bw_out_port7,       "event=0xff,umask=0x37"),
    INTEL_UNCORE_EVENT_DESC(bw_out_port7.scale, "3.814697266e-6"),
    INTEL_UNCORE_EVENT_DESC(bw_out_port7.unit,  "MiB"),
    { /* end: all zeroes */ },
};

static struct intel_uncore_type spr_uncore_iio_free_running = {
    .name                  = "iio_free_running",
    .num_counters          = 17,
    .num_freerunning_types = SPR_IIO_FREERUNNING_TYPE_MAX,
    .freerunning           = spr_iio_freerunning,
    .ops                   = &skx_uncore_iio_freerunning_ops,
    .event_descs           = spr_uncore_iio_freerunning_events,
    .format_group          = &skx_uncore_iio_freerunning_format_group,
};

enum perf_uncore_spr_imc_freerunning_type_id {
    SPR_IMC_DCLK,
    SPR_IMC_PQ_CYCLES,

    SPR_IMC_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters spr_imc_freerunning[] = {
    [SPR_IMC_DCLK]      = { 0x22b0, 0x0, 0, 1, 48 },
    [SPR_IMC_PQ_CYCLES] = { 0x2318, 0x8, 0, 2, 48 },
};

static struct uncore_event_desc spr_uncore_imc_freerunning_events[] = {
    INTEL_UNCORE_EVENT_DESC(dclk,       "event=0xff,umask=0x10"),

    INTEL_UNCORE_EVENT_DESC(rpq_cycles, "event=0xff,umask=0x20"),
    INTEL_UNCORE_EVENT_DESC(wpq_cycles, "event=0xff,umask=0x21"),
    { /* end: all zeroes */ },
};

#define SPR_MC_DEVICE_ID 0x3251

static void spr_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
    int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE + SNR_IMC_MMIO_MEM0_OFFSET;

    snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
                        mem_offset, SPR_MC_DEVICE_ID);
}

static struct intel_uncore_ops spr_uncore_imc_freerunning_ops = {
    .init_box     = spr_uncore_imc_freerunning_init_box,
    .exit_box     = uncore_mmio_exit_box,
    .read_counter = uncore_mmio_read_counter,
    .hw_config    = uncore_freerunning_hw_config,
};

static struct intel_uncore_type spr_uncore_imc_free_running = {
    .name                  = "imc_free_running",
    .num_counters          = 3,
    .mmio_map_size         = SNR_IMC_MMIO_SIZE,
    .num_freerunning_types = SPR_IMC_FREERUNNING_TYPE_MAX,
    .freerunning           = spr_imc_freerunning,
    .ops                   = &spr_uncore_imc_freerunning_ops,
    .event_descs           = spr_uncore_imc_freerunning_events,
    .format_group          = &skx_uncore_iio_freerunning_format_group,
};

#define UNCORE_SPR_MSR_EXTRA_UNCORES  1
#define UNCORE_SPR_MMIO_EXTRA_UNCORES 1

static struct intel_uncore_type *spr_msr_uncores[UNCORE_SPR_MSR_EXTRA_UNCORES] = {
    &spr_uncore_iio_free_running,
};

static struct intel_uncore_type *spr_mmio_uncores[UNCORE_SPR_MMIO_EXTRA_UNCORES] = {
    &spr_uncore_imc_free_running,
};

static void uncore_type_customized_copy(struct intel_uncore_type *to_type,
                                        struct intel_uncore_type *from_type)
{
    if (!to_type || !from_type)
        return;

    if (from_type->name)
        to_type->name = from_type->name;
    if (from_type->fixed_ctr_bits)
        to_type->fixed_ctr_bits = from_type->fixed_ctr_bits;
    if (from_type->event_mask)
        to_type->event_mask = from_type->event_mask;
    if (from_type->event_mask_ext)
        to_type->event_mask_ext = from_type->event_mask_ext;
    if (from_type->fixed_ctr)
        to_type->fixed_ctr = from_type->fixed_ctr;
    if (from_type->fixed_ctl)
        to_type->fixed_ctl = from_type->fixed_ctl;
    if (from_type->fixed_ctr_bits)
        to_type->fixed_ctr_bits = from_type->fixed_ctr_bits;
    if (from_type->num_shared_regs)
        to_type->num_shared_regs = from_type->num_shared_regs;
    if (from_type->constraints)
        to_type->constraints = from_type->constraints;
    if (from_type->ops)
        to_type->ops = from_type->ops;
    if (from_type->event_descs)
        to_type->event_descs = from_type->event_descs;
    if (from_type->format_group)
        to_type->format_group = from_type->format_group;
    if (from_type->attr_update)
        to_type->attr_update = from_type->attr_update;
}

static struct intel_uncore_type **
uncore_get_uncores(enum uncore_access_type type_id, int num_extra,
                   struct intel_uncore_type **extra)
{
    struct intel_uncore_type **types, **start_types;
    int i;

    start_types = types = intel_uncore_generic_init_uncores(type_id, num_extra);

    /* Only copy the customized features */
    for (; *types; types++) {
        if ((*types)->type_id >= UNCORE_SPR_NUM_UNCORE_TYPES)
            continue;
        uncore_type_customized_copy(*types, spr_uncores[(*types)->type_id]);
    }

    for (i = 0; i < num_extra; i++, types++)
        *types = extra[i];

    return start_types;
}

static struct intel_uncore_type *
uncore_find_type_by_id(struct intel_uncore_type **types, int type_id)
{
    for (; *types; types++) {
        if (type_id == (*types)->type_id)
            return *types;
    }

    return NULL;
}

static int uncore_type_max_boxes(struct intel_uncore_type **types,
                                 int type_id)
{
    struct intel_uncore_type *type;
    int i, max = 0;

    type = uncore_find_type_by_id(types, type_id);
    if (!type)
        return 0;

    for (i = 0; i < type->num_boxes; i++) {
        if (type->box_ids[i] > max)
            max = type->box_ids[i];
    }

    return max + 1;
}

void spr_uncore_cpu_init(void)
{
    uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
                                            UNCORE_SPR_MSR_EXTRA_UNCORES,
                                            spr_msr_uncores);

    spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
}

int spr_uncore_pci_init(void)
{
    uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, 0, NULL);
    return 0;
}

void spr_uncore_mmio_init(void)
{
    int ret = snbep_pci2phy_map_init(0x3250, SKX_CPUNODEID, SKX_GIDNIDMAP, true);

    if (ret)
        uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL);
    else {
        uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO,
                                                 UNCORE_SPR_MMIO_EXTRA_UNCORES,
                                                 spr_mmio_uncores);
        spr_uncore_imc_free_running.num_boxes = uncore_type_max_boxes(uncore_mmio_uncores, UNCORE_SPR_IMC) / 2;
    }
}

/* end of SPR uncore support */
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * From PPR Vol 1 for AMD Family 19h Model 01h B1
 * 55898 Rev 0.35 - Feb 5, 2021
 */

#include <asm/msr-index.h>

/*
 * IBS Hardware MSRs
 */

/* MSR 0xc0011030: IBS Fetch Control */
union ibs_fetch_ctl {
    __u64 val;
    struct {
        __u64 fetch_maxcnt:16,  /* 0-15: instruction fetch max. count */
              fetch_cnt:16,     /* 16-31: instruction fetch count */
              fetch_lat:16,     /* 32-47: instruction fetch latency */
              fetch_en:1,       /* 48: instruction fetch enable */
              fetch_val:1,      /* 49: instruction fetch valid */
              fetch_comp:1,     /* 50: instruction fetch complete */
              ic_miss:1,        /* 51: i-cache miss */
              phy_addr_valid:1, /* 52: physical address valid */
              l1tlb_pgsz:2,     /* 53-54: i-cache L1TLB page size
                                 *        (needs IbsPhyAddrValid) */
              l1tlb_miss:1,     /* 55: i-cache fetch missed in L1TLB */
              l2tlb_miss:1,     /* 56: i-cache fetch missed in L2TLB */
              rand_en:1,        /* 57: random tagging enable */
              fetch_l2_miss:1,  /* 58: L2 miss for sampled fetch
                                 *     (needs IbsFetchComp) */
              reserved:5;       /* 59-63: reserved */
    };
};

/* MSR 0xc0011033: IBS Execution Control */
union ibs_op_ctl {
    __u64 val;
    struct {
        __u64 opmaxcnt:16,    /* 0-15: periodic op max. count */
              reserved0:1,    /* 16: reserved */
              op_en:1,        /* 17: op sampling enable */
              op_val:1,       /* 18: op sample valid */
              cnt_ctl:1,      /* 19: periodic op counter control */
              opmaxcnt_ext:7, /* 20-26: upper 7 bits of periodic op maximum count */
              reserved1:5,    /* 27-31: reserved */
              opcurcnt:27,    /* 32-58: periodic op counter current count */
              reserved2:5;    /* 59-63: reserved */
    };
};

/* MSR 0xc0011035: IBS Op Data */
union ibs_op_data {
    __u64 val;
    struct {
        __u64 comp_to_ret_ctr:16, /* 0-15: op completion to retire count */
              tag_to_ret_ctr:16,  /* 16-31: op tag to retire count */
              reserved1:2,        /* 32-33: reserved */
              op_return:1,        /* 34: return op */
              op_brn_taken:1,     /* 35: taken branch op */
              op_brn_misp:1,      /* 36: mispredicted branch op */
              op_brn_ret:1,       /* 37: branch op retired */
              op_rip_invalid:1,   /* 38: RIP is invalid */
              op_brn_fuse:1,      /* 39: fused branch op */
              op_microcode:1,     /* 40: microcode op */
              reserved2:23;       /* 41-63: reserved */
    };
};

/* MSR 0xc0011036: IBS Op Data 2 */
union ibs_op_data2 {
    __u64 val;
    struct {
        __u64 data_src:3,     /* 0-2: data source */
              reserved0:1,    /* 3: reserved */
              rmt_node:1,     /* 4: destination node */
              cache_hit_st:1, /* 5: cache hit state */
              reserved1:58;   /* 6-63: reserved */
    };
};

/* MSR 0xc0011037: IBS Op Data 3 */
union ibs_op_data3 {
    __u64 val;
    struct {
        __u64 ld_op:1,                    /* 0: load op */
              st_op:1,                    /* 1: store op */
              dc_l1tlb_miss:1,            /* 2: data cache L1TLB miss */
              dc_l2tlb_miss:1,            /* 3: data cache L2TLB miss */
              dc_l1tlb_hit_2m:1,          /* 4: data cache L1TLB hit in 2M page */
              dc_l1tlb_hit_1g:1,          /* 5: data cache L1TLB hit in 1G page */
              dc_l2tlb_hit_2m:1,          /* 6: data cache L2TLB hit in 2M page */
              dc_miss:1,                  /* 7: data cache miss */
              dc_mis_acc:1,               /* 8: misaligned access */
              reserved:4,                 /* 9-12: reserved */
              dc_wc_mem_acc:1,            /* 13: write combining memory access */
              dc_uc_mem_acc:1,            /* 14: uncacheable memory access */
              dc_locked_op:1,             /* 15: locked operation */
              dc_miss_no_mab_alloc:1,     /* 16: DC miss with no MAB allocated */
              dc_lin_addr_valid:1,        /* 17: data cache linear address valid */
              dc_phy_addr_valid:1,        /* 18: data cache physical address valid */
              dc_l2_tlb_hit_1g:1,         /* 19: data cache L2 hit in 1GB page */
              l2_miss:1,                  /* 20: L2 cache miss */
              sw_pf:1,                    /* 21: software prefetch */
              op_mem_width:4,             /* 22-25: load/store size in bytes */
              op_dc_miss_open_mem_reqs:6, /* 26-31: outstanding mem reqs on DC fill */
              dc_miss_lat:16,             /* 32-47: data cache miss latency */
              tlb_refill_lat:16;          /* 48-63: L1 TLB refill latency */
    };
};

/* MSR 0xc001103c: IBS Fetch Control Extended */
union ic_ibs_extd_ctl {
    __u64 val;
    struct {
        __u64 itlb_refill_lat:16, /* 0-15: ITLB Refill latency for sampled fetch */
              reserved:48;        /* 16-63: reserved */
    };
};

/*
 * IBS driver related
 */

struct perf_ibs_data {
    u32 size;
    union {
        u32 data[0]; /* data buffer starts here */
        u32 caps;
    };
    u64 regs[MSR_AMD64_IBS_REG_COUNT_MAX];
};
@@ -795,6 +795,8 @@ extern int set_tsc_mode(unsigned int val);

 DECLARE_PER_CPU(u64, msr_misc_features_shadow);

+extern u16 get_llc_id(unsigned int cpu);
+
 #ifdef CONFIG_CPU_SUP_AMD
 extern u32 amd_get_nodes_per_socket(void);
 extern u32 amd_get_highest_perf(void);
...
@@ -438,7 +438,7 @@ static void srat_detect_node(struct cpuinfo_x86 *c)

     node = numa_cpu_node(cpu);
     if (node == NUMA_NO_NODE)
-        node = per_cpu(cpu_llc_id, cpu);
+        node = get_llc_id(cpu);

     /*
      * On multi-fabric platform (e.g. Numascale NumaChip) a
...
@@ -79,6 +79,12 @@ EXPORT_SYMBOL(smp_num_siblings);

 /* Last level cache ID of each logical CPU */
 DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;

+u16 get_llc_id(unsigned int cpu)
+{
+    return per_cpu(cpu_llc_id, cpu);
+}
+EXPORT_SYMBOL_GPL(get_llc_id);
+
 /* correctly size the local cpu masks */
 void __init setup_cpu_local_masks(void)
 {
...
@@ -568,7 +568,7 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
     if (!cpu_events)
         return (void __percpu __force *)ERR_PTR(-ENOMEM);

-    get_online_cpus();
+    cpus_read_lock();
     for_each_online_cpu(cpu) {
         bp = perf_event_create_kernel_counter(attr, cpu, NULL,
                                               triggered, context);
@@ -579,7 +579,7 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,

         per_cpu(*cpu_events, cpu) = bp;
     }
-    put_online_cpus();
+    cpus_read_unlock();

     if (likely(!err))
         return cpu_events;
...