Commit cface032 authored by Alexander Antonov, committed by Peter Zijlstra

perf/x86/intel/uncore: Enable IIO stacks to PMON mapping for multi-segment SKX

IIO stacks to PMON mapping on Skylake servers is exposed through the
previously introduced attributes /sys/devices/uncore_iio_<pmu_idx>/dieX,
where dieX is a file that holds "Segment:Root Bus" for the PCIe root port
which can be monitored by that IIO PMON block. These sysfs attributes are
disabled for multi-segment topologies, except for VMD domains, which start
at 0x10000. This patch removes that limitation and enables IIO stacks to
PMON mapping for multi-segment Skylake servers by introducing a
segment-aware intel_uncore_topology structure and attributing the topology
configuration to the segment in skx_iio_get_topology(). (An illustrative
sketch of how the dieX attributes can be consumed follows the tags below.)
Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Alexander Antonov <alexander.antonov@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Tested-by: Kyle Meyer <kyle.meyer@hpe.com>
Link: https://lkml.kernel.org/r/20210323150507.2013-1-alexander.antonov@linux.intel.com
parent c4c55e36
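
For illustration only (not part of the patch), a minimal userspace sketch of how the resulting mapping attribute might be consumed. The path below assumes PMU index 0 and die 0 as one example instance of /sys/devices/uncore_iio_<pmu_idx>/dieX; the kernel writes the attribute as "%04x:%02x\n" (segment, then root bus).

/*
 * Hypothetical example: read the "Segment:Root Bus" mapping exposed by
 * /sys/devices/uncore_iio_<pmu_idx>/dieX and split it into its two fields.
 * The concrete path (pmu index 0, die 0) is an assumed example.
 */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/devices/uncore_iio_0/die0", "r");
        unsigned int segment, root_bus;

        if (!f) {
                perror("fopen");
                return 1;
        }

        /* The attribute is printed by the kernel as "%04x:%02x\n". */
        if (fscanf(f, "%x:%x", &segment, &root_bus) == 2)
                printf("IIO PMON 0 on die 0 monitors PCIe root bus %04x:%02x\n",
                       segment, root_bus);

        fclose(f);
        return 0;
}
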
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -53,6 +53,18 @@ int uncore_pcibus_to_dieid(struct pci_bus *bus)
 	return die_id;
 }
 
+int uncore_die_to_segment(int die)
+{
+	struct pci_bus *bus = NULL;
+
+	/* Find first pci bus which attributes to specified die. */
+	while ((bus = pci_find_next_bus(bus)) &&
+	       (die != uncore_pcibus_to_dieid(bus)))
+		;
+
+	return bus ? pci_domain_nr(bus) : -EINVAL;
+}
+
 static void uncore_free_pcibus_map(void)
 {
 	struct pci2phy_map *map, *tmp;
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -42,6 +42,7 @@ struct intel_uncore_pmu;
 struct intel_uncore_box;
 struct uncore_event_desc;
 struct freerunning_counters;
+struct intel_uncore_topology;
 
 struct intel_uncore_type {
 	const char *name;
@@ -87,7 +88,7 @@ struct intel_uncore_type {
 	 * to identify which platform component each PMON block of that type is
 	 * supposed to monitor.
 	 */
-	u64 *topology;
+	struct intel_uncore_topology *topology;
 	/*
 	 * Optional callbacks for managing mapping of Uncore units to PMONs
 	 */
@@ -176,6 +177,11 @@ struct freerunning_counters {
 	unsigned *box_offsets;
 };
 
+struct intel_uncore_topology {
+	u64 configuration;
+	int segment;
+};
+
 struct pci2phy_map {
 	struct list_head list;
 	int segment;
@@ -184,6 +190,7 @@ struct pci2phy_map {
 
 struct pci2phy_map *__find_pci2phy_map(int segment);
 int uncore_pcibus_to_dieid(struct pci_bus *bus);
+int uncore_die_to_segment(int die);
 
 ssize_t uncore_event_show(struct device *dev,
 			  struct device_attribute *attr, char *buf);
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -3684,7 +3684,8 @@ static struct intel_uncore_ops skx_uncore_iio_ops = {
 
 static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die)
 {
-	return pmu->type->topology[die] >> (pmu->pmu_idx * BUS_NUM_STRIDE);
+	return pmu->type->topology[die].configuration >>
+	       (pmu->pmu_idx * BUS_NUM_STRIDE);
 }
 
 static umode_t
@@ -3697,19 +3698,14 @@ skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
 }
 
 static ssize_t skx_iio_mapping_show(struct device *dev,
 				    struct device_attribute *attr, char *buf)
 {
-	struct pci_bus *bus = pci_find_next_bus(NULL);
-	struct intel_uncore_pmu *uncore_pmu = dev_to_uncore_pmu(dev);
+	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
 	struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
 	long die = (long)ea->var;
 
-	/*
-	 * Current implementation is for single segment configuration hence it's
-	 * safe to take the segment value from the first available root bus.
-	 */
-	return sprintf(buf, "%04x:%02x\n", pci_domain_nr(bus),
-		       skx_iio_stack(uncore_pmu, die));
+	return sprintf(buf, "%04x:%02x\n", pmu->type->topology[die].segment,
+		       skx_iio_stack(pmu, die));
 }
 
 static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
@@ -3746,34 +3742,32 @@ static int die_to_cpu(int die)
 
 static int skx_iio_get_topology(struct intel_uncore_type *type)
 {
-	int i, ret;
-	struct pci_bus *bus = NULL;
+	int die, ret = -EPERM;
 
-	/*
-	 * Verified single-segment environments only; disabled for multiple
-	 * segment topologies for now except VMD domains.
-	 * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
-	 */
-	while ((bus = pci_find_next_bus(bus))
-		&& (!pci_domain_nr(bus) || pci_domain_nr(bus) > 0xffff))
-		;
-	if (bus)
-		return -EPERM;
-
-	type->topology = kcalloc(uncore_max_dies(), sizeof(u64), GFP_KERNEL);
+	type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology),
+				 GFP_KERNEL);
 	if (!type->topology)
 		return -ENOMEM;
 
-	for (i = 0; i < uncore_max_dies(); i++) {
-		ret = skx_msr_cpu_bus_read(die_to_cpu(i), &type->topology[i]);
-		if (ret) {
-			kfree(type->topology);
-			type->topology = NULL;
-			return ret;
-		}
+	for (die = 0; die < uncore_max_dies(); die++) {
+		ret = skx_msr_cpu_bus_read(die_to_cpu(die),
					   &type->topology[die].configuration);
+		if (ret)
+			break;
+
+		ret = uncore_die_to_segment(die);
+		if (ret < 0)
+			break;
+
+		type->topology[die].segment = ret;
 	}
 
-	return 0;
+	if (ret < 0) {
+		kfree(type->topology);
+		type->topology = NULL;
+	}
+
+	return ret;
 }
 
 static struct attribute_group skx_iio_mapping_group = {
@@ -3794,7 +3788,7 @@ static int skx_iio_set_mapping(struct intel_uncore_type *type)
 	struct dev_ext_attribute *eas = NULL;
 
 	ret = skx_iio_get_topology(type);
-	if (ret)
+	if (ret < 0)
 		goto clear_attr_update;
 
 	ret = -ENOMEM;
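
As a side note on the decoding above, a minimal standalone sketch of how a per-die topology entry is interpreted, mirroring skx_iio_stack() and skx_iio_mapping_show(). The stride value of 8 bits per stack and all sample values are assumptions for illustration; this is not kernel code.

/*
 * Standalone sketch: decode a packed per-die IIO topology entry.
 * Each IIO stack's root bus number occupies BUS_NUM_STRIDE bits of the
 * 64-bit configuration value, and the segment comes from the new
 * intel_uncore_topology::segment field. All values below are made up.
 */
#include <stdio.h>
#include <stdint.h>

#define BUS_NUM_STRIDE 8        /* assumed bits per stack, as in skx_iio_stack() */

struct example_topology {
        uint64_t configuration; /* packed root bus numbers, one byte per IIO stack */
        int segment;            /* PCI segment (domain) the die belongs to */
};

static uint8_t example_stack_bus(const struct example_topology *t, int pmu_idx)
{
        /* Same shift-and-truncate as skx_iio_stack(): keep the low 8 bits. */
        return t->configuration >> (pmu_idx * BUS_NUM_STRIDE);
}

int main(void)
{
        /* Made-up die: stacks on buses 0x00, 0x17, 0x3a, 0x5d, 0x80, 0x85 in segment 1. */
        struct example_topology die0 = {
                .configuration = 0x85805d3a1700ULL,
                .segment = 0x0001,
        };

        for (int pmu_idx = 0; pmu_idx < 6; pmu_idx++)
                printf("uncore_iio_%d/die0 -> %04x:%02x\n", pmu_idx,
                       (unsigned int)die0.segment,
                       example_stack_bus(&die0, pmu_idx));

        return 0;
}
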