Commit b0529b9c authored by Kan Liang, committed by Thomas Gleixner

perf/x86/intel/uncore: Cosmetic renames in response to multi-die/pkg support

Syntax update only -- no logical or functional change.

In response to the new multi-die/package changes, update variable names to
use "die" terminology, instead of "pkg".

For previous platforms which don't have multi-die support, "die" is identical
to "pkg".
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/f0ea5e501288329135e94f51969ff54a03c50e2e.1557769318.git.len.brown@intel.com
parent 835896a5
...@@ -14,7 +14,7 @@ struct pci_driver *uncore_pci_driver; ...@@ -14,7 +14,7 @@ struct pci_driver *uncore_pci_driver;
DEFINE_RAW_SPINLOCK(pci2phy_map_lock); DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head); struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_extra_dev *uncore_extra_pci_dev; struct pci_extra_dev *uncore_extra_pci_dev;
static int max_packages; static int max_dies;
/* mask of cpus that collect uncore events */ /* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask; static cpumask_t uncore_cpu_mask;
...@@ -100,13 +100,13 @@ ssize_t uncore_event_show(struct kobject *kobj, ...@@ -100,13 +100,13 @@ ssize_t uncore_event_show(struct kobject *kobj,
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu) struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{ {
unsigned int pkgid = topology_logical_die_id(cpu); unsigned int dieid = topology_logical_die_id(cpu);
/* /*
* The unsigned check also catches the '-1' return value for non * The unsigned check also catches the '-1' return value for non
* existent mappings in the topology map. * existent mappings in the topology map.
*/ */
return pkgid < max_packages ? pmu->boxes[pkgid] : NULL; return dieid < max_dies ? pmu->boxes[dieid] : NULL;
} }
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event) u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
...@@ -311,7 +311,7 @@ static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, ...@@ -311,7 +311,7 @@ static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
uncore_pmu_init_hrtimer(box); uncore_pmu_init_hrtimer(box);
box->cpu = -1; box->cpu = -1;
box->pci_phys_id = -1; box->pci_phys_id = -1;
box->pkgid = -1; box->dieid = -1;
/* set default hrtimer timeout */ /* set default hrtimer timeout */
box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL; box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;
...@@ -826,10 +826,10 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu) ...@@ -826,10 +826,10 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
static void uncore_free_boxes(struct intel_uncore_pmu *pmu) static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{ {
int pkg; int die;
for (pkg = 0; pkg < max_packages; pkg++) for (die = 0; die < max_dies; die++)
kfree(pmu->boxes[pkg]); kfree(pmu->boxes[die]);
kfree(pmu->boxes); kfree(pmu->boxes);
} }
...@@ -866,7 +866,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid) ...@@ -866,7 +866,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
if (!pmus) if (!pmus)
return -ENOMEM; return -ENOMEM;
size = max_packages * sizeof(struct intel_uncore_box *); size = max_dies * sizeof(struct intel_uncore_box *);
for (i = 0; i < type->num_boxes; i++) { for (i = 0; i < type->num_boxes; i++) {
pmus[i].func_id = setid ? i : -1; pmus[i].func_id = setid ? i : -1;
...@@ -936,21 +936,21 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id ...@@ -936,21 +936,21 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
struct intel_uncore_type *type; struct intel_uncore_type *type;
struct intel_uncore_pmu *pmu = NULL; struct intel_uncore_pmu *pmu = NULL;
struct intel_uncore_box *box; struct intel_uncore_box *box;
int phys_id, pkg, ret; int phys_id, die, ret;
phys_id = uncore_pcibus_to_physid(pdev->bus); phys_id = uncore_pcibus_to_physid(pdev->bus);
if (phys_id < 0) if (phys_id < 0)
return -ENODEV; return -ENODEV;
pkg = (topology_max_die_per_package() > 1) ? phys_id : die = (topology_max_die_per_package() > 1) ? phys_id :
topology_phys_to_logical_pkg(phys_id); topology_phys_to_logical_pkg(phys_id);
if (pkg < 0) if (die < 0)
return -EINVAL; return -EINVAL;
if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) { if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
int idx = UNCORE_PCI_DEV_IDX(id->driver_data); int idx = UNCORE_PCI_DEV_IDX(id->driver_data);
uncore_extra_pci_dev[pkg].dev[idx] = pdev; uncore_extra_pci_dev[die].dev[idx] = pdev;
pci_set_drvdata(pdev, NULL); pci_set_drvdata(pdev, NULL);
return 0; return 0;
} }
...@@ -989,7 +989,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id ...@@ -989,7 +989,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)]; pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
} }
if (WARN_ON_ONCE(pmu->boxes[pkg] != NULL)) if (WARN_ON_ONCE(pmu->boxes[die] != NULL))
return -EINVAL; return -EINVAL;
box = uncore_alloc_box(type, NUMA_NO_NODE); box = uncore_alloc_box(type, NUMA_NO_NODE);
...@@ -1003,13 +1003,13 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id ...@@ -1003,13 +1003,13 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
atomic_inc(&box->refcnt); atomic_inc(&box->refcnt);
box->pci_phys_id = phys_id; box->pci_phys_id = phys_id;
box->pkgid = pkg; box->dieid = die;
box->pci_dev = pdev; box->pci_dev = pdev;
box->pmu = pmu; box->pmu = pmu;
uncore_box_init(box); uncore_box_init(box);
pci_set_drvdata(pdev, box); pci_set_drvdata(pdev, box);
pmu->boxes[pkg] = box; pmu->boxes[die] = box;
if (atomic_inc_return(&pmu->activeboxes) > 1) if (atomic_inc_return(&pmu->activeboxes) > 1)
return 0; return 0;
...@@ -1017,7 +1017,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id ...@@ -1017,7 +1017,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
ret = uncore_pmu_register(pmu); ret = uncore_pmu_register(pmu);
if (ret) { if (ret) {
pci_set_drvdata(pdev, NULL); pci_set_drvdata(pdev, NULL);
pmu->boxes[pkg] = NULL; pmu->boxes[die] = NULL;
uncore_box_exit(box); uncore_box_exit(box);
kfree(box); kfree(box);
} }
...@@ -1028,17 +1028,17 @@ static void uncore_pci_remove(struct pci_dev *pdev) ...@@ -1028,17 +1028,17 @@ static void uncore_pci_remove(struct pci_dev *pdev)
{ {
struct intel_uncore_box *box; struct intel_uncore_box *box;
struct intel_uncore_pmu *pmu; struct intel_uncore_pmu *pmu;
int i, phys_id, pkg; int i, phys_id, die;
phys_id = uncore_pcibus_to_physid(pdev->bus); phys_id = uncore_pcibus_to_physid(pdev->bus);
box = pci_get_drvdata(pdev); box = pci_get_drvdata(pdev);
if (!box) { if (!box) {
pkg = (topology_max_die_per_package() > 1) ? phys_id : die = (topology_max_die_per_package() > 1) ? phys_id :
topology_phys_to_logical_pkg(phys_id); topology_phys_to_logical_pkg(phys_id);
for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) { for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
if (uncore_extra_pci_dev[pkg].dev[i] == pdev) { if (uncore_extra_pci_dev[die].dev[i] == pdev) {
uncore_extra_pci_dev[pkg].dev[i] = NULL; uncore_extra_pci_dev[die].dev[i] = NULL;
break; break;
} }
} }
...@@ -1051,7 +1051,7 @@ static void uncore_pci_remove(struct pci_dev *pdev) ...@@ -1051,7 +1051,7 @@ static void uncore_pci_remove(struct pci_dev *pdev)
return; return;
pci_set_drvdata(pdev, NULL); pci_set_drvdata(pdev, NULL);
pmu->boxes[box->pkgid] = NULL; pmu->boxes[box->dieid] = NULL;
if (atomic_dec_return(&pmu->activeboxes) == 0) if (atomic_dec_return(&pmu->activeboxes) == 0)
uncore_pmu_unregister(pmu); uncore_pmu_unregister(pmu);
uncore_box_exit(box); uncore_box_exit(box);
...@@ -1063,7 +1063,7 @@ static int __init uncore_pci_init(void) ...@@ -1063,7 +1063,7 @@ static int __init uncore_pci_init(void)
size_t size; size_t size;
int ret; int ret;
size = max_packages * sizeof(struct pci_extra_dev); size = max_dies * sizeof(struct pci_extra_dev);
uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL); uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
if (!uncore_extra_pci_dev) { if (!uncore_extra_pci_dev) {
ret = -ENOMEM; ret = -ENOMEM;
...@@ -1110,11 +1110,11 @@ static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu, ...@@ -1110,11 +1110,11 @@ static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
{ {
struct intel_uncore_pmu *pmu = type->pmus; struct intel_uncore_pmu *pmu = type->pmus;
struct intel_uncore_box *box; struct intel_uncore_box *box;
int i, pkg; int i, die;
pkg = topology_logical_die_id(old_cpu < 0 ? new_cpu : old_cpu); die = topology_logical_die_id(old_cpu < 0 ? new_cpu : old_cpu);
for (i = 0; i < type->num_boxes; i++, pmu++) { for (i = 0; i < type->num_boxes; i++, pmu++) {
box = pmu->boxes[pkg]; box = pmu->boxes[die];
if (!box) if (!box)
continue; continue;
...@@ -1147,7 +1147,7 @@ static int uncore_event_cpu_offline(unsigned int cpu) ...@@ -1147,7 +1147,7 @@ static int uncore_event_cpu_offline(unsigned int cpu)
struct intel_uncore_type *type, **types = uncore_msr_uncores; struct intel_uncore_type *type, **types = uncore_msr_uncores;
struct intel_uncore_pmu *pmu; struct intel_uncore_pmu *pmu;
struct intel_uncore_box *box; struct intel_uncore_box *box;
int i, pkg, target; int i, die, target;
/* Check if exiting cpu is used for collecting uncore events */ /* Check if exiting cpu is used for collecting uncore events */
if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask)) if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
...@@ -1166,12 +1166,12 @@ static int uncore_event_cpu_offline(unsigned int cpu) ...@@ -1166,12 +1166,12 @@ static int uncore_event_cpu_offline(unsigned int cpu)
unref: unref:
/* Clear the references */ /* Clear the references */
pkg = topology_logical_die_id(cpu); die = topology_logical_die_id(cpu);
for (; *types; types++) { for (; *types; types++) {
type = *types; type = *types;
pmu = type->pmus; pmu = type->pmus;
for (i = 0; i < type->num_boxes; i++, pmu++) { for (i = 0; i < type->num_boxes; i++, pmu++) {
box = pmu->boxes[pkg]; box = pmu->boxes[die];
if (box && atomic_dec_return(&box->refcnt) == 0) if (box && atomic_dec_return(&box->refcnt) == 0)
uncore_box_exit(box); uncore_box_exit(box);
} }
...@@ -1180,7 +1180,7 @@ static int uncore_event_cpu_offline(unsigned int cpu) ...@@ -1180,7 +1180,7 @@ static int uncore_event_cpu_offline(unsigned int cpu)
} }
static int allocate_boxes(struct intel_uncore_type **types, static int allocate_boxes(struct intel_uncore_type **types,
unsigned int pkg, unsigned int cpu) unsigned int die, unsigned int cpu)
{ {
struct intel_uncore_box *box, *tmp; struct intel_uncore_box *box, *tmp;
struct intel_uncore_type *type; struct intel_uncore_type *type;
...@@ -1193,20 +1193,20 @@ static int allocate_boxes(struct intel_uncore_type **types, ...@@ -1193,20 +1193,20 @@ static int allocate_boxes(struct intel_uncore_type **types,
type = *types; type = *types;
pmu = type->pmus; pmu = type->pmus;
for (i = 0; i < type->num_boxes; i++, pmu++) { for (i = 0; i < type->num_boxes; i++, pmu++) {
if (pmu->boxes[pkg]) if (pmu->boxes[die])
continue; continue;
box = uncore_alloc_box(type, cpu_to_node(cpu)); box = uncore_alloc_box(type, cpu_to_node(cpu));
if (!box) if (!box)
goto cleanup; goto cleanup;
box->pmu = pmu; box->pmu = pmu;
box->pkgid = pkg; box->dieid = die;
list_add(&box->active_list, &allocated); list_add(&box->active_list, &allocated);
} }
} }
/* Install them in the pmus */ /* Install them in the pmus */
list_for_each_entry_safe(box, tmp, &allocated, active_list) { list_for_each_entry_safe(box, tmp, &allocated, active_list) {
list_del_init(&box->active_list); list_del_init(&box->active_list);
box->pmu->boxes[pkg] = box; box->pmu->boxes[die] = box;
} }
return 0; return 0;
...@@ -1223,10 +1223,10 @@ static int uncore_event_cpu_online(unsigned int cpu) ...@@ -1223,10 +1223,10 @@ static int uncore_event_cpu_online(unsigned int cpu)
struct intel_uncore_type *type, **types = uncore_msr_uncores; struct intel_uncore_type *type, **types = uncore_msr_uncores;
struct intel_uncore_pmu *pmu; struct intel_uncore_pmu *pmu;
struct intel_uncore_box *box; struct intel_uncore_box *box;
int i, ret, pkg, target; int i, ret, die, target;
pkg = topology_logical_die_id(cpu); die = topology_logical_die_id(cpu);
ret = allocate_boxes(types, pkg, cpu); ret = allocate_boxes(types, die, cpu);
if (ret) if (ret)
return ret; return ret;
...@@ -1234,7 +1234,7 @@ static int uncore_event_cpu_online(unsigned int cpu) ...@@ -1234,7 +1234,7 @@ static int uncore_event_cpu_online(unsigned int cpu)
type = *types; type = *types;
pmu = type->pmus; pmu = type->pmus;
for (i = 0; i < type->num_boxes; i++, pmu++) { for (i = 0; i < type->num_boxes; i++, pmu++) {
box = pmu->boxes[pkg]; box = pmu->boxes[die];
if (box && atomic_inc_return(&box->refcnt) == 1) if (box && atomic_inc_return(&box->refcnt) == 1)
uncore_box_init(box); uncore_box_init(box);
} }
...@@ -1419,7 +1419,7 @@ static int __init intel_uncore_init(void) ...@@ -1419,7 +1419,7 @@ static int __init intel_uncore_init(void)
if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
return -ENODEV; return -ENODEV;
max_packages = topology_max_packages() * topology_max_die_per_package(); max_dies = topology_max_packages() * topology_max_die_per_package();
uncore_init = (struct intel_uncore_init_fun *)id->driver_data; uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
if (uncore_init->pci_init) { if (uncore_init->pci_init) {
......
...@@ -108,7 +108,7 @@ struct intel_uncore_extra_reg { ...@@ -108,7 +108,7 @@ struct intel_uncore_extra_reg {
struct intel_uncore_box { struct intel_uncore_box {
int pci_phys_id; int pci_phys_id;
int pkgid; /* Logical package ID */ int dieid; /* Logical die ID */
int n_active; /* number of active events */ int n_active; /* number of active events */
int n_events; int n_events;
int cpu; /* cpu to collect events */ int cpu; /* cpu to collect events */
...@@ -467,7 +467,7 @@ static inline void uncore_box_exit(struct intel_uncore_box *box) ...@@ -467,7 +467,7 @@ static inline void uncore_box_exit(struct intel_uncore_box *box)
static inline bool uncore_box_is_fake(struct intel_uncore_box *box) static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{ {
return (box->pkgid < 0); return (box->dieid < 0);
} }
static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event) static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
......
...@@ -1058,8 +1058,8 @@ static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_eve ...@@ -1058,8 +1058,8 @@ static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_eve
if (reg1->idx != EXTRA_REG_NONE) { if (reg1->idx != EXTRA_REG_NONE) {
int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER; int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
int pkg = box->pkgid; int die = box->dieid;
struct pci_dev *filter_pdev = uncore_extra_pci_dev[pkg].dev[idx]; struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];
if (filter_pdev) { if (filter_pdev) {
pci_write_config_dword(filter_pdev, reg1->reg, pci_write_config_dword(filter_pdev, reg1->reg,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment