Commit 323ee64a authored by Jacob Pan, committed by Rafael J. Wysocki

powercap/rapl: track lead cpu per package

The RAPL driver operates on MSRs that are package/socket scoped
rather than core scoped. However, the current code does not keep
track of which CPUs are available on each package for MSR access,
so it has to search for an active CPU on a given package each time.

Optimize the package-level operations by tracking a per-package
lead CPU during initialization and CPU hotplug, so that the runtime
search for an active CPU is avoided.
Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
parent 309557f5
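The diff below is small, but the pattern it adds is easy to state on its own: record one online CPU per package when the package is first seen, and hand that role to another online CPU of the same package when the lead CPU is hot-removed. The following user-space C sketch models only that bookkeeping; the names (struct pkg, pkg_add_cpu, pkg_remove_cpu, online[]) are hypothetical stand-ins for struct rapl_package, rapl_add_package()/rapl_detect_topology() and the CPU hotplug notifier in the actual driver.

/*
 * Illustrative sketch only, not the driver code: a user-space model of the
 * lead-CPU bookkeeping this patch introduces. All names here are invented
 * for the example.
 */
#include <stdio.h>
#include <stdbool.h>

#define MAX_CPUS 8

struct pkg {
        int nr_cpus;   /* online CPUs currently counted for this package */
        int lead_cpu;  /* CPU used for all per-package accesses */
};

static bool online[MAX_CPUS];

/* The first CPU of a package becomes the lead; later CPUs only bump the count. */
static void pkg_add_cpu(struct pkg *p, int cpu)
{
        online[cpu] = true;
        if (p->nr_cpus++ == 0)
                p->lead_cpu = cpu;
}

/* On removal, promote another online CPU if the lead is the one going away. */
static void pkg_remove_cpu(struct pkg *p, int cpu)
{
        online[cpu] = false;
        if (--p->nr_cpus == 0) {
                p->lead_cpu = -1;       /* no usable CPU left in the package */
        } else if (cpu == p->lead_cpu) {
                for (int i = 0; i < MAX_CPUS; i++) {
                        if (online[i]) {
                                p->lead_cpu = i;
                                break;
                        }
                }
        }
}

int main(void)
{
        struct pkg p = { 0, -1 };

        pkg_add_cpu(&p, 0);
        pkg_add_cpu(&p, 1);
        printf("lead after bring-up: %d\n", p.lead_cpu);   /* 0 */

        pkg_remove_cpu(&p, 0);                              /* lead goes offline */
        printf("lead after hotplug:  %d\n", p.lead_cpu);   /* 1 */
        return 0;
}

In the driver itself the replacement lead is picked with cpumask_any_but(topology_core_cpumask(cpu), cpu) rather than a linear scan, as the last hunk of the diff shows.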
@@ -191,6 +191,7 @@ struct rapl_package {
          * notify interrupt enable status.
          */
         struct list_head plist;
+        int lead_cpu; /* one active cpu per package for access */
 };
 
 struct rapl_defaults {
@@ -267,20 +268,6 @@ static struct rapl_package *find_package_by_id(int id)
         return NULL;
 }
 
-/* caller to ensure CPU hotplug lock is held */
-static int find_active_cpu_on_package(int package_id)
-{
-        int i;
-
-        for_each_online_cpu(i) {
-                if (topology_physical_package_id(i) == package_id)
-                        return i;
-        }
-        /* all CPUs on this package are offline */
-
-        return -ENODEV;
-}
-
 /* caller must hold cpu hotplug lock */
 static void rapl_cleanup_data(void)
 {
@@ -761,10 +748,8 @@ static int rapl_read_data_raw(struct rapl_domain *rd,
         msr = rd->msrs[rp->id];
         if (!msr)
                 return -EINVAL;
-        /* use physical package id to look up active cpus */
-        cpu = find_active_cpu_on_package(rd->rp->id);
-        if (cpu < 0)
-                return cpu;
+
+        cpu = rd->rp->lead_cpu;
 
         /* special-case package domain, which uses a different bit*/
         if (prim == FW_LOCK && rd->id == RAPL_DOMAIN_PACKAGE) {
@@ -829,10 +814,7 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
         struct msrl_action ma;
         int ret;
 
-        cpu = find_active_cpu_on_package(rd->rp->id);
-        if (cpu < 0)
-                return cpu;
-
+        cpu = rd->rp->lead_cpu;
         bits = rapl_unit_xlate(rd, rp->unit, value, 1);
         bits |= bits << rp->shift;
         memset(&ma, 0, sizeof(ma));
@@ -940,18 +922,10 @@ static void power_limit_irq_save_cpu(void *info)
 
 static void package_power_limit_irq_save(struct rapl_package *rp)
 {
-        int cpu;
-
         if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
                 return;
 
-        cpu = find_active_cpu_on_package(rp->id);
-        if (cpu < 0)
-                return;
-
-        if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
-                return;
-        smp_call_function_single(cpu, power_limit_irq_save_cpu, rp, 1);
+        smp_call_function_single(rp->lead_cpu, power_limit_irq_save_cpu, rp, 1);
 }
 
 static void power_limit_irq_restore_cpu(void *info)
@@ -972,20 +946,14 @@ static void power_limit_irq_restore_cpu(void *info)
 /* restore per package power limit interrupt enable state */
 static void package_power_limit_irq_restore(struct rapl_package *rp)
 {
-        int cpu;
-
         if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
                 return;
 
-        cpu = find_active_cpu_on_package(rp->id);
-        if (cpu < 0)
-                return;
-
         /* irq enable state not saved, nothing to restore */
         if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED))
                 return;
 
-        smp_call_function_single(cpu, power_limit_irq_restore_cpu, rp, 1);
+        smp_call_function_single(rp->lead_cpu, power_limit_irq_restore_cpu, rp, 1);
 }
 
 static void set_floor_freq_default(struct rapl_domain *rd, bool mode)
@@ -1419,7 +1387,8 @@ static int rapl_detect_topology(void)
                 /* add the new package to the list */
                 new_package->id = phy_package_id;
                 new_package->nr_cpus = 1;
-
+                /* use the first active cpu of the package to access */
+                new_package->lead_cpu = i;
                 /* check if the package contains valid domains */
                 if (rapl_detect_domains(new_package, i) ||
                         rapl_defaults->check_unit(new_package, i)) {
@@ -1475,6 +1444,8 @@ static int rapl_add_package(int cpu)
         /* add the new package to the list */
         rp->id = phy_package_id;
         rp->nr_cpus = 1;
+        rp->lead_cpu = cpu;
+
         /* check if the package contains valid domains */
         if (rapl_detect_domains(rp, cpu) ||
                 rapl_defaults->check_unit(rp, cpu)) {
@@ -1507,6 +1478,7 @@ static int rapl_cpu_callback(struct notifier_block *nfb,
         unsigned long cpu = (unsigned long)hcpu;
         int phy_package_id;
         struct rapl_package *rp;
+        int lead_cpu;
 
         phy_package_id = topology_physical_package_id(cpu);
         switch (action) {
@@ -1527,6 +1499,15 @@ static int rapl_cpu_callback(struct notifier_block *nfb,
                         break;
                 if (--rp->nr_cpus == 0)
                         rapl_remove_package(rp);
+                else if (cpu == rp->lead_cpu) {
+                        /* choose another active cpu in the package */
+                        lead_cpu = cpumask_any_but(topology_core_cpumask(cpu), cpu);
+                        if (lead_cpu < nr_cpu_ids)
+                                rp->lead_cpu = lead_cpu;
+                        else /* should never go here */
+                                pr_err("no active cpu available for package %d\n",
+                                        phy_package_id);
+                }
         }
 
         return NOTIFY_OK;