Commit d0411ec8 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'pm-5.3-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull more power management updates from Rafael Wysocki:
 "These modify the Intel RAPL driver to allow it to use an MMIO
  interface to the hardware, make the int340X thermal driver provide
  such an interface for it, add Intel Ice Lake CPU IDs to the RAPL
  driver (these changes depend on the previously merged x86 arch
changes), update cpufreq to use the PM QoS framework for managing the
  min and max frequency limits, and update the imx-cpufreq-dt
  cpufreq driver to support i.MX8MN.

  Specifics:

   - Add MMIO interface support to the Intel RAPL power capping driver
     and update the int340X thermal driver to provide a RAPL MMIO
     interface (Zhang Rui, Stephen Rothwell).

   - Add Intel Ice Lake CPU IDs to the RAPL driver (Zhang Rui, Rajneesh
     Bhardwaj).

   - Make cpufreq use the PM QoS framework (instead of notifiers) for
     managing the min and max frequency constraints (Viresh Kumar).

   - Add i.MX8MN support to the imx-cpufreq-dt cpufreq driver (Anson
     Huang)"

* tag 'pm-5.3-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (27 commits)
  cpufreq: Make cpufreq_generic_init() return void
  intel_rapl: need linux/cpuhotplug.h for enum cpuhp_state
  powercap/rapl: Add Ice Lake NNPI support to RAPL driver
  powercap/intel_rapl: add support for ICX-D
  powercap/intel_rapl: add support for ICX
  powercap/intel_rapl: add support for IceLake desktop
  intel_rapl: Fix module autoloading issue
  int340X/processor_thermal_device: add support for MMIO RAPL
  intel_rapl: support two power limits for every RAPL domain
  intel_rapl: support 64 bit register
  intel_rapl: abstract RAPL common code
  intel_rapl: cleanup hardcoded MSR access
  intel_rapl: cleanup some functions
  intel_rapl: abstract register access operations
  intel_rapl: abstract register address
  intel_rapl: introduce struct rapl_if_private
  intel_rapl: introduce intel_rapl.h
  intel_rapl: remove hardcoded register index
  intel_rapl: use reg instead of msr
  cpufreq: imx-cpufreq-dt: Add i.MX8MN support
  ...
parents 4b09ddbc 918e162e
...@@ -129,7 +129,7 @@ int dev_pm_qos_remove_request(handle): ...@@ -129,7 +129,7 @@ int dev_pm_qos_remove_request(handle):
and call the notification trees if the target was changed as a result of and call the notification trees if the target was changed as a result of
removing the request. removing the request.
s32 dev_pm_qos_read_value(device): s32 dev_pm_qos_read_value(device, type):
Returns the aggregated value for a given device's constraints list. Returns the aggregated value for a given device's constraints list.
enum pm_qos_flags_status dev_pm_qos_flags(device, mask) enum pm_qos_flags_status dev_pm_qos_flags(device, mask)
...@@ -176,12 +176,14 @@ Notification mechanisms: ...@@ -176,12 +176,14 @@ Notification mechanisms:
The per-device PM QoS framework has a per-device notification tree. The per-device PM QoS framework has a per-device notification tree.
int dev_pm_qos_add_notifier(device, notifier): int dev_pm_qos_add_notifier(device, notifier, type):
Adds a notification callback function for the device. Adds a notification callback function for the device for a particular request
type.
The callback is called when the aggregated value of the device constraints list The callback is called when the aggregated value of the device constraints list
is changed (for resume latency device PM QoS only). is changed.
int dev_pm_qos_remove_notifier(device, notifier): int dev_pm_qos_remove_notifier(device, notifier, type):
Removes the notification callback function for the device. Removes the notification callback function for the device.
......
...@@ -12840,6 +12840,7 @@ F: drivers/base/power/ ...@@ -12840,6 +12840,7 @@ F: drivers/base/power/
F: include/linux/pm.h F: include/linux/pm.h
F: include/linux/pm_* F: include/linux/pm_*
F: include/linux/powercap.h F: include/linux/powercap.h
F: include/linux/intel_rapl.h
F: drivers/powercap/ F: drivers/powercap/
F: kernel/configs/nopm.config F: kernel/configs/nopm.config
......
...@@ -1536,7 +1536,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, ...@@ -1536,7 +1536,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
if (ret) if (ret)
genpd_free_dev_data(dev, gpd_data); genpd_free_dev_data(dev, gpd_data);
else else
dev_pm_qos_add_notifier(dev, &gpd_data->nb); dev_pm_qos_add_notifier(dev, &gpd_data->nb,
DEV_PM_QOS_RESUME_LATENCY);
return ret; return ret;
} }
...@@ -1569,7 +1570,8 @@ static int genpd_remove_device(struct generic_pm_domain *genpd, ...@@ -1569,7 +1570,8 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
pdd = dev->power.subsys_data->domain_data; pdd = dev->power.subsys_data->domain_data;
gpd_data = to_gpd_data(pdd); gpd_data = to_gpd_data(pdd);
dev_pm_qos_remove_notifier(dev, &gpd_data->nb); dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
DEV_PM_QOS_RESUME_LATENCY);
genpd_lock(genpd); genpd_lock(genpd);
...@@ -1597,7 +1599,7 @@ static int genpd_remove_device(struct generic_pm_domain *genpd, ...@@ -1597,7 +1599,7 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
out: out:
genpd_unlock(genpd); genpd_unlock(genpd);
dev_pm_qos_add_notifier(dev, &gpd_data->nb); dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);
return ret; return ret;
} }
......
...@@ -33,7 +33,7 @@ static int dev_update_qos_constraint(struct device *dev, void *data) ...@@ -33,7 +33,7 @@ static int dev_update_qos_constraint(struct device *dev, void *data)
* take its current PM QoS constraint (that's the only thing * take its current PM QoS constraint (that's the only thing
* known at this point anyway). * known at this point anyway).
*/ */
constraint_ns = dev_pm_qos_read_value(dev); constraint_ns = dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY);
constraint_ns *= NSEC_PER_USEC; constraint_ns *= NSEC_PER_USEC;
} }
...@@ -66,7 +66,7 @@ static bool default_suspend_ok(struct device *dev) ...@@ -66,7 +66,7 @@ static bool default_suspend_ok(struct device *dev)
td->constraint_changed = false; td->constraint_changed = false;
td->cached_suspend_ok = false; td->cached_suspend_ok = false;
td->effective_constraint_ns = 0; td->effective_constraint_ns = 0;
constraint_ns = __dev_pm_qos_read_value(dev); constraint_ns = __dev_pm_qos_resume_latency(dev);
spin_unlock_irqrestore(&dev->power.lock, flags); spin_unlock_irqrestore(&dev->power.lock, flags);
......
...@@ -90,29 +90,49 @@ enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask) ...@@ -90,29 +90,49 @@ enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
EXPORT_SYMBOL_GPL(dev_pm_qos_flags); EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
/** /**
* __dev_pm_qos_read_value - Get PM QoS constraint for a given device. * __dev_pm_qos_resume_latency - Get resume latency constraint for a given device.
* @dev: Device to get the PM QoS constraint value for. * @dev: Device to get the PM QoS constraint value for.
* *
* This routine must be called with dev->power.lock held. * This routine must be called with dev->power.lock held.
*/ */
s32 __dev_pm_qos_read_value(struct device *dev) s32 __dev_pm_qos_resume_latency(struct device *dev)
{ {
lockdep_assert_held(&dev->power.lock); lockdep_assert_held(&dev->power.lock);
return dev_pm_qos_raw_read_value(dev); return dev_pm_qos_raw_resume_latency(dev);
} }
/** /**
* dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked). * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
* @dev: Device to get the PM QoS constraint value for. * @dev: Device to get the PM QoS constraint value for.
* @type: QoS request type.
*/ */
s32 dev_pm_qos_read_value(struct device *dev) s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
{ {
struct dev_pm_qos *qos = dev->power.qos;
unsigned long flags; unsigned long flags;
s32 ret; s32 ret;
spin_lock_irqsave(&dev->power.lock, flags); spin_lock_irqsave(&dev->power.lock, flags);
ret = __dev_pm_qos_read_value(dev);
switch (type) {
case DEV_PM_QOS_RESUME_LATENCY:
ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
: pm_qos_read_value(&qos->resume_latency);
break;
case DEV_PM_QOS_MIN_FREQUENCY:
ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE
: pm_qos_read_value(&qos->min_frequency);
break;
case DEV_PM_QOS_MAX_FREQUENCY:
ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE
: pm_qos_read_value(&qos->max_frequency);
break;
default:
WARN_ON(1);
ret = 0;
}
spin_unlock_irqrestore(&dev->power.lock, flags); spin_unlock_irqrestore(&dev->power.lock, flags);
return ret; return ret;
...@@ -149,6 +169,14 @@ static int apply_constraint(struct dev_pm_qos_request *req, ...@@ -149,6 +169,14 @@ static int apply_constraint(struct dev_pm_qos_request *req,
req->dev->power.set_latency_tolerance(req->dev, value); req->dev->power.set_latency_tolerance(req->dev, value);
} }
break; break;
case DEV_PM_QOS_MIN_FREQUENCY:
ret = pm_qos_update_target(&qos->min_frequency,
&req->data.pnode, action, value);
break;
case DEV_PM_QOS_MAX_FREQUENCY:
ret = pm_qos_update_target(&qos->max_frequency,
&req->data.pnode, action, value);
break;
case DEV_PM_QOS_FLAGS: case DEV_PM_QOS_FLAGS:
ret = pm_qos_update_flags(&qos->flags, &req->data.flr, ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
action, value); action, value);
...@@ -177,12 +205,11 @@ static int dev_pm_qos_constraints_allocate(struct device *dev) ...@@ -177,12 +205,11 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
if (!qos) if (!qos)
return -ENOMEM; return -ENOMEM;
n = kzalloc(sizeof(*n), GFP_KERNEL); n = kzalloc(3 * sizeof(*n), GFP_KERNEL);
if (!n) { if (!n) {
kfree(qos); kfree(qos);
return -ENOMEM; return -ENOMEM;
} }
BLOCKING_INIT_NOTIFIER_HEAD(n);
c = &qos->resume_latency; c = &qos->resume_latency;
plist_head_init(&c->list); plist_head_init(&c->list);
...@@ -191,6 +218,7 @@ static int dev_pm_qos_constraints_allocate(struct device *dev) ...@@ -191,6 +218,7 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
c->type = PM_QOS_MIN; c->type = PM_QOS_MIN;
c->notifiers = n; c->notifiers = n;
BLOCKING_INIT_NOTIFIER_HEAD(n);
c = &qos->latency_tolerance; c = &qos->latency_tolerance;
plist_head_init(&c->list); plist_head_init(&c->list);
...@@ -199,6 +227,24 @@ static int dev_pm_qos_constraints_allocate(struct device *dev) ...@@ -199,6 +227,24 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
c->type = PM_QOS_MIN; c->type = PM_QOS_MIN;
c = &qos->min_frequency;
plist_head_init(&c->list);
c->target_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
c->default_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
c->no_constraint_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
c->type = PM_QOS_MAX;
c->notifiers = ++n;
BLOCKING_INIT_NOTIFIER_HEAD(n);
c = &qos->max_frequency;
plist_head_init(&c->list);
c->target_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
c->default_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
c->no_constraint_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
c->type = PM_QOS_MIN;
c->notifiers = ++n;
BLOCKING_INIT_NOTIFIER_HEAD(n);
INIT_LIST_HEAD(&qos->flags.list); INIT_LIST_HEAD(&qos->flags.list);
spin_lock_irq(&dev->power.lock); spin_lock_irq(&dev->power.lock);
...@@ -252,11 +298,25 @@ void dev_pm_qos_constraints_destroy(struct device *dev) ...@@ -252,11 +298,25 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req)); memset(req, 0, sizeof(*req));
} }
c = &qos->latency_tolerance; c = &qos->latency_tolerance;
plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) { plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req)); memset(req, 0, sizeof(*req));
} }
c = &qos->min_frequency;
plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
c = &qos->max_frequency;
plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
f = &qos->flags; f = &qos->flags;
list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) { list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
...@@ -368,6 +428,8 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req, ...@@ -368,6 +428,8 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
switch(req->type) { switch(req->type) {
case DEV_PM_QOS_RESUME_LATENCY: case DEV_PM_QOS_RESUME_LATENCY:
case DEV_PM_QOS_LATENCY_TOLERANCE: case DEV_PM_QOS_LATENCY_TOLERANCE:
case DEV_PM_QOS_MIN_FREQUENCY:
case DEV_PM_QOS_MAX_FREQUENCY:
curr_value = req->data.pnode.prio; curr_value = req->data.pnode.prio;
break; break;
case DEV_PM_QOS_FLAGS: case DEV_PM_QOS_FLAGS:
...@@ -467,6 +529,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request); ...@@ -467,6 +529,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
* *
* @dev: target device for the constraint * @dev: target device for the constraint
* @notifier: notifier block managed by caller. * @notifier: notifier block managed by caller.
* @type: request type.
* *
* Will register the notifier into a notification chain that gets called * Will register the notifier into a notification chain that gets called
* upon changes to the target value for the device. * upon changes to the target value for the device.
...@@ -474,7 +537,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request); ...@@ -474,7 +537,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
* If the device's constraints object doesn't exist when this routine is called, * If the device's constraints object doesn't exist when this routine is called,
* it will be created (or error code will be returned if that fails). * it will be created (or error code will be returned if that fails).
*/ */
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier) int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
enum dev_pm_qos_req_type type)
{ {
int ret = 0; int ret = 0;
...@@ -485,10 +549,28 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier) ...@@ -485,10 +549,28 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
else if (!dev->power.qos) else if (!dev->power.qos)
ret = dev_pm_qos_constraints_allocate(dev); ret = dev_pm_qos_constraints_allocate(dev);
if (!ret) if (ret)
goto unlock;
switch (type) {
case DEV_PM_QOS_RESUME_LATENCY:
ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers, ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
notifier); notifier);
break;
case DEV_PM_QOS_MIN_FREQUENCY:
ret = blocking_notifier_chain_register(dev->power.qos->min_frequency.notifiers,
notifier);
break;
case DEV_PM_QOS_MAX_FREQUENCY:
ret = blocking_notifier_chain_register(dev->power.qos->max_frequency.notifiers,
notifier);
break;
default:
WARN_ON(1);
ret = -EINVAL;
}
unlock:
mutex_unlock(&dev_pm_qos_mtx); mutex_unlock(&dev_pm_qos_mtx);
return ret; return ret;
} }
...@@ -500,24 +582,44 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier); ...@@ -500,24 +582,44 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
* *
* @dev: target device for the constraint * @dev: target device for the constraint
* @notifier: notifier block to be removed. * @notifier: notifier block to be removed.
* @type: request type.
* *
* Will remove the notifier from the notification chain that gets called * Will remove the notifier from the notification chain that gets called
* upon changes to the target value. * upon changes to the target value.
*/ */
int dev_pm_qos_remove_notifier(struct device *dev, int dev_pm_qos_remove_notifier(struct device *dev,
struct notifier_block *notifier) struct notifier_block *notifier,
enum dev_pm_qos_req_type type)
{ {
int retval = 0; int ret = 0;
mutex_lock(&dev_pm_qos_mtx); mutex_lock(&dev_pm_qos_mtx);
/* Silently return if the constraints object is not present. */ /* Silently return if the constraints object is not present. */
if (!IS_ERR_OR_NULL(dev->power.qos)) if (IS_ERR_OR_NULL(dev->power.qos))
retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers, goto unlock;
switch (type) {
case DEV_PM_QOS_RESUME_LATENCY:
ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
notifier);
break;
case DEV_PM_QOS_MIN_FREQUENCY:
ret = blocking_notifier_chain_unregister(dev->power.qos->min_frequency.notifiers,
notifier); notifier);
break;
case DEV_PM_QOS_MAX_FREQUENCY:
ret = blocking_notifier_chain_unregister(dev->power.qos->max_frequency.notifiers,
notifier);
break;
default:
WARN_ON(1);
ret = -EINVAL;
}
unlock:
mutex_unlock(&dev_pm_qos_mtx); mutex_unlock(&dev_pm_qos_mtx);
return retval; return ret;
} }
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier); EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
...@@ -577,6 +679,9 @@ static void __dev_pm_qos_drop_user_request(struct device *dev, ...@@ -577,6 +679,9 @@ static void __dev_pm_qos_drop_user_request(struct device *dev,
req = dev->power.qos->flags_req; req = dev->power.qos->flags_req;
dev->power.qos->flags_req = NULL; dev->power.qos->flags_req = NULL;
break; break;
default:
WARN_ON(1);
return;
} }
__dev_pm_qos_remove_request(req); __dev_pm_qos_remove_request(req);
kfree(req); kfree(req);
......
...@@ -275,7 +275,7 @@ static int rpm_check_suspend_allowed(struct device *dev) ...@@ -275,7 +275,7 @@ static int rpm_check_suspend_allowed(struct device *dev)
|| (dev->power.request_pending || (dev->power.request_pending
&& dev->power.request == RPM_REQ_RESUME)) && dev->power.request == RPM_REQ_RESUME))
retval = -EAGAIN; retval = -EAGAIN;
else if (__dev_pm_qos_read_value(dev) == 0) else if (__dev_pm_qos_resume_latency(dev) == 0)
retval = -EPERM; retval = -EPERM;
else if (dev->power.runtime_status == RPM_SUSPENDED) else if (dev->power.runtime_status == RPM_SUSPENDED)
retval = 1; retval = 1;
......
...@@ -131,23 +131,18 @@ static int bmips_cpufreq_exit(struct cpufreq_policy *policy) ...@@ -131,23 +131,18 @@ static int bmips_cpufreq_exit(struct cpufreq_policy *policy)
static int bmips_cpufreq_init(struct cpufreq_policy *policy) static int bmips_cpufreq_init(struct cpufreq_policy *policy)
{ {
struct cpufreq_frequency_table *freq_table; struct cpufreq_frequency_table *freq_table;
int ret;
freq_table = bmips_cpufreq_get_freq_table(policy); freq_table = bmips_cpufreq_get_freq_table(policy);
if (IS_ERR(freq_table)) { if (IS_ERR(freq_table)) {
ret = PTR_ERR(freq_table); pr_err("%s: couldn't determine frequency table (%ld).\n",
pr_err("%s: couldn't determine frequency table (%d).\n", BMIPS_CPUFREQ_NAME, PTR_ERR(freq_table));
BMIPS_CPUFREQ_NAME, ret); return PTR_ERR(freq_table);
return ret;
} }
ret = cpufreq_generic_init(policy, freq_table, TRANSITION_LATENCY); cpufreq_generic_init(policy, freq_table, TRANSITION_LATENCY);
if (ret)
bmips_cpufreq_exit(policy);
else
pr_info("%s: registered\n", BMIPS_CPUFREQ_NAME); pr_info("%s: registered\n", BMIPS_CPUFREQ_NAME);
return ret; return 0;
} }
static struct cpufreq_driver bmips_cpufreq_driver = { static struct cpufreq_driver bmips_cpufreq_driver = {
......
This diff is collapsed.
...@@ -90,7 +90,8 @@ static int davinci_cpu_init(struct cpufreq_policy *policy) ...@@ -90,7 +90,8 @@ static int davinci_cpu_init(struct cpufreq_policy *policy)
* Setting the latency to 2000 us to accommodate addition of drivers * Setting the latency to 2000 us to accommodate addition of drivers
* to pre/post change notification list. * to pre/post change notification list.
*/ */
return cpufreq_generic_init(policy, freq_table, 2000 * 1000); cpufreq_generic_init(policy, freq_table, 2000 * 1000);
return 0;
} }
static struct cpufreq_driver davinci_driver = { static struct cpufreq_driver davinci_driver = {
......
...@@ -44,10 +44,11 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev) ...@@ -44,10 +44,11 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
* According to datasheet minimum speed grading is not supported for * According to datasheet minimum speed grading is not supported for
* consumer parts so clamp to 1 to avoid warning for "no OPPs" * consumer parts so clamp to 1 to avoid warning for "no OPPs"
* *
* Applies to 8mq and 8mm. * Applies to i.MX8M series SoCs.
*/ */
if (mkt_segment == 0 && speed_grade == 0 && ( if (mkt_segment == 0 && speed_grade == 0 && (
of_machine_is_compatible("fsl,imx8mm") || of_machine_is_compatible("fsl,imx8mm") ||
of_machine_is_compatible("fsl,imx8mn") ||
of_machine_is_compatible("fsl,imx8mq"))) of_machine_is_compatible("fsl,imx8mq")))
speed_grade = 1; speed_grade = 1;
......
...@@ -190,14 +190,12 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index) ...@@ -190,14 +190,12 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
static int imx6q_cpufreq_init(struct cpufreq_policy *policy) static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
{ {
int ret;
policy->clk = clks[ARM].clk; policy->clk = clks[ARM].clk;
ret = cpufreq_generic_init(policy, freq_table, transition_latency); cpufreq_generic_init(policy, freq_table, transition_latency);
policy->suspend_freq = max_freq; policy->suspend_freq = max_freq;
dev_pm_opp_of_register_em(policy->cpus); dev_pm_opp_of_register_em(policy->cpus);
return ret; return 0;
} }
static struct cpufreq_driver imx6q_cpufreq_driver = { static struct cpufreq_driver imx6q_cpufreq_driver = {
......
...@@ -898,7 +898,6 @@ static void intel_pstate_update_policies(void) ...@@ -898,7 +898,6 @@ static void intel_pstate_update_policies(void)
static void intel_pstate_update_max_freq(unsigned int cpu) static void intel_pstate_update_max_freq(unsigned int cpu)
{ {
struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu); struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
struct cpufreq_policy new_policy;
struct cpudata *cpudata; struct cpudata *cpudata;
if (!policy) if (!policy)
...@@ -908,11 +907,7 @@ static void intel_pstate_update_max_freq(unsigned int cpu) ...@@ -908,11 +907,7 @@ static void intel_pstate_update_max_freq(unsigned int cpu)
policy->cpuinfo.max_freq = global.turbo_disabled_mf ? policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
cpudata->pstate.max_freq : cpudata->pstate.turbo_freq; cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
memcpy(&new_policy, policy, sizeof(*policy)); refresh_frequency_limits(policy);
new_policy.max = min(policy->user_policy.max, policy->cpuinfo.max_freq);
new_policy.min = min(policy->user_policy.min, new_policy.max);
cpufreq_set_policy(policy, &new_policy);
cpufreq_cpu_release(policy); cpufreq_cpu_release(policy);
} }
......
...@@ -85,7 +85,8 @@ static int kirkwood_cpufreq_target(struct cpufreq_policy *policy, ...@@ -85,7 +85,8 @@ static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
/* Module init and exit code */ /* Module init and exit code */
static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy) static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy)
{ {
return cpufreq_generic_init(policy, kirkwood_freq_table, 5000); cpufreq_generic_init(policy, kirkwood_freq_table, 5000);
return 0;
} }
static struct cpufreq_driver kirkwood_cpufreq_driver = { static struct cpufreq_driver kirkwood_cpufreq_driver = {
......
...@@ -81,7 +81,7 @@ static int ls1x_cpufreq_init(struct cpufreq_policy *policy) ...@@ -81,7 +81,7 @@ static int ls1x_cpufreq_init(struct cpufreq_policy *policy)
struct device *cpu_dev = get_cpu_device(policy->cpu); struct device *cpu_dev = get_cpu_device(policy->cpu);
struct cpufreq_frequency_table *freq_tbl; struct cpufreq_frequency_table *freq_tbl;
unsigned int pll_freq, freq; unsigned int pll_freq, freq;
int steps, i, ret; int steps, i;
pll_freq = clk_get_rate(cpufreq->pll_clk) / 1000; pll_freq = clk_get_rate(cpufreq->pll_clk) / 1000;
...@@ -103,11 +103,9 @@ static int ls1x_cpufreq_init(struct cpufreq_policy *policy) ...@@ -103,11 +103,9 @@ static int ls1x_cpufreq_init(struct cpufreq_policy *policy)
freq_tbl[i].frequency = CPUFREQ_TABLE_END; freq_tbl[i].frequency = CPUFREQ_TABLE_END;
policy->clk = cpufreq->clk; policy->clk = cpufreq->clk;
ret = cpufreq_generic_init(policy, freq_tbl, 0); cpufreq_generic_init(policy, freq_tbl, 0);
if (ret)
kfree(freq_tbl);
return ret; return 0;
} }
static int ls1x_cpufreq_exit(struct cpufreq_policy *policy) static int ls1x_cpufreq_exit(struct cpufreq_policy *policy)
......
...@@ -95,7 +95,8 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) ...@@ -95,7 +95,8 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
} }
policy->clk = cpuclk; policy->clk = cpuclk;
return cpufreq_generic_init(policy, &loongson2_clockmod_table[0], 0); cpufreq_generic_init(policy, &loongson2_clockmod_table[0], 0);
return 0;
} }
static int loongson2_cpufreq_exit(struct cpufreq_policy *policy) static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
......
...@@ -140,7 +140,8 @@ static unsigned int maple_cpufreq_get_speed(unsigned int cpu) ...@@ -140,7 +140,8 @@ static unsigned int maple_cpufreq_get_speed(unsigned int cpu)
static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy) static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy)
{ {
return cpufreq_generic_init(policy, maple_cpu_freqs, 12000); cpufreq_generic_init(policy, maple_cpu_freqs, 12000);
return 0;
} }
static struct cpufreq_driver maple_cpufreq_driver = { static struct cpufreq_driver maple_cpufreq_driver = {
......
...@@ -122,23 +122,18 @@ static int omap_cpu_init(struct cpufreq_policy *policy) ...@@ -122,23 +122,18 @@ static int omap_cpu_init(struct cpufreq_policy *policy)
dev_err(mpu_dev, dev_err(mpu_dev,
"%s: cpu%d: failed creating freq table[%d]\n", "%s: cpu%d: failed creating freq table[%d]\n",
__func__, policy->cpu, result); __func__, policy->cpu, result);
goto fail; clk_put(policy->clk);
return result;
} }
} }
atomic_inc_return(&freq_table_users); atomic_inc_return(&freq_table_users);
/* FIXME: what's the actual transition time? */ /* FIXME: what's the actual transition time? */
result = cpufreq_generic_init(policy, freq_table, 300 * 1000); cpufreq_generic_init(policy, freq_table, 300 * 1000);
if (!result) {
dev_pm_opp_of_register_em(policy->cpus); dev_pm_opp_of_register_em(policy->cpus);
return 0;
}
freq_table_free(); return 0;
fail:
clk_put(policy->clk);
return result;
} }
static int omap_cpu_exit(struct cpufreq_policy *policy) static int omap_cpu_exit(struct cpufreq_policy *policy)
......
...@@ -196,7 +196,8 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy) ...@@ -196,7 +196,8 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->cur = pas_freqs[cur_astate].frequency; policy->cur = pas_freqs[cur_astate].frequency;
ppc_proc_freq = policy->cur * 1000ul; ppc_proc_freq = policy->cur * 1000ul;
return cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency()); cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency());
return 0;
out_unmap_sdcpwr: out_unmap_sdcpwr:
iounmap(sdcpwr_mapbase); iounmap(sdcpwr_mapbase);
......
...@@ -372,7 +372,8 @@ static int pmac_cpufreq_target( struct cpufreq_policy *policy, ...@@ -372,7 +372,8 @@ static int pmac_cpufreq_target( struct cpufreq_policy *policy,
static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy) static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
{ {
return cpufreq_generic_init(policy, pmac_cpu_freqs, transition_latency); cpufreq_generic_init(policy, pmac_cpu_freqs, transition_latency);
return 0;
} }
static u32 read_gpio(struct device_node *np) static u32 read_gpio(struct device_node *np)
......
...@@ -321,7 +321,8 @@ static unsigned int g5_cpufreq_get_speed(unsigned int cpu) ...@@ -321,7 +321,8 @@ static unsigned int g5_cpufreq_get_speed(unsigned int cpu)
static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy) static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
{ {
return cpufreq_generic_init(policy, g5_cpu_freqs, transition_latency); cpufreq_generic_init(policy, g5_cpu_freqs, transition_latency);
return 0;
} }
static struct cpufreq_driver g5_cpufreq_driver = { static struct cpufreq_driver g5_cpufreq_driver = {
......
...@@ -447,21 +447,16 @@ static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy) ...@@ -447,21 +447,16 @@ static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
/* Datasheet says PLL stabalisation time must be at least 300us, /* Datasheet says PLL stabalisation time must be at least 300us,
* so but add some fudge. (reference in LOCKCON0 register description) * so but add some fudge. (reference in LOCKCON0 register description)
*/ */
ret = cpufreq_generic_init(policy, s3c_freq->freq_table, cpufreq_generic_init(policy, s3c_freq->freq_table,
(500 * 1000) + s3c_freq->regulator_latency); (500 * 1000) + s3c_freq->regulator_latency);
if (ret)
goto err_freq_table;
register_reboot_notifier(&s3c2416_cpufreq_reboot_notifier); register_reboot_notifier(&s3c2416_cpufreq_reboot_notifier);
return 0; return 0;
err_freq_table:
#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE #ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
regulator_put(s3c_freq->vddarm);
err_vddarm: err_vddarm:
#endif
clk_put(s3c_freq->armclk); clk_put(s3c_freq->armclk);
#endif
err_armclk: err_armclk:
clk_put(s3c_freq->hclk); clk_put(s3c_freq->hclk);
err_hclk: err_hclk:
......
...@@ -144,7 +144,6 @@ static void s3c64xx_cpufreq_config_regulator(void) ...@@ -144,7 +144,6 @@ static void s3c64xx_cpufreq_config_regulator(void)
static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy) static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
{ {
int ret;
struct cpufreq_frequency_table *freq; struct cpufreq_frequency_table *freq;
if (policy->cpu != 0) if (policy->cpu != 0)
...@@ -165,8 +164,7 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy) ...@@ -165,8 +164,7 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
#ifdef CONFIG_REGULATOR #ifdef CONFIG_REGULATOR
vddarm = regulator_get(NULL, "vddarm"); vddarm = regulator_get(NULL, "vddarm");
if (IS_ERR(vddarm)) { if (IS_ERR(vddarm)) {
ret = PTR_ERR(vddarm); pr_err("Failed to obtain VDDARM: %ld\n", PTR_ERR(vddarm));
pr_err("Failed to obtain VDDARM: %d\n", ret);
pr_err("Only frequency scaling available\n"); pr_err("Only frequency scaling available\n");
vddarm = NULL; vddarm = NULL;
} else { } else {
...@@ -196,16 +194,9 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy) ...@@ -196,16 +194,9 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
* the PLLs, which we don't currently) is ~300us worst case, * the PLLs, which we don't currently) is ~300us worst case,
* but add some fudge. * but add some fudge.
*/ */
ret = cpufreq_generic_init(policy, s3c64xx_freq_table, cpufreq_generic_init(policy, s3c64xx_freq_table,
(500 * 1000) + regulator_latency); (500 * 1000) + regulator_latency);
if (ret != 0) { return 0;
pr_err("Failed to configure frequency table: %d\n",
ret);
regulator_put(vddarm);
clk_put(policy->clk);
}
return ret;
} }
static struct cpufreq_driver s3c64xx_cpufreq_driver = { static struct cpufreq_driver s3c64xx_cpufreq_driver = {
......
...@@ -541,7 +541,8 @@ static int s5pv210_cpu_init(struct cpufreq_policy *policy) ...@@ -541,7 +541,8 @@ static int s5pv210_cpu_init(struct cpufreq_policy *policy)
s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk); s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);
policy->suspend_freq = SLEEP_FREQ; policy->suspend_freq = SLEEP_FREQ;
return cpufreq_generic_init(policy, s5pv210_freq_table, 40000); cpufreq_generic_init(policy, s5pv210_freq_table, 40000);
return 0;
out_dmc1: out_dmc1:
clk_put(dmc0_clk); clk_put(dmc0_clk);
......
...@@ -181,7 +181,8 @@ static int sa1100_target(struct cpufreq_policy *policy, unsigned int ppcr) ...@@ -181,7 +181,8 @@ static int sa1100_target(struct cpufreq_policy *policy, unsigned int ppcr)
static int __init sa1100_cpu_init(struct cpufreq_policy *policy) static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
{ {
return cpufreq_generic_init(policy, sa11x0_freq_table, 0); cpufreq_generic_init(policy, sa11x0_freq_table, 0);
return 0;
} }
static struct cpufreq_driver sa1100_driver __refdata = { static struct cpufreq_driver sa1100_driver __refdata = {
......
...@@ -303,7 +303,8 @@ static int sa1110_target(struct cpufreq_policy *policy, unsigned int ppcr) ...@@ -303,7 +303,8 @@ static int sa1110_target(struct cpufreq_policy *policy, unsigned int ppcr)
static int __init sa1110_cpu_init(struct cpufreq_policy *policy) static int __init sa1110_cpu_init(struct cpufreq_policy *policy)
{ {
return cpufreq_generic_init(policy, sa11x0_freq_table, 0); cpufreq_generic_init(policy, sa11x0_freq_table, 0);
return 0;
} }
/* sa1110_driver needs __refdata because it must remain after init registers /* sa1110_driver needs __refdata because it must remain after init registers
......
...@@ -153,8 +153,9 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy, ...@@ -153,8 +153,9 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
static int spear_cpufreq_init(struct cpufreq_policy *policy) static int spear_cpufreq_init(struct cpufreq_policy *policy)
{ {
policy->clk = spear_cpufreq.clk; policy->clk = spear_cpufreq.clk;
return cpufreq_generic_init(policy, spear_cpufreq.freq_tbl, cpufreq_generic_init(policy, spear_cpufreq.freq_tbl,
spear_cpufreq.transition_latency); spear_cpufreq.transition_latency);
return 0;
} }
static struct cpufreq_driver spear_cpufreq_driver = { static struct cpufreq_driver spear_cpufreq_driver = {
......
...@@ -118,17 +118,11 @@ static int tegra_target(struct cpufreq_policy *policy, unsigned int index) ...@@ -118,17 +118,11 @@ static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
static int tegra_cpu_init(struct cpufreq_policy *policy) static int tegra_cpu_init(struct cpufreq_policy *policy)
{ {
struct tegra20_cpufreq *cpufreq = cpufreq_get_driver_data(); struct tegra20_cpufreq *cpufreq = cpufreq_get_driver_data();
int ret;
clk_prepare_enable(cpufreq->cpu_clk); clk_prepare_enable(cpufreq->cpu_clk);
/* FIXME: what's the actual transition time? */ /* FIXME: what's the actual transition time? */
ret = cpufreq_generic_init(policy, freq_table, 300 * 1000); cpufreq_generic_init(policy, freq_table, 300 * 1000);
if (ret) {
clk_disable_unprepare(cpufreq->cpu_clk);
return ret;
}
policy->clk = cpufreq->cpu_clk; policy->clk = cpufreq->cpu_clk;
policy->suspend_freq = freq_table[0].frequency; policy->suspend_freq = freq_table[0].frequency;
return 0; return 0;
......
...@@ -110,7 +110,7 @@ int cpuidle_governor_latency_req(unsigned int cpu) ...@@ -110,7 +110,7 @@ int cpuidle_governor_latency_req(unsigned int cpu)
{ {
int global_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); int global_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
struct device *device = get_cpu_device(cpu); struct device *device = get_cpu_device(cpu);
int device_req = dev_pm_qos_raw_read_value(device); int device_req = dev_pm_qos_raw_resume_latency(device);
return device_req < global_req ? device_req : global_req; return device_req < global_req ? device_req : global_req;
} }
...@@ -16,14 +16,17 @@ menuconfig POWERCAP ...@@ -16,14 +16,17 @@ menuconfig POWERCAP
if POWERCAP if POWERCAP
# Client driver configurations go here. # Client driver configurations go here.
config INTEL_RAPL_CORE
tristate
config INTEL_RAPL config INTEL_RAPL
tristate "Intel RAPL Support" tristate "Intel RAPL Support via MSR Interface"
depends on X86 && IOSF_MBI depends on X86 && IOSF_MBI
default n select INTEL_RAPL_CORE
---help--- ---help---
This enables support for the Intel Running Average Power Limit (RAPL) This enables support for the Intel Running Average Power Limit (RAPL)
technology which allows power limits to be enforced and monitored on technology via MSR interface, which allows power limits to be enforced
modern Intel processors (Sandy Bridge and later). and monitored on modern Intel processors (Sandy Bridge and later).
In RAPL, the platform level settings are divided into domains for In RAPL, the platform level settings are divided into domains for
fine grained control. These domains include processor package, DRAM fine grained control. These domains include processor package, DRAM
......
# SPDX-License-Identifier: GPL-2.0-only # SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_POWERCAP) += powercap_sys.o obj-$(CONFIG_POWERCAP) += powercap_sys.o
obj-$(CONFIG_INTEL_RAPL) += intel_rapl.o obj-$(CONFIG_INTEL_RAPL_CORE) += intel_rapl_common.o
obj-$(CONFIG_INTEL_RAPL) += intel_rapl_msr.o
obj-$(CONFIG_IDLE_INJECT) += idle_inject.o obj-$(CONFIG_IDLE_INJECT) += idle_inject.o
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel Running Average Power Limit (RAPL) Driver via MSR interface
* Copyright (c) 2019, Intel Corporation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>
#include <linux/powercap.h>
#include <linux/suspend.h>
#include <linux/intel_rapl.h>
#include <linux/processor.h>
#include <linux/platform_device.h>
#include <asm/iosf_mbi.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
/* Local defines */
#define MSR_PLATFORM_POWER_LIMIT	0x0000065C

/*
 * Private data for the RAPL MSR interface.
 *
 * Each .regs[domain] row lists MSR addresses in enum rapl_domain_reg_id
 * order: limit, status, perf, policy, info.  A zero entry means the
 * register does not exist for that domain.
 */
static struct rapl_if_priv rapl_msr_priv = {
	.reg_unit = MSR_RAPL_POWER_UNIT,
	.regs[RAPL_DOMAIN_PACKAGE] = {
		MSR_PKG_POWER_LIMIT, MSR_PKG_ENERGY_STATUS, MSR_PKG_PERF_STATUS, 0, MSR_PKG_POWER_INFO },
	.regs[RAPL_DOMAIN_PP0] = {
		MSR_PP0_POWER_LIMIT, MSR_PP0_ENERGY_STATUS, 0, MSR_PP0_POLICY, 0 },
	.regs[RAPL_DOMAIN_PP1] = {
		MSR_PP1_POWER_LIMIT, MSR_PP1_ENERGY_STATUS, 0, MSR_PP1_POLICY, 0 },
	.regs[RAPL_DOMAIN_DRAM] = {
		MSR_DRAM_POWER_LIMIT, MSR_DRAM_ENERGY_STATUS, MSR_DRAM_PERF_STATUS, 0, MSR_DRAM_POWER_INFO },
	.regs[RAPL_DOMAIN_PLATFORM] = {
		MSR_PLATFORM_POWER_LIMIT, MSR_PLATFORM_ENERGY_STATUS, 0, 0, 0},
	/* the package domain supports two power limits */
	.limits[RAPL_DOMAIN_PACKAGE] = 2,
};
/*
 * Handles CPU hotplug on multi-socket systems.
 * If a CPU goes online as the first CPU of the physical package
 * we add the RAPL package to the system. Similarly, when the last
 * CPU of the package is removed, we remove the RAPL package and its
 * associated domains. Cooling devices are handled accordingly at
 * per-domain level.
 */
static int rapl_cpu_online(unsigned int cpu)
{
	struct rapl_package *rp;

	/* Register the package on first sight of one of its CPUs. */
	rp = rapl_find_package_domain(cpu, &rapl_msr_priv);
	if (!rp) {
		rp = rapl_add_package(cpu, &rapl_msr_priv);
		if (IS_ERR(rp))
			return PTR_ERR(rp);
	}
	cpumask_set_cpu(cpu, &rp->cpumask);
	return 0;
}
/*
 * CPU hotplug "down prepare" callback for the MSR interface.
 *
 * Removes @cpu from its package cpumask.  If it was the last CPU of
 * the package, the whole RAPL package is torn down; if it was the
 * lead CPU, leadership moves to another online CPU of the package.
 */
static int rapl_cpu_down_prep(unsigned int cpu)
{
	struct rapl_package *rp;
	int lead_cpu;

	rp = rapl_find_package_domain(cpu, &rapl_msr_priv);
	if (!rp)
		return 0;

	cpumask_clear_cpu(cpu, &rp->cpumask);
	lead_cpu = cpumask_first(&rp->cpumask);
	if (lead_cpu >= nr_cpu_ids)
		rapl_remove_package(rp);
	else if (rp->lead_cpu == cpu)
		rp->lead_cpu = lead_cpu;
	return 0;
}
/*
 * Read a RAPL MSR on @cpu into @ra->value, keeping only the bits in
 * @ra->mask.  Returns 0 on success, -EIO if the MSR read faults.
 */
static int rapl_msr_read_raw(int cpu, struct reg_action *ra)
{
	u32 msr = (u32)ra->reg;

	if (rdmsrl_safe_on_cpu(cpu, msr, &ra->value)) {
		pr_debug("failed to read msr 0x%x on cpu %d\n", msr, cpu);
		return -EIO;
	}
	ra->value &= ra->mask;
	return 0;
}
/*
 * SMP helper executed on the target CPU: read-modify-write the MSR
 * described by @info (a struct reg_action), updating only the bits
 * selected by ->mask.  The result is reported back through ra->err.
 */
static void rapl_msr_update_func(void *info)
{
	struct reg_action *ra = info;
	u32 msr = (u32)ra->reg;
	u64 val;

	ra->err = rdmsrl_safe(msr, &val);
	if (ra->err)
		return;

	val &= ~ra->mask;
	val |= ra->value;

	ra->err = wrmsrl_safe(msr, val);
}
/*
 * Perform a masked MSR write on @cpu by running rapl_msr_update_func()
 * there via a synchronous cross-call.  Returns the cross-call error,
 * or the error reported by the update function itself.
 */
static int rapl_msr_write_raw(int cpu, struct reg_action *ra)
{
	int ret;

	ret = smp_call_function_single(cpu, rapl_msr_update_func, ra, 1);
	if (WARN_ON_ONCE(ret))
		return ret;

	return ra->err;
}
/*
 * Platform-device probe for the MSR-based RAPL interface.
 *
 * Registers the "intel-rapl" powercap control type, installs the CPU
 * hotplug callbacks that enumerate RAPL packages, and registers the
 * optional platform (PSys) domain.  PSys registration failure is not
 * fatal, since not all parts support it.
 *
 * Returns 0 on success or a negative error code.
 */
static int rapl_msr_probe(struct platform_device *pdev)
{
	int ret;

	rapl_msr_priv.read_raw = rapl_msr_read_raw;
	rapl_msr_priv.write_raw = rapl_msr_write_raw;

	rapl_msr_priv.control_type = powercap_register_control_type(NULL, "intel-rapl", NULL);
	if (IS_ERR(rapl_msr_priv.control_type)) {
		pr_debug("failed to register powercap control_type.\n");
		return PTR_ERR(rapl_msr_priv.control_type);
	}

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powercap/rapl:online",
				rapl_cpu_online, rapl_cpu_down_prep);
	if (ret < 0)
		goto out_unregister;
	rapl_msr_priv.pcap_rapl_online = ret;

	/* Don't bail out if PSys is not supported */
	rapl_add_platform_domain(&rapl_msr_priv);

	return 0;

out_unregister:
	/*
	 * Only reached with ret < 0, so no extra check is needed before
	 * undoing the control type registration.
	 */
	powercap_unregister_control_type(rapl_msr_priv.control_type);
	return ret;
}
/*
 * Undo rapl_msr_probe() in reverse order: hotplug callbacks, the
 * platform (PSys) domain, then the powercap control type.
 */
static int rapl_msr_remove(struct platform_device *pdev)
{
	cpuhp_remove_state(rapl_msr_priv.pcap_rapl_online);
	rapl_remove_platform_domain(&rapl_msr_priv);
	powercap_unregister_control_type(rapl_msr_priv.control_type);
	return 0;
}
/*
 * Matched against the "intel_rapl_msr" platform device; the device
 * table also enables module autoloading.
 */
static const struct platform_device_id rapl_msr_ids[] = {
	{ .name = "intel_rapl_msr", },
	{}
};
MODULE_DEVICE_TABLE(platform, rapl_msr_ids);

static struct platform_driver intel_rapl_msr_driver = {
	.probe = rapl_msr_probe,
	.remove = rapl_msr_remove,
	.id_table = rapl_msr_ids,
	.driver = {
		.name = "intel_rapl_msr",
	},
};

module_platform_driver(intel_rapl_msr_driver);

MODULE_DESCRIPTION("Driver for Intel RAPL (Running Average Power Limit) control via MSR interface");
MODULE_AUTHOR("Zhang Rui <rui.zhang@intel.com>");
MODULE_LICENSE("GPL v2");
...@@ -40,4 +40,10 @@ config INT3406_THERMAL ...@@ -40,4 +40,10 @@ config INT3406_THERMAL
brightness in order to address a thermal condition or to reduce brightness in order to address a thermal condition or to reduce
power consumed by display device. power consumed by display device.
config PROC_THERMAL_MMIO_RAPL
bool
depends on 64BIT
depends on POWERCAP
select INTEL_RAPL_CORE
default y
endif endif
...@@ -11,6 +11,8 @@ ...@@ -11,6 +11,8 @@
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/acpi.h> #include <linux/acpi.h>
#include <linux/thermal.h> #include <linux/thermal.h>
#include <linux/cpuhotplug.h>
#include <linux/intel_rapl.h>
#include "int340x_thermal_zone.h" #include "int340x_thermal_zone.h"
#include "../intel_soc_dts_iosf.h" #include "../intel_soc_dts_iosf.h"
...@@ -37,6 +39,8 @@ ...@@ -37,6 +39,8 @@
/* GeminiLake thermal reporting device */ /* GeminiLake thermal reporting device */
#define PCI_DEVICE_ID_PROC_GLK_THERMAL 0x318C #define PCI_DEVICE_ID_PROC_GLK_THERMAL 0x318C
#define DRV_NAME "proc_thermal"
struct power_config { struct power_config {
u32 index; u32 index;
u32 min_uw; u32 min_uw;
...@@ -52,6 +56,7 @@ struct proc_thermal_device { ...@@ -52,6 +56,7 @@ struct proc_thermal_device {
struct power_config power_limits[2]; struct power_config power_limits[2];
struct int34x_thermal_zone *int340x_zone; struct int34x_thermal_zone *int340x_zone;
struct intel_soc_dts_sensors *soc_dts; struct intel_soc_dts_sensors *soc_dts;
void __iomem *mmio_base;
}; };
enum proc_thermal_emum_mode_type { enum proc_thermal_emum_mode_type {
...@@ -60,6 +65,12 @@ enum proc_thermal_emum_mode_type { ...@@ -60,6 +65,12 @@ enum proc_thermal_emum_mode_type {
PROC_THERMAL_PLATFORM_DEV PROC_THERMAL_PLATFORM_DEV
}; };
/*
 * Per-device table of RAPL MMIO register offsets (relative to the
 * mapped MCHBAR region) and supported power-limit counts.
 */
struct rapl_mmio_regs {
	u64 reg_unit;					/* offset of the unit register */
	u64 regs[RAPL_DOMAIN_MAX][RAPL_DOMAIN_REG_MAX];	/* 0 = register not present */
	int limits[RAPL_DOMAIN_MAX];			/* power limits per domain */
};
/* /*
* We can have only one type of enumeration, PCI or Platform, * We can have only one type of enumeration, PCI or Platform,
* not both. So we don't need instance specific data. * not both. So we don't need instance specific data.
...@@ -367,8 +378,151 @@ static irqreturn_t proc_thermal_pci_msi_irq(int irq, void *devid) ...@@ -367,8 +378,151 @@ static irqreturn_t proc_thermal_pci_msi_irq(int irq, void *devid)
return IRQ_HANDLED; return IRQ_HANDLED;
} }
#ifdef CONFIG_PROC_THERMAL_MMIO_RAPL
#define MCHBAR 0
/* RAPL Support via MMIO interface */
static struct rapl_if_priv rapl_mmio_priv;
/*
 * CPU hotplug "online" callback for the MMIO interface.  CPUs outside
 * package 0 are ignored; otherwise the package is registered on first
 * sight of one of its CPUs and the CPU is added to its cpumask.
 */
static int rapl_mmio_cpu_online(unsigned int cpu)
{
	struct rapl_package *rp;

	/* mmio rapl supports package 0 only for now */
	if (topology_physical_package_id(cpu))
		return 0;

	rp = rapl_find_package_domain(cpu, &rapl_mmio_priv);
	if (!rp) {
		rp = rapl_add_package(cpu, &rapl_mmio_priv);
		if (IS_ERR(rp))
			return PTR_ERR(rp);
	}
	cpumask_set_cpu(cpu, &rp->cpumask);
	return 0;
}
/*
 * CPU hotplug "down prepare" callback for the MMIO interface.
 *
 * Removes @cpu from its package cpumask; tears the package down when
 * the last CPU leaves, or hands lead-CPU duty to another online CPU.
 */
static int rapl_mmio_cpu_down_prep(unsigned int cpu)
{
	struct rapl_package *rp;
	int lead_cpu;

	rp = rapl_find_package_domain(cpu, &rapl_mmio_priv);
	if (!rp)
		return 0;

	cpumask_clear_cpu(cpu, &rp->cpumask);
	lead_cpu = cpumask_first(&rp->cpumask);
	if (lead_cpu >= nr_cpu_ids)
		rapl_remove_package(rp);
	else if (rp->lead_cpu == cpu)
		rp->lead_cpu = lead_cpu;
	return 0;
}
/*
 * Read a 64-bit RAPL MMIO register.  ra->reg holds the kernel virtual
 * address computed in proc_thermal_rapl_add(); zero means the register
 * does not exist for this domain.  The cpu argument is unused: MMIO
 * access does not need to run on a particular CPU.
 */
static int rapl_mmio_read_raw(int cpu, struct reg_action *ra)
{
	if (!ra->reg)
		return -EINVAL;

	ra->value = readq((void __iomem *)ra->reg);
	ra->value &= ra->mask;
	return 0;
}
/*
 * Read-modify-write a 64-bit RAPL MMIO register, changing only the
 * bits selected by ra->mask.  Like the read path, this is CPU-agnostic.
 */
static int rapl_mmio_write_raw(int cpu, struct reg_action *ra)
{
	u64 val;

	if (!ra->reg)
		return -EINVAL;

	val = readq((void __iomem *)ra->reg);
	val &= ~ra->mask;
	val |= ra->value;
	writeq(val, (void __iomem *)ra->reg);
	return 0;
}
/*
 * Set up the MMIO RAPL interface for @pdev.
 *
 * Maps the MCHBAR PCI region and translates the register offsets from
 * @rapl_regs into kernel virtual addresses stored in rapl_mmio_priv,
 * then registers the "intel-rapl-mmio" powercap control type and the
 * CPU hotplug callbacks that enumerate the package domains.
 *
 * Returns 0 on success (including when @rapl_regs is NULL, i.e. the
 * device has no MMIO RAPL support) or a negative error code.
 */
static int proc_thermal_rapl_add(struct pci_dev *pdev,
				 struct proc_thermal_device *proc_priv,
				 struct rapl_mmio_regs *rapl_regs)
{
	enum rapl_domain_reg_id reg;
	enum rapl_domain_type domain;
	int ret;

	if (!rapl_regs)
		return 0;

	/* Managed mapping: released automatically on driver detach. */
	ret = pcim_iomap_regions(pdev, 1 << MCHBAR, DRV_NAME);
	if (ret) {
		dev_err(&pdev->dev, "cannot reserve PCI memory region\n");
		return -ENOMEM;
	}

	proc_priv->mmio_base = pcim_iomap_table(pdev)[MCHBAR];

	/* A zero offset means the register is absent for that domain. */
	for (domain = RAPL_DOMAIN_PACKAGE; domain < RAPL_DOMAIN_MAX; domain++) {
		for (reg = RAPL_DOMAIN_REG_LIMIT; reg < RAPL_DOMAIN_REG_MAX; reg++)
			if (rapl_regs->regs[domain][reg])
				rapl_mmio_priv.regs[domain][reg] =
						(u64)proc_priv->mmio_base +
						rapl_regs->regs[domain][reg];
		rapl_mmio_priv.limits[domain] = rapl_regs->limits[domain];
	}
	rapl_mmio_priv.reg_unit = (u64)proc_priv->mmio_base + rapl_regs->reg_unit;

	rapl_mmio_priv.read_raw = rapl_mmio_read_raw;
	rapl_mmio_priv.write_raw = rapl_mmio_write_raw;

	rapl_mmio_priv.control_type = powercap_register_control_type(NULL, "intel-rapl-mmio", NULL);
	if (IS_ERR(rapl_mmio_priv.control_type)) {
		pr_debug("failed to register powercap control_type.\n");
		return PTR_ERR(rapl_mmio_priv.control_type);
	}

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powercap/rapl:online",
				rapl_mmio_cpu_online, rapl_mmio_cpu_down_prep);
	if (ret < 0) {
		powercap_unregister_control_type(rapl_mmio_priv.control_type);
		return ret;
	}
	rapl_mmio_priv.pcap_rapl_online = ret;

	return 0;
}
/* Tear down the MMIO RAPL interface: hotplug state, then control type. */
static void proc_thermal_rapl_remove(void)
{
	cpuhp_remove_state(rapl_mmio_priv.pcap_rapl_online);
	powercap_unregister_control_type(rapl_mmio_priv.control_type);
}
/*
 * MCHBAR-relative RAPL register offsets (named after Haswell-era client
 * parts; attached to the SKL thermal device in proc_thermal_pci_ids).
 * Package and DRAM domains each expose two power limits.
 */
static const struct rapl_mmio_regs rapl_mmio_hsw = {
	.reg_unit = 0x5938,
	.regs[RAPL_DOMAIN_PACKAGE] = { 0x59a0, 0x593c, 0x58f0, 0, 0x5930},
	.regs[RAPL_DOMAIN_DRAM] = { 0x58e0, 0x58e8, 0x58ec, 0, 0},
	.limits[RAPL_DOMAIN_PACKAGE] = 2,
	.limits[RAPL_DOMAIN_DRAM] = 2,
};
#else
/* Stubs used when CONFIG_PROC_THERMAL_MMIO_RAPL is not enabled. */
static int proc_thermal_rapl_add(struct pci_dev *pdev,
				 struct proc_thermal_device *proc_priv,
				 struct rapl_mmio_regs *rapl_regs)
{
	return 0;
}
static void proc_thermal_rapl_remove(void) {}
/* Zero-initialized: all offsets 0, so nothing would be registered. */
static const struct rapl_mmio_regs rapl_mmio_hsw;
#endif /* CONFIG_PROC_THERMAL_MMIO_RAPL */
static int proc_thermal_pci_probe(struct pci_dev *pdev, static int proc_thermal_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *unused) const struct pci_device_id *id)
{ {
struct proc_thermal_device *proc_priv; struct proc_thermal_device *proc_priv;
int ret; int ret;
...@@ -378,15 +532,21 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev, ...@@ -378,15 +532,21 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev,
return -ENODEV; return -ENODEV;
} }
ret = pci_enable_device(pdev); ret = pcim_enable_device(pdev);
if (ret < 0) { if (ret < 0) {
dev_err(&pdev->dev, "error: could not enable device\n"); dev_err(&pdev->dev, "error: could not enable device\n");
return ret; return ret;
} }
ret = proc_thermal_add(&pdev->dev, &proc_priv); ret = proc_thermal_add(&pdev->dev, &proc_priv);
if (ret)
return ret;
ret = proc_thermal_rapl_add(pdev, proc_priv,
(struct rapl_mmio_regs *)id->driver_data);
if (ret) { if (ret) {
pci_disable_device(pdev); dev_err(&pdev->dev, "failed to add RAPL MMIO interface\n");
proc_thermal_remove(proc_priv);
return ret; return ret;
} }
...@@ -439,8 +599,8 @@ static void proc_thermal_pci_remove(struct pci_dev *pdev) ...@@ -439,8 +599,8 @@ static void proc_thermal_pci_remove(struct pci_dev *pdev)
pci_disable_msi(pdev); pci_disable_msi(pdev);
} }
} }
proc_thermal_rapl_remove();
proc_thermal_remove(proc_priv); proc_thermal_remove(proc_priv);
pci_disable_device(pdev);
} }
#ifdef CONFIG_PM_SLEEP #ifdef CONFIG_PM_SLEEP
...@@ -462,7 +622,8 @@ static SIMPLE_DEV_PM_OPS(proc_thermal_pm, NULL, proc_thermal_resume); ...@@ -462,7 +622,8 @@ static SIMPLE_DEV_PM_OPS(proc_thermal_pm, NULL, proc_thermal_resume);
static const struct pci_device_id proc_thermal_pci_ids[] = { static const struct pci_device_id proc_thermal_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BDW_THERMAL)}, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BDW_THERMAL)},
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_HSB_THERMAL)}, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_HSB_THERMAL)},
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_SKL_THERMAL)}, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_SKL_THERMAL),
.driver_data = (kernel_ulong_t)&rapl_mmio_hsw, },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BSW_THERMAL)}, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BSW_THERMAL)},
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BXT0_THERMAL)}, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BXT0_THERMAL)},
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BXT1_THERMAL)}, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BXT1_THERMAL)},
...@@ -477,7 +638,7 @@ static const struct pci_device_id proc_thermal_pci_ids[] = { ...@@ -477,7 +638,7 @@ static const struct pci_device_id proc_thermal_pci_ids[] = {
MODULE_DEVICE_TABLE(pci, proc_thermal_pci_ids); MODULE_DEVICE_TABLE(pci, proc_thermal_pci_ids);
static struct pci_driver proc_thermal_pci_driver = { static struct pci_driver proc_thermal_pci_driver = {
.name = "proc_thermal", .name = DRV_NAME,
.probe = proc_thermal_pci_probe, .probe = proc_thermal_pci_probe,
.remove = proc_thermal_pci_remove, .remove = proc_thermal_pci_remove,
.id_table = proc_thermal_pci_ids, .id_table = proc_thermal_pci_ids,
......
...@@ -47,11 +47,6 @@ struct cpufreq_cpuinfo { ...@@ -47,11 +47,6 @@ struct cpufreq_cpuinfo {
unsigned int transition_latency; unsigned int transition_latency;
}; };
struct cpufreq_user_policy {
unsigned int min; /* in kHz */
unsigned int max; /* in kHz */
};
struct cpufreq_policy { struct cpufreq_policy {
/* CPUs sharing clock, require sw coordination */ /* CPUs sharing clock, require sw coordination */
cpumask_var_t cpus; /* Online CPUs only */ cpumask_var_t cpus; /* Online CPUs only */
...@@ -81,7 +76,8 @@ struct cpufreq_policy { ...@@ -81,7 +76,8 @@ struct cpufreq_policy {
struct work_struct update; /* if update_policy() needs to be struct work_struct update; /* if update_policy() needs to be
* called, but you're in IRQ context */ * called, but you're in IRQ context */
struct cpufreq_user_policy user_policy; struct dev_pm_qos_request *min_freq_req;
struct dev_pm_qos_request *max_freq_req;
struct cpufreq_frequency_table *freq_table; struct cpufreq_frequency_table *freq_table;
enum cpufreq_table_sorting freq_table_sorted; enum cpufreq_table_sorting freq_table_sorted;
...@@ -144,6 +140,9 @@ struct cpufreq_policy { ...@@ -144,6 +140,9 @@ struct cpufreq_policy {
/* Pointer to the cooling device if used for thermal mitigation */ /* Pointer to the cooling device if used for thermal mitigation */
struct thermal_cooling_device *cdev; struct thermal_cooling_device *cdev;
struct notifier_block nb_min;
struct notifier_block nb_max;
}; };
struct cpufreq_freqs { struct cpufreq_freqs {
...@@ -201,6 +200,7 @@ void cpufreq_cpu_release(struct cpufreq_policy *policy); ...@@ -201,6 +200,7 @@ void cpufreq_cpu_release(struct cpufreq_policy *policy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
int cpufreq_set_policy(struct cpufreq_policy *policy, int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_policy *new_policy); struct cpufreq_policy *new_policy);
void refresh_frequency_limits(struct cpufreq_policy *policy);
void cpufreq_update_policy(unsigned int cpu); void cpufreq_update_policy(unsigned int cpu);
void cpufreq_update_limits(unsigned int cpu); void cpufreq_update_limits(unsigned int cpu);
bool have_governor_per_policy(void); bool have_governor_per_policy(void);
...@@ -992,7 +992,7 @@ extern struct freq_attr *cpufreq_generic_attr[]; ...@@ -992,7 +992,7 @@ extern struct freq_attr *cpufreq_generic_attr[];
int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy); int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy);
unsigned int cpufreq_generic_get(unsigned int cpu); unsigned int cpufreq_generic_get(unsigned int cpu);
int cpufreq_generic_init(struct cpufreq_policy *policy, void cpufreq_generic_init(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table, struct cpufreq_frequency_table *table,
unsigned int transition_latency); unsigned int transition_latency);
#endif /* _LINUX_CPUFREQ_H */ #endif /* _LINUX_CPUFREQ_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Data types and headers for RAPL support
*
* Copyright (C) 2019 Intel Corporation.
*
* Author: Zhang Rui <rui.zhang@intel.com>
*/
#ifndef __INTEL_RAPL_H__
#define __INTEL_RAPL_H__
#include <linux/types.h>
#include <linux/powercap.h>
#include <linux/cpuhotplug.h>
/* RAPL power domains; also the row index into a rapl_if_priv regs[] table. */
enum rapl_domain_type {
	RAPL_DOMAIN_PACKAGE,	/* entire package/socket */
	RAPL_DOMAIN_PP0,	/* core power plane */
	RAPL_DOMAIN_PP1,	/* graphics uncore */
	RAPL_DOMAIN_DRAM,	/* DRAM control_type */
	RAPL_DOMAIN_PLATFORM,	/* PSys control_type */
	RAPL_DOMAIN_MAX,
};
/* Index of each per-domain RAPL register within a regs[] row. */
enum rapl_domain_reg_id {
	RAPL_DOMAIN_REG_LIMIT,	/* power limit control */
	RAPL_DOMAIN_REG_STATUS,	/* energy status counter */
	RAPL_DOMAIN_REG_PERF,	/* perf/throttling status */
	RAPL_DOMAIN_REG_POLICY,	/* power plane policy */
	RAPL_DOMAIN_REG_INFO,	/* power range info */
	RAPL_DOMAIN_REG_MAX,
};
struct rapl_package;
/*
 * Individually addressable fields within the RAPL registers.  Entries
 * up to PRIORITY_LEVEL are raw register bit fields; the rest are
 * derived values.
 */
enum rapl_primitives {
	ENERGY_COUNTER,
	POWER_LIMIT1,
	POWER_LIMIT2,
	FW_LOCK,

	PL1_ENABLE,		/* power limit 1, aka long term */
	PL1_CLAMP,		/* allow frequency to go below OS request */
	PL2_ENABLE,		/* power limit 2, aka short term, instantaneous */
	PL2_CLAMP,

	TIME_WINDOW1,		/* long term */
	TIME_WINDOW2,		/* short term */
	THERMAL_SPEC_POWER,
	MAX_POWER,

	MIN_POWER,
	MAX_TIME_WINDOW,
	THROTTLED_TIME,
	PRIORITY_LEVEL,

	/* below are not raw primitive data */
	AVERAGE_POWER,
	NR_RAPL_PRIMITIVES,
};
/* Cached snapshot of a domain's primitive values. */
struct rapl_domain_data {
	u64 primitives[NR_RAPL_PRIMITIVES];
	unsigned long timestamp;	/* last update time (NOTE(review): presumably jiffies -- confirm in common code) */
};
/* Up to two power limits (long term / short term) per domain. */
#define NR_POWER_LIMITS (2)

/* One power-limit constraint exposed through the powercap zone. */
struct rapl_power_limit {
	struct powercap_zone_constraint *constraint;
	int prim_id;			/* primitive ID used to enable */
	struct rapl_domain *domain;	/* owning domain */
	const char *name;
	u64 last_power_limit;		/* saved limit (NOTE(review): likely for suspend/restore -- confirm) */
};
/*
 * One RAPL domain: a power zone within a package (package, cores,
 * graphics, DRAM or platform).
 *
 * Note: the redundant "struct rapl_package;" forward declaration that
 * preceded this struct is dropped -- it is already declared above in
 * this header.
 */
struct rapl_domain {
	const char *name;			/* powercap zone name */
	enum rapl_domain_type id;
	u64 regs[RAPL_DOMAIN_REG_MAX];		/* interface-specific register addresses */
	struct powercap_zone power_zone;
	struct rapl_domain_data rdd;		/* cached primitive values */
	struct rapl_power_limit rpl[NR_POWER_LIMITS];
	u64 attr_map;				/* track capabilities */
	unsigned int state;
	unsigned int domain_energy_unit;
	struct rapl_package *rp;		/* parent package */
};
/*
 * Descriptor for a single raw register access, passed to the
 * interface read_raw/write_raw callbacks.
 */
struct reg_action {
	u64 reg;	/* register: MSR index or mapped MMIO address */
	u64 mask;	/* bits to read or update */
	u64 value;	/* value read, or bits to write under mask */
	int err;	/* result when the access runs on another CPU */
};
/**
 * struct rapl_if_priv: private data for different RAPL interfaces
 * @control_type:		Each RAPL interface must have its own powercap
 *				control type.
 * @platform_rapl_domain:	Optional. Some RAPL interface may have platform
 *				level RAPL control.
 * @pcap_rapl_online:		CPU hotplug state for each RAPL interface.
 * @reg_unit:			Register for getting energy/power/time unit.
 * @regs:			Register sets for different RAPL Domains.
 * @limits:			Number of power limits supported by each domain.
 * @read_raw:			Callback for reading RAPL interface specific
 *				registers.
 * @write_raw:			Callback for writing RAPL interface specific
 *				registers.
 *
 * Note: @reg_unit and @regs hold interface-specific values -- MSR
 * indices for the MSR interface, mapped virtual addresses for MMIO.
 */
struct rapl_if_priv {
	struct powercap_control_type *control_type;
	struct rapl_domain *platform_rapl_domain;
	enum cpuhp_state pcap_rapl_online;
	u64 reg_unit;
	u64 regs[RAPL_DOMAIN_MAX][RAPL_DOMAIN_REG_MAX];
	int limits[RAPL_DOMAIN_MAX];
	int (*read_raw)(int cpu, struct reg_action *ra);
	int (*write_raw)(int cpu, struct reg_action *ra);
};
/* maximum rapl package domain name: package-%d-die-%d */
#define PACKAGE_DOMAIN_NAME_LENGTH 30

/*
 * One physical package (or die) with all of its RAPL domains, as seen
 * through a particular interface (@priv).
 */
struct rapl_package {
	unsigned int id;		/* logical die id, equals physical 1-die systems */
	unsigned int nr_domains;
	unsigned long domain_map;	/* bit map of active domains */
	unsigned int power_unit;
	unsigned int energy_unit;
	unsigned int time_unit;
	struct rapl_domain *domains;	/* array of domains, sized at runtime */
	struct powercap_zone *power_zone;	/* keep track of parent zone */
	unsigned long power_limit_irq;	/* keep track of package power limit
					 * notify interrupt enable status.
					 */
	struct list_head plist;
	int lead_cpu;			/* one active cpu per package for access */
	/* Track active cpus */
	struct cpumask cpumask;
	char name[PACKAGE_DOMAIN_NAME_LENGTH];
	struct rapl_if_priv *priv;	/* owning interface */
};
struct rapl_package *rapl_find_package_domain(int cpu, struct rapl_if_priv *priv);
struct rapl_package *rapl_add_package(int cpu, struct rapl_if_priv *priv);
void rapl_remove_package(struct rapl_package *rp);
int rapl_add_platform_domain(struct rapl_if_priv *priv);
void rapl_remove_platform_domain(struct rapl_if_priv *priv);
#endif /* __INTEL_RAPL_H__ */
...@@ -40,6 +40,8 @@ enum pm_qos_flags_status { ...@@ -40,6 +40,8 @@ enum pm_qos_flags_status {
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY #define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS PM_QOS_LATENCY_ANY_NS #define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS PM_QOS_LATENCY_ANY_NS
#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0 #define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
#define PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE 0
#define PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE (-1)
#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1) #define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
#define PM_QOS_FLAG_NO_POWER_OFF (1 << 0) #define PM_QOS_FLAG_NO_POWER_OFF (1 << 0)
...@@ -58,6 +60,8 @@ struct pm_qos_flags_request { ...@@ -58,6 +60,8 @@ struct pm_qos_flags_request {
enum dev_pm_qos_req_type { enum dev_pm_qos_req_type {
DEV_PM_QOS_RESUME_LATENCY = 1, DEV_PM_QOS_RESUME_LATENCY = 1,
DEV_PM_QOS_LATENCY_TOLERANCE, DEV_PM_QOS_LATENCY_TOLERANCE,
DEV_PM_QOS_MIN_FREQUENCY,
DEV_PM_QOS_MAX_FREQUENCY,
DEV_PM_QOS_FLAGS, DEV_PM_QOS_FLAGS,
}; };
...@@ -99,10 +103,14 @@ struct pm_qos_flags { ...@@ -99,10 +103,14 @@ struct pm_qos_flags {
struct dev_pm_qos { struct dev_pm_qos {
struct pm_qos_constraints resume_latency; struct pm_qos_constraints resume_latency;
struct pm_qos_constraints latency_tolerance; struct pm_qos_constraints latency_tolerance;
struct pm_qos_constraints min_frequency;
struct pm_qos_constraints max_frequency;
struct pm_qos_flags flags; struct pm_qos_flags flags;
struct dev_pm_qos_request *resume_latency_req; struct dev_pm_qos_request *resume_latency_req;
struct dev_pm_qos_request *latency_tolerance_req; struct dev_pm_qos_request *latency_tolerance_req;
struct dev_pm_qos_request *flags_req; struct dev_pm_qos_request *flags_req;
struct dev_pm_qos_request *min_frequency_req;
struct dev_pm_qos_request *max_frequency_req;
}; };
/* Action requested to pm_qos_update_target */ /* Action requested to pm_qos_update_target */
...@@ -139,16 +147,18 @@ s32 pm_qos_read_value(struct pm_qos_constraints *c); ...@@ -139,16 +147,18 @@ s32 pm_qos_read_value(struct pm_qos_constraints *c);
#ifdef CONFIG_PM #ifdef CONFIG_PM
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask); enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask); enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask);
s32 __dev_pm_qos_read_value(struct device *dev); s32 __dev_pm_qos_resume_latency(struct device *dev);
s32 dev_pm_qos_read_value(struct device *dev); s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type);
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
enum dev_pm_qos_req_type type, s32 value); enum dev_pm_qos_req_type type, s32 value);
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value); int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req); int dev_pm_qos_remove_request(struct dev_pm_qos_request *req);
int dev_pm_qos_add_notifier(struct device *dev, int dev_pm_qos_add_notifier(struct device *dev,
struct notifier_block *notifier); struct notifier_block *notifier,
enum dev_pm_qos_req_type type);
int dev_pm_qos_remove_notifier(struct device *dev, int dev_pm_qos_remove_notifier(struct device *dev,
struct notifier_block *notifier); struct notifier_block *notifier,
enum dev_pm_qos_req_type type);
void dev_pm_qos_constraints_init(struct device *dev); void dev_pm_qos_constraints_init(struct device *dev);
void dev_pm_qos_constraints_destroy(struct device *dev); void dev_pm_qos_constraints_destroy(struct device *dev);
int dev_pm_qos_add_ancestor_request(struct device *dev, int dev_pm_qos_add_ancestor_request(struct device *dev,
...@@ -174,7 +184,7 @@ static inline s32 dev_pm_qos_requested_flags(struct device *dev) ...@@ -174,7 +184,7 @@ static inline s32 dev_pm_qos_requested_flags(struct device *dev)
return dev->power.qos->flags_req->data.flr.flags; return dev->power.qos->flags_req->data.flr.flags;
} }
static inline s32 dev_pm_qos_raw_read_value(struct device *dev) static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev)
{ {
return IS_ERR_OR_NULL(dev->power.qos) ? return IS_ERR_OR_NULL(dev->power.qos) ?
PM_QOS_RESUME_LATENCY_NO_CONSTRAINT : PM_QOS_RESUME_LATENCY_NO_CONSTRAINT :
...@@ -187,10 +197,24 @@ static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, ...@@ -187,10 +197,24 @@ static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev,
s32 mask) s32 mask)
{ return PM_QOS_FLAGS_UNDEFINED; } { return PM_QOS_FLAGS_UNDEFINED; }
static inline s32 __dev_pm_qos_read_value(struct device *dev) static inline s32 __dev_pm_qos_resume_latency(struct device *dev)
{ return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; }
static inline s32 dev_pm_qos_read_value(struct device *dev)
{ return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; } { return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; }
static inline s32 dev_pm_qos_read_value(struct device *dev,
enum dev_pm_qos_req_type type)
{
switch (type) {
case DEV_PM_QOS_RESUME_LATENCY:
return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
case DEV_PM_QOS_MIN_FREQUENCY:
return PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
case DEV_PM_QOS_MAX_FREQUENCY:
return PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
default:
WARN_ON(1);
return 0;
}
}
static inline int dev_pm_qos_add_request(struct device *dev, static inline int dev_pm_qos_add_request(struct device *dev,
struct dev_pm_qos_request *req, struct dev_pm_qos_request *req,
enum dev_pm_qos_req_type type, enum dev_pm_qos_req_type type,
...@@ -202,10 +226,12 @@ static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req, ...@@ -202,10 +226,12 @@ static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{ return 0; } { return 0; }
static inline int dev_pm_qos_add_notifier(struct device *dev, static inline int dev_pm_qos_add_notifier(struct device *dev,
struct notifier_block *notifier) struct notifier_block *notifier,
enum dev_pm_qos_req_type type)
{ return 0; } { return 0; }
static inline int dev_pm_qos_remove_notifier(struct device *dev, static inline int dev_pm_qos_remove_notifier(struct device *dev,
struct notifier_block *notifier) struct notifier_block *notifier,
enum dev_pm_qos_req_type type)
{ return 0; } { return 0; }
static inline void dev_pm_qos_constraints_init(struct device *dev) static inline void dev_pm_qos_constraints_init(struct device *dev)
{ {
...@@ -241,7 +267,7 @@ static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) ...@@ -241,7 +267,7 @@ static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
} }
static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; } static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
static inline s32 dev_pm_qos_raw_read_value(struct device *dev) static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev)
{ {
return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment