Commit 5fa2845f authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'pm-5.4-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management fixes from Rafael Wysocki:
 "These fix problems related to frequency limits management in cpufreq
  that were introduced during the 5.3 cycle (when PM QoS had started to
  be used for that), fix a few issues in the OPP (operating performance
  points) library code and fix up the recently added haltpoll cpuidle
  driver.

  The cpufreq changes are somewhat bigger than I would like them to be
  at this stage of the cycle, but the problems fixed by them include
  crashes on boot and shutdown in some cases (among other things) and in
  my view it is better to address the root of the issue right away.

  Specifics:

   - Using device PM QoS of CPU devices for managing frequency limits in
     cpufreq does not work, so introduce frequency QoS (based on the
     original low-level PM QoS) for this purpose, switch cpufreq and
     related code over to using it and fix a race involving deferred
     updates of frequency limits on top of that (Rafael Wysocki, Sudeep
     Holla).

   - Avoid calling regulator_enable()/disable() from the OPP framework
     to avoid side-effects on boot-enabled regulators that may change
     their initial voltage due to performing initial voltage balancing
     without all restrictions from the consumers (Marek Szyprowski).

   - Avoid a kref management issue in the OPP library code and drop an
     incorrectly added lockdep_assert_held() from it (Viresh Kumar).

   - Make the recently added haltpoll cpuidle driver take the 'idle='
     override into account as appropriate (Zhenzhong Duan)"

* tag 'pm-5.4-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  opp: Reinitialize the list_kref before adding the static OPPs again
  cpufreq: Cancel policy update work scheduled before freeing
  cpuidle: haltpoll: Take 'idle=' override into account
  opp: core: Revert "add regulators enable and disable"
  PM: QoS: Drop frequency QoS types from device PM QoS
  cpufreq: Use per-policy frequency QoS
  PM: QoS: Introduce frequency QoS
  opp: of: drop incorrect lockdep_assert_held()
parents 65b15b7f 767d2d71
...@@ -290,14 +290,13 @@ static int acpi_processor_notifier(struct notifier_block *nb, ...@@ -290,14 +290,13 @@ static int acpi_processor_notifier(struct notifier_block *nb,
unsigned long event, void *data) unsigned long event, void *data)
{ {
struct cpufreq_policy *policy = data; struct cpufreq_policy *policy = data;
int cpu = policy->cpu;
if (event == CPUFREQ_CREATE_POLICY) { if (event == CPUFREQ_CREATE_POLICY) {
acpi_thermal_cpufreq_init(cpu); acpi_thermal_cpufreq_init(policy);
acpi_processor_ppc_init(cpu); acpi_processor_ppc_init(policy);
} else if (event == CPUFREQ_REMOVE_POLICY) { } else if (event == CPUFREQ_REMOVE_POLICY) {
acpi_processor_ppc_exit(cpu); acpi_processor_ppc_exit(policy);
acpi_thermal_cpufreq_exit(cpu); acpi_thermal_cpufreq_exit(policy);
} }
return 0; return 0;
......
...@@ -81,10 +81,10 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr) ...@@ -81,10 +81,10 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
pr->performance_platform_limit = (int)ppc; pr->performance_platform_limit = (int)ppc;
if (ppc >= pr->performance->state_count || if (ppc >= pr->performance->state_count ||
unlikely(!dev_pm_qos_request_active(&pr->perflib_req))) unlikely(!freq_qos_request_active(&pr->perflib_req)))
return 0; return 0;
ret = dev_pm_qos_update_request(&pr->perflib_req, ret = freq_qos_update_request(&pr->perflib_req,
pr->performance->states[ppc].core_frequency * 1000); pr->performance->states[ppc].core_frequency * 1000);
if (ret < 0) { if (ret < 0) {
pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n", pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
...@@ -157,28 +157,28 @@ void acpi_processor_ignore_ppc_init(void) ...@@ -157,28 +157,28 @@ void acpi_processor_ignore_ppc_init(void)
ignore_ppc = 0; ignore_ppc = 0;
} }
void acpi_processor_ppc_init(int cpu) void acpi_processor_ppc_init(struct cpufreq_policy *policy)
{ {
int cpu = policy->cpu;
struct acpi_processor *pr = per_cpu(processors, cpu); struct acpi_processor *pr = per_cpu(processors, cpu);
int ret; int ret;
if (!pr) if (!pr)
return; return;
ret = dev_pm_qos_add_request(get_cpu_device(cpu), ret = freq_qos_add_request(&policy->constraints, &pr->perflib_req,
&pr->perflib_req, DEV_PM_QOS_MAX_FREQUENCY, FREQ_QOS_MAX, INT_MAX);
INT_MAX);
if (ret < 0) if (ret < 0)
pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu, pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
ret); ret);
} }
void acpi_processor_ppc_exit(int cpu) void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
{ {
struct acpi_processor *pr = per_cpu(processors, cpu); struct acpi_processor *pr = per_cpu(processors, policy->cpu);
if (pr) if (pr)
dev_pm_qos_remove_request(&pr->perflib_req); freq_qos_remove_request(&pr->perflib_req);
} }
static int acpi_processor_get_performance_control(struct acpi_processor *pr) static int acpi_processor_get_performance_control(struct acpi_processor *pr)
......
...@@ -105,7 +105,7 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state) ...@@ -105,7 +105,7 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
pr = per_cpu(processors, i); pr = per_cpu(processors, i);
if (unlikely(!dev_pm_qos_request_active(&pr->thermal_req))) if (unlikely(!freq_qos_request_active(&pr->thermal_req)))
continue; continue;
policy = cpufreq_cpu_get(i); policy = cpufreq_cpu_get(i);
...@@ -116,7 +116,7 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state) ...@@ -116,7 +116,7 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
cpufreq_cpu_put(policy); cpufreq_cpu_put(policy);
ret = dev_pm_qos_update_request(&pr->thermal_req, max_freq); ret = freq_qos_update_request(&pr->thermal_req, max_freq);
if (ret < 0) { if (ret < 0) {
pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n", pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
pr->id, ret); pr->id, ret);
...@@ -125,28 +125,28 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state) ...@@ -125,28 +125,28 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
return 0; return 0;
} }
void acpi_thermal_cpufreq_init(int cpu) void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
{ {
int cpu = policy->cpu;
struct acpi_processor *pr = per_cpu(processors, cpu); struct acpi_processor *pr = per_cpu(processors, cpu);
int ret; int ret;
if (!pr) if (!pr)
return; return;
ret = dev_pm_qos_add_request(get_cpu_device(cpu), ret = freq_qos_add_request(&policy->constraints, &pr->thermal_req,
&pr->thermal_req, DEV_PM_QOS_MAX_FREQUENCY, FREQ_QOS_MAX, INT_MAX);
INT_MAX);
if (ret < 0) if (ret < 0)
pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu, pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
ret); ret);
} }
void acpi_thermal_cpufreq_exit(int cpu) void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
{ {
struct acpi_processor *pr = per_cpu(processors, cpu); struct acpi_processor *pr = per_cpu(processors, policy->cpu);
if (pr) if (pr)
dev_pm_qos_remove_request(&pr->thermal_req); freq_qos_remove_request(&pr->thermal_req);
} }
#else /* ! CONFIG_CPU_FREQ */ #else /* ! CONFIG_CPU_FREQ */
static int cpufreq_get_max_state(unsigned int cpu) static int cpufreq_get_max_state(unsigned int cpu)
......
...@@ -115,20 +115,10 @@ s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type) ...@@ -115,20 +115,10 @@ s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
spin_lock_irqsave(&dev->power.lock, flags); spin_lock_irqsave(&dev->power.lock, flags);
switch (type) { if (type == DEV_PM_QOS_RESUME_LATENCY) {
case DEV_PM_QOS_RESUME_LATENCY:
ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
: pm_qos_read_value(&qos->resume_latency); : pm_qos_read_value(&qos->resume_latency);
break; } else {
case DEV_PM_QOS_MIN_FREQUENCY:
ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE
: pm_qos_read_value(&qos->min_frequency);
break;
case DEV_PM_QOS_MAX_FREQUENCY:
ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE
: pm_qos_read_value(&qos->max_frequency);
break;
default:
WARN_ON(1); WARN_ON(1);
ret = 0; ret = 0;
} }
...@@ -169,14 +159,6 @@ static int apply_constraint(struct dev_pm_qos_request *req, ...@@ -169,14 +159,6 @@ static int apply_constraint(struct dev_pm_qos_request *req,
req->dev->power.set_latency_tolerance(req->dev, value); req->dev->power.set_latency_tolerance(req->dev, value);
} }
break; break;
case DEV_PM_QOS_MIN_FREQUENCY:
ret = pm_qos_update_target(&qos->min_frequency,
&req->data.pnode, action, value);
break;
case DEV_PM_QOS_MAX_FREQUENCY:
ret = pm_qos_update_target(&qos->max_frequency,
&req->data.pnode, action, value);
break;
case DEV_PM_QOS_FLAGS: case DEV_PM_QOS_FLAGS:
ret = pm_qos_update_flags(&qos->flags, &req->data.flr, ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
action, value); action, value);
...@@ -227,24 +209,6 @@ static int dev_pm_qos_constraints_allocate(struct device *dev) ...@@ -227,24 +209,6 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
c->type = PM_QOS_MIN; c->type = PM_QOS_MIN;
c = &qos->min_frequency;
plist_head_init(&c->list);
c->target_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
c->default_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
c->no_constraint_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
c->type = PM_QOS_MAX;
c->notifiers = ++n;
BLOCKING_INIT_NOTIFIER_HEAD(n);
c = &qos->max_frequency;
plist_head_init(&c->list);
c->target_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
c->default_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
c->no_constraint_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
c->type = PM_QOS_MIN;
c->notifiers = ++n;
BLOCKING_INIT_NOTIFIER_HEAD(n);
INIT_LIST_HEAD(&qos->flags.list); INIT_LIST_HEAD(&qos->flags.list);
spin_lock_irq(&dev->power.lock); spin_lock_irq(&dev->power.lock);
...@@ -305,18 +269,6 @@ void dev_pm_qos_constraints_destroy(struct device *dev) ...@@ -305,18 +269,6 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
memset(req, 0, sizeof(*req)); memset(req, 0, sizeof(*req));
} }
c = &qos->min_frequency;
plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
c = &qos->max_frequency;
plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
f = &qos->flags; f = &qos->flags;
list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) { list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
...@@ -428,8 +380,6 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req, ...@@ -428,8 +380,6 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
switch(req->type) { switch(req->type) {
case DEV_PM_QOS_RESUME_LATENCY: case DEV_PM_QOS_RESUME_LATENCY:
case DEV_PM_QOS_LATENCY_TOLERANCE: case DEV_PM_QOS_LATENCY_TOLERANCE:
case DEV_PM_QOS_MIN_FREQUENCY:
case DEV_PM_QOS_MAX_FREQUENCY:
curr_value = req->data.pnode.prio; curr_value = req->data.pnode.prio;
break; break;
case DEV_PM_QOS_FLAGS: case DEV_PM_QOS_FLAGS:
...@@ -557,14 +507,6 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier, ...@@ -557,14 +507,6 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers, ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
notifier); notifier);
break; break;
case DEV_PM_QOS_MIN_FREQUENCY:
ret = blocking_notifier_chain_register(dev->power.qos->min_frequency.notifiers,
notifier);
break;
case DEV_PM_QOS_MAX_FREQUENCY:
ret = blocking_notifier_chain_register(dev->power.qos->max_frequency.notifiers,
notifier);
break;
default: default:
WARN_ON(1); WARN_ON(1);
ret = -EINVAL; ret = -EINVAL;
...@@ -604,14 +546,6 @@ int dev_pm_qos_remove_notifier(struct device *dev, ...@@ -604,14 +546,6 @@ int dev_pm_qos_remove_notifier(struct device *dev,
ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers, ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
notifier); notifier);
break; break;
case DEV_PM_QOS_MIN_FREQUENCY:
ret = blocking_notifier_chain_unregister(dev->power.qos->min_frequency.notifiers,
notifier);
break;
case DEV_PM_QOS_MAX_FREQUENCY:
ret = blocking_notifier_chain_unregister(dev->power.qos->max_frequency.notifiers,
notifier);
break;
default: default:
WARN_ON(1); WARN_ON(1);
ret = -EINVAL; ret = -EINVAL;
......
...@@ -720,7 +720,7 @@ static ssize_t store_##file_name \ ...@@ -720,7 +720,7 @@ static ssize_t store_##file_name \
if (ret != 1) \ if (ret != 1) \
return -EINVAL; \ return -EINVAL; \
\ \
ret = dev_pm_qos_update_request(policy->object##_freq_req, val);\ ret = freq_qos_update_request(policy->object##_freq_req, val);\
return ret >= 0 ? count : ret; \ return ret >= 0 ? count : ret; \
} }
...@@ -1202,19 +1202,21 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu) ...@@ -1202,19 +1202,21 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
goto err_free_real_cpus; goto err_free_real_cpus;
} }
freq_constraints_init(&policy->constraints);
policy->nb_min.notifier_call = cpufreq_notifier_min; policy->nb_min.notifier_call = cpufreq_notifier_min;
policy->nb_max.notifier_call = cpufreq_notifier_max; policy->nb_max.notifier_call = cpufreq_notifier_max;
ret = dev_pm_qos_add_notifier(dev, &policy->nb_min, ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
DEV_PM_QOS_MIN_FREQUENCY); &policy->nb_min);
if (ret) { if (ret) {
dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n", dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
ret, cpumask_pr_args(policy->cpus)); ret, cpumask_pr_args(policy->cpus));
goto err_kobj_remove; goto err_kobj_remove;
} }
ret = dev_pm_qos_add_notifier(dev, &policy->nb_max, ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
DEV_PM_QOS_MAX_FREQUENCY); &policy->nb_max);
if (ret) { if (ret) {
dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n", dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
ret, cpumask_pr_args(policy->cpus)); ret, cpumask_pr_args(policy->cpus));
...@@ -1232,8 +1234,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu) ...@@ -1232,8 +1234,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
return policy; return policy;
err_min_qos_notifier: err_min_qos_notifier:
dev_pm_qos_remove_notifier(dev, &policy->nb_min, freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
DEV_PM_QOS_MIN_FREQUENCY); &policy->nb_min);
err_kobj_remove: err_kobj_remove:
cpufreq_policy_put_kobj(policy); cpufreq_policy_put_kobj(policy);
err_free_real_cpus: err_free_real_cpus:
...@@ -1250,7 +1252,6 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu) ...@@ -1250,7 +1252,6 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
static void cpufreq_policy_free(struct cpufreq_policy *policy) static void cpufreq_policy_free(struct cpufreq_policy *policy)
{ {
struct device *dev = get_cpu_device(policy->cpu);
unsigned long flags; unsigned long flags;
int cpu; int cpu;
...@@ -1262,10 +1263,13 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy) ...@@ -1262,10 +1263,13 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
per_cpu(cpufreq_cpu_data, cpu) = NULL; per_cpu(cpufreq_cpu_data, cpu) = NULL;
write_unlock_irqrestore(&cpufreq_driver_lock, flags); write_unlock_irqrestore(&cpufreq_driver_lock, flags);
dev_pm_qos_remove_notifier(dev, &policy->nb_max, freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
DEV_PM_QOS_MAX_FREQUENCY); &policy->nb_max);
dev_pm_qos_remove_notifier(dev, &policy->nb_min, freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
DEV_PM_QOS_MIN_FREQUENCY); &policy->nb_min);
/* Cancel any pending policy->update work before freeing the policy. */
cancel_work_sync(&policy->update);
if (policy->max_freq_req) { if (policy->max_freq_req) {
/* /*
...@@ -1274,10 +1278,10 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy) ...@@ -1274,10 +1278,10 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
*/ */
blocking_notifier_call_chain(&cpufreq_policy_notifier_list, blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_REMOVE_POLICY, policy); CPUFREQ_REMOVE_POLICY, policy);
dev_pm_qos_remove_request(policy->max_freq_req); freq_qos_remove_request(policy->max_freq_req);
} }
dev_pm_qos_remove_request(policy->min_freq_req); freq_qos_remove_request(policy->min_freq_req);
kfree(policy->min_freq_req); kfree(policy->min_freq_req);
cpufreq_policy_put_kobj(policy); cpufreq_policy_put_kobj(policy);
...@@ -1357,8 +1361,6 @@ static int cpufreq_online(unsigned int cpu) ...@@ -1357,8 +1361,6 @@ static int cpufreq_online(unsigned int cpu)
cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
if (new_policy) { if (new_policy) {
struct device *dev = get_cpu_device(cpu);
for_each_cpu(j, policy->related_cpus) { for_each_cpu(j, policy->related_cpus) {
per_cpu(cpufreq_cpu_data, j) = policy; per_cpu(cpufreq_cpu_data, j) = policy;
add_cpu_dev_symlink(policy, j); add_cpu_dev_symlink(policy, j);
...@@ -1369,36 +1371,31 @@ static int cpufreq_online(unsigned int cpu) ...@@ -1369,36 +1371,31 @@ static int cpufreq_online(unsigned int cpu)
if (!policy->min_freq_req) if (!policy->min_freq_req)
goto out_destroy_policy; goto out_destroy_policy;
ret = dev_pm_qos_add_request(dev, policy->min_freq_req, ret = freq_qos_add_request(&policy->constraints,
DEV_PM_QOS_MIN_FREQUENCY, policy->min_freq_req, FREQ_QOS_MIN,
policy->min); policy->min);
if (ret < 0) { if (ret < 0) {
/* /*
* So we don't call dev_pm_qos_remove_request() for an * So we don't call freq_qos_remove_request() for an
* uninitialized request. * uninitialized request.
*/ */
kfree(policy->min_freq_req); kfree(policy->min_freq_req);
policy->min_freq_req = NULL; policy->min_freq_req = NULL;
dev_err(dev, "Failed to add min-freq constraint (%d)\n",
ret);
goto out_destroy_policy; goto out_destroy_policy;
} }
/* /*
* This must be initialized right here to avoid calling * This must be initialized right here to avoid calling
* dev_pm_qos_remove_request() on uninitialized request in case * freq_qos_remove_request() on uninitialized request in case
* of errors. * of errors.
*/ */
policy->max_freq_req = policy->min_freq_req + 1; policy->max_freq_req = policy->min_freq_req + 1;
ret = dev_pm_qos_add_request(dev, policy->max_freq_req, ret = freq_qos_add_request(&policy->constraints,
DEV_PM_QOS_MAX_FREQUENCY, policy->max_freq_req, FREQ_QOS_MAX,
policy->max); policy->max);
if (ret < 0) { if (ret < 0) {
policy->max_freq_req = NULL; policy->max_freq_req = NULL;
dev_err(dev, "Failed to add max-freq constraint (%d)\n",
ret);
goto out_destroy_policy; goto out_destroy_policy;
} }
...@@ -2374,7 +2371,6 @@ int cpufreq_set_policy(struct cpufreq_policy *policy, ...@@ -2374,7 +2371,6 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_policy *new_policy) struct cpufreq_policy *new_policy)
{ {
struct cpufreq_governor *old_gov; struct cpufreq_governor *old_gov;
struct device *cpu_dev = get_cpu_device(policy->cpu);
int ret; int ret;
pr_debug("setting new policy for CPU %u: %u - %u kHz\n", pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
...@@ -2386,8 +2382,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy, ...@@ -2386,8 +2382,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
* PM QoS framework collects all the requests from users and provide us * PM QoS framework collects all the requests from users and provide us
* the final aggregated value here. * the final aggregated value here.
*/ */
new_policy->min = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MIN_FREQUENCY); new_policy->min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
new_policy->max = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MAX_FREQUENCY); new_policy->max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
/* verify the cpu speed can be set within this limit */ /* verify the cpu speed can be set within this limit */
ret = cpufreq_driver->verify(new_policy); ret = cpufreq_driver->verify(new_policy);
...@@ -2518,7 +2514,7 @@ static int cpufreq_boost_set_sw(int state) ...@@ -2518,7 +2514,7 @@ static int cpufreq_boost_set_sw(int state)
break; break;
} }
ret = dev_pm_qos_update_request(policy->max_freq_req, policy->max); ret = freq_qos_update_request(policy->max_freq_req, policy->max);
if (ret < 0) if (ret < 0)
break; break;
} }
......
...@@ -1088,10 +1088,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, ...@@ -1088,10 +1088,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
static struct cpufreq_driver intel_pstate; static struct cpufreq_driver intel_pstate;
static void update_qos_request(enum dev_pm_qos_req_type type) static void update_qos_request(enum freq_qos_req_type type)
{ {
int max_state, turbo_max, freq, i, perf_pct; int max_state, turbo_max, freq, i, perf_pct;
struct dev_pm_qos_request *req; struct freq_qos_request *req;
struct cpufreq_policy *policy; struct cpufreq_policy *policy;
for_each_possible_cpu(i) { for_each_possible_cpu(i) {
...@@ -1112,7 +1112,7 @@ static void update_qos_request(enum dev_pm_qos_req_type type) ...@@ -1112,7 +1112,7 @@ static void update_qos_request(enum dev_pm_qos_req_type type)
else else
turbo_max = cpu->pstate.turbo_pstate; turbo_max = cpu->pstate.turbo_pstate;
if (type == DEV_PM_QOS_MIN_FREQUENCY) { if (type == FREQ_QOS_MIN) {
perf_pct = global.min_perf_pct; perf_pct = global.min_perf_pct;
} else { } else {
req++; req++;
...@@ -1122,7 +1122,7 @@ static void update_qos_request(enum dev_pm_qos_req_type type) ...@@ -1122,7 +1122,7 @@ static void update_qos_request(enum dev_pm_qos_req_type type)
freq = DIV_ROUND_UP(turbo_max * perf_pct, 100); freq = DIV_ROUND_UP(turbo_max * perf_pct, 100);
freq *= cpu->pstate.scaling; freq *= cpu->pstate.scaling;
if (dev_pm_qos_update_request(req, freq) < 0) if (freq_qos_update_request(req, freq) < 0)
pr_warn("Failed to update freq constraint: CPU%d\n", i); pr_warn("Failed to update freq constraint: CPU%d\n", i);
} }
} }
...@@ -1153,7 +1153,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b, ...@@ -1153,7 +1153,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (intel_pstate_driver == &intel_pstate) if (intel_pstate_driver == &intel_pstate)
intel_pstate_update_policies(); intel_pstate_update_policies();
else else
update_qos_request(DEV_PM_QOS_MAX_FREQUENCY); update_qos_request(FREQ_QOS_MAX);
mutex_unlock(&intel_pstate_driver_lock); mutex_unlock(&intel_pstate_driver_lock);
...@@ -1187,7 +1187,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b, ...@@ -1187,7 +1187,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (intel_pstate_driver == &intel_pstate) if (intel_pstate_driver == &intel_pstate)
intel_pstate_update_policies(); intel_pstate_update_policies();
else else
update_qos_request(DEV_PM_QOS_MIN_FREQUENCY); update_qos_request(FREQ_QOS_MIN);
mutex_unlock(&intel_pstate_driver_lock); mutex_unlock(&intel_pstate_driver_lock);
...@@ -2381,7 +2381,7 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy, ...@@ -2381,7 +2381,7 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
{ {
int max_state, turbo_max, min_freq, max_freq, ret; int max_state, turbo_max, min_freq, max_freq, ret;
struct dev_pm_qos_request *req; struct freq_qos_request *req;
struct cpudata *cpu; struct cpudata *cpu;
struct device *dev; struct device *dev;
...@@ -2416,14 +2416,14 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) ...@@ -2416,14 +2416,14 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
max_freq = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100); max_freq = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
max_freq *= cpu->pstate.scaling; max_freq *= cpu->pstate.scaling;
ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_MIN_FREQUENCY, ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN,
min_freq); min_freq);
if (ret < 0) { if (ret < 0) {
dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret); dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
goto free_req; goto free_req;
} }
ret = dev_pm_qos_add_request(dev, req + 1, DEV_PM_QOS_MAX_FREQUENCY, ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX,
max_freq); max_freq);
if (ret < 0) { if (ret < 0) {
dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret); dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
...@@ -2435,7 +2435,7 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) ...@@ -2435,7 +2435,7 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
return 0; return 0;
remove_min_req: remove_min_req:
dev_pm_qos_remove_request(req); freq_qos_remove_request(req);
free_req: free_req:
kfree(req); kfree(req);
pstate_exit: pstate_exit:
...@@ -2446,12 +2446,12 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) ...@@ -2446,12 +2446,12 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy) static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{ {
struct dev_pm_qos_request *req; struct freq_qos_request *req;
req = policy->driver_data; req = policy->driver_data;
dev_pm_qos_remove_request(req + 1); freq_qos_remove_request(req + 1);
dev_pm_qos_remove_request(req); freq_qos_remove_request(req);
kfree(req); kfree(req);
return intel_pstate_cpu_exit(policy); return intel_pstate_cpu_exit(policy);
......
...@@ -65,7 +65,7 @@ EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi); ...@@ -65,7 +65,7 @@ EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi);
static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg) static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
{ {
struct cpufreq_policy *policy; struct cpufreq_policy *policy;
struct dev_pm_qos_request *req; struct freq_qos_request *req;
u8 node, slow_mode; u8 node, slow_mode;
int cpu, ret; int cpu, ret;
...@@ -86,7 +86,7 @@ static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg) ...@@ -86,7 +86,7 @@ static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
req = policy->driver_data; req = policy->driver_data;
ret = dev_pm_qos_update_request(req, ret = freq_qos_update_request(req,
policy->freq_table[slow_mode].frequency); policy->freq_table[slow_mode].frequency);
if (ret < 0) if (ret < 0)
pr_warn("Failed to update freq constraint: %d\n", ret); pr_warn("Failed to update freq constraint: %d\n", ret);
...@@ -103,7 +103,7 @@ static struct pmi_handler cbe_pmi_handler = { ...@@ -103,7 +103,7 @@ static struct pmi_handler cbe_pmi_handler = {
void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy) void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy)
{ {
struct dev_pm_qos_request *req; struct freq_qos_request *req;
int ret; int ret;
if (!cbe_cpufreq_has_pmi) if (!cbe_cpufreq_has_pmi)
...@@ -113,8 +113,7 @@ void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy) ...@@ -113,8 +113,7 @@ void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy)
if (!req) if (!req)
return; return;
ret = dev_pm_qos_add_request(get_cpu_device(policy->cpu), req, ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MAX,
DEV_PM_QOS_MAX_FREQUENCY,
policy->freq_table[0].frequency); policy->freq_table[0].frequency);
if (ret < 0) { if (ret < 0) {
pr_err("Failed to add freq constraint (%d)\n", ret); pr_err("Failed to add freq constraint (%d)\n", ret);
...@@ -128,10 +127,10 @@ EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_init); ...@@ -128,10 +127,10 @@ EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_init);
void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy) void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy)
{ {
struct dev_pm_qos_request *req = policy->driver_data; struct freq_qos_request *req = policy->driver_data;
if (cbe_cpufreq_has_pmi) { if (cbe_cpufreq_has_pmi) {
dev_pm_qos_remove_request(req); freq_qos_remove_request(req);
kfree(req); kfree(req);
} }
} }
......
...@@ -95,6 +95,10 @@ static int __init haltpoll_init(void) ...@@ -95,6 +95,10 @@ static int __init haltpoll_init(void)
int ret; int ret;
struct cpuidle_driver *drv = &haltpoll_driver; struct cpuidle_driver *drv = &haltpoll_driver;
/* Do not load haltpoll if idle= is passed */
if (boot_option_idle_override != IDLE_NO_OVERRIDE)
return -ENODEV;
cpuidle_poll_state_init(drv); cpuidle_poll_state_init(drv);
if (!kvm_para_available() || if (!kvm_para_available() ||
......
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
static int clamped; static int clamped;
static struct wf_control *clamp_control; static struct wf_control *clamp_control;
static struct dev_pm_qos_request qos_req; static struct freq_qos_request qos_req;
static unsigned int min_freq, max_freq; static unsigned int min_freq, max_freq;
static int clamp_set(struct wf_control *ct, s32 value) static int clamp_set(struct wf_control *ct, s32 value)
...@@ -35,7 +35,7 @@ static int clamp_set(struct wf_control *ct, s32 value) ...@@ -35,7 +35,7 @@ static int clamp_set(struct wf_control *ct, s32 value)
} }
clamped = value; clamped = value;
return dev_pm_qos_update_request(&qos_req, freq); return freq_qos_update_request(&qos_req, freq);
} }
static int clamp_get(struct wf_control *ct, s32 *value) static int clamp_get(struct wf_control *ct, s32 *value)
...@@ -77,38 +77,44 @@ static int __init wf_cpufreq_clamp_init(void) ...@@ -77,38 +77,44 @@ static int __init wf_cpufreq_clamp_init(void)
min_freq = policy->cpuinfo.min_freq; min_freq = policy->cpuinfo.min_freq;
max_freq = policy->cpuinfo.max_freq; max_freq = policy->cpuinfo.max_freq;
ret = freq_qos_add_request(&policy->constraints, &qos_req, FREQ_QOS_MAX,
max_freq);
cpufreq_cpu_put(policy); cpufreq_cpu_put(policy);
if (ret < 0) {
pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
ret);
return ret;
}
dev = get_cpu_device(0); dev = get_cpu_device(0);
if (unlikely(!dev)) { if (unlikely(!dev)) {
pr_warn("%s: No cpu device for cpu0\n", __func__); pr_warn("%s: No cpu device for cpu0\n", __func__);
return -ENODEV; ret = -ENODEV;
goto fail;
} }
clamp = kmalloc(sizeof(struct wf_control), GFP_KERNEL); clamp = kmalloc(sizeof(struct wf_control), GFP_KERNEL);
if (clamp == NULL) if (clamp == NULL) {
return -ENOMEM; ret = -ENOMEM;
goto fail;
ret = dev_pm_qos_add_request(dev, &qos_req, DEV_PM_QOS_MAX_FREQUENCY,
max_freq);
if (ret < 0) {
pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
ret);
goto free;
} }
clamp->ops = &clamp_ops; clamp->ops = &clamp_ops;
clamp->name = "cpufreq-clamp"; clamp->name = "cpufreq-clamp";
ret = wf_register_control(clamp); ret = wf_register_control(clamp);
if (ret) if (ret)
goto fail; goto free;
clamp_control = clamp; clamp_control = clamp;
return 0; return 0;
fail:
dev_pm_qos_remove_request(&qos_req);
free: free:
kfree(clamp); kfree(clamp);
fail:
freq_qos_remove_request(&qos_req);
return ret; return ret;
} }
...@@ -116,7 +122,7 @@ static void __exit wf_cpufreq_clamp_exit(void) ...@@ -116,7 +122,7 @@ static void __exit wf_cpufreq_clamp_exit(void)
{ {
if (clamp_control) { if (clamp_control) {
wf_unregister_control(clamp_control); wf_unregister_control(clamp_control);
dev_pm_qos_remove_request(&qos_req); freq_qos_remove_request(&qos_req);
} }
} }
......
...@@ -1626,12 +1626,6 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev, ...@@ -1626,12 +1626,6 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
goto free_regulators; goto free_regulators;
} }
ret = regulator_enable(reg);
if (ret < 0) {
regulator_put(reg);
goto free_regulators;
}
opp_table->regulators[i] = reg; opp_table->regulators[i] = reg;
} }
...@@ -1645,10 +1639,8 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev, ...@@ -1645,10 +1639,8 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
return opp_table; return opp_table;
free_regulators: free_regulators:
while (i--) { while (i != 0)
regulator_disable(opp_table->regulators[i]); regulator_put(opp_table->regulators[--i]);
regulator_put(opp_table->regulators[i]);
}
kfree(opp_table->regulators); kfree(opp_table->regulators);
opp_table->regulators = NULL; opp_table->regulators = NULL;
...@@ -1674,10 +1666,8 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table) ...@@ -1674,10 +1666,8 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
/* Make sure there are no concurrent readers while updating opp_table */ /* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list)); WARN_ON(!list_empty(&opp_table->opp_list));
for (i = opp_table->regulator_count - 1; i >= 0; i--) { for (i = opp_table->regulator_count - 1; i >= 0; i--)
regulator_disable(opp_table->regulators[i]);
regulator_put(opp_table->regulators[i]); regulator_put(opp_table->regulators[i]);
}
_free_set_opp_data(opp_table); _free_set_opp_data(opp_table);
......
...@@ -77,8 +77,6 @@ static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table, ...@@ -77,8 +77,6 @@ static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table,
{ {
struct dev_pm_opp *opp; struct dev_pm_opp *opp;
lockdep_assert_held(&opp_table_lock);
mutex_lock(&opp_table->lock); mutex_lock(&opp_table->lock);
list_for_each_entry(opp, &opp_table->opp_list, node) { list_for_each_entry(opp, &opp_table->opp_list, node) {
...@@ -665,6 +663,13 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) ...@@ -665,6 +663,13 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
return 0; return 0;
} }
/*
* Re-initialize list_kref every time we add static OPPs to the OPP
* table as the reference count may be 0 after the last time static OPPs
* were removed.
*/
kref_init(&opp_table->list_kref);
/* We have opp-table node now, iterate over it and add OPPs */ /* We have opp-table node now, iterate over it and add OPPs */
for_each_available_child_of_node(opp_table->np, np) { for_each_available_child_of_node(opp_table->np, np) {
opp = _opp_add_static_v2(opp_table, dev, np); opp = _opp_add_static_v2(opp_table, dev, np);
......
...@@ -88,7 +88,7 @@ struct cpufreq_cooling_device { ...@@ -88,7 +88,7 @@ struct cpufreq_cooling_device {
struct cpufreq_policy *policy; struct cpufreq_policy *policy;
struct list_head node; struct list_head node;
struct time_in_idle *idle_time; struct time_in_idle *idle_time;
struct dev_pm_qos_request qos_req; struct freq_qos_request qos_req;
}; };
static DEFINE_IDA(cpufreq_ida); static DEFINE_IDA(cpufreq_ida);
...@@ -331,7 +331,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev, ...@@ -331,7 +331,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
cpufreq_cdev->cpufreq_state = state; cpufreq_cdev->cpufreq_state = state;
return dev_pm_qos_update_request(&cpufreq_cdev->qos_req, return freq_qos_update_request(&cpufreq_cdev->qos_req,
cpufreq_cdev->freq_table[state].frequency); cpufreq_cdev->freq_table[state].frequency);
} }
...@@ -615,8 +615,8 @@ __cpufreq_cooling_register(struct device_node *np, ...@@ -615,8 +615,8 @@ __cpufreq_cooling_register(struct device_node *np,
cooling_ops = &cpufreq_cooling_ops; cooling_ops = &cpufreq_cooling_ops;
} }
ret = dev_pm_qos_add_request(dev, &cpufreq_cdev->qos_req, ret = freq_qos_add_request(&policy->constraints,
DEV_PM_QOS_MAX_FREQUENCY, &cpufreq_cdev->qos_req, FREQ_QOS_MAX,
cpufreq_cdev->freq_table[0].frequency); cpufreq_cdev->freq_table[0].frequency);
if (ret < 0) { if (ret < 0) {
pr_err("%s: Failed to add freq constraint (%d)\n", __func__, pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
...@@ -637,7 +637,7 @@ __cpufreq_cooling_register(struct device_node *np, ...@@ -637,7 +637,7 @@ __cpufreq_cooling_register(struct device_node *np,
return cdev; return cdev;
remove_qos_req: remove_qos_req:
dev_pm_qos_remove_request(&cpufreq_cdev->qos_req); freq_qos_remove_request(&cpufreq_cdev->qos_req);
remove_ida: remove_ida:
ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id); ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
free_table: free_table:
...@@ -736,7 +736,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) ...@@ -736,7 +736,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
mutex_unlock(&cooling_list_lock); mutex_unlock(&cooling_list_lock);
thermal_cooling_device_unregister(cdev); thermal_cooling_device_unregister(cdev);
dev_pm_qos_remove_request(&cpufreq_cdev->qos_req); freq_qos_remove_request(&cpufreq_cdev->qos_req);
ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id); ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
kfree(cpufreq_cdev->idle_time); kfree(cpufreq_cdev->idle_time);
kfree(cpufreq_cdev->freq_table); kfree(cpufreq_cdev->freq_table);
......
...@@ -232,8 +232,8 @@ struct acpi_processor { ...@@ -232,8 +232,8 @@ struct acpi_processor {
struct acpi_processor_limit limit; struct acpi_processor_limit limit;
struct thermal_cooling_device *cdev; struct thermal_cooling_device *cdev;
struct device *dev; /* Processor device. */ struct device *dev; /* Processor device. */
struct dev_pm_qos_request perflib_req; struct freq_qos_request perflib_req;
struct dev_pm_qos_request thermal_req; struct freq_qos_request thermal_req;
}; };
struct acpi_processor_errata { struct acpi_processor_errata {
...@@ -302,8 +302,8 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx ...@@ -302,8 +302,8 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx
#ifdef CONFIG_CPU_FREQ #ifdef CONFIG_CPU_FREQ
extern bool acpi_processor_cpufreq_init; extern bool acpi_processor_cpufreq_init;
void acpi_processor_ignore_ppc_init(void); void acpi_processor_ignore_ppc_init(void);
void acpi_processor_ppc_init(int cpu); void acpi_processor_ppc_init(struct cpufreq_policy *policy);
void acpi_processor_ppc_exit(int cpu); void acpi_processor_ppc_exit(struct cpufreq_policy *policy);
void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag); void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag);
extern int acpi_processor_get_bios_limit(int cpu, unsigned int *limit); extern int acpi_processor_get_bios_limit(int cpu, unsigned int *limit);
#else #else
...@@ -311,11 +311,11 @@ static inline void acpi_processor_ignore_ppc_init(void) ...@@ -311,11 +311,11 @@ static inline void acpi_processor_ignore_ppc_init(void)
{ {
return; return;
} }
static inline void acpi_processor_ppc_init(int cpu) static inline void acpi_processor_ppc_init(struct cpufreq_policy *policy)
{ {
return; return;
} }
static inline void acpi_processor_ppc_exit(int cpu) static inline void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
{ {
return; return;
} }
...@@ -431,14 +431,14 @@ static inline int acpi_processor_hotplug(struct acpi_processor *pr) ...@@ -431,14 +431,14 @@ static inline int acpi_processor_hotplug(struct acpi_processor *pr)
int acpi_processor_get_limit_info(struct acpi_processor *pr); int acpi_processor_get_limit_info(struct acpi_processor *pr);
extern const struct thermal_cooling_device_ops processor_cooling_ops; extern const struct thermal_cooling_device_ops processor_cooling_ops;
#if defined(CONFIG_ACPI_CPU_FREQ_PSS) & defined(CONFIG_CPU_FREQ) #if defined(CONFIG_ACPI_CPU_FREQ_PSS) & defined(CONFIG_CPU_FREQ)
void acpi_thermal_cpufreq_init(int cpu); void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy);
void acpi_thermal_cpufreq_exit(int cpu); void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy);
#else #else
static inline void acpi_thermal_cpufreq_init(int cpu) static inline void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
{ {
return; return;
} }
static inline void acpi_thermal_cpufreq_exit(int cpu) static inline void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
{ {
return; return;
} }
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include <linux/completion.h> #include <linux/completion.h>
#include <linux/kobject.h> #include <linux/kobject.h>
#include <linux/notifier.h> #include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/sysfs.h> #include <linux/sysfs.h>
...@@ -76,8 +77,10 @@ struct cpufreq_policy { ...@@ -76,8 +77,10 @@ struct cpufreq_policy {
struct work_struct update; /* if update_policy() needs to be struct work_struct update; /* if update_policy() needs to be
* called, but you're in IRQ context */ * called, but you're in IRQ context */
struct dev_pm_qos_request *min_freq_req; struct freq_constraints constraints;
struct dev_pm_qos_request *max_freq_req; struct freq_qos_request *min_freq_req;
struct freq_qos_request *max_freq_req;
struct cpufreq_frequency_table *freq_table; struct cpufreq_frequency_table *freq_table;
enum cpufreq_table_sorting freq_table_sorted; enum cpufreq_table_sorting freq_table_sorted;
......
...@@ -34,8 +34,6 @@ enum pm_qos_flags_status { ...@@ -34,8 +34,6 @@ enum pm_qos_flags_status {
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY #define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS PM_QOS_LATENCY_ANY_NS #define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS PM_QOS_LATENCY_ANY_NS
#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0 #define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
#define PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE 0
#define PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE (-1)
#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1) #define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
#define PM_QOS_FLAG_NO_POWER_OFF (1 << 0) #define PM_QOS_FLAG_NO_POWER_OFF (1 << 0)
...@@ -54,8 +52,6 @@ struct pm_qos_flags_request { ...@@ -54,8 +52,6 @@ struct pm_qos_flags_request {
enum dev_pm_qos_req_type { enum dev_pm_qos_req_type {
DEV_PM_QOS_RESUME_LATENCY = 1, DEV_PM_QOS_RESUME_LATENCY = 1,
DEV_PM_QOS_LATENCY_TOLERANCE, DEV_PM_QOS_LATENCY_TOLERANCE,
DEV_PM_QOS_MIN_FREQUENCY,
DEV_PM_QOS_MAX_FREQUENCY,
DEV_PM_QOS_FLAGS, DEV_PM_QOS_FLAGS,
}; };
...@@ -97,14 +93,10 @@ struct pm_qos_flags { ...@@ -97,14 +93,10 @@ struct pm_qos_flags {
struct dev_pm_qos { struct dev_pm_qos {
struct pm_qos_constraints resume_latency; struct pm_qos_constraints resume_latency;
struct pm_qos_constraints latency_tolerance; struct pm_qos_constraints latency_tolerance;
struct pm_qos_constraints min_frequency;
struct pm_qos_constraints max_frequency;
struct pm_qos_flags flags; struct pm_qos_flags flags;
struct dev_pm_qos_request *resume_latency_req; struct dev_pm_qos_request *resume_latency_req;
struct dev_pm_qos_request *latency_tolerance_req; struct dev_pm_qos_request *latency_tolerance_req;
struct dev_pm_qos_request *flags_req; struct dev_pm_qos_request *flags_req;
struct dev_pm_qos_request *min_frequency_req;
struct dev_pm_qos_request *max_frequency_req;
}; };
/* Action requested to pm_qos_update_target */ /* Action requested to pm_qos_update_target */
...@@ -199,10 +191,6 @@ static inline s32 dev_pm_qos_read_value(struct device *dev, ...@@ -199,10 +191,6 @@ static inline s32 dev_pm_qos_read_value(struct device *dev,
switch (type) { switch (type) {
case DEV_PM_QOS_RESUME_LATENCY: case DEV_PM_QOS_RESUME_LATENCY:
return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
case DEV_PM_QOS_MIN_FREQUENCY:
return PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
case DEV_PM_QOS_MAX_FREQUENCY:
return PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
default: default:
WARN_ON(1); WARN_ON(1);
return 0; return 0;
...@@ -267,4 +255,48 @@ static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev) ...@@ -267,4 +255,48 @@ static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev)
} }
#endif #endif
/* Default (no-constraint) values for the min and max frequency lists. */
#define FREQ_QOS_MIN_DEFAULT_VALUE 0
#define FREQ_QOS_MAX_DEFAULT_VALUE (-1)

/* Whether a request raises the effective minimum or caps the maximum. */
enum freq_qos_req_type {
FREQ_QOS_MIN = 1,
FREQ_QOS_MAX,
};

/*
 * Aggregate frequency constraints: one request list per bound, each with
 * its own notifier chain invoked when the effective value changes.
 */
struct freq_constraints {
struct pm_qos_constraints min_freq; /* aggregated with PM_QOS_MAX */
struct blocking_notifier_head min_freq_notifiers;
struct pm_qos_constraints max_freq; /* aggregated with PM_QOS_MIN */
struct blocking_notifier_head max_freq_notifiers;
};

/* One frequency request; pnode links it into one of the lists above. */
struct freq_qos_request {
enum freq_qos_req_type type;
struct plist_node pnode;
struct freq_constraints *qos; /* NULL/error while the request is inactive */
};

/* True if @req has been added to a constraint set and not yet removed. */
static inline int freq_qos_request_active(struct freq_qos_request *req)
{
return !IS_ERR_OR_NULL(req->qos);
}

void freq_constraints_init(struct freq_constraints *qos);

s32 freq_qos_read_value(struct freq_constraints *qos,
enum freq_qos_req_type type);

int freq_qos_add_request(struct freq_constraints *qos,
struct freq_qos_request *req,
enum freq_qos_req_type type, s32 value);
int freq_qos_update_request(struct freq_qos_request *req, s32 new_value);
int freq_qos_remove_request(struct freq_qos_request *req);

int freq_qos_add_notifier(struct freq_constraints *qos,
enum freq_qos_req_type type,
struct notifier_block *notifier);
int freq_qos_remove_notifier(struct freq_constraints *qos,
enum freq_qos_req_type type,
struct notifier_block *notifier);
#endif #endif
...@@ -650,3 +650,243 @@ static int __init pm_qos_power_init(void) ...@@ -650,3 +650,243 @@ static int __init pm_qos_power_init(void)
} }
late_initcall(pm_qos_power_init); late_initcall(pm_qos_power_init);
/* Definitions related to the frequency QoS below. */
/**
* freq_constraints_init - Initialize frequency QoS constraints.
* @qos: Frequency QoS constraints to initialize.
*/
void freq_constraints_init(struct freq_constraints *qos)
{
struct pm_qos_constraints *c;
c = &qos->min_freq;
plist_head_init(&c->list);
c->target_value = FREQ_QOS_MIN_DEFAULT_VALUE;
c->default_value = FREQ_QOS_MIN_DEFAULT_VALUE;
c->no_constraint_value = FREQ_QOS_MIN_DEFAULT_VALUE;
c->type = PM_QOS_MAX;
c->notifiers = &qos->min_freq_notifiers;
BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers);
c = &qos->max_freq;
plist_head_init(&c->list);
c->target_value = FREQ_QOS_MAX_DEFAULT_VALUE;
c->default_value = FREQ_QOS_MAX_DEFAULT_VALUE;
c->no_constraint_value = FREQ_QOS_MAX_DEFAULT_VALUE;
c->type = PM_QOS_MIN;
c->notifiers = &qos->max_freq_notifiers;
BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers);
}
/**
 * freq_qos_read_value - Get frequency QoS constraint for a given list.
 * @qos: Constraints to evaluate.
 * @type: QoS request type.
 *
 * Returns the effective constraint value for the selected list, or the
 * corresponding no-constraint default when @qos is NULL or an error pointer.
 */
s32 freq_qos_read_value(struct freq_constraints *qos,
			enum freq_qos_req_type type)
{
	if (type == FREQ_QOS_MIN)
		return IS_ERR_OR_NULL(qos) ?
			FREQ_QOS_MIN_DEFAULT_VALUE :
			pm_qos_read_value(&qos->min_freq);

	if (type == FREQ_QOS_MAX)
		return IS_ERR_OR_NULL(qos) ?
			FREQ_QOS_MAX_DEFAULT_VALUE :
			pm_qos_read_value(&qos->max_freq);

	WARN_ON(1);
	return 0;
}
/**
 * freq_qos_apply - Add/modify/remove frequency QoS request.
 * @req: Constraint request to apply.
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Selects the constraint list matching the request type and delegates to
 * pm_qos_update_target().  Returns its result, or -EINVAL for an unknown
 * request type.
 */
static int freq_qos_apply(struct freq_qos_request *req,
			  enum pm_qos_req_action action, s32 value)
{
	struct pm_qos_constraints *constraints;

	if (req->type == FREQ_QOS_MIN) {
		constraints = &req->qos->min_freq;
	} else if (req->type == FREQ_QOS_MAX) {
		constraints = &req->qos->max_freq;
	} else {
		return -EINVAL;
	}

	return pm_qos_update_target(constraints, &req->pnode, action, value);
}
/**
 * freq_qos_add_request - Insert new frequency QoS request into a given list.
 * @qos: Constraints to update.
 * @req: Preallocated request object.
 * @type: Request type.
 * @value: Request value.
 *
 * Insert a new entry into the @qos list of requests, recompute the effective
 * QoS constraint value for that list and initialize the @req object. The
 * caller needs to save that object for later use in updates and removal.
 *
 * Return 1 if the effective constraint value has changed, 0 if the effective
 * constraint value has not changed, or a negative error code on failures.
 */
int freq_qos_add_request(struct freq_constraints *qos,
			 struct freq_qos_request *req,
			 enum freq_qos_req_type type, s32 value)
{
	int ret;

	if (!req || IS_ERR_OR_NULL(qos))
		return -EINVAL;

	if (WARN(freq_qos_request_active(req),
		 "%s() called for active request\n", __func__))
		return -EINVAL;

	req->qos = qos;
	req->type = type;

	ret = freq_qos_apply(req, PM_QOS_ADD_REQ, value);
	if (ret >= 0)
		return ret;

	/* Insertion failed: leave the request object inactive. */
	req->qos = NULL;
	req->type = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(freq_qos_add_request);
/**
 * freq_qos_update_request - Modify existing frequency QoS request.
 * @req: Request to modify.
 * @new_value: New request value.
 *
 * Update an existing frequency QoS request along with the effective constraint
 * value for the list of requests it belongs to.
 *
 * Return 1 if the effective constraint value has changed, 0 if the effective
 * constraint value has not changed, or a negative error code on failures.
 */
int freq_qos_update_request(struct freq_qos_request *req, s32 new_value)
{
	if (!req)
		return -EINVAL;

	if (WARN(!freq_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	/* Short-circuit when the requested value does not change. */
	return req->pnode.prio == new_value ?
		0 : freq_qos_apply(req, PM_QOS_UPDATE_REQ, new_value);
}
EXPORT_SYMBOL_GPL(freq_qos_update_request);
/**
* freq_qos_remove_request - Remove frequency QoS request from its list.
* @req: Request to remove.
*
* Remove the given frequency QoS request from the list of constraints it
* belongs to and recompute the effective constraint value for that list.
*
* Return 1 if the effective constraint value has changed, 0 if the effective
* constraint value has not changed, or a negative error code on failures.
*/
int freq_qos_remove_request(struct freq_qos_request *req)
{
if (!req)
return -EINVAL;
if (WARN(!freq_qos_request_active(req),
"%s() called for unknown object\n", __func__))
return -EINVAL;
return freq_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
}
EXPORT_SYMBOL_GPL(freq_qos_remove_request);
/**
 * freq_qos_add_notifier - Add frequency QoS change notifier.
 * @qos: List of requests to add the notifier to.
 * @type: Request type.
 * @notifier: Notifier block to add.
 *
 * Registers @notifier on the notifier chain of the selected constraint
 * list.  Returns -EINVAL for invalid arguments or an unknown @type.
 */
int freq_qos_add_notifier(struct freq_constraints *qos,
			  enum freq_qos_req_type type,
			  struct notifier_block *notifier)
{
	struct blocking_notifier_head *nh;

	if (!notifier || IS_ERR_OR_NULL(qos))
		return -EINVAL;

	if (type == FREQ_QOS_MIN) {
		nh = qos->min_freq.notifiers;
	} else if (type == FREQ_QOS_MAX) {
		nh = qos->max_freq.notifiers;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	return blocking_notifier_chain_register(nh, notifier);
}
EXPORT_SYMBOL_GPL(freq_qos_add_notifier);
/**
 * freq_qos_remove_notifier - Remove frequency QoS change notifier.
 * @qos: List of requests to remove the notifier from.
 * @type: Request type.
 * @notifier: Notifier block to remove.
 *
 * Unregisters @notifier from the notifier chain of the selected constraint
 * list.  Returns -EINVAL for invalid arguments or an unknown @type.
 */
int freq_qos_remove_notifier(struct freq_constraints *qos,
			     enum freq_qos_req_type type,
			     struct notifier_block *notifier)
{
	struct blocking_notifier_head *nh;

	if (!notifier || IS_ERR_OR_NULL(qos))
		return -EINVAL;

	if (type == FREQ_QOS_MIN) {
		nh = qos->min_freq.notifiers;
	} else if (type == FREQ_QOS_MAX) {
		nh = qos->max_freq.notifiers;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	return blocking_notifier_chain_unregister(nh, notifier);
}
EXPORT_SYMBOL_GPL(freq_qos_remove_notifier);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment