Commit 96bbbe4a authored by Viresh Kumar, committed by Rafael J. Wysocki

cpufreq: Remove unnecessary variable/parameter 'frozen'

We have used the 'frozen' variable/function parameter in many places to
distinguish between CPU offline/online during suspend/resume and sysfs
removals. We now have another variable, cpufreq_suspended, which can be
used in these cases, so we can get rid of all those variables and
function parameters.
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
parent 979d86fa
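The change below is mechanical: rather than threading a 'frozen' flag through every call chain, each function now reads the global cpufreq_suspended state (cached locally as recover_policy in __cpufreq_add_dev()). The following is a minimal sketch of that pattern, using hypothetical stand-in functions (add_dev_old/add_dev_new), not the actual kernel code:

/* Sketch only: hypothetical stand-ins, not the real cpufreq implementation. */
#include <stdbool.h>

/* Global flag maintained by the suspend/resume path. */
static bool cpufreq_suspended;

/* Before: every caller had to compute and pass 'frozen' explicitly. */
static int add_dev_old(unsigned int cpu, bool frozen)
{
	if (frozen) {
		/* light-weight re-init: restore a previously saved policy */
	}
	return 0;
}

/* After: the same information is derived from global state internally. */
static int add_dev_new(unsigned int cpu)
{
	bool recover_policy = cpufreq_suspended;

	if (recover_policy) {
		/* light-weight re-init: restore a previously saved policy */
	}
	return 0;
}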
@@ -1041,13 +1041,13 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
CPUFREQ_UPDATE_POLICY_CPU, policy);
}
-static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
-bool frozen)
+static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
unsigned int j, cpu = dev->id;
int ret = -ENOMEM;
struct cpufreq_policy *policy;
unsigned long flags;
+bool recover_policy = cpufreq_suspended;
#ifdef CONFIG_HOTPLUG_CPU
struct cpufreq_policy *tpolicy;
#endif
@@ -1088,9 +1088,9 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
* Restore the saved policy when doing light-weight init and fall back
* to the full init if that fails.
*/
-policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
+policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
if (!policy) {
-frozen = false;
+recover_policy = false;
policy = cpufreq_policy_alloc();
if (!policy)
goto nomem_out;
@@ -1102,7 +1102,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
* the creation of a brand new one. So we need to perform this update
* by invoking update_policy_cpu().
*/
-if (frozen && cpu != policy->cpu)
+if (recover_policy && cpu != policy->cpu)
update_policy_cpu(policy, cpu);
else
policy->cpu = cpu;
@@ -1130,7 +1130,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
*/
cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
-if (!frozen) {
+if (!recover_policy) {
policy->user_policy.min = policy->min;
policy->user_policy.max = policy->max;
}
@@ -1192,7 +1192,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_START, policy);
-if (!frozen) {
+if (!recover_policy) {
ret = cpufreq_add_dev_interface(policy, dev);
if (ret)
goto err_out_unregister;
@@ -1206,7 +1206,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
cpufreq_init_policy(policy);
-if (!frozen) {
+if (!recover_policy) {
policy->user_policy.policy = policy->policy;
policy->user_policy.governor = policy->governor;
}
@@ -1229,7 +1229,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
err_set_policy_cpu:
-if (frozen) {
+if (recover_policy) {
/* Do not leave stale fallback data behind. */
per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
cpufreq_policy_put_kobj(policy);
@@ -1253,7 +1253,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
*/
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
-return __cpufreq_add_dev(dev, sif, false);
+return __cpufreq_add_dev(dev, sif);
}
static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
@@ -1284,8 +1284,7 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
}
static int __cpufreq_remove_dev_prepare(struct device *dev,
-struct subsys_interface *sif,
-bool frozen)
+struct subsys_interface *sif)
{
unsigned int cpu = dev->id, cpus;
int new_cpu, ret;
@@ -1299,7 +1298,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
policy = per_cpu(cpufreq_cpu_data, cpu);
/* Save the policy somewhere when doing a light-weight tear-down */
-if (frozen)
+if (cpufreq_suspended)
per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -1332,7 +1331,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
if (new_cpu >= 0) {
update_policy_cpu(policy, new_cpu);
-if (!frozen) {
+if (!cpufreq_suspended) {
pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
__func__, new_cpu, cpu);
}
@@ -1343,8 +1342,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
}
static int __cpufreq_remove_dev_finish(struct device *dev,
-struct subsys_interface *sif,
-bool frozen)
+struct subsys_interface *sif)
{
unsigned int cpu = dev->id, cpus;
int ret;
@@ -1379,7 +1377,7 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
}
}
-if (!frozen)
+if (!cpufreq_suspended)
cpufreq_policy_put_kobj(policy);
/*
@@ -1395,7 +1393,7 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
list_del(&policy->policy_list);
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-if (!frozen)
+if (!cpufreq_suspended)
cpufreq_policy_free(policy);
} else {
if (has_target()) {
@@ -1425,10 +1423,10 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
if (cpu_is_offline(cpu))
return 0;
-ret = __cpufreq_remove_dev_prepare(dev, sif, false);
+ret = __cpufreq_remove_dev_prepare(dev, sif);
if (!ret)
-ret = __cpufreq_remove_dev_finish(dev, sif, false);
+ret = __cpufreq_remove_dev_finish(dev, sif);
return ret;
}
@@ -2182,29 +2180,24 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
{
unsigned int cpu = (unsigned long)hcpu;
struct device *dev;
-bool frozen = false;
dev = get_cpu_device(cpu);
if (dev) {
-if (action & CPU_TASKS_FROZEN)
-frozen = true;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
-__cpufreq_add_dev(dev, NULL, frozen);
+__cpufreq_add_dev(dev, NULL);
break;
case CPU_DOWN_PREPARE:
-__cpufreq_remove_dev_prepare(dev, NULL, frozen);
+__cpufreq_remove_dev_prepare(dev, NULL);
break;
case CPU_POST_DEAD:
-__cpufreq_remove_dev_finish(dev, NULL, frozen);
+__cpufreq_remove_dev_finish(dev, NULL);
break;
case CPU_DOWN_FAILED:
-__cpufreq_add_dev(dev, NULL, frozen);
+__cpufreq_add_dev(dev, NULL);
break;
}
}
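For context, the cpufreq_suspended flag that replaces 'frozen' is maintained by the suspend/resume path elsewhere in cpufreq.c and is not shown in this diff. A rough, simplified sketch of that lifecycle (the real cpufreq_suspend()/cpufreq_resume() also stop and restart governors and invoke driver callbacks):

/* Simplified sketch; not the full implementation. */
static bool cpufreq_suspended;

void cpufreq_suspend(void)
{
	/* ... stop governors, call the driver's suspend hook ... */
	cpufreq_suspended = true;
}

void cpufreq_resume(void)
{
	cpufreq_suspended = false;
	/* ... call the driver's resume hook, restart governors ... */
}

With the flag in place, CPU offline/online during suspend/resume can be detected directly inside the add/remove paths, which is what makes the 'frozen' parameter redundant.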