Commit 2f0aea93 authored by Viresh Kumar, committed by Rafael J. Wysocki

cpufreq: suspend governors on system suspend/hibernate

This patch adds cpufreq suspend/resume calls to dpm_{suspend|resume}()
for handling suspend/resume of cpufreq governors.
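
In rough terms, the resulting ordering over a full suspend/resume cycle is
the following (an illustrative sketch of the call sequence, not actual
kernel code; error paths and the other dpm_* phases are omitted):

    /* dpm_suspend(): stop governors while I/O devices are still up */
    cpufreq_suspend();
    /* ... suspend all devices (i2c controllers, regulators, ...) ... */

    /* dpm_resume(): devices come back first, governors restart last */
    /* ... resume all devices ... */
    cpufreq_resume();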

Lan Tianyu (Intel) & Jinhyuk Choi (Broadcom) found an issue where the
tunables configuration for clusters/sockets with non-boot CPUs was
lost after system suspend/resume, as we were notifying governors with
CPUFREQ_GOV_POLICY_EXIT on removal of the last CPU for that policy,
which caused the tunables memory to be freed.
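
Roughly, the failing sequence before this patch looked like this (an
abbreviated, illustrative call trace; the exact intermediate function
names are elided):

    /* Old behaviour during system suspend (illustrative): */
    disable_nonboot_cpus()
      -> cpufreq hotplug-removal path, last CPU of a policy goes away
           -> __cpufreq_governor(policy, CPUFREQ_GOV_STOP)
           -> __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT)
                /* governor frees its tunables here, so the user's
                   settings for that cluster/socket are lost */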

This is fixed by preventing any governor operations from being
carried out between the device suspend and device resume stages of
system suspend and resume, respectively.
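
Concretely, this is done by raising a global cpufreq_suspended flag in
cpufreq_suspend() and checking it at the top of __cpufreq_governor(),
as in the cpufreq.c hunk below:

    /* Don't start any governor operations if we are entering suspend */
    if (cpufreq_suspended)
        return 0;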

We could have added these callbacks at the dpm_{suspend|resume}_noirq()
level, but there is an additional problem: the majority of I/O
devices are already suspended at that point, and if cpufreq drivers
want to change the frequency before suspending, that may not be
possible on some platforms (which depend on peripherals like i2c,
regulators, etc.).
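
For example, a hypothetical cpufreq driver ->suspend() callback that
parks the CPU at a safe operating point might look like this
(illustrative only; foo_reg and FOO_SAFE_UV are made-up names, not part
of this patch):

    static int foo_cpufreq_suspend(struct cpufreq_policy *policy)
    {
    	/* Needs the regulator (and its i2c bus) still awake, which
    	 * is why cpufreq_suspend() runs before devices are suspended.
    	 */
    	int ret = regulator_set_voltage(foo_reg, FOO_SAFE_UV, FOO_SAFE_UV);
    	if (ret)
    		return ret;

    	return __cpufreq_driver_target(policy, policy->min,
    				       CPUFREQ_RELATION_H);
    }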
Reported-and-tested-by: Lan Tianyu <tianyu.lan@intel.com>
Reported-by: Jinhyuk Choi <jinchoi@broadcom.com>
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
[rjw: Changelog]
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
parent 6e2c89d1
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -29,6 +29,7 @@
 #include <linux/async.h>
 #include <linux/suspend.h>
 #include <trace/events/power.h>
+#include <linux/cpufreq.h>
 #include <linux/cpuidle.h>
 #include <linux/timer.h>
@@ -789,6 +790,8 @@ void dpm_resume(pm_message_t state)
 	mutex_unlock(&dpm_list_mtx);
 	async_synchronize_full();
 	dpm_show_time(starttime, state, NULL);
+
+	cpufreq_resume();
 }
 
 /**
@@ -1259,6 +1262,8 @@ int dpm_suspend(pm_message_t state)
 
 	might_sleep();
 
+	cpufreq_suspend();
+
 	mutex_lock(&dpm_list_mtx);
 	pm_transition = state;
 	async_error = 0;
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -26,7 +26,7 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
-#include <linux/syscore_ops.h>
+#include <linux/suspend.h>
 #include <linux/tick.h>
 #include <trace/events/power.h>
@@ -45,6 +45,9 @@ static LIST_HEAD(cpufreq_policy_list);
 /* This one keeps track of the previously set governor of a removed CPU */
 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
 
+/* Flag to suspend/resume CPUFreq governors */
+static bool cpufreq_suspended;
+
 static inline bool has_target(void)
 {
 	return cpufreq_driver->target_index || cpufreq_driver->target;
@@ -1565,82 +1568,77 @@ static struct subsys_interface cpufreq_interface = {
 };
 
 /**
- * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
+ * cpufreq_suspend() - Suspend CPUFreq governors
  *
- * This function is only executed for the boot processor.  The other CPUs
- * have been put offline by means of CPU hotplug.
+ * Called during system wide Suspend/Hibernate cycles for suspending governors
+ * as some platforms can't change frequency after this point in suspend cycle.
+ * Because some of the devices (like: i2c, regulators, etc) they use for
+ * changing frequency are suspended quickly after this point.
  */
-static int cpufreq_bp_suspend(void)
+void cpufreq_suspend(void)
 {
-	int ret = 0;
-
-	int cpu = smp_processor_id();
 	struct cpufreq_policy *policy;
 
-	pr_debug("suspending cpu %u\n", cpu);
+	if (!cpufreq_driver)
+		return;
 
-	/* If there's no policy for the boot CPU, we have nothing to do. */
-	policy = cpufreq_cpu_get(cpu);
-	if (!policy)
-		return 0;
+	if (!has_target())
+		return;
 
-	if (cpufreq_driver->suspend) {
-		ret = cpufreq_driver->suspend(policy);
-		if (ret)
-			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
-					"step on CPU %u\n", policy->cpu);
+	pr_debug("%s: Suspending Governors\n", __func__);
+
+	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
+			pr_err("%s: Failed to stop governor for policy: %p\n",
+				__func__, policy);
+		else if (cpufreq_driver->suspend
+		    && cpufreq_driver->suspend(policy))
+			pr_err("%s: Failed to suspend driver: %p\n", __func__,
+				policy);
 	}
 
-	cpufreq_cpu_put(policy);
-	return ret;
+	cpufreq_suspended = true;
 }
 
 /**
- * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
+ * cpufreq_resume() - Resume CPUFreq governors
  *
- *	1.) resume CPUfreq hardware support (cpufreq_driver->resume())
- *	2.) schedule call cpufreq_update_policy() ASAP as interrupts are
- *	    restored. It will verify that the current freq is in sync with
- *	    what we believe it to be. This is a bit later than when it
- *	    should be, but nonethteless it's better than calling
- *	    cpufreq_driver->get() here which might re-enable interrupts...
- *
- * This function is only executed for the boot CPU.  The other CPUs have not
- * been turned on yet.
+ * Called during system wide Suspend/Hibernate cycle for resuming governors that
+ * are suspended with cpufreq_suspend().
  */
-static void cpufreq_bp_resume(void)
+void cpufreq_resume(void)
 {
-	int ret = 0;
-
-	int cpu = smp_processor_id();
 	struct cpufreq_policy *policy;
 
-	pr_debug("resuming cpu %u\n", cpu);
+	if (!cpufreq_driver)
+		return;
 
-	/* If there's no policy for the boot CPU, we have nothing to do. */
-	policy = cpufreq_cpu_get(cpu);
-	if (!policy)
+	if (!has_target())
 		return;
 
-	if (cpufreq_driver->resume) {
-		ret = cpufreq_driver->resume(policy);
-		if (ret) {
-			printk(KERN_ERR "cpufreq: resume failed in ->resume "
-					"step on CPU %u\n", policy->cpu);
-			goto fail;
-		}
-	}
+	pr_debug("%s: Resuming Governors\n", __func__);
 
-	schedule_work(&policy->update);
+	cpufreq_suspended = false;
 
-fail:
-	cpufreq_cpu_put(policy);
-}
+	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+		if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
+		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
+			pr_err("%s: Failed to start governor for policy: %p\n",
+				__func__, policy);
+		else if (cpufreq_driver->resume
+		    && cpufreq_driver->resume(policy))
+			pr_err("%s: Failed to resume driver: %p\n", __func__,
+				policy);
 
-static struct syscore_ops cpufreq_syscore_ops = {
-	.suspend	= cpufreq_bp_suspend,
-	.resume		= cpufreq_bp_resume,
-};
+		/*
+		 * schedule call cpufreq_update_policy() for boot CPU, i.e. last
+		 * policy in list. It will verify that the current freq is in
+		 * sync with what we believe it to be.
+		 */
+		if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
+			schedule_work(&policy->update);
+	}
+}
 
 /**
  * cpufreq_get_current_driver - return current driver's name
@@ -1857,6 +1855,10 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
 	struct cpufreq_governor *gov = NULL;
 #endif
 
+	/* Don't start any governor operations if we are entering suspend */
+	if (cpufreq_suspended)
+		return 0;
+
 	if (policy->governor->max_transition_latency &&
 	    policy->cpuinfo.transition_latency >
 	    policy->governor->max_transition_latency) {
@@ -2392,7 +2394,6 @@ static int __init cpufreq_core_init(void)
 	cpufreq_global_kobject = kobject_create();
 	BUG_ON(!cpufreq_global_kobject);
-	register_syscore_ops(&cpufreq_syscore_ops);
 
 	return 0;
 }
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -296,6 +296,14 @@ cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
 			policy->cpuinfo.max_freq);
 }
 
+#ifdef CONFIG_CPU_FREQ
+void cpufreq_suspend(void);
+void cpufreq_resume(void);
+#else
+static inline void cpufreq_suspend(void) {}
+static inline void cpufreq_resume(void) {}
+#endif
+
 /*********************************************************************
  *                     CPUFREQ NOTIFIER INTERFACE                    *
  *********************************************************************/