Commit f4f31fff authored by Linus Torvalds

Merge tag 'pm-5.0-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management updates from Rafael Wysocki:
 "These fix fallout after starting to use hrtimers in the runtime PM
  framework, fix a few cpufreq issues, fix a recently broken reference
  to cpuidle documentation, update MAINTAINERS entries for cpufreq and
  cpuidle and make the recently added system suspend and resume support
  in devfreq actually work.

  Specifics:

   - Prevent integer overflows from occurring on 32-bit when converting
     milliseconds to nanoseconds in the runtime PM framework and update
     comments that still refer to jiffies in it (Vincent Guittot,
     Ladislav Michl).

   - Fix the SCMI cpufreq driver to always use the same frequency units
     for arch_set_freq_scale() and make the scale-invariant load
     tracking actually work with this driver (Quentin Perret).

   - Fix freeing of dynamic OPPs in the SCPI and SCMI cpufreq drivers
     broken during the 4.20 development cycle (Viresh Kumar).

   - Prevent the cpufreq core from attempting to return the current
     frequency of offline CPUs (Sudeep Holla).

   - Add devfreq suspend and resume hooks (missed previously) to the PM
     core to make the recently added system suspend and resume support
     in devfreq actually work (Lukasz Luba).

   - Update MAINTAINERS entries for cpufreq and cpuidle, mostly to add
     references to new/current documentation to them (Rafael Wysocki).

   - Fix a recently broken reference to cpuidle documentation (Otto
     Sabart)"

* tag 'pm-5.0-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  PM-runtime: Fix autosuspend_delay on 32bits arch
  PM-runtime: Fix 'jiffies' in comments after switch to hrtimers
  cpufreq: scmi: Fix frequency invariance in slow path
  doc: trace: fix reference to cpuidle documentation file
  cpufreq: check if policy is inactive early in __cpufreq_get()
  cpufreq: scpi/scmi: Fix freeing of dynamic OPPs
  cpuidle / Documentation: Update cpuidle MAINTAINERS entry
  cpufreq / Documentation: Update cpufreq MAINTAINERS entry
  PM: sleep: call devfreq suspend/resume
parents 385c59c7 343e60e5
@@ -165,7 +165,7 @@ Do some work...
 The same can also be done from an application program.
 Disable specific CPU's specific idle state from cpuidle sysfs (see
-Documentation/cpuidle/sysfs.txt):
+Documentation/admin-guide/pm/cpuidle.rst):
 # echo 1 > /sys/devices/system/cpu/cpu$cpu/cpuidle/state$state/disable
......
@@ -3951,7 +3951,7 @@ L: netdev@vger.kernel.org
 S: Maintained
 F: drivers/net/ethernet/ti/cpmac.c
 
-CPU FREQUENCY DRIVERS
+CPU FREQUENCY SCALING FRAMEWORK
 M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
 M: Viresh Kumar <viresh.kumar@linaro.org>
 L: linux-pm@vger.kernel.org
@@ -3959,6 +3959,8 @@ S: Maintained
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
 T: git git://git.linaro.org/people/vireshk/linux.git (For ARM Updates)
 B: https://bugzilla.kernel.org
+F: Documentation/admin-guide/pm/cpufreq.rst
+F: Documentation/admin-guide/pm/intel_pstate.rst
 F: Documentation/cpu-freq/
 F: Documentation/devicetree/bindings/cpufreq/
 F: drivers/cpufreq/
@@ -4006,13 +4008,14 @@ S: Supported
 F: drivers/cpuidle/cpuidle-exynos.c
 F: arch/arm/mach-exynos/pm.c
 
-CPUIDLE DRIVERS
+CPU IDLE TIME MANAGEMENT FRAMEWORK
 M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
 M: Daniel Lezcano <daniel.lezcano@linaro.org>
 L: linux-pm@vger.kernel.org
 S: Maintained
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
 B: https://bugzilla.kernel.org
+F: Documentation/admin-guide/pm/cpuidle.rst
 F: drivers/cpuidle/*
 F: include/linux/cpuidle.h
......
@@ -32,6 +32,7 @@
 #include <trace/events/power.h>
 #include <linux/cpufreq.h>
 #include <linux/cpuidle.h>
+#include <linux/devfreq.h>
 #include <linux/timer.h>
 
 #include "../base.h"
@@ -1078,6 +1079,7 @@ void dpm_resume(pm_message_t state)
         dpm_show_time(starttime, state, 0, NULL);
 
         cpufreq_resume();
+        devfreq_resume();
 
         trace_suspend_resume(TPS("dpm_resume"), state.event, false);
 }
@@ -1852,6 +1854,7 @@ int dpm_suspend(pm_message_t state)
         trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
         might_sleep();
 
+        devfreq_suspend();
         cpufreq_suspend();
 
         mutex_lock(&dpm_list_mtx);
......
@@ -121,7 +121,7 @@ static void pm_runtime_cancel_pending(struct device *dev)
  * Compute the autosuspend-delay expiration time based on the device's
  * power.last_busy time. If the delay has already expired or is disabled
  * (negative) or the power.use_autosuspend flag isn't set, return 0.
- * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
+ * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
  *
  * This function may be called either with or without dev->power.lock held.
  * Either way it can be racy, since power.last_busy may be updated at any time.
@@ -141,7 +141,7 @@ u64 pm_runtime_autosuspend_expiration(struct device *dev)
         last_busy = READ_ONCE(dev->power.last_busy);
 
-        expires = last_busy + autosuspend_delay * NSEC_PER_MSEC;
+        expires = last_busy + (u64)autosuspend_delay * NSEC_PER_MSEC;
         if (expires <= now)
                 expires = 0;    /* Already expired. */
@@ -525,7 +525,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
                 * We add a slack of 25% to gather wakeups
                 * without sacrificing the granularity.
                 */
-                u64 slack = READ_ONCE(dev->power.autosuspend_delay) *
+                u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
                                 (NSEC_PER_MSEC >> 2);
 
                 dev->power.timer_expires = expires;
@@ -905,7 +905,10 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
         spin_lock_irqsave(&dev->power.lock, flags);
 
         expires = dev->power.timer_expires;
-        /* If 'expire' is after 'jiffies' we've been called too early. */
+        /*
+         * If 'expires' is after the current time, we've been called
+         * too early.
+         */
         if (expires > 0 && expires < ktime_to_ns(ktime_get())) {
                 dev->power.timer_expires = 0;
                 rpm_suspend(dev, dev->power.timer_autosuspends ?
......
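
Aside (not part of the patch): the (u64) casts above matter because power.autosuspend_delay is an int holding milliseconds, and on a 32-bit kernel its product with NSEC_PER_MSEC is computed in 32 bits before it is stored in a 64-bit variable, so any delay above roughly 2.1 seconds wraps around. A minimal userspace sketch that simulates the 32-bit arithmetic (hypothetical standalone C, not kernel code):

/* overflow_demo.c - hypothetical userspace sketch, not kernel code.
 * It mimics the 32-bit multiplication that the old code performed.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC 1000000ULL

int main(void)
{
        int autosuspend_delay = 5000;   /* 5 s, an ordinary autosuspend delay */

        /* Old behaviour on 32-bit: the product is computed in 32 bits and
         * wraps before it is widened to 64 bits (simulated here with
         * uint32_t so the demonstration stays well defined). */
        uint64_t wrapped = (uint32_t)((uint32_t)autosuspend_delay * 1000000U);

        /* Fixed behaviour: widen one operand first, as the patch does with
         * (u64)autosuspend_delay * NSEC_PER_MSEC. */
        uint64_t correct = (uint64_t)autosuspend_delay * NSEC_PER_MSEC;

        printf("wrapped: %llu ns\n", (unsigned long long)wrapped);
        printf("correct: %llu ns\n", (unsigned long long)correct);
        return 0;
}

Compiled and run, the first value comes out near 0.7 s worth of nanoseconds instead of the intended 5 s, which is why autosuspend could fire far too early on 32-bit systems.
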
@@ -1530,17 +1530,16 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
 {
         unsigned int ret_freq = 0;
 
-        if (!cpufreq_driver->get)
+        if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get)
                 return ret_freq;
 
         ret_freq = cpufreq_driver->get(policy->cpu);
 
         /*
-         * Updating inactive policies is invalid, so avoid doing that. Also
-         * if fast frequency switching is used with the given policy, the check
+         * If fast frequency switching is used with the given policy, the check
          * against policy->cur is pointless, so skip it in that case too.
          */
-        if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
+        if (policy->fast_switch_enabled)
                 return ret_freq;
 
         if (ret_freq && policy->cur &&
@@ -1569,10 +1568,7 @@ unsigned int cpufreq_get(unsigned int cpu)
         if (policy) {
                 down_read(&policy->rwsem);
-                if (!policy_is_inactive(policy))
-                        ret_freq = __cpufreq_get(policy);
+                ret_freq = __cpufreq_get(policy);
                 up_read(&policy->rwsem);
 
                 cpufreq_cpu_put(policy);
......
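
Aside (not part of the patch): the point of moving the policy_is_inactive() check to the top of __cpufreq_get() is that the driver's ->get() callback should never be invoked for a policy whose CPUs are all offline, since it typically touches per-CPU hardware. A stripped-down sketch of the check-before-call pattern (hypothetical types and names, not the real cpufreq structures):

/* check_first.c - hypothetical sketch of "validate the policy before
 * calling into the driver"; the names are invented for illustration. */
#include <stdbool.h>
#include <stdio.h>

struct policy {
        unsigned int cpu;
        bool active;            /* stands in for !policy_is_inactive() */
};

/* A driver callback like this may read per-CPU registers and must not be
 * called for an offline CPU. */
static unsigned int driver_get(unsigned int cpu)
{
        printf("reading hardware registers of CPU%u\n", cpu);
        return 1400000;         /* kHz */
}

static unsigned int get_freq(const struct policy *p)
{
        /* Check first, in the spirit of the patched __cpufreq_get(). */
        if (!p->active)
                return 0;

        return driver_get(p->cpu);
}

int main(void)
{
        struct policy offline = { .cpu = 2, .active = false };

        printf("offline CPU2 reports %u kHz\n", get_freq(&offline));
        return 0;
}

With the check first, an inactive policy simply reports 0 instead of triggering a hardware access.
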
@@ -52,9 +52,9 @@ scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
         int ret;
         struct scmi_data *priv = policy->driver_data;
         struct scmi_perf_ops *perf_ops = handle->perf_ops;
-        u64 freq = policy->freq_table[index].frequency * 1000;
+        u64 freq = policy->freq_table[index].frequency;
 
-        ret = perf_ops->freq_set(handle, priv->domain_id, freq, false);
+        ret = perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false);
         if (!ret)
                 arch_set_freq_scale(policy->related_cpus, freq,
                                     policy->cpuinfo.max_freq);
@@ -176,7 +176,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
 out_free_priv:
         kfree(priv);
 out_free_opp:
-        dev_pm_opp_cpumask_remove_table(policy->cpus);
+        dev_pm_opp_remove_all_dynamic(cpu_dev);
 
         return ret;
 }
@@ -188,7 +188,7 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
         cpufreq_cooling_unregister(priv->cdev);
         dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
         kfree(priv);
-        dev_pm_opp_cpumask_remove_table(policy->related_cpus);
+        dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
 
         return 0;
 }
......
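
Aside (not part of the patch): cpufreq keeps frequencies in kHz, and arch_set_freq_scale() divides the current frequency by policy->cpuinfo.max_freq, so both must be in the same unit; only the SCMI firmware call wants Hz, hence the freq * 1000 is now confined to perf_ops->freq_set(). A rough sketch of the ratio arch_set_freq_scale() derives (assuming the usual SCHED_CAPACITY_SCALE of 1024; simplified, not the kernel implementation):

/* freq_scale_demo.c - hypothetical userspace sketch of the frequency
 * scale factor; the values are made up for illustration. */
#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024ULL

static uint64_t freq_scale(uint64_t cur, uint64_t max)
{
        return (cur * SCHED_CAPACITY_SCALE) / max;
}

int main(void)
{
        uint64_t cur_khz = 1400000, max_khz = 2000000;

        /* Consistent units (kHz vs kHz): the expected ~70% capacity. */
        printf("kHz vs kHz: %llu/1024\n",
               (unsigned long long)freq_scale(cur_khz, max_khz));

        /* Mismatched units (Hz vs kHz), as before the fix: the ratio is
         * inflated by a factor of 1000. */
        printf("Hz  vs kHz: %llu/1024\n",
               (unsigned long long)freq_scale(cur_khz * 1000, max_khz));
        return 0;
}

Before the fix the Hz value was compared against a kHz maximum, so the scale factor was off by a factor of 1000 and scale-invariant load tracking was meaningless on this driver.
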
@@ -177,7 +177,7 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy)
 out_free_priv:
         kfree(priv);
 out_free_opp:
-        dev_pm_opp_cpumask_remove_table(policy->cpus);
+        dev_pm_opp_remove_all_dynamic(cpu_dev);
 
         return ret;
 }
@@ -190,7 +190,7 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
         clk_put(priv->clk);
         dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
         kfree(priv);
-        dev_pm_opp_cpumask_remove_table(policy->related_cpus);
+        dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
 
         return 0;
 }
......
@@ -988,11 +988,9 @@ void _opp_free(struct dev_pm_opp *opp)
         kfree(opp);
 }
 
-static void _opp_kref_release(struct kref *kref)
+static void _opp_kref_release(struct dev_pm_opp *opp,
+                              struct opp_table *opp_table)
 {
-        struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
-        struct opp_table *opp_table = opp->opp_table;
-
         /*
          * Notify the changes in the availability of the operable
          * frequency/voltage list.
@@ -1002,7 +1000,22 @@ static void _opp_kref_release(struct kref *kref)
         opp_debug_remove_one(opp);
         list_del(&opp->node);
         kfree(opp);
+}
 
+static void _opp_kref_release_unlocked(struct kref *kref)
+{
+        struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
+        struct opp_table *opp_table = opp->opp_table;
+
+        _opp_kref_release(opp, opp_table);
+}
+
+static void _opp_kref_release_locked(struct kref *kref)
+{
+        struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
+        struct opp_table *opp_table = opp->opp_table;
+
+        _opp_kref_release(opp, opp_table);
         mutex_unlock(&opp_table->lock);
 }
@@ -1013,10 +1026,16 @@ void dev_pm_opp_get(struct dev_pm_opp *opp)
 void dev_pm_opp_put(struct dev_pm_opp *opp)
 {
-        kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
+        kref_put_mutex(&opp->kref, _opp_kref_release_locked,
+                       &opp->opp_table->lock);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_put);
 
+static void dev_pm_opp_put_unlocked(struct dev_pm_opp *opp)
+{
+        kref_put(&opp->kref, _opp_kref_release_unlocked);
+}
+
 /**
  * dev_pm_opp_remove() - Remove an OPP from OPP table
  * @dev: device for which we do this operation
@@ -1060,6 +1079,40 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
 
+/**
+ * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs
+ * @dev: device for which we do this operation
+ *
+ * This function removes all dynamically created OPPs from the opp table.
+ */
+void dev_pm_opp_remove_all_dynamic(struct device *dev)
+{
+        struct opp_table *opp_table;
+        struct dev_pm_opp *opp, *temp;
+        int count = 0;
+
+        opp_table = _find_opp_table(dev);
+        if (IS_ERR(opp_table))
+                return;
+
+        mutex_lock(&opp_table->lock);
+        list_for_each_entry_safe(opp, temp, &opp_table->opp_list, node) {
+                if (opp->dynamic) {
+                        dev_pm_opp_put_unlocked(opp);
+                        count++;
+                }
+        }
+        mutex_unlock(&opp_table->lock);
+
+        /* Drop the references taken by dev_pm_opp_add() */
+        while (count--)
+                dev_pm_opp_put_opp_table(opp_table);
+
+        /* Drop the reference taken by _find_opp_table() */
+        dev_pm_opp_put_opp_table(opp_table);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);
+
 struct dev_pm_opp *_opp_allocate(struct opp_table *table)
 {
         struct dev_pm_opp *opp;
......
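
Aside (not part of the patch): the new helper pairs with dev_pm_opp_add(), which is how the scpi/scmi drivers create their OPPs at runtime, and the drivers' error and exit paths switch from dev_pm_opp_cpumask_remove_table() to it so the dynamically added OPPs are actually freed again. A hypothetical driver fragment (invented function name and values) showing the intended pairing:

/* Hypothetical driver fragment, for illustration only. */
#include <linux/device.h>
#include <linux/pm_opp.h>

static int example_register_opps(struct device *cpu_dev)
{
        int ret;

        /* Two made-up operating points: 500 MHz @ 900 mV, 1 GHz @ 1.1 V. */
        ret = dev_pm_opp_add(cpu_dev, 500000000, 900000);
        if (ret)
                return ret;

        ret = dev_pm_opp_add(cpu_dev, 1000000000, 1100000);
        if (ret)
                goto err;

        return 0;

err:
        /* Drops every dynamically added OPP for this device along with the
         * table references taken by dev_pm_opp_add(), so nothing leaks. */
        dev_pm_opp_remove_all_dynamic(cpu_dev);
        return ret;
}

Each dev_pm_opp_add() call takes a reference on the OPP table, which is why dev_pm_opp_remove_all_dynamic() above drops one table reference per removed OPP before dropping the reference it took itself.
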
@@ -108,6 +108,7 @@ void dev_pm_opp_put(struct dev_pm_opp *opp);
 int dev_pm_opp_add(struct device *dev, unsigned long freq,
                    unsigned long u_volt);
 void dev_pm_opp_remove(struct device *dev, unsigned long freq);
+void dev_pm_opp_remove_all_dynamic(struct device *dev);
 
 int dev_pm_opp_enable(struct device *dev, unsigned long freq);
@@ -217,6 +218,10 @@ static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq)
 {
 }
 
+static inline void dev_pm_opp_remove_all_dynamic(struct device *dev)
+{
+}
+
 static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
 {
         return 0;
......