Commit 3325254c authored by Linus Torvalds

Merge tag 'pm-5.0-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management fixes from Rafael Wysocki:
 "These fix a PM-runtime framework regression introduced by the recent
  switch-over of device autosuspend to hrtimers and a mistake in the
  "poll idle state" code introduced by a recent change in it.

  Specifics:

   - Since ktime_get() turns out to be problematic for device
     autosuspend in the PM-runtime framework, make it use
     ktime_get_mono_fast_ns() instead (Vincent Guittot).

   - Fix an initial value of a local variable in the "poll idle state"
     code that makes it behave not exactly as expected when all idle
     states except for the "polling" one are disabled (Doug Smythies)"

* tag 'pm-5.0-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  cpuidle: poll_state: Fix default time limit
  PM-runtime: Fix deadlock with ktime_get()
parents 4771eec1 cbffab68
@@ -130,7 +130,7 @@ u64 pm_runtime_autosuspend_expiration(struct device *dev)
 {
 	int autosuspend_delay;
 	u64 last_busy, expires = 0;
-	u64 now = ktime_to_ns(ktime_get());
+	u64 now = ktime_get_mono_fast_ns();
 
 	if (!dev->power.use_autosuspend)
 		goto out;
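The hunk above is the core of the PM-runtime fix: the time base is still the monotonic clock, but it is now read through ktime_get_mono_fast_ns(), the lockless "fast" accessor that is safe even from NMI context, instead of ktime_to_ns(ktime_get()). A minimal sketch of the two read paths, not taken from the patch (the helper name pm_example_now_ns() is made up):

#include <linux/ktime.h>
#include <linux/timekeeping.h>

static u64 pm_example_now_ns(void)
{
	/* Old pattern: ktime_get() retries on the timekeeping seqcount read side. */
	u64 slow = ktime_to_ns(ktime_get());

	/* New pattern: the lockless fast timekeeper, returning nanoseconds directly. */
	u64 fast = ktime_get_mono_fast_ns();

	/* Same clock either way; only the locking on the read side differs. */
	return fast ? fast : slow;
}

Both report CLOCK_MONOTONIC time in nanoseconds; the deadlock named in the shortlog is attributed to calling ktime_get() from the PM-runtime autosuspend paths, which the fast accessor sidesteps.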
@@ -909,7 +909,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
 	 * If 'expires' is after the current time, we've been called
 	 * too early.
 	 */
-	if (expires > 0 && expires < ktime_to_ns(ktime_get())) {
+	if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
 		dev->power.timer_expires = 0;
 		rpm_suspend(dev, dev->power.timer_autosuspends ?
 			(RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
@@ -928,7 +928,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
 int pm_schedule_suspend(struct device *dev, unsigned int delay)
 {
 	unsigned long flags;
-	ktime_t expires;
+	u64 expires;
 	int retval;
 
 	spin_lock_irqsave(&dev->power.lock, flags);
@@ -945,8 +945,8 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
 	/* Other scheduled or pending requests need to be canceled. */
 	pm_runtime_cancel_pending(dev);
 
-	expires = ktime_add(ktime_get(), ms_to_ktime(delay));
-	dev->power.timer_expires = ktime_to_ns(expires);
+	expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
+	dev->power.timer_expires = expires;
 	dev->power.timer_autosuspends = 0;
 	hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
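For context on the pm_schedule_suspend() hunk above: the millisecond delay is turned into an absolute CLOCK_MONOTONIC deadline in nanoseconds and handed to the suspend hrtimer in HRTIMER_MODE_ABS. A rough, self-contained sketch of that pattern (example_timer, example_timer_fn() and example_schedule() are made-up names, not the driver-core code):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer example_timer;

static enum hrtimer_restart example_timer_fn(struct hrtimer *timer)
{
	/* Runs once the absolute deadline has passed; kick off the deferred work here. */
	return HRTIMER_NORESTART;
}

static void example_schedule(unsigned int delay_ms)
{
	/* Absolute deadline: "now" in nanoseconds plus the delay converted to nanoseconds. */
	u64 expires = ktime_get_mono_fast_ns() + (u64)delay_ms * NSEC_PER_MSEC;

	hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	example_timer.function = example_timer_fn;
	hrtimer_start(&example_timer, expires, HRTIMER_MODE_ABS);
}

With delay_ms = 250, expires lands 250,000,000 ns after the current monotonic time, which is the kind of value dev->power.timer_expires now stores in the patched code.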
@@ -21,7 +21,7 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev,
 	local_irq_enable();
 	if (!current_set_polling_and_test()) {
 		unsigned int loop_count = 0;
-		u64 limit = TICK_USEC;
+		u64 limit = TICK_NSEC;
 		int i;
 
 		for (i = 1; i < drv->state_count; i++) {
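The poll_state hunk above is a units fix: the limit is compared against elapsed time measured in nanoseconds, so its default has to be one tick in nanoseconds (TICK_NSEC), not the tick length in microseconds (TICK_USEC). Rough numbers, assuming HZ=250; the check below is an illustration with a made-up helper, not the poll_idle() loop itself:

#include <linux/jiffies.h>
#include <linux/ktime.h>

/*
 * With HZ == 250, TICK_NSEC is about 4,000,000 (one 4 ms tick in ns),
 * while TICK_USEC is about 4,000. Used as a nanosecond limit, the old
 * TICK_USEC default cut polling off after roughly 4 us instead of a
 * full tick whenever no other enabled idle state supplied a limit.
 */
static bool example_poll_expired(u64 start_ns)
{
	u64 limit = TICK_NSEC;	/* the fixed default, already in nanoseconds */

	return ktime_get_mono_fast_ns() - start_ns > limit;
}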
@@ -105,7 +105,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
 static inline void pm_runtime_mark_last_busy(struct device *dev)
 {
-	WRITE_ONCE(dev->power.last_busy, ktime_to_ns(ktime_get()));
+	WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns());
 }
 
 static inline bool pm_runtime_is_irq_safe(struct device *dev)