Commit 852b7fa2 authored by Rafael J. Wysocki's avatar Rafael J. Wysocki

Merge branches 'pm-core', 'pm-qos' and 'pm-avs'

* pm-core:
  PM / core: Fix bug in the error handling of async suspend
  PM / wakeirq: Fix dedicated wakeirq for drivers not using autosuspend
  PM / Runtime: Defer resuming of the device in pm_runtime_force_resume()
  PM / Runtime: Don't allow to suspend a device with an active child
  net: smsc911x: Synchronize the runtime PM status during system suspend
  PM / Runtime: Convert pm_runtime_set_suspended() to return an int
  PM / Runtime: Clarify comment in rpm_resume() when resuming the parent
  PM / Runtime: Remove the exported function pm_children_suspended()

* pm-qos:
  PM / QoS: Export dev_pm_qos_update_user_latency_tolerance
  PM / QoS: Fix writing 'auto' to pm_qos_latency_tolerance_us
  PM / QoS: Improve sysfs pm_qos_latency_tolerance validation

* pm-avs:
  PM / AVS: rockchip-io: make the log more consistent
...@@ -1460,10 +1460,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) ...@@ -1460,10 +1460,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dpm_watchdog_clear(&wd); dpm_watchdog_clear(&wd);
Complete: Complete:
complete_all(&dev->power.completion);
if (error) if (error)
async_error = error; async_error = error;
complete_all(&dev->power.completion);
TRACE_SUSPEND(error); TRACE_SUSPEND(error);
return error; return error;
} }
......
...@@ -21,14 +21,22 @@ extern void pm_runtime_init(struct device *dev); ...@@ -21,14 +21,22 @@ extern void pm_runtime_init(struct device *dev);
extern void pm_runtime_reinit(struct device *dev); extern void pm_runtime_reinit(struct device *dev);
extern void pm_runtime_remove(struct device *dev); extern void pm_runtime_remove(struct device *dev);
#define WAKE_IRQ_DEDICATED_ALLOCATED BIT(0)
#define WAKE_IRQ_DEDICATED_MANAGED BIT(1)
#define WAKE_IRQ_DEDICATED_MASK (WAKE_IRQ_DEDICATED_ALLOCATED | \
WAKE_IRQ_DEDICATED_MANAGED)
struct wake_irq { struct wake_irq {
struct device *dev; struct device *dev;
unsigned int status;
int irq; int irq;
bool dedicated_irq:1;
}; };
extern void dev_pm_arm_wake_irq(struct wake_irq *wirq); extern void dev_pm_arm_wake_irq(struct wake_irq *wirq);
extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq); extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq);
extern void dev_pm_enable_wake_irq_check(struct device *dev,
bool can_change_status);
extern void dev_pm_disable_wake_irq_check(struct device *dev);
#ifdef CONFIG_PM_SLEEP #ifdef CONFIG_PM_SLEEP
...@@ -104,6 +112,15 @@ static inline void dev_pm_disarm_wake_irq(struct wake_irq *wirq) ...@@ -104,6 +112,15 @@ static inline void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
{ {
} }
/* No-op stub for dev_pm_enable_wake_irq_check() — presumably the branch
 * where wakeirq support is compiled out (the enclosing #ifdef is not fully
 * visible here; confirm against drivers/base/power/power.h).
 */
static inline void dev_pm_enable_wake_irq_check(struct device *dev,
bool can_change_status)
{
}
/* No-op stub for dev_pm_disable_wake_irq_check(); pairs with the enable
 * stub above so callers compile unchanged when the feature is disabled.
 */
static inline void dev_pm_disable_wake_irq_check(struct device *dev)
{
}
#endif #endif
#ifdef CONFIG_PM_SLEEP #ifdef CONFIG_PM_SLEEP
......
...@@ -856,7 +856,10 @@ int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) ...@@ -856,7 +856,10 @@ int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
struct dev_pm_qos_request *req; struct dev_pm_qos_request *req;
if (val < 0) { if (val < 0) {
ret = -EINVAL; if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
ret = 0;
else
ret = -EINVAL;
goto out; goto out;
} }
req = kzalloc(sizeof(*req), GFP_KERNEL); req = kzalloc(sizeof(*req), GFP_KERNEL);
...@@ -883,6 +886,7 @@ int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) ...@@ -883,6 +886,7 @@ int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
mutex_unlock(&dev_pm_qos_mtx); mutex_unlock(&dev_pm_qos_mtx);
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);
/** /**
* dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
......
...@@ -241,7 +241,8 @@ static int rpm_check_suspend_allowed(struct device *dev) ...@@ -241,7 +241,8 @@ static int rpm_check_suspend_allowed(struct device *dev)
retval = -EACCES; retval = -EACCES;
else if (atomic_read(&dev->power.usage_count) > 0) else if (atomic_read(&dev->power.usage_count) > 0)
retval = -EAGAIN; retval = -EAGAIN;
else if (!pm_children_suspended(dev)) else if (!dev->power.ignore_children &&
atomic_read(&dev->power.child_count))
retval = -EBUSY; retval = -EBUSY;
/* Pending resume requests take precedence over suspends. */ /* Pending resume requests take precedence over suspends. */
...@@ -515,7 +516,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) ...@@ -515,7 +516,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
callback = RPM_GET_CALLBACK(dev, runtime_suspend); callback = RPM_GET_CALLBACK(dev, runtime_suspend);
dev_pm_enable_wake_irq(dev); dev_pm_enable_wake_irq_check(dev, true);
retval = rpm_callback(callback, dev); retval = rpm_callback(callback, dev);
if (retval) if (retval)
goto fail; goto fail;
...@@ -554,7 +555,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) ...@@ -554,7 +555,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
return retval; return retval;
fail: fail:
dev_pm_disable_wake_irq(dev); dev_pm_disable_wake_irq_check(dev);
__update_runtime_status(dev, RPM_ACTIVE); __update_runtime_status(dev, RPM_ACTIVE);
dev->power.deferred_resume = false; dev->power.deferred_resume = false;
wake_up_all(&dev->power.wait_queue); wake_up_all(&dev->power.wait_queue);
...@@ -712,8 +713,8 @@ static int rpm_resume(struct device *dev, int rpmflags) ...@@ -712,8 +713,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
spin_lock(&parent->power.lock); spin_lock(&parent->power.lock);
/* /*
* We can resume if the parent's runtime PM is disabled or it * Resume the parent if it has runtime PM enabled and not been
* is set to ignore children. * set to ignore its children.
*/ */
if (!parent->power.disable_depth if (!parent->power.disable_depth
&& !parent->power.ignore_children) { && !parent->power.ignore_children) {
...@@ -737,12 +738,12 @@ static int rpm_resume(struct device *dev, int rpmflags) ...@@ -737,12 +738,12 @@ static int rpm_resume(struct device *dev, int rpmflags)
callback = RPM_GET_CALLBACK(dev, runtime_resume); callback = RPM_GET_CALLBACK(dev, runtime_resume);
dev_pm_disable_wake_irq(dev); dev_pm_disable_wake_irq_check(dev);
retval = rpm_callback(callback, dev); retval = rpm_callback(callback, dev);
if (retval) { if (retval) {
__update_runtime_status(dev, RPM_SUSPENDED); __update_runtime_status(dev, RPM_SUSPENDED);
pm_runtime_cancel_pending(dev); pm_runtime_cancel_pending(dev);
dev_pm_enable_wake_irq(dev); dev_pm_enable_wake_irq_check(dev, false);
} else { } else {
no_callback: no_callback:
__update_runtime_status(dev, RPM_ACTIVE); __update_runtime_status(dev, RPM_ACTIVE);
...@@ -1027,7 +1028,17 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status) ...@@ -1027,7 +1028,17 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
goto out_set; goto out_set;
if (status == RPM_SUSPENDED) { if (status == RPM_SUSPENDED) {
/* It always is possible to set the status to 'suspended'. */ /*
* It is invalid to suspend a device with an active child,
* unless it has been set to ignore its children.
*/
if (!dev->power.ignore_children &&
atomic_read(&dev->power.child_count)) {
dev_err(dev, "runtime PM trying to suspend device but active child\n");
error = -EBUSY;
goto out;
}
if (parent) { if (parent) {
atomic_add_unless(&parent->power.child_count, -1, 0); atomic_add_unless(&parent->power.child_count, -1, 0);
notify_parent = !parent->power.ignore_children; notify_parent = !parent->power.ignore_children;
...@@ -1478,6 +1489,16 @@ int pm_runtime_force_suspend(struct device *dev) ...@@ -1478,6 +1489,16 @@ int pm_runtime_force_suspend(struct device *dev)
if (ret) if (ret)
goto err; goto err;
/*
* Increase the runtime PM usage count for the device's parent, in case
* when we find the device being used when system suspend was invoked.
* This informs pm_runtime_force_resume() to resume the parent
* immediately, which is needed to be able to resume its children,
* when not deferring the resume to be managed via runtime PM.
*/
if (dev->parent && atomic_read(&dev->power.usage_count) > 1)
pm_runtime_get_noresume(dev->parent);
pm_runtime_set_suspended(dev); pm_runtime_set_suspended(dev);
return 0; return 0;
err: err:
...@@ -1487,16 +1508,20 @@ int pm_runtime_force_suspend(struct device *dev) ...@@ -1487,16 +1508,20 @@ int pm_runtime_force_suspend(struct device *dev)
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend); EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
/** /**
* pm_runtime_force_resume - Force a device into resume state. * pm_runtime_force_resume - Force a device into resume state if needed.
* @dev: Device to resume. * @dev: Device to resume.
* *
* Prior invoking this function we expect the user to have brought the device * Prior invoking this function we expect the user to have brought the device
* into low power state by a call to pm_runtime_force_suspend(). Here we reverse * into low power state by a call to pm_runtime_force_suspend(). Here we reverse
* those actions and brings the device into full power. We update the runtime PM * those actions and brings the device into full power, if it is expected to be
* status and re-enables runtime PM. * used on system resume. To distinguish that, we check whether the runtime PM
* usage count is greater than 1 (the PM core increases the usage count in the
* system PM prepare phase), as that indicates a real user (such as a subsystem,
* driver, userspace, etc.) is using it. If that is the case, the device is
* expected to be used on system resume as well, so then we resume it. In the
* other case, we defer the resume to be managed via runtime PM.
* *
* Typically this function may be invoked from a system resume callback to make * Typically this function may be invoked from a system resume callback.
* sure the device is put into full power state.
*/ */
int pm_runtime_force_resume(struct device *dev) int pm_runtime_force_resume(struct device *dev)
{ {
...@@ -1513,6 +1538,17 @@ int pm_runtime_force_resume(struct device *dev) ...@@ -1513,6 +1538,17 @@ int pm_runtime_force_resume(struct device *dev)
if (!pm_runtime_status_suspended(dev)) if (!pm_runtime_status_suspended(dev))
goto out; goto out;
/*
* Decrease the parent's runtime PM usage count, if we increased it
* during system suspend in pm_runtime_force_suspend().
*/
if (atomic_read(&dev->power.usage_count) > 1) {
if (dev->parent)
pm_runtime_put_noidle(dev->parent);
} else {
goto out;
}
ret = pm_runtime_set_active(dev); ret = pm_runtime_set_active(dev);
if (ret) if (ret)
goto out; goto out;
......
...@@ -263,7 +263,11 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev, ...@@ -263,7 +263,11 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
s32 value; s32 value;
int ret; int ret;
if (kstrtos32(buf, 0, &value)) { if (kstrtos32(buf, 0, &value) == 0) {
/* Users can't write negative values directly */
if (value < 0)
return -EINVAL;
} else {
if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n")) if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n"))
value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
else if (!strcmp(buf, "any") || !strcmp(buf, "any\n")) else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
......
...@@ -110,8 +110,10 @@ void dev_pm_clear_wake_irq(struct device *dev) ...@@ -110,8 +110,10 @@ void dev_pm_clear_wake_irq(struct device *dev)
dev->power.wakeirq = NULL; dev->power.wakeirq = NULL;
spin_unlock_irqrestore(&dev->power.lock, flags); spin_unlock_irqrestore(&dev->power.lock, flags);
if (wirq->dedicated_irq) if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) {
free_irq(wirq->irq, wirq); free_irq(wirq->irq, wirq);
wirq->status &= ~WAKE_IRQ_DEDICATED_MASK;
}
kfree(wirq); kfree(wirq);
} }
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq); EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
...@@ -179,7 +181,6 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) ...@@ -179,7 +181,6 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
wirq->dev = dev; wirq->dev = dev;
wirq->irq = irq; wirq->irq = irq;
wirq->dedicated_irq = true;
irq_set_status_flags(irq, IRQ_NOAUTOEN); irq_set_status_flags(irq, IRQ_NOAUTOEN);
/* /*
...@@ -195,6 +196,8 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) ...@@ -195,6 +196,8 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
if (err) if (err)
goto err_free_irq; goto err_free_irq;
wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED;
return err; return err;
err_free_irq: err_free_irq:
...@@ -210,9 +213,9 @@ EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq); ...@@ -210,9 +213,9 @@ EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
* dev_pm_enable_wake_irq - Enable device wake-up interrupt * dev_pm_enable_wake_irq - Enable device wake-up interrupt
* @dev: Device * @dev: Device
* *
* Called from the bus code or the device driver for * Optionally called from the bus code or the device driver for
* runtime_suspend() to enable the wake-up interrupt while * runtime_resume() to override the PM runtime core managed wake-up
* the device is running. * interrupt handling to enable the wake-up interrupt.
* *
* Note that for runtime_suspend()) the wake-up interrupts * Note that for runtime_suspend()) the wake-up interrupts
* should be unconditionally enabled unlike for suspend() * should be unconditionally enabled unlike for suspend()
...@@ -222,7 +225,7 @@ void dev_pm_enable_wake_irq(struct device *dev) ...@@ -222,7 +225,7 @@ void dev_pm_enable_wake_irq(struct device *dev)
{ {
struct wake_irq *wirq = dev->power.wakeirq; struct wake_irq *wirq = dev->power.wakeirq;
if (wirq && wirq->dedicated_irq) if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
enable_irq(wirq->irq); enable_irq(wirq->irq);
} }
EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq); EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);
...@@ -231,19 +234,72 @@ EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq); ...@@ -231,19 +234,72 @@ EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);
* dev_pm_disable_wake_irq - Disable device wake-up interrupt * dev_pm_disable_wake_irq - Disable device wake-up interrupt
* @dev: Device * @dev: Device
* *
* Called from the bus code or the device driver for * Optionally called from the bus code or the device driver for
* runtime_resume() to disable the wake-up interrupt while * runtime_suspend() to override the PM runtime core managed wake-up
* the device is running. * interrupt handling to disable the wake-up interrupt.
*/ */
void dev_pm_disable_wake_irq(struct device *dev) void dev_pm_disable_wake_irq(struct device *dev)
{ {
struct wake_irq *wirq = dev->power.wakeirq; struct wake_irq *wirq = dev->power.wakeirq;
if (wirq && wirq->dedicated_irq) if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
disable_irq_nosync(wirq->irq); disable_irq_nosync(wirq->irq);
} }
EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq); EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);
/**
 * dev_pm_enable_wake_irq_check - Checks and enables wake-up interrupt
 * @dev: Device
 * @can_change_status: Can change wake-up interrupt status
 *
 * Enables wakeirq conditionally. We need to enable wake-up interrupt
 * lazily on the first rpm_suspend(). This is needed as the consumer device
 * starts in RPM_SUSPENDED state, and the first pm_runtime_get() would
 * otherwise try to disable already disabled wakeirq. The wake-up interrupt
 * starts disabled with IRQ_NOAUTOEN set.
 *
 * Should be only called from rpm_suspend() and rpm_resume() path.
 * Caller must hold &dev->power.lock to change wirq->status
 */
void dev_pm_enable_wake_irq_check(struct device *dev,
				  bool can_change_status)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	/* Nothing to do unless a dedicated wakeirq was allocated. */
	if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
		return;

	/* Not yet managed: either claim management now or leave it alone. */
	if (!(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) {
		if (!can_change_status)
			return;
		wirq->status |= WAKE_IRQ_DEDICATED_MANAGED;
	}

	enable_irq(wirq->irq);
}
/**
 * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
 * @dev: Device
 *
 * Disables wake-up interrupt conditionally based on status.
 * Should be only called from rpm_suspend() and rpm_resume() path.
 */
void dev_pm_disable_wake_irq_check(struct device *dev)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	/*
	 * MANAGED implies a dedicated (ALLOCATED) wakeirq, so a single
	 * check covers both the "no wakeirq" and "not managed" cases.
	 */
	if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_MANAGED))
		disable_irq_nosync(wirq->irq);
}
/** /**
* dev_pm_arm_wake_irq - Arm device wake-up * dev_pm_arm_wake_irq - Arm device wake-up
* @wirq: Device wake-up interrupt * @wirq: Device wake-up interrupt
......
...@@ -2591,6 +2591,9 @@ static int smsc911x_suspend(struct device *dev) ...@@ -2591,6 +2591,9 @@ static int smsc911x_suspend(struct device *dev)
PMT_CTRL_PM_MODE_D1_ | PMT_CTRL_WOL_EN_ | PMT_CTRL_PM_MODE_D1_ | PMT_CTRL_WOL_EN_ |
PMT_CTRL_ED_EN_ | PMT_CTRL_PME_EN_); PMT_CTRL_ED_EN_ | PMT_CTRL_PME_EN_);
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
return 0; return 0;
} }
...@@ -2600,6 +2603,9 @@ static int smsc911x_resume(struct device *dev) ...@@ -2600,6 +2603,9 @@ static int smsc911x_resume(struct device *dev)
struct smsc911x_data *pdata = netdev_priv(ndev); struct smsc911x_data *pdata = netdev_priv(ndev);
unsigned int to = 100; unsigned int to = 100;
pm_runtime_enable(dev);
pm_runtime_resume(dev);
/* Note 3.11 from the datasheet: /* Note 3.11 from the datasheet:
* "When the LAN9220 is in a power saving state, a write of any * "When the LAN9220 is in a power saving state, a write of any
* data to the BYTE_TEST register will wake-up the device." * data to the BYTE_TEST register will wake-up the device."
......
...@@ -143,7 +143,7 @@ static int rockchip_iodomain_notify(struct notifier_block *nb, ...@@ -143,7 +143,7 @@ static int rockchip_iodomain_notify(struct notifier_block *nb,
if (ret && event == REGULATOR_EVENT_PRE_VOLTAGE_CHANGE) if (ret && event == REGULATOR_EVENT_PRE_VOLTAGE_CHANGE)
return NOTIFY_BAD; return NOTIFY_BAD;
dev_info(supply->iod->dev, "Setting to %d done\n", uV); dev_dbg(supply->iod->dev, "Setting to %d done\n", uV);
return NOTIFY_OK; return NOTIFY_OK;
} }
......
...@@ -61,12 +61,6 @@ static inline void pm_suspend_ignore_children(struct device *dev, bool enable) ...@@ -61,12 +61,6 @@ static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
dev->power.ignore_children = enable; dev->power.ignore_children = enable;
} }
static inline bool pm_children_suspended(struct device *dev)
{
return dev->power.ignore_children
|| !atomic_read(&dev->power.child_count);
}
static inline void pm_runtime_get_noresume(struct device *dev) static inline void pm_runtime_get_noresume(struct device *dev)
{ {
atomic_inc(&dev->power.usage_count); atomic_inc(&dev->power.usage_count);
...@@ -162,7 +156,6 @@ static inline void pm_runtime_allow(struct device *dev) {} ...@@ -162,7 +156,6 @@ static inline void pm_runtime_allow(struct device *dev) {}
static inline void pm_runtime_forbid(struct device *dev) {} static inline void pm_runtime_forbid(struct device *dev) {}
static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {} static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
static inline bool pm_children_suspended(struct device *dev) { return false; }
static inline void pm_runtime_get_noresume(struct device *dev) {} static inline void pm_runtime_get_noresume(struct device *dev) {}
static inline void pm_runtime_put_noidle(struct device *dev) {} static inline void pm_runtime_put_noidle(struct device *dev) {}
static inline bool device_run_wake(struct device *dev) { return false; } static inline bool device_run_wake(struct device *dev) { return false; }
...@@ -265,9 +258,9 @@ static inline int pm_runtime_set_active(struct device *dev) ...@@ -265,9 +258,9 @@ static inline int pm_runtime_set_active(struct device *dev)
return __pm_runtime_set_status(dev, RPM_ACTIVE); return __pm_runtime_set_status(dev, RPM_ACTIVE);
} }
static inline void pm_runtime_set_suspended(struct device *dev) static inline int pm_runtime_set_suspended(struct device *dev)
{ {
__pm_runtime_set_status(dev, RPM_SUSPENDED); return __pm_runtime_set_status(dev, RPM_SUSPENDED);
} }
static inline void pm_runtime_disable(struct device *dev) static inline void pm_runtime_disable(struct device *dev)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment