Commit dbfa4478 authored by Rafael J. Wysocki

PM: runtime: Adjust white space in the core code

Some inconsistent usage of white space in the PM-runtime core code
causes that code to be somewhat harder to read than it would have
been otherwise, so adjust the white space in there to be more
consistent with the rest of the code.

No expected functional impact.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
parent 0307f4e8
...@@ -243,8 +243,7 @@ void pm_runtime_set_memalloc_noio(struct device *dev, bool enable) ...@@ -243,8 +243,7 @@ void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
* flag was set by any one of the descendants. * flag was set by any one of the descendants.
*/ */
if (!dev || (!enable && if (!dev || (!enable &&
device_for_each_child(dev, NULL, device_for_each_child(dev, NULL, dev_memalloc_noio)))
dev_memalloc_noio)))
break; break;
} }
mutex_unlock(&dev_hotplug_mutex); mutex_unlock(&dev_hotplug_mutex);
...@@ -265,15 +264,13 @@ static int rpm_check_suspend_allowed(struct device *dev) ...@@ -265,15 +264,13 @@ static int rpm_check_suspend_allowed(struct device *dev)
retval = -EACCES; retval = -EACCES;
else if (atomic_read(&dev->power.usage_count)) else if (atomic_read(&dev->power.usage_count))
retval = -EAGAIN; retval = -EAGAIN;
else if (!dev->power.ignore_children && else if (!dev->power.ignore_children && atomic_read(&dev->power.child_count))
atomic_read(&dev->power.child_count))
retval = -EBUSY; retval = -EBUSY;
/* Pending resume requests take precedence over suspends. */ /* Pending resume requests take precedence over suspends. */
else if ((dev->power.deferred_resume else if ((dev->power.deferred_resume &&
&& dev->power.runtime_status == RPM_SUSPENDING) dev->power.runtime_status == RPM_SUSPENDING) ||
|| (dev->power.request_pending (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME))
&& dev->power.request == RPM_REQ_RESUME))
retval = -EAGAIN; retval = -EAGAIN;
else if (__dev_pm_qos_resume_latency(dev) == 0) else if (__dev_pm_qos_resume_latency(dev) == 0)
retval = -EPERM; retval = -EPERM;
...@@ -404,9 +401,9 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev) ...@@ -404,9 +401,9 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
* *
* Do that if resume fails too. * Do that if resume fails too.
*/ */
if (use_links if (use_links &&
&& ((dev->power.runtime_status == RPM_SUSPENDING && !retval) ((dev->power.runtime_status == RPM_SUSPENDING && !retval) ||
|| (dev->power.runtime_status == RPM_RESUMING && retval))) { (dev->power.runtime_status == RPM_RESUMING && retval))) {
idx = device_links_read_lock(); idx = device_links_read_lock();
__rpm_put_suppliers(dev, false); __rpm_put_suppliers(dev, false);
...@@ -491,6 +488,7 @@ static int rpm_idle(struct device *dev, int rpmflags) ...@@ -491,6 +488,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
/* Act as though RPM_NOWAIT is always set. */ /* Act as though RPM_NOWAIT is always set. */
else if (dev->power.idle_notification) else if (dev->power.idle_notification)
retval = -EINPROGRESS; retval = -EINPROGRESS;
if (retval) if (retval)
goto out; goto out;
...@@ -574,12 +572,12 @@ static int rpm_suspend(struct device *dev, int rpmflags) ...@@ -574,12 +572,12 @@ static int rpm_suspend(struct device *dev, int rpmflags)
/* Synchronous suspends are not allowed in the RPM_RESUMING state. */ /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC)) if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
retval = -EAGAIN; retval = -EAGAIN;
if (retval) if (retval)
goto out; goto out;
/* If the autosuspend_delay time hasn't expired yet, reschedule. */ /* If the autosuspend_delay time hasn't expired yet, reschedule. */
if ((rpmflags & RPM_AUTO) if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) {
&& dev->power.runtime_status != RPM_SUSPENDING) {
u64 expires = pm_runtime_autosuspend_expiration(dev); u64 expires = pm_runtime_autosuspend_expiration(dev);
if (expires != 0) { if (expires != 0) {
...@@ -797,8 +795,8 @@ static int rpm_resume(struct device *dev, int rpmflags) ...@@ -797,8 +795,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
goto out; goto out;
} }
if (dev->power.runtime_status == RPM_RESUMING if (dev->power.runtime_status == RPM_RESUMING ||
|| dev->power.runtime_status == RPM_SUSPENDING) { dev->power.runtime_status == RPM_SUSPENDING) {
DEFINE_WAIT(wait); DEFINE_WAIT(wait);
if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) { if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
...@@ -825,8 +823,8 @@ static int rpm_resume(struct device *dev, int rpmflags) ...@@ -825,8 +823,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
for (;;) { for (;;) {
prepare_to_wait(&dev->power.wait_queue, &wait, prepare_to_wait(&dev->power.wait_queue, &wait,
TASK_UNINTERRUPTIBLE); TASK_UNINTERRUPTIBLE);
if (dev->power.runtime_status != RPM_RESUMING if (dev->power.runtime_status != RPM_RESUMING &&
&& dev->power.runtime_status != RPM_SUSPENDING) dev->power.runtime_status != RPM_SUSPENDING)
break; break;
spin_unlock_irq(&dev->power.lock); spin_unlock_irq(&dev->power.lock);
...@@ -846,9 +844,9 @@ static int rpm_resume(struct device *dev, int rpmflags) ...@@ -846,9 +844,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
*/ */
if (dev->power.no_callbacks && !parent && dev->parent) { if (dev->power.no_callbacks && !parent && dev->parent) {
spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING); spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
if (dev->parent->power.disable_depth > 0 if (dev->parent->power.disable_depth > 0 ||
|| dev->parent->power.ignore_children dev->parent->power.ignore_children ||
|| dev->parent->power.runtime_status == RPM_ACTIVE) { dev->parent->power.runtime_status == RPM_ACTIVE) {
atomic_inc(&dev->parent->power.child_count); atomic_inc(&dev->parent->power.child_count);
spin_unlock(&dev->parent->power.lock); spin_unlock(&dev->parent->power.lock);
retval = 1; retval = 1;
...@@ -877,6 +875,7 @@ static int rpm_resume(struct device *dev, int rpmflags) ...@@ -877,6 +875,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
parent = dev->parent; parent = dev->parent;
if (dev->power.irq_safe) if (dev->power.irq_safe)
goto skip_parent; goto skip_parent;
spin_unlock(&dev->power.lock); spin_unlock(&dev->power.lock);
pm_runtime_get_noresume(parent); pm_runtime_get_noresume(parent);
...@@ -886,8 +885,8 @@ static int rpm_resume(struct device *dev, int rpmflags) ...@@ -886,8 +885,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
* Resume the parent if it has runtime PM enabled and not been * Resume the parent if it has runtime PM enabled and not been
* set to ignore its children. * set to ignore its children.
*/ */
if (!parent->power.disable_depth if (!parent->power.disable_depth &&
&& !parent->power.ignore_children) { !parent->power.ignore_children) {
rpm_resume(parent, 0); rpm_resume(parent, 0);
if (parent->power.runtime_status != RPM_ACTIVE) if (parent->power.runtime_status != RPM_ACTIVE)
retval = -EBUSY; retval = -EBUSY;
...@@ -897,6 +896,7 @@ static int rpm_resume(struct device *dev, int rpmflags) ...@@ -897,6 +896,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
spin_lock(&dev->power.lock); spin_lock(&dev->power.lock);
if (retval) if (retval)
goto out; goto out;
goto repeat; goto repeat;
} }
skip_parent: skip_parent:
...@@ -1301,9 +1301,9 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status) ...@@ -1301,9 +1301,9 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
* not active, has runtime PM enabled and the * not active, has runtime PM enabled and the
* 'power.ignore_children' flag unset. * 'power.ignore_children' flag unset.
*/ */
if (!parent->power.disable_depth if (!parent->power.disable_depth &&
&& !parent->power.ignore_children !parent->power.ignore_children &&
&& parent->power.runtime_status != RPM_ACTIVE) { parent->power.runtime_status != RPM_ACTIVE) {
dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n", dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
dev_name(dev), dev_name(dev),
dev_name(parent)); dev_name(parent));
...@@ -1368,9 +1368,9 @@ static void __pm_runtime_barrier(struct device *dev) ...@@ -1368,9 +1368,9 @@ static void __pm_runtime_barrier(struct device *dev)
dev->power.request_pending = false; dev->power.request_pending = false;
} }
if (dev->power.runtime_status == RPM_SUSPENDING if (dev->power.runtime_status == RPM_SUSPENDING ||
|| dev->power.runtime_status == RPM_RESUMING dev->power.runtime_status == RPM_RESUMING ||
|| dev->power.idle_notification) { dev->power.idle_notification) {
DEFINE_WAIT(wait); DEFINE_WAIT(wait);
/* Suspend, wake-up or idle notification in progress. */ /* Suspend, wake-up or idle notification in progress. */
...@@ -1455,8 +1455,8 @@ void __pm_runtime_disable(struct device *dev, bool check_resume) ...@@ -1455,8 +1455,8 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
* means there probably is some I/O to process and disabling runtime PM * means there probably is some I/O to process and disabling runtime PM
* shouldn't prevent the device from processing the I/O. * shouldn't prevent the device from processing the I/O.
*/ */
if (check_resume && dev->power.request_pending if (check_resume && dev->power.request_pending &&
&& dev->power.request == RPM_REQ_RESUME) { dev->power.request == RPM_REQ_RESUME) {
/* /*
* Prevent suspends and idle notifications from being carried * Prevent suspends and idle notifications from being carried
* out after we have woken up the device. * out after we have woken up the device.
...@@ -1616,6 +1616,7 @@ void pm_runtime_irq_safe(struct device *dev) ...@@ -1616,6 +1616,7 @@ void pm_runtime_irq_safe(struct device *dev)
{ {
if (dev->parent) if (dev->parent)
pm_runtime_get_sync(dev->parent); pm_runtime_get_sync(dev->parent);
spin_lock_irq(&dev->power.lock); spin_lock_irq(&dev->power.lock);
dev->power.irq_safe = 1; dev->power.irq_safe = 1;
spin_unlock_irq(&dev->power.lock); spin_unlock_irq(&dev->power.lock);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment