Commit afe94fb8 authored by Rafael J. Wysocki

Merge branches 'pm-core' and 'pm-sleep'

* pm-core:
  PM: runtime: Clarify documentation when callbacks are unassigned
  PM: runtime: Allow unassigned ->runtime_suspend|resume callbacks
  PM: runtime: Improve path in rpm_idle() when no callback
  PM: runtime: document common mistake with pm_runtime_get_sync()

* pm-sleep:
  PM: hibernate: remove leading spaces before tabs
  PM: sleep: remove trailing spaces and tabs
  PM: hibernate: fix spelling mistakes
  PM: wakeirq: Set IRQF_NO_AUTOEN when requesting the IRQ
@@ -378,7 +378,11 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:

  `int pm_runtime_get_sync(struct device *dev);`
    - increment the device's usage counter, run pm_runtime_resume(dev) and
-      return its result
+      return its result;
+      note that it does not drop the device's usage counter on errors, so
+      consider using pm_runtime_resume_and_get() instead of it, especially
+      if its return value is checked by the caller, as this is likely to
+      result in cleaner code.

  `int pm_runtime_get_if_in_use(struct device *dev);`
    - return -EINVAL if 'power.disable_depth' is nonzero; otherwise, if the
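The pattern this documentation change points at looks roughly as follows. This is a minimal sketch, not code from the commit: foo_read() and the surrounding driver are hypothetical, while pm_runtime_resume_and_get(), pm_runtime_mark_last_busy() and pm_runtime_put_autosuspend() are existing runtime PM interfaces.

```c
#include <linux/pm_runtime.h>

/* Hypothetical driver helper showing the recommended acquire/release pattern. */
static int foo_read(struct device *dev)
{
	int ret;

	/*
	 * pm_runtime_get_sync() leaves the usage counter incremented even
	 * when the resume fails, so a checked caller would also need
	 * pm_runtime_put_noidle() in its error path.
	 * pm_runtime_resume_and_get() drops the counter on error itself.
	 */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* ... access the hardware ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}
```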
@@ -827,6 +831,15 @@ or driver about runtime power changes. Instead, the driver for the device's
 parent must take responsibility for telling the device's driver when the
 parent's power state changes.

+Note that, in some cases it may not be desirable for subsystems/drivers to call
+pm_runtime_no_callbacks() for their devices. This could be because a subset of
+the runtime PM callbacks needs to be implemented, a platform dependent PM
+domain could get attached to the device or that the device is power managed
+through a supplier device link. For these reasons and to avoid boilerplate code
+in subsystems/drivers, the PM core allows runtime PM callbacks to be
+unassigned. More precisely, if a callback pointer is NULL, the PM core will act
+as though there was a callback and it returned 0.
+
 9. Autosuspend, or automatically-delayed suspends
 =================================================
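With unassigned callbacks now treated as returning 0, a driver that only needs to act on one side of the transition can leave the other pointer NULL instead of supplying an empty stub. A minimal sketch under that assumption (the foo_* names are invented, and it presumes no PM domain, device type, class or bus supplies the missing callback, since the PM core consults those layers first):

```c
#include <linux/pm.h>

static int foo_runtime_resume(struct device *dev)
{
	/* Reprogram the hardware after its power domain comes back up. */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	/*
	 * ->runtime_suspend and ->runtime_idle are deliberately left
	 * unassigned; the PM core now acts as though NULL callbacks
	 * existed and returned 0.
	 */
	.runtime_resume = foo_runtime_resume,
};
```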
@@ -345,7 +345,7 @@ static void rpm_suspend_suppliers(struct device *dev)
 static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
-	int retval, idx;
+	int retval = 0, idx;
 	bool use_links = dev->power.links_count > 0;

 	if (dev->power.irq_safe) {
@@ -373,7 +373,8 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
 		}
 	}

-	retval = cb(dev);
+	if (cb)
+		retval = cb(dev);

 	if (dev->power.irq_safe) {
 		spin_lock(&dev->power.lock);
@@ -446,7 +447,10 @@ static int rpm_idle(struct device *dev, int rpmflags)
 	/* Pending requests need to be canceled. */
 	dev->power.request = RPM_REQ_NONE;

-	if (dev->power.no_callbacks)
+	callback = RPM_GET_CALLBACK(dev, runtime_idle);
+
+	/* If no callback assume success. */
+	if (!callback || dev->power.no_callbacks)
 		goto out;

 	/* Carry out an asynchronous or a synchronous idle notification. */
@@ -462,10 +466,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
 	dev->power.idle_notification = true;

-	callback = RPM_GET_CALLBACK(dev, runtime_idle);
-
-	if (callback)
-		retval = __rpm_callback(callback, dev);
+	retval = __rpm_callback(callback, dev);

 	dev->power.idle_notification = false;
 	wake_up_all(&dev->power.wait_queue);
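For comparison with the "no callback means success" path above, here is roughly what an assigned ->runtime_idle callback decides; this is a sketch with invented foo_* names, not code from this commit. A nonzero return vetoes the idle-to-suspend transition, while 0 lets rpm_idle() continue toward rpm_suspend(), which is exactly what the core now assumes when the callback pointer is NULL.

```c
static int foo_runtime_idle(struct device *dev)
{
	if (foo_hw_busy(dev))	/* hypothetical "still in use" check */
		return -EBUSY;	/* veto: keep the device active */

	return 0;		/* proceed with runtime suspend */
}
```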
@@ -484,9 +485,6 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
 {
 	int retval;

-	if (!cb)
-		return -ENOSYS;
-
 	if (dev->power.memalloc_noio) {
 		unsigned int noio_flag;
@@ -182,7 +182,6 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
 	wirq->dev = dev;
 	wirq->irq = irq;
-	irq_set_status_flags(irq, IRQ_NOAUTOEN);

 	/* Prevent deferred spurious wakeirqs with disable_irq_nosync() */
 	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
@@ -192,7 +191,8 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
 	 * so we use a threaded irq.
 	 */
 	err = request_threaded_irq(irq, NULL, handle_threaded_wake_irq,
-				   IRQF_ONESHOT, wirq->name, wirq);
+				   IRQF_ONESHOT | IRQF_NO_AUTOEN,
+				   wirq->name, wirq);
 	if (err)
 		goto err_free_name;
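For context, a caller of this API typically wires the wake IRQ up at probe time. A rough sketch, assuming a hypothetical platform driver whose second interrupt is the wake line; device_init_wakeup(), platform_get_irq() and dev_pm_set_dedicated_wake_irq() are the real interfaces:

```c
#include <linux/platform_device.h>
#include <linux/pm_wakeup.h>
#include <linux/pm_wakeirq.h>

static int foo_probe(struct platform_device *pdev)
{
	int wakeirq, ret;

	wakeirq = platform_get_irq(pdev, 1);	/* assumed: second IRQ is the wake line */
	if (wakeirq < 0)
		return wakeirq;

	device_init_wakeup(&pdev->dev, true);

	/*
	 * With IRQF_NO_AUTOEN passed at request time, the IRQ stays
	 * disabled until the PM core arms it around suspend; there is no
	 * longer a window between requesting the IRQ and disabling it.
	 */
	ret = dev_pm_set_dedicated_wake_irq(&pdev->dev, wakeirq);
	if (ret)
		return ret;

	return 0;
}
```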
@@ -380,6 +380,9 @@ static inline int pm_runtime_get(struct device *dev)
  * The possible return values of this function are the same as for
  * pm_runtime_resume() and the runtime PM usage counter of @dev remains
  * incremented in all cases, even if it returns an error code.
+ * Consider using pm_runtime_resume_and_get() instead of it, especially
+ * if its return value is checked by the caller, as this is likely to result
+ * in cleaner code.
  */
 static inline int pm_runtime_get_sync(struct device *dev)
 {
@@ -98,20 +98,20 @@ config PM_STD_PARTITION
 	default ""
 	help
 	  The default resume partition is the partition that the suspend-
 	  to-disk implementation will look for a suspended disk image.

 	  The partition specified here will be different for almost every user.
 	  It should be a valid swap partition (at least for now) that is turned
 	  on before suspending.

 	  The partition specified can be overridden by specifying:

 		resume=/dev/<other device>

 	  which will set the resume partition to the device specified.

 	  Note there is currently not a way to specify which device to save the
 	  suspended image to. It will simply pick the first available swap
 	  device.

 config PM_SLEEP

(whitespace-only hunk: leading spaces before tabs are removed from the indentation, so the old and new text read identically)
// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/power/process.c - Functions for starting/stopping processes on
 *   suspend transitions.
 *
 * Originally from swsusp.

(whitespace-only hunk: trailing spaces and tabs are removed, so the old and new text read identically)
@@ -331,7 +331,7 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
  *
  * Memory bitmap is a structure consisting of many linked lists of
  * objects.  The main list's elements are of type struct zone_bitmap
- * and each of them corresonds to one zone.  For each zone bitmap
+ * and each of them corresponds to one zone.  For each zone bitmap
  * object there is a list of objects of type struct bm_block that
  * represent each blocks of bitmap in which information is stored.
  *
@@ -1146,7 +1146,7 @@ int create_basic_memory_bitmaps(void)
 Free_second_object:
 	kfree(bm2);
 Free_first_bitmap:
 	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
 	kfree(bm1);
 	return -ENOMEM;

(whitespace-only hunk: a leading space before a tab is removed, so the old and new text read identically)
@@ -1500,7 +1500,7 @@ static struct memory_bitmap copy_bm;
 /**
  * swsusp_free - Free pages allocated for hibernation image.
  *
- * Image pages are alocated before snapshot creation, so they need to be
+ * Image pages are allocated before snapshot creation, so they need to be
  * released after resume.
  */
 void swsusp_free(void)
@@ -2326,7 +2326,7 @@ static struct memory_bitmap *safe_highmem_bm;
  * (@nr_highmem_p points to the variable containing the number of highmem image
  * pages).  The pages that are "safe" (ie. will not be overwritten when the
  * hibernation image is restored entirely) have the corresponding bits set in
- * @bm (it must be unitialized).
+ * @bm (it must be uninitialized).
  *
  * NOTE: This function should not be called if there are no highmem image pages.
  */
@@ -2483,7 +2483,7 @@ static inline void free_highmem_data(void) {}
 /**
  * prepare_image - Make room for loading hibernation image.
- * @new_bm: Unitialized memory bitmap structure.
+ * @new_bm: Uninitialized memory bitmap structure.
  * @bm: Memory bitmap with unsafe pages marked.
  *
  * Use @bm to mark the pages that will be overwritten in the process of
@@ -1125,7 +1125,7 @@ struct dec_data {
 };

 /**
- * Deompression function that runs in its own thread.
+ * Decompression function that runs in its own thread.
  */
 static int lzo_decompress_threadfn(void *data)
 {