Commit b62b3064 authored by Rafael J. Wysocki

Merge branch 'pm-sleep'

Merge updates related to system sleep for 5.16-rc1:

 - Clean up hib_wait_io() (Falla Coulibaly).

 - Fix sparse warnings in hibernation-related code (Anders Roxell).

 - Use vzalloc() and kzalloc() instead of their open-coded
   equivalents in hibernation-related code (Cai Huoqing).

 - Prevent user space from crashing the kernel by attempting to
   restore the system state from a swap partition in use (Ye Bin).

 - Do not let "syscore" devices runtime-suspend during system PM
   transitions (Rafael Wysocki).

 - Do not pause cpuidle in the suspend-to-idle path (Rafael Wysocki).

 - Pause cpuidle later and resume it earlier during system PM
   transitions (Rafael Wysocki).

 - Make system suspend code use valid_state() consistently (Rafael
   Wysocki).

 - Add support for enabling wakeup IRQs after invoking the
   ->runtime_suspend() callback and make two drivers use it (Chunfeng
   Yun); a brief usage sketch follows the shortlog below.

* pm-sleep:
  usb: mtu3: enable wake-up interrupt after runtime_suspend called
  usb: xhci-mtk: enable wake-up interrupt after runtime_suspend called
  PM / wakeirq: support enabling wake-up irq after runtime_suspend called
  PM: suspend: Use valid_state() consistently
  PM: sleep: Pause cpuidle later and resume it earlier during system transitions
  PM: suspend: Do not pause cpuidle in the suspend-to-idle path
  PM: sleep: Do not let "syscore" devices runtime-suspend during system transitions
  PM: hibernate: Get block device exclusively in swsusp_check()
  PM: hibernate: swap: Use vzalloc() and kzalloc()
  PM: hibernate: fix sparse warnings
  Revert "PM: sleep: Do not assume that "mem" is always present"
  PM: hibernate: Remove blk_status_to_errno in hib_wait_io
  PM: sleep: Do not assume that "mem" is always present
parents 1fec1611 7ddae8c7
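
For orientation before the diff, here is a minimal sketch of how a platform
driver might adopt the new "reverse" dedicated wake IRQ API added by this
merge. The foo_* functions, the IRQ index, and the error handling are
hypothetical; dev_pm_set_dedicated_wake_irq_reverse() and
dev_pm_clear_wake_irq() come from the wake IRQ API changed below, and the
remaining calls are standard platform/runtime-PM helpers:

  #include <linux/platform_device.h>
  #include <linux/pm_runtime.h>
  #include <linux/pm_wakeirq.h>

  static int foo_probe(struct platform_device *pdev)
  {
          struct device *dev = &pdev->dev;
          int wakeup_irq, ret;

          /* Optional dedicated wake-up line, separate from the IO interrupt. */
          wakeup_irq = platform_get_irq_optional(pdev, 1);
          if (wakeup_irq > 0) {
                  /*
                   * The _reverse variant tells rpm_suspend() to enable this
                   * IRQ only after ->runtime_suspend() has run, instead of
                   * before invoking the callback.
                   */
                  ret = dev_pm_set_dedicated_wake_irq_reverse(dev, wakeup_irq);
                  if (ret)
                          return ret;
          }

          pm_runtime_enable(dev);
          return 0;
  }

  static int foo_remove(struct platform_device *pdev)
  {
          pm_runtime_disable(&pdev->dev);
          dev_pm_clear_wake_irq(&pdev->dev);
          return 0;
  }

Compare with the mtu3_probe() and xhci_mtk_probe() hunks below, which switch
their existing dev_pm_set_dedicated_wake_irq() calls to the _reverse variant
for exactly this reason.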
@@ -32,7 +32,6 @@
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/devfreq.h>
#include <linux/timer.h>
@@ -747,8 +746,6 @@ void dpm_resume_noirq(pm_message_t state)
resume_device_irqs();
device_wakeup_disarm_wake_irqs();
cpuidle_resume();
}
/**
@@ -1051,7 +1048,7 @@ static void device_complete(struct device *dev, pm_message_t state)
const char *info = NULL;
if (dev->power.syscore)
return;
goto out;
device_lock(dev);
@@ -1081,6 +1078,7 @@ static void device_complete(struct device *dev, pm_message_t state)
device_unlock(dev);
out:
pm_runtime_put(dev);
}
@@ -1336,8 +1334,6 @@ int dpm_suspend_noirq(pm_message_t state)
{
int ret;
cpuidle_pause();
device_wakeup_arm_wake_irqs();
suspend_device_irqs();
@@ -1794,9 +1790,6 @@ static int device_prepare(struct device *dev, pm_message_t state)
int (*callback)(struct device *) = NULL;
int ret = 0;
if (dev->power.syscore)
return 0;
/*
* If a device's parent goes into runtime suspend at the wrong time,
* it won't be possible to resume the device. To prevent this we
@@ -1805,6 +1798,9 @@ static int device_prepare(struct device *dev, pm_message_t state)
*/
pm_runtime_get_noresume(dev);
if (dev->power.syscore)
return 0;
device_lock(dev);
dev->power.wakeup_path = false;
......
@@ -25,8 +25,10 @@ extern u64 pm_runtime_active_time(struct device *dev);
#define WAKE_IRQ_DEDICATED_ALLOCATED BIT(0)
#define WAKE_IRQ_DEDICATED_MANAGED BIT(1)
#define WAKE_IRQ_DEDICATED_REVERSE BIT(2)
#define WAKE_IRQ_DEDICATED_MASK (WAKE_IRQ_DEDICATED_ALLOCATED | \
WAKE_IRQ_DEDICATED_MANAGED)
WAKE_IRQ_DEDICATED_MANAGED | \
WAKE_IRQ_DEDICATED_REVERSE)
struct wake_irq {
struct device *dev;
@@ -39,7 +41,8 @@ extern void dev_pm_arm_wake_irq(struct wake_irq *wirq);
extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq);
extern void dev_pm_enable_wake_irq_check(struct device *dev,
bool can_change_status);
extern void dev_pm_disable_wake_irq_check(struct device *dev);
extern void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable);
extern void dev_pm_enable_wake_irq_complete(struct device *dev);
#ifdef CONFIG_PM_SLEEP
......
@@ -645,6 +645,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
if (retval)
goto fail;
dev_pm_enable_wake_irq_complete(dev);
no_callback:
__update_runtime_status(dev, RPM_SUSPENDED);
pm_runtime_deactivate_timer(dev);
@@ -690,7 +692,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
return retval;
fail:
dev_pm_disable_wake_irq_check(dev);
dev_pm_disable_wake_irq_check(dev, true);
__update_runtime_status(dev, RPM_ACTIVE);
dev->power.deferred_resume = false;
wake_up_all(&dev->power.wait_queue);
@@ -873,7 +875,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
callback = RPM_GET_CALLBACK(dev, runtime_resume);
dev_pm_disable_wake_irq_check(dev);
dev_pm_disable_wake_irq_check(dev, false);
retval = rpm_callback(callback, dev);
if (retval) {
__update_runtime_status(dev, RPM_SUSPENDED);
......
@@ -142,24 +142,7 @@ static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
return IRQ_HANDLED;
}
/**
* dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
* @dev: Device entry
* @irq: Device wake-up interrupt
*
* Unless your hardware has separate wake-up interrupts in addition
* to the device IO interrupts, you don't need this.
*
* Sets up a threaded interrupt handler for a device that has
* a dedicated wake-up interrupt in addition to the device IO
* interrupt.
*
* The interrupt starts disabled, and needs to be managed for
* the device by the bus code or the device driver using
* dev_pm_enable_wake_irq() and dev_pm_disable_wake_irq()
* functions.
*/
int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
static int __dev_pm_set_dedicated_wake_irq(struct device *dev, int irq, unsigned int flag)
{
struct wake_irq *wirq;
int err;
@@ -197,7 +180,7 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
if (err)
goto err_free_irq;
wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED;
wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED | flag;
return err;
@@ -210,8 +193,57 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
return err;
}
/**
* dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
* @dev: Device entry
* @irq: Device wake-up interrupt
*
* Unless your hardware has separate wake-up interrupts in addition
* to the device IO interrupts, you don't need this.
*
* Sets up a threaded interrupt handler for a device that has
* a dedicated wake-up interrupt in addition to the device IO
* interrupt.
*
* The interrupt starts disabled, and needs to be managed for
* the device by the bus code or the device driver using
* dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
* functions.
*/
int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
{
return __dev_pm_set_dedicated_wake_irq(dev, irq, 0);
}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
/**
* dev_pm_set_dedicated_wake_irq_reverse - Request a dedicated wake-up interrupt
* with reverse enable ordering
* @dev: Device entry
* @irq: Device wake-up interrupt
*
* Unless your hardware has separate wake-up interrupts in addition
* to the device IO interrupts, you don't need this.
*
* Sets up a threaded interrupt handler for a device that has a dedicated
* wake-up interrupt in addition to the device IO interrupt. It sets
* the status of WAKE_IRQ_DEDICATED_REVERSE to tell rpm_suspend()
* to enable dedicated wake-up interrupt after running the runtime suspend
* callback for @dev.
*
* The interrupt starts disabled, and needs to be managed for
* the device by the bus code or the device driver using
* dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
* functions.
*/
int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
{
return __dev_pm_set_dedicated_wake_irq(dev, irq, WAKE_IRQ_DEDICATED_REVERSE);
}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq_reverse);
/**
* dev_pm_enable_wake_irq - Enable device wake-up interrupt
* @dev: Device
@@ -282,27 +314,54 @@ void dev_pm_enable_wake_irq_check(struct device *dev,
return;
enable:
if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
enable_irq(wirq->irq);
}
/**
* dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
* @dev: Device
* @cond_disable: if set, also check WAKE_IRQ_DEDICATED_REVERSE
*
* Disables wake-up interrupt conditionally based on status.
* Should be only called from rpm_suspend() and rpm_resume() path.
*/
void dev_pm_disable_wake_irq_check(struct device *dev)
void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable)
{
struct wake_irq *wirq = dev->power.wakeirq;
if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
return;
if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
return;
if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
disable_irq_nosync(wirq->irq);
}
/**
* dev_pm_enable_wake_irq_complete - enable wake IRQ not enabled before
* @dev: Device using the wake IRQ
*
* Enable wake IRQ conditionally based on status, mainly used if want to
* enable wake IRQ after running ->runtime_suspend() which depends on
* WAKE_IRQ_DEDICATED_REVERSE.
*
* Should be only called from rpm_suspend() path.
*/
void dev_pm_enable_wake_irq_complete(struct device *dev)
{
struct wake_irq *wirq = dev->power.wakeirq;
if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
return;
if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
enable_irq(wirq->irq);
}
/**
* dev_pm_arm_wake_irq - Arm device wake-up
* @wirq: Device wake-up interrupt
......
@@ -602,7 +602,7 @@ static int xhci_mtk_probe(struct platform_device *pdev)
goto dealloc_usb2_hcd;
if (wakeup_irq > 0) {
ret = dev_pm_set_dedicated_wake_irq(dev, wakeup_irq);
ret = dev_pm_set_dedicated_wake_irq_reverse(dev, wakeup_irq);
if (ret) {
dev_err(dev, "set wakeup irq %d failed\n", wakeup_irq);
goto dealloc_usb3_hcd;
......
@@ -337,7 +337,7 @@ static int mtu3_probe(struct platform_device *pdev)
goto comm_init_err;
if (ssusb->wakeup_irq > 0) {
ret = dev_pm_set_dedicated_wake_irq(dev, ssusb->wakeup_irq);
ret = dev_pm_set_dedicated_wake_irq_reverse(dev, ssusb->wakeup_irq);
if (ret) {
dev_err(dev, "failed to set wakeup irq %d\n", ssusb->wakeup_irq);
goto comm_exit;
......
@@ -17,8 +17,8 @@
#ifdef CONFIG_PM
extern int dev_pm_set_wake_irq(struct device *dev, int irq);
extern int dev_pm_set_dedicated_wake_irq(struct device *dev,
int irq);
extern int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq);
extern int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq);
extern void dev_pm_clear_wake_irq(struct device *dev);
extern void dev_pm_enable_wake_irq(struct device *dev);
extern void dev_pm_disable_wake_irq(struct device *dev);
@@ -35,6 +35,11 @@ static inline int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
return 0;
}
static inline int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
{
return 0;
}
static inline void dev_pm_clear_wake_irq(struct device *dev)
{
}
......
@@ -300,7 +300,7 @@ static int create_image(int platform_mode)
if (error || hibernation_test(TEST_PLATFORM))
goto Platform_finish;
error = suspend_disable_secondary_cpus();
error = pm_sleep_disable_secondary_cpus();
if (error || hibernation_test(TEST_CPUS))
goto Enable_cpus;
@@ -342,7 +342,7 @@ static int create_image(int platform_mode)
local_irq_enable();
Enable_cpus:
suspend_enable_secondary_cpus();
pm_sleep_enable_secondary_cpus();
/* Allow architectures to do nosmt-specific post-resume dances */
if (!in_suspend)
@@ -466,6 +466,8 @@ static int resume_target_kernel(bool platform_mode)
if (error)
goto Cleanup;
cpuidle_pause();
error = hibernate_resume_nonboot_cpu_disable();
if (error)
goto Enable_cpus;
@@ -509,7 +511,7 @@ static int resume_target_kernel(bool platform_mode)
local_irq_enable();
Enable_cpus:
suspend_enable_secondary_cpus();
pm_sleep_enable_secondary_cpus();
Cleanup:
platform_restore_cleanup(platform_mode);
@@ -587,7 +589,7 @@ int hibernation_platform_enter(void)
if (error)
goto Platform_finish;
error = suspend_disable_secondary_cpus();
error = pm_sleep_disable_secondary_cpus();
if (error)
goto Enable_cpus;
@@ -609,7 +611,7 @@ int hibernation_platform_enter(void)
local_irq_enable();
Enable_cpus:
suspend_enable_secondary_cpus();
pm_sleep_enable_secondary_cpus();
Platform_finish:
hibernation_ops->finish();
......
@@ -4,6 +4,8 @@
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
struct swsusp_info {
struct new_utsname uts;
@@ -310,3 +312,15 @@ extern int pm_wake_lock(const char *buf);
extern int pm_wake_unlock(const char *buf);
#endif /* !CONFIG_PM_WAKELOCKS */
static inline int pm_sleep_disable_secondary_cpus(void)
{
cpuidle_pause();
return suspend_disable_secondary_cpus();
}
static inline void pm_sleep_enable_secondary_cpus(void)
{
suspend_enable_secondary_cpus();
cpuidle_resume();
}
@@ -97,7 +97,6 @@ static void s2idle_enter(void)
raw_spin_unlock_irq(&s2idle_lock);
cpus_read_lock();
cpuidle_resume();
/* Push all the CPUs into the idle loop. */
wake_up_all_idle_cpus();
@@ -105,7 +104,6 @@ static void s2idle_enter(void)
swait_event_exclusive(s2idle_wait_head,
s2idle_state == S2IDLE_STATE_WAKE);
cpuidle_pause();
cpus_read_unlock();
raw_spin_lock_irq(&s2idle_lock);
@@ -162,11 +160,13 @@ EXPORT_SYMBOL_GPL(s2idle_wake);
static bool valid_state(suspend_state_t state)
{
/*
* PM_SUSPEND_STANDBY and PM_SUSPEND_MEM states need low level
* support and need to be valid to the low level
* implementation, no valid callback implies that none are valid.
* The PM_SUSPEND_STANDBY and PM_SUSPEND_MEM states require low-level
* support and need to be valid to the low-level implementation.
*
* No ->valid() or ->enter() callback implies that none are valid.
*/
return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
return suspend_ops && suspend_ops->valid && suspend_ops->valid(state) &&
suspend_ops->enter;
}
void __init pm_states_init(void)
@@ -238,7 +238,7 @@ EXPORT_SYMBOL_GPL(suspend_valid_only_mem);
static bool sleep_state_supported(suspend_state_t state)
{
return state == PM_SUSPEND_TO_IDLE || (suspend_ops && suspend_ops->enter);
return state == PM_SUSPEND_TO_IDLE || valid_state(state);
}
static int platform_suspend_prepare(suspend_state_t state)
@@ -422,7 +422,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
goto Platform_wake;
}
error = suspend_disable_secondary_cpus();
error = pm_sleep_disable_secondary_cpus();
if (error || suspend_test(TEST_CPUS))
goto Enable_cpus;
@@ -452,7 +452,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
BUG_ON(irqs_disabled());
Enable_cpus:
suspend_enable_secondary_cpus();
pm_sleep_enable_secondary_cpus();
Platform_wake:
platform_resume_noirq(state);
......
@@ -299,7 +299,7 @@ static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
return error;
}
static blk_status_t hib_wait_io(struct hib_bio_batch *hb)
static int hib_wait_io(struct hib_bio_batch *hb)
{
/*
* We are relying on the behavior of blk_plug that a thread with
@@ -705,22 +705,19 @@ static int save_image_lzo(struct swap_map_handle *handle,
goto out_clean;
}
data = vmalloc(array_size(nr_threads, sizeof(*data)));
data = vzalloc(array_size(nr_threads, sizeof(*data)));
if (!data) {
pr_err("Failed to allocate LZO data\n");
ret = -ENOMEM;
goto out_clean;
}
for (thr = 0; thr < nr_threads; thr++)
memset(&data[thr], 0, offsetof(struct cmp_data, go));
crc = kmalloc(sizeof(*crc), GFP_KERNEL);
crc = kzalloc(sizeof(*crc), GFP_KERNEL);
if (!crc) {
pr_err("Failed to allocate crc\n");
ret = -ENOMEM;
goto out_clean;
}
memset(crc, 0, offsetof(struct crc_data, go));
/*
* Start the compression threads.
@@ -1198,22 +1195,19 @@ static int load_image_lzo(struct swap_map_handle *handle,
goto out_clean;
}
data = vmalloc(array_size(nr_threads, sizeof(*data)));
data = vzalloc(array_size(nr_threads, sizeof(*data)));
if (!data) {
pr_err("Failed to allocate LZO data\n");
ret = -ENOMEM;
goto out_clean;
}
for (thr = 0; thr < nr_threads; thr++)
memset(&data[thr], 0, offsetof(struct dec_data, go));
crc = kmalloc(sizeof(*crc), GFP_KERNEL);
crc = kzalloc(sizeof(*crc), GFP_KERNEL);
if (!crc) {
pr_err("Failed to allocate crc\n");
ret = -ENOMEM;
goto out_clean;
}
memset(crc, 0, offsetof(struct crc_data, go));
clean_pages_on_decompress = true;
@@ -1521,9 +1515,10 @@ int swsusp_read(unsigned int *flags_p)
int swsusp_check(void)
{
int error;
void *holder;
hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
FMODE_READ, NULL);
FMODE_READ | FMODE_EXCL, &holder);
if (!IS_ERR(hib_resume_bdev)) {
set_blocksize(hib_resume_bdev, PAGE_SIZE);
clear_page(swsusp_header);
@@ -1545,7 +1540,7 @@ int swsusp_check(void)
put:
if (error)
blkdev_put(hib_resume_bdev, FMODE_READ);
blkdev_put(hib_resume_bdev, FMODE_READ | FMODE_EXCL);
else
pr_debug("Image signature found, resuming\n");
} else {
......