Commit 7680d45a authored by Rafael J. Wysocki

Merge branches 'pm-cpuidle', 'pm-sleep' and 'pm-domains'

Merge cpuidle changes, updates related to system sleep and generic power
domains code fixes for 6.2-rc1:

 - Improve kernel messages printed by the cpuidle PSCI driver (Ulf
   Hansson).

 - Make the DT cpuidle driver return the correct number of parsed idle
   states, clean it up and clarify a comment in it (Ulf Hansson).

 - Modify the tasks freezing code to avoid using pr_cont() and refine an
   error message printed by it (Rafael Wysocki).

 - Make the hibernation core code complain about memory map mismatches
   during resume to help diagnostics (Xueqin Luo).

 - Fix mistake in a kerneldoc comment in the hibernation code (xiongxin).

 - Reverse the order of performance and enabling operations in the
   generic power domains code (Abel Vesa).

 - Power off[on] domains in hibernate .freeze[thaw]_noirq hook of the
   generic power domains code (Abel Vesa).

 - Consolidate genpd_restore_noirq() and genpd_resume_noirq() (Shawn
   Guo).

 - Pass generic PM noirq hooks to genpd_finish_suspend() (Shawn Guo).

 - Drop generic power domain status manipulation during hibernate
   restore (Shawn Guo).

* pm-cpuidle:
  cpuidle: dt: Clarify a comment and simplify code in dt_init_idle_driver()
  cpuidle: dt: Return the correct numbers of parsed idle states
  cpuidle: psci: Extend information in log about OSI/PC mode

* pm-sleep:
  PM: sleep: Refine error message in try_to_freeze_tasks()
  PM: sleep: Avoid using pr_cont() in the tasks freezing code
  PM: hibernate: Complain about memory map mismatches during resume
  PM: hibernate: Fix mistake in kerneldoc comment

* pm-domains:
  PM: domains: Reverse the order of performance and enabling ops
  PM: domains: Power off[on] domain in hibernate .freeze[thaw]_noirq hook
  PM: domains: Consolidate genpd_restore_noirq() and genpd_resume_noirq()
  PM: domains: Pass generic PM noirq hooks to genpd_finish_suspend()
  PM: domains: Drop genpd status manipulation for hibernate restore
@@ -939,8 +939,8 @@ static int genpd_runtime_suspend(struct device *dev)
 		return 0;
 
 	genpd_lock(genpd);
+	gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
 	genpd_power_off(genpd, true, 0);
-	gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
 	genpd_unlock(genpd);
 
 	return 0;
@@ -978,9 +978,8 @@ static int genpd_runtime_resume(struct device *dev)
 		goto out;
 
 	genpd_lock(genpd);
+	genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
 	ret = genpd_power_on(genpd, 0);
-	if (!ret)
-		genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
 	genpd_unlock(genpd);
 
 	if (ret)
@@ -1018,8 +1017,8 @@ static int genpd_runtime_resume(struct device *dev)
 err_poweroff:
 	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
 		genpd_lock(genpd);
+		gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
 		genpd_power_off(genpd, true, 0);
-		gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
 		genpd_unlock(genpd);
 	}
@@ -1189,12 +1188,15 @@ static int genpd_prepare(struct device *dev)
  * genpd_finish_suspend - Completion of suspend or hibernation of device in an
  *   I/O pm domain.
  * @dev: Device to suspend.
- * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
+ * @suspend_noirq: Generic suspend_noirq callback.
+ * @resume_noirq: Generic resume_noirq callback.
  *
  * Stop the device and remove power from the domain if all devices in it have
  * been stopped.
  */
-static int genpd_finish_suspend(struct device *dev, bool poweroff)
+static int genpd_finish_suspend(struct device *dev,
+				int (*suspend_noirq)(struct device *dev),
+				int (*resume_noirq)(struct device *dev))
 {
 	struct generic_pm_domain *genpd;
 	int ret = 0;
@@ -1203,10 +1205,7 @@ static int genpd_finish_suspend(struct device *dev, bool poweroff)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	if (poweroff)
-		ret = pm_generic_poweroff_noirq(dev);
-	else
-		ret = pm_generic_suspend_noirq(dev);
+	ret = suspend_noirq(dev);
 	if (ret)
 		return ret;
@@ -1217,10 +1216,7 @@ static int genpd_finish_suspend(struct device *dev, bool poweroff)
 	    !pm_runtime_status_suspended(dev)) {
 		ret = genpd_stop_dev(genpd, dev);
 		if (ret) {
-			if (poweroff)
-				pm_generic_restore_noirq(dev);
-			else
-				pm_generic_resume_noirq(dev);
+			resume_noirq(dev);
 			return ret;
 		}
 	}
@@ -1244,16 +1240,20 @@ static int genpd_suspend_noirq(struct device *dev)
 {
 	dev_dbg(dev, "%s()\n", __func__);
 
-	return genpd_finish_suspend(dev, false);
+	return genpd_finish_suspend(dev,
+				    pm_generic_suspend_noirq,
+				    pm_generic_resume_noirq);
 }
 
 /**
- * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
+ * genpd_finish_resume - Completion of resume of device in an I/O PM domain.
  * @dev: Device to resume.
+ * @resume_noirq: Generic resume_noirq callback.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
-static int genpd_resume_noirq(struct device *dev)
+static int genpd_finish_resume(struct device *dev,
+			       int (*resume_noirq)(struct device *dev))
 {
 	struct generic_pm_domain *genpd;
 	int ret;
@@ -1265,7 +1265,7 @@ static int genpd_resume_noirq(struct device *dev)
 		return -EINVAL;
 
 	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
-		return pm_generic_resume_noirq(dev);
+		return resume_noirq(dev);
 
 	genpd_lock(genpd);
 	genpd_sync_power_on(genpd, true, 0);
@@ -1282,6 +1282,19 @@ static int genpd_resume_noirq(struct device *dev)
 	return pm_generic_resume_noirq(dev);
 }
 
 /**
+ * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
+ * @dev: Device to resume.
+ *
+ * Restore power to the device's PM domain, if necessary, and start the device.
+ */
+static int genpd_resume_noirq(struct device *dev)
+{
+	dev_dbg(dev, "%s()\n", __func__);
+
+	return genpd_finish_resume(dev, pm_generic_resume_noirq);
+}
+
+/**
 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
@@ -1293,24 +1306,11 @@ static int genpd_resume_noirq(struct device *dev)
 */
static int genpd_freeze_noirq(struct device *dev)
{
-	const struct generic_pm_domain *genpd;
-	int ret = 0;
-
 	dev_dbg(dev, "%s()\n", __func__);
 
-	genpd = dev_to_genpd(dev);
-	if (IS_ERR(genpd))
-		return -EINVAL;
-
-	ret = pm_generic_freeze_noirq(dev);
-	if (ret)
-		return ret;
-
-	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
-	    !pm_runtime_status_suspended(dev))
-		ret = genpd_stop_dev(genpd, dev);
-
-	return ret;
+	return genpd_finish_suspend(dev,
+				    pm_generic_freeze_noirq,
+				    pm_generic_thaw_noirq);
}
 
/**
@@ -1322,23 +1322,9 @@ static int genpd_freeze_noirq(struct device *dev)
 */
static int genpd_thaw_noirq(struct device *dev)
{
-	const struct generic_pm_domain *genpd;
-	int ret = 0;
-
 	dev_dbg(dev, "%s()\n", __func__);
 
-	genpd = dev_to_genpd(dev);
-	if (IS_ERR(genpd))
-		return -EINVAL;
-
-	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
-	    !pm_runtime_status_suspended(dev)) {
-		ret = genpd_start_dev(genpd, dev);
-		if (ret)
-			return ret;
-	}
-
-	return pm_generic_thaw_noirq(dev);
+	return genpd_finish_resume(dev, pm_generic_thaw_noirq);
}
 
/**
@@ -1353,7 +1339,9 @@ static int genpd_poweroff_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);
 
-	return genpd_finish_suspend(dev, true);
+	return genpd_finish_suspend(dev,
+				    pm_generic_poweroff_noirq,
+				    pm_generic_restore_noirq);
}
 
/**
@@ -1365,40 +1353,9 @@ static int genpd_restore_noirq(struct device *dev)
 */
static int genpd_restore_noirq(struct device *dev)
{
-	struct generic_pm_domain *genpd;
-	int ret = 0;
-
 	dev_dbg(dev, "%s()\n", __func__);
 
-	genpd = dev_to_genpd(dev);
-	if (IS_ERR(genpd))
-		return -EINVAL;
-
-	/*
-	 * At this point suspended_count == 0 means we are being run for the
-	 * first time for the given domain in the present cycle.
-	 */
-	genpd_lock(genpd);
-	if (genpd->suspended_count++ == 0) {
-		/*
-		 * The boot kernel might put the domain into arbitrary state,
-		 * so make it appear as powered off to genpd_sync_power_on(),
-		 * so that it tries to power it on in case it was really off.
-		 */
-		genpd->status = GENPD_STATE_OFF;
-	}
-
-	genpd_sync_power_on(genpd, true, 0);
-	genpd_unlock(genpd);
-
-	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
-	    !pm_runtime_status_suspended(dev)) {
-		ret = genpd_start_dev(genpd, dev);
-		if (ret)
-			return ret;
-	}
-
-	return pm_generic_restore_noirq(dev);
+	return genpd_finish_resume(dev, pm_generic_restore_noirq);
}
 
/**
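The consolidation above replaces a bool flag plus if/else chains with a pair of callback parameters, so all four noirq entry points (suspend, freeze, poweroff, and their resume-side counterparts) funnel through two shared helpers. Below is a minimal, self-contained sketch of that pattern in user-space C; the struct, the generic_* and stop_dev names, and the return codes are illustrative stand-ins, not the genpd code itself.

	#include <stdio.h>

	struct device { const char *name; };

	/* Hypothetical stand-ins for the pm_generic_*_noirq callbacks. */
	static int generic_suspend(struct device *dev)
	{
		printf("%s: generic suspend\n", dev->name);
		return 0;
	}

	static int generic_resume(struct device *dev)
	{
		printf("%s: generic resume (undo)\n", dev->name);
		return 0;
	}

	/* Pretend the "stop the device" step can fail. */
	static int stop_dev(struct device *dev)
	{
		printf("%s: stop failed\n", dev->name);
		return -1;
	}

	/*
	 * The shared helper: instead of a bool flag and if/else chains,
	 * the caller hands in the callback pair it wants, and the error
	 * path can undo a partial suspend with the matching resume
	 * callback.
	 */
	static int finish_suspend(struct device *dev,
				  int (*suspend_noirq)(struct device *dev),
				  int (*resume_noirq)(struct device *dev))
	{
		int ret = suspend_noirq(dev);

		if (ret)
			return ret;

		ret = stop_dev(dev);
		if (ret) {
			resume_noirq(dev);	/* roll back before reporting */
			return ret;
		}
		return 0;
	}

	int main(void)
	{
		struct device dev = { "dev0" };

		/* Each entry point just picks its pair, cf. genpd_suspend_noirq(). */
		return finish_suspend(&dev, generic_suspend, generic_resume) ? 1 : 0;
	}

Passing the matching resume callback is what lets genpd_freeze_noirq() and genpd_poweroff_noirq() share one helper while still unwinding correctly on failure.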
@@ -2749,17 +2706,6 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
 	dev->pm_domain->detach = genpd_dev_pm_detach;
 	dev->pm_domain->sync = genpd_dev_pm_sync;
 
-	if (power_on) {
-		genpd_lock(pd);
-		ret = genpd_power_on(pd, 0);
-		genpd_unlock(pd);
-	}
-
-	if (ret) {
-		genpd_remove_device(pd, dev);
-		return -EPROBE_DEFER;
-	}
-
 	/* Set the default performance state */
 	pstate = of_get_required_opp_performance_state(dev->of_node, index);
 	if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
@@ -2771,6 +2717,24 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
 			goto err;
 		dev_gpd_data(dev)->default_pstate = pstate;
 	}
+
+	if (power_on) {
+		genpd_lock(pd);
+		ret = genpd_power_on(pd, 0);
+		genpd_unlock(pd);
+	}
+
+	if (ret) {
+		/* Drop the default performance state */
+		if (dev_gpd_data(dev)->default_pstate) {
+			dev_pm_genpd_set_performance_state(dev, 0);
+			dev_gpd_data(dev)->default_pstate = 0;
+		}
+
+		genpd_remove_device(pd, dev);
+		return -EPROBE_DEFER;
+	}
+
 	return 1;
 
 err:
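Moving the power-on after the default performance state is set means a failed power-on must now also drop that state: acquire in order, release in reverse. A compact sketch of that unwind idiom, with hypothetical names and illustrative return codes rather than the genpd calls:

	#include <stdio.h>

	static int default_pstate;	/* illustrative per-device state */

	static void set_pstate(int p) { default_pstate = p; }
	static int power_on(void)     { return -1; /* pretend power-on fails */ }

	static int attach(void)
	{
		set_pstate(7);		/* step 1: default performance state */

		if (power_on()) {	/* step 2: power on the domain */
			/* Unwind in reverse order: drop the state set above. */
			if (default_pstate)
				set_pstate(0);
			return -2;	/* cf. -EPROBE_DEFER in the real code */
		}
		return 0;
	}

	int main(void)
	{
		printf("attach() = %d, pstate = %d\n", attach(), default_pstate);
		return 0;
	}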
@@ -181,7 +181,8 @@ static int psci_cpuidle_domain_probe(struct platform_device *pdev)
 	if (ret)
 		goto remove_pd;
 
-	pr_info("Initialized CPU PM domain topology\n");
+	pr_info("Initialized CPU PM domain topology using %s mode\n",
+		use_osi ? "OSI" : "PC");
 	return 0;
 
 put_node:
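With the extra argument, the probe message records which PSCI suspend mode was selected. An illustrative boot-log line for the OSI case, with the timestamp and any pr_fmt prefix omitted:

	Initialized CPU PM domain topology using OSI mode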
@@ -211,18 +211,15 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
 	of_node_put(cpu_node);
 	if (err)
 		return err;
 
-	/*
-	 * Update the driver state count only if some valid DT idle states
-	 * were detected
-	 */
-	if (i)
-		drv->state_count = state_idx;
+	/* Set the number of total supported idle states. */
+	drv->state_count = state_idx;
 
 	/*
 	 * Return the number of present and valid DT idle states, which can
 	 * also be 0 on platforms with missing DT idle states or legacy DT
 	 * configuration predating the DT idle states bindings.
 	 */
-	return i;
+	return state_idx - start_idx;
}
EXPORT_SYMBOL_GPL(dt_init_idle_driver);
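The distinction matters when the driver already ships built-in states, so DT parsing does not start at index 0: drv->state_count must cover all states, while callers want only the number parsed from DT. A worked example with illustrative numbers, in plain C rather than the driver code:

	#include <stdio.h>

	int main(void)
	{
		/*
		 * Illustrative: the driver already provides one built-in
		 * state (say, WFI), so DT parsing starts at index 1, and
		 * the DT then supplies three valid idle states.
		 */
		unsigned int start_idx = 1;
		unsigned int state_idx = start_idx + 3; /* bumped once per valid state */

		/* Total states the driver supports, built-in ones included. */
		printf("drv->state_count = %u\n", state_idx);             /* 4 */

		/* DT states actually parsed, which is what callers expect. */
		printf("parsed DT states = %u\n", state_idx - start_idx); /* 3 */

		return 0;
	}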
@@ -27,6 +27,8 @@ unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
static int try_to_freeze_tasks(bool user_only)
{
+	const char *what = user_only ? "user space processes" :
+					"remaining freezable tasks";
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
@@ -36,6 +38,8 @@ static int try_to_freeze_tasks(bool user_only)
	bool wakeup = false;
	int sleep_usecs = USEC_PER_MSEC;
 
+	pr_info("Freezing %s\n", what);
+
	start = ktime_get_boottime();
 
	end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);
@@ -82,9 +86,8 @@ static int try_to_freeze_tasks(bool user_only)
	elapsed_msecs = ktime_to_ms(elapsed);
 
	if (todo) {
-		pr_cont("\n");
-		pr_err("Freezing of tasks %s after %d.%03d seconds "
-		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
+		pr_err("Freezing %s %s after %d.%03d seconds "
+		       "(%d tasks refusing to freeze, wq_busy=%d):\n", what,
		       wakeup ? "aborted" : "failed",
		       elapsed_msecs / 1000, elapsed_msecs % 1000,
		       todo - wq_busy, wq_busy);
@@ -101,8 +104,8 @@ static int try_to_freeze_tasks(bool user_only)
			read_unlock(&tasklist_lock);
		}
	} else {
-		pr_cont("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
-			elapsed_msecs % 1000);
+		pr_info("Freezing %s completed (elapsed %d.%03d seconds)\n",
+			what, elapsed_msecs / 1000, elapsed_msecs % 1000);
	}
 
	return todo ? -EBUSY : 0;
@@ -130,14 +133,11 @@ int freeze_processes(void)
		static_branch_inc(&freezer_active);
 
	pm_wakeup_clear(0);
-	pr_info("Freezing user space processes ... ");
	pm_freezing = true;
	error = try_to_freeze_tasks(true);
-	if (!error) {
+	if (!error)
		__usermodehelper_set_disable_depth(UMH_DISABLED);
-		pr_cont("done.");
-	}
-	pr_cont("\n");
+
	BUG_ON(in_atomic());
 
	/*
@@ -166,14 +166,9 @@ int freeze_kernel_threads(void)
{
	int error;
 
-	pr_info("Freezing remaining freezable tasks ... ");
-
	pm_nosig_freezing = true;
	error = try_to_freeze_tasks(false);
-	if (!error)
-		pr_cont("done.");
-
-	pr_cont("\n");
+
	BUG_ON(in_atomic());
 
	if (error)
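Taken together, the process.c changes emit each freezer message as one complete line instead of assembling it with pr_cont(), whose partial output can be interleaved with unrelated messages from other CPUs. Illustrative dmesg output for a successful suspend, built from the format strings above (elapsed times made up):

	Freezing user space processes
	Freezing user space processes completed (elapsed 0.002 seconds)
	Freezing remaining freezable tasks
	Freezing remaining freezable tasks completed (elapsed 0.001 seconds)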
@@ -1723,8 +1723,8 @@ static unsigned long minimum_image_size(unsigned long saveable)
 * /sys/power/reserved_size, respectively). To make this happen, we compute the
 * total number of available page frames and allocate at least
 *
- * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
- *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
+ * ([page frames total] - PAGES_FOR_IO - [metadata pages]) / 2
+ *  - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * of them, which corresponds to the maximum size of a hibernation image.
 *
@@ -2259,10 +2259,14 @@ static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
 
-		if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
+		if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j])) {
			memory_bm_set_bit(bm, buf[j]);
-		else
+		} else {
+			if (!pfn_valid(buf[j]))
+				pr_err(FW_BUG "Memory map mismatch at 0x%llx after hibernation\n",
+				       (unsigned long long)PFN_PHYS(buf[j]));
			return -EFAULT;
+		}
	}
 
	return 0;
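On a mismatch, the new message is tagged as a firmware problem: FW_BUG expands to the kernel's standard "[Firmware Bug]: " prefix, and PFN_PHYS() converts the offending page frame number to a physical address. A resume that hits a PFN absent from the current memory map would therefore log something along these lines (address illustrative):

	[Firmware Bug]: Memory map mismatch at 0x180000000 after hibernation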
@@ -1462,7 +1462,7 @@ class Data:
		'TIMEOUT' : r'(?i).*\bTIMEOUT\b.*',
		'ABORT'   : r'(?i).*\bABORT\b.*',
		'IRQ'     : r'.*\bgenirq: .*',
-		'TASKFAIL': r'.*Freezing of tasks *.*',
+		'TASKFAIL': r'.*Freezing .*after *.*',
		'ACPI'    : r'.*\bACPI *(?P<b>[A-Za-z]*) *Error[: ].*',
		'DISKFULL': r'.*\bNo space left on device.*',
		'USBERR'  : r'.*usb .*device .*, error [0-9-]*',
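The loosened TASKFAIL pattern has to match both the old "Freezing of tasks failed after ..." wording and the reworded per-group messages introduced above. A quick check of that property; POSIX ERE in C behaves the same as Python's re for this particular pattern, and the sample messages are reconstructed from the format strings with made-up timings:

	#include <regex.h>
	#include <stdio.h>

	int main(void)
	{
		regex_t re;
		const char *old_msg = "Freezing of tasks failed after 20.002 seconds";
		const char *new_msg =
			"Freezing user space processes failed after 20.002 seconds";

		/* The new TASKFAIL pattern from the hunk above. */
		if (regcomp(&re, "Freezing .*after *.*", REG_EXTENDED | REG_NOSUB))
			return 1;

		/* Both wordings should match. */
		printf("old: %s\n", regexec(&re, old_msg, 0, NULL, 0) ? "no" : "yes");
		printf("new: %s\n", regexec(&re, new_msg, 0, NULL, 0) ? "no" : "yes");

		regfree(&re);
		return 0;
	}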