Commit 1e8703b2 authored by Linus Torvalds

Merge branch 'pm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6

* 'pm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6:
  PM / PM QoS: Fix reversed min and max
  PM / OPP: Hide OPP configuration when SoCs do not provide an implementation
  PM: Allow devices to be removed during late suspend and early resume
parents 45314915 00fafcda
@@ -37,6 +37,9 @@ Typical usage of the OPP library is as follows:
 SoC framework	-> modifies on required cases certain OPPs	-> OPP layer
 		-> queries to search/retrieve information ->
 
+Architectures that provide a SoC framework for OPP should select ARCH_HAS_OPP
+to make the OPP layer available.
+
 OPP layer expects each domain to be represented by a unique device pointer. SoC
 framework registers a set of initial OPPs per device with the OPP layer. This
 list is expected to be an optimally small number typically around 5 per device.
...
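As a worked example of the usage model the documentation describes, here is a
minimal sketch of a SoC framework registering its initial OPPs for one device.
It assumes the opp_add() helper exported by the OPP library (device pointer,
frequency in Hz, voltage in microvolts); the function name my_soc_init_opps(),
the mpu_dev argument and the specific frequency/voltage values are hypothetical.
The architecture would also select ARCH_HAS_OPP in its Kconfig, per the hunk
above, so that CONFIG_PM_OPP can be enabled.

/*
 * Illustrative sketch only: register an optimally small OPP table
 * (typically ~5 entries) for one device/domain at SoC init time.
 */
#include <linux/device.h>
#include <linux/opp.h>

static int my_soc_init_opps(struct device *mpu_dev)
{
	int ret;

	ret = opp_add(mpu_dev,  300000000,  975000);	/* 300 MHz @ 0.975 V */
	if (ret)
		return ret;
	ret = opp_add(mpu_dev,  600000000, 1100000);	/* 600 MHz @ 1.100 V */
	if (ret)
		return ret;
	return opp_add(mpu_dev, 1000000000, 1250000);	/* 1 GHz  @ 1.250 V */
}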
@@ -475,20 +475,33 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
  */
 void dpm_resume_noirq(pm_message_t state)
 {
-	struct device *dev;
+	struct list_head list;
 	ktime_t starttime = ktime_get();
 
+	INIT_LIST_HEAD(&list);
 	mutex_lock(&dpm_list_mtx);
 	transition_started = false;
-	list_for_each_entry(dev, &dpm_list, power.entry)
+	while (!list_empty(&dpm_list)) {
+		struct device *dev = to_device(dpm_list.next);
+
+		get_device(dev);
 		if (dev->power.status > DPM_OFF) {
 			int error;
 
 			dev->power.status = DPM_OFF;
+			mutex_unlock(&dpm_list_mtx);
+
 			error = device_resume_noirq(dev, state);
+
+			mutex_lock(&dpm_list_mtx);
 			if (error)
 				pm_dev_err(dev, state, " early", error);
 		}
+		if (!list_empty(&dev->power.entry))
+			list_move_tail(&dev->power.entry, &list);
+		put_device(dev);
+	}
+	list_splice(&list, &dpm_list);
 	mutex_unlock(&dpm_list_mtx);
 	dpm_show_time(starttime, state, "early");
 	resume_device_irqs();
@@ -789,20 +802,33 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
  */
 int dpm_suspend_noirq(pm_message_t state)
 {
-	struct device *dev;
+	struct list_head list;
 	ktime_t starttime = ktime_get();
 	int error = 0;
 
+	INIT_LIST_HEAD(&list);
 	suspend_device_irqs();
 	mutex_lock(&dpm_list_mtx);
-	list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
+	while (!list_empty(&dpm_list)) {
+		struct device *dev = to_device(dpm_list.prev);
+
+		get_device(dev);
+		mutex_unlock(&dpm_list_mtx);
+
 		error = device_suspend_noirq(dev, state);
+
+		mutex_lock(&dpm_list_mtx);
 		if (error) {
 			pm_dev_err(dev, state, " late", error);
+			put_device(dev);
 			break;
 		}
 		dev->power.status = DPM_OFF_IRQ;
+		if (!list_empty(&dev->power.entry))
+			list_move(&dev->power.entry, &list);
+		put_device(dev);
 	}
+	list_splice_tail(&list, &dpm_list);
 	mutex_unlock(&dpm_list_mtx);
 	if (error)
 		dpm_resume_noirq(resume_event(state));
...
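The two hunks above replace a straight list_for_each_entry() walk, done
entirely under dpm_list_mtx, with a loop that pins each device with
get_device(), drops the mutex while the noirq callback runs, and then parks
the processed entry on a private list that is spliced back afterwards. This is
what allows a device to be unregistered during late suspend or early resume
without corrupting the traversal. A condensed sketch of the pattern, with a
hypothetical walk_allowing_removal() helper and callback parameter standing in
for the dpm-specific details:

#include <linux/device.h>
#include <linux/list.h>
#include <linux/mutex.h>

/* Sketch of the traversal pattern adopted above, not the actual dpm code. */
static void walk_allowing_removal(struct list_head *pending,
				  struct mutex *lock,
				  void (*cb)(struct device *))
{
	LIST_HEAD(done);

	mutex_lock(lock);
	while (!list_empty(pending)) {
		struct device *dev = list_first_entry(pending, struct device,
						      power.entry);

		get_device(dev);	/* pin dev across the unlock */
		mutex_unlock(lock);

		cb(dev);		/* dev may be unregistered while this runs */

		mutex_lock(lock);
		if (!list_empty(&dev->power.entry))	/* skip if removed meanwhile */
			list_move_tail(&dev->power.entry, &done);
		put_device(dev);
	}
	list_splice(&done, pending);
	mutex_unlock(lock);
}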
@@ -121,10 +121,10 @@ static inline int pm_qos_get_value(struct pm_qos_object *o)
 
 	switch (o->type) {
 	case PM_QOS_MIN:
-		return plist_last(&o->requests)->prio;
+		return plist_first(&o->requests)->prio;
 
 	case PM_QOS_MAX:
-		return plist_first(&o->requests)->prio;
+		return plist_last(&o->requests)->prio;
 
 	default:
 		/* runtime check for not using enum */
...
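For reference on why this is the right orientation: a plist is kept sorted by
ascending ->prio, so plist_first() yields the smallest queued request and
plist_last() the largest. A PM_QOS_MIN class (e.g. CPU DMA latency) must
report the smallest, most restrictive request, and a PM_QOS_MAX class the
largest; the old code had the two accessors swapped. A small sketch, with a
hypothetical example_aggregate() helper:

#include <linux/plist.h>
#include <linux/types.h>

/* Illustrative only: aggregate a populated request list the same way. */
static int example_aggregate(struct plist_head *requests, bool min_class,
			     int default_value)
{
	if (plist_head_empty(requests))
		return default_value;

	return min_class ? plist_first(requests)->prio
			 : plist_last(requests)->prio;
}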
@@ -246,9 +246,13 @@ config PM_OPS
 	depends on PM_SLEEP || PM_RUNTIME
 	default y
 
+config ARCH_HAS_OPP
+	bool
+
 config PM_OPP
 	bool "Operating Performance Point (OPP) Layer library"
 	depends on PM
+	depends on ARCH_HAS_OPP
 	---help---
 	  SOCs have a standard set of tuples consisting of frequency and
 	  voltage pairs that the device will support per voltage domain. This
...