Commit ada0629b authored by Rafael J. Wysocki

Merge branches 'pm-core', 'pm-sleep', 'pm-acpi' and 'pm-domains'

* pm-core:
  PM: runtime: Add pm_runtime_get_if_active()

* pm-sleep:
  PM: sleep: wakeup: Skip wakeup_source_sysfs_remove() if device is not there
  PM / hibernate: Remove unnecessary compat ioctl overrides
  PM: hibernate: fix docs for ioctls that return loff_t via pointer
  PM: sleep: wakeup: Use built-in RCU list checking
  PM: sleep: core: Use built-in RCU list checking

* pm-acpi:
  ACPI: PM: s2idle: Refine active GPEs check
  ACPICA: Allow acpi_any_gpe_status_set() to skip one GPE
  ACPI: PM: s2idle: Fix comment in acpi_s2idle_prepare_late()

* pm-domains:
  cpuidle: psci: Split psci_dt_cpu_init_idle()
  PM / Domains: Allow no domain-idle-states DT property in genpd when parsing
--- a/Documentation/power/runtime_pm.rst
+++ b/Documentation/power/runtime_pm.rst
@@ -382,6 +382,12 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
     nonzero, increment the counter and return 1; otherwise return 0 without
     changing the counter

+`int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count);`
+    - return -EINVAL if 'power.disable_depth' is nonzero; otherwise, if the
+      runtime PM status is RPM_ACTIVE, and either ign_usage_count is true
+      or the device's usage_count is non-zero, increment the counter and
+      return 1; otherwise return 0 without changing the counter
+
 `void pm_runtime_put_noidle(struct device *dev);`
     - decrement the device's usage counter
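As an illustration of the new helper, a minimal driver-side sketch; every identifier except the pm_runtime_* calls is hypothetical:

	#include <linux/pm_runtime.h>

	/* Access the hardware only if it is already powered up (RPM_ACTIVE). */
	static int foo_poll_hw(struct device *dev)
	{
		int ret;

		ret = pm_runtime_get_if_active(dev, true);
		if (ret <= 0)
			return ret;	/* -EINVAL: runtime PM disabled; 0: not active */

		/* The device cannot runtime-suspend while this reference is held. */
		foo_read_counters(dev);		/* hypothetical hardware access */

		pm_runtime_put(dev);		/* drop the reference taken above */
		return 0;
	}

Passing false instead of true makes the call behave like pm_runtime_get_if_in_use(), which succeeds only if the usage counter is already non-zero.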
--- a/Documentation/power/userland-swsusp.rst
+++ b/Documentation/power/userland-swsusp.rst
@@ -69,11 +69,13 @@ SNAPSHOT_PREF_IMAGE_SIZE
 SNAPSHOT_GET_IMAGE_SIZE
 	return the actual size of the hibernation image
+	(the last argument should be a pointer to a loff_t variable that
+	will contain the result if the call is successful)

 SNAPSHOT_AVAIL_SWAP_SIZE
-	return the amount of available swap in bytes (the
-	last argument should be a pointer to an unsigned int variable that will
-	contain the result if the call is successful).
+	return the amount of available swap in bytes
+	(the last argument should be a pointer to a loff_t variable that
+	will contain the result if the call is successful)

 SNAPSHOT_ALLOC_SWAP_PAGE
 	allocate a swap page from the resume partition
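To show the corrected calling convention from userspace, a small hedged sketch (assumes the /dev/snapshot device and sufficient privileges; error handling is minimal):

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/suspend_ioctls.h>

	int main(void)
	{
		__kernel_loff_t size;	/* 64-bit, matching the documented loff_t */
		int fd = open("/dev/snapshot", O_RDONLY);

		if (fd < 0)
			return 1;

		/* The kernel stores the result through the pointer argument. */
		if (ioctl(fd, SNAPSHOT_GET_IMAGE_SIZE, &size) == 0)
			printf("hibernation image size: %lld bytes\n",
			       (long long)size);

		close(fd);
		return 0;
	}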
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -101,7 +101,7 @@ acpi_status acpi_hw_enable_all_runtime_gpes(void);

 acpi_status acpi_hw_enable_all_wakeup_gpes(void);

-u8 acpi_hw_check_all_gpes(void);
+u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number);

 acpi_status
 acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -799,17 +799,19 @@ ACPI_EXPORT_SYMBOL(acpi_enable_all_wakeup_gpes)
  *
  * FUNCTION:    acpi_any_gpe_status_set
  *
- * PARAMETERS:  None
+ * PARAMETERS:  gpe_skip_number      - Number of the GPE to skip
  *
  * RETURN:      Whether or not the status bit is set for any GPE
  *
- * DESCRIPTION: Check the status bits of all enabled GPEs and return TRUE if any
- *              of them is set or FALSE otherwise.
+ * DESCRIPTION: Check the status bits of all enabled GPEs, except for the one
+ *              represented by the "skip" argument, and return TRUE if any of
+ *              them is set or FALSE otherwise.
  *
  ******************************************************************************/
-u32 acpi_any_gpe_status_set(void)
+u32 acpi_any_gpe_status_set(u32 gpe_skip_number)
 {
 	acpi_status status;
+	acpi_handle gpe_device;
 	u8 ret;

 	ACPI_FUNCTION_TRACE(acpi_any_gpe_status_set);
@@ -819,7 +821,12 @@ u32 acpi_any_gpe_status_set(void)
 		return (FALSE);
 	}

-	ret = acpi_hw_check_all_gpes();
+	status = acpi_get_gpe_device(gpe_skip_number, &gpe_device);
+	if (ACPI_FAILURE(status)) {
+		gpe_device = NULL;
+	}
+
+	ret = acpi_hw_check_all_gpes(gpe_device, gpe_skip_number);
 	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);

 	return (ret);
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -444,12 +444,19 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
 	return (AE_OK);
 }

+struct acpi_gpe_block_status_context {
+	struct acpi_gpe_register_info *gpe_skip_register_info;
+	u8 gpe_skip_mask;
+	u8 retval;
+};
+
 /******************************************************************************
  *
  * FUNCTION:    acpi_hw_get_gpe_block_status
  *
  * PARAMETERS:  gpe_xrupt_info      - GPE Interrupt info
  *              gpe_block           - Gpe Block info
+ *              context             - GPE list walk context data
  *
  * RETURN:      Success
  *
@@ -460,12 +467,13 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
 static acpi_status
 acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
 			     struct acpi_gpe_block_info *gpe_block,
-			     void *ret_ptr)
+			     void *context)
 {
+	struct acpi_gpe_block_status_context *c = context;
 	struct acpi_gpe_register_info *gpe_register_info;
 	u64 in_enable, in_status;
 	acpi_status status;
-	u8 *ret = ret_ptr;
+	u8 ret_mask;
 	u32 i;

 	/* Examine each GPE Register within the block */
@@ -485,7 +493,11 @@ acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
 			continue;
 		}

-		*ret |= in_enable & in_status;
+		ret_mask = in_enable & in_status;
+		if (ret_mask && c->gpe_skip_register_info == gpe_register_info) {
+			ret_mask &= ~c->gpe_skip_mask;
+		}
+
+		c->retval |= ret_mask;
 	}

 	return (AE_OK);
@@ -561,24 +573,41 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void)
  *
  * FUNCTION:    acpi_hw_check_all_gpes
  *
- * PARAMETERS:  None
+ * PARAMETERS:  gpe_skip_device      - GPE device of the GPE to skip
+ *              gpe_skip_number      - Number of the GPE to skip
  *
  * RETURN:      Combined status of all GPEs
  *
- * DESCRIPTION: Check all enabled GPEs in all GPE blocks and return TRUE if the
+ * DESCRIPTION: Check all enabled GPEs in all GPE blocks, except for the one
+ *              represented by the "skip" arguments, and return TRUE if the
 *              status bit is set for at least one of them or FALSE otherwise.
  *
  ******************************************************************************/
-u8 acpi_hw_check_all_gpes(void)
+u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number)
 {
-	u8 ret = 0;
+	struct acpi_gpe_block_status_context context = {
+		.gpe_skip_register_info = NULL,
+		.retval = 0,
+	};
+	struct acpi_gpe_event_info *gpe_event_info;
+	acpi_cpu_flags flags;

 	ACPI_FUNCTION_TRACE(acpi_hw_check_all_gpes);

-	(void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &ret);
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_skip_device,
+						    gpe_skip_number);
+	if (gpe_event_info) {
+		context.gpe_skip_register_info = gpe_event_info->register_info;
+		context.gpe_skip_mask =
+		    acpi_hw_get_gpe_register_bit(gpe_event_info);
+	}
+
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

-	return (ret != 0);
+	(void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &context);
+	return (context.retval != 0);
 }

 #endif				/* !ACPI_REDUCED_HARDWARE */
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -2037,6 +2037,11 @@ void acpi_ec_set_gpe_wake_mask(u8 action)
 		acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
 }

+bool acpi_ec_other_gpes_active(void)
+{
+	return acpi_any_gpe_status_set(first_ec ? first_ec->gpe : U32_MAX);
+}
+
 bool acpi_ec_dispatch_gpe(void)
 {
 	u32 ret;
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -202,6 +202,7 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);

 #ifdef CONFIG_PM_SLEEP
 void acpi_ec_flush_work(void);
+bool acpi_ec_other_gpes_active(void);
 bool acpi_ec_dispatch_gpe(void);
 #endif
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -982,10 +982,7 @@ static int acpi_s2idle_prepare_late(void)

 static void acpi_s2idle_sync(void)
 {
-	/*
-	 * The EC driver uses the system workqueue and an additional special
-	 * one, so those need to be flushed too.
-	 */
+	/* The EC driver uses special workqueues that need to be flushed. */
 	acpi_ec_flush_work();
 	acpi_os_wait_events_complete();	/* synchronize Notify handling */
 }
@@ -1013,18 +1010,19 @@ static bool acpi_s2idle_wake(void)
 			return true;

 		/*
-		 * If there are no EC events to process and at least one of the
-		 * other enabled GPEs is active, the wakeup is regarded as a
-		 * genuine one.
-		 *
-		 * Note that the checks below must be carried out in this order
-		 * to avoid returning prematurely due to a change of the EC GPE
-		 * status bit from unset to set between the checks with the
-		 * status bits of all the other GPEs unset.
+		 * If the status bit is set for any enabled GPE other than the
+		 * EC one, the wakeup is regarded as a genuine one.
 		 */
-		if (acpi_any_gpe_status_set() && !acpi_ec_dispatch_gpe())
+		if (acpi_ec_other_gpes_active())
 			return true;

+		/*
+		 * If the EC GPE status bit has not been set, the wakeup is
+		 * regarded as a spurious one.
+		 */
+		if (!acpi_ec_dispatch_gpe())
+			return false;
+
 		/*
 		 * Cancel the wakeup and process all pending events in case
 		 * there are any wakeup ones in there.
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2653,7 +2653,7 @@ static int genpd_iterate_idle_states(struct device_node *dn,

 	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
 	if (ret <= 0)
-		return ret;
+		return ret == -ENOENT ? 0 : ret;

 	/* Loop over the phandles until the requested entry is found */
 	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -40,6 +40,10 @@

 typedef int (*pm_callback_t)(struct device *);

+#define list_for_each_entry_rcu_locked(pos, head, member) \
+	list_for_each_entry_rcu(pos, head, member, \
+			device_links_read_lock_held())
+
 /*
  * The entries in the dpm_list list are in a depth first order, simply
  * because children are guaranteed to be discovered after parents, and
@@ -266,7 +270,7 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async)
 	 * callbacks freeing the link objects for the links in the list we're
 	 * walking.
 	 */
-	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 			dpm_wait(link->supplier, async);

@@ -323,7 +327,7 @@ static void dpm_wait_for_consumers(struct device *dev, bool async)
 	 * continue instead of trying to continue in parallel with its
 	 * unregistration).
 	 */
-	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
+	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 			dpm_wait(link->consumer, async);

@@ -1235,7 +1239,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
 	idx = device_links_read_lock();

-	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
 		link->supplier->power.must_resume = true;

 	device_links_read_unlock(idx);
@@ -1695,7 +1699,7 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
 	idx = device_links_read_lock();

-	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
 		spin_lock_irq(&link->supplier->power.lock);
 		link->supplier->power.direct_complete = false;
 		spin_unlock_irq(&link->supplier->power.lock);
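For context: list_for_each_entry_rcu() accepts an optional lockdep expression as a fourth argument, and the built-in RCU list checking uses it to avoid false warnings when the list is legitimately walked under a lock other than rcu_read_lock(). The wrapper above simply fixes that argument for all device-link walks in this file; open-coded, one converted walk would read (a sketch, equivalent by macro expansion):

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		link->supplier->power.must_resume = true;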
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1087,29 +1087,47 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
 EXPORT_SYMBOL_GPL(__pm_runtime_resume);

 /**
- * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
+ * pm_runtime_get_if_active - Conditionally bump up the device's usage counter.
  * @dev: Device to handle.
  *
  * Return -EINVAL if runtime PM is disabled for the device.
  *
- * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
- * and the runtime PM usage counter is nonzero, increment the counter and
- * return 1. Otherwise return 0 without changing the counter.
+ * Otherwise, if the device's runtime PM status is RPM_ACTIVE and either
+ * ign_usage_count is true or the device's usage_count is non-zero, increment
+ * the counter and return 1. Otherwise return 0 without changing the counter.
+ *
+ * If ign_usage_count is true, the function can be used to prevent suspending
+ * the device when its runtime PM status is RPM_ACTIVE.
+ *
+ * If ign_usage_count is false, the function can be used to prevent suspending
+ * the device when both its runtime PM status is RPM_ACTIVE and its usage_count
+ * is non-zero.
+ *
+ * The caller is responsible for putting the device's usage count when the
+ * return value is greater than zero.
  */
-int pm_runtime_get_if_in_use(struct device *dev)
+int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
 {
 	unsigned long flags;
 	int retval;

 	spin_lock_irqsave(&dev->power.lock, flags);
-	retval = dev->power.disable_depth > 0 ? -EINVAL :
-		dev->power.runtime_status == RPM_ACTIVE
-			&& atomic_inc_not_zero(&dev->power.usage_count);
+	if (dev->power.disable_depth > 0) {
+		retval = -EINVAL;
+	} else if (dev->power.runtime_status != RPM_ACTIVE) {
+		retval = 0;
+	} else if (ign_usage_count) {
+		retval = 1;
+		atomic_inc(&dev->power.usage_count);
+	} else {
+		retval = atomic_inc_not_zero(&dev->power.usage_count);
+	}
 	trace_rpm_usage_rcuidle(dev, 0);
 	spin_unlock_irqrestore(&dev->power.lock, flags);

 	return retval;
 }
-EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
+EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);

 /**
  * __pm_runtime_set_status - Set runtime PM status of a device.
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -24,6 +24,9 @@ suspend_state_t pm_suspend_target_state;
 #define pm_suspend_target_state	(PM_SUSPEND_ON)
 #endif

+#define list_for_each_entry_rcu_locked(pos, head, member) \
+	list_for_each_entry_rcu(pos, head, member, \
+		srcu_read_lock_held(&wakeup_srcu))
+
 /*
  * If set, the suspend/hibernate code will abort transitions to a sleep state
  * if wakeup events are registered during or immediately before the transition.
@@ -241,7 +244,9 @@ void wakeup_source_unregister(struct wakeup_source *ws)
 {
 	if (ws) {
 		wakeup_source_remove(ws);
-		wakeup_source_sysfs_remove(ws);
+		if (ws->dev)
+			wakeup_source_sysfs_remove(ws);
+
 		wakeup_source_destroy(ws);
 	}
 }
@@ -405,7 +410,7 @@ void device_wakeup_arm_wake_irqs(void)
 	int srcuidx;

 	srcuidx = srcu_read_lock(&wakeup_srcu);
-	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
+	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
 		dev_pm_arm_wake_irq(ws->wakeirq);
 	srcu_read_unlock(&wakeup_srcu, srcuidx);
 }
@@ -421,7 +426,7 @@ void device_wakeup_disarm_wake_irqs(void)
 	int srcuidx;

 	srcuidx = srcu_read_lock(&wakeup_srcu);
-	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
+	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
 		dev_pm_disarm_wake_irq(ws->wakeirq);
 	srcu_read_unlock(&wakeup_srcu, srcuidx);
 }
@@ -874,7 +879,7 @@ void pm_print_active_wakeup_sources(void)
 	struct wakeup_source *last_activity_ws = NULL;

 	srcuidx = srcu_read_lock(&wakeup_srcu);
-	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
 		if (ws->active) {
 			pm_pr_dbg("active wakeup source: %s\n", ws->name);
 			active = 1;
@@ -1025,7 +1030,7 @@ void pm_wakep_autosleep_enabled(bool set)
 	int srcuidx;

 	srcuidx = srcu_read_lock(&wakeup_srcu);
-	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
 		spin_lock_irq(&ws->lock);
 		if (ws->autosleep_enabled != set) {
 			ws->autosleep_enabled = set;
@@ -1104,7 +1109,7 @@ static void *wakeup_sources_stats_seq_start(struct seq_file *m,
 	}

 	*srcuidx = srcu_read_lock(&wakeup_srcu);
-	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
 		if (n-- <= 0)
 			return ws;
 	}
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -160,6 +160,29 @@ int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
 	return 0;
 }

+static int __init psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
+					    struct psci_cpuidle_data *data,
+					    unsigned int state_count, int cpu)
+{
+	/* Currently limit the hierarchical topology to be used in OSI mode. */
+	if (!psci_has_osi_support())
+		return 0;
+
+	data->dev = psci_dt_attach_cpu(cpu);
+	if (IS_ERR_OR_NULL(data->dev))
+		return PTR_ERR_OR_ZERO(data->dev);
+
+	/*
+	 * Using the deepest state for the CPU to trigger a potential selection
+	 * of a shared state for the domain, assumes the domain states are all
+	 * deeper states.
+	 */
+	drv->states[state_count - 1].enter = psci_enter_domain_idle_state;
+	psci_cpuidle_use_cpuhp = true;
+
+	return 0;
+}
+
 static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv,
 					struct device_node *cpu_node,
 					unsigned int state_count, int cpu)
@@ -193,25 +216,10 @@ static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv,
 		goto free_mem;
 	}

-	/* Currently limit the hierarchical topology to be used in OSI mode. */
-	if (psci_has_osi_support()) {
-		data->dev = psci_dt_attach_cpu(cpu);
-		if (IS_ERR(data->dev)) {
-			ret = PTR_ERR(data->dev);
-			goto free_mem;
-		}
-
-		/*
-		 * Using the deepest state for the CPU to trigger a potential
-		 * selection of a shared state for the domain, assumes the
-		 * domain states are all deeper states.
-		 */
-		if (data->dev) {
-			drv->states[state_count - 1].enter =
-				psci_enter_domain_idle_state;
-			psci_cpuidle_use_cpuhp = true;
-		}
-	}
+	/* Initialize optional data, used for the hierarchical topology. */
+	ret = psci_dt_cpu_init_topology(drv, data, state_count, cpu);
+	if (ret < 0)
+		goto free_mem;

 	/* Idle states parsed correctly, store them in the per-cpu struct. */
 	data->psci_states = psci_states;
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -752,7 +752,7 @@ ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_dispatch_gpe(acpi_handle gpe_device, u32 gpe_number))
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
-ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(void))
+ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(u32 gpe_skip_number))
 ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_fixed_event_status_set(void))
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -38,7 +38,7 @@ extern int pm_runtime_force_resume(struct device *dev);
 extern int __pm_runtime_idle(struct device *dev, int rpmflags);
 extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
 extern int __pm_runtime_resume(struct device *dev, int rpmflags);
-extern int pm_runtime_get_if_in_use(struct device *dev);
+extern int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count);
 extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
 extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
 extern int pm_runtime_barrier(struct device *dev);
@@ -60,6 +60,11 @@ extern void pm_runtime_put_suppliers(struct device *dev);
 extern void pm_runtime_new_link(struct device *dev);
 extern void pm_runtime_drop_link(struct device *dev);

+static inline int pm_runtime_get_if_in_use(struct device *dev)
+{
+	return pm_runtime_get_if_active(dev, false);
+}
+
 static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
 {
 	dev->power.ignore_children = enable;
@@ -143,6 +148,11 @@ static inline int pm_runtime_get_if_in_use(struct device *dev)
 {
 	return -EINVAL;
 }
+static inline int pm_runtime_get_if_active(struct device *dev,
+					   bool ign_usage_count)
+{
+	return -EINVAL;
+}

 static inline int __pm_runtime_set_status(struct device *dev,
 					  unsigned int status) { return 0; }
 static inline int pm_runtime_barrier(struct device *dev) { return 0; }
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -409,21 +409,7 @@ snapshot_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	switch (cmd) {
 	case SNAPSHOT_GET_IMAGE_SIZE:
 	case SNAPSHOT_AVAIL_SWAP_SIZE:
-	case SNAPSHOT_ALLOC_SWAP_PAGE: {
-		compat_loff_t __user *uoffset = compat_ptr(arg);
-		loff_t offset;
-		mm_segment_t old_fs;
-		int err;
-
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = snapshot_ioctl(file, cmd, (unsigned long) &offset);
-		set_fs(old_fs);
-		if (!err && put_user(offset, uoffset))
-			err = -EFAULT;
-
-		return err;
-	}
-
+	case SNAPSHOT_ALLOC_SWAP_PAGE:
 	case SNAPSHOT_CREATE_IMAGE:
 		return snapshot_ioctl(file, cmd,
 				      (unsigned long) compat_ptr(arg));