Commit ad1ac947 authored by Rafael J. Wysocki

Merge branches 'pm-cpuidle', 'pm-cpufreq', 'pm-domains' and 'pm-sleep'

* pm-cpuidle:
  cpuidle: coupled: remove unused define cpuidle_coupled_lock
  cpuidle: fix fallback mechanism for suspend to idle in absence of enter_freeze

* pm-cpufreq:
  cpufreq: cpufreq-dt: avoid uninitialized variable warnings:
  cpufreq: pxa2xx: fix pxa_cpufreq_change_voltage prototype
  cpufreq: Use list_is_last() to check last entry of the policy list
  cpufreq: Fix NULL reference crash while accessing policy->governor_data

* pm-domains:
  PM / Domains: Fix typo in comment
  PM / Domains: Fix potential deadlock while adding/removing subdomains
  PM / domains: fix lockdep issue for all subdomains

* pm-sleep:
  PM: APM_EMULATION does not depend on PM
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -162,7 +162,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
 
 /**
  * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff().
- * @genpd: PM domait to power off.
+ * @genpd: PM domain to power off.
  *
  * Queue up the execution of genpd_poweroff() unless it's already been done
  * before.
@@ -172,16 +172,15 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
 	queue_work(pm_wq, &genpd->power_off_work);
 }
 
-static int genpd_poweron(struct generic_pm_domain *genpd);
-
 /**
  * __genpd_poweron - Restore power to a given PM domain and its masters.
  * @genpd: PM domain to power up.
+ * @depth: nesting count for lockdep.
  *
  * Restore power to @genpd and all of its masters so that it is possible to
  * resume a device belonging to it.
  */
-static int __genpd_poweron(struct generic_pm_domain *genpd)
+static int __genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
 {
 	struct gpd_link *link;
 	int ret = 0;
@@ -196,11 +195,16 @@ static int __genpd_poweron(struct generic_pm_domain *genpd)
 	 * with it.
 	 */
 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
-		genpd_sd_counter_inc(link->master);
+		struct generic_pm_domain *master = link->master;
+
+		genpd_sd_counter_inc(master);
+
+		mutex_lock_nested(&master->lock, depth + 1);
+		ret = __genpd_poweron(master, depth + 1);
+		mutex_unlock(&master->lock);
 
-		ret = genpd_poweron(link->master);
 		if (ret) {
-			genpd_sd_counter_dec(link->master);
+			genpd_sd_counter_dec(master);
 			goto err;
 		}
 	}
@@ -232,11 +236,12 @@ static int genpd_poweron(struct generic_pm_domain *genpd)
 	int ret;
 
 	mutex_lock(&genpd->lock);
-	ret = __genpd_poweron(genpd);
+	ret = __genpd_poweron(genpd, 0);
 	mutex_unlock(&genpd->lock);
 
 	return ret;
 }
 
 static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
 {
 	return GENPD_DEV_CALLBACK(genpd, int, save_state, dev);
@@ -484,7 +489,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
 	}
 
 	mutex_lock(&genpd->lock);
-	ret = __genpd_poweron(genpd);
+	ret = __genpd_poweron(genpd, 0);
 	mutex_unlock(&genpd->lock);
 
 	if (ret)
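
The hunks above make __genpd_poweron() lock and power up its masters by direct recursion, passing a lockdep nesting depth instead of calling back into genpd_poweron(). A minimal sketch of that pattern, using a made-up toy_domain type rather than generic_pm_domain:

#include <linux/mutex.h>

/* Illustration only: a trimmed-down stand-in for generic_pm_domain. */
struct toy_domain {
	struct mutex lock;
	struct toy_domain *parent;
};

/*
 * Walk up the parent chain while the caller already holds d->lock.
 * Each level locks the parent with a deeper lockdep subclass
 * (depth + 1), so lockdep sees intentional nesting rather than a
 * recursive acquisition of a single lock class.
 */
static int toy_power_up(struct toy_domain *d, unsigned int depth)
{
	int ret = 0;

	if (d->parent) {
		mutex_lock_nested(&d->parent->lock, depth + 1);
		ret = toy_power_up(d->parent, depth + 1);
		mutex_unlock(&d->parent->lock);
	}

	return ret;
}

Lockdep only distinguishes a small, fixed number of subclasses, so this scheme assumes reasonably shallow domain hierarchies.
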
@@ -1339,8 +1344,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 	if (!link)
 		return -ENOMEM;
 
-	mutex_lock(&genpd->lock);
-	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
+	mutex_lock(&subdomain->lock);
+	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
 
 	if (genpd->status == GPD_STATE_POWER_OFF
 	    && subdomain->status != GPD_STATE_POWER_OFF) {
@@ -1363,8 +1368,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
 	genpd_sd_counter_inc(genpd);
 
  out:
-	mutex_unlock(&subdomain->lock);
 	mutex_unlock(&genpd->lock);
+	mutex_unlock(&subdomain->lock);
 	if (ret)
 		kfree(link);
 	return ret;
@@ -1385,7 +1390,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
 		return -EINVAL;
 
-	mutex_lock(&genpd->lock);
+	mutex_lock(&subdomain->lock);
+	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
 
 	if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
 		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
@@ -1398,22 +1404,19 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 		if (link->slave != subdomain)
 			continue;
 
-		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
-
 		list_del(&link->master_node);
 		list_del(&link->slave_node);
 		kfree(link);
 		if (subdomain->status != GPD_STATE_POWER_OFF)
 			genpd_sd_counter_dec(genpd);
 
-		mutex_unlock(&subdomain->lock);
-
 		ret = 0;
 		break;
 	}
 
  out:
 	mutex_unlock(&genpd->lock);
+	mutex_unlock(&subdomain->lock);
 
 	return ret;
 }
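
With these changes, pm_genpd_add_subdomain() and pm_genpd_remove_subdomain() both take the subdomain's lock first and then the parent's lock with SINGLE_DEPTH_NESTING, so the two paths can no longer deadlock against each other. A short sketch of that ordering rule, reusing the made-up toy_domain type from the previous example:

#include <linux/lockdep.h>
#include <linux/mutex.h>

/* Both the link and unlink paths must agree on child-then-parent order. */
static void toy_link_domains(struct toy_domain *parent, struct toy_domain *child)
{
	mutex_lock(&child->lock);
	mutex_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);

	/* ... manipulate the parent/child link under both locks ... */

	mutex_unlock(&parent->lock);
	mutex_unlock(&child->lock);
}

If one path locked parent-then-child while the other locked child-then-parent, the two could deadlock against each other, which is what the add/remove hunks above avoid.
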
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -142,15 +142,16 @@ static int allocate_resources(int cpu, struct device **cdev,
 try_again:
 	cpu_reg = regulator_get_optional(cpu_dev, reg);
-	if (IS_ERR(cpu_reg)) {
+	ret = PTR_ERR_OR_ZERO(cpu_reg);
+	if (ret) {
 		/*
 		 * If cpu's regulator supply node is present, but regulator is
 		 * not yet registered, we should try defering probe.
 		 */
-		if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
+		if (ret == -EPROBE_DEFER) {
 			dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n",
 				cpu);
-			return -EPROBE_DEFER;
+			return ret;
 		}
 
 		/* Try with "cpu-supply" */
@@ -159,18 +160,16 @@ static int allocate_resources(int cpu, struct device **cdev,
 			goto try_again;
 		}
 
-		dev_dbg(cpu_dev, "no regulator for cpu%d: %ld\n",
-			cpu, PTR_ERR(cpu_reg));
+		dev_dbg(cpu_dev, "no regulator for cpu%d: %d\n", cpu, ret);
 	}
 
 	cpu_clk = clk_get(cpu_dev, NULL);
-	if (IS_ERR(cpu_clk)) {
+	ret = PTR_ERR_OR_ZERO(cpu_clk);
+	if (ret) {
 		/* put regulator */
 		if (!IS_ERR(cpu_reg))
 			regulator_put(cpu_reg);
 
-		ret = PTR_ERR(cpu_clk);
-
 		/*
 		 * If cpu's clk node is present, but clock is not yet
 		 * registered, we should try defering probe.
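
PTR_ERR_OR_ZERO() converts an ERR_PTR-style return into a plain int in one step, so ret is assigned on every path and the compiler no longer warns about a possibly uninitialized use. A minimal sketch of the pattern with a made-up toy_get_clk() helper (the real allocate_resources() does the same for both the regulator and the clock):

#include <linux/clk.h>
#include <linux/err.h>

static int toy_get_clk(struct device *dev, struct clk **out)
{
	struct clk *clk;
	int ret;

	clk = clk_get(dev, NULL);
	ret = PTR_ERR_OR_ZERO(clk);	/* 0 on success, -errno on error */
	if (ret == -EPROBE_DEFER)
		return ret;		/* provider not registered yet, retry later */
	if (ret)
		return ret;		/* any other error is fatal */

	*out = clk;
	return 0;
}
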
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -48,11 +48,11 @@ static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
 					  bool active)
 {
 	do {
-		policy = list_next_entry(policy, policy_list);
-
 		/* No more policies in the list */
-		if (&policy->policy_list == &cpufreq_policy_list)
+		if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
 			return NULL;
+
+		policy = list_next_entry(policy, policy_list);
 	} while (!suitable_policy(policy, active));
 
 	return policy;
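
list_is_last() asks whether a node is the tail of its list, which lets next_policy() check for the end before stepping forward instead of comparing against the list head after the fact. A sketch of the same iteration with made-up toy_* names:

#include <linux/list.h>

struct toy_policy {
	struct list_head node;
	bool active;
};

/* Return the next entry after @p that is marked active, or NULL. */
static struct toy_policy *toy_next_active(struct toy_policy *p,
					  struct list_head *head)
{
	do {
		if (list_is_last(&p->node, head))
			return NULL;	/* @p is the tail: nothing follows */

		p = list_next_entry(p, node);
	} while (!p->active);

	return p;
}
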
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -387,16 +387,18 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
 	if (!have_governor_per_policy())
 		cdata->gdbs_data = dbs_data;
 
+	policy->governor_data = dbs_data;
+
 	ret = sysfs_create_group(get_governor_parent_kobj(policy),
 				 get_sysfs_attr(dbs_data));
 	if (ret)
 		goto reset_gdbs_data;
 
-	policy->governor_data = dbs_data;
-
 	return 0;
 
 reset_gdbs_data:
+	policy->governor_data = NULL;
+
 	if (!have_governor_per_policy())
 		cdata->gdbs_data = NULL;
 
 	cdata->exit(dbs_data, !policy->governor->initialized);
@@ -417,16 +419,19 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy,
 	if (!cdbs->shared || cdbs->shared->policy)
 		return -EBUSY;
 
-	policy->governor_data = NULL;
 	if (!--dbs_data->usage_count) {
 		sysfs_remove_group(get_governor_parent_kobj(policy),
 				   get_sysfs_attr(dbs_data));
 
+		policy->governor_data = NULL;
+
 		if (!have_governor_per_policy())
 			cdata->gdbs_data = NULL;
 
 		cdata->exit(dbs_data, policy->governor->initialized == 1);
 		kfree(dbs_data);
+	} else {
+		policy->governor_data = NULL;
 	}
 
 	free_common_dbs_info(policy, cdata);
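
The governor changes above come down to an ordering rule for policy->governor_data: make it point at valid data before the sysfs attributes that dereference it are created, and clear it before the data it points to is freed. A compact sketch of that rule with made-up toy_* names:

#include <linux/slab.h>

struct toy_data { int dummy; };

struct toy_policy { struct toy_data *private; };

/* Stand-in for sysfs_create_group(); pretend it always succeeds. */
static int toy_expose(struct toy_policy *pol) { return 0; }

static int toy_init(struct toy_policy *pol, struct toy_data *data)
{
	int ret;

	pol->private = data;		/* publish before anyone can look it up */

	ret = toy_expose(pol);
	if (ret)
		pol->private = NULL;	/* unpublish again on failure */

	return ret;
}

static void toy_exit(struct toy_policy *pol, struct toy_data *data)
{
	pol->private = NULL;		/* unpublish before the data goes away */
	kfree(data);
}
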
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -202,7 +202,7 @@ static void __init pxa_cpufreq_init_voltages(void)
 	}
 }
 #else
-static int pxa_cpufreq_change_voltage(struct pxa_freqs *pxa_freq)
+static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
 {
 	return 0;
 }
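
The pxa2xx fix makes the stub take the same const-qualified argument as the other implementation, so every configuration sees one consistent prototype. A sketch of the pattern with made-up names and a made-up CONFIG_TOY_REGULATOR symbol:

#include <linux/printk.h>

struct toy_freq {
	unsigned int khz;
	int uv;
};

#ifdef CONFIG_TOY_REGULATOR
static int toy_change_voltage(const struct toy_freq *f)
{
	pr_debug("setting %d uV for %u kHz\n", f->uv, f->khz);
	return 0;
}
#else
/* The stub must keep the exact same (const-qualified) prototype. */
static int toy_change_voltage(const struct toy_freq *f)
{
	return 0;
}
#endif
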
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -119,7 +119,6 @@ struct cpuidle_coupled {
 #define CPUIDLE_COUPLED_NOT_IDLE	(-1)
 
-static DEFINE_MUTEX(cpuidle_coupled_lock);
 static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
 
 /*
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -153,7 +153,7 @@ int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	 * be frozen safely.
 	 */
 	index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
-	if (index >= 0)
+	if (index > 0)
 		enter_freeze_proper(drv, dev, index);
 
 	return index;
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -235,7 +235,7 @@ config PM_TRACE_RTC
 
 config APM_EMULATION
 	tristate "Advanced Power Management Emulation"
-	depends on PM && SYS_SUPPORTS_APM_EMULATION
+	depends on SYS_SUPPORTS_APM_EMULATION
 	help
 	  APM is a BIOS specification for saving power using several different
 	  techniques. This is mostly useful for battery powered laptops with
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -162,7 +162,7 @@ static void cpuidle_idle_call(void)
 	 */
 	if (idle_should_freeze()) {
 		entered_state = cpuidle_enter_freeze(drv, dev);
-		if (entered_state >= 0) {
+		if (entered_state > 0) {
 			local_irq_enable();
 			goto exit_idle;
 		}
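
The last two hunks agree on one convention: cpuidle_enter_freeze() only reports success with a positive index, and the idle loop only skips the normal cpuidle path when it gets one. A tiny sketch of that fallback shape with made-up toy_* helpers:

/* Pretend no suspend-to-idle state is available. */
static int toy_enter_freeze(void)
{
	return 0;
}

static void toy_enter_normal_idle(void)
{
}

static void toy_idle(void)
{
	if (toy_enter_freeze() > 0)
		return;			/* a real s2idle state handled it */

	toy_enter_normal_idle();	/* otherwise fall back to normal state selection */
}
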