Commit abfba60c authored by Linus Torvalds

Merge tag 'pm+acpi-3.14-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull ACPI and power management fixes from Rafael Wysocki:

 - ACPI tables in some BIOSes list device resources with size equal to
   0, which doesn't make sense.  We should ignore such resources, but
   instead we currently try to use them and mangle things completely.
   Fix from Zhang Rui.

 - Several models of Samsung laptops accumulate EC events while they
   are in sleep states, which leads to EC buffer overflows that prevent
   new events from being signaled after system resume or reboot.  This
   has been affecting many users for quite a while and may be addressed
   by clearing the EC buffer during system resume and system startup on
   those machines.  From Kieran Clancy.

 - If the ACPI sleep control and status registers are not present
   (which happens if the Hardware Reduced ACPI mode bit is set in the
   ACPI tables, but may also result from BIOS bugs), we should not try
   to use ACPI to power off the system, and ACPI S5 should not be
   listed as supported.  Fix from Aubrey Li.

 - There's a race condition in cpufreq_get() that leads to a kernel
   crash if that function is called at the wrong time.  Fix from Aaron
   Plattner.  (A userspace sketch of the reference-counted lookup
   pattern this fix adopts follows this list.)

 - cpufreq policy objects have to be initialized entirely before they
   are first accessed by their users, which isn't the case currently,
   and that potentially leads to various kinds of breakage that is
   difficult to debug.  Fix from Viresh Kumar.

 - Locking is missing in __cpufreq_add_dev(), which leads to a race
   condition that may trigger a kernel crash.  Fix from Viresh Kumar.
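As a userspace illustration of the lookup pattern the cpufreq_get() fix
adopts with cpufreq_cpu_get()/cpufreq_cpu_put(): look the object up and
take a reference under a lock, so a concurrent teardown cannot free it
mid-use.  This is only a sketch with invented names (fake_policy,
policy_get(), policy_put()), not kernel code:

/* Minimal userspace analogue of refcounted policy lookup (invented names). */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_policy {
        int refcount;           /* protected by table_lock */
        unsigned int cur_khz;   /* the data readers want */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct fake_policy *table_slot;  /* stands in for per_cpu(cpufreq_cpu_data, cpu) */

/* Like cpufreq_cpu_get(): return the object with a reference held, or NULL. */
static struct fake_policy *policy_get(void)
{
        struct fake_policy *p;

        pthread_mutex_lock(&table_lock);
        p = table_slot;
        if (p)
                p->refcount++;
        pthread_mutex_unlock(&table_lock);
        return p;
}

/* Like cpufreq_cpu_put(): drop the reference, free on the last put. */
static void policy_put(struct fake_policy *p)
{
        int free_it;

        pthread_mutex_lock(&table_lock);
        free_it = (--p->refcount == 0);
        pthread_mutex_unlock(&table_lock);
        if (free_it)
                free(p);
}

int main(void)
{
        struct fake_policy *p = calloc(1, sizeof(*p));

        p->refcount = 1;        /* the table's own reference */
        p->cur_khz = 2400000;
        pthread_mutex_lock(&table_lock);
        table_slot = p;         /* publish */
        pthread_mutex_unlock(&table_lock);

        /* Reader: safe even if the slot is cleared concurrently. */
        struct fake_policy *ref = policy_get();
        if (ref) {
                printf("current frequency: %u kHz\n", ref->cur_khz);
                policy_put(ref);
        }

        /* Teardown: unpublish first, then drop the table's reference. */
        pthread_mutex_lock(&table_lock);
        table_slot = NULL;
        pthread_mutex_unlock(&table_lock);
        policy_put(p);
        return 0;
}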

* tag 'pm+acpi-3.14-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  ACPI / EC: Clear stale EC events on Samsung systems
  cpufreq: Initialize governor for a new policy under policy->rwsem
  cpufreq: Initialize policy before making it available for others to use
  cpufreq: use cpufreq_cpu_get() to avoid cpufreq_get() race conditions
  ACPI / sleep: pm_power_off needs more sanity checks to be installed
  ACPI / resources: ignore invalid ACPI device resources
parents b01d4e68 19bc45a5
drivers/acpi/ec.c
@@ -67,6 +67,8 @@ enum ec_command {
 #define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
 #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
 #define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
+#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query
+                               * when trying to clear the EC */
 
 enum {
         EC_FLAGS_QUERY_PENDING, /* Query is pending */
@@ -116,6 +118,7 @@ EXPORT_SYMBOL(first_ec);
 static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
 static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
 static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
+static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
 
 /* --------------------------------------------------------------------------
                              Transaction Management
@@ -440,6 +443,29 @@ acpi_handle ec_get_handle(void)
 
 EXPORT_SYMBOL(ec_get_handle);
 
+static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);
+
+/*
+ * Clears stale _Q events that might have accumulated in the EC.
+ * Run with locked ec mutex.
+ */
+static void acpi_ec_clear(struct acpi_ec *ec)
+{
+        int i, status;
+        u8 value = 0;
+
+        for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
+                status = acpi_ec_query_unlocked(ec, &value);
+
+                if (status || !value)
+                        break;
+        }
+
+        if (unlikely(i == ACPI_EC_CLEAR_MAX))
+                pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
+        else
+                pr_info("%d stale EC events cleared\n", i);
+}
+
 void acpi_ec_block_transactions(void)
 {
         struct acpi_ec *ec = first_ec;
@@ -463,6 +489,10 @@ void acpi_ec_unblock_transactions(void)
         mutex_lock(&ec->mutex);
         /* Allow transactions to be carried out again */
         clear_bit(EC_FLAGS_BLOCKED, &ec->flags);
+
+        if (EC_FLAGS_CLEAR_ON_RESUME)
+                acpi_ec_clear(ec);
+
         mutex_unlock(&ec->mutex);
 }
@@ -821,6 +851,13 @@ static int acpi_ec_add(struct acpi_device *device)
         /* EC is fully operational, allow queries */
         clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
 
+        /* Clear stale _Q events if hardware might require that */
+        if (EC_FLAGS_CLEAR_ON_RESUME) {
+                mutex_lock(&ec->mutex);
+                acpi_ec_clear(ec);
+                mutex_unlock(&ec->mutex);
+        }
+
         return ret;
 }
@@ -922,6 +959,30 @@ static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
         return 0;
 }
 
+/*
+ * On some hardware it is necessary to clear events accumulated by the EC during
+ * sleep. These ECs stop reporting GPEs until they are manually polled, if too
+ * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=44161
+ *
+ * Ideally, the EC should also be instructed NOT to accumulate events during
+ * sleep (which Windows seems to do somehow), but the interface to control this
+ * behaviour is not known at this time.
+ *
+ * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
+ * however it is very likely that other Samsung models are affected.
+ *
+ * On systems which don't accumulate _Q events during sleep, this extra check
+ * should be harmless.
+ */
+static int ec_clear_on_resume(const struct dmi_system_id *id)
+{
+        pr_debug("Detected system needing EC poll on resume.\n");
+        EC_FLAGS_CLEAR_ON_RESUME = 1;
+        return 0;
+}
+
 static struct dmi_system_id ec_dmi_table[] __initdata = {
         {
         ec_skip_dsdt_scan, "Compal JFL92", {
@@ -965,6 +1026,9 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
         {
         ec_validate_ecdt, "ASUS hardware", {
         DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
         DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
+        {
+        ec_clear_on_resume, "Samsung hardware", {
+        DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
         {},
 };
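For context, a standalone sketch of how a quirk table like ec_dmi_table
is consumed.  The real interface is dmi_check_system() from
<linux/dmi.h>; everything below uses mock types and a mock matcher for
illustration only.  Each entry whose DMI strings match the
firmware-reported identity has its callback run, which is what sets
EC_FLAGS_CLEAR_ON_RESUME above:

/* Mock DMI quirk matching, not the real <linux/dmi.h> API. */
#include <stdio.h>
#include <string.h>

struct mock_dmi_id {
        int (*callback)(const struct mock_dmi_id *id);
        const char *ident;
        const char *vendor_match;
};

static int clear_on_resume_flag;

static int mock_ec_clear_on_resume(const struct mock_dmi_id *id)
{
        printf("quirk applied: %s\n", id->ident);
        clear_on_resume_flag = 1;
        return 0;
}

static const struct mock_dmi_id mock_table[] = {
        { mock_ec_clear_on_resume, "Samsung hardware",
          "SAMSUNG ELECTRONICS CO., LTD." },
        { }     /* terminator, like the {} entry in ec_dmi_table */
};

/* Walks the table like dmi_check_system(): run every matching callback. */
static void mock_dmi_check(const char *fw_vendor)
{
        const struct mock_dmi_id *d;

        for (d = mock_table; d->callback; d++)
                if (strcmp(fw_vendor, d->vendor_match) == 0)
                        d->callback(d);
}

int main(void)
{
        mock_dmi_check("SAMSUNG ELECTRONICS CO., LTD.");
        printf("EC_FLAGS_CLEAR_ON_RESUME analogue = %d\n", clear_on_resume_flag);
        return 0;
}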
drivers/acpi/resources.c
@@ -77,18 +77,24 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
         switch (ares->type) {
         case ACPI_RESOURCE_TYPE_MEMORY24:
                 memory24 = &ares->data.memory24;
+                if (!memory24->address_length)
+                        return false;
                 acpi_dev_get_memresource(res, memory24->minimum,
                                          memory24->address_length,
                                          memory24->write_protect);
                 break;
         case ACPI_RESOURCE_TYPE_MEMORY32:
                 memory32 = &ares->data.memory32;
+                if (!memory32->address_length)
+                        return false;
                 acpi_dev_get_memresource(res, memory32->minimum,
                                          memory32->address_length,
                                          memory32->write_protect);
                 break;
         case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
                 fixed_memory32 = &ares->data.fixed_memory32;
+                if (!fixed_memory32->address_length)
+                        return false;
                 acpi_dev_get_memresource(res, fixed_memory32->address,
                                          fixed_memory32->address_length,
                                          fixed_memory32->write_protect);
@@ -144,12 +150,16 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
         switch (ares->type) {
         case ACPI_RESOURCE_TYPE_IO:
                 io = &ares->data.io;
+                if (!io->address_length)
+                        return false;
                 acpi_dev_get_ioresource(res, io->minimum,
                                         io->address_length,
                                         io->io_decode);
                 break;
         case ACPI_RESOURCE_TYPE_FIXED_IO:
                 fixed_io = &ares->data.fixed_io;
+                if (!fixed_io->address_length)
+                        return false;
                 acpi_dev_get_ioresource(res, fixed_io->address,
                                         fixed_io->address_length,
                                         ACPI_DECODE_10);
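Why zero-size entries must be rejected: the acpi_dev_get_*resource()
helpers fill in an inclusive end address of start + length - 1, so a
zero length yields end < start, an invalid region that later consumers
mishandle.  A minimal standalone demonstration of the arithmetic (not
kernel code; the addresses are made up):

#include <stdio.h>

int main(void)
{
        unsigned long start = 0xfed40000;       /* arbitrary example base */
        unsigned long len = 0;                  /* bogus BIOS-reported size */
        unsigned long end = start + len - 1;    /* inclusive end: start - 1 */

        /* end < start: the descriptor is nonsense and must be ignored */
        printf("start=0x%lx end=0x%lx valid=%d\n", start, end, end >= start);
        return 0;
}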
drivers/acpi/sleep.c
@@ -807,7 +807,12 @@ int __init acpi_sleep_init(void)
         acpi_sleep_hibernate_setup();
 
         status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
-        if (ACPI_SUCCESS(status)) {
+        /*
+         * Check both ACPI S5 object and ACPI sleep registers to
+         * install pm_power_off_prepare/pm_power_off hook
+         */
+        if (ACPI_SUCCESS(status) && acpi_gbl_FADT.sleep_control.address
+            && acpi_gbl_FADT.sleep_status.address) {
                 sleep_states[ACPI_STATE_S5] = 1;
                 pm_power_off_prepare = acpi_power_off_prepare;
                 pm_power_off = acpi_power_off;
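A standalone mock of the guard this hunk introduces (invented struct
names; not the real struct acpi_table_fadt): ACPI S5 power-off is only
advertised when the _S5 object exists and the FADT supplies both sleep
register addresses, since those addresses can be zero when the
hardware-reduced bit is set or the BIOS is buggy:

#include <stdbool.h>
#include <stdio.h>

struct mock_gas { unsigned long long address; };
struct mock_fadt {
        struct mock_gas sleep_control;
        struct mock_gas sleep_status;
};

/* Mirrors: ACPI_SUCCESS(status) && sleep_control.address && sleep_status.address */
static bool can_install_acpi_power_off(bool s5_object_present,
                                       const struct mock_fadt *fadt)
{
        return s5_object_present && fadt->sleep_control.address &&
               fadt->sleep_status.address;
}

int main(void)
{
        struct mock_fadt no_regs = { { 0 }, { 0 } };       /* registers absent */
        struct mock_fadt normal = { { 0xb2 }, { 0xb3 } };  /* made-up addresses */

        printf("registers absent: %d\n", can_install_acpi_power_off(true, &no_regs));
        printf("registers present: %d\n", can_install_acpi_power_off(true, &normal));
        return 0;
}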
drivers/cpufreq/cpufreq.c
@@ -1109,6 +1109,21 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
                 goto err_set_policy_cpu;
         }
 
+        /* related cpus should atleast have policy->cpus */
+        cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+
+        /*
+         * affected cpus must always be the one, which are online. We aren't
+         * managing offline cpus here.
+         */
+        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
+
+        if (!frozen) {
+                policy->user_policy.min = policy->min;
+                policy->user_policy.max = policy->max;
+        }
+
+        down_write(&policy->rwsem);
         write_lock_irqsave(&cpufreq_driver_lock, flags);
         for_each_cpu(j, policy->cpus)
                 per_cpu(cpufreq_cpu_data, j) = policy;
@@ -1162,20 +1177,6 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
                 }
         }
 
-        /* related cpus should atleast have policy->cpus */
-        cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
-
-        /*
-         * affected cpus must always be the one, which are online. We aren't
-         * managing offline cpus here.
-         */
-        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
-
-        if (!frozen) {
-                policy->user_policy.min = policy->min;
-                policy->user_policy.max = policy->max;
-        }
-
         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                      CPUFREQ_START, policy);
@@ -1206,6 +1207,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
                 policy->user_policy.policy = policy->policy;
                 policy->user_policy.governor = policy->governor;
         }
+        up_write(&policy->rwsem);
 
         kobject_uevent(&policy->kobj, KOBJ_ADD);
         up_read(&cpufreq_rwsem);
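The ordering enforced by the two policy-initialization fixes above, as
a userspace sketch (invented names; not kernel code): finish
initializing the object and take its write lock before publishing the
pointer where readers such as cpufreq_get() can find it, and only drop
the lock once setup is complete:

#include <pthread.h>
#include <stdio.h>

struct fake_policy {
        pthread_rwlock_t rwsem;
        unsigned int min, max;          /* must be valid before publication */
};

static struct fake_policy *published;   /* stands in for per_cpu(cpufreq_cpu_data, cpu) */

static void add_dev(struct fake_policy *p)
{
        /* 1. Finish every field before anyone can see the object. */
        p->min = 800000;
        p->max = 2400000;

        /* 2. Take the write lock, THEN publish the pointer. */
        pthread_rwlock_wrlock(&p->rwsem);
        published = p;

        /* ... remaining setup (governor init etc.) still under the lock ... */

        /* 3. Only now may readers proceed. */
        pthread_rwlock_unlock(&p->rwsem);
}

int main(void)
{
        struct fake_policy p;

        pthread_rwlock_init(&p.rwsem, NULL);
        add_dev(&p);

        /* Reader pattern: take the lock before touching the fields. */
        pthread_rwlock_rdlock(&published->rwsem);
        printf("policy range: %u..%u kHz\n", published->min, published->max);
        pthread_rwlock_unlock(&published->rwsem);

        pthread_rwlock_destroy(&p.rwsem);
        return 0;
}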
@@ -1546,23 +1548,16 @@ static unsigned int __cpufreq_get(unsigned int cpu)
  */
 unsigned int cpufreq_get(unsigned int cpu)
 {
-        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
         unsigned int ret_freq = 0;
 
-        if (cpufreq_disabled() || !cpufreq_driver)
-                return -ENOENT;
-
-        BUG_ON(!policy);
-
-        if (!down_read_trylock(&cpufreq_rwsem))
-                return 0;
-
-        down_read(&policy->rwsem);
-
-        ret_freq = __cpufreq_get(cpu);
+        if (policy) {
+                down_read(&policy->rwsem);
+                ret_freq = __cpufreq_get(cpu);
+                up_read(&policy->rwsem);
 
-        up_read(&policy->rwsem);
-        up_read(&cpufreq_rwsem);
+                cpufreq_cpu_put(policy);
+        }
 
         return ret_freq;
 }