Commit 14e94194 authored by Linus Torvalds

Merge tag 'pm+acpi-3.11-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull ACPI and power management fixes from Rafael Wysocki:

 - ACPI-based memory hotplug stopped working after a recent change,
   because it's not possible to associate sufficiently many "physical"
   devices with one ACPI device object due to an artificial limit.  Fix
   from Rafael J. Wysocki removes that limit and makes memory hotplug
   work again (a sketch of the new ID-assignment scheme follows this
   list).

 - A change made in 3.9 uncovered a bug in the ACPI processor driver
   preventing NUMA nodes from being put offline due to an ordering
   issue.  Fix from Yasuaki Ishimatsu changes the ordering to make
   things work again.

 - One of the recent ACPI video commits (that hasn't been reverted so
   far) uncovered a bug in the code handling quirky BIOSes that caused
   some Asus machines to boot with the backlight completely off, which
   made it quite difficult to use them afterward.  Fix from Felipe
   Contreras improves the quirk to cover this particular case correctly.

 - A cpufreq user space interface change made in 3.10 inadvertently
   renamed the ignore_nice_load sysfs attribute to ignore_nice, which
   resulted in some confusion.  Fix from Viresh Kumar changes the name
   back to ignore_nice_load (a small usage example follows this list).

 - An initialization ordering change made in 3.9 broke cpufreq on
   loongson2 boards.  Fix from Aaro Koskinen restores the correct
   initialization ordering there.

 - Fix breakage resulting from a mistake made in 3.9 that caused the
   detection of some graphics adapters (that were detected correctly
   before) to fail.  There are two objects representing the same PCIe
   port in the affected systems' ACPI tables and both appear as
   "enabled", and we are expected to guess which one to use.  We used to
   choose the right one before by pure luck, but when we tried to
   address another similar corner case, the luck went away.  This time
   we try to make our guessing a bit more educated, which is reported
   to work on those systems (the selection heuristic is sketched after
   this list).

 - The /proc/acpi/wakeup interface code is missing some locking, which
   may lead to breakage if that file is written or read during hotplug
   of wakeup devices.  That should be rare but still possible, so it's
   better to start using the appropriate locking there (see the last
   sketch after this list).
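
A minimal userspace sketch of the ID-assignment idea behind the memory
hotplug fix, assuming a plain singly linked list in place of the kernel's
physical_node_list; the struct and function names below are made up for
illustration.  The list is kept sorted by ID and the first unused ID is
handed out, so there is no fixed upper bound and the IDs of removed nodes
get recycled.

#include <stdio.h>
#include <stdlib.h>

struct phys_node {
	unsigned int node_id;
	struct phys_node *next;
};

/* Add a node, assigning the smallest ID not already in use. */
static struct phys_node *add_node(struct phys_node **head)
{
	struct phys_node **link = head, *pn, *new;
	unsigned int node_id = 0;

	for (pn = *head; pn; pn = pn->next) {
		if (pn->node_id != node_id)
			break;			/* gap found: recycle this ID */
		link = &pn->next;
		node_id++;
	}

	new = malloc(sizeof(*new));
	if (!new)
		exit(1);
	new->node_id = node_id;
	new->next = *link;			/* keeps the list sorted by ID */
	*link = new;
	return new;
}

int main(void)
{
	struct phys_node *list = NULL;
	struct phys_node *first = add_node(&list);	/* gets ID 0 */
	struct phys_node *second = add_node(&list);	/* gets ID 1 */

	printf("%u %u\n", first->node_id, second->node_id);

	/* Drop node 0; the next addition recycles ID 0, not 2. */
	list = second;
	free(first);
	printf("%u\n", add_node(&list)->node_id);
	return 0;
}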
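
A small userspace example of reading the restored ignore_nice_load
attribute.  The path below is an assumption: it is where the attribute
lives with the ondemand governor and global (not per-policy) tunables;
on setups with per-policy governor tunables it sits under the per-policy
cpufreq directory instead.

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/cpufreq/ondemand/ignore_nice_load";
	char buf[16];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);	/* governor not active, or per-policy layout */
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("ignore_nice_load = %s", buf);
	fclose(f);
	return 0;
}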
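
A simplified userspace model of the "more educated" guessing, mirroring
the do_find_child() logic in the diff below: the first object whose _ADR
matches is remembered, and only when a second match turns up are the
extra checks (device enabled, and for bridges at least one child) used to
pick between them.  The candidate table and names are invented for the
example.

#include <stdbool.h>
#include <stdio.h>

struct candidate {
	const char *name;
	unsigned long long adr;
	bool enabled;
	bool has_children;
};

static bool extra_checks_passed(const struct candidate *c, bool is_bridge)
{
	if (!c->enabled)
		return false;
	return is_bridge ? c->has_children : true;
}

static const struct candidate *find_child(const struct candidate *tbl,
					  int n, unsigned long long adr,
					  bool is_bridge)
{
	const struct candidate *ret = NULL;
	bool ret_checked = false;
	int i;

	for (i = 0; i < n; i++) {
		if (tbl[i].adr != adr)
			continue;
		if (!ret) {			/* first match: remember it */
			ret = &tbl[i];
			continue;
		}
		/* _ADR collision: prefer a candidate passing the checks. */
		if (!ret_checked) {
			if (extra_checks_passed(ret, is_bridge))
				return ret;
			ret_checked = true;
		}
		if (extra_checks_passed(&tbl[i], is_bridge))
			return &tbl[i];
	}
	return ret;
}

int main(void)
{
	/* Two objects share _ADR 0x10000; only the second has children. */
	struct candidate tbl[] = {
		{ "PEG0", 0x10000, true, false },
		{ "PEG1", 0x10000, true, true  },
	};

	printf("%s\n", find_child(tbl, 2, 0x10000, true)->name);  /* PEG1 */
	return 0;
}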
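
A generic pthreads illustration (not the kernel change itself; link with
-pthread) of the pattern the /proc/acpi/wakeup fix applies below: readers
of the shared physical-node list take the same lock as the hotplug paths
that modify it, so the list is never observed mid-update.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;
static int node_count;			/* stands in for physical_node_list */

/* Hotplug path: modifies the shared state under the lock. */
static void *hotplug_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&node_lock);
	node_count++;
	pthread_mutex_unlock(&node_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, hotplug_thread, NULL);

	/* Reader (the /proc file in the kernel) takes the same lock. */
	pthread_mutex_lock(&node_lock);
	printf("nodes: %d\n", node_count);
	pthread_mutex_unlock(&node_lock);

	pthread_join(t, NULL);
	return 0;
}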

* tag 'pm+acpi-3.11-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  ACPI: Try harder to resolve _ADR collisions for bridges
  cpufreq: rename ignore_nice as ignore_nice_load
  cpufreq: loongson2: fix regression related to clock management
  ACPI / processor: move try_offline_node() after acpi_unmap_lsapic()
  ACPI: Drop physical_node_id_bitmap from struct acpi_device
  ACPI / PM: Walk physical_node_list under physical_node_lock
  ACPI / video: improve quirk check in acpi_video_bqc_quirk()
parents fdafa7cf 69fdadfd
@@ -451,7 +451,6 @@ static void acpi_processor_remove(struct acpi_device *device)
/* Clean up. */
per_cpu(processor_device_array, pr->id) = NULL;
per_cpu(processors, pr->id) = NULL;
try_offline_node(cpu_to_node(pr->id));
/* Remove the CPU. */
get_online_cpus();
@@ -459,6 +458,8 @@ static void acpi_processor_remove(struct acpi_device *device)
acpi_unmap_lsapic(pr->id);
put_online_cpus();
try_offline_node(cpu_to_node(pr->id));
out:
free_cpumask_var(pr->throttling.shared_cpu_map);
kfree(pr);
......
@@ -31,6 +31,7 @@ static LIST_HEAD(bus_type_list);
static DECLARE_RWSEM(bus_type_sem);
#define PHYSICAL_NODE_STRING "physical_node"
#define PHYSICAL_NODE_NAME_SIZE (sizeof(PHYSICAL_NODE_STRING) + 10)
int register_acpi_bus_type(struct acpi_bus_type *type)
{
@@ -78,41 +79,108 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
return ret;
}
static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used,
void *addr_p, void **ret_p)
static acpi_status acpi_dev_present(acpi_handle handle, u32 lvl_not_used,
void *not_used, void **ret_p)
{
unsigned long long addr, sta;
acpi_status status;
struct acpi_device *adev = NULL;
status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
if (ACPI_SUCCESS(status) && addr == *((u64 *)addr_p)) {
acpi_bus_get_device(handle, &adev);
if (adev) {
*ret_p = handle;
status = acpi_bus_get_status_handle(handle, &sta);
if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_ENABLED))
return AE_CTRL_TERMINATE;
return AE_CTRL_TERMINATE;
}
return AE_OK;
}
acpi_handle acpi_get_child(acpi_handle parent, u64 address)
static bool acpi_extra_checks_passed(acpi_handle handle, bool is_bridge)
{
void *ret = NULL;
unsigned long long sta;
acpi_status status;
status = acpi_bus_get_status_handle(handle, &sta);
if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
return false;
if (is_bridge) {
void *test = NULL;
/* Check if this object has at least one child device. */
acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
acpi_dev_present, NULL, NULL, &test);
return !!test;
}
return true;
}
struct find_child_context {
u64 addr;
bool is_bridge;
acpi_handle ret;
bool ret_checked;
};
static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used,
void *data, void **not_used)
{
struct find_child_context *context = data;
unsigned long long addr;
acpi_status status;
if (!parent)
return NULL;
status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
if (ACPI_FAILURE(status) || addr != context->addr)
return AE_OK;
acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, NULL,
do_acpi_find_child, &address, &ret);
return (acpi_handle)ret;
if (!context->ret) {
/* This is the first matching object. Save its handle. */
context->ret = handle;
return AE_OK;
}
/*
* There is more than one matching object with the same _ADR value.
* That really is unexpected, so we are kind of beyond the scope of the
* spec here. We have to choose which one to return, though.
*
* First, check if the previously found object is good enough and return
* its handle if so. Second, check the same for the object that we've
* just found.
*/
if (!context->ret_checked) {
if (acpi_extra_checks_passed(context->ret, context->is_bridge))
return AE_CTRL_TERMINATE;
else
context->ret_checked = true;
}
if (acpi_extra_checks_passed(handle, context->is_bridge)) {
context->ret = handle;
return AE_CTRL_TERMINATE;
}
return AE_OK;
}
EXPORT_SYMBOL(acpi_get_child);
acpi_handle acpi_find_child(acpi_handle parent, u64 addr, bool is_bridge)
{
if (parent) {
struct find_child_context context = {
.addr = addr,
.is_bridge = is_bridge,
};
acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, do_find_child,
NULL, &context, NULL);
return context.ret;
}
return NULL;
}
EXPORT_SYMBOL_GPL(acpi_find_child);
int acpi_bind_one(struct device *dev, acpi_handle handle)
{
struct acpi_device *acpi_dev;
acpi_status status;
struct acpi_device_physical_node *physical_node, *pn;
char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2];
char physical_node_name[PHYSICAL_NODE_NAME_SIZE];
struct list_head *physnode_list;
unsigned int node_id;
int retval = -EINVAL;
if (ACPI_HANDLE(dev)) {
@@ -139,25 +207,27 @@ int acpi_bind_one(struct device *dev, acpi_handle handle)
mutex_lock(&acpi_dev->physical_node_lock);
/* Sanity check. */
list_for_each_entry(pn, &acpi_dev->physical_node_list, node)
/*
* Keep the list sorted by node_id so that the IDs of removed nodes can
* be recycled easily.
*/
physnode_list = &acpi_dev->physical_node_list;
node_id = 0;
list_for_each_entry(pn, &acpi_dev->physical_node_list, node) {
/* Sanity check. */
if (pn->dev == dev) {
dev_warn(dev, "Already associated with ACPI node\n");
goto err_free;
}
/* allocate physical node id according to physical_node_id_bitmap */
physical_node->node_id =
find_first_zero_bit(acpi_dev->physical_node_id_bitmap,
ACPI_MAX_PHYSICAL_NODE);
if (physical_node->node_id >= ACPI_MAX_PHYSICAL_NODE) {
retval = -ENOSPC;
goto err_free;
if (pn->node_id == node_id) {
physnode_list = &pn->node;
node_id++;
}
}
set_bit(physical_node->node_id, acpi_dev->physical_node_id_bitmap);
physical_node->node_id = node_id;
physical_node->dev = dev;
list_add_tail(&physical_node->node, &acpi_dev->physical_node_list);
list_add(&physical_node->node, physnode_list);
acpi_dev->physical_node_count++;
mutex_unlock(&acpi_dev->physical_node_lock);
@@ -208,7 +278,7 @@ int acpi_unbind_one(struct device *dev)
mutex_lock(&acpi_dev->physical_node_lock);
list_for_each_safe(node, next, &acpi_dev->physical_node_list) {
char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2];
char physical_node_name[PHYSICAL_NODE_NAME_SIZE];
entry = list_entry(node, struct acpi_device_physical_node,
node);
@@ -216,7 +286,6 @@ int acpi_unbind_one(struct device *dev)
continue;
list_del(node);
clear_bit(entry->node_id, acpi_dev->physical_node_id_bitmap);
acpi_dev->physical_node_count--;
......
@@ -311,6 +311,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
dev->pnp.bus_id,
(u32) dev->wakeup.sleep_state);
mutex_lock(&dev->physical_node_lock);
if (!dev->physical_node_count) {
seq_printf(seq, "%c%-8s\n",
dev->wakeup.flags.run_wake ? '*' : ' ',
@@ -338,6 +340,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
put_device(ldev);
}
}
mutex_unlock(&dev->physical_node_lock);
}
mutex_unlock(&acpi_device_lock);
return 0;
@@ -347,12 +351,16 @@ static void physical_device_enable_wakeup(struct acpi_device *adev)
{
struct acpi_device_physical_node *entry;
mutex_lock(&adev->physical_node_lock);
list_for_each_entry(entry,
&adev->physical_node_list, node)
if (entry->dev && device_can_wakeup(entry->dev)) {
bool enable = !device_may_wakeup(entry->dev);
device_set_wakeup_enable(entry->dev, enable);
}
mutex_unlock(&adev->physical_node_lock);
}
static ssize_t
......
@@ -689,7 +689,7 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device,
* Some systems always report current brightness level as maximum
* through _BQC, we need to test another value for them.
*/
test_level = current_level == max_level ? br->levels[2] : max_level;
test_level = current_level == max_level ? br->levels[3] : max_level;
result = acpi_video_device_lcd_set_level(device, test_level);
if (result)
......
@@ -221,8 +221,8 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
return count;
}
static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
size_t count)
static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
const char *buf, size_t count)
{
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input, j;
@@ -235,10 +235,10 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
if (input > 1)
input = 1;
if (input == cs_tuners->ignore_nice) /* nothing to do */
if (input == cs_tuners->ignore_nice_load) /* nothing to do */
return count;
cs_tuners->ignore_nice = input;
cs_tuners->ignore_nice_load = input;
/* we need to re-evaluate prev_cpu_idle */
for_each_online_cpu(j) {
@@ -246,7 +246,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
dbs_info = &per_cpu(cs_cpu_dbs_info, j);
dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
&dbs_info->cdbs.prev_cpu_wall, 0);
if (cs_tuners->ignore_nice)
if (cs_tuners->ignore_nice_load)
dbs_info->cdbs.prev_cpu_nice =
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
@@ -279,7 +279,7 @@ show_store_one(cs, sampling_rate);
show_store_one(cs, sampling_down_factor);
show_store_one(cs, up_threshold);
show_store_one(cs, down_threshold);
show_store_one(cs, ignore_nice);
show_store_one(cs, ignore_nice_load);
show_store_one(cs, freq_step);
declare_show_sampling_rate_min(cs);
@@ -287,7 +287,7 @@ gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(down_threshold);
gov_sys_pol_attr_rw(ignore_nice);
gov_sys_pol_attr_rw(ignore_nice_load);
gov_sys_pol_attr_rw(freq_step);
gov_sys_pol_attr_ro(sampling_rate_min);
@@ -297,7 +297,7 @@ static struct attribute *dbs_attributes_gov_sys[] = {
&sampling_down_factor_gov_sys.attr,
&up_threshold_gov_sys.attr,
&down_threshold_gov_sys.attr,
&ignore_nice_gov_sys.attr,
&ignore_nice_load_gov_sys.attr,
&freq_step_gov_sys.attr,
NULL
};
@@ -313,7 +313,7 @@ static struct attribute *dbs_attributes_gov_pol[] = {
&sampling_down_factor_gov_pol.attr,
&up_threshold_gov_pol.attr,
&down_threshold_gov_pol.attr,
&ignore_nice_gov_pol.attr,
&ignore_nice_load_gov_pol.attr,
&freq_step_gov_pol.attr,
NULL
};
@@ -338,7 +338,7 @@ static int cs_init(struct dbs_data *dbs_data)
tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
tuners->ignore_nice = 0;
tuners->ignore_nice_load = 0;
tuners->freq_step = DEF_FREQUENCY_STEP;
dbs_data->tuners = tuners;
......
@@ -47,9 +47,9 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
unsigned int j;
if (dbs_data->cdata->governor == GOV_ONDEMAND)
ignore_nice = od_tuners->ignore_nice;
ignore_nice = od_tuners->ignore_nice_load;
else
ignore_nice = cs_tuners->ignore_nice;
ignore_nice = cs_tuners->ignore_nice_load;
policy = cdbs->cur_policy;
@@ -298,12 +298,12 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
cs_tuners = dbs_data->tuners;
cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
sampling_rate = cs_tuners->sampling_rate;
ignore_nice = cs_tuners->ignore_nice;
ignore_nice = cs_tuners->ignore_nice_load;
} else {
od_tuners = dbs_data->tuners;
od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
sampling_rate = od_tuners->sampling_rate;
ignore_nice = od_tuners->ignore_nice;
ignore_nice = od_tuners->ignore_nice_load;
od_ops = dbs_data->cdata->gov_ops;
io_busy = od_tuners->io_is_busy;
}
......
@@ -165,7 +165,7 @@ struct cs_cpu_dbs_info_s {
/* Per policy Governers sysfs tunables */
struct od_dbs_tuners {
unsigned int ignore_nice;
unsigned int ignore_nice_load;
unsigned int sampling_rate;
unsigned int sampling_down_factor;
unsigned int up_threshold;
@@ -175,7 +175,7 @@ struct od_dbs_tuners {
};
struct cs_dbs_tuners {
unsigned int ignore_nice;
unsigned int ignore_nice_load;
unsigned int sampling_rate;
unsigned int sampling_down_factor;
unsigned int up_threshold;
......
@@ -403,8 +403,8 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
return count;
}
static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
size_t count)
static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
const char *buf, size_t count)
{
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
unsigned int input;
@@ -419,10 +419,10 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
if (input > 1)
input = 1;
if (input == od_tuners->ignore_nice) { /* nothing to do */
if (input == od_tuners->ignore_nice_load) { /* nothing to do */
return count;
}
od_tuners->ignore_nice = input;
od_tuners->ignore_nice_load = input;
/* we need to re-evaluate prev_cpu_idle */
for_each_online_cpu(j) {
@@ -430,7 +430,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
dbs_info = &per_cpu(od_cpu_dbs_info, j);
dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
if (od_tuners->ignore_nice)
if (od_tuners->ignore_nice_load)
dbs_info->cdbs.prev_cpu_nice =
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
@@ -461,7 +461,7 @@ show_store_one(od, sampling_rate);
show_store_one(od, io_is_busy);
show_store_one(od, up_threshold);
show_store_one(od, sampling_down_factor);
show_store_one(od, ignore_nice);
show_store_one(od, ignore_nice_load);
show_store_one(od, powersave_bias);
declare_show_sampling_rate_min(od);
@@ -469,7 +469,7 @@ gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(io_is_busy);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(ignore_nice);
gov_sys_pol_attr_rw(ignore_nice_load);
gov_sys_pol_attr_rw(powersave_bias);
gov_sys_pol_attr_ro(sampling_rate_min);
@@ -478,7 +478,7 @@ static struct attribute *dbs_attributes_gov_sys[] = {
&sampling_rate_gov_sys.attr,
&up_threshold_gov_sys.attr,
&sampling_down_factor_gov_sys.attr,
&ignore_nice_gov_sys.attr,
&ignore_nice_load_gov_sys.attr,
&powersave_bias_gov_sys.attr,
&io_is_busy_gov_sys.attr,
NULL
@@ -494,7 +494,7 @@ static struct attribute *dbs_attributes_gov_pol[] = {
&sampling_rate_gov_pol.attr,
&up_threshold_gov_pol.attr,
&sampling_down_factor_gov_pol.attr,
&ignore_nice_gov_pol.attr,
&ignore_nice_load_gov_pol.attr,
&powersave_bias_gov_pol.attr,
&io_is_busy_gov_pol.attr,
NULL
@@ -544,7 +544,7 @@ static int od_init(struct dbs_data *dbs_data)
}
tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
tuners->ignore_nice = 0;
tuners->ignore_nice_load = 0;
tuners->powersave_bias = default_powersave_bias;
tuners->io_is_busy = should_io_be_busy();
......
@@ -118,11 +118,6 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
clk_put(cpuclk);
return -EINVAL;
}
ret = clk_set_rate(cpuclk, rate);
if (ret) {
clk_put(cpuclk);
return ret;
}
/* clock table init */
for (i = 2;
@@ -130,6 +125,12 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
i++)
loongson2_clockmod_table[i].frequency = (rate * i) / 8;
ret = clk_set_rate(cpuclk, rate);
if (ret) {
clk_put(cpuclk);
return ret;
}
policy->cur = loongson2_cpufreq_get(policy->cpu);
cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0],
......
@@ -317,13 +317,20 @@ void acpi_pci_remove_bus(struct pci_bus *bus)
/* ACPI bus type */
static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
{
struct pci_dev * pci_dev;
u64 addr;
struct pci_dev *pci_dev = to_pci_dev(dev);
bool is_bridge;
u64 addr;
pci_dev = to_pci_dev(dev);
/*
* pci_is_bridge() is not suitable here, because pci_dev->subordinate
* is set only after acpi_pci_find_device() has been called for the
* given device.
*/
is_bridge = pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE
|| pci_dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
/* Please ref to ACPI spec for the syntax of _ADR */
addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
*handle = acpi_get_child(DEVICE_ACPI_HANDLE(dev->parent), addr);
*handle = acpi_find_child(ACPI_HANDLE(dev->parent), addr, is_bridge);
if (!*handle)
return -ENODEV;
return 0;
......
@@ -274,15 +274,12 @@ struct acpi_device_wakeup {
};
struct acpi_device_physical_node {
u8 node_id;
unsigned int node_id;
struct list_head node;
struct device *dev;
bool put_online:1;
};
/* set maximum of physical nodes to 32 for expansibility */
#define ACPI_MAX_PHYSICAL_NODE 32
/* Device */
struct acpi_device {
int device_type;
@@ -302,10 +299,9 @@ struct acpi_device {
struct acpi_driver *driver;
void *driver_data;
struct device dev;
u8 physical_node_count;
unsigned int physical_node_count;
struct list_head physical_node_list;
struct mutex physical_node_lock;
DECLARE_BITMAP(physical_node_id_bitmap, ACPI_MAX_PHYSICAL_NODE);
struct list_head power_dependent;
void (*remove)(struct acpi_device *);
};
@@ -445,7 +441,11 @@ struct acpi_pci_root {
};
/* helper */
acpi_handle acpi_get_child(acpi_handle, u64);
acpi_handle acpi_find_child(acpi_handle, u64, bool);
static inline acpi_handle acpi_get_child(acpi_handle handle, u64 addr)
{
return acpi_find_child(handle, addr, false);
}
int acpi_is_root_bridge(acpi_handle);
struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle);
#define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)ACPI_HANDLE(dev))
......