Commit dedd0c2a authored by Linus Torvalds

Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6: (23 commits)
  ACPI: delete acpi_processor_power_verify_c2()
  ACPI: allow C3 > 1000usec
  ACPI: enable C2 and Turbo-mode on Nehalem notebooks on A/C
  ACPI: power_meter: remove double kfree()
  ACPI: processor: restrict early _PDC to opt-in platforms
  ACPI: Fix unused variable warning in sbs.c
  acpi: make ACPI device id constant
  sony-laptop - fix using of uninitialized variable
  ACPI: Fix section mismatch error for acpi_early_processor_set_pdc()
  eeepc-laptop: disable wireless hotplug for 1201N
  eeepc-laptop: add hotplug_disable parameter
  eeepc-laptop: switch to using sparse keymap library
  eeepc-laptop: dmi blacklist to disable pci hotplug code
  eeepc-laptop: disable cpu speed control on EeePC 701
  ACPI: don't cond_resched if irq is disabled
  ACPI: Remove unnecessary cast.
  ACPI: Advertise to BIOS in _OSC: _OST on _PPC changes
  ACPI: EC: Add wait for irq storm
  ACPI: SBS: Move SBS HC callback to faster Notify queue
  x86, ACPI: delete acpi_boot_table_init() return value
  ...
parents 15e551e5 418521de
......@@ -1529,16 +1529,10 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
* if acpi_blacklisted() acpi_disabled = 1;
* acpi_irq_model=...
* ...
*
* return value: (currently ignored)
* 0: success
* !0: failure
*/
int __init acpi_boot_table_init(void)
void __init acpi_boot_table_init(void)
{
int error;
dmi_check_system(acpi_dmi_table);
/*
......@@ -1546,15 +1540,14 @@ int __init acpi_boot_table_init(void)
* One exception: acpi=ht continues far enough to enumerate LAPICs
*/
if (acpi_disabled && !acpi_ht)
return 1;
return;
/*
* Initialize the ACPI boot-time table parser.
*/
error = acpi_table_init();
if (error) {
if (acpi_table_init()) {
disable_acpi();
return error;
return;
}
acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
......@@ -1562,18 +1555,15 @@ int __init acpi_boot_table_init(void)
/*
* blacklist may disable ACPI entirely
*/
error = acpi_blacklisted();
if (error) {
if (acpi_blacklisted()) {
if (acpi_force) {
printk(KERN_WARNING PREFIX "acpi=force override\n");
} else {
printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
disable_acpi();
return error;
return;
}
}
return 0;
}
int __init early_acpi_boot_init(void)
......
......@@ -208,7 +208,7 @@ static int power_saving_thread(void *data)
* the mechanism only works when all CPUs have RT task running,
* as if one CPU hasn't RT task, RT task from other CPUs will
* borrow CPU time from this CPU and cause RT task use > 95%
* CPU time. To make 'avoid staration' work, takes a nap here.
* CPU time. To make 'avoid starvation' work, takes a nap here.
*/
if (do_sleep)
schedule_timeout_killable(HZ * idle_pct / 100);
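For a rough sense of scale of the nap above: assuming HZ is 1000 and taking an illustrative idle_pct of 5 (an assumption for this example; the actual value is set elsewhere in acpi_pad.c and is not visible in this hunk), schedule_timeout_killable(HZ * idle_pct / 100) sleeps 1000 * 5 / 100 = 50 jiffies, roughly 50 ms per loop iteration, which is what keeps the RT thread below the scheduler's default 95% RT runtime limit mentioned in the comment.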
......@@ -222,14 +222,18 @@ static struct task_struct *ps_tsks[NR_CPUS];
static unsigned int ps_tsk_num;
static int create_power_saving_task(void)
{
int rc = -ENOMEM;
ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
(void *)(unsigned long)ps_tsk_num,
"power_saving/%d", ps_tsk_num);
if (ps_tsks[ps_tsk_num]) {
rc = IS_ERR(ps_tsks[ps_tsk_num]) ? PTR_ERR(ps_tsks[ps_tsk_num]) : 0;
if (!rc)
ps_tsk_num++;
return 0;
}
return -EINVAL;
else
ps_tsks[ps_tsk_num] = NULL;
return rc;
}
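The rewritten create_power_saving_task() above works because kthread_run() reports failure through an ERR_PTR-encoded pointer, never NULL, so the old if (ps_tsks[ps_tsk_num]) test could not catch an error. A minimal userspace sketch of that convention, with the ERR_PTR/IS_ERR helpers written out the way the kernel's err.h defines them and a stand-in for kthread_run() (fake_kthread_run and dummy_task are invented for illustration):

#include <stdio.h>
#include <errno.h>

/* Error codes live in the last page of the address space, so a pointer
 * and a negative errno can share one return value. */
#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)  { return IS_ERR_VALUE((unsigned long)ptr); }

/* Stand-in for kthread_run(): fails with ERR_PTR(-ENOMEM), never NULL. */
static void *fake_kthread_run(int fail)
{
	static int dummy_task;
	return fail ? ERR_PTR(-ENOMEM) : (void *)&dummy_task;
}

int main(void)
{
	void *tsk = fake_kthread_run(1);

	if (IS_ERR(tsk))
		printf("kthread_run failed: %ld\n", PTR_ERR(tsk)); /* prints -12 */
	else
		printf("task started\n");
	return 0;
}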
static void destroy_power_saving_task(void)
......@@ -237,6 +241,7 @@ static void destroy_power_saving_task(void)
if (ps_tsk_num > 0) {
ps_tsk_num--;
kthread_stop(ps_tsks[ps_tsk_num]);
ps_tsks[ps_tsk_num] = NULL;
}
}
......@@ -253,7 +258,7 @@ static void set_power_saving_task_num(unsigned int num)
}
}
static int acpi_pad_idle_cpus(unsigned int num_cpus)
static void acpi_pad_idle_cpus(unsigned int num_cpus)
{
get_online_cpus();
......@@ -261,7 +266,6 @@ static int acpi_pad_idle_cpus(unsigned int num_cpus)
set_power_saving_task_num(num_cpus);
put_online_cpus();
return 0;
}
static uint32_t acpi_pad_idle_cpus_num(void)
......@@ -369,19 +373,21 @@ static void acpi_pad_remove_sysfs(struct acpi_device *device)
static int acpi_pad_pur(acpi_handle handle, int *num_cpus)
{
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
acpi_status status;
union acpi_object *package;
int rev, num, ret = -EINVAL;
status = acpi_evaluate_object(handle, "_PUR", NULL, &buffer);
if (ACPI_FAILURE(status))
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
return -EINVAL;
if (!buffer.length || !buffer.pointer)
return -EINVAL;
package = buffer.pointer;
if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2)
goto out;
rev = package->package.elements[0].integer.value;
num = package->package.elements[1].integer.value;
if (rev != 1)
if (rev != 1 || num < 0)
goto out;
*num_cpus = num;
ret = 0;
......@@ -410,7 +416,7 @@ static void acpi_pad_ost(acpi_handle handle, int stat,
static void acpi_pad_handle_notify(acpi_handle handle)
{
int num_cpus, ret;
int num_cpus;
uint32_t idle_cpus;
mutex_lock(&isolated_cpus_lock);
......@@ -418,12 +424,9 @@ static void acpi_pad_handle_notify(acpi_handle handle)
mutex_unlock(&isolated_cpus_lock);
return;
}
ret = acpi_pad_idle_cpus(num_cpus);
acpi_pad_idle_cpus(num_cpus);
idle_cpus = acpi_pad_idle_cpus_num();
if (!ret)
acpi_pad_ost(handle, 0, idle_cpus);
else
acpi_pad_ost(handle, 1, 0);
acpi_pad_ost(handle, 0, idle_cpus);
mutex_unlock(&isolated_cpus_lock);
}
......
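The acpi_pad_pur() change above keeps to the usual ACPICA pattern for evaluating a control method that returns a package: let ACPICA allocate the result buffer, sanity-check the package layout, and free the buffer on every exit. A condensed kernel-style sketch of that pattern (same handle and method name as the hunk; the kfree() of the ACPICA-allocated buffer happens at the out: label that this diff elides, so treat the cleanup placement here as an assumption):

static int evaluate_pur(acpi_handle handle, int *num_cpus)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *package;
	int ret = -EINVAL;

	/* ACPI_ALLOCATE_BUFFER: ACPICA allocates buffer.pointer for us. */
	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
		return -EINVAL;
	if (!buffer.length || !buffer.pointer)
		return -EINVAL;

	package = buffer.pointer;

	/* _PUR is a two-element package: { revision, number of idle CPUs }. */
	if (package->type == ACPI_TYPE_PACKAGE &&
	    package->package.count == 2 &&
	    package->package.elements[0].integer.value == 1) {
		*num_cpus = package->package.elements[1].integer.value;
		ret = 0;
	}

	kfree(buffer.pointer);	/* always release the returned buffer */
	return ret;
}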
......@@ -490,9 +490,14 @@ static void acpi_bus_osc_support(void)
capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
capbuf[OSC_SUPPORT_TYPE] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */
#ifdef CONFIG_ACPI_PROCESSOR_AGGREGATOR
#if defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) ||\
defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE)
capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PAD_SUPPORT;
#endif
#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT;
#endif
if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
return;
if (ACPI_SUCCESS(acpi_run_osc(handle, &context)))
......
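The defined(CONFIG_X) || defined(CONFIG_X_MODULE) test above is how built-in-or-module was spelled before the IS_ENABLED() helper existed. For comparison only, a sketch of the same _OSC capability setup using IS_ENABLED() from linux/kconfig.h, which evaluates to 1 for both =y and =m (this helper is not available in the kernel this patch targets):

	if (IS_ENABLED(CONFIG_ACPI_PROCESSOR_AGGREGATOR))
		capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PAD_SUPPORT;
	if (IS_ENABLED(CONFIG_ACPI_PROCESSOR))
		capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT;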
......@@ -201,14 +201,13 @@ static void advance_transaction(struct acpi_ec *ec, u8 status)
spin_unlock_irqrestore(&ec->curr_lock, flags);
}
static void acpi_ec_gpe_query(void *ec_cxt);
static int acpi_ec_sync_query(struct acpi_ec *ec);
static int ec_check_sci(struct acpi_ec *ec, u8 state)
static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
{
if (state & ACPI_EC_FLAG_SCI) {
if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
return acpi_os_execute(OSL_EC_BURST_HANDLER,
acpi_ec_gpe_query, ec);
return acpi_ec_sync_query(ec);
}
return 0;
}
......@@ -249,11 +248,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
{
unsigned long tmp;
int ret = 0;
pr_debug(PREFIX "transaction start\n");
/* disable GPE during transaction if storm is detected */
if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
acpi_disable_gpe(NULL, ec->gpe);
}
if (EC_FLAGS_MSI)
udelay(ACPI_EC_MSI_UDELAY);
/* start transaction */
......@@ -265,20 +259,9 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
spin_unlock_irqrestore(&ec->curr_lock, tmp);
ret = ec_poll(ec);
pr_debug(PREFIX "transaction end\n");
spin_lock_irqsave(&ec->curr_lock, tmp);
ec->curr = NULL;
spin_unlock_irqrestore(&ec->curr_lock, tmp);
if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
/* check if we received SCI during transaction */
ec_check_sci(ec, acpi_ec_read_status(ec));
/* it is safe to enable GPE outside of transaction */
acpi_enable_gpe(NULL, ec->gpe);
} else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
pr_info(PREFIX "GPE storm detected, "
"transactions will use polling mode\n");
set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
}
return ret;
}
......@@ -321,7 +304,26 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
status = -ETIME;
goto end;
}
pr_debug(PREFIX "transaction start\n");
/* disable GPE during transaction if storm is detected */
if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
acpi_disable_gpe(NULL, ec->gpe);
}
status = acpi_ec_transaction_unlocked(ec, t);
/* check if we received SCI during transaction */
ec_check_sci_sync(ec, acpi_ec_read_status(ec));
if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
msleep(1);
/* it is safe to enable GPE outside of transaction */
acpi_enable_gpe(NULL, ec->gpe);
} else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
pr_info(PREFIX "GPE storm detected, "
"transactions will use polling mode\n");
set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
}
pr_debug(PREFIX "transaction end\n");
end:
if (ec->global_lock)
acpi_release_global_lock(glk);
......@@ -443,7 +445,7 @@ int ec_transaction(u8 command,
EXPORT_SYMBOL(ec_transaction);
static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 * data)
{
int result;
u8 d;
......@@ -452,20 +454,16 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
.wlen = 0, .rlen = 1};
if (!ec || !data)
return -EINVAL;
/*
* Query the EC to find out which _Qxx method we need to evaluate.
* Note that successful completion of the query causes the ACPI_EC_SCI
* bit to be cleared (and thus clearing the interrupt source).
*/
result = acpi_ec_transaction(ec, &t);
result = acpi_ec_transaction_unlocked(ec, &t);
if (result)
return result;
if (!d)
return -ENODATA;
*data = d;
return 0;
}
......@@ -509,43 +507,79 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
static void acpi_ec_gpe_query(void *ec_cxt)
static void acpi_ec_run(void *cxt)
{
struct acpi_ec *ec = ec_cxt;
u8 value = 0;
struct acpi_ec_query_handler *handler, copy;
if (!ec || acpi_ec_query(ec, &value))
struct acpi_ec_query_handler *handler = cxt;
if (!handler)
return;
mutex_lock(&ec->lock);
pr_debug(PREFIX "start query execution\n");
if (handler->func)
handler->func(handler->data);
else if (handler->handle)
acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
pr_debug(PREFIX "stop query execution\n");
kfree(handler);
}
static int acpi_ec_sync_query(struct acpi_ec *ec)
{
u8 value = 0;
int status;
struct acpi_ec_query_handler *handler, *copy;
if ((status = acpi_ec_query_unlocked(ec, &value)))
return status;
list_for_each_entry(handler, &ec->list, node) {
if (value == handler->query_bit) {
/* have custom handler for this bit */
memcpy(&copy, handler, sizeof(copy));
mutex_unlock(&ec->lock);
if (copy.func) {
copy.func(copy.data);
} else if (copy.handle) {
acpi_evaluate_object(copy.handle, NULL, NULL, NULL);
}
return;
copy = kmalloc(sizeof(*handler), GFP_KERNEL);
if (!copy)
return -ENOMEM;
memcpy(copy, handler, sizeof(*copy));
pr_debug(PREFIX "push query execution (0x%2x) on queue\n", value);
return acpi_os_execute((copy->func) ?
OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
acpi_ec_run, copy);
}
}
return 0;
}
static void acpi_ec_gpe_query(void *ec_cxt)
{
struct acpi_ec *ec = ec_cxt;
if (!ec)
return;
mutex_lock(&ec->lock);
acpi_ec_sync_query(ec);
mutex_unlock(&ec->lock);
}
static void acpi_ec_gpe_query(void *ec_cxt);
static int ec_check_sci(struct acpi_ec *ec, u8 state)
{
if (state & ACPI_EC_FLAG_SCI) {
if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
pr_debug(PREFIX "push gpe query to the queue\n");
return acpi_os_execute(OSL_NOTIFY_HANDLER,
acpi_ec_gpe_query, ec);
}
}
return 0;
}
static u32 acpi_ec_gpe_handler(void *data)
{
struct acpi_ec *ec = data;
u8 status;
pr_debug(PREFIX "~~~> interrupt\n");
status = acpi_ec_read_status(ec);
advance_transaction(ec, status);
if (ec_transaction_done(ec) && (status & ACPI_EC_FLAG_IBF) == 0)
advance_transaction(ec, acpi_ec_read_status(ec));
if (ec_transaction_done(ec) &&
(acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) {
wake_up(&ec->wait);
ec_check_sci(ec, status);
ec_check_sci(ec, acpi_ec_read_status(ec));
}
return ACPI_INTERRUPT_HANDLED;
}
......
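The EC rework above splits query handling in two: acpi_ec_sync_query() runs with ec->lock held, finds the matching handler, and only queues a heap copy of it through acpi_os_execute(), so the _Qxx method or driver callback later runs from acpi_ec_run() without the EC mutex held. A stripped-down sketch of that defer-a-copy pattern (names here are invented; the real code picks OSL_NOTIFY_HANDLER or OSL_GPE_HANDLER depending on whether a driver callback is registered, which is simplified away below):

/* Runs later from ACPICA's work queue, outside ec->lock. */
static void deferred_query(void *cxt)
{
	struct acpi_ec_query_handler *h = cxt;

	if (h->func)
		h->func(h->data);		/* driver-registered callback */
	else if (h->handle)
		acpi_evaluate_object(h->handle, NULL, NULL, NULL); /* _Qxx */
	kfree(h);				/* free the copy */
}

/* Called with ec->lock held: copy the handler so the list may change,
 * or the handler be unregistered, before the deferred work runs. */
static acpi_status defer_query(struct acpi_ec_query_handler *handler)
{
	struct acpi_ec_query_handler *copy;

	copy = kmemdup(handler, sizeof(*copy), GFP_KERNEL);
	if (!copy)
		return AE_NO_MEMORY;
	return acpi_os_execute(OSL_NOTIFY_HANDLER, deferred_query, copy);
}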
......@@ -56,7 +56,7 @@ ACPI_MODULE_NAME("pci_link");
static int acpi_pci_link_add(struct acpi_device *device);
static int acpi_pci_link_remove(struct acpi_device *device, int type);
static struct acpi_device_id link_device_ids[] = {
static const struct acpi_device_id link_device_ids[] = {
{"PNP0C0F", 0},
{"", 0},
};
......
......@@ -46,7 +46,7 @@ static int acpi_pci_root_add(struct acpi_device *device);
static int acpi_pci_root_remove(struct acpi_device *device, int type);
static int acpi_pci_root_start(struct acpi_device *device);
static struct acpi_device_id root_device_ids[] = {
static const struct acpi_device_id root_device_ids[] = {
{"PNP0A03", 0},
{"", 0},
};
......
......@@ -65,7 +65,7 @@ static int acpi_power_remove(struct acpi_device *device, int type);
static int acpi_power_resume(struct acpi_device *device);
static int acpi_power_open_fs(struct inode *inode, struct file *file);
static struct acpi_device_id power_device_ids[] = {
static const struct acpi_device_id power_device_ids[] = {
{ACPI_POWER_HID, 0},
{"", 0},
};
......
......@@ -64,7 +64,7 @@ static int can_cap_in_hardware(void)
return force_cap_on || cap_in_hardware;
}
static struct acpi_device_id power_meter_ids[] = {
static const struct acpi_device_id power_meter_ids[] = {
{"ACPI000D", 0},
{"", 0},
};
......@@ -534,6 +534,7 @@ static void remove_domain_devices(struct acpi_power_meter_resource *resource)
kfree(resource->domain_devices);
kobject_put(resource->holders_dir);
resource->num_domain_devices = 0;
}
static int read_domain_devices(struct acpi_power_meter_resource *resource)
......@@ -740,7 +741,6 @@ static int setup_attrs(struct acpi_power_meter_resource *resource)
return res;
error:
remove_domain_devices(resource);
remove_attrs(resource);
return res;
}
......
......@@ -305,6 +305,28 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;
/*
* FADT specified C2 latency must be less than or equal to
* 100 microseconds.
*/
if (acpi_gbl_FADT.C2latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"C2 latency too large [%d]\n", acpi_gbl_FADT.C2latency));
/* invalidate C2 */
pr->power.states[ACPI_STATE_C2].address = 0;
}
/*
* FADT supplied C3 latency must be less than or equal to
* 1000 microseconds.
*/
if (acpi_gbl_FADT.C3latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"C3 latency too large [%d]\n", acpi_gbl_FADT.C3latency));
/* invalidate C3 */
pr->power.states[ACPI_STATE_C3].address = 0;
}
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"lvl2[0x%08x] lvl3[0x%08x]\n",
pr->power.states[ACPI_STATE_C2].address,
......@@ -494,33 +516,6 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
return status;
}
static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{
if (!cx->address)
return;
/*
* C2 latency must be less than or equal to 100
* microseconds.
*/
else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"latency too large [%d]\n", cx->latency));
return;
}
/*
* Otherwise we've met all of our C2 requirements.
* Normalize the C2 latency to expidite policy
*/
cx->valid = 1;
cx->latency_ticks = cx->latency;
return;
}
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
struct acpi_processor_cx *cx)
{
......@@ -531,16 +526,6 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
if (!cx->address)
return;
/*
* C3 latency must be less than or equal to 1000
* microseconds.
*/
else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"latency too large [%d]\n", cx->latency));
return;
}
/*
* PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
* DMA transfers are used by any ISA device to avoid livelock.
......@@ -629,7 +614,10 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
break;
case ACPI_STATE_C2:
acpi_processor_power_verify_c2(cx);
if (!cx->address)
break;
cx->valid = 1;
cx->latency_ticks = cx->latency; /* Normalize latency */
break;
case ACPI_STATE_C3:
......
......@@ -144,6 +144,29 @@ void acpi_processor_set_pdc(acpi_handle handle)
}
EXPORT_SYMBOL_GPL(acpi_processor_set_pdc);
static int early_pdc_optin;
static int set_early_pdc_optin(const struct dmi_system_id *id)
{
early_pdc_optin = 1;
return 0;
}
static struct dmi_system_id __cpuinitdata early_pdc_optin_table[] = {
{
set_early_pdc_optin, "HP Envy", {
DMI_MATCH(DMI_BIOS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Envy") }, NULL},
{
set_early_pdc_optin, "HP Pavilion dv6", {
DMI_MATCH(DMI_BIOS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6") }, NULL},
{
set_early_pdc_optin, "HP Pavilion dv7", {
DMI_MATCH(DMI_BIOS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv7") }, NULL},
{},
};
static acpi_status
early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv)
{
......@@ -151,7 +174,7 @@ early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
}
void acpi_early_processor_set_pdc(void)
void __init acpi_early_processor_set_pdc(void)
{
/*
* Check whether the system is DMI table. If yes, OSPM
......@@ -159,6 +182,13 @@ void acpi_early_processor_set_pdc(void)
*/
dmi_check_system(processor_idle_dmi_table);
/*
* Allow systems to opt-in to early _PDC evaluation.
*/
dmi_check_system(early_pdc_optin_table);
if (!early_pdc_optin)
return;
acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX,
early_init_pdc, NULL, NULL, NULL);
......
......@@ -443,8 +443,7 @@ struct thermal_cooling_device_ops processor_cooling_ops = {
#ifdef CONFIG_ACPI_PROCFS
static int acpi_processor_limit_seq_show(struct seq_file *seq, void *offset)
{
struct acpi_processor *pr = (struct acpi_processor *)seq->private;
struct acpi_processor *pr = seq->private;
if (!pr)
goto end;
......
......@@ -822,7 +822,10 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
static void acpi_battery_remove(struct acpi_sbs *sbs, int id)
{
#if defined(CONFIG_ACPI_SYSFS_POWER) || defined(CONFIG_ACPI_PROCFS_POWER)
struct acpi_battery *battery = &sbs->battery[id];
#endif
#ifdef CONFIG_ACPI_SYSFS_POWER
if (battery->bat.dev) {
if (battery->have_sysfs_alarm)
......
......@@ -242,7 +242,7 @@ static int smbus_alarm(void *context)
case ACPI_SBS_CHARGER:
case ACPI_SBS_MANAGER:
case ACPI_SBS_BATTERY:
acpi_os_execute(OSL_GPE_HANDLER,
acpi_os_execute(OSL_NOTIFY_HANDLER,
acpi_smbus_callback, hc);
default:;
}
......
......@@ -78,6 +78,13 @@ MODULE_LICENSE("GPL");
static int brightness_switch_enabled = 1;
module_param(brightness_switch_enabled, bool, 0644);
/*
* By default, we don't allow duplicate ACPI video bus devices
* under the same VGA controller
*/
static int allow_duplicates;
module_param(allow_duplicates, bool, 0644);
static int register_count = 0;
static int acpi_video_bus_add(struct acpi_device *device);
static int acpi_video_bus_remove(struct acpi_device *device, int type);
......@@ -2239,11 +2246,47 @@ static int acpi_video_resume(struct acpi_device *device)
return AE_OK;
}
static acpi_status
acpi_video_bus_match(acpi_handle handle, u32 level, void *context,
void **return_value)
{
struct acpi_device *device = context;
struct acpi_device *sibling;
int result;
if (handle == device->handle)
return AE_CTRL_TERMINATE;
result = acpi_bus_get_device(handle, &sibling);
if (result)
return AE_OK;
if (!strcmp(acpi_device_name(sibling), ACPI_VIDEO_BUS_NAME))
return AE_ALREADY_EXISTS;
return AE_OK;
}
static int acpi_video_bus_add(struct acpi_device *device)
{
struct acpi_video_bus *video;
struct input_dev *input;
int error;
acpi_status status;
status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
device->parent->handle, 1,
acpi_video_bus_match, NULL,
device, NULL);
if (status == AE_ALREADY_EXISTS) {
printk(KERN_WARNING FW_BUG
"Duplicate ACPI video bus devices for the"
" same VGA controller, please try module "
"parameter \"video.allow_duplicates=1\""
"if the current driver doesn't work.\n");
if (!allow_duplicates)
return -ENODEV;
}
video = kzalloc(sizeof(struct acpi_video_bus), GFP_KERNEL);
if (!video)
......
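The duplicate-video-bus detection above relies on the return-status contract of ACPICA namespace walks, summarized below together with the call from the hunk (behavior as implemented by ACPICA's acpi_ns_walk_namespace()):

/*
 * Pre-order callback return values for acpi_walk_namespace():
 *
 *   AE_OK             - keep walking
 *   AE_CTRL_DEPTH     - skip the children of this node, keep walking
 *   AE_CTRL_TERMINATE - stop the walk; the caller sees AE_OK
 *   anything else     - stop the walk; the caller sees that status
 *
 * acpi_video_bus_match() uses the last two: AE_CTRL_TERMINATE once the
 * walk reaches the device being added (no duplicate found first), and
 * AE_ALREADY_EXISTS when another ACPI video bus sits under the same
 * parent, which acpi_video_bus_add() then checks for explicitly.
 */
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, device->parent->handle,
			     1,	/* max_depth 1: immediate children only */
			     acpi_video_bus_match, NULL, device, NULL);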
......@@ -364,6 +364,7 @@ config EEEPC_LAPTOP
select HWMON
select LEDS_CLASS
select NEW_LEDS
select INPUT_SPARSEKMAP
---help---
This driver supports the Fn-Fx keys on Eee PC laptops.
......
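For context on the new INPUT_SPARSEKMAP dependency: the sparse keymap library replaces a driver's open-coded keycode tables with a small shared API (the eeepc-laptop.c changes that use it are not expanded in this view). A minimal usage sketch with an invented two-entry keymap, roughly what a platform driver does:

#include <linux/input.h>
#include <linux/input/sparse-keymap.h>

/* Hypothetical keymap: raw scancode -> input event, terminated by KE_END. */
static const struct key_entry demo_keymap[] = {
	{ KE_KEY, 0x10, { KEY_WLAN } },
	{ KE_KEY, 0x2a, { KEY_BRIGHTNESSUP } },
	{ KE_END, 0 }
};

static int demo_input_setup(struct input_dev *input)
{
	int error;

	error = sparse_keymap_setup(input, demo_keymap, NULL);
	if (error)
		return error;
	return input_register_device(input);
}

/* Called from the driver's event handler with a raw scancode. */
static void demo_report(struct input_dev *input, unsigned int scancode)
{
	if (!sparse_keymap_report_event(input, scancode, 1, true))
		pr_info("unknown scancode 0x%x\n", scancode);
}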
This diff is collapsed.
......@@ -1201,9 +1201,12 @@ static void sony_nc_rfkill_setup(struct acpi_device *device)
/* the buffer is filled with magic numbers describing the devices
* available, 0xff terminates the enumeration
*/
while ((dev_code = *(device_enum->buffer.pointer + i)) != 0xff &&
i < device_enum->buffer.length) {
i++;
for (i = 0; i < device_enum->buffer.length; i++) {
dev_code = *(device_enum->buffer.pointer + i);
if (dev_code == 0xff)
break;
dprintk("Radio devices, looking at 0x%.2x\n", dev_code);
if (dev_code == 0 && !sony_rfkill_devices[SONY_WIFI])
......
......@@ -152,7 +152,7 @@ static inline void *acpi_os_acquire_object(acpi_cache_t * cache)
#include <linux/hardirq.h>
#define ACPI_PREEMPTION_POINT() \
do { \
if (!in_atomic_preempt_off()) \
if (!in_atomic_preempt_off() && !irqs_disabled()) \
cond_resched(); \
} while (0)
......
......@@ -80,7 +80,7 @@ char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
void __acpi_unmap_table(char *map, unsigned long size);
int early_acpi_boot_init(void);
int acpi_boot_init (void);
int acpi_boot_table_init (void);
void acpi_boot_table_init (void);
int acpi_mps_check (void);
int acpi_numa_init (void);
......@@ -321,9 +321,9 @@ static inline int acpi_boot_init(void)
return 0;
}
static inline int acpi_boot_table_init(void)
static inline void acpi_boot_table_init(void)
{
return 0;
return;
}
static inline int acpi_mps_check(void)
......