Commit 343ccb04 authored by Rafael J. Wysocki

Merge branches 'acpi-scan', 'acpi-tables', 'acpi-ec' and 'acpi-assorted'

* acpi-scan:
  ACPI / scan: use kstrdup_const() in acpi_add_id()
  ACPI / scan: constify struct acpi_hardware_id::id
  ACPI / scan: constify first argument of struct acpi_scan_handler::match

* acpi-tables:
  ACPI / tables: test the correct variable
  x86, ACPI: Handle apic/x2apic entries in MADT in correct order
  ACPI / tables: Add acpi_subtable_proc to ACPI table parsers

* acpi-ec:
  ACPI / EC: Fix a race issue in acpi_ec_guard_event()
  ACPI / EC: Fix query handler related issues

* acpi-assorted:
  ACPI: change acpi_sleep_proc_init() to return void
  ACPI: change init_acpi_device_notify() to return void
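A note for orientation before the hunks below: the acpi-tables changes add struct acpi_subtable_proc and acpi_table_parse_entries_array(), which let a single pass over a table dispatch each subtable to a per-type handler in table order. A minimal sketch of how a caller uses the new interface, modelled on the x86 MADT hunk further down (the function name parse_lapic_entries_sketch is made up here, and error handling is abbreviated):

static int __init parse_lapic_entries_sketch(void)
{
	struct acpi_subtable_proc madt_proc[2];
	int ret;

	memset(madt_proc, 0, sizeof(madt_proc));
	madt_proc[0].id = ACPI_MADT_TYPE_LOCAL_APIC;
	madt_proc[0].handler = acpi_parse_lapic;
	madt_proc[1].id = ACPI_MADT_TYPE_LOCAL_X2APIC;
	madt_proc[1].handler = acpi_parse_x2apic;

	/* Single pass over the MADT: each subtable is dispatched to the
	 * handler whose .id matches its type, in the order the entries
	 * appear in the table. */
	ret = acpi_table_parse_entries_array(ACPI_SIG_MADT,
			sizeof(struct acpi_table_madt),
			madt_proc, ARRAY_SIZE(madt_proc),
			MAX_LOCAL_APIC);
	if (ret < 0)
		return ret;

	/* Per-type match counts come back in each element's .count. */
	return madt_proc[0].count + madt_proc[1].count;
}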
@@ -976,6 +976,8 @@ static int __init acpi_parse_madt_lapic_entries(void)
{
int count;
int x2count = 0;
int ret;
struct acpi_subtable_proc madt_proc[2];
if (!cpu_has_apic)
return -ENODEV;
@@ -999,10 +1001,22 @@ static int __init acpi_parse_madt_lapic_entries(void)
acpi_parse_sapic, MAX_LOCAL_APIC);
if (!count) {
x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC,
acpi_parse_x2apic, MAX_LOCAL_APIC);
count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC,
acpi_parse_lapic, MAX_LOCAL_APIC);
memset(madt_proc, 0, sizeof(madt_proc));
madt_proc[0].id = ACPI_MADT_TYPE_LOCAL_APIC;
madt_proc[0].handler = acpi_parse_lapic;
madt_proc[1].id = ACPI_MADT_TYPE_LOCAL_X2APIC;
madt_proc[1].handler = acpi_parse_x2apic;
ret = acpi_table_parse_entries_array(ACPI_SIG_MADT,
sizeof(struct acpi_table_madt),
madt_proc, ARRAY_SIZE(madt_proc), MAX_LOCAL_APIC);
if (ret < 0) {
printk(KERN_ERR PREFIX
"Error parsing LAPIC/X2APIC entries\n");
return ret;
}
x2count = madt_proc[0].count;
count = madt_proc[1].count;
}
if (!count && !x2count) {
printk(KERN_ERR PREFIX "No LAPIC entries present\n");
@@ -316,7 +316,7 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = {
{""},
};
static bool matching_id(char *idstr, char *list_id)
static bool matching_id(const char *idstr, const char *list_id)
{
int i;
@@ -333,7 +333,7 @@ static bool matching_id(char *idstr, char *list_id)
return true;
}
static bool acpi_pnp_match(char *idstr, const struct acpi_device_id **matchid)
static bool acpi_pnp_match(const char *idstr, const struct acpi_device_id **matchid)
{
const struct acpi_device_id *devid;
@@ -441,17 +441,31 @@ static void acpi_ec_complete_query(struct acpi_ec *ec)
static bool acpi_ec_guard_event(struct acpi_ec *ec)
{
bool guarded = true;
unsigned long flags;
spin_lock_irqsave(&ec->lock, flags);
/*
* If firmware SCI_EVT clearing timing is "event", we actually
* don't know when the SCI_EVT will be cleared by firmware after
* evaluating _Qxx, so we need to re-check SCI_EVT after waiting an
* acceptable period.
*
* The guarding period begins when EC_FLAGS_QUERY_PENDING is
* flagged, which means the SCI_EVT check has just been performed.
* But if the current transaction is ACPI_EC_COMMAND_QUERY, the
* guarding has already been performed (via EC_FLAGS_QUERY_GUARDING)
* and should not be applied again, so that the ACPI_EC_COMMAND_QUERY
* transaction can transition to the ACPI_EC_COMMAND_POLL state
* immediately.
*/
if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY ||
!test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags) ||
(ec->curr && ec->curr->command == ACPI_EC_COMMAND_QUERY))
return false;
/*
* Postpone the query submission to allow the firmware to proceed;
* we shouldn't check SCI_EVT before the firmware reflags it.
*/
return true;
guarded = false;
spin_unlock_irqrestore(&ec->lock, flags);
return guarded;
}
static int ec_transaction_polled(struct acpi_ec *ec)
@@ -597,6 +611,7 @@ static int ec_guard(struct acpi_ec *ec)
unsigned long guard = usecs_to_jiffies(ec_polling_guard);
unsigned long timeout = ec->timestamp + guard;
/* Ensure guarding period before polling EC status */
do {
if (ec_busy_polling) {
/* Perform busy polling */
@@ -606,11 +621,13 @@ static int ec_guard(struct acpi_ec *ec)
} else {
/*
* Perform wait polling
*
* For SCI_EVT clearing timing of "event",
* performing guarding before re-checking the
* SCI_EVT. Otherwise, such guarding is not needed
* due to the old practices.
* 1. Wait for the transaction to be completed by the
*    GPE handler after the transaction enters the
*    ACPI_EC_COMMAND_POLL state.
* 2. Special guarding logic is also required for
*    event clearing mode "event" before the
*    transaction enters the ACPI_EC_COMMAND_POLL
*    state.
*/
if (!ec_transaction_polled(ec) &&
!acpi_ec_guard_event(ec))
@@ -620,7 +637,6 @@ static int ec_guard(struct acpi_ec *ec)
guard))
return 0;
}
/* Guard the register accesses for the polling modes */
} while (time_before(jiffies, timeout));
return -ETIME;
}
@@ -929,6 +945,23 @@ acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
return handler;
}
static struct acpi_ec_query_handler *
acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
{
struct acpi_ec_query_handler *handler;
bool found = false;
mutex_lock(&ec->mutex);
list_for_each_entry(handler, &ec->list, node) {
if (value == handler->query_bit) {
found = true;
break;
}
}
mutex_unlock(&ec->mutex);
return found ? acpi_ec_get_query_handler(handler) : NULL;
}
static void acpi_ec_query_handler_release(struct kref *kref)
{
struct acpi_ec_query_handler *handler =
@@ -964,14 +997,15 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
}
EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
bool remove_all, u8 query_bit)
{
struct acpi_ec_query_handler *handler, *tmp;
LIST_HEAD(free_list);
mutex_lock(&ec->mutex);
list_for_each_entry_safe(handler, tmp, &ec->list, node) {
if (query_bit == handler->query_bit) {
if (remove_all || query_bit == handler->query_bit) {
list_del_init(&handler->node);
list_add(&handler->node, &free_list);
}
@@ -980,6 +1014,11 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
list_for_each_entry_safe(handler, tmp, &free_list, node)
acpi_ec_put_query_handler(handler);
}
void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
{
acpi_ec_remove_query_handlers(ec, false, query_bit);
}
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
static struct acpi_ec_query *acpi_ec_create_query(u8 *pval)
@@ -1025,7 +1064,6 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
{
u8 value = 0;
int result;
struct acpi_ec_query_handler *handler;
struct acpi_ec_query *q;
q = acpi_ec_create_query(&value);
@@ -1043,25 +1081,26 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
if (result)
goto err_exit;
mutex_lock(&ec->mutex);
q->handler = acpi_ec_get_query_handler_by_value(ec, value);
if (!q->handler) {
result = -ENODATA;
list_for_each_entry(handler, &ec->list, node) {
if (value == handler->query_bit) {
result = 0;
q->handler = acpi_ec_get_query_handler(handler);
ec_dbg_evt("Query(0x%02x) scheduled",
q->handler->query_bit);
goto err_exit;
}
/*
* It is reported that _Qxx are evaluated in a
* parallel way on Windows:
* It is reported that _Qxx are evaluated in a parallel way on
* Windows:
* https://bugzilla.kernel.org/show_bug.cgi?id=94411
*
* Put this log entry before schedule_work() so that it appears
* before any other log entries emitted during the execution of
* the work item.
*/
if (!schedule_work(&q->work))
ec_dbg_evt("Query(0x%02x) scheduled", value);
if (!schedule_work(&q->work)) {
ec_dbg_evt("Query(0x%02x) overlapped", value);
result = -EBUSY;
break;
}
}
mutex_unlock(&ec->mutex);
err_exit:
if (result && q)
@@ -1354,19 +1393,13 @@ static int acpi_ec_add(struct acpi_device *device)
static int acpi_ec_remove(struct acpi_device *device)
{
struct acpi_ec *ec;
struct acpi_ec_query_handler *handler, *tmp;
if (!device)
return -EINVAL;
ec = acpi_driver_data(device);
ec_remove_handlers(ec);
mutex_lock(&ec->mutex);
list_for_each_entry_safe(handler, tmp, &ec->list, node) {
list_del(&handler->node);
kfree(handler);
}
mutex_unlock(&ec->mutex);
acpi_ec_remove_query_handlers(ec, true, 0);
release_region(ec->data_addr, 1);
release_region(ec->command_addr, 1);
device->driver_data = NULL;
@@ -351,13 +351,12 @@ static int acpi_platform_notify_remove(struct device *dev)
return 0;
}
int __init init_acpi_device_notify(void)
void __init init_acpi_device_notify(void)
{
if (platform_notify || platform_notify_remove) {
printk(KERN_ERR PREFIX "Can't use platform_notify\n");
return 0;
return;
}
platform_notify = acpi_platform_notify;
platform_notify_remove = acpi_platform_notify_remove;
return 0;
}
@@ -21,7 +21,7 @@
#define PREFIX "ACPI: "
acpi_status acpi_os_initialize1(void);
int init_acpi_device_notify(void);
void init_acpi_device_notify(void);
int acpi_scan_init(void);
void acpi_pci_root_init(void);
void acpi_pci_link_init(void);
@@ -179,13 +179,13 @@ static inline int acpi_sleep_init(void) { return -ENXIO; }
#endif
#ifdef CONFIG_ACPI_SLEEP
int acpi_sleep_proc_init(void);
void acpi_sleep_proc_init(void);
int suspend_nvs_alloc(void);
void suspend_nvs_free(void);
int suspend_nvs_save(void);
void suspend_nvs_restore(void);
#else
static inline int acpi_sleep_proc_init(void) { return 0; }
static inline void acpi_sleep_proc_init(void) {}
static inline int suspend_nvs_alloc(void) { return 0; }
static inline void suspend_nvs_free(void) {}
static inline int suspend_nvs_save(void) { return 0; }
@@ -144,11 +144,9 @@ static const struct file_operations acpi_system_wakeup_device_fops = {
.release = single_release,
};
int __init acpi_sleep_proc_init(void)
void __init acpi_sleep_proc_init(void)
{
/* 'wakeup device' [R/W] */
proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR,
acpi_root_dir, &acpi_system_wakeup_device_fops);
return 0;
}
@@ -1164,7 +1164,7 @@ static void acpi_add_id(struct acpi_device_pnp *pnp, const char *dev_id)
if (!id)
return;
id->id = kstrdup(dev_id, GFP_KERNEL);
id->id = kstrdup_const(dev_id, GFP_KERNEL);
if (!id->id) {
kfree(id);
return;
@@ -1302,7 +1302,7 @@ void acpi_free_pnp_ids(struct acpi_device_pnp *pnp)
struct acpi_hardware_id *id, *tmp;
list_for_each_entry_safe(id, tmp, &pnp->ids, list) {
kfree(id->id);
kfree_const(id->id);
kfree(id);
}
kfree(pnp->unique_id);
@@ -1452,7 +1452,7 @@ bool acpi_device_is_present(struct acpi_device *adev)
}
static bool acpi_scan_handler_matching(struct acpi_scan_handler *handler,
char *idstr,
const char *idstr,
const struct acpi_device_id **matchid)
{
const struct acpi_device_id *devid;
@@ -1471,7 +1471,7 @@ static bool acpi_scan_handler_matching(struct acpi_scan_handler *handler,
return false;
}
static struct acpi_scan_handler *acpi_scan_match_handler(char *idstr,
static struct acpi_scan_handler *acpi_scan_match_handler(const char *idstr,
const struct acpi_device_id **matchid)
{
struct acpi_scan_handler *handler;
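A short aside on the acpi_add_id()/acpi_free_pnp_ids() hunks above: kstrdup_const() may return its argument unchanged when the source string lives in .rodata, so anything obtained from it must be released with kfree_const() rather than plain kfree(), which is why both functions change together. A minimal sketch of the pairing, using a made-up example_id type rather than the ACPI structures:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical illustration only -- not part of the ACPI code. */
struct example_id {
	const char *id;
};

static int example_set_id(struct example_id *e, const char *src)
{
	/* kstrdup_const() skips the allocation and returns @src itself
	 * when @src is in .rodata; otherwise it behaves like kstrdup(). */
	e->id = kstrdup_const(src, GFP_KERNEL);
	return e->id ? 0 : -ENOMEM;
}

static void example_clear_id(struct example_id *e)
{
	/* Must pair with kstrdup_const(): kfree_const() only frees the
	 * string if it was actually allocated. */
	kfree_const(e->id);
	e->id = NULL;
}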
@@ -210,20 +210,39 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
}
}
int __init
acpi_parse_entries(char *id, unsigned long table_size,
acpi_tbl_entry_handler handler,
/**
* acpi_parse_entries_array - for each @proc element, handle matching subtables
*
* @id: table id (for debugging purposes)
* @table_size: size of the fixed (non-subtable) part of the table;
*              the subtable entries start at this offset
* @table_header: where does the table start?
* @proc: array of acpi_subtable_proc structs, each pairing an entry id
*        with the handler for that entry type
* @proc_num: number of elements in @proc
* @max_entries: maximum number of entries to process (0 means no limit)
*
* For each element of @proc, find the subtables whose type matches
* proc->id and run proc->handler on them. The assumption is that there
* is only a single handler for a particular entry id.
*
* On success, the sum of all matching entries for all proc handlers is
* returned; otherwise -ENODEV or -EINVAL is returned.
*/
static int __init
acpi_parse_entries_array(char *id, unsigned long table_size,
struct acpi_table_header *table_header,
int entry_id, unsigned int max_entries)
struct acpi_subtable_proc *proc, int proc_num,
unsigned int max_entries)
{
struct acpi_subtable_header *entry;
int count = 0;
unsigned long table_end;
int count = 0;
int i;
if (acpi_disabled)
return -ENODEV;
if (!id || !handler)
if (!id)
return -EINVAL;
if (!table_size)
@@ -243,20 +262,28 @@ acpi_parse_entries(char *id, unsigned long table_size,
while (((unsigned long)entry) + sizeof(struct acpi_subtable_header) <
table_end) {
if (entry->type == entry_id
&& (!max_entries || count < max_entries)) {
if (handler(entry, table_end))
if (max_entries && count >= max_entries)
break;
for (i = 0; i < proc_num; i++) {
if (entry->type != proc[i].id)
continue;
if (!proc[i].handler ||
proc[i].handler(entry, table_end))
return -EINVAL;
count++;
proc->count++;
break;
}
if (i != proc_num)
count++;
/*
* If entry->length is 0, break from this loop to avoid
* infinite loop.
*/
if (entry->length == 0) {
pr_err("[%4.4s:0x%02x] Invalid zero length\n", id, entry_id);
pr_err("[%4.4s:0x%02x] Invalid zero length\n", id, proc->id);
return -EINVAL;
}
@@ -266,17 +293,32 @@ acpi_parse_entries(char *id, unsigned long table_size,
if (max_entries && count > max_entries) {
pr_warn("[%4.4s:0x%02x] ignored %i entries of %i found\n",
id, entry_id, count - max_entries, count);
id, proc->id, count - max_entries, count);
}
return count;
}
int __init
acpi_table_parse_entries(char *id,
acpi_parse_entries(char *id,
unsigned long table_size,
int entry_id,
acpi_tbl_entry_handler handler,
struct acpi_table_header *table_header,
int entry_id, unsigned int max_entries)
{
struct acpi_subtable_proc proc = {
.id = entry_id,
.handler = handler,
};
return acpi_parse_entries_array(id, table_size, table_header,
&proc, 1, max_entries);
}
int __init
acpi_table_parse_entries_array(char *id,
unsigned long table_size,
struct acpi_subtable_proc *proc, int proc_num,
unsigned int max_entries)
{
struct acpi_table_header *table_header = NULL;
@@ -287,7 +329,7 @@ acpi_table_parse_entries(char *id,
if (acpi_disabled)
return -ENODEV;
if (!id || !handler)
if (!id)
return -EINVAL;
if (!strncmp(id, ACPI_SIG_MADT, 4))
@@ -299,13 +341,29 @@ acpi_table_parse_entries(char *id,
return -ENODEV;
}
count = acpi_parse_entries(id, table_size, handler, table_header,
entry_id, max_entries);
count = acpi_parse_entries_array(id, table_size, table_header,
proc, proc_num, max_entries);
early_acpi_os_unmap_memory((char *)table_header, tbl_size);
return count;
}
int __init
acpi_table_parse_entries(char *id,
unsigned long table_size,
int entry_id,
acpi_tbl_entry_handler handler,
unsigned int max_entries)
{
struct acpi_subtable_proc proc = {
.id = entry_id,
.handler = handler,
};
return acpi_table_parse_entries_array(id, table_size, &proc, 1,
max_entries);
}
int __init
acpi_table_parse_madt(enum acpi_madt_type id,
acpi_tbl_entry_handler handler, unsigned int max_entries)
@@ -207,7 +207,7 @@ struct pnp_protocol pnpacpi_protocol = {
};
EXPORT_SYMBOL(pnpacpi_protocol);
static char *__init pnpacpi_get_id(struct acpi_device *device)
static const char *__init pnpacpi_get_id(struct acpi_device *device)
{
struct acpi_hardware_id *id;
@@ -222,7 +222,7 @@ static char *__init pnpacpi_get_id(struct acpi_device *device)
static int __init pnpacpi_add_device(struct acpi_device *device)
{
struct pnp_dev *dev;
char *pnpid;
const char *pnpid;
struct acpi_hardware_id *id;
int error;
@@ -129,7 +129,7 @@ static inline struct acpi_hotplug_profile *to_acpi_hotplug_profile(
struct acpi_scan_handler {
const struct acpi_device_id *ids;
struct list_head list_node;
bool (*match)(char *idstr, const struct acpi_device_id **matchid);
bool (*match)(const char *idstr, const struct acpi_device_id **matchid);
int (*attach)(struct acpi_device *dev, const struct acpi_device_id *id);
void (*detach)(struct acpi_device *dev);
void (*bind)(struct device *phys_dev);
@@ -227,7 +227,7 @@ typedef char acpi_device_class[20];
struct acpi_hardware_id {
struct list_head list;
char *id;
const char *id;
};
struct acpi_pnp_type {
@@ -131,6 +131,12 @@ static inline void acpi_initrd_override(void *data, size_t size)
(!entry) || (unsigned long)entry + sizeof(*entry) > end || \
((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
struct acpi_subtable_proc {
int id;
acpi_tbl_entry_handler handler;
int count;
};
char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
void __acpi_unmap_table(char *map, unsigned long size);
int early_acpi_boot_init(void);
@@ -149,6 +155,13 @@ int __init acpi_table_parse_entries(char *id, unsigned long table_size,
int entry_id,
acpi_tbl_entry_handler handler,
unsigned int max_entries);
int __init acpi_table_parse_entries(char *id, unsigned long table_size,
int entry_id,
acpi_tbl_entry_handler handler,
unsigned int max_entries);
int __init acpi_table_parse_entries_array(char *id, unsigned long table_size,
struct acpi_subtable_proc *proc, int proc_num,
unsigned int max_entries);
int acpi_table_parse_madt(enum acpi_madt_type id,
acpi_tbl_entry_handler handler,
unsigned int max_entries);