Commit 2c215f67 authored by Len Brown

Merge intel.com:/home/lenb/bk/linux-2.6.8

into intel.com:/home/lenb/src/linux-acpi-test-2.6.8
parents 4e58aec8 e175f06b
......@@ -47,4 +47,4 @@ obj-$(CONFIG_ACPI_DEBUG) += debug.o
obj-$(CONFIG_ACPI_NUMA) += numa.o
obj-$(CONFIG_ACPI_ASUS) += asus_acpi.o
obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
obj-$(CONFIG_ACPI_BUS) += scan.o
obj-$(CONFIG_ACPI_BUS) += scan.o motherboard.o
......@@ -145,8 +145,9 @@ acpi_ds_parse_method (
return_ACPI_STATUS (AE_NO_MEMORY);
}
status = acpi_ds_init_aml_walk (walk_state, op, node, obj_desc->method.aml_start,
obj_desc->method.aml_length, NULL, NULL, 1);
status = acpi_ds_init_aml_walk (walk_state, op, node,
obj_desc->method.aml_start,
obj_desc->method.aml_length, NULL, 1);
if (ACPI_FAILURE (status)) {
acpi_ds_delete_walk_state (walk_state);
return_ACPI_STATUS (status);
......@@ -267,8 +268,9 @@ acpi_ds_call_control_method (
{
acpi_status status;
struct acpi_namespace_node *method_node;
union acpi_operand_object *obj_desc;
struct acpi_walk_state *next_walk_state;
union acpi_operand_object *obj_desc;
struct acpi_parameter_info info;
u32 i;
......@@ -309,7 +311,6 @@ acpi_ds_call_control_method (
return_ACPI_STATUS (AE_NO_MEMORY);
}
/* Create and init a Root Node */
op = acpi_ps_create_scope_op ();
......@@ -320,7 +321,7 @@ acpi_ds_call_control_method (
status = acpi_ds_init_aml_walk (next_walk_state, op, method_node,
obj_desc->method.aml_start, obj_desc->method.aml_length,
NULL, NULL, 1);
NULL, 1);
if (ACPI_FAILURE (status)) {
acpi_ds_delete_walk_state (next_walk_state);
goto cleanup;
......@@ -348,9 +349,12 @@ acpi_ds_call_control_method (
*/
this_walk_state->operands [this_walk_state->num_operands] = NULL;
info.parameters = &this_walk_state->operands[0];
info.parameter_type = ACPI_PARAM_ARGS;
status = acpi_ds_init_aml_walk (next_walk_state, NULL, method_node,
obj_desc->method.aml_start, obj_desc->method.aml_length,
&this_walk_state->operands[0], NULL, 3);
&info, 3);
if (ACPI_FAILURE (status)) {
goto cleanup;
}
......@@ -382,7 +386,7 @@ acpi_ds_call_control_method (
/* On error, we must delete the new walk state */
cleanup:
if (next_walk_state->method_desc) {
if (next_walk_state && (next_walk_state->method_desc)) {
/* Decrement the thread count on the method parse tree */
next_walk_state->method_desc->method.thread_count--;
......
......@@ -656,11 +656,13 @@ acpi_ds_store_object_to_local (
new_obj_desc, current_obj_desc));
/*
* Store this object to the Node
* (perform the indirect store)
* Store this object to the Node (perform the indirect store)
* NOTE: No implicit conversion is performed, as per the ACPI
* specification rules on storing to Locals/Args.
*/
status = acpi_ex_store_object_to_node (new_obj_desc,
current_obj_desc->reference.object, walk_state);
current_obj_desc->reference.object, walk_state,
ACPI_NO_IMPLICIT_CONVERSION);
/* Remove local reference if we copied the object above */
......
......@@ -79,7 +79,6 @@ acpi_ds_execute_arguments (
acpi_status status;
union acpi_parse_object *op;
struct acpi_walk_state *walk_state;
union acpi_parse_object *arg;
ACPI_FUNCTION_TRACE ("ds_execute_arguments");
......@@ -105,7 +104,7 @@ acpi_ds_execute_arguments (
}
status = acpi_ds_init_aml_walk (walk_state, op, NULL, aml_start,
aml_length, NULL, NULL, 1);
aml_length, NULL, 1);
if (ACPI_FAILURE (status)) {
acpi_ds_delete_walk_state (walk_state);
return_ACPI_STATUS (status);
......@@ -126,9 +125,7 @@ acpi_ds_execute_arguments (
/* Get and init the Op created above */
arg = op->common.value.arg;
op->common.node = node;
arg->common.node = node;
acpi_ps_delete_parse_tree (op);
/* Evaluate the deferred arguments */
......@@ -150,7 +147,7 @@ acpi_ds_execute_arguments (
/* Execute the opcode and arguments */
status = acpi_ds_init_aml_walk (walk_state, op, NULL, aml_start,
aml_length, NULL, NULL, 3);
aml_length, NULL, 3);
if (ACPI_FAILURE (status)) {
acpi_ds_delete_walk_state (walk_state);
return_ACPI_STATUS (status);
......
......@@ -50,6 +50,9 @@
#include <acpi/acnamesp.h>
#include <acpi/acevents.h>
#ifdef _ACPI_ASL_COMPILER
#include <acpi/acdisasm.h>
#endif
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME ("dswload")
......@@ -180,7 +183,17 @@ acpi_ds_load1_begin_op (
status = acpi_ns_lookup (walk_state->scope_info, path, object_type,
ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT, walk_state, &(node));
if (ACPI_FAILURE (status)) {
#ifdef _ACPI_ASL_COMPILER
if (status == AE_NOT_FOUND) {
acpi_dm_add_to_external_list (path);
status = AE_OK;
}
else {
ACPI_REPORT_NSERROR (path, status);
}
#else
ACPI_REPORT_NSERROR (path, status);
#endif
return (status);
}
......@@ -529,7 +542,16 @@ acpi_ds_load2_begin_op (
status = acpi_ns_lookup (walk_state->scope_info, buffer_ptr, object_type,
ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT, walk_state, &(node));
if (ACPI_FAILURE (status)) {
#ifdef _ACPI_ASL_COMPILER
if (status == AE_NOT_FOUND) {
status = AE_OK;
}
else {
ACPI_REPORT_NSERROR (buffer_ptr, status);
}
#else
ACPI_REPORT_NSERROR (buffer_ptr, status);
#endif
return_ACPI_STATUS (status);
}
/*
......
......@@ -906,8 +906,7 @@ acpi_ds_init_aml_walk (
struct acpi_namespace_node *method_node,
u8 *aml_start,
u32 aml_length,
union acpi_operand_object **params,
union acpi_operand_object **return_obj_desc,
struct acpi_parameter_info *info,
u32 pass_number)
{
acpi_status status;
......@@ -926,8 +925,17 @@ acpi_ds_init_aml_walk (
/* The next_op of the next_walk will be the beginning of the method */
walk_state->next_op = NULL;
walk_state->params = params;
walk_state->caller_return_desc = return_obj_desc;
if (info) {
if (info->parameter_type == ACPI_PARAM_GPE) {
walk_state->gpe_event_info = ACPI_CAST_PTR (struct acpi_gpe_event_info,
info->parameters);
}
else {
walk_state->params = info->parameters;
walk_state->caller_return_desc = &info->return_object;
}
}
status = acpi_ps_init_scope (&walk_state->parser_state, op);
if (ACPI_FAILURE (status)) {
......@@ -949,7 +957,7 @@ acpi_ds_init_aml_walk (
/* Init the method arguments */
status = acpi_ds_method_data_init_args (params, ACPI_METHOD_NUM_ARGS, walk_state);
status = acpi_ds_method_data_init_args (walk_state->params, ACPI_METHOD_NUM_ARGS, walk_state);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
......
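The acpi_ds_init_aml_walk() change above replaces the separate params/return_obj_desc arguments with a single struct acpi_parameter_info. A minimal sketch of the new caller pattern, using only the fields visible in these hunks (parameters, parameter_type, node, return_object); the wrapper name example_start_method_walk is hypothetical and not part of the patch:

static acpi_status
example_start_method_walk (
	struct acpi_walk_state          *next_walk_state,
	struct acpi_namespace_node      *method_node,
	union acpi_operand_object       *obj_desc,
	union acpi_operand_object       **args)
{
	struct acpi_parameter_info      info;

	info.parameters     = args;             /* NULL-terminated argument list */
	info.parameter_type = ACPI_PARAM_ARGS;  /* ACPI_PARAM_GPE instead passes a
	                                         * struct acpi_gpe_event_info pointer */

	/* Pass 3: full execution, as in acpi_ds_call_control_method above */
	return (acpi_ds_init_aml_walk (next_walk_state, NULL, method_node,
			 obj_desc->method.aml_start,
			 obj_desc->method.aml_length,
			 &info, 3));
}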
......@@ -381,7 +381,7 @@ acpi_ec_gpe_query (
acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR);
}
static void
static u32
acpi_ec_gpe_handler (
void *data)
{
......@@ -389,12 +389,17 @@ acpi_ec_gpe_handler (
struct acpi_ec *ec = (struct acpi_ec *) data;
if (!ec)
return;
return ACPI_INTERRUPT_NOT_HANDLED;
acpi_disable_gpe(NULL, ec->gpe_bit, ACPI_ISR);
status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE,
acpi_ec_gpe_query, ec);
if (status == AE_OK)
return ACPI_INTERRUPT_HANDLED;
else
return ACPI_INTERRUPT_NOT_HANDLED;
}
/* --------------------------------------------------------------------------
......@@ -729,6 +734,8 @@ acpi_ec_start (
if (ACPI_FAILURE(status)) {
return_VALUE(-ENODEV);
}
acpi_set_gpe_type (NULL, ec->gpe_bit, ACPI_GPE_TYPE_RUNTIME);
acpi_enable_gpe (NULL, ec->gpe_bit, ACPI_NOT_ISR);
status = acpi_install_address_space_handler (ec->handle,
ACPI_ADR_SPACE_EC, &acpi_ec_space_handler,
......@@ -814,6 +821,8 @@ acpi_ec_ecdt_probe (void)
if (ACPI_FAILURE(status)) {
goto error;
}
acpi_set_gpe_type (NULL, ec_ecdt->gpe_bit, ACPI_GPE_TYPE_RUNTIME);
acpi_enable_gpe (NULL, ec_ecdt->gpe_bit, ACPI_NOT_ISR);
status = acpi_install_address_space_handler (ACPI_ROOT_OBJECT,
ACPI_ADR_SPACE_EC, &acpi_ec_space_handler,
......
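The EC changes above show the driver-facing side of this rework: GPE handlers now return a handled/not-handled status, and a driver declares its GPE as a runtime GPE before enabling it. A minimal sketch under those assumptions (the function names below are hypothetical; struct acpi_ec and gpe_bit are taken from the hunks above):

static u32
example_gpe_handler (void *data)
{
	struct acpi_ec *ec = (struct acpi_ec *) data;

	if (!ec)
		return ACPI_INTERRUPT_NOT_HANDLED;

	/* Defer the real work, as acpi_ec_gpe_handler does via
	 * acpi_os_queue_for_execution(), then report the GPE as handled */
	return ACPI_INTERRUPT_HANDLED;
}

static void
example_enable_ec_gpe (struct acpi_ec *ec)
{
	/* Declare the GPE as runtime (not wake), then enable it */
	acpi_set_gpe_type (NULL, ec->gpe_bit, ACPI_GPE_TYPE_RUNTIME);
	acpi_enable_gpe (NULL, ec->gpe_bit, ACPI_NOT_ISR);
}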
......@@ -50,7 +50,7 @@
/*******************************************************************************
*
* FUNCTION: acpi_ev_initialize
* FUNCTION: acpi_ev_initialize_events
*
* PARAMETERS: None
*
......@@ -61,13 +61,13 @@
******************************************************************************/
acpi_status
acpi_ev_initialize (
acpi_ev_initialize_events (
void)
{
acpi_status status;
ACPI_FUNCTION_TRACE ("ev_initialize");
ACPI_FUNCTION_TRACE ("ev_initialize_events");
/* Make sure we have ACPI tables */
......@@ -104,7 +104,7 @@ acpi_ev_initialize (
/*******************************************************************************
*
* FUNCTION: acpi_ev_handler_initialize
* FUNCTION: acpi_ev_install_xrupt_handlers
*
* PARAMETERS: None
*
......@@ -115,13 +115,13 @@ acpi_ev_initialize (
******************************************************************************/
acpi_status
acpi_ev_handler_initialize (
acpi_ev_install_xrupt_handlers (
void)
{
acpi_status status;
ACPI_FUNCTION_TRACE ("ev_handler_initialize");
ACPI_FUNCTION_TRACE ("ev_install_xrupt_handlers");
/* Install the SCI handler */
......
......@@ -49,6 +49,249 @@
ACPI_MODULE_NAME ("evgpe")
/*******************************************************************************
*
* FUNCTION: acpi_ev_set_gpe_type
*
* PARAMETERS: gpe_event_info - GPE to set
* Type - New type
*
* RETURN: Status
*
* DESCRIPTION: Sets the new type for the GPE (wake, run, or wake/run)
*
******************************************************************************/
acpi_status
acpi_ev_set_gpe_type (
struct acpi_gpe_event_info *gpe_event_info,
u8 type)
{
acpi_status status;
ACPI_FUNCTION_TRACE ("ev_set_gpe_type");
/* Validate type and update register enable masks */
switch (type) {
case ACPI_GPE_TYPE_WAKE:
case ACPI_GPE_TYPE_RUNTIME:
case ACPI_GPE_TYPE_WAKE_RUN:
break;
default:
return_ACPI_STATUS (AE_BAD_PARAMETER);
}
/* Disable the GPE if currently enabled */
status = acpi_ev_disable_gpe (gpe_event_info);
/* Type was validated above */
gpe_event_info->flags &= ~ACPI_GPE_TYPE_MASK; /* Clear type bits */
gpe_event_info->flags |= type; /* Insert type */
return_ACPI_STATUS (status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_update_gpe_enable_masks
*
* PARAMETERS: gpe_event_info - GPE to update
*
* RETURN: Status
*
* DESCRIPTION: Updates GPE register enable masks based on the GPE type
*
******************************************************************************/
acpi_status
acpi_ev_update_gpe_enable_masks (
struct acpi_gpe_event_info *gpe_event_info,
u8 type)
{
struct acpi_gpe_register_info *gpe_register_info;
u8 register_bit;
ACPI_FUNCTION_TRACE ("ev_update_gpe_enable_masks");
gpe_register_info = gpe_event_info->register_info;
if (!gpe_register_info) {
return_ACPI_STATUS (AE_NOT_EXIST);
}
register_bit = gpe_event_info->register_bit;
/* 1) Disable case. Simply clear all enable bits */
if (type == ACPI_GPE_DISABLE) {
ACPI_CLEAR_BIT (gpe_register_info->enable_for_wake, register_bit);
ACPI_CLEAR_BIT (gpe_register_info->enable_for_run, register_bit);
return_ACPI_STATUS (AE_OK);
}
/* 2) Enable case. Set/Clear the appropriate enable bits */
switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
case ACPI_GPE_TYPE_WAKE:
ACPI_SET_BIT (gpe_register_info->enable_for_wake, register_bit);
ACPI_CLEAR_BIT (gpe_register_info->enable_for_run, register_bit);
break;
case ACPI_GPE_TYPE_RUNTIME:
ACPI_CLEAR_BIT (gpe_register_info->enable_for_wake, register_bit);
ACPI_SET_BIT (gpe_register_info->enable_for_run, register_bit);
break;
case ACPI_GPE_TYPE_WAKE_RUN:
ACPI_SET_BIT (gpe_register_info->enable_for_wake, register_bit);
ACPI_SET_BIT (gpe_register_info->enable_for_run, register_bit);
break;
default:
return_ACPI_STATUS (AE_BAD_PARAMETER);
}
return_ACPI_STATUS (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_enable_gpe
*
* PARAMETERS: gpe_event_info - GPE to enable
*
* RETURN: Status
*
* DESCRIPTION: Enable a GPE based on the GPE type
*
******************************************************************************/
acpi_status
acpi_ev_enable_gpe (
struct acpi_gpe_event_info *gpe_event_info,
u8 write_to_hardware)
{
acpi_status status;
ACPI_FUNCTION_TRACE ("ev_enable_gpe");
/* Make sure HW enable masks are updated */
status = acpi_ev_update_gpe_enable_masks (gpe_event_info, ACPI_GPE_ENABLE);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
/* Mark wake-enabled or HW enable, or both */
switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
case ACPI_GPE_TYPE_WAKE:
ACPI_SET_BIT (gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
break;
case ACPI_GPE_TYPE_WAKE_RUN:
ACPI_SET_BIT (gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
/*lint -fallthrough */
case ACPI_GPE_TYPE_RUNTIME:
ACPI_SET_BIT (gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);
if (write_to_hardware) {
/* Clear the GPE (of stale events), then enable it */
status = acpi_hw_clear_gpe (gpe_event_info);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
/* Enable the requested runtime GPE */
status = acpi_hw_write_gpe_enable_reg (gpe_event_info);
}
break;
default:
return_ACPI_STATUS (AE_BAD_PARAMETER);
}
return_ACPI_STATUS (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_disable_gpe
*
* PARAMETERS: gpe_event_info - GPE to disable
*
* RETURN: Status
*
* DESCRIPTION: Disable a GPE based on the GPE type
*
******************************************************************************/
acpi_status
acpi_ev_disable_gpe (
struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status;
ACPI_FUNCTION_TRACE ("ev_disable_gpe");
if (!(gpe_event_info->flags & ACPI_GPE_ENABLE_MASK)) {
return_ACPI_STATUS (AE_OK);
}
/* Make sure HW enable masks are updated */
status = acpi_ev_update_gpe_enable_masks (gpe_event_info, ACPI_GPE_DISABLE);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
/* Mark wake-disabled or HW disable, or both */
switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
case ACPI_GPE_TYPE_WAKE:
ACPI_CLEAR_BIT (gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
break;
case ACPI_GPE_TYPE_WAKE_RUN:
ACPI_CLEAR_BIT (gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
/*lint -fallthrough */
case ACPI_GPE_TYPE_RUNTIME:
/* Disable the requested runtime GPE */
ACPI_CLEAR_BIT (gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);
status = acpi_hw_write_gpe_enable_reg (gpe_event_info);
break;
default:
return_ACPI_STATUS (AE_BAD_PARAMETER);
}
return_ACPI_STATUS (AE_OK);
}
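Taken together, the four helpers above separate "what kind of GPE this is" (acpi_ev_set_gpe_type) from "whether it is enabled in hardware right now" (acpi_ev_enable_gpe / acpi_ev_disable_gpe, via acpi_ev_update_gpe_enable_masks). A minimal sketch of how they compose, mirroring their use later in this patch; the wrapper name is hypothetical:

static acpi_status
example_mark_runtime_gpe (struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status;

	/* Record the type; this also disables the GPE if it is currently enabled */
	status = acpi_ev_set_gpe_type (gpe_event_info, ACPI_GPE_TYPE_RUNTIME);
	if (ACPI_FAILURE (status)) {
		return (status);
	}

	/* Update enable_for_run/enable_for_wake masks only; passing FALSE
	 * defers the hardware write (cf. acpi_ev_save_method_info below) */
	return (acpi_ev_enable_gpe (gpe_event_info, FALSE));
}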
/*******************************************************************************
*
* FUNCTION: acpi_ev_get_gpe_event_info
......@@ -139,11 +382,12 @@ acpi_ev_gpe_detect (
u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
u8 enabled_status_byte;
struct acpi_gpe_register_info *gpe_register_info;
u32 in_value;
u32 status_reg;
u32 enable_reg;
acpi_status status;
struct acpi_gpe_block_info *gpe_block;
u32 i;
u32 j;
acpi_native_uint i;
acpi_native_uint j;
ACPI_FUNCTION_NAME ("ev_gpe_detect");
......@@ -171,33 +415,32 @@ acpi_ev_gpe_detect (
/* Read the Status Register */
status = acpi_hw_low_level_read (ACPI_GPE_REGISTER_WIDTH, &in_value,
status = acpi_hw_low_level_read (ACPI_GPE_REGISTER_WIDTH, &status_reg,
&gpe_register_info->status_address);
gpe_register_info->status = (u8) in_value;
if (ACPI_FAILURE (status)) {
goto unlock_and_exit;
}
/* Read the Enable Register */
status = acpi_hw_low_level_read (ACPI_GPE_REGISTER_WIDTH, &in_value,
status = acpi_hw_low_level_read (ACPI_GPE_REGISTER_WIDTH, &enable_reg,
&gpe_register_info->enable_address);
gpe_register_info->enable = (u8) in_value;
if (ACPI_FAILURE (status)) {
goto unlock_and_exit;
}
ACPI_DEBUG_PRINT ((ACPI_DB_INTERRUPTS,
"GPE pair: Status %8.8X%8.8X = %02X, Enable %8.8X%8.8X = %02X\n",
ACPI_FORMAT_UINT64 (gpe_register_info->status_address.address),
gpe_register_info->status,
ACPI_FORMAT_UINT64 (gpe_register_info->enable_address.address),
gpe_register_info->enable));
ACPI_FORMAT_UINT64 (
gpe_register_info->status_address.address),
status_reg,
ACPI_FORMAT_UINT64 (
gpe_register_info->enable_address.address),
enable_reg));
/* First check if there is anything active at all in this register */
enabled_status_byte = (u8) (gpe_register_info->status &
gpe_register_info->enable);
enabled_status_byte = (u8) (status_reg & enable_reg);
if (!enabled_status_byte) {
/* No active GPEs in this register, move on */
......@@ -216,7 +459,7 @@ acpi_ev_gpe_detect (
*/
int_status |= acpi_ev_gpe_dispatch (
&gpe_block->event_info[(i * ACPI_GPE_REGISTER_WIDTH) + j],
j + gpe_register_info->base_gpe_number);
(u32) j + gpe_register_info->base_gpe_number);
}
}
}
......@@ -255,6 +498,7 @@ acpi_ev_asynch_execute_gpe_method (
u32 gpe_number = 0;
acpi_status status;
struct acpi_gpe_event_info local_gpe_event_info;
struct acpi_parameter_info info;
ACPI_FUNCTION_TRACE ("ev_asynch_execute_gpe_method");
......@@ -272,6 +516,10 @@ acpi_ev_asynch_execute_gpe_method (
return_VOID;
}
/* Set the GPE flags for return to enabled state */
(void) acpi_ev_enable_gpe (gpe_event_info, FALSE);
/*
* Take a snapshot of the GPE info for this level - we copy the
* info to prevent a race condition with remove_handler/remove_block.
......@@ -283,23 +531,33 @@ acpi_ev_asynch_execute_gpe_method (
return_VOID;
}
if (local_gpe_event_info.method_node) {
/*
* Must check for control method type dispatch one more
* time to avoid race with ev_gpe_install_handler
*/
if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) == ACPI_GPE_DISPATCH_METHOD) {
/*
* Invoke the GPE Method (_Lxx, _Exx):
* (Evaluate the _Lxx/_Exx control method that corresponds to this GPE.)
* Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the _Lxx/_Exx
* control method that corresponds to this GPE
*/
status = acpi_ns_evaluate_by_handle (local_gpe_event_info.method_node, NULL, NULL);
info.node = local_gpe_event_info.dispatch.method_node;
info.parameters = ACPI_CAST_PTR (union acpi_operand_object *, gpe_event_info);
info.parameter_type = ACPI_PARAM_GPE;
status = acpi_ns_evaluate_by_handle (&info);
if (ACPI_FAILURE (status)) {
ACPI_REPORT_ERROR (("%s while evaluating method [%4.4s] for GPE[%2X]\n",
ACPI_REPORT_ERROR ((
"%s while evaluating method [%4.4s] for GPE[%2X]\n",
acpi_format_exception (status),
acpi_ut_get_node_name (local_gpe_event_info.method_node), gpe_number));
acpi_ut_get_node_name (local_gpe_event_info.dispatch.method_node),
gpe_number));
}
}
if ((local_gpe_event_info.flags & ACPI_GPE_XRUPT_TYPE_MASK) == ACPI_GPE_LEVEL_TRIGGERED) {
/*
* GPE is level-triggered, we clear the GPE status bit after handling
* the event.
* GPE is level-triggered, we clear the GPE status bit after
* handling the event.
*/
status = acpi_hw_clear_gpe (&local_gpe_event_info);
if (ACPI_FAILURE (status)) {
......@@ -309,7 +567,7 @@ acpi_ev_asynch_execute_gpe_method (
/* Enable this GPE */
(void) acpi_hw_enable_gpe (&local_gpe_event_info);
(void) acpi_hw_write_gpe_enable_reg (&local_gpe_event_info);
return_VOID;
}
......@@ -354,6 +612,15 @@ acpi_ev_gpe_dispatch (
}
}
/* Save current system state */
if (acpi_gbl_system_awake_and_running) {
ACPI_SET_BIT (gpe_event_info->flags, ACPI_GPE_SYSTEM_RUNNING);
}
else {
ACPI_CLEAR_BIT (gpe_event_info->flags, ACPI_GPE_SYSTEM_RUNNING);
}
/*
* Dispatch the GPE to either an installed handler, or the control
* method associated with this GPE (_Lxx or _Exx).
......@@ -361,10 +628,15 @@ acpi_ev_gpe_dispatch (
* If there is neither a handler nor a method, we disable the level to
* prevent further events from coming in here.
*/
if (gpe_event_info->handler) {
/* Invoke the installed handler (at interrupt level) */
switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
case ACPI_GPE_DISPATCH_HANDLER:
gpe_event_info->handler (gpe_event_info->context);
/*
* Invoke the installed handler (at interrupt level)
* Ignore return status for now. TBD: leave GPE disabled on error?
*/
(void) gpe_event_info->dispatch.handler->address (
gpe_event_info->dispatch.handler->context);
/* It is now safe to clear level-triggered events. */
......@@ -377,13 +649,15 @@ acpi_ev_gpe_dispatch (
return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
}
}
}
else if (gpe_event_info->method_node) {
break;
case ACPI_GPE_DISPATCH_METHOD:
/*
* Disable GPE, so it doesn't keep firing before the method has a
* chance to run.
*/
status = acpi_hw_disable_gpe (gpe_event_info);
status = acpi_ev_disable_gpe (gpe_event_info);
if (ACPI_FAILURE (status)) {
ACPI_REPORT_ERROR ((
"acpi_ev_gpe_dispatch: Unable to disable GPE[%2X]\n",
......@@ -402,8 +676,10 @@ acpi_ev_gpe_dispatch (
"acpi_ev_gpe_dispatch: Unable to queue handler for GPE[%2X], event is disabled\n",
gpe_number));
}
}
else {
break;
default:
/* No handler or method to run! */
ACPI_REPORT_ERROR ((
......@@ -414,15 +690,68 @@ acpi_ev_gpe_dispatch (
* Disable the GPE. The GPE will remain disabled until the ACPI
* Core Subsystem is restarted, or a handler is installed.
*/
status = acpi_hw_disable_gpe (gpe_event_info);
status = acpi_ev_disable_gpe (gpe_event_info);
if (ACPI_FAILURE (status)) {
ACPI_REPORT_ERROR ((
"acpi_ev_gpe_dispatch: Unable to disable GPE[%2X]\n",
gpe_number));
return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
}
break;
}
return_VALUE (ACPI_INTERRUPT_HANDLED);
}
#ifdef ACPI_GPE_NOTIFY_CHECK
/*******************************************************************************
* NOT USED, PROTOTYPE ONLY AND WILL PROBABLY BE REMOVED
*
* FUNCTION: acpi_ev_check_for_wake_only_gpe
*
* PARAMETERS: gpe_event_info - info for this GPE
*
* RETURN: Status
*
* DESCRIPTION: Determine if a GPE is "wake-only".
*
* Called from Notify() code in interpreter when a "device_wake"
* Notify comes in.
*
******************************************************************************/
acpi_status
acpi_ev_check_for_wake_only_gpe (
struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status;
ACPI_FUNCTION_TRACE ("ev_check_for_wake_only_gpe");
if ((gpe_event_info) && /* Only >0 for _Lxx/_Exx */
((gpe_event_info->flags & ACPI_GPE_SYSTEM_MASK) == ACPI_GPE_SYSTEM_RUNNING)) /* System state at GPE time */ {
/* This must be a wake-only GPE, disable it */
status = acpi_ev_disable_gpe (gpe_event_info);
/* Set GPE to wake-only. Do not change wake disabled/enabled status */
acpi_ev_set_gpe_type (gpe_event_info, ACPI_GPE_TYPE_WAKE);
ACPI_REPORT_INFO (("GPE %p was updated from wake/run to wake-only\n",
gpe_event_info));
/* This was a wake-only GPE */
return_ACPI_STATUS (AE_WAKE_ONLY_GPE);
}
return_ACPI_STATUS (AE_OK);
}
#endif
......@@ -53,7 +53,7 @@
*
* FUNCTION: acpi_ev_valid_gpe_event
*
* PARAMETERS: gpe_event_info - Info for this GPE
* PARAMETERS: gpe_event_info - Info for this GPE
*
* RETURN: TRUE if the gpe_event is valid
*
......@@ -154,6 +154,53 @@ acpi_ev_walk_gpe_list (
}
/******************************************************************************
*
* FUNCTION: acpi_ev_delete_gpe_handlers
*
* PARAMETERS: gpe_xrupt_info - GPE Interrupt info
* gpe_block - Gpe Block info
*
* RETURN: Status
*
* DESCRIPTION: Delete all Handler objects found in the GPE data structs.
* Used only prior to termination.
*
******************************************************************************/
acpi_status
acpi_ev_delete_gpe_handlers (
struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block)
{
struct acpi_gpe_event_info *gpe_event_info;
acpi_native_uint i;
acpi_native_uint j;
ACPI_FUNCTION_TRACE ("ev_delete_gpe_handlers");
/* Examine each GPE Register within the block */
for (i = 0; i < gpe_block->register_count; i++) {
/* Now look at the individual GPEs in this byte register */
for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
gpe_event_info = &gpe_block->event_info[(i * ACPI_GPE_REGISTER_WIDTH) + j];
if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == ACPI_GPE_DISPATCH_HANDLER) {
ACPI_MEM_FREE (gpe_event_info->dispatch.handler);
gpe_event_info->dispatch.handler = NULL;
gpe_event_info->flags &= ~ACPI_GPE_DISPATCH_MASK;
}
}
}
return_ACPI_STATUS (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_save_method_info
......@@ -188,6 +235,7 @@ acpi_ev_save_method_info (
u32 gpe_number;
char name[ACPI_NAME_SIZE + 1];
u8 type;
acpi_status status;
ACPI_FUNCTION_TRACE ("ev_save_method_info");
......@@ -206,16 +254,16 @@ acpi_ev_save_method_info (
* 2) Edge/Level determination is based on the 2nd character
* of the method name
*
* NOTE: Default GPE type is RUNTIME. May be changed later to WAKE if a
* _PRW object is found that points to this GPE.
* NOTE: Default GPE type is RUNTIME. May be changed later to WAKE
* if a _PRW object is found that points to this GPE.
*/
switch (name[1]) {
case 'L':
type = ACPI_GPE_LEVEL_TRIGGERED | ACPI_GPE_TYPE_RUNTIME;
type = ACPI_GPE_LEVEL_TRIGGERED;
break;
case 'E':
type = ACPI_GPE_EDGE_TRIGGERED | ACPI_GPE_TYPE_RUNTIME;
type = ACPI_GPE_EDGE_TRIGGERED;
break;
default:
......@@ -253,27 +301,35 @@ acpi_ev_save_method_info (
/*
* Now we can add this information to the gpe_event_info block
* for use during dispatch of this GPE.
* for use during dispatch of this GPE. Default type is RUNTIME, although
* this may change when the _PRW methods are executed later.
*/
gpe_event_info = &gpe_block->event_info[gpe_number - gpe_block->block_base_number];
gpe_event_info->flags = type;
gpe_event_info->method_node = (struct acpi_namespace_node *) obj_handle;
gpe_event_info->flags = (u8) (type | ACPI_GPE_DISPATCH_METHOD |
ACPI_GPE_TYPE_RUNTIME);
gpe_event_info->dispatch.method_node = (struct acpi_namespace_node *) obj_handle;
/* Update enable mask, but don't enable the HW GPE as of yet */
status = acpi_ev_enable_gpe (gpe_event_info, FALSE);
ACPI_DEBUG_PRINT ((ACPI_DB_LOAD,
"Registered GPE method %s as GPE number 0x%.2X\n",
name, gpe_number));
return_ACPI_STATUS (AE_OK);
return_ACPI_STATUS (status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_get_gpe_type
* FUNCTION: acpi_ev_match_prw_and_gpe
*
* PARAMETERS: Callback from walk_namespace
*
* RETURN: Status
* RETURN: Status. NOTE: We ignore errors so that the _PRW walk is
* not aborted on a single _PRW failure.
*
* DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
* Device. Run the _PRW method. If present, extract the GPE
......@@ -282,7 +338,7 @@ acpi_ev_save_method_info (
******************************************************************************/
static acpi_status
acpi_ev_get_gpe_type (
acpi_ev_match_prw_and_gpe (
acpi_handle obj_handle,
u32 level,
void *info,
......@@ -299,19 +355,18 @@ acpi_ev_get_gpe_type (
acpi_status status;
ACPI_FUNCTION_TRACE ("ev_get_gpe_type");
ACPI_FUNCTION_TRACE ("ev_match_prw_and_gpe");
/* Check for a _PRW method under this device */
status = acpi_ut_evaluate_object (obj_handle, METHOD_NAME__PRW,
ACPI_BTYPE_PACKAGE, &pkg_desc);
if (status == AE_NOT_FOUND) {
if (ACPI_FAILURE (status)) {
/* Ignore all errors from _PRW, we don't want to abort the subsystem */
return_ACPI_STATUS (AE_OK);
}
else if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
/* The returned _PRW package must have at least two elements */
......@@ -370,16 +425,21 @@ acpi_ev_get_gpe_type (
if ((gpe_device == target_gpe_device) &&
(gpe_number >= gpe_block->block_base_number) &&
(gpe_number < gpe_block->block_base_number + (gpe_block->register_count * 8))) {
/* Mark GPE for WAKE but DISABLED (even for wake) */
gpe_event_info = &gpe_block->event_info[gpe_number - gpe_block->block_base_number];
gpe_event_info->flags |= ACPI_GPE_TYPE_WAKE;
/* Mark GPE for WAKE-ONLY but WAKE_DISABLED */
gpe_event_info->flags &= ~(ACPI_GPE_WAKE_ENABLED | ACPI_GPE_RUN_ENABLED);
status = acpi_ev_set_gpe_type (gpe_event_info, ACPI_GPE_TYPE_WAKE);
if (ACPI_FAILURE (status)) {
goto cleanup;
}
status = acpi_ev_update_gpe_enable_masks (gpe_event_info, ACPI_GPE_DISABLE);
}
cleanup:
acpi_ut_remove_reference (pkg_desc);
return_ACPI_STATUS (status);
return_ACPI_STATUS (AE_OK);
}
......@@ -742,7 +802,7 @@ acpi_ev_create_gpe_info_blocks (
/* Init the event_info for each GPE within this register */
for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
this_event->bit_mask = acpi_gbl_decode_to8bit[j];
this_event->register_bit = acpi_gbl_decode_to8bit[j];
this_event->register_info = this_register;
this_event++;
}
......@@ -817,6 +877,7 @@ acpi_ev_create_gpe_block (
acpi_status status;
struct acpi_gpe_walk_info gpe_info;
ACPI_FUNCTION_TRACE ("ev_create_gpe_block");
......@@ -835,6 +896,7 @@ acpi_ev_create_gpe_block (
gpe_block->register_count = register_count;
gpe_block->block_base_number = gpe_block_base_number;
gpe_block->node = gpe_device;
ACPI_MEMCPY (&gpe_block->block_address, gpe_block_address, sizeof (struct acpi_generic_address));
......@@ -854,18 +916,6 @@ acpi_ev_create_gpe_block (
return_ACPI_STATUS (status);
}
/* Dump info about this GPE block */
ACPI_DEBUG_PRINT ((ACPI_DB_INIT,
"GPE %02d to %02d [%4.4s] %d regs at %8.8X%8.8X on int %d\n",
gpe_block->block_base_number,
(u32) (gpe_block->block_base_number +
((gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH) -1)),
gpe_device->name.ascii,
gpe_block->register_count,
ACPI_FORMAT_UINT64 (gpe_block->block_address.address),
interrupt_level));
/* Find all GPE methods (_Lxx, _Exx) for this block */
status = acpi_ns_walk_namespace (ACPI_TYPE_METHOD, gpe_device,
......@@ -873,27 +923,28 @@ acpi_ev_create_gpe_block (
gpe_block, NULL);
/*
* Runtime option: Should Wake GPEs be enabled at runtime? The default is
* No, they should only be enabled just as the machine goes to sleep.
* Runtime option: Should Wake GPEs be enabled at runtime? The default
* is No, they should only be enabled just as the machine goes to sleep.
*/
if (acpi_gbl_leave_wake_gpes_disabled) {
/*
* Differentiate RUNTIME vs WAKE GPEs, via the _PRW control methods. (Each
* GPE that has one or more _PRWs that reference it is by definition a
* WAKE GPE and will not be enabled while the machine is running.)
* Differentiate RUNTIME vs WAKE GPEs, via the _PRW control methods.
* (Each GPE that has one or more _PRWs that reference it is by
* definition a WAKE GPE and will not be enabled while the machine
* is running.)
*/
gpe_info.gpe_block = gpe_block;
gpe_info.gpe_device = gpe_device;
status = acpi_ns_walk_namespace (ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK, acpi_ev_get_gpe_type,
ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK, acpi_ev_match_prw_and_gpe,
&gpe_info, NULL);
}
/*
* Enable all GPEs in this block that are 1) "runtime" GPEs, and 2) have
* a corresponding _Lxx or _Exx method. All other GPEs must be enabled via
* the acpi_enable_gpe() external interface.
* Enable all GPEs in this block that are 1) "runtime" or "run/wake" GPEs,
* and 2) have a corresponding _Lxx or _Exx method. All other GPEs must
* be enabled via the acpi_enable_gpe() external interface.
*/
wake_gpe_count = 0;
gpe_enabled_count = 0;
......@@ -903,23 +954,35 @@ acpi_ev_create_gpe_block (
/* Get the info block for this particular GPE */
gpe_event_info = &gpe_block->event_info[(i * ACPI_GPE_REGISTER_WIDTH) + j];
if ((gpe_event_info->method_node) &&
((gpe_event_info->flags & ACPI_GPE_TYPE_MASK) == ACPI_GPE_TYPE_RUNTIME)) {
/* Enable this GPE, it is 1) RUNTIME and 2) has an _Lxx or _Exx method */
status = acpi_hw_enable_gpe (gpe_event_info);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == ACPI_GPE_DISPATCH_METHOD) &&
(gpe_event_info->flags & ACPI_GPE_TYPE_RUNTIME)) {
gpe_enabled_count++;
}
if ((gpe_event_info->flags & ACPI_GPE_TYPE_MASK) == ACPI_GPE_TYPE_WAKE) {
if (gpe_event_info->flags & ACPI_GPE_TYPE_WAKE) {
wake_gpe_count++;
}
}
}
/* Dump info about this GPE block */
ACPI_DEBUG_PRINT ((ACPI_DB_INIT,
"GPE %02X to %02X [%4.4s] %u regs at %8.8X%8.8X on int 0x%X\n",
(u32) gpe_block->block_base_number,
(u32) (gpe_block->block_base_number +
((gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH) -1)),
gpe_device->name.ascii,
gpe_block->register_count,
ACPI_FORMAT_UINT64 (gpe_block->block_address.address),
interrupt_level));
/* Enable all valid GPEs found above */
status = acpi_hw_enable_runtime_gpe_block (NULL, gpe_block);
ACPI_DEBUG_PRINT ((ACPI_DB_INIT,
"Found %u Wake, Enabled %u Runtime GPEs in this block\n",
wake_gpe_count, gpe_enabled_count));
......@@ -1056,7 +1119,8 @@ acpi_ev_gpe_initialize (
if ((register_count0 + register_count1) == 0) {
/* GPEs are not required by ACPI, this is OK */
ACPI_REPORT_INFO (("There are no GPE blocks defined in the FADT\n"));
ACPI_DEBUG_PRINT ((ACPI_DB_INIT,
"There are no GPE blocks defined in the FADT\n"));
status = AE_OK;
goto cleanup;
}
......
......@@ -139,8 +139,7 @@ acpi_ev_queue_notify_request (
acpi_notify_value_names[notify_value]));
}
else {
ACPI_DEBUG_PRINT ((ACPI_DB_INFO,
"notify value: 0x%2.2x **Device Specific**\n",
ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Notify value: 0x%2.2X **Device Specific**\n",
notify_value));
}
......@@ -197,8 +196,8 @@ acpi_ev_queue_notify_request (
/* There is no per-device notify handler for this device */
ACPI_DEBUG_PRINT ((ACPI_DB_INFO,
"No notify handler for [%4.4s] node %p\n",
acpi_ut_get_node_name (node), node));
"No notify handler for Notify(%4.4s, %X) node %p\n",
acpi_ut_get_node_name (node), notify_value, node));
}
return (status);
......@@ -558,6 +557,10 @@ acpi_ev_terminate (void)
}
}
/* Deallocate all handler objects installed within GPE info structs */
status = acpi_ev_walk_gpe_list (acpi_ev_delete_gpe_handlers);
/* Return to original mode if necessary */
if (acpi_gbl_original_mode == ACPI_SYS_MODE_LEGACY) {
......
......@@ -61,7 +61,7 @@ static u8 acpi_gbl_default_address_spaces[ACPI_NUM_DEFAULT_SPA
/*******************************************************************************
*
* FUNCTION: acpi_ev_init_address_spaces
* FUNCTION: acpi_ev_install_region_handlers
*
* PARAMETERS: None
*
......@@ -72,15 +72,20 @@ static u8 acpi_gbl_default_address_spaces[ACPI_NUM_DEFAULT_SPA
******************************************************************************/
acpi_status
acpi_ev_init_address_spaces (
acpi_ev_install_region_handlers (
void) {
acpi_status status;
acpi_native_uint i;
ACPI_FUNCTION_TRACE ("ev_init_address_spaces");
ACPI_FUNCTION_TRACE ("ev_install_region_handlers");
status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
/*
* All address spaces (PCI Config, EC, SMBus) are scope dependent
* and registration must occur for a specific device.
......@@ -99,9 +104,8 @@ acpi_ev_init_address_spaces (
* has already been installed (via acpi_install_address_space_handler).
* Similar for AE_SAME_HANDLER.
*/
for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) {
status = acpi_install_address_space_handler ((acpi_handle) acpi_gbl_root_node,
status = acpi_ev_install_space_handler (acpi_gbl_root_node,
acpi_gbl_default_address_spaces[i],
ACPI_DEFAULT_HANDLER, NULL, NULL);
switch (status) {
......@@ -111,15 +115,63 @@ acpi_ev_init_address_spaces (
/* These exceptions are all OK */
status = AE_OK;
break;
default:
return_ACPI_STATUS (status);
goto unlock_and_exit;
}
}
return_ACPI_STATUS (AE_OK);
unlock_and_exit:
(void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE);
return_ACPI_STATUS (status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_initialize_op_regions
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Execute _REG methods for all Operation Regions that have
* an installed default region handler.
*
******************************************************************************/
acpi_status
acpi_ev_initialize_op_regions (
void)
{
acpi_status status;
acpi_native_uint i;
ACPI_FUNCTION_TRACE ("ev_initialize_op_regions");
status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
/*
* Run the _REG methods for op_regions in each default address space
*/
for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) {
/* TBD: Make sure handler is the DEFAULT handler, otherwise
* _REG will have already been run.
*/
status = acpi_ev_execute_reg_methods (acpi_gbl_root_node,
acpi_gbl_default_address_spaces[i]);
}
(void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE);
return_ACPI_STATUS (status);
}
......@@ -138,11 +190,12 @@ acpi_ev_init_address_spaces (
acpi_status
acpi_ev_execute_reg_method (
union acpi_operand_object *region_obj,
union acpi_operand_object *region_obj,
u32 function)
{
union acpi_operand_object *params[3];
union acpi_operand_object *region_obj2;
struct acpi_parameter_info info;
union acpi_operand_object *params[3];
union acpi_operand_object *region_obj2;
acpi_status status;
......@@ -159,10 +212,11 @@ acpi_ev_execute_reg_method (
}
/*
* _REG method has two arguments
* Arg0: Integer: Operation region space ID
* The _REG method has two arguments:
*
* Arg0, Integer: Operation region space ID
* Same value as region_obj->Region.space_id
* Arg1: Integer: connection status
* Arg1, Integer: connection status
* 1 for connecting the handler,
* 0 for disconnecting the handler
* Passed as a parameter
......@@ -184,10 +238,15 @@ acpi_ev_execute_reg_method (
params[1]->integer.value = function;
params[2] = NULL;
info.node = region_obj2->extra.method_REG;
info.parameters = params;
info.parameter_type = ACPI_PARAM_ARGS;
/* Execute the method, no return value */
ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname (ACPI_TYPE_METHOD, region_obj2->extra.method_REG, NULL));
status = acpi_ns_evaluate_by_handle (region_obj2->extra.method_REG, params, NULL);
ACPI_DEBUG_EXEC (acpi_ut_display_init_pathname (
ACPI_TYPE_METHOD, info.node, NULL));
status = acpi_ns_evaluate_by_handle (&info);
acpi_ut_remove_reference (params[1]);
......@@ -326,7 +385,7 @@ acpi_ev_address_space_dispatch (
ACPI_FORMAT_UINT64 (address),
acpi_ut_get_region_name (region_obj->region.space_id)));
if (!(handler_desc->address_space.flags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
if (!(handler_desc->address_space.hflags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
/*
* For handlers other than the default (supplied) handlers, we must
* exit the interpreter because the handler *might* block -- we don't
......@@ -347,7 +406,7 @@ acpi_ev_address_space_dispatch (
acpi_format_exception (status)));
}
if (!(handler_desc->address_space.flags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
if (!(handler_desc->address_space.hflags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
/*
* We just returned from a non-default handler, we must re-enter the
* interpreter
......@@ -676,6 +735,273 @@ acpi_ev_install_handler (
return (status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_install_space_handler
*
* PARAMETERS: Node - Namespace node for the device
* space_id - The address space ID
* Handler - Address of the handler
* Setup - Address of the setup function
* Context - Value passed to the handler on each access
*
* RETURN: Status
*
* DESCRIPTION: Install a handler for all op_regions of a given space_id.
* Assumes namespace is locked
*
******************************************************************************/
acpi_status
acpi_ev_install_space_handler (
struct acpi_namespace_node *node,
acpi_adr_space_type space_id,
acpi_adr_space_handler handler,
acpi_adr_space_setup setup,
void *context)
{
union acpi_operand_object *obj_desc;
union acpi_operand_object *handler_obj;
acpi_status status;
acpi_object_type type;
u16 flags = 0;
ACPI_FUNCTION_TRACE ("ev_install_space_handler");
/*
* This registration is valid for only the types below
* and the root. This is where the default handlers
* get placed.
*/
if ((node->type != ACPI_TYPE_DEVICE) &&
(node->type != ACPI_TYPE_PROCESSOR) &&
(node->type != ACPI_TYPE_THERMAL) &&
(node != acpi_gbl_root_node)) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
}
if (handler == ACPI_DEFAULT_HANDLER) {
flags = ACPI_ADDR_HANDLER_DEFAULT_INSTALLED;
switch (space_id) {
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
handler = acpi_ex_system_memory_space_handler;
setup = acpi_ev_system_memory_region_setup;
break;
case ACPI_ADR_SPACE_SYSTEM_IO:
handler = acpi_ex_system_io_space_handler;
setup = acpi_ev_io_space_region_setup;
break;
case ACPI_ADR_SPACE_PCI_CONFIG:
handler = acpi_ex_pci_config_space_handler;
setup = acpi_ev_pci_config_region_setup;
break;
case ACPI_ADR_SPACE_CMOS:
handler = acpi_ex_cmos_space_handler;
setup = acpi_ev_cmos_region_setup;
break;
case ACPI_ADR_SPACE_PCI_BAR_TARGET:
handler = acpi_ex_pci_bar_space_handler;
setup = acpi_ev_pci_bar_region_setup;
break;
case ACPI_ADR_SPACE_DATA_TABLE:
handler = acpi_ex_data_table_space_handler;
setup = NULL;
break;
default:
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
}
}
/* If the caller hasn't specified a setup routine, use the default */
if (!setup) {
setup = acpi_ev_default_region_setup;
}
/* Check for an existing internal object */
obj_desc = acpi_ns_get_attached_object (node);
if (obj_desc) {
/*
* The attached device object already exists.
* Make sure the handler is not already installed.
*/
handler_obj = obj_desc->device.handler;
/* Walk the handler list for this device */
while (handler_obj) {
/* Same space_id indicates a handler already installed */
if (handler_obj->address_space.space_id == space_id) {
if (handler_obj->address_space.handler == handler) {
/*
* It is (relatively) OK to attempt to install the SAME
* handler twice. This can easily happen with PCI_Config space.
*/
status = AE_SAME_HANDLER;
goto unlock_and_exit;
}
else {
/* A handler is already installed */
status = AE_ALREADY_EXISTS;
}
goto unlock_and_exit;
}
/* Walk the linked list of handlers */
handler_obj = handler_obj->address_space.next;
}
}
else {
ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION,
"Creating object on Device %p while installing handler\n", node));
/* obj_desc does not exist, create one */
if (node->type == ACPI_TYPE_ANY) {
type = ACPI_TYPE_DEVICE;
}
else {
type = node->type;
}
obj_desc = acpi_ut_create_internal_object (type);
if (!obj_desc) {
status = AE_NO_MEMORY;
goto unlock_and_exit;
}
/* Init new descriptor */
obj_desc->common.type = (u8) type;
/* Attach the new object to the Node */
status = acpi_ns_attach_object (node, obj_desc, type);
/* Remove local reference to the object */
acpi_ut_remove_reference (obj_desc);
if (ACPI_FAILURE (status)) {
goto unlock_and_exit;
}
}
ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION,
"Installing address handler for region %s(%X) on Device %4.4s %p(%p)\n",
acpi_ut_get_region_name (space_id), space_id,
acpi_ut_get_node_name (node), node, obj_desc));
/*
* Install the handler
*
* At this point there is no existing handler.
* Just allocate the object for the handler and link it
* into the list.
*/
handler_obj = acpi_ut_create_internal_object (ACPI_TYPE_LOCAL_ADDRESS_HANDLER);
if (!handler_obj) {
status = AE_NO_MEMORY;
goto unlock_and_exit;
}
/* Init handler obj */
handler_obj->address_space.space_id = (u8) space_id;
handler_obj->address_space.hflags = flags;
handler_obj->address_space.region_list = NULL;
handler_obj->address_space.node = node;
handler_obj->address_space.handler = handler;
handler_obj->address_space.context = context;
handler_obj->address_space.setup = setup;
/* Install at head of Device.address_space list */
handler_obj->address_space.next = obj_desc->device.handler;
/*
* The Device object is the first reference on the handler_obj.
* Each region that uses the handler adds a reference.
*/
obj_desc->device.handler = handler_obj;
/*
* Walk the namespace finding all of the regions this
* handler will manage.
*
* Start at the device and search the branch toward
* the leaf nodes until either the leaf is encountered or
* a device is detected that has an address handler of the
* same type.
*
* In either case, back up and search down the remainder
* of the branch
*/
status = acpi_ns_walk_namespace (ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
ACPI_NS_WALK_UNLOCK, acpi_ev_install_handler,
handler_obj, NULL);
unlock_and_exit:
return_ACPI_STATUS (status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_execute_reg_methods
*
* PARAMETERS: Node - Namespace node for the device
* space_id - The address space ID
*
* RETURN: Status
*
* DESCRIPTION: Run _REG methods for the Space ID;
* Note: assumes namespace is locked, or system init time.
*
******************************************************************************/
acpi_status
acpi_ev_execute_reg_methods (
struct acpi_namespace_node *node,
acpi_adr_space_type space_id)
{
acpi_status status;
ACPI_FUNCTION_TRACE ("ev_execute_reg_methods");
/*
* Run all _REG methods for all Operation Regions for this
* space ID. This is a separate walk in order to handle any
* interdependencies between regions and _REG methods. (i.e. handlers
* must be installed for all regions of this Space ID before we
* can run any _REG methods)
*/
status = acpi_ns_walk_namespace (ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run,
&space_id, NULL);
return_ACPI_STATUS (status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_reg_run
......@@ -693,19 +1019,13 @@ acpi_ev_reg_run (
void *context,
void **return_value)
{
union acpi_operand_object *handler_obj;
union acpi_operand_object *obj_desc;
struct acpi_namespace_node *node;
acpi_adr_space_type space_id;
acpi_status status;
handler_obj = (union acpi_operand_object *) context;
/* Parameter validation */
if (!handler_obj) {
return (AE_OK);
}
space_id = *ACPI_CAST_PTR (acpi_adr_space_type, context);
/* Convert and validate the device handle */
......@@ -732,10 +1052,9 @@ acpi_ev_reg_run (
return (AE_OK);
}
/* Object is a Region */
if (obj_desc->region.space_id != handler_obj->address_space.space_id) {
if (obj_desc->region.space_id != space_id) {
/*
* This region is for a different address space
* -- just ignore it
......
......@@ -188,6 +188,7 @@ acpi_remove_fixed_event_handler (
* handler_type - The type of handler:
* ACPI_SYSTEM_NOTIFY: system_handler (00-7f)
* ACPI_DEVICE_NOTIFY: driver_handler (80-ff)
* ACPI_ALL_NOTIFY: both system and device
* Handler - Address of the handler
* Context - Value passed to the handler on each GPE
*
......@@ -243,20 +244,21 @@ acpi_install_notify_handler (
if (device == ACPI_ROOT_OBJECT) {
/* Make sure the handler is not already installed */
if (((handler_type == ACPI_SYSTEM_NOTIFY) &&
acpi_gbl_system_notify.handler) ||
((handler_type == ACPI_DEVICE_NOTIFY) &&
if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
acpi_gbl_system_notify.handler) ||
((handler_type & ACPI_DEVICE_NOTIFY) &&
acpi_gbl_device_notify.handler)) {
status = AE_ALREADY_EXISTS;
goto unlock_and_exit;
}
if (handler_type == ACPI_SYSTEM_NOTIFY) {
if (handler_type & ACPI_SYSTEM_NOTIFY) {
acpi_gbl_system_notify.node = node;
acpi_gbl_system_notify.handler = handler;
acpi_gbl_system_notify.context = context;
}
else /* ACPI_DEVICE_NOTIFY */ {
if (handler_type & ACPI_DEVICE_NOTIFY) {
acpi_gbl_device_notify.node = node;
acpi_gbl_device_notify.handler = handler;
acpi_gbl_device_notify.context = context;
......@@ -284,9 +286,9 @@ acpi_install_notify_handler (
if (obj_desc) {
/* Object exists - make sure there's no handler */
if (((handler_type == ACPI_SYSTEM_NOTIFY) &&
if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
obj_desc->common_notify.system_notify) ||
((handler_type == ACPI_DEVICE_NOTIFY) &&
((handler_type & ACPI_DEVICE_NOTIFY) &&
obj_desc->common_notify.device_notify)) {
status = AE_ALREADY_EXISTS;
goto unlock_and_exit;
......@@ -308,7 +310,6 @@ acpi_install_notify_handler (
/* Remove local reference to the object */
acpi_ut_remove_reference (obj_desc);
if (ACPI_FAILURE (status)) {
goto unlock_and_exit;
}
......@@ -326,12 +327,19 @@ acpi_install_notify_handler (
notify_obj->notify.handler = handler;
notify_obj->notify.context = context;
if (handler_type == ACPI_SYSTEM_NOTIFY) {
if (handler_type & ACPI_SYSTEM_NOTIFY) {
obj_desc->common_notify.system_notify = notify_obj;
}
else /* ACPI_DEVICE_NOTIFY */ {
if (handler_type & ACPI_DEVICE_NOTIFY) {
obj_desc->common_notify.device_notify = notify_obj;
}
if (handler_type == ACPI_ALL_NOTIFY) {
/* Extra ref if installed in both */
acpi_ut_add_reference (notify_obj);
}
}
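Because handler_type is now treated as a bit mask, ACPI_ALL_NOTIFY (system | device) registers one handler for both notify ranges in a single call. A minimal usage sketch with a hypothetical device handle and handler:

static void
example_notify_handler (acpi_handle device, u32 value, void *context)
{
	/* Values 0x00-0x7F (system) and 0x80-0xFF (device-specific)
	 * both arrive here when registered with ACPI_ALL_NOTIFY */
}

static acpi_status
example_register_notify (acpi_handle device_handle)
{
	return (acpi_install_notify_handler (device_handle, ACPI_ALL_NOTIFY,
			 example_notify_handler, NULL));
}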
......@@ -349,6 +357,7 @@ acpi_install_notify_handler (
* handler_type - The type of handler:
* ACPI_SYSTEM_NOTIFY: system_handler (00-7f)
* ACPI_DEVICE_NOTIFY: driver_handler (80-ff)
* ACPI_ALL_NOTIFY: both system and device
* Handler - Address of the handler
* RETURN: Status
*
......@@ -398,9 +407,9 @@ acpi_remove_notify_handler (
if (device == ACPI_ROOT_OBJECT) {
ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Removing notify handler for ROOT object.\n"));
if (((handler_type == ACPI_SYSTEM_NOTIFY) &&
!acpi_gbl_system_notify.handler) ||
((handler_type == ACPI_DEVICE_NOTIFY) &&
if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
!acpi_gbl_system_notify.handler) ||
((handler_type & ACPI_DEVICE_NOTIFY) &&
!acpi_gbl_device_notify.handler)) {
status = AE_NOT_EXIST;
goto unlock_and_exit;
......@@ -415,12 +424,13 @@ acpi_remove_notify_handler (
return_ACPI_STATUS (status);
}
if (handler_type == ACPI_SYSTEM_NOTIFY) {
if (handler_type & ACPI_SYSTEM_NOTIFY) {
acpi_gbl_system_notify.node = NULL;
acpi_gbl_system_notify.handler = NULL;
acpi_gbl_system_notify.context = NULL;
}
else {
if (handler_type & ACPI_DEVICE_NOTIFY) {
acpi_gbl_device_notify.node = NULL;
acpi_gbl_device_notify.handler = NULL;
acpi_gbl_device_notify.context = NULL;
......@@ -448,38 +458,47 @@ acpi_remove_notify_handler (
/* Object exists - make sure there's an existing handler */
if (handler_type == ACPI_SYSTEM_NOTIFY) {
if (handler_type & ACPI_SYSTEM_NOTIFY) {
notify_obj = obj_desc->common_notify.system_notify;
}
else {
notify_obj = obj_desc->common_notify.device_notify;
}
if ((!notify_obj) ||
(notify_obj->notify.handler != handler)) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
}
/* Make sure all deferred tasks are completed */
if ((!notify_obj) ||
(notify_obj->notify.handler != handler)) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
}
(void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE);
acpi_os_wait_events_complete(NULL);
status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
/* Make sure all deferred tasks are completed */
/* Remove the handler */
obj_desc->common_notify.system_notify = NULL;
acpi_ut_remove_reference (notify_obj);
}
(void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE);
acpi_os_wait_events_complete(NULL);
status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
if (handler_type & ACPI_DEVICE_NOTIFY) {
notify_obj = obj_desc->common_notify.device_notify;
if ((!notify_obj) ||
(notify_obj->notify.handler != handler)) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
}
/* Make sure all deferred tasks are completed */
/* Remove the handler */
(void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE);
acpi_os_wait_events_complete(NULL);
status = acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
if (handler_type == ACPI_SYSTEM_NOTIFY) {
obj_desc->common_notify.system_notify = NULL;
}
else {
/* Remove the handler */
obj_desc->common_notify.device_notify = NULL;
acpi_ut_remove_reference (notify_obj);
}
acpi_ut_remove_reference (notify_obj);
}
......@@ -497,7 +516,7 @@ acpi_remove_notify_handler (
* gpe_block - GPE block (NULL == FADT GPEs)
* Type - Whether this GPE should be treated as an
* edge- or level-triggered interrupt.
* Handler - Address of the handler
* Address - Address of the handler
* Context - Value passed to the handler on each GPE
*
* RETURN: Status
......@@ -511,11 +530,12 @@ acpi_install_gpe_handler (
acpi_handle gpe_device,
u32 gpe_number,
u32 type,
acpi_gpe_handler handler,
acpi_event_handler address,
void *context)
{
acpi_status status;
struct acpi_gpe_event_info *gpe_event_info;
struct acpi_handler_info *handler;
acpi_status status;
ACPI_FUNCTION_TRACE ("acpi_install_gpe_handler");
......@@ -523,7 +543,7 @@ acpi_install_gpe_handler (
/* Parameter validation */
if (!handler) {
if ((!address) || (type > ACPI_GPE_XRUPT_TYPE_MASK)) {
return_ACPI_STATUS (AE_BAD_PARAMETER);
}
......@@ -542,27 +562,41 @@ acpi_install_gpe_handler (
/* Make sure that there isn't a handler there already */
if (gpe_event_info->handler) {
if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == ACPI_GPE_DISPATCH_HANDLER) {
status = AE_ALREADY_EXISTS;
goto unlock_and_exit;
}
/* Install the handler */
/* Allocate and init handler object */
acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);
gpe_event_info->handler = handler;
gpe_event_info->context = context;
gpe_event_info->flags = (u8) type;
acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);
handler = ACPI_MEM_CALLOCATE (sizeof (struct acpi_handler_info));
if (!handler) {
status = AE_NO_MEMORY;
goto unlock_and_exit;
}
handler->address = address;
handler->context = context;
handler->method_node = gpe_event_info->dispatch.method_node;
/* Clear the GPE (of stale events), then enable it */
/* Disable the GPE before installing the handler */
status = acpi_hw_clear_gpe (gpe_event_info);
status = acpi_ev_disable_gpe (gpe_event_info);
if (ACPI_FAILURE (status)) {
goto unlock_and_exit;
}
status = acpi_hw_enable_gpe (gpe_event_info);
/* Install the handler */
acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);
gpe_event_info->dispatch.handler = handler;
/* Setup up dispatch flags to indicate handler (vs. method) */
gpe_event_info->flags &= ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); /* Clear bits */
gpe_event_info->flags |= (u8) (type | ACPI_GPE_DISPATCH_HANDLER);
acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);
unlock_and_exit:
......@@ -577,7 +611,7 @@ acpi_install_gpe_handler (
*
* PARAMETERS: gpe_number - The event to remove a handler
* gpe_block - GPE block (NULL == FADT GPEs)
* Handler - Address of the handler
* Address - Address of the handler
*
* RETURN: Status
*
......@@ -589,10 +623,11 @@ acpi_status
acpi_remove_gpe_handler (
acpi_handle gpe_device,
u32 gpe_number,
acpi_gpe_handler handler)
acpi_event_handler address)
{
acpi_status status;
struct acpi_gpe_event_info *gpe_event_info;
struct acpi_handler_info *handler;
acpi_status status;
ACPI_FUNCTION_TRACE ("acpi_remove_gpe_handler");
......@@ -600,7 +635,7 @@ acpi_remove_gpe_handler (
/* Parameter validation */
if (!handler) {
if (!address) {
return_ACPI_STATUS (AE_BAD_PARAMETER);
}
......@@ -617,21 +652,27 @@ acpi_remove_gpe_handler (
goto unlock_and_exit;
}
/* Disable the GPE before removing the handler */
/* Make sure that a handler is indeed installed */
status = acpi_hw_disable_gpe (gpe_event_info);
if (ACPI_FAILURE (status)) {
if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) != ACPI_GPE_DISPATCH_HANDLER) {
status = AE_NOT_EXIST;
goto unlock_and_exit;
}
/* Make sure that the installed handler is the same */
if (gpe_event_info->handler != handler) {
(void) acpi_hw_enable_gpe (gpe_event_info);
if (gpe_event_info->dispatch.handler->address != address) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
}
/* Disable the GPE before removing the handler */
status = acpi_ev_disable_gpe (gpe_event_info);
if (ACPI_FAILURE (status)) {
goto unlock_and_exit;
}
/* Make sure all deferred tasks are completed */
(void) acpi_ut_release_mutex (ACPI_MTX_EVENTS);
......@@ -644,10 +685,21 @@ acpi_remove_gpe_handler (
/* Remove the handler */
acpi_os_acquire_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);
gpe_event_info->handler = NULL;
gpe_event_info->context = NULL;
handler = gpe_event_info->dispatch.handler;
/* Restore Method node (if any), set dispatch flags */
gpe_event_info->dispatch.method_node = handler->method_node;
gpe_event_info->flags &= ~ACPI_GPE_DISPATCH_MASK; /* Clear bits */
if (handler->method_node) {
gpe_event_info->flags |= ACPI_GPE_DISPATCH_METHOD;
}
acpi_os_release_lock (acpi_gbl_gpe_lock, ACPI_NOT_ISR);
/* Now we can free the handler object */
ACPI_MEM_FREE (handler);
unlock_and_exit:
(void) acpi_ut_release_mutex (ACPI_MTX_EVENTS);
......
......@@ -204,12 +204,11 @@ acpi_enable_event (
/*******************************************************************************
*
* FUNCTION: acpi_enable_gpe
* FUNCTION: acpi_set_gpe_type
*
* PARAMETERS: gpe_device - Parent GPE Device
* gpe_number - GPE level within the GPE block
* Flags - Just enable, or also wake enable?
* Called from ISR or not
* Type - New GPE type
*
* RETURN: Status
*
......@@ -218,26 +217,17 @@ acpi_enable_event (
******************************************************************************/
acpi_status
acpi_enable_gpe (
acpi_set_gpe_type (
acpi_handle gpe_device,
u32 gpe_number,
u32 flags)
u8 type)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
ACPI_FUNCTION_TRACE ("acpi_enable_gpe");
ACPI_FUNCTION_TRACE ("acpi_set_gpe_type");
/* Use semaphore lock if not executing at interrupt level */
if (flags & ACPI_NOT_ISR) {
status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
}
/* Ensure that we have a valid GPE number */
......@@ -247,91 +237,72 @@ acpi_enable_gpe (
goto unlock_and_exit;
}
/* Check for Wake vs Runtime GPE */
if (flags & ACPI_EVENT_WAKE_ENABLE) {
/* Ensure the requested wake GPE is disabled */
status = acpi_hw_disable_gpe (gpe_event_info);
if (ACPI_FAILURE (status)) {
goto unlock_and_exit;
}
/* Defer Enable of Wake GPE until sleep time */
acpi_hw_enable_gpe_for_wakeup (gpe_event_info);
if ((gpe_event_info->flags & ACPI_GPE_TYPE_MASK) == type) {
return_ACPI_STATUS (AE_OK);
}
else {
/* Enable the requested runtime GPE */
status = acpi_hw_enable_gpe (gpe_event_info);
if (ACPI_FAILURE (status)) {
goto unlock_and_exit;
}
}
/* Set the new type (will disable GPE if currently enabled) */
status = acpi_ev_set_gpe_type (gpe_event_info, type);
unlock_and_exit:
if (flags & ACPI_NOT_ISR) {
(void) acpi_ut_release_mutex (ACPI_MTX_EVENTS);
}
return_ACPI_STATUS (status);
}
/*******************************************************************************
*
* FUNCTION: acpi_disable_event
* FUNCTION: acpi_enable_gpe
*
* PARAMETERS: Event - The fixed eventto be enabled
* Flags - Reserved
* PARAMETERS: gpe_device - Parent GPE Device
* gpe_number - GPE level within the GPE block
* Flags - Just enable, or also wake enable?
* Called from ISR or not
*
* RETURN: Status
*
* DESCRIPTION: Disable an ACPI event (fixed)
* DESCRIPTION: Enable an ACPI event (general purpose)
*
******************************************************************************/
acpi_status
acpi_disable_event (
u32 event,
acpi_enable_gpe (
acpi_handle gpe_device,
u32 gpe_number,
u32 flags)
{
acpi_status status = AE_OK;
u32 value;
struct acpi_gpe_event_info *gpe_event_info;
ACPI_FUNCTION_TRACE ("acpi_disable_event");
ACPI_FUNCTION_TRACE ("acpi_enable_gpe");
/* Decode the Fixed Event */
/* Use semaphore lock if not executing at interrupt level */
if (event > ACPI_EVENT_MAX) {
return_ACPI_STATUS (AE_BAD_PARAMETER);
if (flags & ACPI_NOT_ISR) {
status = acpi_ut_acquire_mutex (ACPI_MTX_EVENTS);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
}
/*
* Disable the requested fixed event (by writing a zero to the
* enable register bit)
*/
status = acpi_set_register (acpi_gbl_fixed_event_info[event].enable_register_id,
0, ACPI_MTX_LOCK);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
/* Ensure that we have a valid GPE number */
status = acpi_get_register (acpi_gbl_fixed_event_info[event].enable_register_id,
&value, ACPI_MTX_LOCK);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
gpe_event_info = acpi_ev_get_gpe_event_info (gpe_device, gpe_number);
if (!gpe_event_info) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
}
if (value != 0) {
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
"Could not disable %s events\n", acpi_ut_get_event_name (event)));
return_ACPI_STATUS (AE_NO_HARDWARE_RESPONSE);
}
/* Perform the enable */
status = acpi_ev_enable_gpe (gpe_event_info, TRUE);
unlock_and_exit:
if (flags & ACPI_NOT_ISR) {
(void) acpi_ut_release_mutex (ACPI_MTX_EVENTS);
}
return_ACPI_STATUS (status);
}
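With this patch the GPE "type" and the enable state are configured through separate calls. A minimal, hypothetical caller sketch (not part of this patch) for a GPE in the FADT block, assuming process context and that an ACPI_GPE_TYPE_RUNTIME constant exists alongside the ACPI_GPE_TYPE_* names used by the type mask above:

	/* Hypothetical driver-side sketch, not part of this patch */
	acpi_status status;

	/* Declare GPE 0x11 as a runtime GPE, then enable it (not called from an ISR) */
	status = acpi_set_gpe_type (NULL, 0x11, ACPI_GPE_TYPE_RUNTIME);
	if (ACPI_SUCCESS (status)) {
		status = acpi_enable_gpe (NULL, 0x11, ACPI_NOT_ISR);
	}

The NULL handle selects the FADT-defined GPE block; the GPE number and the constant name are illustrative assumptions only.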
......@@ -342,7 +313,7 @@ acpi_disable_event (
*
* PARAMETERS: gpe_device - Parent GPE Device
* gpe_number - GPE level within the GPE block
* Flags - Just enable, or also wake enable?
* Flags - Just disable, or also wake disable?
* Called from ISR or not
*
* RETURN: Status
......@@ -381,21 +352,69 @@ acpi_disable_gpe (
goto unlock_and_exit;
}
status = acpi_ev_disable_gpe (gpe_event_info);
unlock_and_exit:
if (flags & ACPI_NOT_ISR) {
(void) acpi_ut_release_mutex (ACPI_MTX_EVENTS);
}
return_ACPI_STATUS (status);
}
/*******************************************************************************
*
* FUNCTION: acpi_disable_event
*
* PARAMETERS: Event - The fixed event to be disabled
* Flags - Reserved
*
* RETURN: Status
*
* DESCRIPTION: Disable an ACPI event (fixed)
*
******************************************************************************/
acpi_status
acpi_disable_event (
u32 event,
u32 flags)
{
acpi_status status = AE_OK;
u32 value;
ACPI_FUNCTION_TRACE ("acpi_disable_event");
/* Decode the Fixed Event */
if (event > ACPI_EVENT_MAX) {
return_ACPI_STATUS (AE_BAD_PARAMETER);
}
/*
* Only disable the requested GPE number for wake if specified.
* Otherwise, turn it totally off
* Disable the requested fixed event (by writing a zero to the
* enable register bit)
*/
if (flags & ACPI_EVENT_WAKE_DISABLE) {
acpi_hw_disable_gpe_for_wakeup (gpe_event_info);
status = acpi_set_register (acpi_gbl_fixed_event_info[event].enable_register_id,
0, ACPI_MTX_LOCK);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
else {
status = acpi_hw_disable_gpe (gpe_event_info);
status = acpi_get_register (acpi_gbl_fixed_event_info[event].enable_register_id,
&value, ACPI_MTX_LOCK);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
unlock_and_exit:
if (flags & ACPI_NOT_ISR) {
(void) acpi_ut_release_mutex (ACPI_MTX_EVENTS);
if (value != 0) {
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
"Could not disable %s events\n", acpi_ut_get_event_name (event)));
return_ACPI_STATUS (AE_NO_HARDWARE_RESPONSE);
}
return_ACPI_STATUS (status);
}
......
......@@ -46,7 +46,6 @@
#include <acpi/acpi.h>
#include <acpi/acnamesp.h>
#include <acpi/acevents.h>
#include <acpi/acinterp.h>
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME ("evxfregn")
......@@ -76,12 +75,8 @@ acpi_install_address_space_handler (
acpi_adr_space_setup setup,
void *context)
{
union acpi_operand_object *obj_desc;
union acpi_operand_object *handler_obj;
struct acpi_namespace_node *node;
acpi_status status;
acpi_object_type type;
u16 flags = 0;
ACPI_FUNCTION_TRACE ("acpi_install_address_space_handler");
......@@ -106,202 +101,16 @@ acpi_install_address_space_handler (
goto unlock_and_exit;
}
/*
* This registration is valid for only the types below
* and the root. This is where the default handlers
* get placed.
*/
if ((node->type != ACPI_TYPE_DEVICE) &&
(node->type != ACPI_TYPE_PROCESSOR) &&
(node->type != ACPI_TYPE_THERMAL) &&
(node != acpi_gbl_root_node)) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
}
if (handler == ACPI_DEFAULT_HANDLER) {
flags = ACPI_ADDR_HANDLER_DEFAULT_INSTALLED;
switch (space_id) {
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
handler = acpi_ex_system_memory_space_handler;
setup = acpi_ev_system_memory_region_setup;
break;
case ACPI_ADR_SPACE_SYSTEM_IO:
handler = acpi_ex_system_io_space_handler;
setup = acpi_ev_io_space_region_setup;
break;
case ACPI_ADR_SPACE_PCI_CONFIG:
handler = acpi_ex_pci_config_space_handler;
setup = acpi_ev_pci_config_region_setup;
break;
case ACPI_ADR_SPACE_CMOS:
handler = acpi_ex_cmos_space_handler;
setup = acpi_ev_cmos_region_setup;
break;
case ACPI_ADR_SPACE_PCI_BAR_TARGET:
handler = acpi_ex_pci_bar_space_handler;
setup = acpi_ev_pci_bar_region_setup;
break;
case ACPI_ADR_SPACE_DATA_TABLE:
handler = acpi_ex_data_table_space_handler;
setup = NULL;
break;
default:
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
}
}
/* If the caller hasn't specified a setup routine, use the default */
if (!setup) {
setup = acpi_ev_default_region_setup;
}
/* Check for an existing internal object */
obj_desc = acpi_ns_get_attached_object (node);
if (obj_desc) {
/*
* The attached device object already exists.
* Make sure the handler is not already installed.
*/
handler_obj = obj_desc->device.handler;
/* Walk the handler list for this device */
while (handler_obj) {
/* Same space_id indicates a handler already installed */
if(handler_obj->address_space.space_id == space_id) {
if (handler_obj->address_space.handler == handler) {
/*
* It is (relatively) OK to attempt to install the SAME
* handler twice. This can easily happen with PCI_Config space.
*/
status = AE_SAME_HANDLER;
goto unlock_and_exit;
}
else {
/* A handler is already installed */
status = AE_ALREADY_EXISTS;
}
goto unlock_and_exit;
}
/* Walk the linked list of handlers */
handler_obj = handler_obj->address_space.next;
}
}
else {
ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION,
"Creating object on Device %p while installing handler\n", node));
/* obj_desc does not exist, create one */
if (node->type == ACPI_TYPE_ANY) {
type = ACPI_TYPE_DEVICE;
}
else {
type = node->type;
}
obj_desc = acpi_ut_create_internal_object (type);
if (!obj_desc) {
status = AE_NO_MEMORY;
goto unlock_and_exit;
}
/* Init new descriptor */
obj_desc->common.type = (u8) type;
/* Attach the new object to the Node */
/* Install the handler for all Regions for this Space ID */
status = acpi_ns_attach_object (node, obj_desc, type);
/* Remove local reference to the object */
acpi_ut_remove_reference (obj_desc);
if (ACPI_FAILURE (status)) {
goto unlock_and_exit;
}
}
ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION,
"Installing address handler for region %s(%X) on Device %4.4s %p(%p)\n",
acpi_ut_get_region_name (space_id), space_id,
acpi_ut_get_node_name (node), node, obj_desc));
/*
* Install the handler
*
* At this point there is no existing handler.
* Just allocate the object for the handler and link it
* into the list.
*/
handler_obj = acpi_ut_create_internal_object (ACPI_TYPE_LOCAL_ADDRESS_HANDLER);
if (!handler_obj) {
status = AE_NO_MEMORY;
status = acpi_ev_install_space_handler (node, space_id, handler, setup, context);
if (ACPI_FAILURE (status)) {
goto unlock_and_exit;
}
/* Init handler obj */
handler_obj->address_space.space_id = (u8) space_id;
handler_obj->address_space.hflags = flags;
handler_obj->address_space.region_list = NULL;
handler_obj->address_space.node = node;
handler_obj->address_space.handler = handler;
handler_obj->address_space.context = context;
handler_obj->address_space.setup = setup;
/* Install at head of Device.address_space list */
handler_obj->address_space.next = obj_desc->device.handler;
/*
* The Device object is the first reference on the handler_obj.
* Each region that uses the handler adds a reference.
*/
obj_desc->device.handler = handler_obj;
/*
* Walk the namespace finding all of the regions this
* handler will manage.
*
* Start at the device and search the branch toward
* the leaf nodes until either the leaf is encountered or
* a device is detected that has an address handler of the
* same type.
*
* In either case, back up and search down the remainder
* of the branch
*/
status = acpi_ns_walk_namespace (ACPI_TYPE_ANY, device, ACPI_UINT32_MAX,
ACPI_NS_WALK_UNLOCK, acpi_ev_install_handler,
handler_obj, NULL);
/*
* Now we can run the _REG methods for all Regions for this
* space ID. This is a separate walk in order to handle any
* interdependencies between regions and _REG methods. (i.e. handlers
* must be installed for all regions of this Space ID before we
* can run any _REG methods.
*/
status = acpi_ns_walk_namespace (ACPI_TYPE_ANY, device, ACPI_UINT32_MAX,
ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run,
handler_obj, NULL);
/* Run all _REG methods for this address space */
status = acpi_ev_execute_reg_methods (node, space_id);
unlock_and_exit:
(void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE);
......
......@@ -48,6 +48,7 @@
#include <acpi/acnamesp.h>
#include <acpi/acevents.h>
#include <acpi/actables.h>
#include <acpi/acdispat.h>
#define _COMPONENT ACPI_EXECUTER
......@@ -285,7 +286,7 @@ acpi_ex_load_op (
union acpi_operand_object *ddb_handle;
union acpi_operand_object *buffer_desc = NULL;
struct acpi_table_header *table_ptr = NULL;
u8 *table_data_ptr;
acpi_physical_address address;
struct acpi_table_header table_header;
u32 i;
......@@ -300,18 +301,39 @@ acpi_ex_load_op (
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Load from Region %p %s\n",
obj_desc, acpi_ut_get_object_type_name (obj_desc)));
/* Get the table header */
/*
* If the Region Address and Length have not been previously evaluated,
* evaluate them now and save the results.
*/
if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) {
status = acpi_ds_get_region_arguments (obj_desc);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
}
/* Get the base physical address of the region */
address = obj_desc->region.address;
/* Get the table length from the table header */
table_header.length = 0;
for (i = 0; i < sizeof (struct acpi_table_header); i++) {
for (i = 0; i < 8; i++) {
status = acpi_ev_address_space_dispatch (obj_desc, ACPI_READ,
(acpi_physical_address) i, 8,
(acpi_physical_address) (i + address), 8,
((u8 *) &table_header) + i);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
}
/* Sanity check the table length */
if (table_header.length < sizeof (struct acpi_table_header)) {
return_ACPI_STATUS (AE_BAD_HEADER);
}
/* Allocate a buffer for the entire table */
table_ptr = ACPI_MEM_ALLOCATE (table_header.length);
......@@ -319,17 +341,12 @@ acpi_ex_load_op (
return_ACPI_STATUS (AE_NO_MEMORY);
}
/* Copy the header to the buffer */
ACPI_MEMCPY (table_ptr, &table_header, sizeof (struct acpi_table_header));
table_data_ptr = ACPI_PTR_ADD (u8, table_ptr, sizeof (struct acpi_table_header));
/* Get the table from the op region */
/* Get the entire table from the op region */
for (i = 0; i < table_header.length; i++) {
status = acpi_ev_address_space_dispatch (obj_desc, ACPI_READ,
(acpi_physical_address) i, 8,
((u8 *) table_data_ptr + i));
(acpi_physical_address) (i + address), 8,
((u8 *) table_ptr + i));
if (ACPI_FAILURE (status)) {
goto cleanup;
}
......@@ -355,6 +372,12 @@ acpi_ex_load_op (
}
table_ptr = ACPI_CAST_PTR (struct acpi_table_header, buffer_desc->buffer.pointer);
/* Sanity check the table length */
if (table_ptr->length < sizeof (struct acpi_table_header)) {
return_ACPI_STATUS (AE_BAD_HEADER);
}
break;
......
......@@ -277,7 +277,7 @@ acpi_ex_access_region (
rgn_desc->region.space_id));
}
else if (status == AE_NOT_EXIST) {
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
ACPI_REPORT_ERROR ((
"Region %s(%X) has no handler\n",
acpi_ut_get_region_name (rgn_desc->region.space_id),
rgn_desc->region.space_id));
......@@ -764,16 +764,85 @@ acpi_ex_set_buffer_datum (
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_common_buffer_setup
*
* PARAMETERS: obj_desc - Field object
* buffer_length - Length of caller's buffer
* datum_count - Where the datum_count is returned
*
* RETURN: Status, datum_count
*
* DESCRIPTION: Common code to validate the incoming buffer size and compute
* the number of field "datums" that must be read or written.
* A "datum" is the smallest unit that can be read or written
* to the field, it is either 1,2,4, or 8 bytes.
*
******************************************************************************/
acpi_status
acpi_ex_common_buffer_setup (
union acpi_operand_object *obj_desc,
u32 buffer_length,
u32 *datum_count)
{
u32 byte_field_length;
u32 actual_byte_field_length;
ACPI_FUNCTION_TRACE ("ex_common_buffer_setup");
/*
* The incoming buffer must be at least as long as the field; we do not
* allow "partial" field reads/writes. We do not care if the buffer is
* larger than the field; this typically happens when an integer is
* read/written to a field that is actually smaller than an integer.
*/
byte_field_length = ACPI_ROUND_BITS_UP_TO_BYTES (
obj_desc->common_field.bit_length);
if (byte_field_length > buffer_length) {
ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD,
"Field size %X (bytes) is too large for buffer (%X)\n",
byte_field_length, buffer_length));
return_ACPI_STATUS (AE_BUFFER_OVERFLOW);
}
/*
* Create "actual" field byte count (minimum number of bytes that
* must be read), then convert to datum count (minimum number
* of datum-sized units that must be read)
*/
actual_byte_field_length = ACPI_ROUND_BITS_UP_TO_BYTES (
obj_desc->common_field.start_field_bit_offset +
obj_desc->common_field.bit_length);
*datum_count = ACPI_ROUND_UP_TO (actual_byte_field_length,
obj_desc->common_field.access_byte_width);
ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD,
"buffer_bytes %X, actual_bytes %X, Datums %X, byte_gran %X\n",
byte_field_length, actual_byte_field_length,
*datum_count, obj_desc->common_field.access_byte_width));
return_ACPI_STATUS (AE_OK);
}
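A quick worked example of the arithmetic above, using a hypothetical field layout (bit_length 37, start_field_bit_offset 3, access_byte_width 4) and assuming that ACPI_ROUND_BITS_UP_TO_BYTES rounds a bit count up to whole bytes and ACPI_ROUND_UP_TO returns a rounded-up unit count:

	/* Standalone illustration of the datum-count math; the macro behavior
	 * stated above is an assumption, not quoted from this patch. */
	#include <stdio.h>

	int main (void)
	{
		unsigned int bit_length             = 37;   /* Hypothetical field */
		unsigned int start_field_bit_offset = 3;
		unsigned int access_byte_width      = 4;

		/* ACPI_ROUND_BITS_UP_TO_BYTES (37)      -> 5 bytes  */
		unsigned int byte_field_length = (bit_length + 7) / 8;

		/* ACPI_ROUND_BITS_UP_TO_BYTES (3 + 37)  -> 5 bytes  */
		unsigned int actual_byte_field_length =
			(start_field_bit_offset + bit_length + 7) / 8;

		/* ACPI_ROUND_UP_TO (5, 4)               -> 2 datums */
		unsigned int datum_count =
			(actual_byte_field_length + (access_byte_width - 1)) / access_byte_width;

		printf ("bytes=%u actual=%u datums=%u\n",
			byte_field_length, actual_byte_field_length, datum_count);
		return 0;
	}

So the caller's buffer must be at least 5 bytes long, and two 4-byte datums are transferred to cover the field.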
/*******************************************************************************
*
* FUNCTION: acpi_ex_extract_from_field
*
* PARAMETERS: *obj_desc - Field to be read
* *Value - Where to store value
* PARAMETERS: obj_desc - Field to be read
* Buffer - Where to store the field data
* buffer_length - Length of Buffer
*
* RETURN: Status
*
* DESCRIPTION: Retrieve the value of the given field
* DESCRIPTION: Retrieve the current value of the given field
*
******************************************************************************/
......@@ -789,7 +858,6 @@ acpi_ex_extract_from_field (
acpi_integer previous_raw_datum = 0;
acpi_integer this_raw_datum = 0;
acpi_integer merged_datum = 0;
u32 byte_field_length;
u32 datum_count;
u32 i;
......@@ -797,39 +865,13 @@ acpi_ex_extract_from_field (
ACPI_FUNCTION_TRACE ("ex_extract_from_field");
/*
* The field must fit within the caller's buffer
*/
byte_field_length = ACPI_ROUND_BITS_UP_TO_BYTES (obj_desc->common_field.bit_length);
if (byte_field_length > buffer_length) {
ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD,
"Field size %X (bytes) too large for buffer (%X)\n",
byte_field_length, buffer_length));
return_ACPI_STATUS (AE_BUFFER_OVERFLOW);
}
/* Convert field byte count to datum count, round up if necessary */
/* Validate buffer, compute number of datums */
datum_count = ACPI_ROUND_UP_TO (byte_field_length,
obj_desc->common_field.access_byte_width);
/*
* If the field is not aligned on a datum boundary and does not
* fit within a single datum, we must read an extra datum.
*
* We could just split the aligned and non-aligned cases since the
* aligned case is so very simple, but this would require more code.
*/
if ((obj_desc->common_field.end_field_valid_bits != 0) &&
(!(obj_desc->common_field.flags & AOPOBJ_SINGLE_DATUM))) {
datum_count++;
status = acpi_ex_common_buffer_setup (obj_desc, buffer_length, &datum_count);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD,
"byte_len %X, datum_len %X, byte_gran %X\n",
byte_field_length, datum_count,obj_desc->common_field.access_byte_width));
/*
* Clear the caller's buffer (the whole buffer length as given)
* This is very important, especially in the cases where the buffer
......@@ -942,12 +984,13 @@ acpi_ex_extract_from_field (
*
* FUNCTION: acpi_ex_insert_into_field
*
* PARAMETERS: *obj_desc - Field to be set
* Buffer - Value to store
* PARAMETERS: obj_desc - Field to be written
* Buffer - Data to be written
* buffer_length - Length of Buffer
*
* RETURN: Status
*
* DESCRIPTION: Store the value into the given field
* DESCRIPTION: Store the Buffer contents into the given field
*
******************************************************************************/
......@@ -964,42 +1007,19 @@ acpi_ex_insert_into_field (
acpi_integer merged_datum;
acpi_integer previous_raw_datum;
acpi_integer this_raw_datum;
u32 byte_field_length;
u32 datum_count;
ACPI_FUNCTION_TRACE ("ex_insert_into_field");
/*
* Incoming buffer must be at least as long as the field, we do not
* allow "partial" field writes. We do not care if the buffer is
* larger than the field, this typically happens when an integer is
* written to a field that is actually smaller than an integer.
*/
byte_field_length = ACPI_ROUND_BITS_UP_TO_BYTES (
obj_desc->common_field.bit_length);
if (buffer_length < byte_field_length) {
ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD,
"Buffer length %X too small for field %X\n",
buffer_length, byte_field_length));
/* Validate buffer, compute number of datums */
return_ACPI_STATUS (AE_BUFFER_OVERFLOW);
status = acpi_ex_common_buffer_setup (obj_desc, buffer_length, &datum_count);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
byte_field_length = ACPI_ROUND_BITS_UP_TO_BYTES (
obj_desc->common_field.start_field_bit_offset +
obj_desc->common_field.bit_length);
/* Convert byte count to datum count, round up if necessary */
datum_count = ACPI_ROUND_UP_TO (byte_field_length,
obj_desc->common_field.access_byte_width);
ACPI_DEBUG_PRINT ((ACPI_DB_BFIELD,
"Bytes %X, Datums %X, byte_gran %X\n",
byte_field_length, datum_count, obj_desc->common_field.access_byte_width));
/*
* Break the request into up to three parts (similar to an I/O request):
* 1) non-aligned part at start
......
......@@ -389,6 +389,8 @@ acpi_ex_do_math_op (
acpi_integer operand1)
{
ACPI_FUNCTION_ENTRY ();
switch (opcode) {
case AML_ADD_OP: /* Add (Operand0, Operand1, Result) */
......@@ -452,15 +454,17 @@ acpi_ex_do_math_op (
* FUNCTION: acpi_ex_do_logical_op
*
* PARAMETERS: Opcode - AML opcode
* Operand0 - Integer operand #0
* Operand1 - Integer operand #1
* obj_desc0 - operand #0
* obj_desc1 - operand #1
*
* RETURN: TRUE/FALSE result of the operation
*
* DESCRIPTION: Execute a logical AML opcode. The purpose of having all of the
* functions here is to prevent a lot of pointer dereferencing
* to obtain the operands and to simplify the generation of the
* logical value.
* logical value. Both operands must already be validated as:
* 1) Both of the same type, and
* 2) Of type Integer, Buffer, or String.
*
* Note: cleanest machine code seems to be produced by the code
* below, rather than using statements of the form:
......@@ -471,54 +475,137 @@ acpi_ex_do_math_op (
u8
acpi_ex_do_logical_op (
u16 opcode,
acpi_integer operand0,
acpi_integer operand1)
union acpi_operand_object *obj_desc0,
union acpi_operand_object *obj_desc1)
{
acpi_integer operand0;
acpi_integer operand1;
u8 *ptr0;
u8 *ptr1;
u32 length0;
u32 length1;
u32 i;
switch (opcode) {
ACPI_FUNCTION_ENTRY ();
case AML_LAND_OP: /* LAnd (Operand0, Operand1) */
if (operand0 && operand1) {
return (TRUE);
}
break;
if (ACPI_GET_OBJECT_TYPE (obj_desc0) == ACPI_TYPE_INTEGER) {
/* Both operands are of type integer */
operand0 = obj_desc0->integer.value;
operand1 = obj_desc1->integer.value;
case AML_LEQUAL_OP: /* LEqual (Operand0, Operand1) */
switch (opcode) {
case AML_LAND_OP: /* LAnd (Operand0, Operand1) */
if (operand0 == operand1) {
return (TRUE);
}
break;
if (operand0 && operand1) {
return (TRUE);
}
break;
case AML_LEQUAL_OP: /* LEqual (Operand0, Operand1) */
case AML_LGREATER_OP: /* LGreater (Operand0, Operand1) */
if (operand0 == operand1) {
return (TRUE);
}
break;
if (operand0 > operand1) {
return (TRUE);
}
break;
case AML_LGREATER_OP: /* LGreater (Operand0, Operand1) */
if (operand0 > operand1) {
return (TRUE);
}
break;
case AML_LLESS_OP: /* LLess (Operand0, Operand1) */
case AML_LLESS_OP: /* LLess (Operand0, Operand1) */
if (operand0 < operand1) {
return (TRUE);
if (operand0 < operand1) {
return (TRUE);
}
break;
case AML_LOR_OP: /* LOr (Operand0, Operand1) */
if (operand0 || operand1) {
return (TRUE);
}
break;
default:
break;
}
break;
}
else {
/*
* Case for Buffer/String objects.
* NOTE: takes advantage of common Buffer/String object fields
*/
length0 = obj_desc0->buffer.length;
ptr0 = obj_desc0->buffer.pointer;
length1 = obj_desc1->buffer.length;
ptr1 = obj_desc1->buffer.pointer;
case AML_LOR_OP: /* LOr (Operand0, Operand1) */
switch (opcode) {
case AML_LEQUAL_OP: /* LEqual (Operand0, Operand1) */
if (operand0 || operand1) {
/* Length and all bytes must be equal */
if (length0 != length1) {
return (FALSE);
}
for (i = 0; i < length0; i++) {
if (ptr0[i] != ptr1[i]) {
return (FALSE);
}
}
return (TRUE);
}
break;
default:
break;
case AML_LGREATER_OP: /* LGreater (Operand0, Operand1) */
/* Check lengths first */
if (length0 > length1) {
return (TRUE);
}
else if (length0 < length1) {
return (FALSE);
}
/* Lengths equal, now scan the data */
for (i = 0; i < length0; i++) {
if (ptr0[i] > ptr1[i]) {
return (TRUE);
}
}
return (FALSE);
case AML_LLESS_OP: /* LLess (Operand0, Operand1) */
/* Check lengths first */
if (length0 < length1) {
return (TRUE);
}
else if (length0 > length1) {
return (FALSE);
}
/* Lengths equal, now scan the data */
for (i = 0; i < length0; i++) {
if (ptr0[i] < ptr1[i]) {
return (TRUE);
}
}
return (FALSE);
default:
break;
}
}
return (FALSE);
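To make the ordering rule above concrete: lengths are compared first, and the data bytes are scanned only when the lengths match. A standalone illustration in plain C (not ACPICA code) that mirrors the LGreater branch shown above:

	/* Mirrors the LGreater logic above: the longer buffer is "greater";
	 * with equal lengths, return true at the first position where the
	 * byte from the first operand is greater. Illustration only. */
	#include <string.h>

	static int lgreater_example (const char *s0, const char *s1)
	{
		size_t len0 = strlen (s0);
		size_t len1 = strlen (s1);
		size_t i;

		if (len0 != len1) {
			return (len0 > len1);
		}
		for (i = 0; i < len0; i++) {
			if ((unsigned char) s0[i] > (unsigned char) s1[i]) {
				return (1);
			}
		}
		return (0);
	}

	/* lgreater_example ("ACPI", "ACP") == 1 (longer operand wins)
	 * lgreater_example ("ABC",  "ABD") == 0 (no position in "ABC" is greater) */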
......
......@@ -54,7 +54,7 @@
*
* FUNCTION: acpi_ex_unlink_mutex
*
* PARAMETERS: *obj_desc - The mutex to be unlinked
* PARAMETERS: obj_desc - The mutex to be unlinked
*
* RETURN: Status
*
......@@ -73,6 +73,8 @@ acpi_ex_unlink_mutex (
return;
}
/* Doubly linked list */
if (obj_desc->mutex.next) {
(obj_desc->mutex.next)->mutex.prev = obj_desc->mutex.prev;
}
......@@ -90,8 +92,8 @@ acpi_ex_unlink_mutex (
*
* FUNCTION: acpi_ex_link_mutex
*
* PARAMETERS: *obj_desc - The mutex to be linked
* *list_head - head of the "acquired_mutex" list
* PARAMETERS: obj_desc - The mutex to be linked
* list_head - head of the "acquired_mutex" list
*
* RETURN: Status
*
......@@ -130,8 +132,8 @@ acpi_ex_link_mutex (
*
* FUNCTION: acpi_ex_acquire_mutex
*
* PARAMETERS: *time_desc - The 'time to delay' object descriptor
* *obj_desc - The object descriptor for this op
* PARAMETERS: time_desc - The 'time to delay' object descriptor
* obj_desc - The object descriptor for this op
*
* RETURN: Status
*
......@@ -173,9 +175,8 @@ acpi_ex_acquire_mutex (
return_ACPI_STATUS (AE_AML_MUTEX_ORDER);
}
/*
* Support for multiple acquires by the owning thread
*/
/* Support for multiple acquires by the owning thread */
if (obj_desc->mutex.owner_thread) {
/* Special case for Global Lock, allow all threads */
......@@ -199,10 +200,11 @@ acpi_ex_acquire_mutex (
return_ACPI_STATUS (status);
}
/* Have the mutex, update mutex and walk info */
/* Have the mutex: update mutex and walk info and save the sync_level */
obj_desc->mutex.owner_thread = walk_state->thread;
obj_desc->mutex.owner_thread = walk_state->thread;
obj_desc->mutex.acquisition_depth = 1;
obj_desc->mutex.original_sync_level = walk_state->thread->current_sync_level;
walk_state->thread->current_sync_level = obj_desc->mutex.sync_level;
......@@ -218,7 +220,7 @@ acpi_ex_acquire_mutex (
*
* FUNCTION: acpi_ex_release_mutex
*
* PARAMETERS: *obj_desc - The object descriptor for this op
* PARAMETERS: obj_desc - The object descriptor for this op
*
* RETURN: Status
*
......@@ -281,9 +283,8 @@ acpi_ex_release_mutex (
return_ACPI_STATUS (AE_AML_MUTEX_ORDER);
}
/*
* Match multiple Acquires with multiple Releases
*/
/* Match multiple Acquires with multiple Releases */
obj_desc->mutex.acquisition_depth--;
if (obj_desc->mutex.acquisition_depth != 0) {
/* Just decrement the depth and return */
......@@ -299,10 +300,10 @@ acpi_ex_release_mutex (
status = acpi_ex_system_release_mutex (obj_desc);
/* Update the mutex and walk state */
/* Update the mutex and walk state, restore sync_level before acquire */
obj_desc->mutex.owner_thread = NULL;
walk_state->thread->current_sync_level = obj_desc->mutex.sync_level;
walk_state->thread->current_sync_level = obj_desc->mutex.original_sync_level;
return_ACPI_STATUS (status);
}
......@@ -312,7 +313,7 @@ acpi_ex_release_mutex (
*
* FUNCTION: acpi_ex_release_all_mutexes
*
* PARAMETERS: *mutex_list - Head of the mutex list
* PARAMETERS: mutex_list - Head of the mutex list
*
* RETURN: Status
*
......@@ -332,9 +333,8 @@ acpi_ex_release_all_mutexes (
ACPI_FUNCTION_ENTRY ();
/*
* Traverse the list of owned mutexes, releasing each one.
*/
/* Traverse the list of owned mutexes, releasing each one */
while (next) {
this = next;
next = this->mutex.next;
......@@ -352,7 +352,11 @@ acpi_ex_release_all_mutexes (
/* Mark mutex unowned */
this->mutex.owner_thread = NULL;
this->mutex.owner_thread = NULL;
/* Update Thread sync_level (Last mutex is the important one) */
thread->current_sync_level = this->mutex.original_sync_level;
}
}
......
......@@ -97,6 +97,7 @@ acpi_ex_opcode_2A_0T_0R (
{
union acpi_operand_object **operand = &walk_state->operands[0];
struct acpi_namespace_node *node;
u32 value;
acpi_status status = AE_OK;
......@@ -113,16 +114,46 @@ acpi_ex_opcode_2A_0T_0R (
node = (struct acpi_namespace_node *) operand[0];
/* Second value is the notify value */
value = (u32) operand[1]->integer.value;
/* Notifies allowed on this object? */
if (!acpi_ev_is_notify_object (node)) {
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Unexpected notify object type [%s]\n",
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
"Unexpected notify object type [%s]\n",
acpi_ut_get_type_name (node->type)));
status = AE_AML_OPERAND_TYPE;
break;
}
#ifdef ACPI_GPE_NOTIFY_CHECK
/*
* GPE method wake/notify check. Here, we want to ensure that we
* don't receive any "device_wake" Notifies from a GPE _Lxx or _Exx
* GPE method during system runtime. If we do, the GPE is marked
* as "wake-only" and disabled.
*
* 1) Is the Notify() value == device_wake?
* 2) Is this a GPE deferred method? (An _Lxx or _Exx method)
* 3) Did the original GPE happen at system runtime?
* (versus during wake)
*
* If all three cases are true, this is a wake-only GPE that should
* be disabled at runtime.
*/
if (value == 2) /* device_wake */ {
status = acpi_ev_check_for_wake_only_gpe (walk_state->gpe_event_info);
if (ACPI_FAILURE (status)) {
/* AE_WAKE_ONLY_GPE is the only possible error; it means ignore this notify */
return_ACPI_STATUS (AE_OK);
}
}
#endif
/*
* Dispatch the notify to the appropriate handler
* NOTE: the request is queued for execution after this method
......@@ -130,8 +161,7 @@ acpi_ex_opcode_2A_0T_0R (
* from this thread -- because handlers may in turn run other
* control methods.
*/
status = acpi_ev_queue_notify_request (node,
(u32) operand[1]->integer.value);
status = acpi_ev_queue_notify_request (node, value);
break;
......@@ -543,9 +573,17 @@ acpi_ex_opcode_2A_0T_1R (
* Execute the Opcode
*/
if (walk_state->op_info->flags & AML_LOGICAL) /* logical_op (Operand0, Operand1) */ {
/* Both operands must be of the same type */
if (ACPI_GET_OBJECT_TYPE (operand[0]) !=
ACPI_GET_OBJECT_TYPE (operand[1])) {
status = AE_AML_OPERAND_TYPE;
goto cleanup;
}
logical_result = acpi_ex_do_logical_op (walk_state->opcode,
operand[0]->integer.value,
operand[1]->integer.value);
operand[0],
operand[1]);
goto store_logical_result;
}
......
......@@ -187,15 +187,15 @@ acpi_ex_resolve_object_to_value (
return_ACPI_STATUS (status);
}
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Arg/Local %X] value_obj is %p\n",
stack_desc->reference.offset, obj_desc));
/*
* Now we can delete the original Reference Object and
* replace it with the resolve value
* replace it with the resolved value
*/
acpi_ut_remove_reference (stack_desc);
*stack_ptr = obj_desc;
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Arg/Local %d] value_obj is %p\n",
stack_desc->reference.offset, obj_desc));
break;
......
......@@ -102,7 +102,8 @@ acpi_ex_store (
* Storing an object into a Named node.
*/
status = acpi_ex_store_object_to_node (source_desc,
(struct acpi_namespace_node *) dest_desc, walk_state);
(struct acpi_namespace_node *) dest_desc, walk_state,
ACPI_IMPLICIT_CONVERSION);
return_ACPI_STATUS (status);
}
......@@ -153,7 +154,7 @@ acpi_ex_store (
/* Storing an object into a Name "container" */
status = acpi_ex_store_object_to_node (source_desc, ref_desc->reference.object,
walk_state);
walk_state, ACPI_IMPLICIT_CONVERSION);
break;
......@@ -399,6 +400,7 @@ acpi_ex_store_object_to_index (
* PARAMETERS: source_desc - Value to be stored
* Node - Named object to receive the value
* walk_state - Current walk state
* implicit_conversion - Perform implicit conversion (yes/no)
*
* RETURN: Status
*
......@@ -421,7 +423,8 @@ acpi_status
acpi_ex_store_object_to_node (
union acpi_operand_object *source_desc,
struct acpi_namespace_node *node,
struct acpi_walk_state *walk_state)
struct acpi_walk_state *walk_state,
u8 implicit_conversion)
{
acpi_status status = AE_OK;
union acpi_operand_object *target_desc;
......@@ -451,6 +454,14 @@ acpi_ex_store_object_to_node (
return_ACPI_STATUS (status);
}
/* If no implicit conversion, drop into the default case below */
if (!implicit_conversion) {
/* Force execution of default (no implicit conversion) */
target_type = ACPI_TYPE_ANY;
}
/*
* Do the actual store operation
*/
......
......@@ -51,104 +51,24 @@
/******************************************************************************
*
* FUNCTION: acpi_hw_enable_gpe
* FUNCTION: acpi_hw_write_gpe_enable_reg
*
* PARAMETERS: gpe_event_info - Info block for the GPE to be enabled
*
* RETURN: Status
*
* DESCRIPTION: Enable a single GPE.
* DESCRIPTION: Write a GPE enable register. Note: The bit for this GPE must
* already be cleared or set in the parent register
* enable_for_run mask.
*
******************************************************************************/
acpi_status
acpi_hw_enable_gpe (
struct acpi_gpe_event_info *gpe_event_info)
{
u32 in_byte;
acpi_status status;
ACPI_FUNCTION_ENTRY ();
/*
* Read the current value of the register, set the appropriate bit
* to enable the GPE, and write out the new register.
*/
status = acpi_hw_low_level_read (8, &in_byte,
&gpe_event_info->register_info->enable_address);
if (ACPI_FAILURE (status)) {
return (status);
}
/* Write with the new GPE bit enabled */
status = acpi_hw_low_level_write (8, (in_byte | gpe_event_info->bit_mask),
&gpe_event_info->register_info->enable_address);
return (status);
}
/******************************************************************************
*
* FUNCTION: acpi_hw_enable_gpe_for_wakeup
*
* PARAMETERS: gpe_event_info - Info block for the GPE to be enabled
*
* RETURN: None
*
* DESCRIPTION: Keep track of which GPEs the OS has requested not be
* disabled when going to sleep.
*
******************************************************************************/
void
acpi_hw_enable_gpe_for_wakeup (
acpi_hw_write_gpe_enable_reg (
struct acpi_gpe_event_info *gpe_event_info)
{
struct acpi_gpe_register_info *gpe_register_info;
ACPI_FUNCTION_ENTRY ();
/* Get the info block for the entire GPE register */
gpe_register_info = gpe_event_info->register_info;
if (!gpe_register_info) {
return;
}
/*
* Set the bit so we will not enable this GPE when sleeping (and disable
* it upon wake)
*/
gpe_register_info->wake_enable |= gpe_event_info->bit_mask;
gpe_event_info->flags |= (ACPI_GPE_TYPE_WAKE | ACPI_GPE_ENABLED);
}
/******************************************************************************
*
* FUNCTION: acpi_hw_disable_gpe
*
* PARAMETERS: gpe_event_info - Info block for the GPE to be disabled
*
* RETURN: Status
*
* DESCRIPTION: Disable a single GPE.
*
******************************************************************************/
acpi_status
acpi_hw_disable_gpe (
struct acpi_gpe_event_info *gpe_event_info)
{
u32 in_byte;
acpi_status status;
struct acpi_gpe_register_info *gpe_register_info;
ACPI_FUNCTION_ENTRY ();
......@@ -158,67 +78,15 @@ acpi_hw_disable_gpe (
gpe_register_info = gpe_event_info->register_info;
if (!gpe_register_info) {
return (AE_BAD_PARAMETER);
return (AE_NOT_EXIST);
}
/*
* Read the current value of the register, clear the appropriate bit,
* and write out the new register value to disable the GPE.
*/
status = acpi_hw_low_level_read (8, &in_byte,
&gpe_register_info->enable_address);
if (ACPI_FAILURE (status)) {
return (status);
}
/* Write the byte with this GPE bit cleared */
/* Write the entire GPE (runtime) enable register */
status = acpi_hw_low_level_write (8, (in_byte & ~(gpe_event_info->bit_mask)),
status = acpi_hw_low_level_write (8, gpe_register_info->enable_for_run,
&gpe_register_info->enable_address);
if (ACPI_FAILURE (status)) {
return (status);
}
/* Make sure this GPE is disabled for wake, also */
acpi_hw_disable_gpe_for_wakeup (gpe_event_info);
return (AE_OK);
}
/******************************************************************************
*
* FUNCTION: acpi_hw_disable_gpe_for_wakeup
*
* PARAMETERS: gpe_event_info - Info block for the GPE to be disabled
*
* RETURN: None
*
* DESCRIPTION: Keep track of which GPEs the OS has requested not be
* disabled when going to sleep.
*
******************************************************************************/
void
acpi_hw_disable_gpe_for_wakeup (
struct acpi_gpe_event_info *gpe_event_info)
{
struct acpi_gpe_register_info *gpe_register_info;
ACPI_FUNCTION_ENTRY ();
/* Get the info block for the entire GPE register */
gpe_register_info = gpe_event_info->register_info;
if (!gpe_register_info) {
return;
}
/* Clear the bit so we will disable this when sleeping */
gpe_register_info->wake_enable &= ~(gpe_event_info->bit_mask);
return (status);
}
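Under the new model the per-register enable_for_run/enable_for_wake masks are maintained by the event-manager code, and this helper only flushes the runtime mask to the hardware enable register. A hypothetical caller sketch (field names taken from this patch, locking omitted), not a quote of the actual acpi_ev code path:

	/* Hypothetical sketch, not part of this patch: mark one GPE as
	 * enabled-for-runtime in its owning register's mask, then write
	 * that mask out to the hardware enable register. */
	gpe_event_info->register_info->enable_for_run |= (u8) gpe_event_info->register_bit;

	status = acpi_hw_write_gpe_enable_reg (gpe_event_info);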
......@@ -248,7 +116,7 @@ acpi_hw_clear_gpe (
* Write a one to the appropriate bit in the status register to
* clear this GPE.
*/
status = acpi_hw_low_level_write (8, gpe_event_info->bit_mask,
status = acpi_hw_low_level_write (8, gpe_event_info->register_bit,
&gpe_event_info->register_info->status_address);
return (status);
......@@ -274,7 +142,7 @@ acpi_hw_get_gpe_status (
acpi_event_status *event_status)
{
u32 in_byte;
u8 bit_mask;
u8 register_bit;
struct acpi_gpe_register_info *gpe_register_info;
acpi_status status;
acpi_event_status local_event_status = 0;
......@@ -293,33 +161,28 @@ acpi_hw_get_gpe_status (
/* Get the register bitmask for this GPE */
bit_mask = gpe_event_info->bit_mask;
register_bit = gpe_event_info->register_bit;
/* GPE Enabled? */
status = acpi_hw_low_level_read (8, &in_byte, &gpe_register_info->enable_address);
if (ACPI_FAILURE (status)) {
goto unlock_and_exit;
}
/* GPE currently enabled? (enabled for runtime?) */
if (bit_mask & in_byte) {
if (register_bit & gpe_register_info->enable_for_run) {
local_event_status |= ACPI_EVENT_FLAG_ENABLED;
}
/* GPE Enabled for wake? */
/* GPE enabled for wake? */
if (bit_mask & gpe_register_info->wake_enable) {
if (register_bit & gpe_register_info->enable_for_wake) {
local_event_status |= ACPI_EVENT_FLAG_WAKE_ENABLED;
}
/* GPE active (set)? */
/* GPE currently active (status bit == 1)? */
status = acpi_hw_low_level_read (8, &in_byte, &gpe_register_info->status_address);
if (ACPI_FAILURE (status)) {
goto unlock_and_exit;
}
if (bit_mask & in_byte) {
if (register_bit & in_byte) {
local_event_status |= ACPI_EVENT_FLAG_SET;
}
......@@ -411,64 +274,43 @@ acpi_hw_clear_gpe_block (
/******************************************************************************
*
* FUNCTION: acpi_hw_prepare_gpe_block_for_sleep
* FUNCTION: acpi_hw_enable_runtime_gpe_block
*
* PARAMETERS: gpe_xrupt_info - GPE Interrupt info
* gpe_block - Gpe Block info
*
* RETURN: Status
*
* DESCRIPTION: Disable all runtime GPEs and enable all wakeup GPEs -- within
* a single GPE block
* DESCRIPTION: Enable all "runtime" GPEs within a GPE block. (Includes
* combination wake/run GPEs.)
*
******************************************************************************/
static acpi_status
acpi_hw_prepare_gpe_block_for_sleep (
acpi_status
acpi_hw_enable_runtime_gpe_block (
struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block)
{
u32 i;
struct acpi_gpe_register_info *gpe_register_info;
u32 in_value;
acpi_status status;
/* Get the register info for the entire GPE block */
gpe_register_info = gpe_block->register_info;
/* NOTE: assumes that all GPEs are currently disabled */
/* Examine each GPE Register within the block */
for (i = 0; i < gpe_block->register_count; i++) {
/*
* Read the enabled/disabled status of all GPEs. We
* will be using it to restore all the GPEs later.
*
* NOTE: Wake GPEs are ALL disabled at this time, so when we wake
* and restore this register, they will be automatically disabled.
*/
status = acpi_hw_low_level_read (8, &in_value,
&gpe_register_info->enable_address);
if (ACPI_FAILURE (status)) {
return (status);
if (!gpe_block->register_info[i].enable_for_run) {
continue;
}
gpe_register_info->enable = (u8) in_value;
/* Enable all "runtime" GPEs in this register */
/*
* 1) Disable all runtime GPEs
* 2) Enable all wakeup GPEs
*/
status = acpi_hw_low_level_write (8, gpe_register_info->wake_enable,
&gpe_register_info->enable_address);
status = acpi_hw_low_level_write (8, gpe_block->register_info[i].enable_for_run,
&gpe_block->register_info[i].enable_address);
if (ACPI_FAILURE (status)) {
return (status);
}
/* Point to next GPE register */
gpe_register_info++;
}
return (AE_OK);
......@@ -477,122 +319,125 @@ acpi_hw_prepare_gpe_block_for_sleep (
/******************************************************************************
*
* FUNCTION: acpi_hw_prepare_gpes_for_sleep
* FUNCTION: acpi_hw_enable_wakeup_gpe_block
*
* PARAMETERS: None
* PARAMETERS: gpe_xrupt_info - GPE Interrupt info
* gpe_block - Gpe Block info
*
* RETURN: Status
*
* DESCRIPTION: Disable all runtime GPEs, enable all wake GPEs.
* Called with interrupts disabled. The interrupt handler also
* modifies gpe_register_info->Enable, so it should not be
* given the chance to run until after the runtime GPEs are
* re-enabled.
* DESCRIPTION: Enable all "wake" GPEs within a GPE block. (Includes
* combination wake/run GPEs.)
*
******************************************************************************/
acpi_status
acpi_hw_prepare_gpes_for_sleep (
void)
acpi_hw_enable_wakeup_gpe_block (
struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block)
{
u32 i;
acpi_status status;
ACPI_FUNCTION_ENTRY ();
/* Examine each GPE Register within the block */
for (i = 0; i < gpe_block->register_count; i++) {
if (!gpe_block->register_info[i].enable_for_wake) {
continue;
}
/* Enable all "wake" GPEs in this register */
status = acpi_ev_walk_gpe_list (acpi_hw_prepare_gpe_block_for_sleep);
return (status);
status = acpi_hw_low_level_write (8, gpe_block->register_info[i].enable_for_wake,
&gpe_block->register_info[i].enable_address);
if (ACPI_FAILURE (status)) {
return (status);
}
}
return (AE_OK);
}
/******************************************************************************
*
* FUNCTION: acpi_hw_restore_gpe_block_on_wake
* FUNCTION: acpi_hw_disable_all_gpes
*
* PARAMETERS: gpe_xrupt_info - GPE Interrupt info
* gpe_block - Gpe Block info
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Enable all runtime GPEs and disable all wake GPEs -- in one
* GPE block
* DESCRIPTION: Disable and clear all GPEs
*
******************************************************************************/
static acpi_status
acpi_hw_restore_gpe_block_on_wake (
struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block)
acpi_status
acpi_hw_disable_all_gpes (
void)
{
u32 i;
struct acpi_gpe_register_info *gpe_register_info;
acpi_status status;
/* This callback processes one entire GPE block */
ACPI_FUNCTION_TRACE ("hw_disable_all_gpes");
/* Get the register info for the entire GPE block */
gpe_register_info = gpe_block->register_info;
status = acpi_ev_walk_gpe_list (acpi_hw_disable_gpe_block);
status = acpi_ev_walk_gpe_list (acpi_hw_clear_gpe_block);
return_ACPI_STATUS (status);
}
/* Examine each GPE register within the block */
for (i = 0; i < gpe_block->register_count; i++) {
/* Clear the entire status register */
/******************************************************************************
*
* FUNCTION: acpi_hw_enable_all_runtime_gpes
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks
*
******************************************************************************/
status = acpi_hw_low_level_write (8, 0xFF,
&gpe_block->register_info[i].status_address);
if (ACPI_FAILURE (status)) {
return (status);
}
acpi_status
acpi_hw_enable_all_runtime_gpes (
void)
{
acpi_status status;
/*
* Restore the GPE Enable register, which will do the following:
*
* 1) Disable all wakeup GPEs
* 2) Enable all runtime GPEs
*
* (On sleep, we saved the enabled status of all GPEs)
*/
status = acpi_hw_low_level_write (8, gpe_register_info->enable,
&gpe_register_info->enable_address);
if (ACPI_FAILURE (status)) {
return (status);
}
/* Point to next GPE register */
ACPI_FUNCTION_TRACE ("hw_enable_all_runtime_gpes");
gpe_register_info++;
}
return (AE_OK);
status = acpi_ev_walk_gpe_list (acpi_hw_enable_runtime_gpe_block);
return_ACPI_STATUS (status);
}
/******************************************************************************
*
* FUNCTION: acpi_hw_restore_gpes_on_wake
* FUNCTION: acpi_hw_enable_all_wakeup_gpes
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Enable all runtime GPEs and disable all wake GPEs -- in all
* GPE blocks
* DESCRIPTION: Enable all "wakeup" GPEs, in all GPE blocks
*
******************************************************************************/
acpi_status
acpi_hw_restore_gpes_on_wake (
acpi_hw_enable_all_wakeup_gpes (
void)
{
acpi_status status;
ACPI_FUNCTION_ENTRY ();
ACPI_FUNCTION_TRACE ("hw_enable_all_wakeup_gpes");
status = acpi_ev_walk_gpe_list (acpi_hw_restore_gpe_block_on_wake);
return (status);
status = acpi_ev_walk_gpe_list (acpi_hw_enable_wakeup_gpe_block);
return_ACPI_STATUS (status);
}
......@@ -135,7 +135,7 @@ acpi_get_sleep_type_data (
u8 *sleep_type_b)
{
acpi_status status = AE_OK;
union acpi_operand_object *obj_desc;
struct acpi_parameter_info info;
ACPI_FUNCTION_TRACE ("acpi_get_sleep_type_data");
......@@ -152,8 +152,9 @@ acpi_get_sleep_type_data (
/*
* Evaluate the namespace object containing the values for this state
*/
info.parameters = NULL;
status = acpi_ns_evaluate_by_name ((char *) acpi_gbl_sleep_state_names[sleep_state],
NULL, &obj_desc);
&info);
if (ACPI_FAILURE (status)) {
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "%s while evaluating sleep_state [%s]\n",
acpi_format_exception (status), acpi_gbl_sleep_state_names[sleep_state]));
......@@ -163,48 +164,50 @@ acpi_get_sleep_type_data (
/* Must have a return object */
if (!obj_desc) {
if (!info.return_object) {
ACPI_REPORT_ERROR (("Missing Sleep State object\n"));
status = AE_NOT_EXIST;
}
/* It must be of type Package */
else if (ACPI_GET_OBJECT_TYPE (obj_desc) != ACPI_TYPE_PACKAGE) {
else if (ACPI_GET_OBJECT_TYPE (info.return_object) != ACPI_TYPE_PACKAGE) {
ACPI_REPORT_ERROR (("Sleep State object not a Package\n"));
status = AE_AML_OPERAND_TYPE;
}
/* The package must have at least two elements */
else if (obj_desc->package.count < 2) {
else if (info.return_object->package.count < 2) {
ACPI_REPORT_ERROR (("Sleep State package does not have at least two elements\n"));
status = AE_AML_NO_OPERAND;
}
/* The first two elements must both be of type Integer */
else if ((ACPI_GET_OBJECT_TYPE (obj_desc->package.elements[0]) != ACPI_TYPE_INTEGER) ||
(ACPI_GET_OBJECT_TYPE (obj_desc->package.elements[1]) != ACPI_TYPE_INTEGER)) {
else if ((ACPI_GET_OBJECT_TYPE (info.return_object->package.elements[0]) != ACPI_TYPE_INTEGER) ||
(ACPI_GET_OBJECT_TYPE (info.return_object->package.elements[1]) != ACPI_TYPE_INTEGER)) {
ACPI_REPORT_ERROR (("Sleep State package elements are not both Integers (%s, %s)\n",
acpi_ut_get_object_type_name (obj_desc->package.elements[0]),
acpi_ut_get_object_type_name (obj_desc->package.elements[1])));
acpi_ut_get_object_type_name (info.return_object->package.elements[0]),
acpi_ut_get_object_type_name (info.return_object->package.elements[1])));
status = AE_AML_OPERAND_TYPE;
}
else {
/*
* Valid _Sx_ package size, type, and value
*/
*sleep_type_a = (u8) (obj_desc->package.elements[0])->integer.value;
*sleep_type_b = (u8) (obj_desc->package.elements[1])->integer.value;
*sleep_type_a = (u8) (info.return_object->package.elements[0])->integer.value;
*sleep_type_b = (u8) (info.return_object->package.elements[1])->integer.value;
}
if (ACPI_FAILURE (status)) {
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "While evaluating sleep_state [%s], bad Sleep object %p type %s\n",
acpi_gbl_sleep_state_names[sleep_state], obj_desc, acpi_ut_get_object_type_name (obj_desc)));
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
"While evaluating sleep_state [%s], bad Sleep object %p type %s\n",
acpi_gbl_sleep_state_names[sleep_state], info.return_object,
acpi_ut_get_object_type_name (info.return_object)));
}
acpi_ut_remove_reference (obj_desc);
acpi_ut_remove_reference (info.return_object);
return_ACPI_STATUS (status);
}
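The checks above mean a successful call hands back just the first two Integer elements of the \_Sx_ package (the SLP_TYPa/SLP_TYPb values). A minimal, hypothetical caller sketch, not part of this patch:

	/* Hypothetical usage sketch: fetch the SLP_TYP values for S3 */
	u8 type_a;
	u8 type_b;
	acpi_status status;

	status = acpi_get_sleep_type_data (ACPI_STATE_S3, &type_a, &type_b);
	if (ACPI_SUCCESS (status)) {
		/* type_a and type_b now hold \_S3_ package elements [0] and [1] */
	}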
......
......@@ -265,19 +265,21 @@ acpi_enter_sleep_state (
sleep_type_reg_info = acpi_hw_get_bit_register_info (ACPI_BITREG_SLEEP_TYPE_A);
sleep_enable_reg_info = acpi_hw_get_bit_register_info (ACPI_BITREG_SLEEP_ENABLE);
if (sleep_state != ACPI_STATE_S5) {
/* Clear wake status */
/* Clear wake status */
status = acpi_set_register (ACPI_BITREG_WAKE_STATUS, 1, ACPI_MTX_DO_NOT_LOCK);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
status = acpi_set_register (ACPI_BITREG_WAKE_STATUS, 1, ACPI_MTX_DO_NOT_LOCK);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
status = acpi_hw_clear_acpi_status (ACPI_MTX_DO_NOT_LOCK);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
/* Clear all fixed and general purpose status bits */
status = acpi_hw_clear_acpi_status (ACPI_MTX_DO_NOT_LOCK);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
if (sleep_state != ACPI_STATE_S5) {
/* Disable BM arbitration */
status = acpi_set_register (ACPI_BITREG_ARB_DISABLE, 1, ACPI_MTX_DO_NOT_LOCK);
......@@ -287,10 +289,16 @@ acpi_enter_sleep_state (
}
/*
* 1) Disable all runtime GPEs
* 1) Disable/Clear all GPEs
* 2) Enable all wakeup GPEs
*/
status = acpi_hw_prepare_gpes_for_sleep ();
status = acpi_hw_disable_all_gpes ();
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
acpi_gbl_system_awake_and_running = FALSE;
status = acpi_hw_enable_all_wakeup_gpes ();
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
......@@ -420,10 +428,16 @@ acpi_enter_sleep_state_s4bios (
}
/*
* 1) Disable all runtime GPEs
* 1) Disable/Clear all GPEs
* 2) Enable all wakeup GPEs
*/
status = acpi_hw_prepare_gpes_for_sleep ();
status = acpi_hw_disable_all_gpes ();
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
acpi_gbl_system_awake_and_running = FALSE;
status = acpi_hw_enable_all_wakeup_gpes ();
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
......@@ -540,19 +554,25 @@ acpi_leave_sleep_state (
/*
* Restore the GPEs:
* 1) Disable all wakeup GPEs
* 1) Disable/Clear all GPEs
* 2) Enable all runtime GPEs
*/
status = acpi_hw_restore_gpes_on_wake ();
status = acpi_hw_disable_all_gpes ();
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
acpi_gbl_system_awake_and_running = TRUE;
status = acpi_hw_enable_all_runtime_gpes ();
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
/* Enable power button */
acpi_set_register(acpi_gbl_fixed_event_info[ACPI_EVENT_POWER_BUTTON].enable_register_id,
(void) acpi_set_register(acpi_gbl_fixed_event_info[ACPI_EVENT_POWER_BUTTON].enable_register_id,
1, ACPI_MTX_DO_NOT_LOCK);
acpi_set_register(acpi_gbl_fixed_event_info[ACPI_EVENT_POWER_BUTTON].status_register_id,
(void) acpi_set_register(acpi_gbl_fixed_event_info[ACPI_EVENT_POWER_BUTTON].status_register_id,
1, ACPI_MTX_DO_NOT_LOCK);
/* Enable BM arbitration */
......
/*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <asm/io.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#define _COMPONENT ACPI_SYSTEM_COMPONENT
ACPI_MODULE_NAME ("acpi_motherboard")
/* Dell uses PNP0C01 instead of PNP0C02 */
#define ACPI_MB_HID1 "PNP0C01"
#define ACPI_MB_HID2 "PNP0C02"
/**
* Doesn't care about legacy IO ports; only IO ports beyond 0x1000 are reserved.
* Doesn't care about the failure of 'request_region', since other drivers may
* reserve the IO ports as well.
*/
#define IS_RESERVED_ADDR(base, len) \
(((len) > 0) && ((base) > 0) && ((base) + (len) < IO_SPACE_LIMIT) \
&& ((base) + (len) > PCIBIOS_MIN_IO))
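As a concrete illustration of the macro, assuming the common x86 values PCIBIOS_MIN_IO == 0x1000 and IO_SPACE_LIMIT == 0xffff (both are architecture-dependent, so these results are indicative only):

	/* Hypothetical ranges, evaluated only to illustrate the macro above */
	int a = IS_RESERVED_ADDR(0x0800, 0x10);   /* 0: legacy range, ends below PCIBIOS_MIN_IO */
	int b = IS_RESERVED_ADDR(0x1000, 0x80);   /* 1: 0x1000-0x1080 is reservable */
	int c = IS_RESERVED_ADDR(0xfff0, 0x100);  /* 0: would run past IO_SPACE_LIMIT */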
static acpi_status
acpi_reserve_io_ranges (struct acpi_resource *res, void *data)
{
ACPI_FUNCTION_TRACE("acpi_reserve_io_ranges");
if (res->id == ACPI_RSTYPE_IO) {
struct acpi_resource_io *io_res = &res->data.io;
if (io_res->min_base_address != io_res->max_base_address)
return AE_OK;
if (IS_RESERVED_ADDR(io_res->min_base_address, io_res->range_length)) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Motherboard resources 0x%08x - 0x%08x\n",
io_res->min_base_address,
io_res->min_base_address + io_res->range_length));
request_region(io_res->min_base_address,
io_res->range_length, "motherboard");
}
} else if (res->id == ACPI_RSTYPE_FIXED_IO) {
struct acpi_resource_fixed_io *fixed_io_res = &res->data.fixed_io;
if (IS_RESERVED_ADDR(fixed_io_res->base_address, fixed_io_res->range_length)) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Motherboard resources 0x%08x - 0x%08x\n",
fixed_io_res->base_address,
fixed_io_res->base_address + fixed_io_res->range_length));
request_region(fixed_io_res->base_address,
fixed_io_res->range_length, "motherboard");
}
} else {
/* Memory mapped IO? */
}
return AE_OK;
}
static int acpi_motherboard_add (struct acpi_device *device)
{
if (!device)
return -EINVAL;
acpi_walk_resources(device->handle, METHOD_NAME__CRS,
acpi_reserve_io_ranges, NULL);
return 0;
}
static struct acpi_driver acpi_motherboard_driver1 = {
.name = "motherboard",
.class = "",
.ids = ACPI_MB_HID1,
.ops = {
.add = acpi_motherboard_add,
},
};
static struct acpi_driver acpi_motherboard_driver2 = {
.name = "motherboard",
.class = "",
.ids = ACPI_MB_HID2,
.ops = {
.add = acpi_motherboard_add,
},
};
static void __init
acpi_reserve_resources (void)
{
if (acpi_gbl_FADT->xpm1a_evt_blk.address && acpi_gbl_FADT->pm1_evt_len)
request_region(acpi_gbl_FADT->xpm1a_evt_blk.address,
acpi_gbl_FADT->pm1_evt_len, "PM1a_EVT_BLK");
if (acpi_gbl_FADT->xpm1b_evt_blk.address && acpi_gbl_FADT->pm1_evt_len)
request_region(acpi_gbl_FADT->xpm1b_evt_blk.address,
acpi_gbl_FADT->pm1_evt_len, "PM1b_EVT_BLK");
if (acpi_gbl_FADT->xpm1a_cnt_blk.address && acpi_gbl_FADT->pm1_cnt_len)
request_region(acpi_gbl_FADT->xpm1a_cnt_blk.address,
acpi_gbl_FADT->pm1_cnt_len, "PM1a_CNT_BLK");
if (acpi_gbl_FADT->xpm1b_cnt_blk.address && acpi_gbl_FADT->pm1_cnt_len)
request_region(acpi_gbl_FADT->xpm1b_cnt_blk.address,
acpi_gbl_FADT->pm1_cnt_len, "PM1b_CNT_BLK");
if (acpi_gbl_FADT->xpm_tmr_blk.address && acpi_gbl_FADT->pm_tm_len == 4)
request_region(acpi_gbl_FADT->xpm_tmr_blk.address,
4, "PM_TMR");
if (acpi_gbl_FADT->xpm2_cnt_blk.address && acpi_gbl_FADT->pm2_cnt_len)
request_region(acpi_gbl_FADT->xpm2_cnt_blk.address,
acpi_gbl_FADT->pm2_cnt_len, "PM2_CNT_BLK");
/* Length of GPE blocks must be a non-zero, even number of bytes */
if (acpi_gbl_FADT->xgpe0_blk.address && acpi_gbl_FADT->gpe0_blk_len &&
!(acpi_gbl_FADT->gpe0_blk_len & 0x1))
request_region(acpi_gbl_FADT->xgpe0_blk.address,
acpi_gbl_FADT->gpe0_blk_len, "GPE0_BLK");
if (acpi_gbl_FADT->xgpe1_blk.address && acpi_gbl_FADT->gpe1_blk_len &&
!(acpi_gbl_FADT->gpe1_blk_len & 0x1))
request_region(acpi_gbl_FADT->xgpe1_blk.address,
acpi_gbl_FADT->gpe1_blk_len, "GPE1_BLK");
}
static int __init acpi_motherboard_init(void)
{
acpi_bus_register_driver(&acpi_motherboard_driver1);
acpi_bus_register_driver(&acpi_motherboard_driver2);
/*
* Guarantee motherboard IO reservation first
* This module must run after scan.c
*/
if (!acpi_disabled)
acpi_reserve_resources ();
return 0;
}
subsys_initcall(acpi_motherboard_init);
......@@ -193,7 +193,7 @@ acpi_ns_root_initialize (void)
case ACPI_TYPE_MUTEX:
obj_desc->mutex.node = new_node;
obj_desc->mutex.sync_level = (u16) ACPI_STRTOUL
obj_desc->mutex.sync_level = (u8) ACPI_STRTOUL
(val, NULL, 10);
if (ACPI_STRCMP (init_val->name, "_GL_") == 0) {
......
......@@ -267,7 +267,7 @@ acpi_ns_install_node (
else {
#ifdef ACPI_ALPHABETIC_NAMESPACE
/*
* Walk the list whilst searching for the the correct
* Walk the list whilst searching for the correct
* alphabetic placement.
*/
previous_child_node = NULL;
......
......@@ -77,13 +77,10 @@
acpi_status
acpi_ns_evaluate_relative (
struct acpi_namespace_node *handle,
char *pathname,
union acpi_operand_object **params,
union acpi_operand_object **return_object)
struct acpi_parameter_info *info)
{
acpi_status status;
struct acpi_namespace_node *prefix_node;
struct acpi_namespace_node *node = NULL;
union acpi_generic_state *scope_info;
char *internal_path = NULL;
......@@ -95,7 +92,7 @@ acpi_ns_evaluate_relative (
/*
* Must have a valid object handle
*/
if (!handle) {
if (!info || !info->node) {
return_ACPI_STATUS (AE_BAD_PARAMETER);
}
......@@ -118,8 +115,8 @@ acpi_ns_evaluate_relative (
goto cleanup;
}
prefix_node = acpi_ns_map_handle_to_node (handle);
if (!prefix_node) {
info->node = acpi_ns_map_handle_to_node (info->node);
if (!info->node) {
(void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE);
status = AE_BAD_PARAMETER;
goto cleanup;
......@@ -127,7 +124,7 @@ acpi_ns_evaluate_relative (
/* Lookup the name in the namespace */
scope_info->scope.node = prefix_node;
scope_info->scope.node = info->node;
status = acpi_ns_lookup (scope_info, internal_path, ACPI_TYPE_ANY,
ACPI_IMODE_EXECUTE, ACPI_NS_NO_UPSEARCH, NULL,
&node);
......@@ -147,7 +144,8 @@ acpi_ns_evaluate_relative (
ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "%s [%p] Value %p\n",
pathname, node, acpi_ns_get_attached_object (node)));
status = acpi_ns_evaluate_by_handle (node, params, return_object);
info->node = node;
status = acpi_ns_evaluate_by_handle (info);
ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "*** Completed eval of object %s ***\n",
pathname));
......@@ -166,6 +164,7 @@ acpi_ns_evaluate_relative (
* FUNCTION: acpi_ns_evaluate_by_name
*
* PARAMETERS: Pathname - Fully qualified pathname to the object
* Info - Contains:
* return_object - Where to put method's return value (if
* any). If NULL, no value is returned.
* Params - List of parameters to pass to the method,
......@@ -184,11 +183,9 @@ acpi_ns_evaluate_relative (
acpi_status
acpi_ns_evaluate_by_name (
char *pathname,
union acpi_operand_object **params,
union acpi_operand_object **return_object)
struct acpi_parameter_info *info)
{
acpi_status status;
struct acpi_namespace_node *node = NULL;
char *internal_path = NULL;
......@@ -211,7 +208,7 @@ acpi_ns_evaluate_by_name (
status = acpi_ns_lookup (NULL, internal_path, ACPI_TYPE_ANY,
ACPI_IMODE_EXECUTE, ACPI_NS_NO_UPSEARCH, NULL,
&node);
&info->node);
(void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE);
......@@ -226,9 +223,9 @@ acpi_ns_evaluate_by_name (
* to evaluate it.
*/
ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "%s [%p] Value %p\n",
pathname, node, acpi_ns_get_attached_object (node)));
pathname, info->node, acpi_ns_get_attached_object (info->node)));
status = acpi_ns_evaluate_by_handle (node, params, return_object);
status = acpi_ns_evaluate_by_handle (info);
ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "*** Completed eval of object %s ***\n",
pathname));
......@@ -254,6 +251,7 @@ acpi_ns_evaluate_by_name (
* Params - List of parameters to pass to the method,
* terminated by NULL. Params itself may be
* NULL if no parameters are being passed.
* param_type - Type of Parameter list
* return_object - Where to put method's return value (if
* any). If NULL, no value is returned.
*
......@@ -267,13 +265,9 @@ acpi_ns_evaluate_by_name (
acpi_status
acpi_ns_evaluate_by_handle (
struct acpi_namespace_node *handle,
union acpi_operand_object **params,
union acpi_operand_object **return_object)
struct acpi_parameter_info *info)
{
struct acpi_namespace_node *node;
acpi_status status;
union acpi_operand_object *local_return_object;
ACPI_FUNCTION_TRACE ("ns_evaluate_by_handle");
......@@ -287,15 +281,13 @@ acpi_ns_evaluate_by_handle (
/* Parameter Validation */
if (!handle) {
if (!info) {
return_ACPI_STATUS (AE_BAD_PARAMETER);
}
if (return_object) {
/* Initialize the return value to an invalid object */
/* Initialize the return value to an invalid object */
*return_object = NULL;
}
info->return_object = NULL;
/* Get the prefix handle and Node */
......@@ -304,8 +296,8 @@ acpi_ns_evaluate_by_handle (
return_ACPI_STATUS (status);
}
node = acpi_ns_map_handle_to_node (handle);
if (!node) {
info->node = acpi_ns_map_handle_to_node (info->node);
if (!info->node) {
(void) acpi_ut_release_mutex (ACPI_MTX_NAMESPACE);
return_ACPI_STATUS (AE_BAD_PARAMETER);
}
......@@ -315,8 +307,8 @@ acpi_ns_evaluate_by_handle (
* so that proper scoping context will be established
* before execution.
*/
if (acpi_ns_get_type (node) == ACPI_TYPE_LOCAL_METHOD_ALIAS) {
node = ACPI_CAST_PTR (struct acpi_namespace_node, node->object);
if (acpi_ns_get_type (info->node) == ACPI_TYPE_LOCAL_METHOD_ALIAS) {
info->node = ACPI_CAST_PTR (struct acpi_namespace_node, info->node->object);
}
/*
......@@ -328,19 +320,18 @@ acpi_ns_evaluate_by_handle (
* In both cases, the namespace is unlocked by the
* acpi_ns* procedure
*/
if (acpi_ns_get_type (node) == ACPI_TYPE_METHOD) {
if (acpi_ns_get_type (info->node) == ACPI_TYPE_METHOD) {
/*
* Case 1) We have an actual control method to execute
*/
status = acpi_ns_execute_control_method (node, params,
&local_return_object);
status = acpi_ns_execute_control_method (info);
}
else {
/*
* Case 2) Object is NOT a method, just return its
* current value
*/
status = acpi_ns_get_object_value (node, &local_return_object);
status = acpi_ns_get_object_value (info);
}
/*
......@@ -348,20 +339,6 @@ acpi_ns_evaluate_by_handle (
* be dealt with
*/
if (status == AE_CTRL_RETURN_VALUE) {
/*
* If the Method returned a value and the caller
* provided a place to store a returned value, Copy
* the returned value to the object descriptor provided
* by the caller.
*/
if (return_object) {
/*
* Valid return object, copy the pointer to
* the returned object
*/
*return_object = local_return_object;
}
/* Map AE_CTRL_RETURN_VALUE to AE_OK, we are done with it */
status = AE_OK;
......@@ -396,9 +373,7 @@ acpi_ns_evaluate_by_handle (
acpi_status
acpi_ns_execute_control_method (
struct acpi_namespace_node *method_node,
union acpi_operand_object **params,
union acpi_operand_object **return_obj_desc)
struct acpi_parameter_info *info)
{
acpi_status status;
union acpi_operand_object *obj_desc;
......@@ -409,7 +384,7 @@ acpi_ns_execute_control_method (
/* Verify that there is a method associated with this object */
obj_desc = acpi_ns_get_attached_object (method_node);
obj_desc = acpi_ns_get_attached_object (info->node);
if (!obj_desc) {
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "No attached method object\n"));
......@@ -417,7 +392,7 @@ acpi_ns_execute_control_method (
return_ACPI_STATUS (AE_NULL_OBJECT);
}
ACPI_DUMP_PATHNAME (method_node, "Execute Method:",
ACPI_DUMP_PATHNAME (info->node, "Execute Method:",
ACPI_LV_INFO, _COMPONENT);
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Method at AML address %p Length %X\n",
......@@ -444,7 +419,7 @@ acpi_ns_execute_control_method (
return_ACPI_STATUS (status);
}
status = acpi_psx_execute (method_node, params, return_obj_desc);
status = acpi_psx_execute (info);
acpi_ex_exit_interpreter ();
return_ACPI_STATUS (status);
......@@ -468,11 +443,10 @@ acpi_ns_execute_control_method (
acpi_status
acpi_ns_get_object_value (
struct acpi_namespace_node *node,
union acpi_operand_object **return_obj_desc)
struct acpi_parameter_info *info)
{
acpi_status status = AE_OK;
struct acpi_namespace_node *resolved_node = node;
struct acpi_namespace_node *resolved_node = info->node;
ACPI_FUNCTION_TRACE ("ns_get_object_value");
......@@ -518,9 +492,9 @@ acpi_ns_get_object_value (
if (ACPI_SUCCESS (status)) {
status = AE_CTRL_RETURN_VALUE;
*return_obj_desc = ACPI_CAST_PTR (union acpi_operand_object, resolved_node);
info->return_object = ACPI_CAST_PTR (union acpi_operand_object, resolved_node);
ACPI_DEBUG_PRINT ((ACPI_DB_NAMES, "Returning object %p [%s]\n",
*return_obj_desc, acpi_ut_get_object_type_name (*return_obj_desc)));
info->return_object, acpi_ut_get_object_type_name (info->return_object)));
}
}
......
......@@ -149,7 +149,7 @@ acpi_ns_initialize_devices (
return_ACPI_STATUS (status);
}
/* Walk namespace for all objects of type Device or Processor */
/* Walk namespace for all objects */
status = acpi_ns_walk_namespace (ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, TRUE, acpi_ns_init_one_device, &info, NULL);
......@@ -337,25 +337,29 @@ acpi_ns_init_one_device (
void *context,
void **return_value)
{
acpi_status status;
struct acpi_namespace_node *node;
u32 flags;
struct acpi_device_walk_info *info = (struct acpi_device_walk_info *) context;
struct acpi_parameter_info pinfo;
u32 flags;
acpi_status status;
ACPI_FUNCTION_TRACE ("ns_init_one_device");
node = acpi_ns_map_handle_to_node (obj_handle);
if (!node) {
pinfo.parameters = NULL;
pinfo.parameter_type = ACPI_PARAM_ARGS;
pinfo.node = acpi_ns_map_handle_to_node (obj_handle);
if (!pinfo.node) {
return_ACPI_STATUS (AE_BAD_PARAMETER);
}
/*
* We will run _STA/_INI on Devices and Processors only
* We will run _STA/_INI on Devices, Processors and thermal_zones only
*/
if ((node->type != ACPI_TYPE_DEVICE) &&
(node->type != ACPI_TYPE_PROCESSOR)) {
if ((pinfo.node->type != ACPI_TYPE_DEVICE) &&
(pinfo.node->type != ACPI_TYPE_PROCESSOR) &&
(pinfo.node->type != ACPI_TYPE_THERMAL)) {
return_ACPI_STATUS (AE_OK);
}
......@@ -368,17 +372,17 @@ acpi_ns_init_one_device (
/*
* Run _STA to determine if we can run _INI on the device.
*/
ACPI_DEBUG_EXEC (acpi_ut_display_init_pathname (ACPI_TYPE_METHOD, node, "_STA"));
status = acpi_ut_execute_STA (node, &flags);
ACPI_DEBUG_EXEC (acpi_ut_display_init_pathname (ACPI_TYPE_METHOD, pinfo.node, "_STA"));
status = acpi_ut_execute_STA (pinfo.node, &flags);
if (ACPI_FAILURE (status)) {
if (node->type == ACPI_TYPE_DEVICE) {
if (pinfo.node->type == ACPI_TYPE_DEVICE) {
/* Ignore error and move on to next device */
return_ACPI_STATUS (AE_OK);
}
/* _STA is not required for Processor objects */
/* _STA is not required for Processor or thermal_zone objects */
}
else {
info->num_STA++;
......@@ -393,22 +397,22 @@ acpi_ns_init_one_device (
/*
* The device is present. Run _INI.
*/
ACPI_DEBUG_EXEC (acpi_ut_display_init_pathname (ACPI_TYPE_METHOD, obj_handle, "_INI"));
status = acpi_ns_evaluate_relative (obj_handle, "_INI", NULL, NULL);
ACPI_DEBUG_EXEC (acpi_ut_display_init_pathname (ACPI_TYPE_METHOD, pinfo.node, "_INI"));
status = acpi_ns_evaluate_relative ("_INI", &pinfo);
if (ACPI_FAILURE (status)) {
/* No _INI (AE_NOT_FOUND) means device requires no initialization */
if (status != AE_NOT_FOUND) {
/* Ignore error and move on to next device */
#ifdef ACPI_DEBUG_OUTPUT
char *scope_name = acpi_ns_get_external_pathname (obj_handle);
#ifdef ACPI_DEBUG_OUTPUT
char *scope_name = acpi_ns_get_external_pathname (pinfo.node);
ACPI_DEBUG_PRINT ((ACPI_DB_WARN, "%s._INI failed: %s\n",
scope_name, acpi_format_exception (status)));
ACPI_MEM_FREE (scope_name);
#endif
#endif
}
status = AE_OK;
......@@ -422,7 +426,7 @@ acpi_ns_init_one_device (
if (acpi_gbl_init_handler) {
/* External initialization handler is present, call it */
status = acpi_gbl_init_handler (obj_handle, ACPI_INIT_DEVICE_INI);
status = acpi_gbl_init_handler (pinfo.node, ACPI_INIT_DEVICE_INI);
}
......
......@@ -94,8 +94,9 @@ acpi_ns_one_complete_parse (
return_ACPI_STATUS (AE_NO_MEMORY);
}
status = acpi_ds_init_aml_walk (walk_state, parse_root, NULL, table_desc->aml_start,
table_desc->aml_length, NULL, NULL, pass_number);
status = acpi_ds_init_aml_walk (walk_state, parse_root, NULL,
table_desc->aml_start, table_desc->aml_length,
NULL, pass_number);
if (ACPI_FAILURE (status)) {
acpi_ds_delete_walk_state (walk_state);
return_ACPI_STATUS (status);
......
......@@ -174,8 +174,7 @@ acpi_evaluate_object (
{
acpi_status status;
acpi_status status2;
union acpi_operand_object **internal_params = NULL;
union acpi_operand_object *internal_return_obj = NULL;
struct acpi_parameter_info info;
acpi_size buffer_space_needed;
u32 i;
......@@ -183,6 +182,11 @@ acpi_evaluate_object (
ACPI_FUNCTION_TRACE ("acpi_evaluate_object");
info.node = handle;
info.parameters = NULL;
info.return_object = NULL;
info.parameter_type = ACPI_PARAM_ARGS;
/*
* If there are parameters to be passed to the object
* (which must be a control method), the external objects
......@@ -193,9 +197,10 @@ acpi_evaluate_object (
* Allocate a new parameter block for the internal objects
* Add 1 to count to allow for null terminated internal list
*/
internal_params = ACPI_MEM_CALLOCATE (((acpi_size) external_params->count + 1) *
sizeof (void *));
if (!internal_params) {
info.parameters = ACPI_MEM_CALLOCATE (
((acpi_size) external_params->count + 1) *
sizeof (void *));
if (!info.parameters) {
return_ACPI_STATUS (AE_NO_MEMORY);
}
......@@ -205,15 +210,16 @@ acpi_evaluate_object (
*/
for (i = 0; i < external_params->count; i++) {
status = acpi_ut_copy_eobject_to_iobject (&external_params->pointer[i],
&internal_params[i]);
&info.parameters[i]);
if (ACPI_FAILURE (status)) {
acpi_ut_delete_internal_object_list (internal_params);
acpi_ut_delete_internal_object_list (info.parameters);
return_ACPI_STATUS (status);
}
}
internal_params[external_params->count] = NULL;
info.parameters[external_params->count] = NULL;
}
/*
* Three major cases:
* 1) Fully qualified pathname
......@@ -225,8 +231,7 @@ acpi_evaluate_object (
/*
* The path is fully qualified, just evaluate by name
*/
status = acpi_ns_evaluate_by_name (pathname, internal_params,
&internal_return_obj);
status = acpi_ns_evaluate_by_name (pathname, &info);
}
else if (!handle) {
/*
......@@ -256,15 +261,13 @@ acpi_evaluate_object (
* The null pathname case means the handle is for
* the actual object to be evaluated
*/
status = acpi_ns_evaluate_by_handle (handle, internal_params,
&internal_return_obj);
status = acpi_ns_evaluate_by_handle (&info);
}
else {
/*
* Both a Handle and a relative Pathname
*/
status = acpi_ns_evaluate_relative (handle, pathname, internal_params,
&internal_return_obj);
status = acpi_ns_evaluate_relative (pathname, &info);
}
}
......@@ -274,11 +277,11 @@ acpi_evaluate_object (
* copy the return value to an external object.
*/
if (return_buffer) {
if (!internal_return_obj) {
if (!info.return_object) {
return_buffer->length = 0;
}
else {
if (ACPI_GET_DESCRIPTOR_TYPE (internal_return_obj) == ACPI_DESC_TYPE_NAMED) {
if (ACPI_GET_DESCRIPTOR_TYPE (info.return_object) == ACPI_DESC_TYPE_NAMED) {
/*
* If we received a NS Node as a return object, this means that
* the object we are evaluating has nothing interesting to
......@@ -288,7 +291,7 @@ acpi_evaluate_object (
* support for various types at a later date if necessary.
*/
status = AE_TYPE;
internal_return_obj = NULL; /* No need to delete a NS Node */
info.return_object = NULL; /* No need to delete a NS Node */
return_buffer->length = 0;
}
......@@ -297,7 +300,7 @@ acpi_evaluate_object (
* Find out how large a buffer is needed
* to contain the returned object
*/
status = acpi_ut_get_object_size (internal_return_obj,
status = acpi_ut_get_object_size (info.return_object,
&buffer_space_needed);
if (ACPI_SUCCESS (status)) {
/* Validate/Allocate/Clear caller buffer */
......@@ -309,13 +312,14 @@ acpi_evaluate_object (
*/
ACPI_DEBUG_PRINT ((ACPI_DB_INFO,
"Needed buffer size %X, %s\n",
(u32) buffer_space_needed, acpi_format_exception (status)));
(u32) buffer_space_needed,
acpi_format_exception (status)));
}
else {
/*
* We have enough space for the object, build it
*/
status = acpi_ut_copy_iobject_to_eobject (internal_return_obj,
status = acpi_ut_copy_iobject_to_eobject (info.return_object,
return_buffer);
}
}
......@@ -323,7 +327,7 @@ acpi_evaluate_object (
}
}
if (internal_return_obj) {
if (info.return_object) {
/*
* Delete the internal return object. NOTE: Interpreter
* must be locked to avoid race condition.
......@@ -334,7 +338,7 @@ acpi_evaluate_object (
* Delete the internal return object. (Or at least
* decrement the reference count by one)
*/
acpi_ut_remove_reference (internal_return_obj);
acpi_ut_remove_reference (info.return_object);
acpi_ex_exit_interpreter ();
}
}
......@@ -342,10 +346,10 @@ acpi_evaluate_object (
/*
* Free the input parameter list (if we created one),
*/
if (internal_params) {
if (info.parameters) {
/* Free the allocated parameter block */
acpi_ut_delete_internal_object_list (internal_params);
acpi_ut_delete_internal_object_list (info.parameters);
}
return_ACPI_STATUS (status);
......
......@@ -281,7 +281,7 @@ acpi_get_object_info (
if (info.type == ACPI_TYPE_DEVICE) {
/*
* Get extra info for ACPI Devices objects only:
* Run the Device _HID, _UID, _CID, _STA, and _ADR methods.
* Run the Device _HID, _UID, _CID, _STA, _ADR and _sx_d methods.
*
* Note: none of these methods are required, so they may or may
* not be present for this device. The Info.Valid bitfield is used
......@@ -330,7 +330,7 @@ acpi_get_object_info (
status = acpi_ut_execute_sxds (node, info.highest_dstates);
if (ACPI_SUCCESS (status)) {
info.valid |= ACPI_VALID_STA;
info.valid |= ACPI_VALID_SXDS;
}
status = AE_OK;
......
......@@ -1066,15 +1066,15 @@ __setup("acpi_serialize", acpi_serialize_setup);
* Run-time events on the same GPE this flag is available
* to tell Linux to keep the wake-time GPEs enabled at run-time.
*/
static int __init
acpi_leave_gpes_disabled_setup(char *str)
int __init
acpi_wake_gpes_always_on_setup(char *str)
{
printk(KERN_INFO PREFIX "leave wake GPEs disabled\n");
printk(KERN_INFO PREFIX "wake GPEs not disabled\n");
acpi_gbl_leave_wake_gpes_disabled = TRUE;
acpi_gbl_leave_wake_gpes_disabled = FALSE;
return 1;
}
__setup("acpi_leave_gpes_disabled", acpi_leave_gpes_disabled_setup);
__setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
......@@ -251,7 +251,7 @@
#define ARGI_CREATE_FIELD_OP ARGI_LIST4 (ARGI_BUFFER, ARGI_INTEGER, ARGI_INTEGER, ARGI_REFERENCE)
#define ARGI_CREATE_QWORD_FIELD_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_INTEGER, ARGI_REFERENCE)
#define ARGI_CREATE_WORD_FIELD_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_INTEGER, ARGI_REFERENCE)
#define ARGI_DATA_REGION_OP ARGI_LIST3 (ARGI_STRING, ARGI_STRING, ARGI_STRING)
#define ARGI_DATA_REGION_OP ARGI_LIST3 (ARGI_STRING, ARGI_STRING, ARGI_STRING)
#define ARGI_DEBUG_OP ARG_NONE
#define ARGI_DECREMENT_OP ARGI_LIST1 (ARGI_INTEGER_REF)
#define ARGI_DEREF_OF_OP ARGI_LIST1 (ARGI_REF_OR_STRING)
......@@ -270,10 +270,10 @@
#define ARGI_INDEX_FIELD_OP ARGI_INVALID_OPCODE
#define ARGI_INDEX_OP ARGI_LIST3 (ARGI_COMPLEXOBJ, ARGI_INTEGER, ARGI_TARGETREF)
#define ARGI_LAND_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_INTEGER)
#define ARGI_LEQUAL_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_INTEGER)
#define ARGI_LGREATER_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_INTEGER)
#define ARGI_LEQUAL_OP ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_COMPUTEDATA)
#define ARGI_LGREATER_OP ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_COMPUTEDATA)
#define ARGI_LGREATEREQUAL_OP ARGI_INVALID_OPCODE
#define ARGI_LLESS_OP ARGI_LIST2 (ARGI_INTEGER, ARGI_INTEGER)
#define ARGI_LLESS_OP ARGI_LIST2 (ARGI_COMPUTEDATA,ARGI_COMPUTEDATA)
#define ARGI_LLESSEQUAL_OP ARGI_INVALID_OPCODE
#define ARGI_LNOT_OP ARGI_LIST1 (ARGI_INTEGER)
#define ARGI_LNOTEQUAL_OP ARGI_INVALID_OPCODE
......
......@@ -57,7 +57,7 @@
*
* FUNCTION: acpi_psx_execute
*
* PARAMETERS: method_node - A method object containing both the AML
* PARAMETERS: Info->Node - A method object containing both the AML
* address and length.
* **Params - List of parameters to pass to method,
* terminated by NULL. Params itself may be
......@@ -73,9 +73,7 @@
acpi_status
acpi_psx_execute (
struct acpi_namespace_node *method_node,
union acpi_operand_object **params,
union acpi_operand_object **return_obj_desc)
struct acpi_parameter_info *info)
{
acpi_status status;
union acpi_operand_object *obj_desc;
......@@ -89,29 +87,30 @@ acpi_psx_execute (
/* Validate the Node and get the attached object */
if (!method_node) {
if (!info || !info->node) {
return_ACPI_STATUS (AE_NULL_ENTRY);
}
obj_desc = acpi_ns_get_attached_object (method_node);
obj_desc = acpi_ns_get_attached_object (info->node);
if (!obj_desc) {
return_ACPI_STATUS (AE_NULL_OBJECT);
}
/* Init for new method, wait on concurrency semaphore */
status = acpi_ds_begin_method_execution (method_node, obj_desc, NULL);
status = acpi_ds_begin_method_execution (info->node, obj_desc, NULL);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
if (params) {
if ((info->parameter_type == ACPI_PARAM_ARGS) &&
(info->parameters)) {
/*
* The caller "owns" the parameters, so give each one an extra
* reference
*/
for (i = 0; params[i]; i++) {
acpi_ut_add_reference (params[i]);
for (i = 0; info->parameters[i]; i++) {
acpi_ut_add_reference (info->parameters[i]);
}
}
......@@ -121,7 +120,7 @@ acpi_psx_execute (
*/
ACPI_DEBUG_PRINT ((ACPI_DB_PARSE,
"**** Begin Method Parse **** Entry=%p obj=%p\n",
method_node, obj_desc));
info->node, obj_desc));
/* Create and init a Root Node */
......@@ -147,8 +146,9 @@ acpi_psx_execute (
goto cleanup2;
}
status = acpi_ds_init_aml_walk (walk_state, op, method_node, obj_desc->method.aml_start,
obj_desc->method.aml_length, NULL, NULL, 1);
status = acpi_ds_init_aml_walk (walk_state, op, info->node,
obj_desc->method.aml_start,
obj_desc->method.aml_length, NULL, 1);
if (ACPI_FAILURE (status)) {
goto cleanup3;
}
......@@ -159,7 +159,6 @@ acpi_psx_execute (
acpi_ps_delete_parse_tree (op);
if (ACPI_FAILURE (status)) {
goto cleanup1; /* Walk state is already deleted */
}
/*
......@@ -167,7 +166,7 @@ acpi_psx_execute (
*/
ACPI_DEBUG_PRINT ((ACPI_DB_PARSE,
"**** Begin Method Execution **** Entry=%p obj=%p\n",
method_node, obj_desc));
info->node, obj_desc));
/* Create and init a Root Node */
......@@ -179,8 +178,8 @@ acpi_psx_execute (
/* Init new op with the method name and pointer back to the NS node */
acpi_ps_set_name (op, method_node->name.integer);
op->common.node = method_node;
acpi_ps_set_name (op, info->node->name.integer);
op->common.node = info->node;
/* Create and initialize a new walk state */
......@@ -190,8 +189,9 @@ acpi_psx_execute (
goto cleanup2;
}
status = acpi_ds_init_aml_walk (walk_state, op, method_node, obj_desc->method.aml_start,
obj_desc->method.aml_length, params, return_obj_desc, 3);
status = acpi_ds_init_aml_walk (walk_state, op, info->node,
obj_desc->method.aml_start,
obj_desc->method.aml_length, info, 3);
if (ACPI_FAILURE (status)) {
goto cleanup3;
}
......@@ -210,13 +210,14 @@ acpi_psx_execute (
acpi_ps_delete_parse_tree (op);
cleanup1:
if (params) {
if ((info->parameter_type == ACPI_PARAM_ARGS) &&
(info->parameters)) {
/* Take away the extra reference that we gave the parameters above */
for (i = 0; params[i]; i++) {
for (i = 0; info->parameters[i]; i++) {
/* Ignore errors, just do them all */
(void) acpi_ut_update_object_reference (params[i], REF_DECREMENT);
(void) acpi_ut_update_object_reference (info->parameters[i], REF_DECREMENT);
}
}
......@@ -228,10 +229,10 @@ acpi_psx_execute (
* If the method has returned an object, signal this to the caller with
* a control exception code
*/
if (*return_obj_desc) {
if (info->return_object) {
ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, "Method returned obj_desc=%p\n",
*return_obj_desc));
ACPI_DUMP_STACK_ENTRY (*return_obj_desc);
info->return_object));
ACPI_DUMP_STACK_ENTRY (info->return_object);
status = AE_CTRL_RETURN_VALUE;
}
......
......@@ -2234,7 +2234,6 @@ acpi_processor_get_info (
* (In particular, allocating the IO range for Cardbus)
*/
request_region(pr->throttling.address, 6, "ACPI CPU throttle");
request_region(acpi_fadt.xpm_tmr_blk.address, 4, "ACPI timer");
}
acpi_processor_get_power_info(pr);
......
......@@ -289,6 +289,7 @@ acpi_rs_set_srs_method_data (
acpi_handle handle,
struct acpi_buffer *in_buffer)
{
struct acpi_parameter_info info;
union acpi_operand_object *params[2];
acpi_status status;
struct acpi_buffer buffer;
......@@ -329,10 +330,14 @@ acpi_rs_set_srs_method_data (
params[0]->common.flags = AOPOBJ_DATA_VALID;
params[1] = NULL;
info.node = handle;
info.parameters = params;
info.parameter_type = ACPI_PARAM_ARGS;
/*
* Execute the method, no return value
*/
status = acpi_ns_evaluate_relative (handle, "_SRS", params, NULL);
status = acpi_ns_evaluate_relative ("_SRS", &info);
/*
* Clean up and return the status from acpi_ns_evaluate_relative
......
......@@ -259,7 +259,8 @@ acpi_walk_resources (
/* Setup pointers */
resource = (struct acpi_resource *) buffer.pointer;
buffer_end = (struct acpi_resource *) ((u8 *) buffer.pointer + buffer.length);
buffer_end = ACPI_CAST_PTR (struct acpi_resource,
((u8 *) buffer.pointer + buffer.length));
/* Walk the resource list */
......
......@@ -32,16 +32,16 @@ static u32 acpi_suspend_states[] = {
/**
* acpi_pm_prepare - Do preliminary suspend work.
* @state: suspend state we're entering.
* @pm_state: suspend state we're entering.
*
* Make sure we support the state. If we do, and we need it, set the
* firmware waking vector and do arch-specific nastiness to get the
* wakeup code to the waking vector.
*/
static int acpi_pm_prepare(u32 state)
static int acpi_pm_prepare(u32 pm_state)
{
u32 acpi_state = acpi_suspend_states[state];
u32 acpi_state = acpi_suspend_states[pm_state];
if (!sleep_states[acpi_state])
return -EPERM;
......@@ -49,7 +49,7 @@ static int acpi_pm_prepare(u32 state)
/* do we have a wakeup address for S2 and S3? */
/* Here, we support only S4BIOS, thus we set the wakeup address */
/* S4OS is only supported for now via swsusp.. */
if (state == PM_SUSPEND_MEM || state == PM_SUSPEND_DISK) {
if (pm_state == PM_SUSPEND_MEM || pm_state == PM_SUSPEND_DISK) {
if (!acpi_wakeup_address)
return -EFAULT;
acpi_set_firmware_waking_vector(
......@@ -63,23 +63,23 @@ static int acpi_pm_prepare(u32 state)
/**
* acpi_pm_enter - Actually enter a sleep state.
* @state: State we're entering.
* @pm_state: State we're entering.
*
* Flush caches and go to sleep. For STR or STD, we have to call
* arch-specific assembly, which in turn call acpi_enter_sleep_state().
* It's unfortunate, but it works. Please fix if you're feeling frisky.
*/
static int acpi_pm_enter(u32 state)
static int acpi_pm_enter(u32 pm_state)
{
acpi_status status = AE_OK;
unsigned long flags = 0;
u32 acpi_state = acpi_suspend_states[state];
u32 acpi_state = acpi_suspend_states[pm_state];
ACPI_FLUSH_CPU_CACHE();
/* Do arch specific saving of state. */
if (state > PM_SUSPEND_STANDBY) {
if (pm_state > PM_SUSPEND_STANDBY) {
int error = acpi_save_state_mem();
if (error)
return error;
......@@ -87,7 +87,7 @@ static int acpi_pm_enter(u32 state)
local_irq_save(flags);
switch (state)
switch (pm_state)
{
case PM_SUSPEND_STANDBY:
barrier();
......@@ -115,7 +115,7 @@ static int acpi_pm_enter(u32 state)
* And, in the case of the latter, the memory image should have already
* been loaded from disk.
*/
if (state > PM_SUSPEND_STANDBY)
if (pm_state > PM_SUSPEND_STANDBY)
acpi_restore_state_mem();
......@@ -125,15 +125,17 @@ static int acpi_pm_enter(u32 state)
/**
* acpi_pm_finish - Finish up suspend sequence.
* @state: State we're coming out of.
* @pm_state: State we're coming out of.
*
* This is called after we wake back up (or if entering the sleep state
* failed).
*/
static int acpi_pm_finish(u32 state)
static int acpi_pm_finish(u32 pm_state)
{
acpi_leave_sleep_state(state);
u32 acpi_state = acpi_suspend_states[pm_state];
acpi_leave_sleep_state(acpi_state);
/* reset firmware waking vector */
acpi_set_firmware_waking_vector((acpi_physical_address) 0);
......@@ -174,7 +176,7 @@ static int __init acpi_sleep_init(void)
return 0;
printk(KERN_INFO PREFIX "(supports");
for (i=0; i<ACPI_S_STATE_COUNT; i++) {
for (i=0; i < ACPI_S_STATE_COUNT; i++) {
acpi_status status;
u8 type_a, type_b;
status = acpi_get_sleep_type_data(i, &type_a, &type_b);
......
......@@ -389,14 +389,17 @@ acpi_tb_scan_memory_for_rsdp (
* Flags - Current memory mode (logical vs.
* physical addressing)
*
* RETURN: Status
* RETURN: Status, RSDP physical address
*
* DESCRIPTION: search lower 1_mbyte of memory for the root system descriptor
* pointer structure. If it is found, set *RSDP to point to it.
*
* NOTE: The RSDp must be either in the first 1_k of the Extended
* BIOS Data Area or between E0000 and FFFFF (ACPI 1.0 section
* 5.2.2; assertion #421).
* NOTE1: The RSDP must be either in the first 1_k of the Extended
* BIOS Data Area or between E0000 and FFFFF (From ACPI Spec.)
* Only a 32-bit physical address is necessary.
*
* NOTE2: This function is always available, regardless of the
* initialization state of the rest of ACPI.
*
******************************************************************************/
......@@ -407,8 +410,8 @@ acpi_tb_find_rsdp (
{
u8 *table_ptr;
u8 *mem_rover;
u64 phys_addr;
acpi_status status = AE_OK;
u32 physical_address;
acpi_status status;
ACPI_FUNCTION_TRACE ("tb_find_rsdp");
......@@ -419,36 +422,57 @@ acpi_tb_find_rsdp (
*/
if ((flags & ACPI_MEMORY_MODE) == ACPI_LOGICAL_ADDRESSING) {
/*
* 1) Search EBDA (low memory) paragraphs
* 1a) Get the location of the EBDA
*/
status = acpi_os_map_memory ((u64) ACPI_LO_RSDP_WINDOW_BASE, ACPI_LO_RSDP_WINDOW_SIZE,
status = acpi_os_map_memory ((acpi_physical_address) ACPI_EBDA_PTR_LOCATION,
ACPI_EBDA_PTR_LENGTH,
(void *) &table_ptr);
if (ACPI_FAILURE (status)) {
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Could not map memory at %X for length %X\n",
ACPI_LO_RSDP_WINDOW_BASE, ACPI_LO_RSDP_WINDOW_SIZE));
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Could not map memory at %8.8X for length %X\n",
ACPI_EBDA_PTR_LOCATION, ACPI_EBDA_PTR_LENGTH));
return_ACPI_STATUS (status);
}
mem_rover = acpi_tb_scan_memory_for_rsdp (table_ptr, ACPI_LO_RSDP_WINDOW_SIZE);
acpi_os_unmap_memory (table_ptr, ACPI_LO_RSDP_WINDOW_SIZE);
ACPI_MOVE_16_TO_32 (&physical_address, table_ptr);
physical_address <<= 4; /* Convert segment to physical address */
acpi_os_unmap_memory (table_ptr, ACPI_EBDA_PTR_LENGTH);
if (mem_rover) {
/* Found it, return the physical address */
/* EBDA present? */
phys_addr = ACPI_LO_RSDP_WINDOW_BASE;
phys_addr += ACPI_PTR_DIFF (mem_rover,table_ptr);
if (physical_address > 0x400) {
/*
* 1b) Search EBDA paragraphs (EBDA is required to be a minimum of 1_k length)
*/
status = acpi_os_map_memory ((acpi_physical_address) physical_address,
ACPI_EBDA_WINDOW_SIZE,
(void *) &table_ptr);
if (ACPI_FAILURE (status)) {
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Could not map memory at %8.8X for length %X\n",
physical_address, ACPI_EBDA_WINDOW_SIZE));
return_ACPI_STATUS (status);
}
table_info->physical_address = phys_addr;
return_ACPI_STATUS (AE_OK);
mem_rover = acpi_tb_scan_memory_for_rsdp (table_ptr, ACPI_EBDA_WINDOW_SIZE);
acpi_os_unmap_memory (table_ptr, ACPI_EBDA_WINDOW_SIZE);
if (mem_rover) {
/* Found it, return the physical address */
physical_address += ACPI_PTR_DIFF (mem_rover, table_ptr);
table_info->physical_address = (acpi_physical_address) physical_address;
return_ACPI_STATUS (AE_OK);
}
}
/*
* 2) Search upper memory: 16-byte boundaries in E0000h-F0000h
* 2) Search upper memory: 16-byte boundaries in E0000h-FFFFFh
*/
status = acpi_os_map_memory ((u64) ACPI_HI_RSDP_WINDOW_BASE, ACPI_HI_RSDP_WINDOW_SIZE,
status = acpi_os_map_memory ((acpi_physical_address) ACPI_HI_RSDP_WINDOW_BASE,
ACPI_HI_RSDP_WINDOW_SIZE,
(void *) &table_ptr);
if (ACPI_FAILURE (status)) {
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Could not map memory at %X for length %X\n",
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Could not map memory at %8.8X for length %X\n",
ACPI_HI_RSDP_WINDOW_BASE, ACPI_HI_RSDP_WINDOW_SIZE));
return_ACPI_STATUS (status);
}
......@@ -459,10 +483,9 @@ acpi_tb_find_rsdp (
if (mem_rover) {
/* Found it, return the physical address */
phys_addr = ACPI_HI_RSDP_WINDOW_BASE;
phys_addr += ACPI_PTR_DIFF (mem_rover, table_ptr);
physical_address = ACPI_HI_RSDP_WINDOW_BASE + ACPI_PTR_DIFF (mem_rover, table_ptr);
table_info->physical_address = phys_addr;
table_info->physical_address = (acpi_physical_address) physical_address;
return_ACPI_STATUS (AE_OK);
}
}
......@@ -472,19 +495,29 @@ acpi_tb_find_rsdp (
*/
else {
/*
* 1) Search EBDA (low memory) paragraphs
* 1a) Get the location of the EBDA
*/
mem_rover = acpi_tb_scan_memory_for_rsdp (ACPI_PHYSADDR_TO_PTR (ACPI_LO_RSDP_WINDOW_BASE),
ACPI_LO_RSDP_WINDOW_SIZE);
if (mem_rover) {
/* Found it, return the physical address */
table_info->physical_address = ACPI_TO_INTEGER (mem_rover);
return_ACPI_STATUS (AE_OK);
ACPI_MOVE_16_TO_32 (&physical_address, ACPI_EBDA_PTR_LOCATION);
physical_address <<= 4; /* Convert segment to physical address */
/* EBDA present? */
if (physical_address > 0x400) {
/*
* 1b) Search EBDA paragraphs (EBDA is required to be a minimum of 1_k length)
*/
mem_rover = acpi_tb_scan_memory_for_rsdp (ACPI_PHYSADDR_TO_PTR (physical_address),
ACPI_EBDA_WINDOW_SIZE);
if (mem_rover) {
/* Found it, return the physical address */
table_info->physical_address = ACPI_TO_INTEGER (mem_rover);
return_ACPI_STATUS (AE_OK);
}
}
/*
* 2) Search upper memory: 16-byte boundaries in E0000h-F0000h
* 2) Search upper memory: 16-byte boundaries in E0000h-FFFFFh
*/
mem_rover = acpi_tb_scan_memory_for_rsdp (ACPI_PHYSADDR_TO_PTR (ACPI_HI_RSDP_WINDOW_BASE),
ACPI_HI_RSDP_WINDOW_SIZE);
......
......@@ -259,8 +259,8 @@ acpi_ut_validate_buffer (
*
* FUNCTION: acpi_ut_initialize_buffer
*
* PARAMETERS: required_length - Length needed
* Buffer - Buffer to be validated
* PARAMETERS: Buffer - Buffer to be validated
* required_length - Length needed
*
* RETURN: Status
*
......@@ -603,7 +603,8 @@ acpi_ut_free_and_track (
*
* FUNCTION: acpi_ut_find_allocation
*
* PARAMETERS: Allocation - Address of allocated memory
* PARAMETERS: list_id - Memory list to search
* Allocation - Address of allocated memory
*
* RETURN: A list element if found; NULL otherwise.
*
......@@ -646,7 +647,8 @@ acpi_ut_find_allocation (
*
* FUNCTION: acpi_ut_track_allocation
*
* PARAMETERS: Allocation - Address of allocated memory
* PARAMETERS: list_id - Memory list to search
* Allocation - Address of allocated memory
* Size - Size of the allocation
* alloc_type - MEM_MALLOC or MEM_CALLOC
* Component - Component type of caller
......@@ -733,7 +735,8 @@ acpi_ut_track_allocation (
*
* FUNCTION: acpi_ut_remove_allocation
*
* PARAMETERS: Allocation - Address of allocated memory
* PARAMETERS: list_id - Memory list to search
* Allocation - Address of allocated memory
* Component - Component type of caller
* Module - Source file name of caller
* Line - Line number of caller
......
......@@ -133,7 +133,7 @@ acpi_ut_evaluate_object (
u32 expected_return_btypes,
union acpi_operand_object **return_desc)
{
union acpi_operand_object *obj_desc;
struct acpi_parameter_info info;
acpi_status status;
u32 return_btype;
......@@ -141,9 +141,13 @@ acpi_ut_evaluate_object (
ACPI_FUNCTION_TRACE ("ut_evaluate_object");
info.node = prefix_node;
info.parameters = NULL;
info.parameter_type = ACPI_PARAM_ARGS;
/* Evaluate the object/method */
status = acpi_ns_evaluate_relative (prefix_node, path, NULL, &obj_desc);
status = acpi_ns_evaluate_relative (path, &info);
if (ACPI_FAILURE (status)) {
if (status == AE_NOT_FOUND) {
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[%4.4s.%s] was not found\n",
......@@ -159,7 +163,7 @@ acpi_ut_evaluate_object (
/* Did we get a return object? */
if (!obj_desc) {
if (!info.return_object) {
if (expected_return_btypes) {
ACPI_REPORT_METHOD_ERROR ("No object was returned from",
prefix_node, path, AE_NOT_EXIST);
......@@ -172,7 +176,7 @@ acpi_ut_evaluate_object (
/* Map the return object type to the bitmapped type */
switch (ACPI_GET_OBJECT_TYPE (obj_desc)) {
switch (ACPI_GET_OBJECT_TYPE (info.return_object)) {
case ACPI_TYPE_INTEGER:
return_btype = ACPI_BTYPE_INTEGER;
break;
......@@ -202,17 +206,17 @@ acpi_ut_evaluate_object (
ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
"Type returned from %s was incorrect: %X\n",
path, ACPI_GET_OBJECT_TYPE (obj_desc)));
path, ACPI_GET_OBJECT_TYPE (info.return_object)));
/* On error exit, we must delete the return object */
acpi_ut_remove_reference (obj_desc);
acpi_ut_remove_reference (info.return_object);
return_ACPI_STATUS (AE_TYPE);
}
/* Object type is OK, return it */
*return_desc = obj_desc;
*return_desc = info.return_object;
return_ACPI_STATUS (AE_OK);
}
......
......@@ -171,27 +171,40 @@ u8 acpi_gbl_shutdown = TRUE;
const u8 acpi_gbl_decode_to8bit [8] = {1,2,4,8,16,32,64,128};
const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT] = {
"\\_S0_",
"\\_S1_",
"\\_S2_",
"\\_S3_",
"\\_S4_",
"\\_S5_"};
const char *acpi_gbl_highest_dstate_names[4] = {
"_S1D",
"_S2D",
"_S3D",
"_S4D"};
const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT] =
{
"\\_S0_",
"\\_S1_",
"\\_S2_",
"\\_S3_",
"\\_S4_",
"\\_S5_"
};
/* Strings supported by the _OSI predefined (internal) method */
const char *acpi_gbl_highest_dstate_names[4] =
{
"_S1D",
"_S2D",
"_S3D",
"_S4D"
};
const char *acpi_gbl_valid_osi_strings[ACPI_NUM_OSI_STRINGS] = {
"Linux",
"Windows 2000",
"Windows 2001",
"Windows 2001.1"};
/*
* Strings supported by the _OSI predefined (internal) method.
* When adding strings, be sure to update ACPI_NUM_OSI_STRINGS.
*/
const char *acpi_gbl_valid_osi_strings[ACPI_NUM_OSI_STRINGS] =
{
"Linux",
"Windows 2000",
"Windows 2001",
"Windows 2001.1",
"Windows 2001 SP0",
"Windows 2001 SP1",
"Windows 2001 SP2",
"Windows 2001 SP3",
"Windows 2001 SP4"
};
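Because the table above is sized by ACPI_NUM_OSI_STRINGS (bumped from 4 to 9 later in this patch), the count and the entries must change together. A hedged sketch of the kind of bounded lookup that relies on the count staying accurate (simplified, not the real _OSI implementation):

#include <stdio.h>
#include <string.h>

#define NUM_OSI_STRINGS 9	/* must equal the number of table entries */

static const char *osi_strings[NUM_OSI_STRINGS] = {
	"Linux", "Windows 2000", "Windows 2001", "Windows 2001.1",
	"Windows 2001 SP0", "Windows 2001 SP1", "Windows 2001 SP2",
	"Windows 2001 SP3", "Windows 2001 SP4"
};

static int osi_supported(const char *requested)
{
	int i;

	for (i = 0; i < NUM_OSI_STRINGS; i++)
		if (strcmp(osi_strings[i], requested) == 0)
			return 1;
	return 0;
}

int main(void)
{
	printf("Windows 2001 -> %d\n", osi_supported("Windows 2001"));
	printf("FreeBSD      -> %d\n", osi_supported("FreeBSD"));
	return 0;
}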
/******************************************************************************
......@@ -213,7 +226,7 @@ const struct acpi_predefined_names acpi_gbl_pre_defined_names[] =
{"_PR_", ACPI_TYPE_LOCAL_SCOPE, NULL},
{"_SB_", ACPI_TYPE_DEVICE, NULL},
{"_SI_", ACPI_TYPE_LOCAL_SCOPE, NULL},
{"_TZ_", ACPI_TYPE_LOCAL_SCOPE, NULL},
{"_TZ_", ACPI_TYPE_THERMAL, NULL},
{"_REV", ACPI_TYPE_INTEGER, "2"},
{"_OS_", ACPI_TYPE_STRING, ACPI_OS_NAME},
{"_GL_", ACPI_TYPE_MUTEX, "0"},
......@@ -561,26 +574,37 @@ acpi_ut_get_node_name (
struct acpi_namespace_node *node = (struct acpi_namespace_node *) object;
/* Must return a string of exactly 4 characters == ACPI_NAME_SIZE */
if (!object)
{
return ("NULL NODE");
return ("NULL");
}
if (object == ACPI_ROOT_OBJECT)
/* Check for Root node */
if ((object == ACPI_ROOT_OBJECT) ||
(object == acpi_gbl_root_node))
{
node = acpi_gbl_root_node;
return ("\"\\\" ");
}
/* Descriptor must be a namespace node */
if (node->descriptor != ACPI_DESC_TYPE_NAMED)
{
return ("****");
return ("####");
}
/* Name must be a valid ACPI name */
if (!acpi_ut_valid_acpi_name (* (u32 *) node->name.ascii))
{
return ("----");
return ("????");
}
/* Return the name */
return (node->name.ascii);
}
......@@ -783,10 +807,6 @@ acpi_ut_init_globals (
ACPI_FUNCTION_TRACE ("ut_init_globals");
/* Runtime configuration */
acpi_gbl_create_osi_method = TRUE;
acpi_gbl_all_methods_serialized = FALSE;
/* Memory allocation and cache lists */
......@@ -880,6 +900,7 @@ acpi_ut_init_globals (
/* Hardware oriented */
acpi_gbl_events_initialized = FALSE;
acpi_gbl_system_awake_and_running = TRUE;
/* Namespace */
......
......@@ -157,9 +157,8 @@ acpi_enable_subsystem (
}
}
/*
* Enable ACPI mode
*/
/* Enable ACPI mode */
if (!(flags & ACPI_NO_ACPI_ENABLE)) {
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Init] Going into ACPI mode\n"));
......@@ -173,7 +172,21 @@ acpi_enable_subsystem (
}
/*
* Initialize ACPI Event handling
* Install the default op_region handlers. These are installed unless
* other handlers have already been installed via the
* install_address_space_handler interface.
*/
if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Init] Installing default address space handlers\n"));
status = acpi_ev_install_region_handlers ();
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
}
/*
* Initialize ACPI Event handling (Fixed and General Purpose)
*
* NOTE: We must have the hardware AND events initialized before we can execute
* ANY control methods SAFELY. Any control method can require ACPI hardware
......@@ -182,18 +195,18 @@ acpi_enable_subsystem (
if (!(flags & ACPI_NO_EVENT_INIT)) {
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Init] Initializing ACPI events\n"));
status = acpi_ev_initialize ();
status = acpi_ev_initialize_events ();
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
}
/* Install the SCI handler, Global Lock handler, and GPE handlers */
/* Install the SCI handler and Global Lock handler */
if (!(flags & ACPI_NO_HANDLER_INIT)) {
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Init] Installing SCI/GL/GPE handlers\n"));
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Init] Installing SCI/GL handlers\n"));
status = acpi_ev_handler_initialize ();
status = acpi_ev_install_xrupt_handlers ();
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
......@@ -226,18 +239,16 @@ acpi_initialize_objects (
/*
* Install the default op_region handlers. These are installed unless
* other handlers have already been installed via the
* install_address_space_handler interface.
* Run all _REG methods
*
* NOTE: This will cause _REG methods to be run. Any objects accessed
* NOTE: Any objects accessed
* by the _REG methods will be automatically initialized, even if they
* contain executable AML (see call to acpi_ns_initialize_objects below).
*/
if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Init] Installing default address space handlers\n"));
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Init] Executing _REG op_region methods\n"));
status = acpi_ev_init_address_spaces ();
status = acpi_ev_initialize_op_regions ();
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
......@@ -249,7 +260,7 @@ acpi_initialize_objects (
* objects: operation_regions, buffer_fields, Buffers, and Packages.
*/
if (!(flags & ACPI_NO_OBJECT_INIT)) {
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Init] Initializing ACPI Objects\n"));
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "[Init] Completing Initialization of ACPI Objects\n"));
status = acpi_ns_initialize_objects ();
if (ACPI_FAILURE (status)) {
......
......@@ -64,11 +64,21 @@
/* Version string */
#define ACPI_CA_VERSION 0x20040326
#define ACPI_CA_VERSION 0x20040615
/*
* OS name, used for the _OS object. The _OS object is essentially obsolete,
* but there is a large base of ASL/AML code in existing machines that check
* for the string below. The use of this string usually guarantees that
* the ASL will execute down the most tested code path. Also, there is some
* code that will not execute the _OSI method unless _OS matches the string
* below. Therefore, change this string at your own risk.
*/
#define ACPI_OS_NAME "Microsoft Windows NT"
/* Maximum objects in the various object caches */
#define ACPI_MAX_STATE_CACHE_DEPTH 64 /* State objects for stacks */
#define ACPI_MAX_STATE_CACHE_DEPTH 64 /* State objects */
#define ACPI_MAX_PARSE_CACHE_DEPTH 96 /* Parse tree objects */
#define ACPI_MAX_EXTPARSE_CACHE_DEPTH 64 /* Parse tree objects */
#define ACPI_MAX_OBJECT_CACHE_DEPTH 64 /* Interpreter operand objects */
......@@ -152,10 +162,11 @@
/* Constants used in searching for the RSDP in low memory */
#define ACPI_LO_RSDP_WINDOW_BASE 0 /* Physical Address */
#define ACPI_HI_RSDP_WINDOW_BASE 0xE0000 /* Physical Address */
#define ACPI_LO_RSDP_WINDOW_SIZE 0x400
#define ACPI_HI_RSDP_WINDOW_SIZE 0x20000
#define ACPI_EBDA_PTR_LOCATION 0x0000040E /* Physical Address */
#define ACPI_EBDA_PTR_LENGTH 2
#define ACPI_EBDA_WINDOW_SIZE 1024
#define ACPI_HI_RSDP_WINDOW_BASE 0x000E0000 /* Physical Address */
#define ACPI_HI_RSDP_WINDOW_SIZE 0x00020000
#define ACPI_RSDP_SCAN_STEP 16
/* Operation regions */
......@@ -187,7 +198,7 @@
/* Number of strings associated with the _OSI reserved method */
#define ACPI_NUM_OSI_STRINGS 4
#define ACPI_NUM_OSI_STRINGS 9
/******************************************************************************
......
......@@ -106,6 +106,10 @@ acpi_db_method_end (
* dbcmds - debug commands and output routines
*/
acpi_status
acpi_db_disassemble_method (
char *name);
void
acpi_db_display_table_info (
char *table_arg);
......@@ -164,6 +168,10 @@ void
acpi_db_set_scope (
char *name);
acpi_status
acpi_db_sleep (
char *object_arg);
void
acpi_db_find_references (
char *object_arg);
......
......@@ -52,6 +52,13 @@
#define BLOCK_BRACE 2
#define BLOCK_COMMA_LIST 4
struct acpi_external_list
{
char *path;
struct acpi_external_list *next;
};
extern struct acpi_external_list *acpi_gbl_external_list;
extern const char *acpi_gbl_io_decode[2];
extern const char *acpi_gbl_word_decode[4];
extern const char *acpi_gbl_consume_decode[2];
......@@ -399,4 +406,12 @@ acpi_dm_vendor_small_descriptor (
u32 level);
/*
* dmutils
*/
void
acpi_dm_add_to_external_list (
char *path);
#endif /* __ACDISASM_H__ */
......@@ -437,8 +437,7 @@ acpi_ds_init_aml_walk (
struct acpi_namespace_node *method_node,
u8 *aml_start,
u32 aml_length,
union acpi_operand_object **params,
union acpi_operand_object **return_obj_desc,
struct acpi_parameter_info *info,
u32 pass_number);
acpi_status
......
......@@ -46,11 +46,11 @@
acpi_status
acpi_ev_initialize (
acpi_ev_initialize_events (
void);
acpi_status
acpi_ev_handler_initialize (
acpi_ev_install_xrupt_handlers (
void);
......@@ -117,6 +117,20 @@ u8
acpi_ev_valid_gpe_event (
struct acpi_gpe_event_info *gpe_event_info);
acpi_status
acpi_ev_update_gpe_enable_masks (
struct acpi_gpe_event_info *gpe_event_info,
u8 type);
acpi_status
acpi_ev_enable_gpe (
struct acpi_gpe_event_info *gpe_event_info,
u8 write_to_hardware);
acpi_status
acpi_ev_disable_gpe (
struct acpi_gpe_event_info *gpe_event_info);
struct acpi_gpe_event_info *
acpi_ev_get_gpe_event_info (
acpi_handle gpe_device,
......@@ -139,6 +153,11 @@ acpi_status
acpi_ev_delete_gpe_block (
struct acpi_gpe_block_info *gpe_block);
acpi_status
acpi_ev_delete_gpe_handlers (
struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block);
u32
acpi_ev_gpe_dispatch (
struct acpi_gpe_event_info *gpe_event_info,
......@@ -148,12 +167,25 @@ u32
acpi_ev_gpe_detect (
struct acpi_gpe_xrupt_info *gpe_xrupt_list);
acpi_status
acpi_ev_set_gpe_type (
struct acpi_gpe_event_info *gpe_event_info,
u8 type);
acpi_status
acpi_ev_check_for_wake_only_gpe (
struct acpi_gpe_event_info *gpe_event_info);
/*
* Evregion - Address Space handling
*/
acpi_status
acpi_ev_init_address_spaces (
acpi_ev_install_region_handlers (
void);
acpi_status
acpi_ev_initialize_op_regions (
void);
acpi_status
......@@ -182,6 +214,19 @@ acpi_ev_detach_region (
union acpi_operand_object *region_obj,
u8 acpi_ns_is_locked);
acpi_status
acpi_ev_install_space_handler (
struct acpi_namespace_node *node,
acpi_adr_space_type space_id,
acpi_adr_space_handler handler,
acpi_adr_space_setup setup,
void *context);
acpi_status
acpi_ev_execute_reg_methods (
struct acpi_namespace_node *node,
acpi_adr_space_type space_id);
acpi_status
acpi_ev_execute_reg_method (
union acpi_operand_object *region_obj,
......
......@@ -95,8 +95,9 @@
#define AE_LOGICAL_ADDRESS (acpi_status) (0x001B | AE_CODE_ENVIRONMENTAL)
#define AE_ABORT_METHOD (acpi_status) (0x001C | AE_CODE_ENVIRONMENTAL)
#define AE_SAME_HANDLER (acpi_status) (0x001D | AE_CODE_ENVIRONMENTAL)
#define AE_WAKE_ONLY_GPE (acpi_status) (0x001E | AE_CODE_ENVIRONMENTAL)
#define AE_CODE_ENV_MAX 0x001D
#define AE_CODE_ENV_MAX 0x001E
/*
* Programmer exceptions
......@@ -222,7 +223,8 @@ char const *acpi_gbl_exception_names_env[] =
"AE_NO_GLOBAL_LOCK",
"AE_LOGICAL_ADDRESS",
"AE_ABORT_METHOD",
"AE_SAME_HANDLER"
"AE_SAME_HANDLER",
"AE_WAKE_ONLY_GPE"
};
char const *acpi_gbl_exception_names_pgm[] =
......
......@@ -46,15 +46,17 @@
/*
* Ensure that the globals are actually defined only once.
* Ensure that the globals are actually defined and initialized only once.
*
* The use of these defines allows a single list of globals (here) in order
* The use of these macros allows a single list of globals (here) in order
* to simplify maintenance of the code.
*/
#ifdef DEFINE_ACPI_GLOBALS
#define ACPI_EXTERN
#define ACPI_INIT_GLOBAL(a,b) a=b
#else
#define ACPI_EXTERN extern
#define ACPI_INIT_GLOBAL(a,b) a
#endif
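To make the single-list scheme concrete: exactly one translation unit defines DEFINE_ACPI_GLOBALS before including this header, so the same source line either defines and initializes a global or merely declares it. A reduced, standalone illustration of the macro trick (macro and variable names here are made up to avoid clashing with the real header):

#ifdef DEFINE_EXAMPLE_GLOBALS
#define EXAMPLE_EXTERN
#define EXAMPLE_INIT_GLOBAL(a, b) a = b
#else
#define EXAMPLE_EXTERN extern
#define EXAMPLE_INIT_GLOBAL(a, b) a
#endif

/* One line, two expansions:
 *   defining TU:  unsigned char gbl_example_flag = 1;
 *   other TUs:    extern unsigned char gbl_example_flag;
 */
EXAMPLE_EXTERN unsigned char EXAMPLE_INIT_GLOBAL(gbl_example_flag, 1);

This keeps each default value next to its declaration instead of duplicating it in a separate C file.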
/*
......@@ -64,6 +66,7 @@
ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_enable;
ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_enable;
/*****************************************************************************
*
* Debug support
......@@ -79,15 +82,35 @@ extern u32 acpi_dbg_layer;
extern u32 acpi_gbl_nesting_level;
/*****************************************************************************
*
* Runtime configuration
* Runtime configuration (static defaults that can be overridden at runtime)
*
****************************************************************************/
ACPI_EXTERN u8 acpi_gbl_create_osi_method;
ACPI_EXTERN u8 acpi_gbl_all_methods_serialized;
ACPI_EXTERN u8 acpi_gbl_leave_wake_gpes_disabled;
/*
* Create the predefined _OSI method in the namespace? Default is TRUE
* because ACPI CA is fully compatible with other ACPI implementations.
* Changing this will revert ACPI CA (and machine ASL) to pre-OSI behavior.
*/
ACPI_EXTERN u8 ACPI_INIT_GLOBAL (acpi_gbl_create_osi_method, TRUE);
/*
* Automatically serialize ALL control methods? Default is FALSE, meaning
* to use the Serialized/not_serialized method flags on a per method basis.
* Only change this if the ASL code is poorly written and cannot handle
* reentrancy even though methods are marked "not_serialized".
*/
ACPI_EXTERN u8 ACPI_INIT_GLOBAL (acpi_gbl_all_methods_serialized, FALSE);
/*
* Disable wakeup GPEs during runtime? Default is TRUE because WAKE and
* RUNTIME GPEs should never be shared, and WAKE GPEs should typically only
* be enabled just before going to sleep.
*/
ACPI_EXTERN u8 ACPI_INIT_GLOBAL (acpi_gbl_leave_wake_gpes_disabled, TRUE);
/*****************************************************************************
*
......@@ -102,7 +125,6 @@ ACPI_EXTERN u8 acpi_gbl_leave_wake_gpes_disable
*
* These tables are single-table only; meaning that there can be at most one
* of each in the system. Each global points to the actual table.
*
*/
ACPI_EXTERN u32 acpi_gbl_table_flags;
ACPI_EXTERN u32 acpi_gbl_rsdt_table_count;
......@@ -170,6 +192,7 @@ ACPI_EXTERN u8 acpi_gbl_step_to_next_call;
ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present;
ACPI_EXTERN u8 acpi_gbl_global_lock_present;
ACPI_EXTERN u8 acpi_gbl_events_initialized;
ACPI_EXTERN u8 acpi_gbl_system_awake_and_running;
extern u8 acpi_gbl_shutdown;
extern u32 acpi_gbl_startup_flags;
......
......@@ -114,15 +114,7 @@ acpi_hw_clear_acpi_status (
/* GPE support */
acpi_status
acpi_hw_enable_gpe (
struct acpi_gpe_event_info *gpe_event_info);
void
acpi_hw_enable_gpe_for_wakeup (
struct acpi_gpe_event_info *gpe_event_info);
acpi_status
acpi_hw_disable_gpe (
acpi_hw_write_gpe_enable_reg (
struct acpi_gpe_event_info *gpe_event_info);
acpi_status
......@@ -130,10 +122,6 @@ acpi_hw_disable_gpe_block (
struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block);
void
acpi_hw_disable_gpe_for_wakeup (
struct acpi_gpe_event_info *gpe_event_info);
acpi_status
acpi_hw_clear_gpe (
struct acpi_gpe_event_info *gpe_event_info);
......@@ -149,13 +137,27 @@ acpi_hw_get_gpe_status (
acpi_event_status *event_status);
acpi_status
acpi_hw_prepare_gpes_for_sleep (
acpi_hw_disable_all_gpes (
void);
acpi_status
acpi_hw_enable_all_runtime_gpes (
void);
acpi_status
acpi_hw_restore_gpes_on_wake (
acpi_hw_enable_all_wakeup_gpes (
void);
acpi_status
acpi_hw_enable_runtime_gpe_block (
struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block);
acpi_status
acpi_hw_enable_wakeup_gpe_block (
struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block);
/* ACPI Timer prototypes */
......
......@@ -117,6 +117,12 @@ acpi_ex_convert_to_ascii (
* exfield - ACPI AML (p-code) execution - field manipulation
*/
acpi_status
acpi_ex_common_buffer_setup (
union acpi_operand_object *obj_desc,
u32 buffer_length,
u32 *datum_count);
acpi_status
acpi_ex_extract_from_field (
union acpi_operand_object *obj_desc,
......@@ -240,8 +246,8 @@ acpi_ex_do_concatenate (
u8
acpi_ex_do_logical_op (
u16 opcode,
acpi_integer operand0,
acpi_integer operand1);
union acpi_operand_object *obj_desc,
union acpi_operand_object *obj_desc2);
acpi_integer
acpi_ex_do_math_op (
......@@ -563,8 +569,11 @@ acpi_status
acpi_ex_store_object_to_node (
union acpi_operand_object *source_desc,
struct acpi_namespace_node *node,
struct acpi_walk_state *walk_state);
struct acpi_walk_state *walk_state,
u8 implicit_conversion);
#define ACPI_IMPLICIT_CONVERSION TRUE
#define ACPI_NO_IMPLICIT_CONVERSION FALSE
/*
* exstoren
......
......@@ -189,8 +189,6 @@ struct acpi_namespace_node
u8 type; /* Type associated with this name */
u16 owner_id;
union acpi_name_union name; /* ACPI Name, always 4 chars per ACPI spec */
union acpi_operand_object *object; /* Pointer to attached ACPI object (optional) */
struct acpi_namespace_node *child; /* First child */
struct acpi_namespace_node *peer; /* Next peer*/
......@@ -211,10 +209,8 @@ struct acpi_namespace_node
#define ANOBJ_METHOD_LOCAL 0x10
#define ANOBJ_METHOD_NO_RETVAL 0x20
#define ANOBJ_METHOD_SOME_NO_RETVAL 0x40
#define ANOBJ_IS_BIT_OFFSET 0x80
/*
* ACPI Table Descriptor. One per ACPI table
*/
......@@ -309,16 +305,31 @@ struct acpi_create_field_info
*
****************************************************************************/
/* Information about a GPE, one per each GPE in an array */
/* Dispatch info for each GPE -- either a method or handler, cannot be both */
struct acpi_gpe_event_info
struct acpi_handler_info
{
struct acpi_namespace_node *method_node; /* Method node for this GPE level */
acpi_gpe_handler handler; /* Address of handler, if any */
acpi_event_handler address; /* Address of handler, if any */
void *context; /* Context to be passed to handler */
struct acpi_namespace_node *method_node; /* Method node for this GPE level (saved) */
};
union acpi_gpe_dispatch_info
{
struct acpi_namespace_node *method_node; /* Method node for this GPE level */
struct acpi_handler_info *handler;
};
/*
* Information about a GPE, one for each GPE in an array.
* NOTE: Important to keep this struct as small as possible.
*/
struct acpi_gpe_event_info
{
union acpi_gpe_dispatch_info dispatch; /* Either Method or Handler */
struct acpi_gpe_register_info *register_info; /* Backpointer to register info */
u8 flags; /* Level or Edge */
u8 bit_mask; /* This GPE within the register */
u8 flags; /* Misc info about this GPE */
u8 register_bit; /* This GPE bit within the register */
};
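The dispatch member behaves as a tagged union: which field is meaningful is recorded in the GPE's flags byte via the ACPI_GPE_DISPATCH_* values defined later in this patch. A hedged sketch of that pattern with simplified stand-in types (not the real dispatcher):

#include <stdio.h>

#define GPE_DISPATCH_MASK     0x18	/* values match the defines later in this patch */
#define GPE_DISPATCH_HANDLER  0x08
#define GPE_DISPATCH_METHOD   0x10

struct gpe_event {
	union {
		const char *method_path;	/* stand-in for a namespace node */
		void (*handler)(void *ctx);	/* stand-in for acpi_handler_info */
	} dispatch;
	unsigned char flags;
};

static void example_handler(void *ctx)
{
	(void) ctx;
	printf("handler invoked\n");
}

static void dispatch_gpe(struct gpe_event *ev)
{
	switch (ev->flags & GPE_DISPATCH_MASK) {
	case GPE_DISPATCH_METHOD:
		printf("queue control method %s\n", ev->dispatch.method_path);
		break;
	case GPE_DISPATCH_HANDLER:
		ev->dispatch.handler(NULL);
		break;
	default:
		printf("GPE not used: disable it\n");
		break;
	}
}

int main(void)
{
	struct gpe_event ev = { { "\\_GPE._L02" }, GPE_DISPATCH_METHOD };

	dispatch_gpe(&ev);
	ev.dispatch.handler = example_handler;
	ev.flags = GPE_DISPATCH_HANDLER;
	dispatch_gpe(&ev);
	return 0;
}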
/* Information about a GPE register pair, one per each status/enable pair in an array */
......@@ -327,9 +338,8 @@ struct acpi_gpe_register_info
{
struct acpi_generic_address status_address; /* Address of status reg */
struct acpi_generic_address enable_address; /* Address of enable reg */
u8 status; /* Current value of status reg */
u8 enable; /* Current value of enable reg */
u8 wake_enable; /* Mask of bits to keep enabled when sleeping */
u8 enable_for_wake; /* GPEs to keep enabled when sleeping */
u8 enable_for_run; /* GPEs to keep enabled when running */
u8 base_gpe_number; /* Base GPE number for this register */
};
......@@ -339,6 +349,7 @@ struct acpi_gpe_register_info
*/
struct acpi_gpe_block_info
{
struct acpi_namespace_node *node;
struct acpi_gpe_block_info *previous;
struct acpi_gpe_block_info *next;
struct acpi_gpe_xrupt_info *xrupt_block; /* Backpointer to interrupt block */
......@@ -502,7 +513,7 @@ struct acpi_thread_state
struct acpi_walk_state *walk_state_list; /* Head of list of walk_states for this thread */
union acpi_operand_object *acquired_mutex_list; /* List of all currently acquired mutexes */
u32 thread_id; /* Running thread ID */
u16 current_sync_level; /* Mutex Sync (nested acquire) level */
u8 current_sync_level; /* Mutex Sync (nested acquire) level */
};
......
......@@ -53,6 +53,9 @@
#define ACPI_LOBYTE(l) ((u8)(u16)(l))
#define ACPI_HIBYTE(l) ((u8)((((u16)(l)) >> 8) & 0xFF))
#define ACPI_SET_BIT(target,bit) ((target) |= (bit))
#define ACPI_CLEAR_BIT(target,bit) ((target) &= ~(bit))
#if ACPI_MACHINE_WIDTH == 16
......@@ -97,7 +100,7 @@
* printf() format helpers
*/
/* Split 64-bit integer into two 32-bit values. use with %8,8_x%8.8X */
/* Split 64-bit integer into two 32-bit values. Use with %8.8X%8.8X */
#define ACPI_FORMAT_UINT64(i) ACPI_HIDWORD(i),ACPI_LODWORD(i)
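The macro expands to two comma-separated 32-bit halves, so it pairs with two %8.8X conversions in a single format string. A minimal standalone illustration using plain stdint types (stand-ins for ACPI_HIDWORD/ACPI_LODWORD, not the real definitions):

#include <stdio.h>
#include <stdint.h>

#define HIDWORD(i) ((unsigned int) (((uint64_t) (i)) >> 32))
#define LODWORD(i) ((unsigned int) ((uint64_t) (i)))
#define FORMAT_UINT64(i) HIDWORD(i), LODWORD(i)	/* two printf arguments */

int main(void)
{
	uint64_t address = 0x00000001FEE00000ULL;

	printf("%8.8X%8.8X\n", FORMAT_UINT64(address));	/* 00000001FEE00000 */
	return 0;
}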
......
......@@ -278,33 +278,25 @@ acpi_ns_dump_objects (
acpi_status
acpi_ns_evaluate_by_handle (
struct acpi_namespace_node *prefix_node,
union acpi_operand_object **params,
union acpi_operand_object **return_object);
struct acpi_parameter_info *info);
acpi_status
acpi_ns_evaluate_by_name (
char *pathname,
union acpi_operand_object **params,
union acpi_operand_object **return_object);
struct acpi_parameter_info *info);
acpi_status
acpi_ns_evaluate_relative (
struct acpi_namespace_node *prefix_node,
char *pathname,
union acpi_operand_object **params,
union acpi_operand_object **return_object);
struct acpi_parameter_info *info);
acpi_status
acpi_ns_execute_control_method (
struct acpi_namespace_node *method_node,
union acpi_operand_object **params,
union acpi_operand_object **return_obj_desc);
struct acpi_parameter_info *info);
acpi_status
acpi_ns_get_object_value (
struct acpi_namespace_node *object_node,
union acpi_operand_object **return_obj_desc);
struct acpi_parameter_info *info);
/*
......
......@@ -204,13 +204,14 @@ struct acpi_object_method
struct acpi_object_mutex
{
ACPI_OBJECT_COMMON_HEADER
u16 sync_level;
u16 acquisition_depth;
struct acpi_thread_state *owner_thread;
void *semaphore;
u8 sync_level; /* 0-15, specified in Mutex() call */
u16 acquisition_depth; /* Allow multiple Acquires, same thread */
struct acpi_thread_state *owner_thread; /* Current owner of the mutex */
void *semaphore; /* Actual OS synchronization object */
union acpi_operand_object *prev; /* Link for list of acquired mutexes */
union acpi_operand_object *next; /* Link for list of acquired mutexes */
struct acpi_namespace_node *node; /* containing object */
struct acpi_namespace_node *node; /* Containing namespace node */
u8 original_sync_level; /* Owner's original sync level (0-15) */
};
......@@ -220,7 +221,7 @@ struct acpi_object_region
u8 space_id;
union acpi_operand_object *handler; /* Handler for region access */
struct acpi_namespace_node *node; /* containing object */
struct acpi_namespace_node *node; /* Containing namespace node */
union acpi_operand_object *next;
u32 length;
acpi_physical_address address;
......
......@@ -73,9 +73,7 @@ acpi_psx_load_table (
acpi_status
acpi_psx_execute (
struct acpi_namespace_node *method_node,
union acpi_operand_object **params,
union acpi_operand_object **return_obj_desc);
struct acpi_parameter_info *info);
/******************************************************************************
......
......@@ -296,7 +296,7 @@ acpi_install_gpe_handler (
acpi_handle gpe_device,
u32 gpe_number,
u32 type,
acpi_gpe_handler handler,
acpi_event_handler address,
void *context);
acpi_status
......@@ -312,7 +312,7 @@ acpi_status
acpi_remove_gpe_handler (
acpi_handle gpe_device,
u32 gpe_number,
acpi_gpe_handler handler);
acpi_event_handler address);
acpi_status
acpi_enable_event (
......@@ -333,6 +333,12 @@ acpi_get_event_status (
u32 event,
acpi_event_status *event_status);
acpi_status
acpi_set_gpe_type (
acpi_handle gpe_device,
u32 gpe_number,
u8 type);
acpi_status
acpi_enable_gpe (
acpi_handle gpe_device,
......
......@@ -69,13 +69,14 @@
struct acpi_walk_state
{
u8 data_type; /* To differentiate various internal objs MUST BE FIRST!*/\
u8 walk_type;
acpi_owner_id owner_id; /* Owner of objects created during the walk */
u8 last_predicate; /* Result of last predicate */
u8 reserved; /* For alignment */
u8 current_result; /* */
u8 next_op_info; /* Info about next_op */
u8 num_operands; /* Stack pointer for Operands[] array */
u8 return_used;
u8 walk_type;
u16 opcode; /* Current AML opcode */
u8 scope_depth;
u8 reserved1;
......@@ -91,7 +92,8 @@ struct acpi_walk_state
struct acpi_namespace_node arguments[ACPI_METHOD_NUM_ARGS]; /* Control method arguments */
union acpi_operand_object **caller_return_desc;
union acpi_generic_state *control_state; /* List of control states (nested IFs) */
struct acpi_namespace_node *deferred_node; /* Used when executing deferred opcodes */
struct acpi_namespace_node *deferred_node; /* Used when executing deferred opcodes */
struct acpi_gpe_event_info *gpe_event_info; /* Info for GPE (_Lxx/_Exx methods only) */
struct acpi_namespace_node local_variables[ACPI_METHOD_NUM_LOCALS]; /* Control method locals */
struct acpi_namespace_node *method_call_node; /* Called method Node*/
union acpi_parse_object *method_call_op; /* method_call Op if running a method */
......@@ -200,4 +202,21 @@ union acpi_aml_operands
};
/* Internal method parameter list */
struct acpi_parameter_info
{
struct acpi_namespace_node *node;
union acpi_operand_object **parameters;
union acpi_operand_object *return_object;
u8 parameter_type;
u8 return_object_type;
};
/* Types for parameter_type above */
#define ACPI_PARAM_ARGS 0
#define ACPI_PARAM_GPE 1
#endif
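The prototype changes earlier in this patch fold the separate parameter list
and return-object arguments into this single structure. A hypothetical caller
sketch; the pathname is arbitrary and the exact acpi_ns_evaluate_by_name
argument list is inferred from the hunks above:

	struct acpi_parameter_info info;
	acpi_status status;

	info.parameters = NULL;			/* No method arguments */
	info.parameter_type = ACPI_PARAM_ARGS;	/* Ordinary argument list (ACPI_PARAM_GPE is for GPE dispatch) */
	info.return_object = NULL;

	status = acpi_ns_evaluate_by_name ("\\_SB_.PCI0._STA", &info);
	if (ACPI_SUCCESS (status) && info.return_object) {
		/* Consume the result, then drop the extra reference */
		acpi_ut_remove_reference (info.return_object);
	}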
......@@ -288,19 +288,6 @@ struct smart_battery_table
};
/*
* High performance timer
*/
struct hpet_table
{
ACPI_TABLE_HEADER_DEF
u32 hardware_id;
u32 base_address [3];
u8 hpet_number;
u16 clock_tick;
u8 attributes;
};
#pragma pack()
......@@ -344,4 +331,20 @@ struct acpi_table_support
#include "actbl2.h" /* Acpi 2.0 table definitions */
#pragma pack(1)
/*
* High performance timer
*/
struct hpet_table
{
ACPI_TABLE_HEADER_DEF
u32 hardware_id;
struct acpi_generic_address base_address;
u8 hpet_number;
u16 clock_tick;
u8 attributes;
};
#pragma pack()
#endif /* __ACTBL_H__ */
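The HPET base address is now described by an ACPI 2.0 Generic Address
Structure instead of a raw u32 array. A sketch of how a consumer might read
it; field names follow the acpi_generic_address definition in actbl2.h and the
address-space check is illustrative:

static acpi_physical_address hpet_base_address (struct hpet_table *hpet_tbl)
{
	/* Only memory-mapped HPET blocks are handled in this sketch */
	if (hpet_tbl->base_address.address_space_id ==
			ACPI_ADR_SPACE_SYSTEM_MEMORY) {
		return ((acpi_physical_address) hpet_tbl->base_address.address);
	}
	return (0);
}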
......@@ -557,34 +557,56 @@ typedef u32 acpi_event_status;
#define ACPI_GPE_MAX 0xFF
#define ACPI_NUM_GPE 256
#define ACPI_GPE_ENABLE 0
#define ACPI_GPE_DISABLE 1
/*
* GPE info flags - Per GPE
* +---------+-+-+-+
* |Bits 8:3 |2|1|0|
* +---------+-+-+-+
* | | | |
* | | | +- Edge or Level Triggered
* | | +--- Type: Wake or Runtime
* | +----- Enabled for wake?
* +--------<Reserved>
* +-+-+-+---+---+-+
* |7|6|5|4:3|2:1|0|
* +-+-+-+---+---+-+
* | | | | | |
* | | | | | +--- Interrupt type: Edge or Level Triggered
* | | | | +--- Type: Wake-only, Runtime-only, or wake/runtime
* | | | +--- Type of dispatch -- to method, handler, or none
* | | +--- Enabled for runtime?
* | +--- Enabled for wake?
* +--- System state when GPE occurred (running/waking)
*/
#define ACPI_GPE_XRUPT_TYPE_MASK (u8) 1
#define ACPI_GPE_LEVEL_TRIGGERED (u8) 1
#define ACPI_GPE_EDGE_TRIGGERED (u8) 0
#define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x01
#define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x01
#define ACPI_GPE_EDGE_TRIGGERED (u8) 0x00
#define ACPI_GPE_TYPE_MASK (u8) 0x06
#define ACPI_GPE_TYPE_WAKE_RUN (u8) 0x06
#define ACPI_GPE_TYPE_WAKE (u8) 0x02
#define ACPI_GPE_TYPE_RUNTIME (u8) 0x04 /* Default */
#define ACPI_GPE_DISPATCH_MASK (u8) 0x18
#define ACPI_GPE_DISPATCH_HANDLER (u8) 0x08
#define ACPI_GPE_DISPATCH_METHOD (u8) 0x10
#define ACPI_GPE_DISPATCH_NOT_USED (u8) 0x00 /* Default */
#define ACPI_GPE_RUN_ENABLE_MASK (u8) 0x20
#define ACPI_GPE_RUN_ENABLED (u8) 0x20
#define ACPI_GPE_RUN_DISABLED (u8) 0x00 /* Default */
#define ACPI_GPE_TYPE_MASK (u8) 2
#define ACPI_GPE_TYPE_WAKE (u8) 2
#define ACPI_GPE_TYPE_RUNTIME (u8) 0 /* Default */
#define ACPI_GPE_WAKE_ENABLE_MASK (u8) 0x40
#define ACPI_GPE_WAKE_ENABLED (u8) 0x40
#define ACPI_GPE_WAKE_DISABLED (u8) 0x00 /* Default */
#define ACPI_GPE_ENABLE_MASK (u8) 4
#define ACPI_GPE_ENABLED (u8) 4
#define ACPI_GPE_DISABLED (u8) 0 /* Default */
#define ACPI_GPE_ENABLE_MASK (u8) 0x60 /* Both run/wake */
#define ACPI_GPE_SYSTEM_MASK (u8) 0x80
#define ACPI_GPE_SYSTEM_RUNNING (u8) 0x80
#define ACPI_GPE_SYSTEM_WAKING (u8) 0x00
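A sketch of decoding the expanded per-GPE flags byte with the masks above;
reading the byte from gpe_event_info->flags is an assumption based on the
walk-state hunk earlier in this patch:

	u8 flags = gpe_event_info->flags;

	if ((flags & ACPI_GPE_DISPATCH_MASK) == ACPI_GPE_DISPATCH_METHOD) {
		/* This GPE is serviced by a control method (_Lxx/_Exx) */
	}
	if (flags & ACPI_GPE_RUN_ENABLED) {
		/* Currently enabled for runtime events */
	}
	if ((flags & ACPI_GPE_XRUPT_TYPE_MASK) == ACPI_GPE_LEVEL_TRIGGERED) {
		/* Level-triggered: the status bit must be cleared after servicing */
	}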
/*
* Flags for GPE and Lock interfaces
*/
#define ACPI_EVENT_WAKE_ENABLE 0x2
#define ACPI_EVENT_WAKE_DISABLE 0x2
#define ACPI_EVENT_WAKE_ENABLE 0x2 /* acpi_gpe_enable */
#define ACPI_EVENT_WAKE_DISABLE 0x2 /* acpi_gpe_disable */
#define ACPI_NOT_ISR 0x1
#define ACPI_ISR 0x0
......@@ -592,9 +614,10 @@ typedef u32 acpi_event_status;
/* Notify types */
#define ACPI_SYSTEM_NOTIFY 0
#define ACPI_DEVICE_NOTIFY 1
#define ACPI_MAX_NOTIFY_HANDLER_TYPE 1
#define ACPI_SYSTEM_NOTIFY 0x1
#define ACPI_DEVICE_NOTIFY 0x2
#define ACPI_ALL_NOTIFY 0x3
#define ACPI_MAX_NOTIFY_HANDLER_TYPE 0x3
#define ACPI_MAX_SYS_NOTIFY 0x7f
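With the notify types now defined as bit flags, one registration can cover
both system and device notifies; a sketch assuming acpi_install_notify_handler
accepts the combined ACPI_ALL_NOTIFY value (device_handle is hypothetical):

static void my_notify_handler (acpi_handle device, u32 value, void *context)
{
	/* value is the code supplied by the AML Notify() operator */
}

static acpi_status install_example_notify_handler (acpi_handle device_handle)
{
	return (acpi_install_notify_handler (device_handle, ACPI_ALL_NOTIFY,
		my_notify_handler, NULL));
}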
......@@ -789,10 +812,6 @@ typedef
u32 (*acpi_event_handler) (
void *context);
typedef
void (*acpi_gpe_handler) (
void *context);
typedef
void (*acpi_notify_handler) (
acpi_handle device,
......@@ -880,6 +899,7 @@ struct acpi_compatible_id_list
#define ACPI_VALID_HID 0x0004
#define ACPI_VALID_UID 0x0008
#define ACPI_VALID_CID 0x0010
#define ACPI_VALID_SXDS 0x0020
#define ACPI_COMMON_OBJ_INFO \
......@@ -899,12 +919,12 @@ struct acpi_device_info
{
ACPI_COMMON_OBJ_INFO;
u8 highest_dstates[4]; /* _sx_d values 0xFF indicates not valid */
u32 valid; /* Indicates which fields below are valid */
u32 current_status; /* _STA value */
acpi_integer address; /* _ADR value if any */
struct acpi_device_id hardware_id; /* _HID value if any */
struct acpi_device_id unique_id; /* _UID value if any */
u8 highest_dstates[4]; /* _sx_d values: 0xFF indicates not valid */
struct acpi_compatible_id_list compatibility_id; /* List of _CIDs if any */
};
......
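Callers should test the new ACPI_VALID_SXDS bit before trusting
highest_dstates[]; a sketch in which the info pointer is assumed to have come
back from acpi_get_object_info and the _SxD index mapping is illustrative:

	struct acpi_device_info *info;	/* Filled in by acpi_get_object_info */
	u32 i;

	if (info->valid & ACPI_VALID_SXDS) {
		for (i = 0; i < 4; i++) {
			if (info->highest_dstates[i] != 0xFF) {
				/* A valid highest D-state was reported for this sleep state */
			}
		}
	}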
......@@ -152,12 +152,8 @@
#define COMPILER_DEPENDENT_INT64 long long
#define COMPILER_DEPENDENT_UINT64 unsigned long long
/* Name of host operating system (returned by the _OS_ namespace object) */
#define ACPI_OS_NAME "Intel ACPI/CA Core Subsystem"
/* This macro is used to tag functions as "printf-like" because
/*
* This macro is used to tag functions as "printf-like" because
* some compilers can catch printf format string problems. MSVC
* doesn't, so this is preprocessed away.
*/
......
......@@ -44,8 +44,6 @@
#ifndef __ACLINUX_H__
#define __ACLINUX_H__
#define ACPI_OS_NAME "Linux"
#define ACPI_USE_SYSTEM_CLIBRARY
#define ACPI_USE_DO_WHILE_0
......