nexedi / linux / Commits

Commit 9dce0e95 authored Jun 29, 2006 by Len Brown

    Pull acpica into release branch

Parents: f1b2ad5d, 967440e3

Showing 28 changed files with 563 additions and 470 deletions (+563, -470)
drivers/acpi/dispatcher/dsinit.c     +0    -30
drivers/acpi/dispatcher/dsmethod.c   +138  -192
drivers/acpi/dispatcher/dswexec.c    +1    -3
drivers/acpi/dispatcher/dswload.c    +23   -26
drivers/acpi/events/evgpe.c          +5    -9
drivers/acpi/executer/exconfig.c     +8    -0
drivers/acpi/executer/excreate.c     +11   -16
drivers/acpi/executer/exdump.c       +4    -4
drivers/acpi/executer/exfldio.c      +55   -16
drivers/acpi/executer/exmutex.c      +6    -6
drivers/acpi/executer/exsystem.c     +68   -14
drivers/acpi/hardware/hwregs.c       +62   -15
drivers/acpi/namespace/nsaccess.c    +12   -15
drivers/acpi/osl.c                   +7    -21
drivers/acpi/parser/psparse.c        +16   -2
drivers/acpi/utilities/utdelete.c    +23   -13
drivers/acpi/utilities/utglobal.c    +1    -0
drivers/acpi/utilities/utmutex.c     +10   -29
include/acpi/acconfig.h              +1    -1
include/acpi/acdispat.h              +1    -1
include/acpi/acglobal.h              +20   -6
include/acpi/acinterp.h              +4    -1
include/acpi/aclocal.h               +21   -15
include/acpi/acmacros.h              +2    -0
include/acpi/acobject.h              +4    -4
include/acpi/acpiosxf.h              +31   -9
include/acpi/actypes.h               +27   -22
include/acpi/platform/aclinux.h      +2    -0
drivers/acpi/dispatcher/dsinit.c

@@ -125,37 +125,7 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
 		if (info->table_desc->pointer->revision == 1) {
 			node->flags |= ANOBJ_DATA_WIDTH_32;
 		}
-#ifdef ACPI_INIT_PARSE_METHODS
-		/*
-		 * Note 11/2005: Removed this code to parse all methods during table
-		 * load because it causes problems if there are any errors during the
-		 * parse. Also, it seems like overkill and we probably don't want to
-		 * abort a table load because of an issue with a single method.
-		 */
-
-		/*
-		 * Print a dot for each method unless we are going to print
-		 * the entire pathname
-		 */
-		if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) {
-			ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
-		}
-
-		/*
-		 * Always parse methods to detect errors, we will delete
-		 * the parse tree below
-		 */
-		status = acpi_ds_parse_method(obj_handle);
-		if (ACPI_FAILURE(status)) {
-			ACPI_ERROR((AE_INFO,
-				    "Method %p [%4.4s] - parse failure, %s",
-				    obj_handle,
-				    acpi_ut_get_node_name(obj_handle),
-				    acpi_format_exception(status)));
-
-			/* This parse failed, but we will continue parsing more methods */
-		}
-#endif
-
 		info->method_count++;
 		break;
drivers/acpi/dispatcher/dsmethod.c

@@ -52,6 +52,10 @@
 #define _COMPONENT          ACPI_DISPATCHER
 ACPI_MODULE_NAME("dsmethod")

+/* Local prototypes */
+static acpi_status
+acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ds_method_error

@@ -67,6 +71,7 @@ ACPI_MODULE_NAME("dsmethod")
  * Note: Allows the exception handler to change the status code
  *
  ******************************************************************************/

 acpi_status
 acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
 {

@@ -111,13 +116,53 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
 	return (status);
 }

+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_create_method_mutex
+ *
+ * PARAMETERS:  obj_desc            - The method object
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Create a mutex object for a serialized control method
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ds_create_method_mutex(union acpi_operand_object *method_desc)
+{
+	union acpi_operand_object *mutex_desc;
+	acpi_status status;
+
+	ACPI_FUNCTION_NAME(ds_create_method_mutex);
+
+	/* Create the new mutex object */
+
+	mutex_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX);
+	if (!mutex_desc) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	/* Create the actual OS Mutex */
+
+	status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	mutex_desc->mutex.sync_level = method_desc->method.sync_level;
+	method_desc->method.mutex = mutex_desc;
+	return_ACPI_STATUS(AE_OK);
+}
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ds_begin_method_execution
  *
  * PARAMETERS:  method_node         - Node of the method
  *              obj_desc            - The method object
- *              calling_method_node - Caller of this method (if non-null)
+ *              walk_state          - current state, NULL if not yet executing
+ *                                    a method.
  *
  * RETURN:      Status
  *

@@ -128,9 +173,9 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
  ******************************************************************************/

 acpi_status
-acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
-			       union acpi_operand_object *obj_desc,
-			       struct acpi_namespace_node *calling_method_node)
+acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
+			       union acpi_operand_object *obj_desc,
+			       struct acpi_walk_state *walk_state)
 {
 	acpi_status status = AE_OK;

@@ -149,35 +194,80 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,
 	}

 	/*
-	 * If there is a concurrency limit on this method, we need to
-	 * obtain a unit from the method semaphore.
+	 * If this method is serialized, we need to acquire the method mutex.
 	 */
-	if (obj_desc->method.semaphore) {
+	if (obj_desc->method.method_flags & AML_METHOD_SERIALIZED) {
 		/*
-		 * Allow recursive method calls, up to the reentrancy/concurrency
-		 * limit imposed by the SERIALIZED rule and the sync_level method
-		 * parameter.
-		 *
-		 * The point of this code is to avoid permanently blocking a
-		 * thread that is making recursive method calls.
+		 * Create a mutex for the method if it is defined to be Serialized
+		 * and a mutex has not already been created. We defer the mutex creation
+		 * until a method is actually executed, to minimize the object count
 		 */
-		if (method_node == calling_method_node) {
-			if (obj_desc->method.thread_count >=
-			    obj_desc->method.concurrency) {
-				return_ACPI_STATUS(AE_AML_METHOD_LIMIT);
+		if (!obj_desc->method.mutex) {
+			status = acpi_ds_create_method_mutex(obj_desc);
+			if (ACPI_FAILURE(status)) {
+				return_ACPI_STATUS(status);
 			}
 		}

 		/*
-		 * Get a unit from the method semaphore. This releases the
-		 * interpreter if we block (then reacquires it)
+		 * The current_sync_level (per-thread) must be less than or equal to
+		 * the sync level of the method. This mechanism provides some
+		 * deadlock prevention
+		 *
+		 * Top-level method invocation has no walk state at this point
+		 */
+		if (walk_state &&
+		    (walk_state->thread->current_sync_level >
+		     obj_desc->method.mutex->mutex.sync_level)) {
+			ACPI_ERROR((AE_INFO,
+				    "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%d)",
+				    acpi_ut_get_node_name(method_node),
+				    walk_state->thread->current_sync_level));
+
+			return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
+		}
+
+		/*
+		 * Obtain the method mutex if necessary. Do not acquire mutex for a
+		 * recursive call.
 		 */
-		status =
-		    acpi_ex_system_wait_semaphore(obj_desc->method.semaphore,
-						  ACPI_WAIT_FOREVER);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
+		if (!walk_state ||
+		    !obj_desc->method.mutex->mutex.owner_thread ||
+		    (walk_state->thread !=
+		     obj_desc->method.mutex->mutex.owner_thread)) {
+			/*
+			 * Acquire the method mutex. This releases the interpreter if we
+			 * block (and reacquires it before it returns)
+			 */
+			status =
+			    acpi_ex_system_wait_mutex(obj_desc->method.mutex->
+						      mutex.os_mutex,
+						      ACPI_WAIT_FOREVER);
+			if (ACPI_FAILURE(status)) {
+				return_ACPI_STATUS(status);
+			}
+
+			/* Update the mutex and walk info and save the original sync_level */
+
+			if (walk_state) {
+				obj_desc->method.mutex->mutex.original_sync_level =
+				    walk_state->thread->current_sync_level;
+
+				obj_desc->method.mutex->mutex.owner_thread =
+				    walk_state->thread;
+				walk_state->thread->current_sync_level =
+				    obj_desc->method.sync_level;
+			} else {
+				obj_desc->method.mutex->mutex.original_sync_level =
+				    obj_desc->method.mutex->mutex.sync_level;
+			}
+		}
+
+		/* Always increase acquisition depth */
+
+		obj_desc->method.mutex->mutex.acquisition_depth++;
 	}

 	/*

@@ -200,10 +290,10 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,
 	return_ACPI_STATUS(status);

       cleanup:
-	/* On error, must signal the method semaphore if present */
+	/* On error, must release the method mutex (if present) */

-	if (obj_desc->method.semaphore) {
-		(void)acpi_os_signal_semaphore(obj_desc->method.semaphore, 1);
+	if (obj_desc->method.mutex) {
+		acpi_os_release_mutex(obj_desc->method.mutex->mutex.os_mutex);
 	}
 	return_ACPI_STATUS(status);
 }

@@ -253,10 +343,10 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
 		return_ACPI_STATUS(AE_NULL_OBJECT);
 	}

-	/* Init for new method, possibly wait on concurrency semaphore */
+	/* Init for new method, possibly wait on method mutex */

 	status = acpi_ds_begin_method_execution(method_node, obj_desc,
-						this_walk_state->method_node);
+						this_walk_state);
 	if (ACPI_FAILURE(status)) {
 		return_ACPI_STATUS(status);
 	}

@@ -478,6 +568,8 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
  *              created, delete all locals and arguments, and delete the parse
  *              tree if requested.
  *
+ * MUTEX:       Interpreter is locked
+ *
  ******************************************************************************/

 void

@@ -503,26 +595,21 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
 	}

 	/*
-	 * Lock the parser while we terminate this method.
-	 * If this is the last thread executing the method,
-	 * we have additional cleanup to perform
+	 * If method is serialized, release the mutex and restore the
+	 * current sync level for this thread
 	 */
-	status = acpi_ut_acquire_mutex(ACPI_MTX_CONTROL_METHOD);
-	if (ACPI_FAILURE(status)) {
-		return_VOID;
-	}
-
-	/* Signal completion of the execution of this method if necessary */
-
-	if (method_desc->method.semaphore) {
-		status =
-		    acpi_os_signal_semaphore(method_desc->method.semaphore, 1);
-		if (ACPI_FAILURE(status)) {
-
-			/* Ignore error and continue */
-
-			ACPI_EXCEPTION((AE_INFO, status,
-					"Could not signal method semaphore"));
+	if (method_desc->method.mutex) {
+
+		/* Acquisition Depth handles recursive calls */
+
+		method_desc->method.mutex->mutex.acquisition_depth--;
+		if (!method_desc->method.mutex->mutex.acquisition_depth) {
+			walk_state->thread->current_sync_level =
+			    method_desc->method.mutex->mutex.original_sync_level;
+
+			acpi_os_release_mutex(method_desc->method.mutex->mutex.os_mutex);
 		}
 	}

@@ -537,7 +624,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
 	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
 	if (ACPI_FAILURE(status)) {
-		goto exit;
+		return_VOID;
 	}

 	/*

@@ -587,11 +674,9 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
 		 * This code is here because we must wait until the last thread exits
 		 * before creating the synchronization semaphore.
 		 */
-		if ((method_desc->method.concurrency == 1) &&
-		    (!method_desc->method.semaphore)) {
-			status = acpi_os_create_semaphore(1, 1,
-							  &method_desc->method.
-							  semaphore);
+		if ((method_desc->method.method_flags & AML_METHOD_SERIALIZED)
+		    && (!method_desc->method.mutex)) {
+			status = acpi_ds_create_method_mutex(method_desc);
 		}

 		/* No more threads, we can free the owner_id */

@@ -599,144 +684,5 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
 		acpi_ut_release_owner_id(&method_desc->method.owner_id);
 	}

-      exit:
-	(void)acpi_ut_release_mutex(ACPI_MTX_CONTROL_METHOD);
 	return_VOID;
 }
-
-#ifdef ACPI_INIT_PARSE_METHODS
-	/*
-	 * Note 11/2005: Removed this code to parse all methods during table
-	 * load because it causes problems if there are any errors during the
-	 * parse. Also, it seems like overkill and we probably don't want to
-	 * abort a table load because of an issue with a single method.
-	 */
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_parse_method
- *
- * PARAMETERS:  Node        - Method node
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Parse the AML that is associated with the method.
- *
- * MUTEX:       Assumes parser is locked
- *
- ******************************************************************************/
-
-acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node)
-{
-	acpi_status status;
-	union acpi_operand_object *obj_desc;
-	union acpi_parse_object *op;
-	struct acpi_walk_state *walk_state;
-
-	ACPI_FUNCTION_TRACE_PTR(ds_parse_method, node);
-
-	/* Parameter Validation */
-
-	if (!node) {
-		return_ACPI_STATUS(AE_NULL_ENTRY);
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
-			  "**** Parsing [%4.4s] **** NamedObj=%p\n",
-			  acpi_ut_get_node_name(node), node));
-
-	/* Extract the method object from the method Node */
-
-	obj_desc = acpi_ns_get_attached_object(node);
-	if (!obj_desc) {
-		return_ACPI_STATUS(AE_NULL_OBJECT);
-	}
-
-	/* Create a mutex for the method if there is a concurrency limit */
-
-	if ((obj_desc->method.concurrency != ACPI_INFINITE_CONCURRENCY) &&
-	    (!obj_desc->method.semaphore)) {
-		status = acpi_os_create_semaphore(obj_desc->method.concurrency,
-						  obj_desc->method.concurrency,
-						  &obj_desc->method.semaphore);
-		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
-		}
-	}
-
-	/*
-	 * Allocate a new parser op to be the root of the parsed
-	 * method tree
-	 */
-	op = acpi_ps_alloc_op(AML_METHOD_OP);
-	if (!op) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	/* Init new op with the method name and pointer back to the Node */
-
-	acpi_ps_set_name(op, node->name.integer);
-	op->common.node = node;
-
-	/*
-	 * Get a new owner_id for objects created by this method. Namespace
-	 * objects (such as Operation Regions) can be created during the
-	 * first pass parse.
-	 */
-	status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
-	if (ACPI_FAILURE(status)) {
-		goto cleanup;
-	}
-
-	/* Create and initialize a new walk state */
-
-	walk_state =
-	    acpi_ds_create_walk_state(obj_desc->method.owner_id, NULL, NULL,
-				      NULL);
-	if (!walk_state) {
-		status = AE_NO_MEMORY;
-		goto cleanup2;
-	}
-
-	status = acpi_ds_init_aml_walk(walk_state, op, node,
-				       obj_desc->method.aml_start,
-				       obj_desc->method.aml_length, NULL, 1);
-	if (ACPI_FAILURE(status)) {
-		acpi_ds_delete_walk_state(walk_state);
-		goto cleanup2;
-	}
-
-	/*
-	 * Parse the method, first pass
-	 *
-	 * The first pass load is where newly declared named objects are added into
-	 * the namespace. Actual evaluation of the named objects (what would be
-	 * called a "second pass") happens during the actual execution of the
-	 * method so that operands to the named objects can take on dynamic
-	 * run-time values.
-	 */
-	status = acpi_ps_parse_aml(walk_state);
-	if (ACPI_FAILURE(status)) {
-		goto cleanup2;
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
-			  "**** [%4.4s] Parsed **** NamedObj=%p Op=%p\n",
-			  acpi_ut_get_node_name(node), node, op));
-
-	/*
-	 * Delete the parse tree. We simply re-parse the method for every
-	 * execution since there isn't much overhead (compared to keeping lots
-	 * of parse trees around)
-	 */
-	acpi_ns_delete_namespace_subtree(node);
-	acpi_ns_delete_namespace_by_owner(obj_desc->method.owner_id);
-
-      cleanup2:
-	acpi_ut_release_owner_id(&obj_desc->method.owner_id);
-
-      cleanup:
-	acpi_ps_delete_parse_tree(op);
-	return_ACPI_STATUS(status);
-}
-#endif
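Editorial note: the serialized-method path above defers mutex creation until a method first runs, refuses to block when the caller's per-thread sync level already exceeds the method's, and counts acquisition depth so recursive calls by the owning thread do not deadlock. The following is an illustrative sketch only (plain C with pthreads, not ACPICA code) of that pattern; the struct and function names are invented for the example.

	#include <pthread.h>
	#include <stdio.h>

	struct method {
		int sync_level;          /* declared SyncLevel of the method */
		int created;             /* has the mutex been created yet? */
		pthread_mutex_t mutex;   /* stands in for mutex.os_mutex */
		pthread_t owner;         /* stands in for mutex.owner_thread */
		int has_owner;
		int depth;               /* stands in for acquisition_depth */
	};

	static int begin_execution(struct method *m, int *current_sync_level)
	{
		if (!m->created) {               /* deferred mutex creation */
			pthread_mutex_init(&m->mutex, NULL);
			m->created = 1;
		}
		if (*current_sync_level > m->sync_level)
			return -1;               /* would be AE_AML_MUTEX_ORDER */

		/* Do not re-acquire for a recursive call by the owning thread */
		if (!m->has_owner || !pthread_equal(m->owner, pthread_self())) {
			pthread_mutex_lock(&m->mutex);
			m->owner = pthread_self();
			m->has_owner = 1;
			*current_sync_level = m->sync_level;
		}
		m->depth++;                      /* always count the acquisition */
		return 0;
	}

	int main(void)
	{
		struct method m = { .sync_level = 2 };
		int level = 0;

		printf("first call:  %d\n", begin_execution(&m, &level));
		printf("recursive:   %d (depth now %d)\n",
		       begin_execution(&m, &level), m.depth);
		return 0;
	}

The recursive call succeeds without blocking because the owner check short-circuits the lock, which is the same reason acpi_ds_begin_method_execution() only waits on the mutex when the owning thread differs from the caller.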
drivers/acpi/dispatcher/dswexec.c

@@ -472,7 +472,6 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
 			acpi_ds_result_push(walk_state->result_obj, walk_state);
 		}
 		break;

 	default:

@@ -510,6 +509,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
 			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
 					  "Method Reference in a Package, Op=%p\n",
 					  op));

 			op->common.node =
 			    (struct acpi_namespace_node *)op->asl.value.arg->
 			    asl.node->object;

@@ -670,7 +670,6 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
 			status = acpi_ds_result_stack_pop(walk_state);
 		}
 		break;

 	case AML_TYPE_UNDEFINED:

@@ -708,7 +707,6 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
 	 * Check if we just completed the evaluation of a
 	 * conditional predicate
 	 */
 	if ((ACPI_SUCCESS(status)) &&
 	    (walk_state->control_state) &&
 	    (walk_state->control_state->common.state ==
drivers/acpi/dispatcher/dswload.c

@@ -219,7 +219,6 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
 			 * Note: silently change the type here. On the second pass, we will report
 			 * a warning
 			 */
 			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 					  "Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)\n",
 					  path,

@@ -242,7 +241,6 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
 		break;

 	default:
 		/*
 		 * For all other named opcodes, we will enter the name into
 		 * the namespace.

@@ -259,7 +257,6 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
 		 * buffer_field, or Package), the name of the object is already
 		 * in the namespace.
 		 */
 		if (walk_state->deferred_node) {

 			/* This name is already in the namespace, get the node */

@@ -327,12 +324,12 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
 					    (status);
 				}
 			}

 			status = AE_OK;
 		}
 	}

 	if (ACPI_FAILURE(status)) {
 		ACPI_ERROR_NAMESPACE(path, status);
 		return_ACPI_STATUS(status);
 	}

@@ -434,9 +431,13 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
 			status =
 			    acpi_ex_create_region(op->named.data,
 						  op->named.length,
-						  (acpi_adr_space_type)
-						  ((op->common.value.arg)->common.value.integer),
+						  (acpi_adr_space_type)
+						  ((op->common.value.arg)->common.value.integer),
 						  walk_state);
 			if (ACPI_FAILURE(status)) {
 				return_ACPI_STATUS(status);

@@ -499,6 +500,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
 						  length, walk_state);
 		}

 		walk_state->operands[0] = NULL;
 		walk_state->num_operands = 0;

@@ -570,7 +572,6 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
 #ifdef ACPI_ENABLE_MODULE_LEVEL_CODE
 	if ((walk_state->op_info->class == AML_CLASS_EXECUTE) ||
 	    (walk_state->op_info->class == AML_CLASS_CONTROL)) {
 		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
 				  "Begin/EXEC: %s (fl %8.8X)\n",
 				  walk_state->op_info->name,

@@ -602,7 +603,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
 		} else {
 			/* Get name from the op */

-			buffer_ptr = (char *)&op->named.name;
+			buffer_ptr = ACPI_CAST_PTR(char, &op->named.name);
 		}
 	} else {
 		/* Get the namestring from the raw AML */

@@ -629,7 +630,6 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
 		break;

 	case AML_INT_NAMEPATH_OP:
 		/*
 		 * The name_path is an object reference to an existing object.
 		 * Don't enter the name into the namespace, but look it up

@@ -642,7 +642,6 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
 		break;

 	case AML_SCOPE_OP:
 		/*
 		 * The Path is an object reference to an existing object.
 		 * Don't enter the name into the namespace, but look it up

@@ -664,6 +663,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
 #endif
 			return_ACPI_STATUS(status);
 		}

 		/*
 		 * We must check to make sure that the target is
 		 * one of the opcodes that actually opens a scope

@@ -689,7 +689,6 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
 			 *  Name (DEB, 0)
 			 *  Scope (DEB) { ... }
 			 */
 			ACPI_WARNING((AE_INFO,
 				      "Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)",
 				      buffer_ptr,

@@ -729,8 +728,8 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
 			if (ACPI_FAILURE(status)) {
 				return_ACPI_STATUS(status);
 			}
 		}

 		return_ACPI_STATUS(AE_OK);
 	}

@@ -787,7 +786,6 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
 	 * can get it again quickly when this scope is closed
 	 */
 	op->common.node = node;
 	return_ACPI_STATUS(status);
 }

@@ -922,7 +920,6 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
 #ifndef ACPI_NO_METHOD_EXECUTION
 	case AML_TYPE_CREATE_FIELD:
 		/*
 		 * Create the field object, but the field buffer and index must
 		 * be evaluated later during the execution phase

@@ -931,7 +928,6 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
 		break;

 	case AML_TYPE_NAMED_FIELD:
 		/*
 		 * If we are executing a method, initialize the field
 		 */

@@ -1051,6 +1047,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
 		 * argument is the space_id. (We must save the address of the
 		 * AML of the address and length operands)
 		 */

 		/*
 		 * If we have a valid region, initialize it
 		 * Namespace is NOT locked at this point.
drivers/acpi/events/evgpe.c

@@ -382,7 +382,6 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
 	u32 status_reg;
 	u32 enable_reg;
 	acpi_cpu_flags flags;
-	acpi_cpu_flags hw_flags;
 	acpi_native_uint i;
 	acpi_native_uint j;

@@ -394,8 +393,11 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
 		return (int_status);
 	}

-	/* We need to hold the GPE lock now, hardware lock in the loop */
+	/*
+	 * We need to obtain the GPE lock for both the data structs and registers
+	 * Note: Not necessary to obtain the hardware lock, since the GPE registers
+	 * are owned by the gpe_lock.
+	 */
 	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

 	/* Examine all GPE blocks attached to this interrupt level */

@@ -413,8 +415,6 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
 			gpe_register_info = &gpe_block->register_info[i];

-			hw_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
-
 			/* Read the Status Register */

 			status =

@@ -423,8 +423,6 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
 					   &gpe_register_info->status_address);
 			if (ACPI_FAILURE(status)) {
-				acpi_os_release_lock(acpi_gbl_hardware_lock,
-						     hw_flags);
 				goto unlock_and_exit;
 			}

@@ -435,8 +433,6 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
 					   &enable_reg,
 					   &gpe_register_info->enable_address);
-			acpi_os_release_lock(acpi_gbl_hardware_lock, hw_flags);
-
 			if (ACPI_FAILURE(status)) {
 				goto unlock_and_exit;
 			}
drivers/acpi/executer/exconfig.c

@@ -266,6 +266,10 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
 		}
 	}

+	ACPI_INFO((AE_INFO,
+		   "Dynamic OEM Table Load - [%4.4s] OemId [%6.6s] OemTableId [%8.8s]",
+		   table->signature, table->oem_id, table->oem_table_id));
+
 	*return_desc = ddb_handle;
 	return_ACPI_STATUS(status);
 }

@@ -446,6 +450,10 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
 		return_ACPI_STATUS(status);
 	}

+	ACPI_INFO((AE_INFO,
+		   "Dynamic SSDT Load - OemId [%6.6s] OemTableId [%8.8s]",
+		   table_ptr->oem_id, table_ptr->oem_table_id));
+
       cleanup:
 	if (ACPI_FAILURE(status)) {
 		ACPI_FREE(table_ptr);
drivers/acpi/executer/excreate.c

@@ -177,7 +177,7 @@ acpi_status acpi_ex_create_event(struct acpi_walk_state *walk_state)
 	 * that the event is created in an unsignalled state
 	 */
 	status = acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT, 0,
-					  &obj_desc->event.semaphore);
+					  &obj_desc->event.os_semaphore);
 	if (ACPI_FAILURE(status)) {
 		goto cleanup;
 	}

@@ -226,12 +226,9 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
 		goto cleanup;
 	}

-	/*
-	 * Create the actual OS semaphore.
-	 * One unit max to make it a mutex, with one initial unit to allow
-	 * the mutex to be acquired.
-	 */
-	status = acpi_os_create_semaphore(1, 1, &obj_desc->mutex.semaphore);
+	/* Create the actual OS Mutex */
+
+	status = acpi_os_create_mutex(&obj_desc->mutex.os_mutex);
 	if (ACPI_FAILURE(status)) {
 		goto cleanup;
 	}

@@ -576,21 +573,19 @@ acpi_ex_create_method(u8 * aml_start,
 	    (u8) (method_flags & AML_METHOD_ARG_COUNT);

 	/*
-	 * Get the concurrency count. If required, a semaphore will be
+	 * Get the sync_level. If method is serialized, a mutex will be
 	 * created for this method when it is parsed.
 	 */
 	if (acpi_gbl_all_methods_serialized) {
-		obj_desc->method.concurrency = 1;
+		obj_desc->method.sync_level = 0;
 		obj_desc->method.method_flags |= AML_METHOD_SERIALIZED;
 	} else if (method_flags & AML_METHOD_SERIALIZED) {
 		/*
-		 * ACPI 1.0: Concurrency = 1
-		 * ACPI 2.0: Concurrency = (sync_level (in method declaration) + 1)
+		 * ACPI 1.0: sync_level = 0
+		 * ACPI 2.0: sync_level = sync_level in method declaration
 		 */
-		obj_desc->method.concurrency = (u8)
-		    (((method_flags & AML_METHOD_SYNCH_LEVEL) >> 4) + 1);
-	} else {
-		obj_desc->method.concurrency = ACPI_INFINITE_CONCURRENCY;
+		obj_desc->method.sync_level = (u8)
+		    ((method_flags & AML_METHOD_SYNCH_LEVEL) >> 4);
 	}

 	/* Attach the new object to the method Node */
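Editorial note: a tiny stand-alone illustration of the flag decoding above (not ACPICA source). In the AML MethodFlags byte the SyncLevel occupies the upper nibble, so the new code stores (flags & SYNCH_LEVEL mask) >> 4 instead of deriving a concurrency count from it; the mask values here mirror the AML_METHOD_* constants used in the diff and are assumptions for the example.

	#include <stdio.h>

	#define AML_METHOD_SERIALIZED  0x08	/* assumed value for illustration */
	#define AML_METHOD_SYNCH_LEVEL 0xF0	/* assumed value for illustration */

	int main(void)
	{
		unsigned char method_flags = 0x38;	/* serialized, SyncLevel 3 */

		if (method_flags & AML_METHOD_SERIALIZED) {
			unsigned char sync_level = (unsigned char)
			    ((method_flags & AML_METHOD_SYNCH_LEVEL) >> 4);
			printf("sync_level = %u\n", sync_level);	/* prints 3 */
		}
		return 0;
	}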
drivers/acpi/executer/exdump.c

@@ -118,14 +118,14 @@ static struct acpi_exdump_info acpi_ex_dump_device[4] = {
 static struct acpi_exdump_info acpi_ex_dump_event[2] = {
 	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_event), NULL},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(event.semaphore), "Semaphore"}
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(event.os_semaphore), "Os Semaphore"}
 };

 static struct acpi_exdump_info acpi_ex_dump_method[8] = {
 	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_method), NULL},
 	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count), "ParamCount"},
-	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.concurrency), "Concurrency"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.semaphore), "Semaphore"},
+	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.sync_level), "Sync Level"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.mutex), "Mutex"},
 	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.owner_id), "Owner Id"},
 	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.thread_count), "Thread Count"},
 	{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(method.aml_length), "Aml Length"},

@@ -138,7 +138,7 @@ static struct acpi_exdump_info acpi_ex_dump_mutex[5] = {
 	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.owner_thread), "Owner Thread"},
 	{ACPI_EXD_UINT16, ACPI_EXD_OFFSET(mutex.acquisition_depth), "Acquire Depth"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.semaphore), "Semaphore"}
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.os_mutex), "OsMutex"}
 };

 static struct acpi_exdump_info acpi_ex_dump_region[7] = {
drivers/acpi/executer/exfldio.c

@@ -727,11 +727,23 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
 			return_ACPI_STATUS(status);
 		}

-		/* Merge with previous datum if necessary */
-
-		merged_datum |= raw_datum <<
-		    (obj_desc->common_field.access_bit_width -
-		     obj_desc->common_field.start_field_bit_offset);
+		/*
+		 * Merge with previous datum if necessary.
+		 *
+		 * Note: Before the shift, check if the shift value will be larger than
+		 * the integer size. If so, there is no need to perform the operation.
+		 * This avoids the differences in behavior between different compilers
+		 * concerning shift values larger than the target data width.
+		 */
+		if ((obj_desc->common_field.access_bit_width -
+		     obj_desc->common_field.start_field_bit_offset) <
+		    ACPI_INTEGER_BIT_SIZE) {
+			merged_datum |= raw_datum <<
+			    (obj_desc->common_field.access_bit_width -
+			     obj_desc->common_field.start_field_bit_offset);
+		}

 		if (i == datum_count) {
 			break;

@@ -808,13 +820,23 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
 		return_ACPI_STATUS(AE_BUFFER_OVERFLOW);
 	}

-	/* Compute the number of datums (access width data items) */
-
-	width_mask =
-	    ACPI_MASK_BITS_ABOVE(obj_desc->common_field.access_bit_width);
-	mask = width_mask &
-	    ACPI_MASK_BITS_BELOW(obj_desc->common_field.start_field_bit_offset);
+	/*
+	 * Create the bitmasks used for bit insertion.
+	 * Note: This if/else is used to bypass compiler differences with the
+	 * shift operator
+	 */
+	if (obj_desc->common_field.access_bit_width == ACPI_INTEGER_BIT_SIZE) {
+		width_mask = ACPI_INTEGER_MAX;
+	} else {
+		width_mask =
+		    ACPI_MASK_BITS_ABOVE(obj_desc->common_field.access_bit_width);
+	}
+
+	mask = width_mask &
+	    ACPI_MASK_BITS_BELOW(obj_desc->common_field.start_field_bit_offset);
+
+	/* Compute the number of datums (access width data items) */

 	datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length,
 				       obj_desc->common_field.access_bit_width);

@@ -848,12 +870,29 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
 			return_ACPI_STATUS(status);
 		}

-		/* Start new output datum by merging with previous input datum */
-
 		field_offset += obj_desc->common_field.access_byte_width;
-		merged_datum = raw_datum >>
-		    (obj_desc->common_field.access_bit_width -
-		     obj_desc->common_field.start_field_bit_offset);
+
+		/*
+		 * Start new output datum by merging with previous input datum
+		 * if necessary.
+		 *
+		 * Note: Before the shift, check if the shift value will be larger than
+		 * the integer size. If so, there is no need to perform the operation.
+		 * This avoids the differences in behavior between different compilers
+		 * concerning shift values larger than the target data width.
+		 */
+		if ((obj_desc->common_field.access_bit_width -
+		     obj_desc->common_field.start_field_bit_offset) <
+		    ACPI_INTEGER_BIT_SIZE) {
+			merged_datum = raw_datum >>
+			    (obj_desc->common_field.access_bit_width -
+			     obj_desc->common_field.start_field_bit_offset);
+		} else {
+			merged_datum = 0;
+		}
+
 		mask = width_mask;

 		if (i == datum_count) {
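Editorial note: the guards added to exfldio.c matter because shifting an N-bit integer by N or more bits is undefined behavior in C, and compilers disagree on the result. A self-contained illustration (not ACPICA code) of the guarded form:

	#include <stdint.h>
	#include <stdio.h>

	#define INTEGER_BIT_SIZE 64	/* plays the role of ACPI_INTEGER_BIT_SIZE */

	static uint64_t merge(uint64_t merged, uint64_t raw,
			      unsigned access_bit_width, unsigned start_offset)
	{
		unsigned shift = access_bit_width - start_offset;

		if (shift < INTEGER_BIT_SIZE)	/* guard added by this patch */
			merged |= raw << shift;
		/* else: shift would be >= 64, so skip the merge entirely */
		return merged;
	}

	int main(void)
	{
		/* access width 64, field starts at bit 0 -> shift of 64 is skipped */
		printf("%llx\n", (unsigned long long)merge(0x1, 0xff, 64, 0));
		/* access width 32, start offset 8 -> ordinary shift of 24 */
		printf("%llx\n", (unsigned long long)merge(0x1, 0xff, 32, 8));
		return 0;
	}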
drivers/acpi/executer/exmutex.c

@@ -165,8 +165,9 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
 	 */
 	if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) {
 		ACPI_ERROR((AE_INFO,
-			    "Cannot acquire Mutex [%4.4s], incorrect SyncLevel",
-			    acpi_ut_get_node_name(obj_desc->mutex.node)));
+			    "Cannot acquire Mutex [%4.4s], current SyncLevel is too large (%d)",
+			    acpi_ut_get_node_name(obj_desc->mutex.node),
+			    walk_state->thread->current_sync_level));
 		return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
 	}

@@ -178,8 +179,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
 	if ((obj_desc->mutex.owner_thread->thread_id ==
 	     walk_state->thread->thread_id) ||
-	    (obj_desc->mutex.semaphore == acpi_gbl_global_lock_semaphore)) {
+	    (obj_desc->mutex.os_mutex == ACPI_GLOBAL_LOCK)) {
 		/*
 		 * The mutex is already owned by this thread,
 		 * just increment the acquisition depth

@@ -264,7 +264,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
 	 */
 	if ((obj_desc->mutex.owner_thread->thread_id !=
 	     walk_state->thread->thread_id) &&
-	    (obj_desc->mutex.semaphore != acpi_gbl_global_lock_semaphore)) {
+	    (obj_desc->mutex.os_mutex != ACPI_GLOBAL_LOCK)) {
 		ACPI_ERROR((AE_INFO,
 			    "Thread %X cannot release Mutex [%4.4s] acquired by thread %X",
 			    walk_state->thread->thread_id,
drivers/acpi/executer/exsystem.c

@@ -63,14 +63,14 @@ ACPI_MODULE_NAME("exsystem")
 *               interpreter is released.
 *
 ******************************************************************************/
-acpi_status acpi_ex_system_wait_semaphore(acpi_handle semaphore, u16 timeout)
+acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
 {
 	acpi_status status;
 	acpi_status status2;

 	ACPI_FUNCTION_TRACE(ex_system_wait_semaphore);

-	status = acpi_os_wait_semaphore(semaphore, 1, 0);
+	status = acpi_os_wait_semaphore(semaphore, 1, ACPI_DO_NOT_WAIT);
 	if (ACPI_SUCCESS(status)) {
 		return_ACPI_STATUS(status);
 	}

@@ -101,6 +101,59 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_handle semaphore, u16 timeout)
 	return_ACPI_STATUS(status);
 }

+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_system_wait_mutex
+ *
+ * PARAMETERS:  Mutex           - Mutex to wait on
+ *              Timeout         - Max time to wait
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Implements a semaphore wait with a check to see if the
+ *              semaphore is available immediately. If it is not, the
+ *              interpreter is released.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
+{
+	acpi_status status;
+	acpi_status status2;
+
+	ACPI_FUNCTION_TRACE(ex_system_wait_mutex);
+
+	status = acpi_os_acquire_mutex(mutex, ACPI_DO_NOT_WAIT);
+	if (ACPI_SUCCESS(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	if (status == AE_TIME) {
+
+		/* We must wait, so unlock the interpreter */
+
+		acpi_ex_exit_interpreter();
+
+		status = acpi_os_acquire_mutex(mutex, timeout);
+
+		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+				  "*** Thread awake after blocking, %s\n",
+				  acpi_format_exception(status)));
+
+		/* Reacquire the interpreter */
+
+		status2 = acpi_ex_enter_interpreter();
+		if (ACPI_FAILURE(status2)) {
+
+			/* Report fatal error, could not acquire interpreter */
+
+			return_ACPI_STATUS(status2);
+		}
+	}
+
+	return_ACPI_STATUS(status);
+}
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ex_system_do_stall

@@ -176,7 +229,7 @@ acpi_status acpi_ex_system_do_suspend(acpi_integer how_long)
 *
 * FUNCTION:    acpi_ex_system_acquire_mutex
 *
-* PARAMETERS:  time_desc       - The 'time to delay' object descriptor
+* PARAMETERS:  time_desc       - Maximum time to wait for the mutex
 *              obj_desc        - The object descriptor for this op
 *
 * RETURN:      Status

@@ -201,13 +254,13 @@ acpi_ex_system_acquire_mutex(union acpi_operand_object * time_desc,
 	/* Support for the _GL_ Mutex object -- go get the global lock */

-	if (obj_desc->mutex.semaphore == acpi_gbl_global_lock_semaphore) {
+	if (obj_desc->mutex.os_mutex == ACPI_GLOBAL_LOCK) {
 		status =
 		    acpi_ev_acquire_global_lock((u16) time_desc->integer.value);
 		return_ACPI_STATUS(status);
 	}

-	status = acpi_ex_system_wait_semaphore(obj_desc->mutex.semaphore,
-					       (u16) time_desc->integer.value);
+	status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex,
+					   (u16) time_desc->integer.value);
 	return_ACPI_STATUS(status);
 }

@@ -239,13 +292,13 @@ acpi_status acpi_ex_system_release_mutex(union acpi_operand_object *obj_desc)
 	/* Support for the _GL_ Mutex object -- release the global lock */

-	if (obj_desc->mutex.semaphore == acpi_gbl_global_lock_semaphore) {
+	if (obj_desc->mutex.os_mutex == ACPI_GLOBAL_LOCK) {
 		status = acpi_ev_release_global_lock();
 		return_ACPI_STATUS(status);
 	}

-	status = acpi_os_signal_semaphore(obj_desc->mutex.semaphore, 1);
-	return_ACPI_STATUS(status);
+	acpi_os_release_mutex(obj_desc->mutex.os_mutex);
+	return_ACPI_STATUS(AE_OK);
 }

@@ -268,7 +321,8 @@ acpi_status acpi_ex_system_signal_event(union acpi_operand_object *obj_desc)
 	ACPI_FUNCTION_TRACE(ex_system_signal_event);

 	if (obj_desc) {
-		status = acpi_os_signal_semaphore(obj_desc->event.semaphore, 1);
+		status =
+		    acpi_os_signal_semaphore(obj_desc->event.os_semaphore, 1);
 	}

 	return_ACPI_STATUS(status);

@@ -299,7 +353,7 @@ acpi_ex_system_wait_event(union acpi_operand_object *time_desc,
 	if (obj_desc) {
 		status =
-		    acpi_ex_system_wait_semaphore(obj_desc->event.semaphore,
+		    acpi_ex_system_wait_semaphore(obj_desc->event.os_semaphore,
 						  (u16) time_desc->integer.value);
 	}

@@ -322,7 +376,7 @@ acpi_ex_system_wait_event(union acpi_operand_object *time_desc,
 acpi_status acpi_ex_system_reset_event(union acpi_operand_object *obj_desc)
 {
 	acpi_status status = AE_OK;
-	void *temp_semaphore;
+	acpi_semaphore temp_semaphore;

 	ACPI_FUNCTION_ENTRY();

@@ -333,8 +387,8 @@ acpi_status acpi_ex_system_reset_event(union acpi_operand_object *obj_desc)
 	status =
 	    acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT, 0, &temp_semaphore);
 	if (ACPI_SUCCESS(status)) {
-		(void)acpi_os_delete_semaphore(obj_desc->event.semaphore);
-		obj_desc->event.semaphore = temp_semaphore;
+		(void)acpi_os_delete_semaphore(obj_desc->event.os_semaphore);
+		obj_desc->event.os_semaphore = temp_semaphore;
 	}

 	return (status);
drivers/acpi/hardware/hwregs.c

@@ -467,14 +467,13 @@ ACPI_EXPORT_SYMBOL(acpi_set_register)
 *
 * FUNCTION:    acpi_hw_register_read
 *
-* PARAMETERS:  use_lock            - Mutex hw access
-*              register_id         - register_iD + Offset
+* PARAMETERS:  use_lock            - Lock hardware? True/False
+*              register_id         - ACPI Register ID
 *              return_value        - Where the register value is returned
 *
 * RETURN:      Status and the value read.
 *
-* DESCRIPTION: Acpi register read function. Registers are read at the
-*              given offset.
+* DESCRIPTION: Read from the specified ACPI register
 *
 ******************************************************************************/
 acpi_status

@@ -580,14 +579,26 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
 *
 * FUNCTION:    acpi_hw_register_write
 *
-* PARAMETERS:  use_lock            - Mutex hw access
-*              register_id         - register_iD + Offset
+* PARAMETERS:  use_lock            - Lock hardware? True/False
+*              register_id         - ACPI Register ID
 *              Value               - The value to write
 *
 * RETURN:      Status
 *
-* DESCRIPTION: Acpi register Write function. Registers are written at the
-*              given offset.
+* DESCRIPTION: Write to the specified ACPI register
+*
+* NOTE: In accordance with the ACPI specification, this function automatically
+* preserves the value of the following bits, meaning that these bits cannot be
+* changed via this interface:
+*
+* PM1_CONTROL[0] = SCI_EN
+* PM1_CONTROL[9]
+* PM1_STATUS[11]
+*
+* ACPI References:
+* 1) Hardware Ignored Bits: When software writes to a register with ignored
+*    bit fields, it preserves the ignored bit fields
+* 2) SCI_EN: OSPM always preserves this bit position
 *
 ******************************************************************************/

@@ -595,6 +606,7 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
 {
 	acpi_status status;
 	acpi_cpu_flags lock_flags = 0;
+	u32 read_value;

 	ACPI_FUNCTION_TRACE(hw_register_write);

@@ -605,6 +617,22 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
 	switch (register_id) {
 	case ACPI_REGISTER_PM1_STATUS:	/* 16-bit access */

+		/* Perform a read first to preserve certain bits (per ACPI spec) */
+
+		status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK,
+					       ACPI_REGISTER_PM1_STATUS,
+					       &read_value);
+		if (ACPI_FAILURE(status)) {
+			goto unlock_and_exit;
+		}
+
+		/* Insert the bits to be preserved */
+
+		ACPI_INSERT_BITS(value, ACPI_PM1_STATUS_PRESERVED_BITS,
+				 read_value);
+
+		/* Now we can write the data */
+
 		status = acpi_hw_low_level_write(16, value,
 						 &acpi_gbl_FADT->xpm1a_evt_blk);

@@ -635,6 +663,25 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
 	case ACPI_REGISTER_PM1_CONTROL:	/* 16-bit access */

+		/*
+		 * Perform a read first to preserve certain bits (per ACPI spec)
+		 *
+		 * Note: This includes SCI_EN, we never want to change this bit
+		 */
+		status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK,
+					       ACPI_REGISTER_PM1_CONTROL,
+					       &read_value);
+		if (ACPI_FAILURE(status)) {
+			goto unlock_and_exit;
+		}
+
+		/* Insert the bits to be preserved */
+
+		ACPI_INSERT_BITS(value, ACPI_PM1_CONTROL_PRESERVED_BITS,
+				 read_value);
+
+		/* Now we can write the data */
+
 		status = acpi_hw_low_level_write(16, value,
 						 &acpi_gbl_FADT->xpm1a_cnt_blk);
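Editorial note: a minimal stand-alone sketch (not ACPICA code) of the read-preserve-write sequence acpi_hw_register_write() now performs. The mask values mirror the new ACPI_PM1_*_PRESERVED_BITS definitions added in aclocal.h, and the insert step uses the same expression the new ACPI_INSERT_BITS macro expands to; the register variable and helper names here are invented for the example.

	#include <stdint.h>
	#include <stdio.h>

	#define PM1_STATUS_PRESERVED_BITS   0x0800u	/* bit 11 */
	#define PM1_CONTROL_PRESERVED_BITS  0x0201u	/* bit 9, bit 0 (SCI_EN) */

	#define INSERT_BITS(target, mask, source) \
		((target) = (((target) & ~(mask)) | ((source) & (mask))))

	static uint32_t hw_reg = 0x0a01;	/* pretend current register content */

	static void register_write(uint32_t value, uint32_t preserved_mask)
	{
		uint32_t read_value = hw_reg;			/* read first */

		INSERT_BITS(value, preserved_mask, read_value);	/* keep preserved bits */
		hw_reg = value;					/* now write */
	}

	int main(void)
	{
		register_write(0x0000, PM1_CONTROL_PRESERVED_BITS);
		/* SCI_EN (bit 0) and bit 9 survive the write of zero */
		printf("PM1_CONTROL after write of 0: 0x%04x\n", (unsigned)hw_reg);
		return 0;
	}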
drivers/acpi/namespace/nsaccess.c

@@ -196,33 +196,30 @@ acpi_status acpi_ns_root_initialize(void)
 			    (u8) (ACPI_TO_INTEGER(val) - 1);

 			if (ACPI_STRCMP(init_val->name, "_GL_") == 0) {
-				/*
-				 * Create a counting semaphore for the
-				 * global lock
-				 */
+
+				/* Create a counting semaphore for the global lock */
+
 				status =
 				    acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT,
 							     1,
-							     &obj_desc->mutex.
-							     semaphore);
+							     &acpi_gbl_global_lock_semaphore);
 				if (ACPI_FAILURE(status)) {
 					acpi_ut_remove_reference(obj_desc);
 					goto unlock_and_exit;
 				}

-				/*
-				 * We just created the mutex for the
-				 * global lock, save it
-				 */
-				acpi_gbl_global_lock_semaphore =
-				    obj_desc->mutex.semaphore;
+				/* Mark this mutex as very special */
+
+				obj_desc->mutex.os_mutex = ACPI_GLOBAL_LOCK;
 			} else {
 				/* Create a mutex */

-				status = acpi_os_create_semaphore(1, 1,
-								  &obj_desc->
-								  mutex.
-								  semaphore);
+				status =
+				    acpi_os_create_mutex(&obj_desc->mutex.os_mutex);
 				if (ACPI_FAILURE(status)) {
 					acpi_ut_remove_reference(obj_desc);
drivers/acpi/osl.c

@@ -688,18 +688,9 @@ EXPORT_SYMBOL(acpi_os_wait_events_complete);
 /*
  * Allocate the memory for a spinlock and initialize it.
  */
-acpi_status acpi_os_create_lock(acpi_handle * out_handle)
+acpi_status acpi_os_create_lock(acpi_spinlock * handle)
 {
-	spinlock_t *lock_ptr;
-
-	lock_ptr = acpi_os_allocate(sizeof(spinlock_t));
-
-	spin_lock_init(lock_ptr);
-
-	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating spinlock[%p].\n", lock_ptr));
-
-	*out_handle = lock_ptr;
+	spin_lock_init(*handle);

 	return AE_OK;
 }

@@ -707,13 +698,8 @@ acpi_status acpi_os_create_lock(acpi_handle * out_handle)
 /*
  * Deallocate the memory for a spinlock.
  */
-void acpi_os_delete_lock(acpi_handle handle)
+void acpi_os_delete_lock(acpi_spinlock handle)
 {
-	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting spinlock[%p].\n", handle));
-
-	acpi_os_free(handle);
-
 	return;
 }

@@ -1037,10 +1023,10 @@ EXPORT_SYMBOL(max_cstate);
  * handle is a pointer to the spinlock_t.
  */
-acpi_cpu_flags acpi_os_acquire_lock(acpi_handle handle)
+acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
 {
 	acpi_cpu_flags flags;
-	spin_lock_irqsave((spinlock_t *) handle, flags);
+	spin_lock_irqsave(lockp, flags);
 	return flags;
 }

@@ -1048,9 +1034,9 @@ acpi_cpu_flags acpi_os_acquire_lock(acpi_handle handle)
 /*
  * Release a spinlock. See above.
  */
-void acpi_os_release_lock(acpi_handle handle, acpi_cpu_flags flags)
+void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
 {
-	spin_unlock_irqrestore((spinlock_t *) handle, flags);
+	spin_unlock_irqrestore(lockp, flags);
 }

 #ifndef ACPI_USE_LOCAL_CACHE
drivers/acpi/parser/psparse.c

@@ -469,6 +469,16 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
 	}

 	walk_state->thread = thread;
+
+	/*
+	 * If executing a method, the starting sync_level is this method's
+	 * sync_level
+	 */
+	if (walk_state->method_desc) {
+		walk_state->thread->current_sync_level =
+		    walk_state->method_desc->method.sync_level;
+	}
+
 	acpi_ds_push_walk_state(walk_state, thread);

 	/*

@@ -505,6 +515,10 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
 			status =
 			    acpi_ds_call_control_method(thread, walk_state,
 							NULL);
+			if (ACPI_FAILURE(status)) {
+				status =
+				    acpi_ds_method_error(status, walk_state);
+			}

 			/*
 			 * If the transfer to the new method method call worked, a new walk

@@ -525,7 +539,7 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
 			/* Check for possible multi-thread reentrancy problem */

 			if ((status == AE_ALREADY_EXISTS) &&
-			    (!walk_state->method_desc->method.semaphore)) {
+			    (!walk_state->method_desc->method.mutex)) {
 				/*
 				 * Method tried to create an object twice. The probable cause is
 				 * that the method cannot handle reentrancy.

@@ -537,7 +551,7 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
 				 */
 				walk_state->method_desc->method.method_flags |=
 				    AML_METHOD_SERIALIZED;
-				walk_state->method_desc->method.concurrency = 1;
+				walk_state->method_desc->method.sync_level = 0;
 			}
 		}
drivers/acpi/utilities/utdelete.c

@@ -155,21 +155,30 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
 	case ACPI_TYPE_MUTEX:

 		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
-				  "***** Mutex %p, Semaphore %p\n",
-				  object, object->mutex.semaphore));
+				  "***** Mutex %p, OS Mutex %p\n",
+				  object, object->mutex.os_mutex));

-		acpi_ex_unlink_mutex(object);
-		(void)acpi_os_delete_semaphore(object->mutex.semaphore);
+		if (object->mutex.os_mutex != ACPI_GLOBAL_LOCK) {
+			acpi_ex_unlink_mutex(object);
+			acpi_os_delete_mutex(object->mutex.os_mutex);
+		} else {
+			/* Global Lock "mutex" is actually a counting semaphore */
+
+			(void)
+			    acpi_os_delete_semaphore(acpi_gbl_global_lock_semaphore);
+			acpi_gbl_global_lock_semaphore = NULL;
+		}
 		break;

 	case ACPI_TYPE_EVENT:

 		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
-				  "***** Event %p, Semaphore %p\n",
-				  object, object->event.semaphore));
+				  "***** Event %p, OS Semaphore %p\n",
+				  object, object->event.os_semaphore));

-		(void)acpi_os_delete_semaphore(object->event.semaphore);
-		object->event.semaphore = NULL;
+		(void)acpi_os_delete_semaphore(object->event.os_semaphore);
+		object->event.os_semaphore = NULL;
 		break;

 	case ACPI_TYPE_METHOD:

@@ -177,12 +186,13 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
 		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
 				  "***** Method %p\n", object));

-		/* Delete the method semaphore if it exists */
+		/* Delete the method mutex if it exists */

-		if (object->method.semaphore) {
-			(void)acpi_os_delete_semaphore(object->method.semaphore);
-			object->method.semaphore = NULL;
+		if (object->method.mutex) {
+			acpi_os_delete_mutex(object->method.mutex->mutex.os_mutex);
+			acpi_ut_delete_object_desc(object->method.mutex);
+			object->method.mutex = NULL;
 		}
 		break;
drivers/acpi/utilities/utglobal.c

@@ -794,6 +794,7 @@ void acpi_ut_init_globals(void)

 	/* Global Lock support */

+	acpi_gbl_global_lock_semaphore = NULL;
 	acpi_gbl_global_lock_acquired = FALSE;
 	acpi_gbl_global_lock_thread_count = 0;
 	acpi_gbl_global_lock_handle = 0;
drivers/acpi/utilities/utmutex.c

@@ -82,12 +82,9 @@ acpi_status acpi_ut_mutex_initialize(void)

 	/* Create the spinlocks for use at interrupt level */

-	status = acpi_os_create_lock(&acpi_gbl_gpe_lock);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
+	spin_lock_init(acpi_gbl_gpe_lock);
+	spin_lock_init(acpi_gbl_hardware_lock);

-	status = acpi_os_create_lock(&acpi_gbl_hardware_lock);
 	return_ACPI_STATUS(status);
 }

@@ -146,9 +143,8 @@ static acpi_status acpi_ut_create_mutex(acpi_mutex_handle mutex_id)
 	}

 	if (!acpi_gbl_mutex_info[mutex_id].mutex) {
-		status = acpi_os_create_semaphore(1, 1,
-						  &acpi_gbl_mutex_info
-						  [mutex_id].mutex);
+		status =
+		    acpi_os_create_mutex(&acpi_gbl_mutex_info[mutex_id].mutex);
 		acpi_gbl_mutex_info[mutex_id].thread_id =
 		    ACPI_MUTEX_NOT_ACQUIRED;
 		acpi_gbl_mutex_info[mutex_id].use_count = 0;

@@ -171,7 +167,6 @@ static acpi_status acpi_ut_create_mutex(acpi_mutex_handle mutex_id)
 static acpi_status acpi_ut_delete_mutex(acpi_mutex_handle mutex_id)
 {
-	acpi_status status;

 	ACPI_FUNCTION_TRACE_U32(ut_delete_mutex, mutex_id);

@@ -179,12 +174,12 @@ static acpi_status acpi_ut_delete_mutex(acpi_mutex_handle mutex_id)
 		return_ACPI_STATUS(AE_BAD_PARAMETER);
 	}

-	status = acpi_os_delete_semaphore(acpi_gbl_mutex_info[mutex_id].mutex);
+	acpi_os_delete_mutex(acpi_gbl_mutex_info[mutex_id].mutex);

 	acpi_gbl_mutex_info[mutex_id].mutex = NULL;
 	acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED;

-	return_ACPI_STATUS(status);
+	return_ACPI_STATUS(AE_OK);
 }

@@ -251,8 +246,8 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
 			  "Thread %X attempting to acquire Mutex [%s]\n",
 			  this_thread_id, acpi_ut_get_mutex_name(mutex_id)));

-	status = acpi_os_wait_semaphore(acpi_gbl_mutex_info[mutex_id].mutex,
-					1, ACPI_WAIT_FOREVER);
+	status = acpi_os_acquire_mutex(acpi_gbl_mutex_info[mutex_id].mutex,
+				       ACPI_WAIT_FOREVER);
 	if (ACPI_SUCCESS(status)) {
 		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
 				  "Thread %X acquired Mutex [%s]\n",

@@ -284,7 +279,6 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
 acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
 {
-	acpi_status status;
 	acpi_thread_id this_thread_id;

 	ACPI_FUNCTION_NAME(ut_release_mutex);

@@ -340,19 +334,6 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
 	acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED;

-	status =
-	    acpi_os_signal_semaphore(acpi_gbl_mutex_info[mutex_id].mutex, 1);
-
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status,
-				"Thread %X could not release Mutex [%X]",
-				this_thread_id, mutex_id));
-	} else {
-		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
-				  "Thread %X released Mutex [%s]\n",
-				  this_thread_id,
-				  acpi_ut_get_mutex_name(mutex_id)));
-	}
-
-	return (status);
+	acpi_os_release_mutex(acpi_gbl_mutex_info[mutex_id].mutex);
+	return (AE_OK);
 }
include/acpi/acconfig.h

@@ -63,7 +63,7 @@

 /* Current ACPICA subsystem version in YYYYMMDD format */

-#define ACPI_CA_VERSION                 0x20060608
+#define ACPI_CA_VERSION                 0x20060623

 /*
  * OS name, used for the _OS object. The _OS object is essentially obsolete,
include/acpi/acdispat.h

@@ -201,7 +201,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
 acpi_status
 acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
 			       union acpi_operand_object *obj_desc,
-			       struct acpi_namespace_node *calling_method_node);
+			       struct acpi_walk_state *walk_state);

 acpi_status
 acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state);
include/acpi/acglobal.h

@@ -181,6 +181,12 @@ ACPI_EXTERN u8 acpi_gbl_integer_nybble_width;
 extern struct acpi_table_list acpi_gbl_table_lists[ACPI_TABLE_ID_MAX + 1];
 extern struct acpi_table_support acpi_gbl_table_data[ACPI_TABLE_ID_MAX + 1];

+/*****************************************************************************
+ *
+ * Mutual exlusion within ACPICA subsystem
+ *
+ ****************************************************************************/
+
 /*
  * Predefined mutex objects. This array contains the
  * actual OS mutex handles, indexed by the local ACPI_MUTEX_HANDLEs.

@@ -188,6 +194,20 @@ extern struct acpi_table_support acpi_gbl_table_data[ACPI_TABLE_ID_MAX + 1];
 */
 ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[ACPI_NUM_MUTEX];

+/*
+ * Global lock semaphore works in conjunction with the actual HW global lock
+ */
+ACPI_EXTERN acpi_semaphore acpi_gbl_global_lock_semaphore;
+
+/*
+ * Spinlocks are used for interfaces that can be possibly called at
+ * interrupt level
+ */
+ACPI_EXTERN spinlock_t _acpi_gbl_gpe_lock;	/* For GPE data structs and registers */
+ACPI_EXTERN spinlock_t _acpi_gbl_hardware_lock;	/* For ACPI H/W except GPE registers */
+#define acpi_gbl_gpe_lock	&_acpi_gbl_gpe_lock
+#define acpi_gbl_hardware_lock	&_acpi_gbl_hardware_lock
+
 /*****************************************************************************
  *
  * Miscellaneous globals

@@ -217,7 +237,6 @@ ACPI_EXTERN struct acpi_object_notify_handler acpi_gbl_system_notify;
 ACPI_EXTERN acpi_exception_handler acpi_gbl_exception_handler;
 ACPI_EXTERN acpi_init_handler acpi_gbl_init_handler;
 ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk;
-ACPI_EXTERN acpi_handle acpi_gbl_global_lock_semaphore;

 /* Misc */

@@ -315,11 +334,6 @@ ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
 ACPI_EXTERN struct acpi_gpe_block_info
     *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];

-/* Spinlocks */
-
-ACPI_EXTERN acpi_handle acpi_gbl_gpe_lock;
-ACPI_EXTERN acpi_handle acpi_gbl_hardware_lock;
-
 /*****************************************************************************
  *
  * Debugger globals
include/acpi/acinterp.h

@@ -287,7 +287,10 @@ acpi_ex_system_wait_event(union acpi_operand_object *time,
 acpi_status acpi_ex_system_reset_event(union acpi_operand_object *obj_desc);

 acpi_status
-acpi_ex_system_wait_semaphore(acpi_handle semaphore, u16 timeout);
+acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout);
+
+acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout);

 /*
  * exoparg1 - ACPI AML execution, 1 operand
include/acpi/aclocal.h

@@ -47,10 +47,11 @@
 /* acpisrc:struct_defs -- for acpisrc conversion */

 #define ACPI_WAIT_FOREVER               0xFFFF	/* u16, as per ACPI spec */
-#define ACPI_INFINITE_CONCURRENCY       0xFF
+#define ACPI_DO_NOT_WAIT                0
+#define ACPI_SERIALIZED                 0xFF

-typedef void *acpi_mutex;
 typedef u32 acpi_mutex_handle;
+#define ACPI_GLOBAL_LOCK                (acpi_semaphore) (-1)

 /* Total number of aml opcodes defined */

@@ -79,16 +80,15 @@ union acpi_parse_object;
 * table below also!
 */
 #define ACPI_MTX_INTERPRETER            0	/* AML Interpreter, main lock */
-#define ACPI_MTX_CONTROL_METHOD         1	/* Control method termination [TBD: may no longer be necessary] */
-#define ACPI_MTX_TABLES                 2	/* Data for ACPI tables */
-#define ACPI_MTX_NAMESPACE              3	/* ACPI Namespace */
-#define ACPI_MTX_EVENTS                 4	/* Data for ACPI events */
-#define ACPI_MTX_CACHES                 5	/* Internal caches, general purposes */
-#define ACPI_MTX_MEMORY                 6	/* Debug memory tracking lists */
-#define ACPI_MTX_DEBUG_CMD_COMPLETE     7	/* AML debugger */
-#define ACPI_MTX_DEBUG_CMD_READY        8	/* AML debugger */
-
-#define ACPI_MAX_MUTEX                  8
+#define ACPI_MTX_TABLES                 1	/* Data for ACPI tables */
+#define ACPI_MTX_NAMESPACE              2	/* ACPI Namespace */
+#define ACPI_MTX_EVENTS                 3	/* Data for ACPI events */
+#define ACPI_MTX_CACHES                 4	/* Internal caches, general purposes */
+#define ACPI_MTX_MEMORY                 5	/* Debug memory tracking lists */
+#define ACPI_MTX_DEBUG_CMD_COMPLETE     6	/* AML debugger */
+#define ACPI_MTX_DEBUG_CMD_READY        7	/* AML debugger */
+
+#define ACPI_MAX_MUTEX                  7
 #define ACPI_NUM_MUTEX                  ACPI_MAX_MUTEX+1

 #if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)

@@ -98,14 +98,13 @@ union acpi_parse_object;
 static char *acpi_gbl_mutex_names[ACPI_NUM_MUTEX] = {
 	"ACPI_MTX_Interpreter",
-	"ACPI_MTX_Method",
 	"ACPI_MTX_Tables",
 	"ACPI_MTX_Namespace",
 	"ACPI_MTX_Events",
 	"ACPI_MTX_Caches",
 	"ACPI_MTX_Memory",
-	"ACPI_MTX_DebugCmdComplete",
-	"ACPI_MTX_DebugCmdReady"
+	"ACPI_MTX_CommandComplete",
+	"ACPI_MTX_CommandReady"
 };

 #endif

@@ -704,6 +703,13 @@ struct acpi_bit_register_info {
 	u16 access_bit_mask;
 };

+/*
+ * Some ACPI registers have bits that must be ignored -- meaning that they
+ * must be preserved.
+ */
+#define ACPI_PM1_STATUS_PRESERVED_BITS          0x0800	/* Bit 11 */
+#define ACPI_PM1_CONTROL_PRESERVED_BITS         0x0201	/* Bit 9, Bit 0 (SCI_EN) */
+
 /*
  * Register IDs
  * These are the full ACPI registers
include/acpi/acmacros.h

@@ -394,6 +394,8 @@
 #define ACPI_REGISTER_PREPARE_BITS(val, pos, mask)      ((val << pos) & mask)
 #define ACPI_REGISTER_INSERT_VALUE(reg, pos, mask, val)  reg = (reg & (~(mask))) | ACPI_REGISTER_PREPARE_BITS(val, pos, mask)

+#define ACPI_INSERT_BITS(target, mask, source)          target = ((target & (~(mask))) | (source & mask))
+
 /* Generate a UUID */

 #define ACPI_INIT_UUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \
include/acpi/acobject.h

@@ -140,14 +140,14 @@ struct acpi_object_package {
 *****************************************************************************/

 struct acpi_object_event {
-	ACPI_OBJECT_COMMON_HEADER void *semaphore;
+	ACPI_OBJECT_COMMON_HEADER acpi_semaphore os_semaphore;	/* Actual OS synchronization object */
 };

 struct acpi_object_mutex {
 	ACPI_OBJECT_COMMON_HEADER u8 sync_level;	/* 0-15, specified in Mutex() call */
 	u16 acquisition_depth;	/* Allow multiple Acquires, same thread */
 	struct acpi_thread_state *owner_thread;	/* Current owner of the mutex */
-	void *semaphore;	/* Actual OS synchronization object */
+	acpi_mutex os_mutex;	/* Actual OS synchronization object */
 	union acpi_operand_object *prev;	/* Link for list of acquired mutexes */
 	union acpi_operand_object *next;	/* Link for list of acquired mutexes */
 	struct acpi_namespace_node *node;	/* Containing namespace node */

@@ -166,8 +166,8 @@ struct acpi_object_region {
 struct acpi_object_method {
 	ACPI_OBJECT_COMMON_HEADER u8 method_flags;
 	u8 param_count;
-	u8 concurrency;
-	void *semaphore;
+	u8 sync_level;
+	union acpi_operand_object *mutex;
 	u8 *aml_start;
 	ACPI_INTERNAL_METHOD implementation;
 	u32 aml_length;
include/acpi/acpiosxf.h

@@ -96,25 +96,47 @@ acpi_os_table_override(struct acpi_table_header *existing_table,
 		       struct acpi_table_header **new_table);

 /*
- * Synchronization primitives
+ * Spinlock primitives
+ */
+acpi_status acpi_os_create_lock(acpi_spinlock * out_handle);
+
+void acpi_os_delete_lock(acpi_spinlock handle);
+
+acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock handle);
+
+void acpi_os_release_lock(acpi_spinlock handle, acpi_cpu_flags flags);
+
+/*
+ * Semaphore primitives
 */
 acpi_status
 acpi_os_create_semaphore(u32 max_units,
-			 u32 initial_units, acpi_handle * out_handle);
+			 u32 initial_units, acpi_semaphore * out_handle);

-acpi_status acpi_os_delete_semaphore(acpi_handle handle);
+acpi_status acpi_os_delete_semaphore(acpi_semaphore handle);

-acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout);
+acpi_status
+acpi_os_wait_semaphore(acpi_semaphore handle, u32 units, u16 timeout);

-acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units);
+acpi_status acpi_os_signal_semaphore(acpi_semaphore handle, u32 units);

-acpi_status acpi_os_create_lock(acpi_handle * out_handle);
+/*
+ * Mutex primitives
+ */
+acpi_status acpi_os_create_mutex(acpi_mutex * out_handle);

-void acpi_os_delete_lock(acpi_handle handle);
+void acpi_os_delete_mutex(acpi_mutex handle);

-acpi_cpu_flags acpi_os_acquire_lock(acpi_handle handle);
+acpi_status acpi_os_acquire_mutex(acpi_mutex handle, u16 timeout);

-void acpi_os_release_lock(acpi_handle handle, acpi_cpu_flags flags);
+void acpi_os_release_mutex(acpi_mutex handle);
+
+/* Temporary macros for Mutex* interfaces, map to existing semaphore xfaces */
+
+#define acpi_os_create_mutex(out_handle)    acpi_os_create_semaphore (1, 1, out_handle)
+#define acpi_os_delete_mutex(handle)        (void) acpi_os_delete_semaphore (handle)
+#define acpi_os_acquire_mutex(handle,time)  acpi_os_wait_semaphore (handle, 1, time)
+#define acpi_os_release_mutex(handle)       (void) acpi_os_signal_semaphore (handle, 1)

 /*
  * Memory allocation and mapping
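Editorial note: the "temporary macros" added above implement the new mutex OSL interface on top of a one-unit counting semaphore until hosts supply native mutexes. A hedged, stand-alone sketch of that mapping in plain C (POSIX semaphores used purely for illustration; the shim_* names are invented, and the timeout parameter of the real acpi_os_acquire_mutex() is ignored here):

	#include <semaphore.h>
	#include <stdio.h>
	#include <stdlib.h>

	typedef sem_t *acpi_mutex_shim;

	static int shim_create_mutex(acpi_mutex_shim *out_handle)
	{
		sem_t *sem = malloc(sizeof(*sem));

		if (!sem || sem_init(sem, 0, 1))	/* one unit => mutex semantics */
			return -1;
		*out_handle = sem;
		return 0;
	}

	static int shim_acquire_mutex(acpi_mutex_shim handle)
	{
		return sem_wait(handle);		/* "wait one unit" */
	}

	static void shim_release_mutex(acpi_mutex_shim handle)
	{
		sem_post(handle);			/* "signal one unit" */
	}

	static void shim_delete_mutex(acpi_mutex_shim handle)
	{
		sem_destroy(handle);
		free(handle);
	}

	int main(void)
	{
		acpi_mutex_shim m;

		if (shim_create_mutex(&m))
			return 1;
		shim_acquire_mutex(m);
		shim_release_mutex(m);
		shim_delete_mutex(m);
		puts("semaphore-backed mutex round trip ok");
		return 0;
	}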
include/acpi/actypes.h

@@ -241,7 +241,7 @@ typedef acpi_native_uint acpi_size;

 /*******************************************************************************
 *
-* OS- or compiler-dependent types
+* OS-dependent and compiler-dependent types
 *
 * If the defaults below are not appropriate for the host system, they can
 * be defined in the compiler-specific or OS-specific header, and this will

@@ -249,29 +249,36 @@ typedef acpi_native_uint acpi_size;
 *
 ******************************************************************************/

-/* Use C99 uintptr_t for pointer casting if available, "void *" otherwise */
+/* Value returned by acpi_os_get_thread_id */

-#ifndef acpi_uintptr_t
-#define acpi_uintptr_t                  void *
+#ifndef acpi_thread_id
+#define acpi_thread_id                  acpi_native_uint
 #endif

-/*
- * If acpi_cache_t was not defined in the OS-dependent header,
- * define it now. This is typically the case where the local cache
- * manager implementation is to be used (ACPI_USE_LOCAL_CACHE)
- */
-#ifndef acpi_cache_t
-#define acpi_cache_t                    struct acpi_memory_list
+/* Object returned from acpi_os_create_lock */
+
+#ifndef acpi_spinlock
+#define acpi_spinlock                   void *
 #endif

-/*
- * Allow the CPU flags word to be defined per-OS to simplify the use of the
- * lock and unlock OSL interfaces.
- */
+/* Flags for acpi_os_acquire_lock/acpi_os_release_lock */
+
 #ifndef acpi_cpu_flags
 #define acpi_cpu_flags                  acpi_native_uint
 #endif

+/* Object returned from acpi_os_create_cache */
+
+#ifndef acpi_cache_t
+#define acpi_cache_t                    struct acpi_memory_list
+#endif
+
+/* Use C99 uintptr_t for pointer casting if available, "void *" otherwise */
+
+#ifndef acpi_uintptr_t
+#define acpi_uintptr_t                  void *
+#endif
+
 /*
  * ACPI_PRINTF_LIKE is used to tag functions as "printf-like" because
  * some compilers can catch printf format string problems

@@ -298,13 +305,6 @@ typedef acpi_native_uint acpi_size;
 #define ACPI_EXPORT_SYMBOL(symbol)
 #endif

-/*
- * thread_id is returned by acpi_os_get_thread_id.
- */
-#ifndef acpi_thread_id
-#define acpi_thread_id                  acpi_native_uint
-#endif
-
 /*******************************************************************************
 *
 * Independent types

@@ -380,6 +380,11 @@ struct uint32_struct {
 	u32 hi;
 };

+/* Synchronization objects */
+
+#define acpi_mutex                      void *
+#define acpi_semaphore                  void *
+
 /*
  * Acpi integer width. In ACPI version 1, integers are
  * 32 bits. In ACPI version 2, integers are 64 bits.
include/acpi/platform/aclinux.h

@@ -58,11 +58,13 @@
 #include <asm/div64.h>
 #include <asm/acpi.h>
 #include <linux/slab.h>
+#include <linux/spinlock_types.h>

 /* Host-dependent types and defines */

 #define ACPI_MACHINE_WIDTH          BITS_PER_LONG
 #define acpi_cache_t                kmem_cache_t
+#define acpi_spinlock               spinlock_t *
 #define ACPI_EXPORT_SYMBOL(symbol)  EXPORT_SYMBOL(symbol);
 #define strtoul                     simple_strtoul