Commit daa94222 authored by Linus Torvalds

Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6:
  ACPI EC: remove redundant code
  ACPI: Add D3 cold state
  ACPI: processor: fix processor_physically_present in UP kernel
  ACPI: Split out custom_method functionality into an own driver
  ACPI: Cleanup custom_method debug stuff
  ACPI EC: enable MSI workaround for Quanta laptops
  ACPICA: Update to version 20110413
  ACPICA: Execute an orphan _REG method under the EC device
  ACPICA: Move ACPI_NUM_PREDEFINED_REGIONS to a more appropriate place
  ACPICA: Update internal address SpaceID for DataTable regions
  ACPICA: Add more methods eligible for NULL package element removal
  ACPICA: Split all internal Global Lock functions to new file - evglock
  ACPI: EC: add another DMI check for ASUS hardware
  ACPI EC: remove dead code
  ACPICA: Fix code divergence of global lock handling
  ACPICA: Use acpi_os_create_lock interface
  ACPI: osl, add acpi_os_create_lock interface
  ACPI:Fix goto flows in thermal-sys
parents f3106421 751516f0
...@@ -66,3 +66,8 @@ Note: We can use a kernel with multiple custom ACPI method running,
But each individual write to debugfs can implement a SINGLE
method override. i.e. if we want to insert/override multiple
ACPI methods, we need to redo step c) ~ g) for multiple times.
Note: Be aware that root can mis-use this driver to modify arbitrary
memory and gain additional rights, if root's privileges got
restricted (for example if root is not allowed to load additional
modules after boot).
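As a hedged illustration of the workflow this note describes (the input file name and buffer size below are hypothetical; the debugfs path is the one documented in this commit), one override cycle from userspace could look like the following sketch:

/*
 * Minimal sketch: push one compiled AML method (e.g. produced by
 * "iasl -tc method.asl") into the kernel through the custom_method
 * debugfs file.  One complete write cycle overrides exactly one
 * method, as the note above explains.
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	const char *aml = (argc > 1) ? argv[1] : "method.aml";	/* hypothetical input file */
	FILE *in = fopen(aml, "rb");
	FILE *out = fopen("/sys/kernel/debug/acpi/custom_method", "wb");
	char buf[4096];
	size_t n;

	if (!in || !out) {
		perror("open");
		return 1;
	}

	/* Stream the AML blob; the driver tracks the offset across writes. */
	while ((n = fread(buf, 1, sizeof(buf), in)) > 0)
		fwrite(buf, 1, n, out);

	fclose(in);
	fclose(out);
	return 0;
}

Repeating steps c) ~ g) with another AML file repeats this cycle for the next method.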
...@@ -369,6 +369,21 @@ config ACPI_HED
which is used to report some hardware errors notified via
SCI, mainly the corrected errors.
config ACPI_CUSTOM_METHOD
tristate "Allow ACPI methods to be inserted/replaced at run time"
depends on DEBUG_FS
default n
help
This debug facility allows ACPI AML methods to be inserted and/or
replaced without rebooting the system. For details refer to:
Documentation/acpi/method-customizing.txt.
NOTE: This option is security sensitive, because it allows arbitrary
kernel memory to be written to by root (uid=0) users, allowing them
to bypass certain security measures (e.g. if root is not allowed to
load additional kernel modules after boot, this feature may be used
to override that restriction).
source "drivers/acpi/apei/Kconfig" source "drivers/acpi/apei/Kconfig"
endif # ACPI endif # ACPI
...@@ -61,6 +61,7 @@ obj-$(CONFIG_ACPI_SBS) += sbshc.o
obj-$(CONFIG_ACPI_SBS) += sbs.o
obj-$(CONFIG_ACPI_HED) += hed.o
obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys.o
obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o
# processor has its own "processor." module_param namespace
processor-y := processor_driver.o processor_throttling.o
......
...@@ -14,7 +14,7 @@ acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \
evmisc.o evrgnini.o evxface.o evxfregn.o \
evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o evglock.o
acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\
......
...@@ -187,7 +187,6 @@
/* Operation regions */
#define ACPI_NUM_PREDEFINED_REGIONS 9
#define ACPI_USER_REGION_BEGIN 0x80
/* Maximum space_ids for Operation Regions */
......
...@@ -58,18 +58,23 @@ u32 acpi_ev_fixed_event_detect(void);
*/
u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node);
acpi_status acpi_ev_acquire_global_lock(u16 timeout);
acpi_status acpi_ev_release_global_lock(void);
acpi_status acpi_ev_init_global_lock_handler(void);
u32 acpi_ev_get_gpe_number_index(u32 gpe_number);
acpi_status
acpi_ev_queue_notify_request(struct acpi_namespace_node *node,
u32 notify_value);
/*
* evglock - Global Lock support
*/
acpi_status acpi_ev_init_global_lock_handler(void);
acpi_status acpi_ev_acquire_global_lock(u16 timeout);
acpi_status acpi_ev_release_global_lock(void);
acpi_status acpi_ev_remove_global_lock_handler(void);
/*
* evgpe - Low-level GPE support
*/
......
...@@ -214,24 +214,23 @@ ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[ACPI_NUM_MUTEX];
/*
* Global lock mutex is an actual AML mutex object
* Global lock semaphore works in conjunction with the HW global lock * Global lock semaphore works in conjunction with the actual global lock
* Global lock spinlock is used for "pending" handshake
*/
ACPI_EXTERN union acpi_operand_object *acpi_gbl_global_lock_mutex;
ACPI_EXTERN acpi_semaphore acpi_gbl_global_lock_semaphore;
ACPI_EXTERN acpi_spinlock acpi_gbl_global_lock_pending_lock;
ACPI_EXTERN u16 acpi_gbl_global_lock_handle;
ACPI_EXTERN u8 acpi_gbl_global_lock_acquired;
ACPI_EXTERN u8 acpi_gbl_global_lock_present;
ACPI_EXTERN u8 acpi_gbl_global_lock_pending;
/*
* Spinlocks are used for interfaces that can be possibly called at
* interrupt level
*/
ACPI_EXTERN spinlock_t _acpi_gbl_gpe_lock; /* For GPE data structs and registers */ ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock; /* For GPE data structs and registers */
ACPI_EXTERN spinlock_t _acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */ ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
ACPI_EXTERN spinlock_t _acpi_ev_global_lock_pending_lock; /* For global lock */
#define acpi_gbl_gpe_lock &_acpi_gbl_gpe_lock
#define acpi_gbl_hardware_lock &_acpi_gbl_hardware_lock
#define acpi_ev_global_lock_pending_lock &_acpi_ev_global_lock_pending_lock
/*****************************************************************************
*
......
...@@ -394,21 +394,6 @@
#define AML_CLASS_METHOD_CALL 0x09
#define AML_CLASS_UNKNOWN 0x0A
/* Predefined Operation Region space_iDs */
typedef enum {
REGION_MEMORY = 0,
REGION_IO,
REGION_PCI_CONFIG,
REGION_EC,
REGION_SMBUS,
REGION_CMOS,
REGION_PCI_BAR,
REGION_IPMI,
REGION_DATA_TABLE, /* Internal use only */
REGION_FIXED_HW = 0x7F
} AML_REGION_TYPES;
/* Comparison operation codes for match_op operator */
typedef enum {
......
...@@ -450,7 +450,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
status =
acpi_ex_create_region(op->named.data,
op->named.length,
REGION_DATA_TABLE, ACPI_ADR_SPACE_DATA_TABLE,
walk_state);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
......
...@@ -562,7 +562,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
((op->common.value.arg)->common.value.
integer);
} else {
region_space = REGION_DATA_TABLE; region_space = ACPI_ADR_SPACE_DATA_TABLE;
}
/*
......
/******************************************************************************
*
* Module Name: evglock - Global Lock support
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acinterp.h"
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evglock")
/* Local prototypes */
static u32 acpi_ev_global_lock_handler(void *context);
/*******************************************************************************
*
* FUNCTION: acpi_ev_init_global_lock_handler
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Install a handler for the global lock release event
*
******************************************************************************/
acpi_status acpi_ev_init_global_lock_handler(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
/* Attempt installation of the global lock handler */
status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
acpi_ev_global_lock_handler,
NULL);
/*
* If the global lock does not exist on this platform, the attempt to
* enable GBL_STATUS will fail (the GBL_ENABLE bit will not stick).
* Map to AE_OK, but mark global lock as not present. Any attempt to
* actually use the global lock will be flagged with an error.
*/
acpi_gbl_global_lock_present = FALSE;
if (status == AE_NO_HARDWARE_RESPONSE) {
ACPI_ERROR((AE_INFO,
"No response from Global Lock hardware, disabling lock"));
return_ACPI_STATUS(AE_OK);
}
status = acpi_os_create_lock(&acpi_gbl_global_lock_pending_lock);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
acpi_gbl_global_lock_pending = FALSE;
acpi_gbl_global_lock_present = TRUE;
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_remove_global_lock_handler
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Remove the handler for the Global Lock
*
******************************************************************************/
acpi_status acpi_ev_remove_global_lock_handler(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE(ev_remove_global_lock_handler);
acpi_gbl_global_lock_present = FALSE;
status = acpi_remove_fixed_event_handler(ACPI_EVENT_GLOBAL,
acpi_ev_global_lock_handler);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_global_lock_handler
*
* PARAMETERS: Context - From thread interface, not used
*
* RETURN: ACPI_INTERRUPT_HANDLED
*
* DESCRIPTION: Invoked directly from the SCI handler when a global lock
* release interrupt occurs. If there is actually a pending
* request for the lock, signal the waiting thread.
*
******************************************************************************/
static u32 acpi_ev_global_lock_handler(void *context)
{
acpi_status status;
acpi_cpu_flags flags;
flags = acpi_os_acquire_lock(acpi_gbl_global_lock_pending_lock);
/*
* If a request for the global lock is not actually pending,
* we are done. This handles "spurious" global lock interrupts
* which are possible (and have been seen) with bad BIOSs.
*/
if (!acpi_gbl_global_lock_pending) {
goto cleanup_and_exit;
}
/*
* Send a unit to the global lock semaphore. The actual acquisition
* of the global lock will be performed by the waiting thread.
*/
status = acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO, "Could not signal Global Lock semaphore"));
}
acpi_gbl_global_lock_pending = FALSE;
cleanup_and_exit:
acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags);
return (ACPI_INTERRUPT_HANDLED);
}
/******************************************************************************
*
* FUNCTION: acpi_ev_acquire_global_lock
*
* PARAMETERS: Timeout - Max time to wait for the lock, in millisec.
*
* RETURN: Status
*
* DESCRIPTION: Attempt to gain ownership of the Global Lock.
*
* MUTEX: Interpreter must be locked
*
* Note: The original implementation allowed multiple threads to "acquire" the
* Global Lock, and the OS would hold the lock until the last thread had
* released it. However, this could potentially starve the BIOS out of the
* lock, especially in the case where there is a tight handshake between the
* Embedded Controller driver and the BIOS. Therefore, this implementation
* allows only one thread to acquire the HW Global Lock at a time, and makes
* the global lock appear as a standard mutex on the OS side.
*
*****************************************************************************/
acpi_status acpi_ev_acquire_global_lock(u16 timeout)
{
acpi_cpu_flags flags;
acpi_status status;
u8 acquired = FALSE;
ACPI_FUNCTION_TRACE(ev_acquire_global_lock);
/*
* Only one thread can acquire the GL at a time, the global_lock_mutex
* enforces this. This interface releases the interpreter if we must wait.
*/
status =
acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex->mutex.
os_mutex, timeout);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/*
* Update the global lock handle and check for wraparound. The handle is
* only used for the external global lock interfaces, but it is updated
* here to properly handle the case where a single thread may acquire the
* lock via both the AML and the acpi_acquire_global_lock interfaces. The
* handle is therefore updated on the first acquire from a given thread
* regardless of where the acquisition request originated.
*/
acpi_gbl_global_lock_handle++;
if (acpi_gbl_global_lock_handle == 0) {
acpi_gbl_global_lock_handle = 1;
}
/*
* Make sure that a global lock actually exists. If not, just
* treat the lock as a standard mutex.
*/
if (!acpi_gbl_global_lock_present) {
acpi_gbl_global_lock_acquired = TRUE;
return_ACPI_STATUS(AE_OK);
}
flags = acpi_os_acquire_lock(acpi_gbl_global_lock_pending_lock);
do {
/* Attempt to acquire the actual hardware lock */
ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
if (acquired) {
acpi_gbl_global_lock_acquired = TRUE;
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Acquired hardware Global Lock\n"));
break;
}
/*
* Did not get the lock. The pending bit was set above, and
* we must now wait until we receive the global lock
* released interrupt.
*/
acpi_gbl_global_lock_pending = TRUE;
acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Waiting for hardware Global Lock\n"));
/*
* Wait for handshake with the global lock interrupt handler.
* This interface releases the interpreter if we must wait.
*/
status =
acpi_ex_system_wait_semaphore
(acpi_gbl_global_lock_semaphore, ACPI_WAIT_FOREVER);
flags = acpi_os_acquire_lock(acpi_gbl_global_lock_pending_lock);
} while (ACPI_SUCCESS(status));
acpi_gbl_global_lock_pending = FALSE;
acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_release_global_lock
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Releases ownership of the Global Lock.
*
******************************************************************************/
acpi_status acpi_ev_release_global_lock(void)
{
u8 pending = FALSE;
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE(ev_release_global_lock);
/* Lock must be already acquired */
if (!acpi_gbl_global_lock_acquired) {
ACPI_WARNING((AE_INFO,
"Cannot release the ACPI Global Lock, it has not been acquired"));
return_ACPI_STATUS(AE_NOT_ACQUIRED);
}
if (acpi_gbl_global_lock_present) {
/* Allow any thread to release the lock */
ACPI_RELEASE_GLOBAL_LOCK(acpi_gbl_FACS, pending);
/*
* If the pending bit was set, we must write GBL_RLS to the control
* register
*/
if (pending) {
status =
acpi_write_bit_register
(ACPI_BITREG_GLOBAL_LOCK_RELEASE,
ACPI_ENABLE_EVENT);
}
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Released hardware Global Lock\n"));
}
acpi_gbl_global_lock_acquired = FALSE;
/* Release the local GL mutex */
acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex);
return_ACPI_STATUS(status);
}
...@@ -45,7 +45,6 @@
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"
#include "acinterp.h"
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evmisc")
...@@ -53,10 +52,6 @@ ACPI_MODULE_NAME("evmisc")
/* Local prototypes */
static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context);
static u32 acpi_ev_global_lock_handler(void *context);
static acpi_status acpi_ev_remove_global_lock_handler(void);
/*******************************************************************************
*
* FUNCTION: acpi_ev_is_notify_object
...@@ -275,304 +270,6 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
acpi_ut_delete_generic_state(notify_info);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_global_lock_handler
*
* PARAMETERS: Context - From thread interface, not used
*
* RETURN: ACPI_INTERRUPT_HANDLED
*
* DESCRIPTION: Invoked directly from the SCI handler when a global lock
* release interrupt occurs. If there's a thread waiting for
* the global lock, signal it.
*
* NOTE: Assumes that the semaphore can be signaled from interrupt level. If
* this is not possible for some reason, a separate thread will have to be
* scheduled to do this.
*
******************************************************************************/
static u8 acpi_ev_global_lock_pending;
static u32 acpi_ev_global_lock_handler(void *context)
{
acpi_status status;
acpi_cpu_flags flags;
flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
if (!acpi_ev_global_lock_pending) {
goto out;
}
/* Send a unit to the semaphore */
status = acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO, "Could not signal Global Lock semaphore"));
}
acpi_ev_global_lock_pending = FALSE;
out:
acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
return (ACPI_INTERRUPT_HANDLED);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_init_global_lock_handler
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Install a handler for the global lock release event
*
******************************************************************************/
acpi_status acpi_ev_init_global_lock_handler(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
/* Attempt installation of the global lock handler */
status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
acpi_ev_global_lock_handler,
NULL);
/*
* If the global lock does not exist on this platform, the attempt to
* enable GBL_STATUS will fail (the GBL_ENABLE bit will not stick).
* Map to AE_OK, but mark global lock as not present. Any attempt to
* actually use the global lock will be flagged with an error.
*/
if (status == AE_NO_HARDWARE_RESPONSE) {
ACPI_ERROR((AE_INFO,
"No response from Global Lock hardware, disabling lock"));
acpi_gbl_global_lock_present = FALSE;
return_ACPI_STATUS(AE_OK);
}
acpi_gbl_global_lock_present = TRUE;
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_remove_global_lock_handler
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Remove the handler for the Global Lock
*
******************************************************************************/
static acpi_status acpi_ev_remove_global_lock_handler(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE(ev_remove_global_lock_handler);
acpi_gbl_global_lock_present = FALSE;
status = acpi_remove_fixed_event_handler(ACPI_EVENT_GLOBAL,
acpi_ev_global_lock_handler);
return_ACPI_STATUS(status);
}
/******************************************************************************
*
* FUNCTION: acpi_ev_acquire_global_lock
*
* PARAMETERS: Timeout - Max time to wait for the lock, in millisec.
*
* RETURN: Status
*
* DESCRIPTION: Attempt to gain ownership of the Global Lock.
*
* MUTEX: Interpreter must be locked
*
* Note: The original implementation allowed multiple threads to "acquire" the
* Global Lock, and the OS would hold the lock until the last thread had
* released it. However, this could potentially starve the BIOS out of the
* lock, especially in the case where there is a tight handshake between the
* Embedded Controller driver and the BIOS. Therefore, this implementation
* allows only one thread to acquire the HW Global Lock at a time, and makes
* the global lock appear as a standard mutex on the OS side.
*
*****************************************************************************/
static acpi_thread_id acpi_ev_global_lock_thread_id;
static int acpi_ev_global_lock_acquired;
acpi_status acpi_ev_acquire_global_lock(u16 timeout)
{
acpi_cpu_flags flags;
acpi_status status = AE_OK;
u8 acquired = FALSE;
ACPI_FUNCTION_TRACE(ev_acquire_global_lock);
/*
* Only one thread can acquire the GL at a time, the global_lock_mutex
* enforces this. This interface releases the interpreter if we must wait.
*/
status = acpi_ex_system_wait_mutex(
acpi_gbl_global_lock_mutex->mutex.os_mutex, 0);
if (status == AE_TIME) {
if (acpi_ev_global_lock_thread_id == acpi_os_get_thread_id()) {
acpi_ev_global_lock_acquired++;
return AE_OK;
}
}
if (ACPI_FAILURE(status)) {
status = acpi_ex_system_wait_mutex(
acpi_gbl_global_lock_mutex->mutex.os_mutex,
timeout);
}
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
acpi_ev_global_lock_thread_id = acpi_os_get_thread_id();
acpi_ev_global_lock_acquired++;
/*
* Update the global lock handle and check for wraparound. The handle is
* only used for the external global lock interfaces, but it is updated
* here to properly handle the case where a single thread may acquire the
* lock via both the AML and the acpi_acquire_global_lock interfaces. The
* handle is therefore updated on the first acquire from a given thread
* regardless of where the acquisition request originated.
*/
acpi_gbl_global_lock_handle++;
if (acpi_gbl_global_lock_handle == 0) {
acpi_gbl_global_lock_handle = 1;
}
/*
* Make sure that a global lock actually exists. If not, just treat the
* lock as a standard mutex.
*/
if (!acpi_gbl_global_lock_present) {
acpi_gbl_global_lock_acquired = TRUE;
return_ACPI_STATUS(AE_OK);
}
flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
do {
/* Attempt to acquire the actual hardware lock */
ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
if (acquired) {
acpi_gbl_global_lock_acquired = TRUE;
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Acquired hardware Global Lock\n"));
break;
}
acpi_ev_global_lock_pending = TRUE;
acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
/*
* Did not get the lock. The pending bit was set above, and we
* must wait until we get the global lock released interrupt.
*/
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Waiting for hardware Global Lock\n"));
/*
* Wait for handshake with the global lock interrupt handler.
* This interface releases the interpreter if we must wait.
*/
status = acpi_ex_system_wait_semaphore(
acpi_gbl_global_lock_semaphore,
ACPI_WAIT_FOREVER);
flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
} while (ACPI_SUCCESS(status));
acpi_ev_global_lock_pending = FALSE;
acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_release_global_lock
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Releases ownership of the Global Lock.
*
******************************************************************************/
acpi_status acpi_ev_release_global_lock(void)
{
u8 pending = FALSE;
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE(ev_release_global_lock);
/* Lock must be already acquired */
if (!acpi_gbl_global_lock_acquired) {
ACPI_WARNING((AE_INFO,
"Cannot release the ACPI Global Lock, it has not been acquired"));
return_ACPI_STATUS(AE_NOT_ACQUIRED);
}
acpi_ev_global_lock_acquired--;
if (acpi_ev_global_lock_acquired > 0) {
return AE_OK;
}
if (acpi_gbl_global_lock_present) {
/* Allow any thread to release the lock */
ACPI_RELEASE_GLOBAL_LOCK(acpi_gbl_FACS, pending);
/*
* If the pending bit was set, we must write GBL_RLS to the control
* register
*/
if (pending) {
status =
acpi_write_bit_register
(ACPI_BITREG_GLOBAL_LOCK_RELEASE,
ACPI_ENABLE_EVENT);
}
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Released hardware Global Lock\n"));
}
acpi_gbl_global_lock_acquired = FALSE;
/* Release the local GL mutex */
acpi_ev_global_lock_thread_id = 0;
acpi_ev_global_lock_acquired = 0;
acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex);
return_ACPI_STATUS(status);
}
/******************************************************************************
*
* FUNCTION: acpi_ev_terminate
......
...@@ -55,6 +55,8 @@ static u8
acpi_ev_has_default_handler(struct acpi_namespace_node *node,
acpi_adr_space_type space_id);
static void acpi_ev_orphan_ec_reg_method(void);
static acpi_status
acpi_ev_reg_run(acpi_handle obj_handle,
u32 level, void *context, void **return_value);
...@@ -561,7 +563,9 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
/* Now stop region accesses by executing the _REG method */
status = acpi_ev_execute_reg_method(region_obj, 0); status =
acpi_ev_execute_reg_method(region_obj,
ACPI_REG_DISCONNECT);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"from region _REG, [%s]",
...@@ -1062,6 +1066,12 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run,
NULL, &space_id, NULL);
/* Special case for EC: handle "orphan" _REG methods with no region */
if (space_id == ACPI_ADR_SPACE_EC) {
acpi_ev_orphan_ec_reg_method();
}
return_ACPI_STATUS(status);
}
...@@ -1120,6 +1130,113 @@ acpi_ev_reg_run(acpi_handle obj_handle,
return (AE_OK);
}
status = acpi_ev_execute_reg_method(obj_desc, 1); status = acpi_ev_execute_reg_method(obj_desc, ACPI_REG_CONNECT);
return (status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_orphan_ec_reg_method
*
* PARAMETERS: None
*
* RETURN: None
*
* DESCRIPTION: Execute an "orphan" _REG method that appears under the EC
* device. This is a _REG method that has no corresponding region
* within the EC device scope. The orphan _REG method appears to
* have been enabled by the description of the ECDT in the ACPI
* specification: "The availability of the region space can be
* detected by providing a _REG method object underneath the
* Embedded Controller device."
*
* To quickly access the EC device, we use the EC_ID that appears
* within the ECDT. Otherwise, we would need to perform a time-
* consuming namespace walk, executing _HID methods to find the
* EC device.
*
******************************************************************************/
static void acpi_ev_orphan_ec_reg_method(void)
{
struct acpi_table_ecdt *table;
acpi_status status;
struct acpi_object_list args;
union acpi_object objects[2];
struct acpi_namespace_node *ec_device_node;
struct acpi_namespace_node *reg_method;
struct acpi_namespace_node *next_node;
ACPI_FUNCTION_TRACE(ev_orphan_ec_reg_method);
/* Get the ECDT (if present in system) */
status = acpi_get_table(ACPI_SIG_ECDT, 0,
ACPI_CAST_INDIRECT_PTR(struct acpi_table_header,
&table));
if (ACPI_FAILURE(status)) {
return_VOID;
}
/* We need a valid EC_ID string */
if (!(*table->id)) {
return_VOID;
}
/* Namespace is currently locked, must release */
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
/* Get a handle to the EC device referenced in the ECDT */
status = acpi_get_handle(NULL,
ACPI_CAST_PTR(char, table->id),
ACPI_CAST_PTR(acpi_handle, &ec_device_node));
if (ACPI_FAILURE(status)) {
goto exit;
}
/* Get a handle to a _REG method immediately under the EC device */
status = acpi_get_handle(ec_device_node,
METHOD_NAME__REG, ACPI_CAST_PTR(acpi_handle,
&reg_method));
if (ACPI_FAILURE(status)) {
goto exit;
}
/*
* Execute the _REG method only if there is no Operation Region in
* this scope with the Embedded Controller space ID. Otherwise, it
* will already have been executed. Note, this allows for Regions
* with other space IDs to be present; but the code below will then
* execute the _REG method with the EC space ID argument.
*/
next_node = acpi_ns_get_next_node(ec_device_node, NULL);
while (next_node) {
if ((next_node->type == ACPI_TYPE_REGION) &&
(next_node->object) &&
(next_node->object->region.space_id == ACPI_ADR_SPACE_EC)) {
goto exit; /* Do not execute _REG */
}
next_node = acpi_ns_get_next_node(ec_device_node, next_node);
}
/* Evaluate the _REG(EC,Connect) method */
args.count = 2;
args.pointer = objects;
objects[0].type = ACPI_TYPE_INTEGER;
objects[0].integer.value = ACPI_ADR_SPACE_EC;
objects[1].type = ACPI_TYPE_INTEGER;
objects[1].integer.value = ACPI_REG_CONNECT;
status = acpi_evaluate_object(reg_method, NULL, &args, NULL);
exit:
/* We ignore all errors from above, don't care */
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
return_VOID;
}
...@@ -637,7 +637,7 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
status =
acpi_ev_execute_reg_method
(region_obj, 1); (region_obj, ACPI_REG_CONNECT);
if (acpi_ns_locked) {
status =
......
...@@ -130,20 +130,21 @@ acpi_install_address_space_handler(acpi_handle device,
case ACPI_ADR_SPACE_PCI_CONFIG:
case ACPI_ADR_SPACE_DATA_TABLE:
if (acpi_gbl_reg_methods_executed) { if (!acpi_gbl_reg_methods_executed) {
/* Run all _REG methods for this address space */ /* We will defer execution of the _REG methods for this space */
goto unlock_and_exit;
status = acpi_ev_execute_reg_methods(node, space_id);
}
break;
default:
status = acpi_ev_execute_reg_methods(node, space_id);
break;
}
/* Run all _REG methods for this address space */
status = acpi_ev_execute_reg_methods(node, space_id);
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
......
...@@ -305,7 +305,8 @@ acpi_ex_create_region(u8 * aml_start,
* range
*/
if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
(region_space < ACPI_USER_REGION_BEGIN)) { (region_space < ACPI_USER_REGION_BEGIN) &&
(region_space != ACPI_ADR_SPACE_DATA_TABLE)) {
ACPI_ERROR((AE_INFO, "Invalid AddressSpace type 0x%X",
region_space));
return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
......
...@@ -74,7 +74,6 @@ ACPI_MODULE_NAME("nsrepair") ...@@ -74,7 +74,6 @@ ACPI_MODULE_NAME("nsrepair")
* *
* Additional possible repairs: * Additional possible repairs:
* *
* Optional/unnecessary NULL package elements removed
* Required package elements that are NULL replaced by Integer/String/Buffer * Required package elements that are NULL replaced by Integer/String/Buffer
* Incorrect standalone package wrapped with required outer package * Incorrect standalone package wrapped with required outer package
* *
...@@ -623,16 +622,12 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data, ...@@ -623,16 +622,12 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
ACPI_FUNCTION_NAME(ns_remove_null_elements); ACPI_FUNCTION_NAME(ns_remove_null_elements);
/* /*
* PTYPE1 packages contain no subpackages. * We can safely remove all NULL elements from these package types:
* PTYPE2 packages contain a variable number of sub-packages. We can * PTYPE1_VAR packages contain a variable number of simple data types.
* safely remove all NULL elements from the PTYPE2 packages. * PTYPE2 packages contain a variable number of sub-packages.
*/
switch (package_type) {
case ACPI_PTYPE1_FIXED:
case ACPI_PTYPE1_VAR:
case ACPI_PTYPE1_OPTION:
return;
case ACPI_PTYPE2:
case ACPI_PTYPE2_COUNT:
case ACPI_PTYPE2_PKG_COUNT:
...@@ -642,6 +637,8 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
break;
default:
case ACPI_PTYPE1_FIXED:
case ACPI_PTYPE1_OPTION:
return;
}
......
...@@ -170,8 +170,7 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
"SMBus",
"SystemCMOS",
"PCIBARTarget",
"IPMI", "IPMI"
"DataTable"
};
char *acpi_ut_get_region_name(u8 space_id)
...@@ -179,6 +178,8 @@ char *acpi_ut_get_region_name(u8 space_id)
if (space_id >= ACPI_USER_REGION_BEGIN) {
return ("UserDefinedRegion");
} else if (space_id == ACPI_ADR_SPACE_DATA_TABLE) {
return ("DataTable");
} else if (space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
return ("FunctionalFixedHW");
} else if (space_id >= ACPI_NUM_PREDEFINED_REGIONS) {
......
...@@ -83,9 +83,15 @@ acpi_status acpi_ut_mutex_initialize(void)
/* Create the spinlocks for use at interrupt level */
spin_lock_init(acpi_gbl_gpe_lock); status = acpi_os_create_lock (&acpi_gbl_gpe_lock);
spin_lock_init(acpi_gbl_hardware_lock); if (ACPI_FAILURE (status)) {
spin_lock_init(acpi_ev_global_lock_pending_lock); return_ACPI_STATUS (status);
}
status = acpi_os_create_lock (&acpi_gbl_hardware_lock);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
/* Mutex for _OSI support */
status = acpi_os_create_mutex(&acpi_gbl_osi_mutex);
......
...@@ -227,7 +227,7 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
acpi_status status = AE_OK;
char object_name[5] = { '_', 'P', 'S', '0' + state, '\0' };
if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3)) if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
return -EINVAL;
/* Make sure this is a valid target state */
......
/*
* debugfs.c - ACPI debugfs interface to userspace.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <acpi/acpi_drivers.h>
#include "internal.h"
#define _COMPONENT ACPI_SYSTEM_COMPONENT
ACPI_MODULE_NAME("custom_method");
MODULE_LICENSE("GPL");
static struct dentry *cm_dentry;
/* /sys/kernel/debug/acpi/custom_method */
static ssize_t cm_write(struct file *file, const char __user * user_buf,
size_t count, loff_t *ppos)
{
static char *buf;
static u32 max_size;
static u32 uncopied_bytes;
struct acpi_table_header table;
acpi_status status;
if (!(*ppos)) {
/* parse the table header to get the table length */
if (count <= sizeof(struct acpi_table_header))
return -EINVAL;
if (copy_from_user(&table, user_buf,
sizeof(struct acpi_table_header)))
return -EFAULT;
uncopied_bytes = max_size = table.length;
buf = kzalloc(max_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
}
if (buf == NULL)
return -EINVAL;
if ((*ppos > max_size) ||
(*ppos + count > max_size) ||
(*ppos + count < count) ||
(count > uncopied_bytes))
return -EINVAL;
if (copy_from_user(buf + (*ppos), user_buf, count)) {
kfree(buf);
buf = NULL;
return -EFAULT;
}
uncopied_bytes -= count;
*ppos += count;
if (!uncopied_bytes) {
status = acpi_install_method(buf);
kfree(buf);
buf = NULL;
if (ACPI_FAILURE(status))
return -EINVAL;
add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
}
return count;
}
static const struct file_operations cm_fops = {
.write = cm_write,
.llseek = default_llseek,
};
static int __init acpi_custom_method_init(void)
{
if (acpi_debugfs_dir == NULL)
return -ENOENT;
cm_dentry = debugfs_create_file("custom_method", S_IWUSR,
acpi_debugfs_dir, NULL, &cm_fops);
if (cm_dentry == NULL)
return -ENODEV;
return 0;
}
static void __exit acpi_custom_method_exit(void)
{
if (cm_dentry)
debugfs_remove(cm_dentry);
}
module_init(acpi_custom_method_init);
module_exit(acpi_custom_method_exit);
...@@ -3,100 +3,16 @@
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <acpi/acpi_drivers.h>
#define _COMPONENT ACPI_SYSTEM_COMPONENT
ACPI_MODULE_NAME("debugfs");
struct dentry *acpi_debugfs_dir;
EXPORT_SYMBOL_GPL(acpi_debugfs_dir);
/* /sys/modules/acpi/parameters/aml_debug_output */ void __init acpi_debugfs_init(void)
module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
bool, 0644);
MODULE_PARM_DESC(aml_debug_output,
"To enable/disable the ACPI Debug Object output.");
/* /sys/kernel/debug/acpi/custom_method */
static ssize_t cm_write(struct file *file, const char __user * user_buf,
size_t count, loff_t *ppos)
{ {
static char *buf; acpi_debugfs_dir = debugfs_create_dir("acpi", NULL);
static u32 max_size;
static u32 uncopied_bytes;
struct acpi_table_header table;
acpi_status status;
if (!(*ppos)) {
/* parse the table header to get the table length */
if (count <= sizeof(struct acpi_table_header))
return -EINVAL;
if (copy_from_user(&table, user_buf,
sizeof(struct acpi_table_header)))
return -EFAULT;
uncopied_bytes = max_size = table.length;
buf = kzalloc(max_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
}
if (buf == NULL)
return -EINVAL;
if ((*ppos > max_size) ||
(*ppos + count > max_size) ||
(*ppos + count < count) ||
(count > uncopied_bytes))
return -EINVAL;
if (copy_from_user(buf + (*ppos), user_buf, count)) {
kfree(buf);
buf = NULL;
return -EFAULT;
}
uncopied_bytes -= count;
*ppos += count;
if (!uncopied_bytes) {
status = acpi_install_method(buf);
kfree(buf);
buf = NULL;
if (ACPI_FAILURE(status))
return -EINVAL;
add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
}
return count;
}
static const struct file_operations cm_fops = {
.write = cm_write,
.llseek = default_llseek,
};
int __init acpi_debugfs_init(void)
{
struct dentry *acpi_dir, *cm_dentry;
acpi_dir = debugfs_create_dir("acpi", NULL);
if (!acpi_dir)
goto err;
cm_dentry = debugfs_create_file("custom_method", S_IWUSR,
acpi_dir, NULL, &cm_fops);
if (!cm_dentry)
goto err;
return 0;
err:
if (acpi_dir)
debugfs_remove(acpi_dir);
return -EINVAL;
}
...@@ -69,7 +69,6 @@ enum ec_command {
#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
#define ACPI_EC_CDELAY 10 /* Wait 10us before polling EC */
#define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
#define ACPI_EC_STORM_THRESHOLD 8 /* number of false interrupts
...@@ -433,8 +432,7 @@ EXPORT_SYMBOL(ec_write);
int ec_transaction(u8 command,
const u8 * wdata, unsigned wdata_len,
u8 * rdata, unsigned rdata_len, u8 * rdata, unsigned rdata_len)
int force_poll)
{
struct transaction t = {.command = command,
.wdata = wdata, .rdata = rdata,
...@@ -592,8 +590,6 @@ static void acpi_ec_gpe_query(void *ec_cxt)
mutex_unlock(&ec->lock);
}
static void acpi_ec_gpe_query(void *ec_cxt);
static int ec_check_sci(struct acpi_ec *ec, u8 state)
{
if (state & ACPI_EC_FLAG_SCI) {
...@@ -808,8 +804,6 @@ static int acpi_ec_add(struct acpi_device *device)
return -EINVAL;
}
ec->handle = device->handle;
/* Find and register all query methods */
acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
acpi_ec_register_query_methods, NULL, ec, NULL);
...@@ -938,8 +932,19 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
ec_flag_msi, "MSI hardware", {
DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR")}, NULL},
{
ec_flag_msi, "Quanta hardware", {
DMI_MATCH(DMI_SYS_VENDOR, "Quanta"),
DMI_MATCH(DMI_PRODUCT_NAME, "TW8/SW8/DW8"),}, NULL},
{
ec_flag_msi, "Quanta hardware", {
DMI_MATCH(DMI_SYS_VENDOR, "Quanta"),
DMI_MATCH(DMI_PRODUCT_NAME, "TW9/SW9"),}, NULL},
{
ec_validate_ecdt, "ASUS hardware", { ec_validate_ecdt, "ASUS hardware", {
DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL}, DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
{
ec_validate_ecdt, "ASUS hardware", {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
{}, {},
}; };
......
...@@ -28,9 +28,10 @@ int acpi_scan_init(void);
int acpi_sysfs_init(void);
#ifdef CONFIG_DEBUG_FS
extern struct dentry *acpi_debugfs_dir;
int acpi_debugfs_init(void);
#else
static inline int acpi_debugfs_init(void) { return 0; } static inline void acpi_debugfs_init(void) { return; }
#endif
/* --------------------------------------------------------------------------
......
...@@ -902,14 +902,6 @@ void acpi_os_wait_events_complete(void *context)
EXPORT_SYMBOL(acpi_os_wait_events_complete);
/*
* Deallocate the memory for a spinlock.
*/
void acpi_os_delete_lock(acpi_spinlock handle)
{
return;
}
acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
...@@ -1340,6 +1332,31 @@ int acpi_resources_are_enforced(void)
}
EXPORT_SYMBOL(acpi_resources_are_enforced);
/*
* Create and initialize a spinlock.
*/
acpi_status
acpi_os_create_lock(acpi_spinlock *out_handle)
{
spinlock_t *lock;
lock = ACPI_ALLOCATE(sizeof(spinlock_t));
if (!lock)
return AE_NO_MEMORY;
spin_lock_init(lock);
*out_handle = lock;
return AE_OK;
}
/*
* Deallocate the memory for a spinlock.
*/
void acpi_os_delete_lock(acpi_spinlock handle)
{
ACPI_FREE(handle);
}
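For context, here is a hedged, caller-side sketch (my_lock, my_init(), my_critical_work() and my_exit() are hypothetical names) of how code layered on this interface pairs the new acpi_os_create_lock() with the existing acquire, release, and delete primitives declared in acpiosxf.h:

/*
 * Hedged sketch: typical lifecycle of a lock obtained from
 * acpi_os_create_lock().  Only the acpi_os_* calls shown in this
 * patch are used; everything else is illustrative.
 */
#include <acpi/acpi.h>

static acpi_spinlock my_lock;

acpi_status my_init(void)
{
	/* Allocate and initialize the spinlock */
	return acpi_os_create_lock(&my_lock);
}

void my_critical_work(void)
{
	acpi_cpu_flags flags;

	flags = acpi_os_acquire_lock(my_lock);
	/* ... touch data shared with interrupt-level code ... */
	acpi_os_release_lock(my_lock, flags);
}

void my_exit(void)
{
	/* Free the memory backing the spinlock */
	acpi_os_delete_lock(my_lock);
}

This mirrors how acpi_ut_mutex_initialize() now creates acpi_gbl_gpe_lock and acpi_gbl_hardware_lock, and how evglock.c acquires and releases acpi_gbl_global_lock_pending_lock.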
/*
* Acquire a spinlock.
*
......
...@@ -37,7 +37,6 @@ static struct dmi_system_id __initdata processor_idle_dmi_table[] = {
{},
};
#ifdef CONFIG_SMP
static int map_lapic_id(struct acpi_subtable_header *entry,
u32 acpi_id, int *apic_id)
{
...@@ -165,7 +164,9 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
{
#ifdef CONFIG_SMP
int i;
#endif
int apic_id = -1;
apic_id = map_mat_entry(handle, type, acpi_id);
...@@ -174,14 +175,19 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
if (apic_id == -1)
return apic_id;
#ifdef CONFIG_SMP
for_each_possible_cpu(i) {
if (cpu_physical_id(i) == apic_id)
return i;
}
#else
/* In UP kernel, only processor 0 is valid */
if (apic_id == 0)
return apic_id;
#endif
return -1;
}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);
#endif
static bool __init processor_physically_present(acpi_handle handle)
{
...@@ -217,7 +223,7 @@ static bool __init processor_physically_present(acpi_handle handle)
type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
cpuid = acpi_get_cpuid(handle, type, acpi_id);
if ((cpuid == -1) && (num_possible_cpus() > 1)) if (cpuid == -1)
return false;
return true;
......
...@@ -220,6 +220,14 @@ module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
NULL, 0644);
#endif /* CONFIG_ACPI_DEBUG */
/* /sys/modules/acpi/parameters/aml_debug_output */
module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
bool, 0644);
MODULE_PARM_DESC(aml_debug_output,
"To enable/disable the ACPI Debug Object output.");
/* /sys/module/acpi/parameters/acpica_version */
static int param_get_acpica_version(char *buffer, struct kernel_param *kp)
{
......
...@@ -195,6 +195,8 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
return PCI_D2;
case ACPI_STATE_D3:
return PCI_D3hot;
case ACPI_STATE_D3_COLD:
return PCI_D3cold;
}
return PCI_POWER_ERROR;
}
......
...@@ -200,8 +200,8 @@ static bool extra_features;
* watching the output of address 0x4F (do an ec_transaction writing 0x33
* into 0x4F and read a few bytes from the output, like so:
* u8 writeData = 0x33;
* ec_transaction(0x4F, &writeData, 1, buffer, 32, 0); * ec_transaction(0x4F, &writeData, 1, buffer, 32);
* That address is labelled "fan1 table information" in the service manual. * That address is labeled "fan1 table information" in the service manual.
* It should be clear which value in 'buffer' changes). This seems to be
* related to fan speed. It isn't a proper 'realtime' fan speed value
* though, because physically stopping or speeding up the fan doesn't
...@@ -286,7 +286,7 @@ static int get_backlight_level(void)
static void set_backlight_state(bool on)
{
u8 data = on ? BACKLIGHT_STATE_ON_DATA : BACKLIGHT_STATE_OFF_DATA;
ec_transaction(BACKLIGHT_STATE_ADDR, &data, 1, NULL, 0, 0); ec_transaction(BACKLIGHT_STATE_ADDR, &data, 1, NULL, 0);
}
...@@ -294,24 +294,24 @@ static void set_backlight_state(bool on)
static void pwm_enable_control(void)
{
unsigned char writeData = PWM_ENABLE_DATA;
ec_transaction(PWM_ENABLE_ADDR, &writeData, 1, NULL, 0, 0); ec_transaction(PWM_ENABLE_ADDR, &writeData, 1, NULL, 0);
}
static void pwm_disable_control(void)
{
unsigned char writeData = PWM_DISABLE_DATA;
ec_transaction(PWM_DISABLE_ADDR, &writeData, 1, NULL, 0, 0); ec_transaction(PWM_DISABLE_ADDR, &writeData, 1, NULL, 0);
}
static void set_pwm(int pwm)
{
ec_transaction(PWM_ADDRESS, &pwm_lookup_table[pwm], 1, NULL, 0, 0); ec_transaction(PWM_ADDRESS, &pwm_lookup_table[pwm], 1, NULL, 0);
}
static int get_fan_rpm(void)
{
u8 value, data = FAN_DATA;
ec_transaction(FAN_ADDRESS, &data, 1, &value, 1, 0); ec_transaction(FAN_ADDRESS, &data, 1, &value, 1);
return 100 * (int)value;
}
......
...@@ -135,7 +135,7 @@ static int set_lcd_level(int level)
buf[1] = (u8) (level*31);
return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, buf, sizeof(buf),
NULL, 0, 1); NULL, 0);
}
static int get_lcd_level(void)
...@@ -144,7 +144,7 @@ static int get_lcd_level(void)
int result;
result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1,
&rdata, 1, 1); &rdata, 1);
if (result < 0)
return result;
...@@ -157,7 +157,7 @@ static int get_auto_brightness(void)
int result;
result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1,
&rdata, 1, 1); &rdata, 1);
if (result < 0)
return result;
...@@ -172,7 +172,7 @@ static int set_auto_brightness(int enable)
wdata[0] = 4;
result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 1,
&rdata, 1, 1); &rdata, 1);
if (result < 0)
return result;
...@@ -180,7 +180,7 @@ static int set_auto_brightness(int enable)
wdata[1] = (rdata & 0xF7) | (enable ? 8 : 0);
return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 2,
NULL, 0, 1); NULL, 0);
}
static ssize_t set_device_state(const char *buf, size_t count, u8 mask)
...@@ -217,7 +217,7 @@ static int get_wireless_state(int *wlan, int *bluetooth)
u8 wdata = 0, rdata;
int result;
result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1, 1); result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1);
if (result < 0)
return -1;
......
...@@ -499,7 +499,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
dev_set_drvdata(hwmon->device, hwmon);
result = device_create_file(hwmon->device, &dev_attr_name);
if (result)
goto unregister_hwmon_device; goto free_mem;
register_sys_interface:
tz->hwmon = hwmon;
...@@ -513,7 +513,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
sysfs_attr_init(&tz->temp_input.attr.attr);
result = device_create_file(hwmon->device, &tz->temp_input.attr);
if (result)
goto unregister_hwmon_device; goto unregister_name;
if (tz->ops->get_crit_temp) {
unsigned long temperature;
...@@ -527,7 +527,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
result = device_create_file(hwmon->device,
&tz->temp_crit.attr);
if (result)
goto unregister_hwmon_device; goto unregister_input;
}
}
...@@ -539,9 +539,9 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
return 0;
unregister_hwmon_device: unregister_input:
device_remove_file(hwmon->device, &tz->temp_crit.attr);
device_remove_file(hwmon->device, &tz->temp_input.attr);
unregister_name:
if (new_hwmon_device) {
device_remove_file(hwmon->device, &dev_attr_name);
hwmon_device_unregister(hwmon->device);
......
...@@ -98,6 +98,9 @@ acpi_os_table_override(struct acpi_table_header *existing_table,
/*
* Spinlock primitives
*/
acpi_status
acpi_os_create_lock(acpi_spinlock *out_handle);
void acpi_os_delete_lock(acpi_spinlock handle);
acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock handle);
......
...@@ -47,7 +47,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
#define ACPI_CA_VERSION 0x20110316 #define ACPI_CA_VERSION 0x20110413
#include "actypes.h"
#include "actbl.h"
......
...@@ -501,8 +501,9 @@ typedef u64 acpi_integer;
#define ACPI_STATE_D1 (u8) 1
#define ACPI_STATE_D2 (u8) 2
#define ACPI_STATE_D3 (u8) 3
#define ACPI_D_STATES_MAX ACPI_STATE_D3 #define ACPI_STATE_D3_COLD (u8) 4
#define ACPI_D_STATE_COUNT 4 #define ACPI_D_STATES_MAX ACPI_STATE_D3_COLD
#define ACPI_D_STATE_COUNT 5
#define ACPI_STATE_C0 (u8) 0
#define ACPI_STATE_C1 (u8) 1
...@@ -712,8 +713,24 @@ typedef u8 acpi_adr_space_type;
#define ACPI_ADR_SPACE_CMOS (acpi_adr_space_type) 5
#define ACPI_ADR_SPACE_PCI_BAR_TARGET (acpi_adr_space_type) 6
#define ACPI_ADR_SPACE_IPMI (acpi_adr_space_type) 7
#define ACPI_ADR_SPACE_DATA_TABLE (acpi_adr_space_type) 8
#define ACPI_ADR_SPACE_FIXED_HARDWARE (acpi_adr_space_type) 127 #define ACPI_NUM_PREDEFINED_REGIONS 8
/*
* Special Address Spaces
*
* Note: A Data Table region is a special type of operation region
* that has its own AML opcode. However, internally, the AML
* interpreter simply creates an operation region with an address
* space type of ACPI_ADR_SPACE_DATA_TABLE.
*/
#define ACPI_ADR_SPACE_DATA_TABLE (acpi_adr_space_type) 0x7E /* Internal to ACPICA only */
#define ACPI_ADR_SPACE_FIXED_HARDWARE (acpi_adr_space_type) 0x7F
/* Values for _REG connection code */
#define ACPI_REG_DISCONNECT 0
#define ACPI_REG_CONNECT 1
/*
* bit_register IDs
......
...@@ -310,14 +310,7 @@ static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
/* in processor_core.c */
void acpi_processor_set_pdc(acpi_handle handle);
#ifdef CONFIG_SMP
int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id);
#else
static inline int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
{
return -1;
}
#endif
/* in processor_throttling.c */
int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
......
...@@ -150,8 +150,7 @@ extern int ec_read(u8 addr, u8 *val);
extern int ec_write(u8 addr, u8 val);
extern int ec_transaction(u8 command,
const u8 *wdata, unsigned wdata_len,
u8 *rdata, unsigned rdata_len, u8 *rdata, unsigned rdata_len);
int force_poll);
#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
......