Commit 9fdb62af authored by Len Brown

[ACPI] merge 3549 4320 4485 4588 4980 5483 5651 acpica asus fops pnpacpi branches into release

Signed-off-by: Len Brown <len.brown@intel.com>
......@@ -452,6 +452,11 @@ running once the system is up.
eata= [HW,SCSI]
ec_intr= [HW,ACPI] ACPI Embedded Controller interrupt mode
Format: <int>
0: polling mode
non-0: interrupt mode (default)
eda= [HW,PS2]
edb= [HW,PS2]
......
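The ec_intr= entry added above documents a new Embedded Controller boot option: 0 selects polling, any other value the default interrupt mode. As a rough illustration of how such an integer boot parameter is usually wired up, here is a hedged sketch using the kernel's __setup() mechanism; the variable and handler names are ours, not necessarily what drivers/acpi/ec.c uses.

#include <linux/init.h>
#include <linux/kernel.h>

/* Illustrative only -- the real parsing lives in the EC driver. */
static int ec_use_intr = 1;             /* non-0: interrupt mode (default) */

static int __init ec_intr_setup(char *str)
{
        int val;

        if (get_option(&str, &val))
                ec_use_intr = val;      /* 0 forces polling mode */
        return 1;                       /* parameter consumed */
}
__setup("ec_intr=", ec_intr_setup);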
......@@ -3,6 +3,6 @@ obj-$(CONFIG_X86_IO_APIC) += earlyquirk.o
obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
ifneq ($(CONFIG_ACPI_PROCESSOR),)
obj-y += cstate.o
obj-y += cstate.o processor.o
endif
......@@ -464,7 +464,7 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
* success: return IRQ number (>=0)
* failure: return < 0
*/
int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
int acpi_register_gsi(u32 gsi, int triggering, int polarity)
{
unsigned int irq;
unsigned int plat_gsi = gsi;
......@@ -476,14 +476,14 @@ int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
extern void eisa_set_level_irq(unsigned int irq);
if (edge_level == ACPI_LEVEL_SENSITIVE)
if (triggering == ACPI_LEVEL_SENSITIVE)
eisa_set_level_irq(gsi);
}
#endif
#ifdef CONFIG_X86_IO_APIC
if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
plat_gsi = mp_register_gsi(gsi, edge_level, active_high_low);
plat_gsi = mp_register_gsi(gsi, triggering, polarity);
}
#endif
acpi_gsi_to_irq(plat_gsi, &irq);
......
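The hunk above is part of a tree-wide rename: acpi_register_gsi() now takes the ACPI spec constants directly as "triggering" and "polarity" instead of ad-hoc edge_level/active_high_low flags. A minimal caller-side sketch, using only the return convention stated in the header comment (>= 0 is the Linux IRQ, < 0 is failure); the function name is ours.

/* Sketch only: a caller mapping an ACPI-described GSI onto a Linux IRQ. */
static int example_map_gsi(u32 gsi)
{
        int irq;

        /* PCI INTx interrupts are level-triggered and active-low */
        irq = acpi_register_gsi(gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
        if (irq < 0)
                printk(KERN_ERR "could not route GSI %u\n", gsi);
        return irq;     /* >= 0: Linux IRQ usable with request_irq() */
}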
......@@ -14,64 +14,6 @@
#include <acpi/processor.h>
#include <asm/acpi.h>
static void acpi_processor_power_init_intel_pdc(struct acpi_processor_power
*pow)
{
struct acpi_object_list *obj_list;
union acpi_object *obj;
u32 *buf;
/* allocate and initialize pdc. It will be used later. */
obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
if (!obj_list) {
printk(KERN_ERR "Memory allocation error\n");
return;
}
obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
if (!obj) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj_list);
return;
}
buf = kmalloc(12, GFP_KERNEL);
if (!buf) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj);
kfree(obj_list);
return;
}
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;
obj->buffer.pointer = (u8 *) buf;
obj_list->count = 1;
obj_list->pointer = obj;
pow->pdc = obj_list;
return;
}
/* Initialize _PDC data based on the CPU vendor */
void acpi_processor_power_init_pdc(struct acpi_processor_power *pow,
unsigned int cpu)
{
struct cpuinfo_x86 *c = cpu_data + cpu;
pow->pdc = NULL;
if (c->x86_vendor == X86_VENDOR_INTEL)
acpi_processor_power_init_intel_pdc(pow);
return;
}
EXPORT_SYMBOL(acpi_processor_power_init_pdc);
/*
* Initialize bm_flags based on the CPU cache properties
* On SMP it depends on cache configuration
......
/*
* arch/i386/kernel/acpi/processor.c
*
* Copyright (C) 2005 Intel Corporation
* Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
* - Added _PDC for platforms with Intel CPUs
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <asm/acpi.h>
static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
{
struct acpi_object_list *obj_list;
union acpi_object *obj;
u32 *buf;
/* allocate and initialize pdc. It will be used later. */
obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
if (!obj_list) {
printk(KERN_ERR "Memory allocation error\n");
return;
}
obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
if (!obj) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj_list);
return;
}
buf = kmalloc(12, GFP_KERNEL);
if (!buf) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj);
kfree(obj_list);
return;
}
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
if (cpu_has(c, X86_FEATURE_EST))
buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;
obj->buffer.pointer = (u8 *) buf;
obj_list->count = 1;
obj_list->pointer = obj;
pr->pdc = obj_list;
return;
}
/* Initialize _PDC data based on the CPU vendor */
void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
{
unsigned int cpu = pr->id;
struct cpuinfo_x86 *c = cpu_data + cpu;
pr->pdc = NULL;
if (c->x86_vendor == X86_VENDOR_INTEL)
init_intel_pdc(pr, c);
return;
}
EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
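The new arch hook above only builds the 12-byte _PDC buffer (revision, count, capability bits) and parks it in pr->pdc; as the docstrings removed from the cpufreq drivers later in this merge note, the actual _PDC evaluation happens once, in drivers/acpi/processor*.c. Below is a minimal sketch of that evaluation step, assuming ACPICA's acpi_evaluate_object(); the function name is ours, not a quote of the driver.

/* Sketch only: handing the arch-built object list to the _PDC method. */
static acpi_status example_eval_pdc(acpi_handle handle,
                                    struct acpi_object_list *pdc_in)
{
        acpi_status status;

        if (!pdc_in)            /* arch hook chose not to build a buffer */
                return AE_OK;

        /* _PDC consumes the capability buffer and returns nothing */
        status = acpi_evaluate_object(handle, "_PDC", pdc_in, NULL);
        if (ACPI_FAILURE(status))
                printk(KERN_ERR "Could not evaluate _PDC: %s\n",
                       acpi_format_exception(status));
        return status;
}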
......@@ -295,68 +295,6 @@ acpi_cpufreq_guess_freq (
}
/*
* acpi_processor_cpu_init_pdc_est - let BIOS know about the SMP capabilities
* of this driver
* @perf: processor-specific acpi_io_data struct
* @cpu: CPU being initialized
*
* To avoid issues with legacy OSes, some BIOSes require to be informed of
* the SMP capabilities of OS P-state driver. Here we set the bits in _PDC
* accordingly, for Enhanced Speedstep. Actual call to _PDC is done in
* driver/acpi/processor.c
*/
static void
acpi_processor_cpu_init_pdc_est(
struct acpi_processor_performance *perf,
unsigned int cpu,
struct acpi_object_list *obj_list
)
{
union acpi_object *obj;
u32 *buf;
struct cpuinfo_x86 *c = cpu_data + cpu;
dprintk("acpi_processor_cpu_init_pdc_est\n");
if (!cpu_has(c, X86_FEATURE_EST))
return;
/* Initialize pdc. It will be used later. */
if (!obj_list)
return;
if (!(obj_list->count && obj_list->pointer))
return;
obj = obj_list->pointer;
if ((obj->buffer.length == 12) && obj->buffer.pointer) {
buf = (u32 *)obj->buffer.pointer;
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
perf->pdc = obj_list;
}
return;
}
/* CPU specific PDC initialization */
static void
acpi_processor_cpu_init_pdc(
struct acpi_processor_performance *perf,
unsigned int cpu,
struct acpi_object_list *obj_list
)
{
struct cpuinfo_x86 *c = cpu_data + cpu;
dprintk("acpi_processor_cpu_init_pdc\n");
perf->pdc = NULL;
if (cpu_has(c, X86_FEATURE_EST))
acpi_processor_cpu_init_pdc_est(perf, cpu, obj_list);
return;
}
static int
acpi_cpufreq_cpu_init (
struct cpufreq_policy *policy)
......@@ -367,14 +305,7 @@ acpi_cpufreq_cpu_init (
unsigned int result = 0;
struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
union acpi_object arg0 = {ACPI_TYPE_BUFFER};
u32 arg0_buf[3];
struct acpi_object_list arg_list = {1, &arg0};
dprintk("acpi_cpufreq_cpu_init\n");
/* setup arg_list for _PDC settings */
arg0.buffer.length = 12;
arg0.buffer.pointer = (u8 *) arg0_buf;
data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
if (!data)
......@@ -382,9 +313,7 @@ acpi_cpufreq_cpu_init (
acpi_io_data[cpu] = data;
acpi_processor_cpu_init_pdc(&data->acpi_data, cpu, &arg_list);
result = acpi_processor_register_performance(&data->acpi_data, cpu);
data->acpi_data.pdc = NULL;
if (result)
goto err_free;
......
......@@ -362,22 +362,10 @@ static struct acpi_processor_performance p;
*/
static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
{
union acpi_object arg0 = {ACPI_TYPE_BUFFER};
u32 arg0_buf[3];
struct acpi_object_list arg_list = {1, &arg0};
unsigned long cur_freq;
int result = 0, i;
unsigned int cpu = policy->cpu;
/* _PDC settings */
arg0.buffer.length = 12;
arg0.buffer.pointer = (u8 *) arg0_buf;
arg0_buf[0] = ACPI_PDC_REVISION_ID;
arg0_buf[1] = 1;
arg0_buf[2] = ACPI_PDC_EST_CAPABILITY_SMP_MSR;
p.pdc = &arg_list;
/* register with ACPI core */
if (acpi_processor_register_performance(&p, cpu)) {
dprintk(KERN_INFO PFX "obtaining ACPI data failed\n");
......
......@@ -1080,7 +1080,7 @@ void __init mp_config_acpi_legacy_irqs (void)
#define MAX_GSI_NUM 4096
int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
int mp_register_gsi (u32 gsi, int triggering, int polarity)
{
int ioapic = -1;
int ioapic_pin = 0;
......@@ -1129,7 +1129,7 @@ int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
if (edge_level) {
if (triggering == ACPI_LEVEL_SENSITIVE) {
/*
* For PCI devices assign IRQs in order, avoiding gaps
* due to unused I/O APIC pins.
......@@ -1151,8 +1151,8 @@ int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
}
io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
return gsi;
}
......
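With the rename, callers hand mp_register_gsi() the ACPI constants themselves, so the old truth-test "if (edge_level)" no longer expresses "level-triggered"; the patch therefore compares against ACPI_LEVEL_SENSITIVE explicitly and converts to the IO-APIC's own encoding at the io_apic_set_pci_routing() call. Two illustrative helpers that merely restate that conversion (the helper names are not from the kernel):

/* Restates the ternaries used above; IO-APIC encoding is 0/1. */
static inline int ioapic_trigger_from_acpi(int triggering)
{
        return (triggering == ACPI_EDGE_SENSITIVE) ? 0 : 1;    /* 0 = edge, 1 = level */
}

static inline int ioapic_polarity_from_acpi(int polarity)
{
        return (polarity == ACPI_ACTIVE_HIGH) ? 0 : 1;  /* 0 = active high, 1 = active low */
}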
......@@ -13,6 +13,11 @@ obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o
obj-$(CONFIG_IA64_HP_ZX1) += acpi-ext.o
obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += acpi-ext.o
ifneq ($(CONFIG_ACPI_PROCESSOR),)
obj-y += acpi-processor.o
endif
obj-$(CONFIG_IA64_PALINFO) += palinfo.o
obj-$(CONFIG_IOSAPIC) += iosapic.o
obj-$(CONFIG_MODULES) += module.o
......
......@@ -33,33 +33,33 @@ acpi_vendor_resource_match(struct acpi_resource *resource, void *context)
struct acpi_vendor_info *info = (struct acpi_vendor_info *)context;
struct acpi_resource_vendor *vendor;
struct acpi_vendor_descriptor *descriptor;
u32 length;
u32 byte_length;
if (resource->id != ACPI_RSTYPE_VENDOR)
if (resource->type != ACPI_RESOURCE_TYPE_VENDOR)
return AE_OK;
vendor = (struct acpi_resource_vendor *)&resource->data;
descriptor = (struct acpi_vendor_descriptor *)vendor->reserved;
if (vendor->length <= sizeof(*info->descriptor) ||
descriptor = (struct acpi_vendor_descriptor *)vendor->byte_data;
if (vendor->byte_length <= sizeof(*info->descriptor) ||
descriptor->guid_id != info->descriptor->guid_id ||
efi_guidcmp(descriptor->guid, info->descriptor->guid))
return AE_OK;
length = vendor->length - sizeof(struct acpi_vendor_descriptor);
info->data = acpi_os_allocate(length);
byte_length = vendor->byte_length - sizeof(struct acpi_vendor_descriptor);
info->data = acpi_os_allocate(byte_length);
if (!info->data)
return AE_NO_MEMORY;
memcpy(info->data,
vendor->reserved + sizeof(struct acpi_vendor_descriptor),
length);
info->length = length;
vendor->byte_data + sizeof(struct acpi_vendor_descriptor),
byte_length);
info->length = byte_length;
return AE_CTRL_TERMINATE;
}
acpi_status
acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor * id,
u8 ** data, u32 * length)
u8 ** data, u32 * byte_length)
{
struct acpi_vendor_info info;
......@@ -72,7 +72,7 @@ acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor * id,
return AE_NOT_FOUND;
*data = info.data;
*length = info.length;
*byte_length = info.length;
return AE_OK;
}
......
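The hunk above tracks the ACPICA rename of the vendor-resource fields (id becomes type, reserved becomes byte_data, length becomes byte_length). A hedged usage sketch of the acpi_find_vendor_resource() helper whose new signature appears above; the caller and descriptor are assumed to exist, and the firm points are only that a miss returns AE_NOT_FOUND and that the returned buffer holds byte_length bytes from acpi_os_allocate(), so it is released with acpi_os_free().

/* Sketch only: the surrounding caller and descriptor are assumptions. */
static acpi_status example_read_vendor_data(acpi_handle handle,
                                            struct acpi_vendor_descriptor *id)
{
        u8 *data;
        u32 byte_length;
        acpi_status status;

        status = acpi_find_vendor_resource(handle, id, &data, &byte_length);
        if (ACPI_FAILURE(status))
                return status;          /* AE_NOT_FOUND if no match in _CRS */

        /* ... consume 'data' (byte_length bytes) ... */
        acpi_os_free(data);             /* buffer came from acpi_os_allocate() */
        return AE_OK;
}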
/*
* arch/ia64/kernel/cpufreq/processor.c
*
* Copyright (C) 2005 Intel Corporation
* Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
* - Added _PDC for platforms with Intel CPUs
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <asm/acpi.h>
static void init_intel_pdc(struct acpi_processor *pr)
{
struct acpi_object_list *obj_list;
union acpi_object *obj;
u32 *buf;
/* allocate and initialize pdc. It will be used later. */
obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
if (!obj_list) {
printk(KERN_ERR "Memory allocation error\n");
return;
}
obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
if (!obj) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj_list);
return;
}
buf = kmalloc(12, GFP_KERNEL);
if (!buf) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj);
kfree(obj_list);
return;
}
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;
obj->buffer.pointer = (u8 *) buf;
obj_list->count = 1;
obj_list->pointer = obj;
pr->pdc = obj_list;
return;
}
/* Initialize _PDC data based on the CPU vendor */
void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
{
pr->pdc = NULL;
init_intel_pdc(pr);
return;
}
EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
......@@ -567,16 +567,16 @@ void __init acpi_numa_arch_fixup(void)
* success: return IRQ number (>=0)
* failure: return < 0
*/
int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
int acpi_register_gsi(u32 gsi, int triggering, int polarity)
{
if (has_8259 && gsi < 16)
return isa_irq_to_vector(gsi);
return iosapic_register_intr(gsi,
(active_high_low ==
(polarity ==
ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH :
IOSAPIC_POL_LOW,
(edge_level ==
(triggering ==
ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE :
IOSAPIC_LEVEL);
}
......
obj-$(CONFIG_IA64_ACPI_CPUFREQ) += acpi-cpufreq.o
......@@ -269,48 +269,6 @@ acpi_cpufreq_verify (
}
/*
* processor_init_pdc - let BIOS know about the SMP capabilities
* of this driver
* @perf: processor-specific acpi_io_data struct
* @cpu: CPU being initialized
*
* To avoid issues with legacy OSes, some BIOSes require to be informed of
* the SMP capabilities of OS P-state driver. Here we set the bits in _PDC
* accordingly. Actual call to _PDC is done in driver/acpi/processor.c
*/
static void
processor_init_pdc (
struct acpi_processor_performance *perf,
unsigned int cpu,
struct acpi_object_list *obj_list
)
{
union acpi_object *obj;
u32 *buf;
dprintk("processor_init_pdc\n");
perf->pdc = NULL;
/* Initialize pdc. It will be used later. */
if (!obj_list)
return;
if (!(obj_list->count && obj_list->pointer))
return;
obj = obj_list->pointer;
if ((obj->buffer.length == 12) && obj->buffer.pointer) {
buf = (u32 *)obj->buffer.pointer;
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
perf->pdc = obj_list;
}
return;
}
static int
acpi_cpufreq_cpu_init (
struct cpufreq_policy *policy)
......@@ -320,14 +278,7 @@ acpi_cpufreq_cpu_init (
struct cpufreq_acpi_io *data;
unsigned int result = 0;
union acpi_object arg0 = {ACPI_TYPE_BUFFER};
u32 arg0_buf[3];
struct acpi_object_list arg_list = {1, &arg0};
dprintk("acpi_cpufreq_cpu_init\n");
/* setup arg_list for _PDC settings */
arg0.buffer.length = 12;
arg0.buffer.pointer = (u8 *) arg0_buf;
data = kmalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
if (!data)
......@@ -337,9 +288,7 @@ acpi_cpufreq_cpu_init (
acpi_io_data[cpu] = data;
processor_init_pdc(&data->acpi_data, cpu, &arg_list);
result = acpi_processor_register_performance(&data->acpi_data, cpu);
data->acpi_data.pdc = NULL;
if (result)
goto err_free;
......
......@@ -193,12 +193,12 @@ add_io_space (struct pci_root_info *info, struct acpi_resource_address64 *addr)
goto free_resource;
}
min = addr->min_address_range;
min = addr->minimum;
max = min + addr->address_length - 1;
if (addr->attribute.io.translation_attribute == ACPI_SPARSE_TRANSLATION)
if (addr->info.io.translation_type == ACPI_SPARSE_TRANSLATION)
sparse = 1;
space_nr = new_space(addr->address_translation_offset, sparse);
space_nr = new_space(addr->translation_offset, sparse);
if (space_nr == ~0)
goto free_name;
......@@ -285,7 +285,7 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
if (addr.resource_type == ACPI_MEMORY_RANGE) {
flags = IORESOURCE_MEM;
root = &iomem_resource;
offset = addr.address_translation_offset;
offset = addr.translation_offset;
} else if (addr.resource_type == ACPI_IO_RANGE) {
flags = IORESOURCE_IO;
root = &ioport_resource;
......@@ -298,7 +298,7 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
window = &info->controller->window[info->controller->windows++];
window->resource.name = info->name;
window->resource.flags = flags;
window->resource.start = addr.min_address_range + offset;
window->resource.start = addr.minimum + offset;
window->resource.end = window->resource.start + addr.address_length - 1;
window->resource.child = NULL;
window->offset = offset;
......
obj-y := boot.o
boot-y := ../../../i386/kernel/acpi/boot.o
obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
ifneq ($(CONFIG_ACPI_PROCESSOR),)
obj-y += processor.o
endif
/*
* arch/x86_64/kernel/acpi/processor.c
*
* Copyright (C) 2005 Intel Corporation
* Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
* - Added _PDC for platforms with Intel CPUs
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <asm/acpi.h>
static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
{
struct acpi_object_list *obj_list;
union acpi_object *obj;
u32 *buf;
/* allocate and initialize pdc. It will be used later. */
obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
if (!obj_list) {
printk(KERN_ERR "Memory allocation error\n");
return;
}
obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
if (!obj) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj_list);
return;
}
buf = kmalloc(12, GFP_KERNEL);
if (!buf) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj);
kfree(obj_list);
return;
}
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;
obj->buffer.pointer = (u8 *) buf;
obj_list->count = 1;
obj_list->pointer = obj;
pr->pdc = obj_list;
return;
}
/* Initialize _PDC data based on the CPU vendor */
void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
{
unsigned int cpu = pr->id;
struct cpuinfo_x86 *c = cpu_data + cpu;
pr->pdc = NULL;
if (c->x86_vendor == X86_VENDOR_INTEL && cpu_has(c, X86_FEATURE_EST))
init_intel_pdc(pr, c);
return;
}
EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
......@@ -2027,7 +2027,7 @@ int __init io_apic_get_redir_entries (int ioapic)
}
int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
{
struct IO_APIC_route_entry entry;
unsigned long flags;
......@@ -2049,8 +2049,8 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
entry.delivery_mode = INT_DELIVERY_MODE;
entry.dest_mode = INT_DEST_MODE;
entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
entry.trigger = edge_level;
entry.polarity = active_high_low;
entry.trigger = triggering;
entry.polarity = polarity;
entry.mask = 1; /* Disabled (masked) */
irq = gsi_irq_sharing(irq);
......@@ -2065,9 +2065,9 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
"IRQ %d Mode:%i Active:%i)\n", ioapic,
mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
edge_level, active_high_low);
triggering, polarity);
ioapic_register_intr(irq, entry.vector, edge_level);
ioapic_register_intr(irq, entry.vector, triggering);
if (!ioapic && (irq < 16))
disable_8259A_irq(irq);
......
......@@ -915,7 +915,7 @@ void __init mp_config_acpi_legacy_irqs (void)
#define MAX_GSI_NUM 4096
int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
int mp_register_gsi(u32 gsi, int triggering, int polarity)
{
int ioapic = -1;
int ioapic_pin = 0;
......@@ -964,7 +964,7 @@ int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
if (edge_level) {
if (triggering == ACPI_LEVEL_SENSITIVE) {
/*
* For PCI devices assign IRQs in order, avoiding gaps
* due to unused I/O APIC pins.
......@@ -986,8 +986,8 @@ int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
}
io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
return gsi;
}
......
......@@ -267,7 +267,6 @@ config ACPI_DEBUG
config ACPI_EC
bool
depends on X86
default y
help
This driver is required on some systems for the proper operation of
......
......@@ -71,8 +71,8 @@ static struct acpi_driver acpi_memory_device_driver = {
struct acpi_memory_device {
acpi_handle handle;
unsigned int state; /* State of the memory device */
unsigned short cache_attribute; /* memory cache attribute */
unsigned short read_write_attribute; /* memory read/write attribute */
unsigned short caching; /* memory cache attribute */
unsigned short write_protect; /* memory read/write attribute */
u64 start_addr; /* Memory Range start physical addr */
u64 end_addr; /* Memory Range end physical addr */
};
......@@ -97,12 +97,12 @@ acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
if (ACPI_SUCCESS(status)) {
if (address64.resource_type == ACPI_MEMORY_RANGE) {
/* Populate the structure */
mem_device->cache_attribute =
address64.attribute.memory.cache_attribute;
mem_device->read_write_attribute =
address64.attribute.memory.read_write_attribute;
mem_device->start_addr = address64.min_address_range;
mem_device->end_addr = address64.max_address_range;
mem_device->caching =
address64.info.mem.caching;
mem_device->write_protect =
address64.info.mem.write_protect;
mem_device->start_addr = address64.minimum;
mem_device->end_addr = address64.maximum;
}
}
......@@ -250,7 +250,6 @@ static int acpi_memory_disable_device(struct acpi_memory_device *mem_device)
int result;
u64 start = mem_device->start_addr;
u64 len = mem_device->end_addr - start + 1;
unsigned long attr = mem_device->read_write_attribute;
ACPI_FUNCTION_TRACE("acpi_memory_disable_device");
......
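The memory-hotplug hunks above move from the old struct acpi_resource_address64 names (min_address_range, max_address_range, attribute.memory.*) to the new ones (minimum, maximum, info.mem.*). Here is a minimal sketch of reading a memory range with the renamed fields, assuming the same acpi_resource_to_address64() conversion the driver uses; the callback name is illustrative, and such a callback would typically be handed to acpi_walk_resources() on the device's _CRS.

/* Sketch: a resource-walk callback using the renamed address64 fields. */
static acpi_status example_mem_range(struct acpi_resource *res, void *context)
{
        struct acpi_resource_address64 addr;

        if (ACPI_FAILURE(acpi_resource_to_address64(res, &addr)))
                return AE_OK;           /* not an address descriptor, keep walking */

        if (addr.resource_type == ACPI_MEMORY_RANGE)
                printk(KERN_DEBUG "mem %llx-%llx caching=%u wp=%u\n",
                       (unsigned long long)addr.minimum,
                       (unsigned long long)addr.maximum,
                       (unsigned int)addr.info.mem.caching,
                       (unsigned int)addr.info.mem.write_protect);

        return AE_OK;
}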
......@@ -78,9 +78,9 @@ MODULE_LICENSE("GPL");
static uid_t asus_uid;
static gid_t asus_gid;
module_param(asus_uid, uint, 0);
MODULE_PARM_DESC(uid, "UID for entries in /proc/acpi/asus.\n");
MODULE_PARM_DESC(asus_uid, "UID for entries in /proc/acpi/asus.\n");
module_param(asus_gid, uint, 0);
MODULE_PARM_DESC(gid, "GID for entries in /proc/acpi/asus.\n");
MODULE_PARM_DESC(asus_gid, "GID for entries in /proc/acpi/asus.\n");
/* For each model, all features implemented,
* those marked with R are relative to HOTK, A for absolute */
......@@ -302,7 +302,7 @@ static struct model_data model_conf[END_MODEL] = {
.brightness_set = "SPLV",
.brightness_get = "GPLV",
.display_set = "SDSP",
.display_get = "\\SSTE"},
.display_get = "\\_SB.PCI0.P0P1.VGA.GETD"},
{
.name = "M6R",
.mt_mled = "MLED",
......@@ -851,6 +851,8 @@ static int __init asus_hotk_add_fs(struct acpi_device *device)
mode = S_IFREG | S_IRUGO | S_IWUGO;
} else {
mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP;
printk(KERN_WARNING " asus_uid and asus_gid parameters are "
"deprecated, use chown and chmod instead!\n");
}
acpi_device_dir(device) = asus_proc_dir;
......@@ -987,9 +989,21 @@ static int __init asus_hotk_get_info(void)
printk(KERN_NOTICE " BSTS called, 0x%02x returned\n",
bsts_result);
/* Samsung P30 has a device with a valid _HID whose INIT does not
* return anything. Catch this one and any similar here */
if (buffer.pointer == NULL) {
/* This is unlikely with implicit return */
if (buffer.pointer == NULL)
return -EINVAL;
model = (union acpi_object *) buffer.pointer;
/*
* Samsung P30 has a device with a valid _HID whose INIT does not
* return anything. It used to be possible to catch this exception,
* but the implicit return code will now happily confuse the
* driver. We assume that every ACPI_TYPE_STRING is a valid model
* identifier but it's still possible to get completely bogus data.
*/
if (model->type == ACPI_TYPE_STRING) {
printk(KERN_NOTICE " %s model detected, ", model->string.pointer);
} else {
if (asus_info && /* Samsung P30 */
strncmp(asus_info->oem_table_id, "ODEM", 4) == 0) {
hotk->model = P30;
......@@ -1002,13 +1016,10 @@ static int __init asus_hotk_get_info(void)
"the developers with your DSDT\n");
}
hotk->methods = &model_conf[hotk->model];
return AE_OK;
}
model = (union acpi_object *)buffer.pointer;
if (model->type == ACPI_TYPE_STRING) {
printk(KERN_NOTICE " %s model detected, ",
model->string.pointer);
acpi_os_free(model);
return AE_OK;
}
hotk->model = END_MODEL;
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -293,7 +293,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
+ (acpi_integer) arg->common.value.size;
if (position > ACPI_UINT32_MAX) {
ACPI_REPORT_ERROR(("Field [%4.4s] bit offset too large (> 0xFFFFFFFF)\n", (char *)&info->field_node->name));
ACPI_REPORT_ERROR(("Field [%4.4s] bit offset too large (> 0xFFFFFFFF)\n", ACPI_CAST_PTR(char, &info->field_node->name)));
return_ACPI_STATUS(AE_SUPPORT);
}
......@@ -302,8 +302,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Invalid opcode in field list: %X\n",
ACPI_REPORT_ERROR(("Invalid opcode in field list: %X\n",
arg->common.aml_opcode));
return_ACPI_STATUS(AE_AML_BAD_OPCODE);
}
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -84,7 +84,7 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
acpi_object_type type;
acpi_status status;
ACPI_FUNCTION_NAME("ds_init_one_object");
ACPI_FUNCTION_ENTRY();
/*
* We are only interested in NS nodes owned by the table that
......@@ -105,11 +105,7 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
status = acpi_ds_initialize_region(obj_handle);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Region %p [%4.4s] - Init failure, %s\n",
obj_handle,
acpi_ut_get_node_name(obj_handle),
acpi_format_exception(status)));
ACPI_REPORT_ERROR(("Region %p [%4.4s] - Init failure, %s\n", obj_handle, acpi_ut_get_node_name(obj_handle), acpi_format_exception(status)));
}
info->op_region_count++;
......@@ -117,14 +113,6 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
case ACPI_TYPE_METHOD:
/*
* Print a dot for each method unless we are going to print
* the entire pathname
*/
if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) {
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
}
/*
* Set the execution data width (32 or 64) based upon the
* revision number of the parent ACPI table.
......@@ -134,6 +122,21 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
if (info->table_desc->pointer->revision == 1) {
node->flags |= ANOBJ_DATA_WIDTH_32;
}
#ifdef ACPI_INIT_PARSE_METHODS
/*
* Note 11/2005: Removed this code to parse all methods during table
* load because it causes problems if there are any errors during the
* parse. Also, it seems like overkill and we probably don't want to
* abort a table load because of an issue with a single method.
*/
/*
* Print a dot for each method unless we are going to print
* the entire pathname
*/
if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) {
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
}
/*
* Always parse methods to detect errors, we will delete
......@@ -141,15 +144,11 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
*/
status = acpi_ds_parse_method(obj_handle);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"\n+Method %p [%4.4s] - parse failure, %s\n",
obj_handle,
acpi_ut_get_node_name(obj_handle),
acpi_format_exception(status)));
ACPI_REPORT_ERROR(("\n+Method %p [%4.4s] - parse failure, %s\n", obj_handle, acpi_ut_get_node_name(obj_handle), acpi_format_exception(status)));
/* This parse failed, but we will continue parsing more methods */
}
#endif
info->method_count++;
break;
......@@ -207,7 +206,7 @@ acpi_ds_initialize_objects(struct acpi_table_desc * table_desc,
status = acpi_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX,
acpi_ds_init_one_object, &info, NULL);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "walk_namespace failed, %s\n",
ACPI_REPORT_ERROR(("walk_namespace failed, %s\n",
acpi_format_exception(status)));
}
......
......@@ -5,7 +5,7 @@
******************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -260,9 +260,7 @@ acpi_ds_method_data_get_node(u16 opcode,
case AML_LOCAL_OP:
if (index > ACPI_METHOD_MAX_LOCAL) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Local index %d is invalid (max %d)\n",
index, ACPI_METHOD_MAX_LOCAL));
ACPI_REPORT_ERROR(("Local index %d is invalid (max %d)\n", index, ACPI_METHOD_MAX_LOCAL));
return_ACPI_STATUS(AE_AML_INVALID_INDEX);
}
......@@ -274,8 +272,7 @@ acpi_ds_method_data_get_node(u16 opcode,
case AML_ARG_OP:
if (index > ACPI_METHOD_MAX_ARG) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Arg index %d is invalid (max %d)\n",
ACPI_REPORT_ERROR(("Arg index %d is invalid (max %d)\n",
index, ACPI_METHOD_MAX_ARG));
return_ACPI_STATUS(AE_AML_INVALID_INDEX);
}
......@@ -286,8 +283,7 @@ acpi_ds_method_data_get_node(u16 opcode,
break;
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Opcode %d is invalid\n",
opcode));
ACPI_REPORT_ERROR(("Opcode %d is invalid\n", opcode));
return_ACPI_STATUS(AE_AML_BAD_OPCODE);
}
......@@ -378,8 +374,7 @@ acpi_ds_method_data_get_value(u16 opcode,
/* Validate the object descriptor */
if (!dest_desc) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Null object descriptor pointer\n"));
ACPI_REPORT_ERROR(("Null object descriptor pointer\n"));
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
......@@ -424,23 +419,18 @@ acpi_ds_method_data_get_value(u16 opcode,
switch (opcode) {
case AML_ARG_OP:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Uninitialized Arg[%d] at node %p\n",
index, node));
ACPI_REPORT_ERROR(("Uninitialized Arg[%d] at node %p\n", index, node));
return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG);
case AML_LOCAL_OP:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Uninitialized Local[%d] at node %p\n",
index, node));
ACPI_REPORT_ERROR(("Uninitialized Local[%d] at node %p\n", index, node));
return_ACPI_STATUS(AE_AML_UNINITIALIZED_LOCAL);
default:
ACPI_REPORT_ERROR(("Not Arg/Local opcode: %X\n",
opcode));
ACPI_REPORT_ERROR(("Not a Arg/Local opcode: %X\n", opcode));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
}
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -51,6 +51,7 @@
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsobject")
/* Local prototypes */
static acpi_status
acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
union acpi_parse_object *op,
......@@ -85,7 +86,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
*obj_desc_ptr = NULL;
if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) {
/*
* This is an named object reference. If this name was
* This is a named object reference. If this name was
* previously looked up in the namespace, it was stored in this op.
* Otherwise, go ahead and look it up now
*/
......@@ -96,18 +97,48 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
ACPI_IMODE_EXECUTE,
ACPI_NS_SEARCH_PARENT |
ACPI_NS_DONT_OPEN_SCOPE, NULL,
(struct acpi_namespace_node **)
&(op->common.node));
ACPI_CAST_INDIRECT_PTR(struct
acpi_namespace_node,
&(op->
common.
node)));
if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR(op->common.value.string,
status);
/* Check if we are resolving a named reference within a package */
if ((status == AE_NOT_FOUND)
&& (acpi_gbl_enable_interpreter_slack)
&&
((op->common.parent->common.aml_opcode ==
AML_PACKAGE_OP)
|| (op->common.parent->common.aml_opcode ==
AML_VAR_PACKAGE_OP))) {
/*
* We didn't find the target and we are populating elements
* of a package - ignore if slack enabled. Some ASL code
* contains dangling invalid references in packages and
* expects that no exception will be issued. Leave the
* element as a null element. It cannot be used, but it
* can be overwritten by subsequent ASL code - this is
* typically the case.
*/
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Ignoring unresolved reference in package [%4.4s]\n",
walk_state->
scope_info->scope.
node->name.ascii));
return_ACPI_STATUS(AE_OK);
} else {
ACPI_REPORT_NSERROR(op->common.value.
string, status);
}
return_ACPI_STATUS(status);
}
}
}
/* Create and init the internal ACPI object */
/* Create and init a new internal ACPI object */
obj_desc = acpi_ut_create_internal_object((acpi_ps_get_opcode_info
(op->common.aml_opcode))->
......@@ -157,13 +188,13 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
ACPI_FUNCTION_TRACE("ds_build_internal_buffer_obj");
obj_desc = *obj_desc_ptr;
if (obj_desc) {
/*
* We are evaluating a Named buffer object "Name (xxxx, Buffer)".
* The buffer object already exists (from the NS node)
* If we are evaluating a Named buffer object "Name (xxxx, Buffer)".
* The buffer object already exists (from the NS node), otherwise it must
* be created.
*/
} else {
obj_desc = *obj_desc_ptr;
if (!obj_desc) {
/* Create a new buffer object */
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER);
......@@ -183,10 +214,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
byte_list = arg->named.next;
if (byte_list) {
if (byte_list->common.aml_opcode != AML_INT_BYTELIST_OP) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Expecting bytelist, got AML opcode %X in op %p\n",
byte_list->common.aml_opcode,
byte_list));
ACPI_REPORT_ERROR(("Expecting bytelist, got AML opcode %X in op %p\n", byte_list->common.aml_opcode, byte_list));
acpi_ut_remove_reference(obj_desc);
return (AE_TYPE);
......@@ -259,7 +287,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
union acpi_operand_object *obj_desc = NULL;
u32 package_list_length;
acpi_status status = AE_OK;
u32 i;
acpi_native_uint i;
ACPI_FUNCTION_TRACE("ds_build_internal_package_obj");
......@@ -271,13 +299,12 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
parent = parent->common.parent;
}
obj_desc = *obj_desc_ptr;
if (obj_desc) {
/*
* We are evaluating a Named package object "Name (xxxx, Package)".
* Get the existing package object from the NS node
* If we are evaluating a Named package object "Name (xxxx, Package)",
* the package object already exists, otherwise it must be created.
*/
} else {
obj_desc = *obj_desc_ptr;
if (!obj_desc) {
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_PACKAGE);
*obj_desc_ptr = obj_desc;
if (!obj_desc) {
......@@ -291,11 +318,9 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
/* Count the number of items in the package list */
package_list_length = 0;
arg = op->common.value.arg;
arg = arg->common.next;
while (arg) {
package_list_length++;
for (package_list_length = 0; arg; package_list_length++) {
arg = arg->common.next;
}
......@@ -322,12 +347,11 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
}
/*
* Now init the elements of the package
* Initialize all elements of the package
*/
i = 0;
arg = op->common.value.arg;
arg = arg->common.next;
while (arg) {
for (i = 0; arg; i++) {
if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
/* Object (package or buffer) is already built */
......@@ -340,8 +364,6 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
package.
elements[i]);
}
i++;
arg = arg->common.next;
}
......@@ -518,9 +540,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Unknown constant opcode %X\n",
opcode));
ACPI_REPORT_ERROR(("Unknown constant opcode %X\n", opcode));
status = AE_AML_OPERAND_TYPE;
break;
}
......@@ -535,8 +555,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
break;
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Unknown Integer type %X\n",
ACPI_REPORT_ERROR(("Unknown Integer type %X\n",
op_info->type));
status = AE_AML_OPERAND_TYPE;
break;
......@@ -615,8 +634,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Unimplemented data type: %X\n",
ACPI_REPORT_ERROR(("Unimplemented data type: %X\n",
ACPI_GET_OBJECT_TYPE(obj_desc)));
status = AE_AML_OPERAND_TYPE;
......
......@@ -6,7 +6,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -413,9 +413,7 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
/* Host object must be a Buffer */
if (ACPI_GET_OBJECT_TYPE(buffer_desc) != ACPI_TYPE_BUFFER) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Target of Create Field is not a Buffer object - %s\n",
acpi_ut_get_object_type_name(buffer_desc)));
ACPI_REPORT_ERROR(("Target of Create Field is not a Buffer object - %s\n", acpi_ut_get_object_type_name(buffer_desc)));
status = AE_AML_OPERAND_TYPE;
goto cleanup;
......@@ -427,8 +425,7 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
* after resolution in acpi_ex_resolve_operands().
*/
if (ACPI_GET_DESCRIPTOR_TYPE(result_desc) != ACPI_DESC_TYPE_NAMED) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"(%s) destination not a NS Node [%s]\n",
ACPI_REPORT_ERROR(("(%s) destination not a NS Node [%s]\n",
acpi_ps_get_opcode_name(aml_opcode),
acpi_ut_get_descriptor_name(result_desc)));
......@@ -453,8 +450,7 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
/* Must have a valid (>0) bit count */
if (bit_count == 0) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Attempt to create_field of length 0\n"));
ACPI_REPORT_ERROR(("Attempt to create_field of length 0\n"));
status = AE_AML_OPERAND_VALUE;
goto cleanup;
}
......@@ -507,8 +503,7 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Unknown field creation opcode %02x\n",
ACPI_REPORT_ERROR(("Unknown field creation opcode %02x\n",
aml_opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
......@@ -517,13 +512,7 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
/* Entire field must fit within the current length of the buffer */
if ((bit_offset + bit_count) > (8 * (u32) buffer_desc->buffer.length)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Field [%4.4s] size %d exceeds Buffer [%4.4s] size %d (bits)\n",
acpi_ut_get_node_name(result_desc),
bit_offset + bit_count,
acpi_ut_get_node_name(buffer_desc->buffer.
node),
8 * (u32) buffer_desc->buffer.length));
ACPI_REPORT_ERROR(("Field [%4.4s] size %d exceeds Buffer [%4.4s] size %d (bits)\n", acpi_ut_get_node_name(result_desc), bit_offset + bit_count, acpi_ut_get_node_name(buffer_desc->buffer.node), 8 * (u32) buffer_desc->buffer.length));
status = AE_AML_BUFFER_LIMIT;
goto cleanup;
}
......@@ -629,9 +618,10 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
"after acpi_ex_resolve_operands");
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "(%s) bad operand(s) (%X)\n",
ACPI_REPORT_ERROR(("(%s) bad operand(s) (%X)\n",
acpi_ps_get_opcode_name(op->common.
aml_opcode), status));
aml_opcode),
status));
return_ACPI_STATUS(status);
}
......@@ -1155,8 +1145,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Unknown control opcode=%X Op=%p\n",
ACPI_REPORT_ERROR(("Unknown control opcode=%X Op=%p\n",
op->common.aml_opcode, op));
status = AE_AML_BAD_OPCODE;
......
......@@ -5,7 +5,7 @@
******************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -176,8 +176,8 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
/* Must have both an Op and a Result Object */
if (!op) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Null Op\n"));
return_VALUE(TRUE);
ACPI_REPORT_ERROR(("Null Op\n"));
return_UINT8(TRUE);
}
/*
......@@ -208,7 +208,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
"At Method level, result of [%s] not used\n",
acpi_ps_get_opcode_name(op->common.
aml_opcode)));
return_VALUE(FALSE);
return_UINT8(FALSE);
}
/* Get info on the parent. The root_op is AML_SCOPE */
......@@ -216,9 +216,8 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
parent_info =
acpi_ps_get_opcode_info(op->common.parent->common.aml_opcode);
if (parent_info->class == AML_CLASS_UNKNOWN) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Unknown parent opcode. Op=%p\n", op));
return_VALUE(FALSE);
ACPI_REPORT_ERROR(("Unknown parent opcode Op=%p\n", op));
return_UINT8(FALSE);
}
/*
......@@ -304,7 +303,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
acpi_ps_get_opcode_name(op->common.parent->common.
aml_opcode), op));
return_VALUE(TRUE);
return_UINT8(TRUE);
result_not_used:
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
......@@ -313,7 +312,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
acpi_ps_get_opcode_name(op->common.parent->common.
aml_opcode), op));
return_VALUE(FALSE);
return_UINT8(FALSE);
}
/*******************************************************************************
......@@ -344,7 +343,7 @@ acpi_ds_delete_result_if_not_used(union acpi_parse_object *op,
ACPI_FUNCTION_TRACE_PTR("ds_delete_result_if_not_used", result_obj);
if (!op) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Null Op\n"));
ACPI_REPORT_ERROR(("Null Op\n"));
return_VOID;
}
......@@ -616,7 +615,7 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
if (op_info->flags & AML_HAS_RETVAL) {
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"Argument previously created, already stacked \n"));
"Argument previously created, already stacked\n"));
ACPI_DEBUGGER_EXEC(acpi_db_display_argument_object
(walk_state->
......@@ -635,10 +634,7 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
* Only error is underflow, and this indicates
* a missing or null operand!
*/
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Missing or null operand, %s\n",
acpi_format_exception
(status)));
ACPI_REPORT_ERROR(("Missing or null operand, %s\n", acpi_format_exception(status)));
return_ACPI_STATUS(status);
}
} else {
......@@ -730,7 +726,7 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
*/
(void)acpi_ds_obj_stack_pop_and_delete(arg_count, walk_state);
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "While creating Arg %d - %s\n",
ACPI_REPORT_ERROR(("While creating Arg %d - %s\n",
(arg_count + 1), acpi_format_exception(status)));
return_ACPI_STATUS(status);
}
......@@ -6,7 +6,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -100,9 +100,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
if (result_obj) {
status = acpi_ds_result_pop(&obj_desc, walk_state);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Could not get result from predicate evaluation, %s\n",
acpi_format_exception(status)));
ACPI_REPORT_ERROR(("Could not get result from predicate evaluation, %s\n", acpi_format_exception(status)));
return_ACPI_STATUS(status);
}
......@@ -123,8 +121,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
}
if (!obj_desc) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"No predicate obj_desc=%p State=%p\n",
ACPI_REPORT_ERROR(("No predicate obj_desc=%p State=%p\n",
obj_desc, walk_state));
return_ACPI_STATUS(AE_AML_NO_OPERAND);
......@@ -140,10 +137,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
}
if (ACPI_GET_OBJECT_TYPE(local_obj_desc) != ACPI_TYPE_INTEGER) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Bad predicate (not an integer) obj_desc=%p State=%p Type=%X\n",
obj_desc, walk_state,
ACPI_GET_OBJECT_TYPE(obj_desc)));
ACPI_REPORT_ERROR(("Bad predicate (not an integer) obj_desc=%p State=%p Type=%X\n", obj_desc, walk_state, ACPI_GET_OBJECT_TYPE(obj_desc)));
status = AE_AML_OPERAND_TYPE;
goto cleanup;
......@@ -314,12 +308,13 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
case AML_CLASS_EXECUTE:
case AML_CLASS_CREATE:
/*
* Most operators with arguments.
* Start a new result/operand state
*/
if (walk_state->opcode != AML_CREATE_FIELD_OP) {
status = acpi_ds_result_stack_push(walk_state);
}
break;
default:
......@@ -361,7 +356,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
op_class = walk_state->op_info->class;
if (op_class == AML_CLASS_UNKNOWN) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown opcode %X\n",
ACPI_REPORT_ERROR(("Unknown opcode %X\n",
op->common.aml_opcode));
return_ACPI_STATUS(AE_NOT_IMPLEMENTED);
}
......@@ -452,12 +447,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
walk_state->operands[1]->reference.offset)) {
status = AE_OK;
} else {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"[%s]: Could not resolve operands, %s\n",
acpi_ps_get_opcode_name
(walk_state->opcode),
acpi_format_exception
(status)));
ACPI_REPORT_ERROR(("[%s]: Could not resolve operands, %s\n", acpi_ps_get_opcode_name(walk_state->opcode), acpi_format_exception(status)));
}
}
......@@ -676,8 +666,8 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
case AML_TYPE_UNDEFINED:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Undefined opcode type Op=%p\n", op));
ACPI_REPORT_ERROR(("Undefined opcode type Op=%p\n",
op));
return_ACPI_STATUS(AE_NOT_IMPLEMENTED);
case AML_TYPE_BOGUS:
......@@ -689,10 +679,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Unimplemented opcode, class=%X type=%X Opcode=%X Op=%p\n",
op_class, op_type,
op->common.aml_opcode, op));
ACPI_REPORT_ERROR(("Unimplemented opcode, class=%X type=%X Opcode=%X Op=%p\n", op_class, op_type, op->common.aml_opcode, op));
status = AE_NOT_IMPLEMENTED;
break;
......@@ -723,20 +710,6 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
cleanup:
/* Invoke exception handler on error */
if (ACPI_FAILURE(status) &&
acpi_gbl_exception_handler && !(status & AE_CODE_CONTROL)) {
acpi_ex_exit_interpreter();
status = acpi_gbl_exception_handler(status,
walk_state->method_node->
name.integer,
walk_state->opcode,
walk_state->aml_offset,
NULL);
(void)acpi_ex_enter_interpreter();
}
if (walk_state->result_obj) {
/* Break to debugger to display result */
......@@ -758,18 +731,14 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
}
#endif
/* Always clear the object stack */
walk_state->num_operands = 0;
#ifdef ACPI_DISASSEMBLER
/* On error, display method locals/args */
/* Invoke exception handler on error */
if (ACPI_FAILURE(status)) {
acpi_dm_dump_method_info(status, walk_state, op);
status = acpi_ds_method_error(status, walk_state);
}
#endif
/* Always clear the object stack */
walk_state->num_operands = 0;
return_ACPI_STATUS(status);
}
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -107,14 +107,14 @@ acpi_ds_scope_stack_push(struct acpi_namespace_node *node,
if (!node) {
/* Invalid scope */
ACPI_REPORT_ERROR(("ds_scope_stack_push: null scope passed\n"));
ACPI_REPORT_ERROR(("Null scope parameter\n"));
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
/* Make sure object type is valid */
if (!acpi_ut_valid_object_type(type)) {
ACPI_REPORT_WARNING(("ds_scope_stack_push: Invalid object type: 0x%X\n", type));
ACPI_REPORT_WARNING(("Invalid object type: 0x%X\n", type));
}
/* Allocate a new scope object */
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -92,15 +92,13 @@ acpi_ds_result_remove(union acpi_operand_object **object,
state = walk_state->results;
if (!state) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"No result object pushed! State=%p\n",
ACPI_REPORT_ERROR(("No result object pushed! State=%p\n",
walk_state));
return (AE_NOT_EXIST);
}
if (index >= ACPI_OBJ_MAX_OPERAND) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Index out of range: %X State=%p Num=%X\n",
ACPI_REPORT_ERROR(("Index out of range: %X State=%p Num=%X\n",
index, walk_state,
state->results.num_results));
}
......@@ -108,8 +106,7 @@ acpi_ds_result_remove(union acpi_operand_object **object,
/* Check for a valid result object */
if (!state->results.obj_desc[index]) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Null operand! State=%p #Ops=%X, Index=%X\n",
ACPI_REPORT_ERROR(("Null operand! State=%p #Ops=%X, Index=%X\n",
walk_state, state->results.num_results,
index));
return (AE_AML_NO_RETURN_VALUE);
......@@ -163,8 +160,7 @@ acpi_ds_result_pop(union acpi_operand_object ** object,
}
if (!state->results.num_results) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Result stack is empty! State=%p\n",
ACPI_REPORT_ERROR(("Result stack is empty! State=%p\n",
walk_state));
return (AE_AML_NO_RETURN_VALUE);
}
......@@ -192,8 +188,7 @@ acpi_ds_result_pop(union acpi_operand_object ** object,
}
}
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"No result objects! State=%p\n", walk_state));
ACPI_REPORT_ERROR(("No result objects! State=%p\n", walk_state));
return (AE_AML_NO_RETURN_VALUE);
}
......@@ -222,15 +217,14 @@ acpi_ds_result_pop_from_bottom(union acpi_operand_object ** object,
state = walk_state->results;
if (!state) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Warning: No result object pushed! State=%p\n",
ACPI_REPORT_ERROR(("No result object pushed! State=%p\n",
walk_state));
return (AE_NOT_EXIST);
}
if (!state->results.num_results) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"No result objects! State=%p\n", walk_state));
ACPI_REPORT_ERROR(("No result objects! State=%p\n",
walk_state));
return (AE_AML_NO_RETURN_VALUE);
}
......@@ -250,8 +244,7 @@ acpi_ds_result_pop_from_bottom(union acpi_operand_object ** object,
/* Check for a valid result object */
if (!*object) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Null operand! State=%p #Ops=%X Index=%X\n",
ACPI_REPORT_ERROR(("Null operand! State=%p #Ops=%X Index=%X\n",
walk_state, state->results.num_results,
(u32) index));
return (AE_AML_NO_RETURN_VALUE);
......@@ -293,16 +286,12 @@ acpi_ds_result_push(union acpi_operand_object * object,
}
if (state->results.num_results == ACPI_OBJ_NUM_OPERANDS) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Result stack overflow: Obj=%p State=%p Num=%X\n",
object, walk_state,
state->results.num_results));
ACPI_REPORT_ERROR(("Result stack overflow: Obj=%p State=%p Num=%X\n", object, walk_state, state->results.num_results));
return (AE_STACK_OVERFLOW);
}
if (!object) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Null Object! Obj=%p State=%p Num=%X\n",
ACPI_REPORT_ERROR(("Null Object! Obj=%p State=%p Num=%X\n",
object, walk_state,
state->results.num_results));
return (AE_BAD_PARAMETER);
......@@ -413,10 +402,7 @@ acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state)
/* Check for stack overflow */
if (walk_state->num_operands >= ACPI_OBJ_NUM_OPERANDS) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"overflow! Obj=%p State=%p #Ops=%X\n",
object, walk_state,
walk_state->num_operands));
ACPI_REPORT_ERROR(("Object stack overflow! Obj=%p State=%p #Ops=%X\n", object, walk_state, walk_state->num_operands));
return (AE_STACK_OVERFLOW);
}
......@@ -460,10 +446,7 @@ acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)
/* Check for stack underflow */
if (walk_state->num_operands == 0) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Underflow! Count=%X State=%p #Ops=%X\n",
pop_count, walk_state,
walk_state->num_operands));
ACPI_REPORT_ERROR(("Object stack underflow! Count=%X State=%p #Ops=%X\n", pop_count, walk_state, walk_state->num_operands));
return (AE_STACK_UNDERFLOW);
}
......@@ -506,10 +489,7 @@ acpi_ds_obj_stack_pop_and_delete(u32 pop_count,
/* Check for stack underflow */
if (walk_state->num_operands == 0) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Underflow! Count=%X State=%p #Ops=%X\n",
pop_count, walk_state,
walk_state->num_operands));
ACPI_REPORT_ERROR(("Object stack underflow! Count=%X State=%p #Ops=%X\n", pop_count, walk_state, walk_state->num_operands));
return (AE_STACK_UNDERFLOW);
}
......@@ -826,15 +806,13 @@ void acpi_ds_delete_walk_state(struct acpi_walk_state *walk_state)
}
if (walk_state->data_type != ACPI_DESC_TYPE_WALK) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"%p is not a valid walk state\n",
ACPI_REPORT_ERROR(("%p is not a valid walk state\n",
walk_state));
return;
}
if (walk_state->parser_state.scope) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"%p walk still has a scope list\n",
ACPI_REPORT_ERROR(("%p walk still has a scope list\n",
walk_state));
}
......@@ -894,25 +872,18 @@ acpi_ds_result_insert(void *object,
state = walk_state->results;
if (!state) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"No result object pushed! State=%p\n",
ACPI_REPORT_ERROR(("No result object pushed! State=%p\n",
walk_state));
return (AE_NOT_EXIST);
}
if (index >= ACPI_OBJ_NUM_OPERANDS) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Index out of range: %X Obj=%p State=%p Num=%X\n",
index, object, walk_state,
state->results.num_results));
ACPI_REPORT_ERROR(("Index out of range: %X Obj=%p State=%p Num=%X\n", index, object, walk_state, state->results.num_results));
return (AE_BAD_PARAMETER);
}
if (!object) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Null Object! Index=%X Obj=%p State=%p Num=%X\n",
index, object, walk_state,
state->results.num_results));
ACPI_REPORT_ERROR(("Null Object! Index=%X Obj=%p State=%p Num=%X\n", index, object, walk_state, state->results.num_results));
return (AE_BAD_PARAMETER);
}
......@@ -986,9 +957,7 @@ acpi_ds_obj_stack_pop_object(union acpi_operand_object **object,
/* Check for stack underflow */
if (walk_state->num_operands == 0) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Missing operand/stack empty! State=%p #Ops=%X\n",
walk_state, walk_state->num_operands));
ACPI_REPORT_ERROR(("Missing operand/stack empty! State=%p #Ops=%X\n", walk_state, walk_state->num_operands));
*object = NULL;
return (AE_AML_NO_OPERAND);
}
......@@ -1000,8 +969,7 @@ acpi_ds_obj_stack_pop_object(union acpi_operand_object **object,
/* Check for a valid operand */
if (!walk_state->operands[walk_state->num_operands]) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Null operand! State=%p #Ops=%X\n",
ACPI_REPORT_ERROR(("Null operand! State=%p #Ops=%X\n",
walk_state, walk_state->num_operands));
*object = NULL;
return (AE_AML_NO_OPERAND);
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -73,7 +73,7 @@ acpi_status acpi_ev_initialize_events(void)
/* Make sure we have ACPI tables */
if (!acpi_gbl_DSDT) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN, "No ACPI tables present!\n"));
ACPI_REPORT_WARNING(("No ACPI tables present!\n"));
return_ACPI_STATUS(AE_NO_ACPI_TABLES);
}
......@@ -98,6 +98,48 @@ acpi_status acpi_ev_initialize_events(void)
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_install_fadt_gpes
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Completes initialization of the FADT-defined GPE blocks
* (0 and 1). This causes the _PRW methods to be run, so the HW
* must be fully initialized at this point, including global lock
* support.
*
******************************************************************************/
acpi_status acpi_ev_install_fadt_gpes(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE("ev_install_fadt_gpes");
/* Namespace must be locked */
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
return (status);
}
/* FADT GPE Block 0 */
(void)acpi_ev_initialize_gpe_block(acpi_gbl_fadt_gpe_device,
acpi_gbl_gpe_fadt_blocks[0]);
/* FADT GPE Block 1 */
(void)acpi_ev_initialize_gpe_block(acpi_gbl_fadt_gpe_device,
acpi_gbl_gpe_fadt_blocks[1]);
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_install_xrupt_handlers
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -372,14 +372,14 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
{
acpi_status status;
struct acpi_gpe_block_info *gpe_block;
struct acpi_gpe_register_info *gpe_register_info;
u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
u8 enabled_status_byte;
struct acpi_gpe_register_info *gpe_register_info;
u32 status_reg;
u32 enable_reg;
u32 flags;
acpi_status status;
struct acpi_gpe_block_info *gpe_block;
acpi_native_uint flags;
acpi_native_uint i;
acpi_native_uint j;
......@@ -599,8 +599,10 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
ACPI_GPE_EDGE_TRIGGERED) {
status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("acpi_ev_gpe_dispatch: %s, Unable to clear GPE[%2X]\n", acpi_format_exception(status), gpe_number));
return_VALUE(ACPI_INTERRUPT_NOT_HANDLED);
ACPI_REPORT_ERROR(("%s, Unable to clear GPE[%2X]\n",
acpi_format_exception(status),
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
}
......@@ -637,8 +639,8 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
ACPI_GPE_LEVEL_TRIGGERED) {
status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("acpi_ev_gpe_dispatch: %s, Unable to clear GPE[%2X]\n", acpi_format_exception(status), gpe_number));
return_VALUE(ACPI_INTERRUPT_NOT_HANDLED);
ACPI_REPORT_ERROR(("%s, Unable to clear GPE[%2X]\n", acpi_format_exception(status), gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
}
break;
......@@ -651,8 +653,10 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
*/
status = acpi_ev_disable_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("acpi_ev_gpe_dispatch: %s, Unable to disable GPE[%2X]\n", acpi_format_exception(status), gpe_number));
return_VALUE(ACPI_INTERRUPT_NOT_HANDLED);
ACPI_REPORT_ERROR(("%s, Unable to disable GPE[%2X]\n",
acpi_format_exception(status),
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
/*
......@@ -663,7 +667,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
acpi_ev_asynch_execute_gpe_method,
gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("acpi_ev_gpe_dispatch: %s, Unable to queue handler for GPE[%2X] - event disabled\n", acpi_format_exception(status), gpe_number));
ACPI_REPORT_ERROR(("%s, Unable to queue handler for GPE[%2X] - event disabled\n", acpi_format_exception(status), gpe_number));
}
break;
......@@ -671,7 +675,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
/* No handler or method to run! */
ACPI_REPORT_ERROR(("acpi_ev_gpe_dispatch: No handler or method for GPE[%2X], disabling event\n", gpe_number));
ACPI_REPORT_ERROR(("No handler or method for GPE[%2X], disabling event\n", gpe_number));
/*
* Disable the GPE. The GPE will remain disabled until the ACPI
......@@ -679,13 +683,15 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
*/
status = acpi_ev_disable_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("acpi_ev_gpe_dispatch: %s, Unable to disable GPE[%2X]\n", acpi_format_exception(status), gpe_number));
return_VALUE(ACPI_INTERRUPT_NOT_HANDLED);
ACPI_REPORT_ERROR(("%s, Unable to disable GPE[%2X]\n",
acpi_format_exception(status),
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
break;
}
return_VALUE(ACPI_INTERRUPT_HANDLED);
return_UINT32(ACPI_INTERRUPT_HANDLED);
}
#ifdef ACPI_GPE_NOTIFY_CHECK
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -542,9 +542,7 @@ void acpi_ev_terminate(void)
for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
status = acpi_disable_event((u32) i, 0);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Could not disable fixed event %d\n",
(u32) i));
ACPI_REPORT_ERROR(("Could not disable fixed event %d\n", (u32) i));
}
}
......@@ -556,8 +554,7 @@ void acpi_ev_terminate(void)
status = acpi_ev_remove_sci_handler();
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Could not remove SCI handler\n"));
ACPI_REPORT_ERROR(("Could not remove SCI handler\n"));
}
}
......@@ -570,8 +567,7 @@ void acpi_ev_terminate(void)
if (acpi_gbl_original_mode == ACPI_SYS_MODE_LEGACY) {
status = acpi_disable();
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN,
"acpi_disable failed\n"));
ACPI_REPORT_WARNING(("acpi_disable failed\n"));
}
}
return_VOID;
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -295,8 +295,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
handler_desc = region_obj->region.handler;
if (!handler_desc) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"No handler for Region [%4.4s] (%p) [%s]\n",
ACPI_REPORT_ERROR(("No handler for Region [%4.4s] (%p) [%s]\n",
acpi_ut_get_node_name(region_obj->region.
node), region_obj,
acpi_ut_get_region_name(region_obj->region.
......@@ -317,12 +316,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
if (!region_setup) {
/* No initialization routine, exit with error */
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"No init routine for region(%p) [%s]\n",
region_obj,
acpi_ut_get_region_name(region_obj->
region.
space_id)));
ACPI_REPORT_ERROR(("No init routine for region(%p) [%s]\n", region_obj, acpi_ut_get_region_name(region_obj->region.space_id)));
return_ACPI_STATUS(AE_NOT_EXIST);
}
......@@ -347,8 +341,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
/* Check for failure of the Region Setup */
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Region Init: %s [%s]\n",
ACPI_REPORT_ERROR(("Region Initialization: %s [%s]\n",
acpi_format_exception(status),
acpi_ut_get_region_name(region_obj->
region.
......@@ -501,12 +494,7 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
status = acpi_ev_execute_reg_method(region_obj, 0);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"%s from region _REG, [%s]\n",
acpi_format_exception(status),
acpi_ut_get_region_name
(region_obj->region.
space_id)));
ACPI_REPORT_ERROR(("%s from region _REG, [%s]\n", acpi_format_exception(status), acpi_ut_get_region_name(region_obj->region.space_id)));
}
if (acpi_ns_is_locked) {
......@@ -528,12 +516,7 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
/* Init routine may fail, Just ignore errors */
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"%s from region init, [%s]\n",
acpi_format_exception(status),
acpi_ut_get_region_name
(region_obj->region.
space_id)));
ACPI_REPORT_ERROR(("%s from region init, [%s]\n", acpi_format_exception(status), acpi_ut_get_region_name(region_obj->region.space_id)));
}
region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE);
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......
......@@ -6,7 +6,7 @@
******************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -88,7 +88,7 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context)
*/
interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
return_VALUE(interrupt_handled);
return_UINT32(interrupt_handled);
}
/*******************************************************************************
......@@ -121,7 +121,7 @@ u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context)
*/
interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
return_VALUE(interrupt_handled);
return_UINT32(interrupt_handled);
}
/******************************************************************************
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -143,8 +143,8 @@ acpi_install_fixed_event_handler(u32 event,
if (ACPI_SUCCESS(status))
status = acpi_enable_event(event, 0);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN,
"Could not enable fixed event.\n"));
ACPI_REPORT_WARNING(("Could not enable fixed event %X\n",
event));
/* Remove the handler */
......@@ -204,10 +204,9 @@ acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
acpi_gbl_fixed_event_handlers[event].context = NULL;
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN,
"Could not write to fixed event enable register.\n"));
ACPI_REPORT_WARNING(("Could not write to fixed event enable register %X\n", event));
} else {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X.\n",
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n",
event));
}
......@@ -434,7 +433,7 @@ acpi_remove_notify_handler(acpi_handle device,
if (device == ACPI_ROOT_OBJECT) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Removing notify handler for ROOT object.\n"));
"Removing notify handler for namespace root object\n"));
if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
!acpi_gbl_system_notify.handler) ||
......@@ -562,7 +561,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
struct acpi_gpe_event_info *gpe_event_info;
struct acpi_handler_info *handler;
acpi_status status;
u32 flags;
acpi_native_uint flags;
ACPI_FUNCTION_TRACE("acpi_install_gpe_handler");
......@@ -653,7 +652,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
struct acpi_gpe_event_info *gpe_event_info;
struct acpi_handler_info *handler;
acpi_status status;
u32 flags;
acpi_native_uint flags;
ACPI_FUNCTION_TRACE("acpi_remove_gpe_handler");
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -70,8 +70,7 @@ acpi_status acpi_enable(void)
/* Make sure we have the FADT */
if (!acpi_gbl_FADT) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN,
"No FADT information present!\n"));
ACPI_REPORT_WARNING(("No FADT information present!\n"));
return_ACPI_STATUS(AE_NO_ACPI_TABLES);
}
......@@ -83,7 +82,7 @@ acpi_status acpi_enable(void)
status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Could not transition to ACPI mode.\n"));
ACPI_REPORT_ERROR(("Could not transition to ACPI mode\n"));
return_ACPI_STATUS(status);
}
......@@ -113,8 +112,7 @@ acpi_status acpi_disable(void)
ACPI_FUNCTION_TRACE("acpi_disable");
if (!acpi_gbl_FADT) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN,
"No FADT information present!\n"));
ACPI_REPORT_WARNING(("No FADT information present!\n"));
return_ACPI_STATUS(AE_NO_ACPI_TABLES);
}
......@@ -127,8 +125,7 @@ acpi_status acpi_disable(void)
status = acpi_hw_set_mode(ACPI_SYS_MODE_LEGACY);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Could not exit ACPI mode to legacy mode"));
ACPI_REPORT_ERROR(("Could not exit ACPI mode to legacy mode"));
return_ACPI_STATUS(status);
}
......@@ -185,8 +182,7 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
}
if (value != 1) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Could not enable %s event\n",
ACPI_REPORT_ERROR(("Could not enable %s event\n",
acpi_ut_get_event_name(event)));
return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
}
......@@ -384,8 +380,7 @@ acpi_status acpi_disable_event(u32 event, u32 flags)
}
if (value != 0) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Could not disable %s events\n",
ACPI_REPORT_ERROR(("Could not disable %s events\n",
acpi_ut_get_event_name(event)));
return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
}
......@@ -626,6 +621,13 @@ acpi_install_gpe_block(acpi_handle gpe_device,
goto unlock_and_exit;
}
/* Run the _PRW methods and enable the GPEs */
status = acpi_ev_initialize_gpe_block(node, gpe_block);
if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
}
/* Get the device_object attached to the node */
obj_desc = acpi_ns_get_attached_object(node);
......
......@@ -6,7 +6,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -413,9 +413,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
(!ACPI_STRNCMP(table_ptr->signature,
acpi_gbl_table_data[ACPI_TABLE_SSDT].signature,
acpi_gbl_table_data[ACPI_TABLE_SSDT].sig_length))) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Table has invalid signature [%4.4s], must be SSDT or PSDT\n",
table_ptr->signature));
ACPI_REPORT_ERROR(("Table has invalid signature [%4.4s], must be SSDT or PSDT\n", table_ptr->signature));
status = AE_BAD_SIGNATURE;
goto cleanup;
}
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -504,18 +504,12 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
}
/*
* Perform the conversion.
* Create a new string object and string buffer
* (-1 because of extra separator included in string_length from above)
*/
string_length--;
if (string_length > ACPI_MAX_STRING_CONVERSION) { /* ACPI limit */
return_ACPI_STATUS(AE_AML_STRING_LIMIT);
}
/* Create a new string object and string buffer */
return_desc =
acpi_ut_create_string_object((acpi_size) string_length);
acpi_ut_create_string_object((acpi_size)
(string_length - 1));
if (!return_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
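The -1 above can be sanity-checked with illustrative numbers; check_separator_accounting is a hypothetical helper that only mirrors the length accounting described in the comment:

#include <assert.h>

static void check_separator_accounting(void)
{
	unsigned int source_bytes = 3;     /* illustrative buffer size */
	unsigned int chars_per_byte = 2;   /* printed width of one byte */
	unsigned int string_length;

	/* As computed above: every byte is counted with its separator */
	string_length = source_bytes * (chars_per_byte + 1);

	/* Only source_bytes - 1 separators are actually emitted */
	assert(string_length - 1 ==
	       source_bytes * chars_per_byte + (source_bytes - 1));
}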
......@@ -660,16 +654,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
break;
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Unknown Target type ID 0x%X Op %s dest_type %s\n",
GET_CURRENT_ARG_TYPE(walk_state->op_info->
runtime_args),
walk_state->op_info->name,
acpi_ut_get_type_name(destination_type)));
ACPI_REPORT_ERROR(("Bad Target Type (ARGI): %X\n",
GET_CURRENT_ARG_TYPE(walk_state->op_info->
runtime_args)))
ACPI_REPORT_ERROR(("Unknown Target type ID 0x%X aml_opcode %X dest_type %s\n", GET_CURRENT_ARG_TYPE(walk_state->op_info->runtime_args), walk_state->opcode, acpi_ut_get_type_name(destination_type)));
status = AE_AML_INTERNAL;
}
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -94,8 +94,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
/* We must have a valid region */
if (ACPI_GET_OBJECT_TYPE(rgn_desc) != ACPI_TYPE_REGION) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Needed Region, found type %X (%s)\n",
ACPI_REPORT_ERROR(("Needed Region, found type %X (%s)\n",
ACPI_GET_OBJECT_TYPE(rgn_desc),
acpi_ut_get_object_type_name(rgn_desc)));
......@@ -162,31 +161,14 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
* than the region itself. For example, a region of length one
* byte, and a field with Dword access specified.
*/
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Field [%4.4s] access width (%d bytes) too large for region [%4.4s] (length %X)\n",
acpi_ut_get_node_name(obj_desc->
common_field.
node),
obj_desc->common_field.
access_byte_width,
acpi_ut_get_node_name(rgn_desc->
region.node),
rgn_desc->region.length));
ACPI_REPORT_ERROR(("Field [%4.4s] access width (%d bytes) too large for region [%4.4s] (length %X)\n", acpi_ut_get_node_name(obj_desc->common_field.node), obj_desc->common_field.access_byte_width, acpi_ut_get_node_name(rgn_desc->region.node), rgn_desc->region.length));
}
/*
* Offset rounded up to next multiple of field width
* exceeds region length, indicate an error
*/
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Field [%4.4s] Base+Offset+Width %X+%X+%X is beyond end of region [%4.4s] (length %X)\n",
acpi_ut_get_node_name(obj_desc->common_field.
node),
obj_desc->common_field.base_byte_offset,
field_datum_byte_offset,
obj_desc->common_field.access_byte_width,
acpi_ut_get_node_name(rgn_desc->region.node),
rgn_desc->region.length));
ACPI_REPORT_ERROR(("Field [%4.4s] Base+Offset+Width %X+%X+%X is beyond end of region [%4.4s] (length %X)\n", acpi_ut_get_node_name(obj_desc->common_field.node), obj_desc->common_field.base_byte_offset, field_datum_byte_offset, obj_desc->common_field.access_byte_width, acpi_ut_get_node_name(rgn_desc->region.node), rgn_desc->region.length));
return_ACPI_STATUS(AE_AML_REGION_LIMIT);
}
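A concrete instance of the Base+Offset+Width test above; check_region_limit_example is a hypothetical helper and the numbers are illustrative only:

static void check_region_limit_example(void)
{
	unsigned int region_length = 4;            /* region of 4 bytes */
	unsigned int base_byte_offset = 2;         /* field base within the region */
	unsigned int field_datum_byte_offset = 1;
	unsigned int access_byte_width = 2;        /* Word access */

	/* Mirrors the check above: 2 + 1 + 2 = 5 is beyond a 4-byte region */
	if (base_byte_offset + field_datum_byte_offset + access_byte_width >
	    region_length) {
		/* acpi_ex_setup_region returns AE_AML_REGION_LIMIT in this case */
	}
}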
......@@ -270,8 +252,7 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
if (ACPI_FAILURE(status)) {
if (status == AE_NOT_IMPLEMENTED) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Region %s(%X) not implemented\n",
ACPI_REPORT_ERROR(("Region %s(%X) not implemented\n",
acpi_ut_get_region_name(rgn_desc->
region.
space_id),
......@@ -618,8 +599,7 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"write_with_update_rule: Unknown update_rule setting: %X\n",
ACPI_REPORT_ERROR(("Unknown update_rule value: %X\n",
(obj_desc->common_field.
field_flags &
AML_FIELD_UPDATE_RULE_MASK)));
......@@ -677,10 +657,7 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
if (buffer_length <
ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Field size %X (bits) is too large for buffer (%X)\n",
obj_desc->common_field.bit_length,
buffer_length));
ACPI_REPORT_ERROR(("Field size %X (bits) is too large for buffer (%X)\n", obj_desc->common_field.bit_length, buffer_length));
return_ACPI_STATUS(AE_BUFFER_OVERFLOW);
}
......@@ -792,10 +769,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
if (buffer_length <
ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Field size %X (bits) is too large for buffer (%X)\n",
obj_desc->common_field.bit_length,
buffer_length));
ACPI_REPORT_ERROR(("Field size %X (bits) is too large for buffer (%X)\n", obj_desc->common_field.bit_length, buffer_length));
return_ACPI_STATUS(AE_BUFFER_OVERFLOW);
}
......
......@@ -6,7 +6,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......
......@@ -6,7 +6,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -119,7 +119,8 @@ acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state)
default:
ACPI_REPORT_ERROR(("acpi_ex_opcode_3A_0T_0R: Unknown opcode %X\n", walk_state->opcode));
ACPI_REPORT_ERROR(("Unknown AML opcode %X\n",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
}
......@@ -223,8 +224,8 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
goto cleanup;
}
if (length > 0) {
/* Copy the portion requested */
if (buffer) {
/* We have a buffer, copy the portion requested */
ACPI_MEMCPY(buffer, operand[0]->string.pointer + index,
length);
......@@ -242,7 +243,8 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
default:
ACPI_REPORT_ERROR(("acpi_ex_opcode_3A_0T_0R: Unknown opcode %X\n", walk_state->opcode));
ACPI_REPORT_ERROR(("Unknown AML opcode %X\n",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
}
......
......@@ -6,7 +6,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......