Commit 59ed2f59 authored by Linus Torvalds
parents 9ad11ab4 b8e4d893
......@@ -452,6 +452,11 @@ running once the system is up.
eata= [HW,SCSI]
ec_intr= [HW,ACPI] ACPI Embedded Controller interrupt mode
Format: <int>
0: polling mode
non-0: interrupt mode (default)
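Illustrative usage (not part of the patch): forcing the embedded controller into polling mode just means appending "ec_intr=0" to the existing kernel command line in the boot loader; omitting the option keeps the default interrupt mode.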
eda= [HW,PS2]
edb= [HW,PS2]
......
......@@ -3,6 +3,6 @@ obj-$(CONFIG_X86_IO_APIC) += earlyquirk.o
obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
ifneq ($(CONFIG_ACPI_PROCESSOR),)
obj-y += cstate.o
obj-y += cstate.o processor.o
endif
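(The ifneq construct, repeated for ia64 and x86_64 further down, adds these objects as obj-y whenever CONFIG_ACPI_PROCESSOR is set to either y or m, so the arch hooks are always built into the kernel even when the ACPI processor driver itself is a module.)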
......@@ -464,7 +464,7 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
* success: return IRQ number (>=0)
* failure: return < 0
*/
int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
int acpi_register_gsi(u32 gsi, int triggering, int polarity)
{
unsigned int irq;
unsigned int plat_gsi = gsi;
......@@ -476,14 +476,14 @@ int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
extern void eisa_set_level_irq(unsigned int irq);
if (edge_level == ACPI_LEVEL_SENSITIVE)
if (triggering == ACPI_LEVEL_SENSITIVE)
eisa_set_level_irq(gsi);
}
#endif
#ifdef CONFIG_X86_IO_APIC
if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
plat_gsi = mp_register_gsi(gsi, edge_level, active_high_low);
plat_gsi = mp_register_gsi(gsi, triggering, polarity);
}
#endif
acpi_gsi_to_irq(plat_gsi, &irq);
......
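As a rough sketch of how the renamed interface is consumed (illustrative only, not code from this commit; the prototype is the one shown above and ACPI_LEVEL_SENSITIVE / ACPI_ACTIVE_LOW are the standard ACPICA trigger/polarity constants):
	/* Hypothetical caller: route a level-triggered, active-low GSI taken
	 * from a device's interrupt resource and get back the Linux IRQ. */
	int irq = acpi_register_gsi(gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
	if (irq < 0)
		return irq;	/* registration failed */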
......@@ -14,64 +14,6 @@
#include <acpi/processor.h>
#include <asm/acpi.h>
static void acpi_processor_power_init_intel_pdc(struct acpi_processor_power
*pow)
{
struct acpi_object_list *obj_list;
union acpi_object *obj;
u32 *buf;
/* allocate and initialize pdc. It will be used later. */
obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
if (!obj_list) {
printk(KERN_ERR "Memory allocation error\n");
return;
}
obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
if (!obj) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj_list);
return;
}
buf = kmalloc(12, GFP_KERNEL);
if (!buf) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj);
kfree(obj_list);
return;
}
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;
obj->buffer.pointer = (u8 *) buf;
obj_list->count = 1;
obj_list->pointer = obj;
pow->pdc = obj_list;
return;
}
/* Initialize _PDC data based on the CPU vendor */
void acpi_processor_power_init_pdc(struct acpi_processor_power *pow,
unsigned int cpu)
{
struct cpuinfo_x86 *c = cpu_data + cpu;
pow->pdc = NULL;
if (c->x86_vendor == X86_VENDOR_INTEL)
acpi_processor_power_init_intel_pdc(pow);
return;
}
EXPORT_SYMBOL(acpi_processor_power_init_pdc);
/*
* Initialize bm_flags based on the CPU cache properties
* On SMP it depends on cache configuration
......
/*
* arch/i386/kernel/acpi/processor.c
*
* Copyright (C) 2005 Intel Corporation
* Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
* - Added _PDC for platforms with Intel CPUs
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <asm/acpi.h>
static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
{
struct acpi_object_list *obj_list;
union acpi_object *obj;
u32 *buf;
/* allocate and initialize pdc. It will be used later. */
obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
if (!obj_list) {
printk(KERN_ERR "Memory allocation error\n");
return;
}
obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
if (!obj) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj_list);
return;
}
buf = kmalloc(12, GFP_KERNEL);
if (!buf) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj);
kfree(obj_list);
return;
}
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
if (cpu_has(c, X86_FEATURE_EST))
buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;
obj->buffer.pointer = (u8 *) buf;
obj_list->count = 1;
obj_list->pointer = obj;
pr->pdc = obj_list;
return;
}
/* Initialize _PDC data based on the CPU vendor */
void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
{
unsigned int cpu = pr->id;
struct cpuinfo_x86 *c = cpu_data + cpu;
pr->pdc = NULL;
if (c->x86_vendor == X86_VENDOR_INTEL)
init_intel_pdc(pr, c);
return;
}
EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
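For context, a minimal sketch of how the object list built above gets used (assuming the common ACPI processor code evaluates pr->pdc through the standard ACPICA call; this is not code from the patch):
	/* Illustrative only: hand the capability buffer to the _PDC method. */
	arch_acpi_processor_init_pdc(pr);
	if (pr->pdc)
		(void)acpi_evaluate_object(pr->handle, "_PDC", pr->pdc, NULL);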
......@@ -295,68 +295,6 @@ acpi_cpufreq_guess_freq (
}
/*
* acpi_processor_cpu_init_pdc_est - let BIOS know about the SMP capabilities
* of this driver
* @perf: processor-specific acpi_io_data struct
* @cpu: CPU being initialized
*
* To avoid issues with legacy OSes, some BIOSes require to be informed of
* the SMP capabilities of OS P-state driver. Here we set the bits in _PDC
* accordingly, for Enhanced Speedstep. Actual call to _PDC is done in
* driver/acpi/processor.c
*/
static void
acpi_processor_cpu_init_pdc_est(
struct acpi_processor_performance *perf,
unsigned int cpu,
struct acpi_object_list *obj_list
)
{
union acpi_object *obj;
u32 *buf;
struct cpuinfo_x86 *c = cpu_data + cpu;
dprintk("acpi_processor_cpu_init_pdc_est\n");
if (!cpu_has(c, X86_FEATURE_EST))
return;
/* Initialize pdc. It will be used later. */
if (!obj_list)
return;
if (!(obj_list->count && obj_list->pointer))
return;
obj = obj_list->pointer;
if ((obj->buffer.length == 12) && obj->buffer.pointer) {
buf = (u32 *)obj->buffer.pointer;
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
perf->pdc = obj_list;
}
return;
}
/* CPU specific PDC initialization */
static void
acpi_processor_cpu_init_pdc(
struct acpi_processor_performance *perf,
unsigned int cpu,
struct acpi_object_list *obj_list
)
{
struct cpuinfo_x86 *c = cpu_data + cpu;
dprintk("acpi_processor_cpu_init_pdc\n");
perf->pdc = NULL;
if (cpu_has(c, X86_FEATURE_EST))
acpi_processor_cpu_init_pdc_est(perf, cpu, obj_list);
return;
}
static int
acpi_cpufreq_cpu_init (
struct cpufreq_policy *policy)
......@@ -367,14 +305,7 @@ acpi_cpufreq_cpu_init (
unsigned int result = 0;
struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
union acpi_object arg0 = {ACPI_TYPE_BUFFER};
u32 arg0_buf[3];
struct acpi_object_list arg_list = {1, &arg0};
dprintk("acpi_cpufreq_cpu_init\n");
/* setup arg_list for _PDC settings */
arg0.buffer.length = 12;
arg0.buffer.pointer = (u8 *) arg0_buf;
data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
if (!data)
......@@ -382,9 +313,7 @@ acpi_cpufreq_cpu_init (
acpi_io_data[cpu] = data;
acpi_processor_cpu_init_pdc(&data->acpi_data, cpu, &arg_list);
result = acpi_processor_register_performance(&data->acpi_data, cpu);
data->acpi_data.pdc = NULL;
if (result)
goto err_free;
......
......@@ -362,22 +362,10 @@ static struct acpi_processor_performance p;
*/
static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
{
union acpi_object arg0 = {ACPI_TYPE_BUFFER};
u32 arg0_buf[3];
struct acpi_object_list arg_list = {1, &arg0};
unsigned long cur_freq;
int result = 0, i;
unsigned int cpu = policy->cpu;
/* _PDC settings */
arg0.buffer.length = 12;
arg0.buffer.pointer = (u8 *) arg0_buf;
arg0_buf[0] = ACPI_PDC_REVISION_ID;
arg0_buf[1] = 1;
arg0_buf[2] = ACPI_PDC_EST_CAPABILITY_SMP_MSR;
p.pdc = &arg_list;
/* register with ACPI core */
if (acpi_processor_register_performance(&p, cpu)) {
dprintk(KERN_INFO PFX "obtaining ACPI data failed\n");
......
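Taken together with the acpi-cpufreq hunks above, this drops the per-cpufreq-driver _PDC construction entirely; the capability buffer is now built once by the new arch_acpi_processor_init_pdc() implementations, leaving the _PDC evaluation itself to the common ACPI processor code.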
......@@ -1080,7 +1080,7 @@ void __init mp_config_acpi_legacy_irqs (void)
#define MAX_GSI_NUM 4096
int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
int mp_register_gsi (u32 gsi, int triggering, int polarity)
{
int ioapic = -1;
int ioapic_pin = 0;
......@@ -1129,7 +1129,7 @@ int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
if (edge_level) {
if (triggering == ACPI_LEVEL_SENSITIVE) {
/*
* For PCI devices assign IRQs in order, avoiding gaps
* due to unused I/O APIC pins.
......@@ -1151,8 +1151,8 @@ int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
}
io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
return gsi;
}
......
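The ternary expressions above translate the ACPI attributes into the IO-APIC encoding (trigger: 0 = edge, 1 = level; polarity: 0 = active high, 1 = active low). A hypothetical pair of helpers, not in the patch, that makes the mapping explicit:
	static inline int acpi_to_ioapic_trigger(int triggering)
	{
		return (triggering == ACPI_EDGE_SENSITIVE) ? 0 : 1;	/* 1 = level */
	}

	static inline int acpi_to_ioapic_polarity(int polarity)
	{
		return (polarity == ACPI_ACTIVE_HIGH) ? 0 : 1;		/* 1 = active low */
	}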
......@@ -13,6 +13,11 @@ obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o
obj-$(CONFIG_IA64_HP_ZX1) += acpi-ext.o
obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += acpi-ext.o
ifneq ($(CONFIG_ACPI_PROCESSOR),)
obj-y += acpi-processor.o
endif
obj-$(CONFIG_IA64_PALINFO) += palinfo.o
obj-$(CONFIG_IOSAPIC) += iosapic.o
obj-$(CONFIG_MODULES) += module.o
......
......@@ -33,33 +33,33 @@ acpi_vendor_resource_match(struct acpi_resource *resource, void *context)
struct acpi_vendor_info *info = (struct acpi_vendor_info *)context;
struct acpi_resource_vendor *vendor;
struct acpi_vendor_descriptor *descriptor;
u32 length;
u32 byte_length;
if (resource->id != ACPI_RSTYPE_VENDOR)
if (resource->type != ACPI_RESOURCE_TYPE_VENDOR)
return AE_OK;
vendor = (struct acpi_resource_vendor *)&resource->data;
descriptor = (struct acpi_vendor_descriptor *)vendor->reserved;
if (vendor->length <= sizeof(*info->descriptor) ||
descriptor = (struct acpi_vendor_descriptor *)vendor->byte_data;
if (vendor->byte_length <= sizeof(*info->descriptor) ||
descriptor->guid_id != info->descriptor->guid_id ||
efi_guidcmp(descriptor->guid, info->descriptor->guid))
return AE_OK;
length = vendor->length - sizeof(struct acpi_vendor_descriptor);
info->data = acpi_os_allocate(length);
byte_length = vendor->byte_length - sizeof(struct acpi_vendor_descriptor);
info->data = acpi_os_allocate(byte_length);
if (!info->data)
return AE_NO_MEMORY;
memcpy(info->data,
vendor->reserved + sizeof(struct acpi_vendor_descriptor),
length);
info->length = length;
vendor->byte_data + sizeof(struct acpi_vendor_descriptor),
byte_length);
info->length = byte_length;
return AE_CTRL_TERMINATE;
}
acpi_status
acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor * id,
u8 ** data, u32 * length)
u8 ** data, u32 * byte_length)
{
struct acpi_vendor_info info;
......@@ -72,7 +72,7 @@ acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor * id,
return AE_NOT_FOUND;
*data = info.data;
*length = info.length;
*byte_length = info.length;
return AE_OK;
}
......
/*
* arch/ia64/kernel/cpufreq/processor.c
*
* Copyright (C) 2005 Intel Corporation
* Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
* - Added _PDC for platforms with Intel CPUs
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <asm/acpi.h>
static void init_intel_pdc(struct acpi_processor *pr)
{
struct acpi_object_list *obj_list;
union acpi_object *obj;
u32 *buf;
/* allocate and initialize pdc. It will be used later. */
obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
if (!obj_list) {
printk(KERN_ERR "Memory allocation error\n");
return;
}
obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
if (!obj) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj_list);
return;
}
buf = kmalloc(12, GFP_KERNEL);
if (!buf) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj);
kfree(obj_list);
return;
}
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;
obj->buffer.pointer = (u8 *) buf;
obj_list->count = 1;
obj_list->pointer = obj;
pr->pdc = obj_list;
return;
}
/* Initialize _PDC data based on the CPU vendor */
void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
{
pr->pdc = NULL;
init_intel_pdc(pr);
return;
}
EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
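Worth noting: unlike the i386 and x86_64 variants, buf[2] here is set with |= on memory that comes straight from kmalloc() and is therefore not zeroed, so the EST capability bit is OR-ed into whatever the allocation happened to contain.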
......@@ -567,16 +567,16 @@ void __init acpi_numa_arch_fixup(void)
* success: return IRQ number (>=0)
* failure: return < 0
*/
int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
int acpi_register_gsi(u32 gsi, int triggering, int polarity)
{
if (has_8259 && gsi < 16)
return isa_irq_to_vector(gsi);
return iosapic_register_intr(gsi,
(active_high_low ==
(polarity ==
ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH :
IOSAPIC_POL_LOW,
(edge_level ==
(triggering ==
ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE :
IOSAPIC_LEVEL);
}
......
obj-$(CONFIG_IA64_ACPI_CPUFREQ) += acpi-cpufreq.o
......@@ -269,48 +269,6 @@ acpi_cpufreq_verify (
}
/*
* processor_init_pdc - let BIOS know about the SMP capabilities
* of this driver
* @perf: processor-specific acpi_io_data struct
* @cpu: CPU being initialized
*
* To avoid issues with legacy OSes, some BIOSes require to be informed of
* the SMP capabilities of OS P-state driver. Here we set the bits in _PDC
* accordingly. Actual call to _PDC is done in driver/acpi/processor.c
*/
static void
processor_init_pdc (
struct acpi_processor_performance *perf,
unsigned int cpu,
struct acpi_object_list *obj_list
)
{
union acpi_object *obj;
u32 *buf;
dprintk("processor_init_pdc\n");
perf->pdc = NULL;
/* Initialize pdc. It will be used later. */
if (!obj_list)
return;
if (!(obj_list->count && obj_list->pointer))
return;
obj = obj_list->pointer;
if ((obj->buffer.length == 12) && obj->buffer.pointer) {
buf = (u32 *)obj->buffer.pointer;
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
perf->pdc = obj_list;
}
return;
}
static int
acpi_cpufreq_cpu_init (
struct cpufreq_policy *policy)
......@@ -320,14 +278,7 @@ acpi_cpufreq_cpu_init (
struct cpufreq_acpi_io *data;
unsigned int result = 0;
union acpi_object arg0 = {ACPI_TYPE_BUFFER};
u32 arg0_buf[3];
struct acpi_object_list arg_list = {1, &arg0};
dprintk("acpi_cpufreq_cpu_init\n");
/* setup arg_list for _PDC settings */
arg0.buffer.length = 12;
arg0.buffer.pointer = (u8 *) arg0_buf;
data = kmalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
if (!data)
......@@ -337,9 +288,7 @@ acpi_cpufreq_cpu_init (
acpi_io_data[cpu] = data;
processor_init_pdc(&data->acpi_data, cpu, &arg_list);
result = acpi_processor_register_performance(&data->acpi_data, cpu);
data->acpi_data.pdc = NULL;
if (result)
goto err_free;
......
......@@ -193,12 +193,12 @@ add_io_space (struct pci_root_info *info, struct acpi_resource_address64 *addr)
goto free_resource;
}
min = addr->min_address_range;
min = addr->minimum;
max = min + addr->address_length - 1;
if (addr->attribute.io.translation_attribute == ACPI_SPARSE_TRANSLATION)
if (addr->info.io.translation_type == ACPI_SPARSE_TRANSLATION)
sparse = 1;
space_nr = new_space(addr->address_translation_offset, sparse);
space_nr = new_space(addr->translation_offset, sparse);
if (space_nr == ~0)
goto free_name;
......@@ -285,7 +285,7 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
if (addr.resource_type == ACPI_MEMORY_RANGE) {
flags = IORESOURCE_MEM;
root = &iomem_resource;
offset = addr.address_translation_offset;
offset = addr.translation_offset;
} else if (addr.resource_type == ACPI_IO_RANGE) {
flags = IORESOURCE_IO;
root = &ioport_resource;
......@@ -298,7 +298,7 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
window = &info->controller->window[info->controller->windows++];
window->resource.name = info->name;
window->resource.flags = flags;
window->resource.start = addr.min_address_range + offset;
window->resource.start = addr.minimum + offset;
window->resource.end = window->resource.start + addr.address_length - 1;
window->resource.child = NULL;
window->offset = offset;
......
obj-y := boot.o
boot-y := ../../../i386/kernel/acpi/boot.o
obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
ifneq ($(CONFIG_ACPI_PROCESSOR),)
obj-y += processor.o
endif
/*
* arch/x86_64/kernel/acpi/processor.c
*
* Copyright (C) 2005 Intel Corporation
* Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
* - Added _PDC for platforms with Intel CPUs
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <asm/acpi.h>
static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
{
struct acpi_object_list *obj_list;
union acpi_object *obj;
u32 *buf;
/* allocate and initialize pdc. It will be used later. */
obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
if (!obj_list) {
printk(KERN_ERR "Memory allocation error\n");
return;
}
obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
if (!obj) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj_list);
return;
}
buf = kmalloc(12, GFP_KERNEL);
if (!buf) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj);
kfree(obj_list);
return;
}
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;
obj->buffer.pointer = (u8 *) buf;
obj_list->count = 1;
obj_list->pointer = obj;
pr->pdc = obj_list;
return;
}
/* Initialize _PDC data based on the CPU vendor */
void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
{
unsigned int cpu = pr->id;
struct cpuinfo_x86 *c = cpu_data + cpu;
pr->pdc = NULL;
if (c->x86_vendor == X86_VENDOR_INTEL && cpu_has(c, X86_FEATURE_EST))
init_intel_pdc(pr, c);
return;
}
EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
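Note that this x86_64 variant differs from the i386 one above: the buffer carries only ACPI_PDC_EST_CAPABILITY_SMP and is built only for Intel CPUs with X86_FEATURE_EST, whereas the i386 version always advertises ACPI_PDC_C_CAPABILITY_SMP and adds the EST bit conditionally.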
......@@ -2027,7 +2027,7 @@ int __init io_apic_get_redir_entries (int ioapic)
}
int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
{
struct IO_APIC_route_entry entry;
unsigned long flags;
......@@ -2049,8 +2049,8 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
entry.delivery_mode = INT_DELIVERY_MODE;
entry.dest_mode = INT_DEST_MODE;
entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
entry.trigger = edge_level;
entry.polarity = active_high_low;
entry.trigger = triggering;
entry.polarity = polarity;
entry.mask = 1; /* Disabled (masked) */
irq = gsi_irq_sharing(irq);
......@@ -2065,9 +2065,9 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
"IRQ %d Mode:%i Active:%i)\n", ioapic,
mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
edge_level, active_high_low);
triggering, polarity);
ioapic_register_intr(irq, entry.vector, edge_level);
ioapic_register_intr(irq, entry.vector, triggering);
if (!ioapic && (irq < 16))
disable_8259A_irq(irq);
......
......@@ -915,7 +915,7 @@ void __init mp_config_acpi_legacy_irqs (void)
#define MAX_GSI_NUM 4096
int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
int mp_register_gsi(u32 gsi, int triggering, int polarity)
{
int ioapic = -1;
int ioapic_pin = 0;
......@@ -964,7 +964,7 @@ int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
if (edge_level) {
if (triggering == ACPI_LEVEL_SENSITIVE) {
/*
* For PCI devices assign IRQs in order, avoiding gaps
* due to unused I/O APIC pins.
......@@ -986,8 +986,8 @@ int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
}
io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
return gsi;
}
......
......@@ -267,7 +267,6 @@ config ACPI_DEBUG
config ACPI_EC
bool
depends on X86
default y
help
This driver is required on some systems for the proper operation of
......
......@@ -71,8 +71,8 @@ static struct acpi_driver acpi_memory_device_driver = {
struct acpi_memory_device {
acpi_handle handle;
unsigned int state; /* State of the memory device */
unsigned short cache_attribute; /* memory cache attribute */
unsigned short read_write_attribute; /* memory read/write attribute */
unsigned short caching; /* memory cache attribute */
unsigned short write_protect; /* memory read/write attribute */
u64 start_addr; /* Memory Range start physical addr */
u64 end_addr; /* Memory Range end physical addr */
};
......@@ -97,12 +97,12 @@ acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
if (ACPI_SUCCESS(status)) {
if (address64.resource_type == ACPI_MEMORY_RANGE) {
/* Populate the structure */
mem_device->cache_attribute =
address64.attribute.memory.cache_attribute;
mem_device->read_write_attribute =
address64.attribute.memory.read_write_attribute;
mem_device->start_addr = address64.min_address_range;
mem_device->end_addr = address64.max_address_range;
mem_device->caching =
address64.info.mem.caching;
mem_device->write_protect =
address64.info.mem.write_protect;
mem_device->start_addr = address64.minimum;
mem_device->end_addr = address64.maximum;
}
}
......@@ -250,7 +250,6 @@ static int acpi_memory_disable_device(struct acpi_memory_device *mem_device)
int result;
u64 start = mem_device->start_addr;
u64 len = mem_device->end_addr - start + 1;
unsigned long attr = mem_device->read_write_attribute;
ACPI_FUNCTION_TRACE("acpi_memory_disable_device");
......
......@@ -78,9 +78,9 @@ MODULE_LICENSE("GPL");
static uid_t asus_uid;
static gid_t asus_gid;
module_param(asus_uid, uint, 0);
MODULE_PARM_DESC(uid, "UID for entries in /proc/acpi/asus.\n");
MODULE_PARM_DESC(asus_uid, "UID for entries in /proc/acpi/asus.\n");
module_param(asus_gid, uint, 0);
MODULE_PARM_DESC(gid, "GID for entries in /proc/acpi/asus.\n");
MODULE_PARM_DESC(asus_gid, "GID for entries in /proc/acpi/asus.\n");
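Illustrative only (and, as the warning added below points out, these parameters are deprecated in favour of chown/chmod): loading the driver with non-default ownership of the /proc/acpi/asus entries would look like
	modprobe asus_acpi asus_uid=0 asus_gid=100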
/* For each model, all features implemented,
* those marked with R are relative to HOTK, A for absolute */
......@@ -302,7 +302,7 @@ static struct model_data model_conf[END_MODEL] = {
.brightness_set = "SPLV",
.brightness_get = "GPLV",
.display_set = "SDSP",
.display_get = "\\SSTE"},
.display_get = "\\_SB.PCI0.P0P1.VGA.GETD"},
{
.name = "M6R",
.mt_mled = "MLED",
......@@ -851,6 +851,8 @@ static int __init asus_hotk_add_fs(struct acpi_device *device)
mode = S_IFREG | S_IRUGO | S_IWUGO;
} else {
mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP;
printk(KERN_WARNING " asus_uid and asus_gid parameters are "
"deprecated, use chown and chmod instead!\n");
}
acpi_device_dir(device) = asus_proc_dir;
......@@ -987,9 +989,21 @@ static int __init asus_hotk_get_info(void)
printk(KERN_NOTICE " BSTS called, 0x%02x returned\n",
bsts_result);
/* Samsung P30 has a device with a valid _HID whose INIT does not
* return anything. Catch this one and any similar here */
if (buffer.pointer == NULL) {
/* This is unlikely with implicit return */
if (buffer.pointer == NULL)
return -EINVAL;
model = (union acpi_object *) buffer.pointer;
/*
* Samsung P30 has a device with a valid _HID whose INIT does not
* return anything. It used to be possible to catch this exception,
* but the implicit return code will now happily confuse the
* driver. We assume that every ACPI_TYPE_STRING is a valid model
* identifier but it's still possible to get completely bogus data.
*/
if (model->type == ACPI_TYPE_STRING) {
printk(KERN_NOTICE " %s model detected, ", model->string.pointer);
} else {
if (asus_info && /* Samsung P30 */
strncmp(asus_info->oem_table_id, "ODEM", 4) == 0) {
hotk->model = P30;
......@@ -1002,13 +1016,10 @@ static int __init asus_hotk_get_info(void)
"the developers with your DSDT\n");
}
hotk->methods = &model_conf[hotk->model];
return AE_OK;
}
acpi_os_free(model);
model = (union acpi_object *)buffer.pointer;
if (model->type == ACPI_TYPE_STRING) {
printk(KERN_NOTICE " %s model detected, ",
model->string.pointer);
return AE_OK;
}
hotk->model = END_MODEL;
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -128,7 +128,7 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
ACPI_IMODE_LOAD_PASS1, flags, walk_state,
&(node));
if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR(arg->common.value.string, status);
ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
return_ACPI_STATUS(status);
}
}
......@@ -232,7 +232,8 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
+ (acpi_integer) arg->common.value.size;
if (position > ACPI_UINT32_MAX) {
ACPI_REPORT_ERROR(("Bit offset within field too large (> 0xFFFFFFFF)\n"));
ACPI_ERROR((AE_INFO,
"Bit offset within field too large (> 0xFFFFFFFF)"));
return_ACPI_STATUS(AE_SUPPORT);
}
......@@ -268,8 +269,8 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
ACPI_NS_DONT_OPEN_SCOPE,
walk_state, &info->field_node);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR((char *)&arg->named.name,
status);
ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
status);
if (status != AE_ALREADY_EXISTS) {
return_ACPI_STATUS(status);
}
......@@ -293,7 +294,11 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
+ (acpi_integer) arg->common.value.size;
if (position > ACPI_UINT32_MAX) {
ACPI_REPORT_ERROR(("Field [%4.4s] bit offset too large (> 0xFFFFFFFF)\n", (char *)&info->field_node->name));
ACPI_ERROR((AE_INFO,
"Field [%4.4s] bit offset too large (> 0xFFFFFFFF)",
ACPI_CAST_PTR(char,
&info->field_node->
name)));
return_ACPI_STATUS(AE_SUPPORT);
}
......@@ -302,9 +307,9 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Invalid opcode in field list: %X\n",
arg->common.aml_opcode));
ACPI_ERROR((AE_INFO,
"Invalid opcode in field list: %X",
arg->common.aml_opcode));
return_ACPI_STATUS(AE_AML_BAD_OPCODE);
}
......@@ -349,7 +354,7 @@ acpi_ds_create_field(union acpi_parse_object *op,
ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT,
walk_state, &region_node);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR(arg->common.value.name, status);
ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
return_ACPI_STATUS(status);
}
}
......@@ -431,8 +436,8 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
ACPI_NS_ERROR_IF_FOUND,
walk_state, &node);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR((char *)&arg->named.name,
status);
ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
status);
if (status != AE_ALREADY_EXISTS) {
return_ACPI_STATUS(status);
}
......@@ -488,7 +493,7 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT,
walk_state, &region_node);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR(arg->common.value.name, status);
ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
return_ACPI_STATUS(status);
}
}
......@@ -502,7 +507,7 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
ACPI_NS_SEARCH_PARENT, walk_state,
&info.register_node);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR(arg->common.value.string, status);
ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
return_ACPI_STATUS(status);
}
......@@ -560,7 +565,7 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
ACPI_NS_SEARCH_PARENT, walk_state,
&info.register_node);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR(arg->common.value.string, status);
ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
return_ACPI_STATUS(status);
}
......@@ -573,7 +578,7 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
ACPI_NS_SEARCH_PARENT, walk_state,
&info.data_register_node);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR(arg->common.value.string, status);
ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
return_ACPI_STATUS(status);
}
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -84,7 +84,7 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
acpi_object_type type;
acpi_status status;
ACPI_FUNCTION_NAME("ds_init_one_object");
ACPI_FUNCTION_ENTRY();
/*
* We are only interested in NS nodes owned by the table that
......@@ -105,11 +105,10 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
status = acpi_ds_initialize_region(obj_handle);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Region %p [%4.4s] - Init failure, %s\n",
obj_handle,
acpi_ut_get_node_name(obj_handle),
acpi_format_exception(status)));
ACPI_EXCEPTION((AE_INFO, status,
"During Region initialization %p [%4.4s]",
obj_handle,
acpi_ut_get_node_name(obj_handle)));
}
info->op_region_count++;
......@@ -117,14 +116,6 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
case ACPI_TYPE_METHOD:
/*
* Print a dot for each method unless we are going to print
* the entire pathname
*/
if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) {
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
}
/*
* Set the execution data width (32 or 64) based upon the
* revision number of the parent ACPI table.
......@@ -134,6 +125,21 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
if (info->table_desc->pointer->revision == 1) {
node->flags |= ANOBJ_DATA_WIDTH_32;
}
#ifdef ACPI_INIT_PARSE_METHODS
/*
* Note 11/2005: Removed this code to parse all methods during table
* load because it causes problems if there are any errors during the
* parse. Also, it seems like overkill and we probably don't want to
* abort a table load because of an issue with a single method.
*/
/*
* Print a dot for each method unless we are going to print
* the entire pathname
*/
if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) {
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
}
/*
* Always parse methods to detect errors, we will delete
......@@ -141,15 +147,15 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
*/
status = acpi_ds_parse_method(obj_handle);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"\n+Method %p [%4.4s] - parse failure, %s\n",
obj_handle,
acpi_ut_get_node_name(obj_handle),
acpi_format_exception(status)));
ACPI_ERROR((AE_INFO,
"Method %p [%4.4s] - parse failure, %s",
obj_handle,
acpi_ut_get_node_name(obj_handle),
acpi_format_exception(status)));
/* This parse failed, but we will continue parsing more methods */
}
#endif
info->method_count++;
break;
......@@ -207,8 +213,7 @@ acpi_ds_initialize_objects(struct acpi_table_desc * table_desc,
status = acpi_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX,
acpi_ds_init_one_object, &info, NULL);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "walk_namespace failed, %s\n",
acpi_format_exception(status)));
ACPI_EXCEPTION((AE_INFO, status, "During walk_namespace"));
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
......
......@@ -5,7 +5,7 @@
******************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -260,9 +260,9 @@ acpi_ds_method_data_get_node(u16 opcode,
case AML_LOCAL_OP:
if (index > ACPI_METHOD_MAX_LOCAL) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Local index %d is invalid (max %d)\n",
index, ACPI_METHOD_MAX_LOCAL));
ACPI_ERROR((AE_INFO,
"Local index %d is invalid (max %d)",
index, ACPI_METHOD_MAX_LOCAL));
return_ACPI_STATUS(AE_AML_INVALID_INDEX);
}
......@@ -274,9 +274,9 @@ acpi_ds_method_data_get_node(u16 opcode,
case AML_ARG_OP:
if (index > ACPI_METHOD_MAX_ARG) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Arg index %d is invalid (max %d)\n",
index, ACPI_METHOD_MAX_ARG));
ACPI_ERROR((AE_INFO,
"Arg index %d is invalid (max %d)",
index, ACPI_METHOD_MAX_ARG));
return_ACPI_STATUS(AE_AML_INVALID_INDEX);
}
......@@ -286,8 +286,7 @@ acpi_ds_method_data_get_node(u16 opcode,
break;
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Opcode %d is invalid\n",
opcode));
ACPI_ERROR((AE_INFO, "Opcode %d is invalid", opcode));
return_ACPI_STATUS(AE_AML_BAD_OPCODE);
}
......@@ -378,8 +377,7 @@ acpi_ds_method_data_get_value(u16 opcode,
/* Validate the object descriptor */
if (!dest_desc) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Null object descriptor pointer\n"));
ACPI_ERROR((AE_INFO, "Null object descriptor pointer"));
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
......@@ -424,23 +422,24 @@ acpi_ds_method_data_get_value(u16 opcode,
switch (opcode) {
case AML_ARG_OP:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Uninitialized Arg[%d] at node %p\n",
index, node));
ACPI_ERROR((AE_INFO,
"Uninitialized Arg[%d] at node %p",
index, node));
return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG);
case AML_LOCAL_OP:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Uninitialized Local[%d] at node %p\n",
index, node));
ACPI_ERROR((AE_INFO,
"Uninitialized Local[%d] at node %p",
index, node));
return_ACPI_STATUS(AE_AML_UNINITIALIZED_LOCAL);
default:
ACPI_REPORT_ERROR(("Not Arg/Local opcode: %X\n",
opcode));
ACPI_ERROR((AE_INFO,
"Not a Arg/Local opcode: %X",
opcode));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
}
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -51,6 +51,7 @@
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsobject")
/* Local prototypes */
static acpi_status
acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
union acpi_parse_object *op,
......@@ -85,7 +86,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
*obj_desc_ptr = NULL;
if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) {
/*
* This is an named object reference. If this name was
* This is a named object reference. If this name was
* previously looked up in the namespace, it was stored in this op.
* Otherwise, go ahead and look it up now
*/
......@@ -96,18 +97,48 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
ACPI_IMODE_EXECUTE,
ACPI_NS_SEARCH_PARENT |
ACPI_NS_DONT_OPEN_SCOPE, NULL,
(struct acpi_namespace_node **)
&(op->common.node));
ACPI_CAST_INDIRECT_PTR(struct
acpi_namespace_node,
&(op->
common.
node)));
if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR(op->common.value.string,
status);
/* Check if we are resolving a named reference within a package */
if ((status == AE_NOT_FOUND)
&& (acpi_gbl_enable_interpreter_slack)
&&
((op->common.parent->common.aml_opcode ==
AML_PACKAGE_OP)
|| (op->common.parent->common.aml_opcode ==
AML_VAR_PACKAGE_OP))) {
/*
* We didn't find the target and we are populating elements
* of a package - ignore if slack enabled. Some ASL code
* contains dangling invalid references in packages and
* expects that no exception will be issued. Leave the
* element as a null element. It cannot be used, but it
* can be overwritten by subsequent ASL code - this is
* typically the case.
*/
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Ignoring unresolved reference in package [%4.4s]\n",
walk_state->
scope_info->scope.
node->name.ascii));
return_ACPI_STATUS(AE_OK);
} else {
ACPI_ERROR_NAMESPACE(op->common.value.
string, status);
}
return_ACPI_STATUS(status);
}
}
}
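The slack branch above is aimed at ASL of roughly the following (hypothetical) shape, where one package element refers to a device that may not exist on a given platform; with acpi_gbl_enable_interpreter_slack set, that element is left as a null entry instead of failing the whole object build:
	/*
	 *	Name (PKG0, Package () {
	 *		\_SB.DEV0,
	 *		\_SB.DEV1	// may be absent: previously raised AE_NOT_FOUND
	 *	})
	 */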
/* Create and init the internal ACPI object */
/* Create and init a new internal ACPI object */
obj_desc = acpi_ut_create_internal_object((acpi_ps_get_opcode_info
(op->common.aml_opcode))->
......@@ -157,13 +188,13 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
ACPI_FUNCTION_TRACE("ds_build_internal_buffer_obj");
/*
* If we are evaluating a Named buffer object "Name (xxxx, Buffer)".
* The buffer object already exists (from the NS node), otherwise it must
* be created.
*/
obj_desc = *obj_desc_ptr;
if (obj_desc) {
/*
* We are evaluating a Named buffer object "Name (xxxx, Buffer)".
* The buffer object already exists (from the NS node)
*/
} else {
if (!obj_desc) {
/* Create a new buffer object */
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER);
......@@ -183,10 +214,9 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
byte_list = arg->named.next;
if (byte_list) {
if (byte_list->common.aml_opcode != AML_INT_BYTELIST_OP) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Expecting bytelist, got AML opcode %X in op %p\n",
byte_list->common.aml_opcode,
byte_list));
ACPI_ERROR((AE_INFO,
"Expecting bytelist, got AML opcode %X in op %p",
byte_list->common.aml_opcode, byte_list));
acpi_ut_remove_reference(obj_desc);
return (AE_TYPE);
......@@ -259,7 +289,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
union acpi_operand_object *obj_desc = NULL;
u32 package_list_length;
acpi_status status = AE_OK;
u32 i;
acpi_native_uint i;
ACPI_FUNCTION_TRACE("ds_build_internal_package_obj");
......@@ -271,13 +301,12 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
parent = parent->common.parent;
}
/*
* If we are evaluating a Named package object "Name (xxxx, Package)",
* the package object already exists, otherwise it must be created.
*/
obj_desc = *obj_desc_ptr;
if (obj_desc) {
/*
* We are evaluating a Named package object "Name (xxxx, Package)".
* Get the existing package object from the NS node
*/
} else {
if (!obj_desc) {
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_PACKAGE);
*obj_desc_ptr = obj_desc;
if (!obj_desc) {
......@@ -291,11 +320,9 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
/* Count the number of items in the package list */
package_list_length = 0;
arg = op->common.value.arg;
arg = arg->common.next;
while (arg) {
package_list_length++;
for (package_list_length = 0; arg; package_list_length++) {
arg = arg->common.next;
}
......@@ -322,12 +349,11 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
}
/*
* Now init the elements of the package
* Initialize all elements of the package
*/
i = 0;
arg = op->common.value.arg;
arg = arg->common.next;
while (arg) {
for (i = 0; arg; i++) {
if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
/* Object (package or buffer) is already built */
......@@ -340,8 +366,6 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
package.
elements[i]);
}
i++;
arg = arg->common.next;
}
......@@ -518,9 +542,9 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Unknown constant opcode %X\n",
opcode));
ACPI_ERROR((AE_INFO,
"Unknown constant opcode %X",
opcode));
status = AE_AML_OPERAND_TYPE;
break;
}
......@@ -535,9 +559,8 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
break;
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Unknown Integer type %X\n",
op_info->type));
ACPI_ERROR((AE_INFO, "Unknown Integer type %X",
op_info->type));
status = AE_AML_OPERAND_TYPE;
break;
}
......@@ -615,9 +638,8 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Unimplemented data type: %X\n",
ACPI_GET_OBJECT_TYPE(obj_desc)));
ACPI_ERROR((AE_INFO, "Unimplemented data type: %X",
ACPI_GET_OBJECT_TYPE(obj_desc)));
status = AE_AML_OPERAND_TYPE;
break;
......
......@@ -6,7 +6,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -245,7 +245,9 @@ acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc)
node = obj_desc->buffer.node;
if (!node) {
ACPI_REPORT_ERROR(("No pointer back to NS node in buffer obj %p\n", obj_desc));
ACPI_ERROR((AE_INFO,
"No pointer back to NS node in buffer obj %p",
obj_desc));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
......@@ -287,8 +289,9 @@ acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc)
node = obj_desc->package.node;
if (!node) {
ACPI_REPORT_ERROR(("No pointer back to NS node in package %p\n",
obj_desc));
ACPI_ERROR((AE_INFO,
"No pointer back to NS node in package %p",
obj_desc));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
......@@ -413,9 +416,9 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
/* Host object must be a Buffer */
if (ACPI_GET_OBJECT_TYPE(buffer_desc) != ACPI_TYPE_BUFFER) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Target of Create Field is not a Buffer object - %s\n",
acpi_ut_get_object_type_name(buffer_desc)));
ACPI_ERROR((AE_INFO,
"Target of Create Field is not a Buffer object - %s",
acpi_ut_get_object_type_name(buffer_desc)));
status = AE_AML_OPERAND_TYPE;
goto cleanup;
......@@ -427,10 +430,10 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
* after resolution in acpi_ex_resolve_operands().
*/
if (ACPI_GET_DESCRIPTOR_TYPE(result_desc) != ACPI_DESC_TYPE_NAMED) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"(%s) destination not a NS Node [%s]\n",
acpi_ps_get_opcode_name(aml_opcode),
acpi_ut_get_descriptor_name(result_desc)));
ACPI_ERROR((AE_INFO,
"(%s) destination not a NS Node [%s]",
acpi_ps_get_opcode_name(aml_opcode),
acpi_ut_get_descriptor_name(result_desc)));
status = AE_AML_OPERAND_TYPE;
goto cleanup;
......@@ -453,8 +456,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
/* Must have a valid (>0) bit count */
if (bit_count == 0) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Attempt to create_field of length 0\n"));
ACPI_ERROR((AE_INFO,
"Attempt to create_field of length zero"));
status = AE_AML_OPERAND_VALUE;
goto cleanup;
}
......@@ -507,9 +510,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Unknown field creation opcode %02x\n",
aml_opcode));
ACPI_ERROR((AE_INFO,
"Unknown field creation opcode %02x", aml_opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
}
......@@ -517,13 +519,12 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
/* Entire field must fit within the current length of the buffer */
if ((bit_offset + bit_count) > (8 * (u32) buffer_desc->buffer.length)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Field [%4.4s] size %d exceeds Buffer [%4.4s] size %d (bits)\n",
acpi_ut_get_node_name(result_desc),
bit_offset + bit_count,
acpi_ut_get_node_name(buffer_desc->buffer.
node),
8 * (u32) buffer_desc->buffer.length));
ACPI_ERROR((AE_INFO,
"Field [%4.4s] at %d exceeds Buffer [%4.4s] size %d (bits)",
acpi_ut_get_node_name(result_desc),
bit_offset + bit_count,
acpi_ut_get_node_name(buffer_desc->buffer.node),
8 * (u32) buffer_desc->buffer.length));
status = AE_AML_BUFFER_LIMIT;
goto cleanup;
}
......@@ -629,9 +630,9 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
"after acpi_ex_resolve_operands");
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "(%s) bad operand(s) (%X)\n",
acpi_ps_get_opcode_name(op->common.
aml_opcode), status));
ACPI_ERROR((AE_INFO, "(%s) bad operand(s) (%X)",
acpi_ps_get_opcode_name(op->common.aml_opcode),
status));
return_ACPI_STATUS(status);
}
......@@ -1155,9 +1156,8 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Unknown control opcode=%X Op=%p\n",
op->common.aml_opcode, op));
ACPI_ERROR((AE_INFO, "Unknown control opcode=%X Op=%p",
op->common.aml_opcode, op));
status = AE_AML_BAD_OPCODE;
break;
......
......@@ -5,7 +5,7 @@
******************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -176,8 +176,8 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
/* Must have both an Op and a Result Object */
if (!op) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Null Op\n"));
return_VALUE(TRUE);
ACPI_ERROR((AE_INFO, "Null Op"));
return_UINT8(TRUE);
}
/*
......@@ -208,7 +208,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
"At Method level, result of [%s] not used\n",
acpi_ps_get_opcode_name(op->common.
aml_opcode)));
return_VALUE(FALSE);
return_UINT8(FALSE);
}
/* Get info on the parent. The root_op is AML_SCOPE */
......@@ -216,9 +216,8 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
parent_info =
acpi_ps_get_opcode_info(op->common.parent->common.aml_opcode);
if (parent_info->class == AML_CLASS_UNKNOWN) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Unknown parent opcode. Op=%p\n", op));
return_VALUE(FALSE);
ACPI_ERROR((AE_INFO, "Unknown parent opcode Op=%p", op));
return_UINT8(FALSE);
}
/*
......@@ -304,7 +303,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
acpi_ps_get_opcode_name(op->common.parent->common.
aml_opcode), op));
return_VALUE(TRUE);
return_UINT8(TRUE);
result_not_used:
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
......@@ -313,7 +312,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
acpi_ps_get_opcode_name(op->common.parent->common.
aml_opcode), op));
return_VALUE(FALSE);
return_UINT8(FALSE);
}
/*******************************************************************************
......@@ -344,7 +343,7 @@ acpi_ds_delete_result_if_not_used(union acpi_parse_object *op,
ACPI_FUNCTION_TRACE_PTR("ds_delete_result_if_not_used", result_obj);
if (!op) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Null Op\n"));
ACPI_ERROR((AE_INFO, "Null Op"));
return_VOID;
}
......@@ -567,7 +566,7 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
}
if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR(name_string, status);
ACPI_ERROR_NAMESPACE(name_string, status);
}
}
......@@ -616,7 +615,7 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
if (op_info->flags & AML_HAS_RETVAL) {
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"Argument previously created, already stacked \n"));
"Argument previously created, already stacked\n"));
ACPI_DEBUGGER_EXEC(acpi_db_display_argument_object
(walk_state->
......@@ -635,10 +634,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
* Only error is underflow, and this indicates
* a missing or null operand!
*/
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Missing or null operand, %s\n",
acpi_format_exception
(status)));
ACPI_EXCEPTION((AE_INFO, status,
"Missing or null operand"));
return_ACPI_STATUS(status);
}
} else {
......@@ -730,7 +727,7 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
*/
(void)acpi_ds_obj_stack_pop_and_delete(arg_count, walk_state);
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "While creating Arg %d - %s\n",
(arg_count + 1), acpi_format_exception(status)));
ACPI_EXCEPTION((AE_INFO, status, "While creating Arg %d",
(arg_count + 1)));
return_ACPI_STATUS(status);
}
......@@ -6,7 +6,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -100,9 +100,8 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
if (result_obj) {
status = acpi_ds_result_pop(&obj_desc, walk_state);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Could not get result from predicate evaluation, %s\n",
acpi_format_exception(status)));
ACPI_EXCEPTION((AE_INFO, status,
"Could not get result from predicate evaluation"));
return_ACPI_STATUS(status);
}
......@@ -123,9 +122,9 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
}
if (!obj_desc) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"No predicate obj_desc=%p State=%p\n",
obj_desc, walk_state));
ACPI_ERROR((AE_INFO,
"No predicate obj_desc=%p State=%p",
obj_desc, walk_state));
return_ACPI_STATUS(AE_AML_NO_OPERAND);
}
......@@ -140,10 +139,10 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
}
if (ACPI_GET_OBJECT_TYPE(local_obj_desc) != ACPI_TYPE_INTEGER) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Bad predicate (not an integer) obj_desc=%p State=%p Type=%X\n",
obj_desc, walk_state,
ACPI_GET_OBJECT_TYPE(obj_desc)));
ACPI_ERROR((AE_INFO,
"Bad predicate (not an integer) obj_desc=%p State=%p Type=%X",
obj_desc, walk_state,
ACPI_GET_OBJECT_TYPE(obj_desc)));
status = AE_AML_OPERAND_TYPE;
goto cleanup;
......@@ -314,12 +313,13 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
case AML_CLASS_EXECUTE:
case AML_CLASS_CREATE:
/*
* Most operators with arguments.
* Start a new result/operand state
*/
status = acpi_ds_result_stack_push(walk_state);
if (walk_state->opcode != AML_CREATE_FIELD_OP) {
status = acpi_ds_result_stack_push(walk_state);
}
break;
default:
......@@ -361,8 +361,8 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
op_class = walk_state->op_info->class;
if (op_class == AML_CLASS_UNKNOWN) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown opcode %X\n",
op->common.aml_opcode));
ACPI_ERROR((AE_INFO, "Unknown opcode %X",
op->common.aml_opcode));
return_ACPI_STATUS(AE_NOT_IMPLEMENTED);
}
......@@ -452,12 +452,10 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
walk_state->operands[1]->reference.offset)) {
status = AE_OK;
} else {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"[%s]: Could not resolve operands, %s\n",
acpi_ps_get_opcode_name
(walk_state->opcode),
acpi_format_exception
(status)));
ACPI_EXCEPTION((AE_INFO, status,
"While resolving operands for [%s]",
acpi_ps_get_opcode_name
(walk_state->opcode)));
}
}
......@@ -676,8 +674,8 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
case AML_TYPE_UNDEFINED:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Undefined opcode type Op=%p\n", op));
ACPI_ERROR((AE_INFO,
"Undefined opcode type Op=%p", op));
return_ACPI_STATUS(AE_NOT_IMPLEMENTED);
case AML_TYPE_BOGUS:
......@@ -689,10 +687,10 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Unimplemented opcode, class=%X type=%X Opcode=%X Op=%p\n",
op_class, op_type,
op->common.aml_opcode, op));
ACPI_ERROR((AE_INFO,
"Unimplemented opcode, class=%X type=%X Opcode=%X Op=%p",
op_class, op_type, op->common.aml_opcode,
op));
status = AE_NOT_IMPLEMENTED;
break;
......@@ -723,20 +721,6 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
cleanup:
/* Invoke exception handler on error */
if (ACPI_FAILURE(status) &&
acpi_gbl_exception_handler && !(status & AE_CODE_CONTROL)) {
acpi_ex_exit_interpreter();
status = acpi_gbl_exception_handler(status,
walk_state->method_node->
name.integer,
walk_state->opcode,
walk_state->aml_offset,
NULL);
(void)acpi_ex_enter_interpreter();
}
if (walk_state->result_obj) {
/* Break to debugger to display result */
......@@ -758,18 +742,14 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
}
#endif
/* Always clear the object stack */
walk_state->num_operands = 0;
#ifdef ACPI_DISASSEMBLER
/* On error, display method locals/args */
/* Invoke exception handler on error */
if (ACPI_FAILURE(status)) {
acpi_dm_dump_method_info(status, walk_state, op);
status = acpi_ds_method_error(status, walk_state);
}
#endif
/* Always clear the object stack */
walk_state->num_operands = 0;
return_ACPI_STATUS(status);
}
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -107,14 +107,14 @@ acpi_ds_scope_stack_push(struct acpi_namespace_node *node,
if (!node) {
/* Invalid scope */
ACPI_REPORT_ERROR(("ds_scope_stack_push: null scope passed\n"));
ACPI_ERROR((AE_INFO, "Null scope parameter"));
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
/* Make sure object type is valid */
if (!acpi_ut_valid_object_type(type)) {
ACPI_REPORT_WARNING(("ds_scope_stack_push: Invalid object type: 0x%X\n", type));
ACPI_WARNING((AE_INFO, "Invalid object type: 0x%X", type));
}
/* Allocate a new scope object */
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -92,26 +92,23 @@ acpi_ds_result_remove(union acpi_operand_object **object,
state = walk_state->results;
if (!state) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"No result object pushed! State=%p\n",
walk_state));
ACPI_ERROR((AE_INFO, "No result object pushed! State=%p",
walk_state));
return (AE_NOT_EXIST);
}
if (index >= ACPI_OBJ_MAX_OPERAND) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Index out of range: %X State=%p Num=%X\n",
index, walk_state,
state->results.num_results));
ACPI_ERROR((AE_INFO,
"Index out of range: %X State=%p Num=%X",
index, walk_state, state->results.num_results));
}
/* Check for a valid result object */
if (!state->results.obj_desc[index]) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Null operand! State=%p #Ops=%X, Index=%X\n",
walk_state, state->results.num_results,
index));
ACPI_ERROR((AE_INFO,
"Null operand! State=%p #Ops=%X, Index=%X",
walk_state, state->results.num_results, index));
return (AE_AML_NO_RETURN_VALUE);
}
......@@ -163,9 +160,8 @@ acpi_ds_result_pop(union acpi_operand_object ** object,
}
if (!state->results.num_results) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Result stack is empty! State=%p\n",
walk_state));
ACPI_ERROR((AE_INFO, "Result stack is empty! State=%p",
walk_state));
return (AE_AML_NO_RETURN_VALUE);
}
......@@ -192,8 +188,7 @@ acpi_ds_result_pop(union acpi_operand_object ** object,
}
}
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"No result objects! State=%p\n", walk_state));
ACPI_ERROR((AE_INFO, "No result objects! State=%p", walk_state));
return (AE_AML_NO_RETURN_VALUE);
}
......@@ -222,15 +217,14 @@ acpi_ds_result_pop_from_bottom(union acpi_operand_object ** object,
state = walk_state->results;
if (!state) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Warning: No result object pushed! State=%p\n",
walk_state));
ACPI_ERROR((AE_INFO,
"No result object pushed! State=%p", walk_state));
return (AE_NOT_EXIST);
}
if (!state->results.num_results) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"No result objects! State=%p\n", walk_state));
ACPI_ERROR((AE_INFO, "No result objects! State=%p",
walk_state));
return (AE_AML_NO_RETURN_VALUE);
}
......@@ -250,10 +244,10 @@ acpi_ds_result_pop_from_bottom(union acpi_operand_object ** object,
/* Check for a valid result object */
if (!*object) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Null operand! State=%p #Ops=%X Index=%X\n",
walk_state, state->results.num_results,
(u32) index));
ACPI_ERROR((AE_INFO,
"Null operand! State=%p #Ops=%X Index=%X",
walk_state, state->results.num_results,
(u32) index));
return (AE_AML_NO_RETURN_VALUE);
}
......@@ -288,23 +282,21 @@ acpi_ds_result_push(union acpi_operand_object * object,
state = walk_state->results;
if (!state) {
ACPI_REPORT_ERROR(("No result stack frame during push\n"));
ACPI_ERROR((AE_INFO, "No result stack frame during push"));
return (AE_AML_INTERNAL);
}
if (state->results.num_results == ACPI_OBJ_NUM_OPERANDS) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Result stack overflow: Obj=%p State=%p Num=%X\n",
object, walk_state,
state->results.num_results));
ACPI_ERROR((AE_INFO,
"Result stack overflow: Obj=%p State=%p Num=%X",
object, walk_state, state->results.num_results));
return (AE_STACK_OVERFLOW);
}
if (!object) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Null Object! Obj=%p State=%p Num=%X\n",
object, walk_state,
state->results.num_results));
ACPI_ERROR((AE_INFO,
"Null Object! Obj=%p State=%p Num=%X",
object, walk_state, state->results.num_results));
return (AE_BAD_PARAMETER);
}
......@@ -413,10 +405,9 @@ acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state)
/* Check for stack overflow */
if (walk_state->num_operands >= ACPI_OBJ_NUM_OPERANDS) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"overflow! Obj=%p State=%p #Ops=%X\n",
object, walk_state,
walk_state->num_operands));
ACPI_ERROR((AE_INFO,
"Object stack overflow! Obj=%p State=%p #Ops=%X",
object, walk_state, walk_state->num_operands));
return (AE_STACK_OVERFLOW);
}
......@@ -460,10 +451,10 @@ acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)
/* Check for stack underflow */
if (walk_state->num_operands == 0) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Underflow! Count=%X State=%p #Ops=%X\n",
pop_count, walk_state,
walk_state->num_operands));
ACPI_ERROR((AE_INFO,
"Object stack underflow! Count=%X State=%p #Ops=%X",
pop_count, walk_state,
walk_state->num_operands));
return (AE_STACK_UNDERFLOW);
}
......@@ -506,10 +497,10 @@ acpi_ds_obj_stack_pop_and_delete(u32 pop_count,
/* Check for stack underflow */
if (walk_state->num_operands == 0) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Underflow! Count=%X State=%p #Ops=%X\n",
pop_count, walk_state,
walk_state->num_operands));
ACPI_ERROR((AE_INFO,
"Object stack underflow! Count=%X State=%p #Ops=%X",
pop_count, walk_state,
walk_state->num_operands));
return (AE_STACK_UNDERFLOW);
}
......@@ -826,16 +817,14 @@ void acpi_ds_delete_walk_state(struct acpi_walk_state *walk_state)
}
if (walk_state->data_type != ACPI_DESC_TYPE_WALK) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"%p is not a valid walk state\n",
walk_state));
ACPI_ERROR((AE_INFO, "%p is not a valid walk state",
walk_state));
return;
}
if (walk_state->parser_state.scope) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"%p walk still has a scope list\n",
walk_state));
ACPI_ERROR((AE_INFO, "%p walk still has a scope list",
walk_state));
}
/* Always must free any linked control states */
......@@ -894,25 +883,24 @@ acpi_ds_result_insert(void *object,
state = walk_state->results;
if (!state) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"No result object pushed! State=%p\n",
walk_state));
ACPI_ERROR((AE_INFO, "No result object pushed! State=%p",
walk_state));
return (AE_NOT_EXIST);
}
if (index >= ACPI_OBJ_NUM_OPERANDS) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Index out of range: %X Obj=%p State=%p Num=%X\n",
index, object, walk_state,
state->results.num_results));
ACPI_ERROR((AE_INFO,
"Index out of range: %X Obj=%p State=%p Num=%X",
index, object, walk_state,
state->results.num_results));
return (AE_BAD_PARAMETER);
}
if (!object) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Null Object! Index=%X Obj=%p State=%p Num=%X\n",
index, object, walk_state,
state->results.num_results));
ACPI_ERROR((AE_INFO,
"Null Object! Index=%X Obj=%p State=%p Num=%X",
index, object, walk_state,
state->results.num_results));
return (AE_BAD_PARAMETER);
}
......@@ -986,9 +974,9 @@ acpi_ds_obj_stack_pop_object(union acpi_operand_object **object,
/* Check for stack underflow */
if (walk_state->num_operands == 0) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Missing operand/stack empty! State=%p #Ops=%X\n",
walk_state, walk_state->num_operands));
ACPI_ERROR((AE_INFO,
"Missing operand/stack empty! State=%p #Ops=%X",
walk_state, walk_state->num_operands));
*object = NULL;
return (AE_AML_NO_OPERAND);
}
......@@ -1000,9 +988,9 @@ acpi_ds_obj_stack_pop_object(union acpi_operand_object **object,
/* Check for a valid operand */
if (!walk_state->operands[walk_state->num_operands]) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Null operand! State=%p #Ops=%X\n",
walk_state, walk_state->num_operands));
ACPI_ERROR((AE_INFO,
"Null operand! State=%p #Ops=%X",
walk_state, walk_state->num_operands));
*object = NULL;
return (AE_AML_NO_OPERAND);
}
......
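The hunks above (and most of this commit) replace ACPI_DEBUG_PRINT((ACPI_DB_ERROR, ...)) and ACPI_REPORT_ERROR with the ACPI_ERROR/ACPI_WARNING/ACPI_EXCEPTION family, which take AE_INFO so the message is tagged with its origin. A minimal sketch of the double-parenthesized error-macro idiom follows; it is an illustrative approximation only, not ACPICA's actual definitions, and AE_INFO here is an assumed stand-in for module name plus line number.

/* Illustrative approximation of the double-parenthesized error macro;
 * ACPICA's real ACPI_ERROR/AE_INFO live in the ACPI headers and differ
 * in detail. */
#include <stdarg.h>
#include <stdio.h>

#define AE_INFO  __FILE__, __LINE__        /* assumed module-name + line stand-in */

#define ACPI_ERROR(plist)  acpi_error plist /* extra parens forward a whole arg list */

static void acpi_error(const char *module, int line, const char *fmt, ...)
{
        va_list ap;

        fprintf(stderr, "ACPI Error (%s-%d): ", module, line);
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
        fputc('\n', stderr);               /* newline appended centrally */
}

static void example(void *walk_state)
{
        ACPI_ERROR((AE_INFO, "No result object pushed! State=%p", walk_state));
}

The extra parentheses let a single macro forward a variable argument list without C99 variadic macros, and the newline is appended by the helper, which is why the trailing "\n" disappears from the converted format strings throughout this diff.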
This diff is collapsed.
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -73,7 +73,7 @@ acpi_status acpi_ev_initialize_events(void)
/* Make sure we have ACPI tables */
if (!acpi_gbl_DSDT) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN, "No ACPI tables present!\n"));
ACPI_WARNING((AE_INFO, "No ACPI tables present!"));
return_ACPI_STATUS(AE_NO_ACPI_TABLES);
}
......@@ -84,20 +84,63 @@ acpi_status acpi_ev_initialize_events(void)
*/
status = acpi_ev_fixed_event_initialize();
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Unable to initialize fixed events, %s\n",
acpi_format_exception(status)));
ACPI_EXCEPTION((AE_INFO, status,
"Unable to initialize fixed events"));
return_ACPI_STATUS(status);
}
status = acpi_ev_gpe_initialize();
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Unable to initialize general purpose events, %s\n", acpi_format_exception(status)));
ACPI_EXCEPTION((AE_INFO, status,
"Unable to initialize general purpose events"));
return_ACPI_STATUS(status);
}
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_install_fadt_gpes
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Completes initialization of the FADT-defined GPE blocks
* (0 and 1). This causes the _PRW methods to be run, so the HW
* must be fully initialized at this point, including global lock
* support.
*
******************************************************************************/
acpi_status acpi_ev_install_fadt_gpes(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE("ev_install_fadt_gpes");
/* Namespace must be locked */
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
return (status);
}
/* FADT GPE Block 0 */
(void)acpi_ev_initialize_gpe_block(acpi_gbl_fadt_gpe_device,
acpi_gbl_gpe_fadt_blocks[0]);
/* FADT GPE Block 1 */
(void)acpi_ev_initialize_gpe_block(acpi_gbl_fadt_gpe_device,
acpi_gbl_gpe_fadt_blocks[1]);
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(AE_OK);
}
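acpi_ev_install_fadt_gpes() is deferred until the namespace is loaded because finishing a GPE block evaluates the _PRW wake methods. As a hedged illustration of what a _PRW evaluation boils down to per device (field names here are hypothetical, not ACPICA structures):

/* Hypothetical sketch of the data a _PRW evaluation yields: the first
 * package element identifies the wake GPE (an index, or a GPE block
 * device plus index), the second is the deepest sleep state the wake
 * works from. Names are illustrative only. */
struct prw_wake_info {
        unsigned int gpe_number;       /* GPE the device asserts to wake the system */
        unsigned int deepest_sleep;    /* deepest S-state from which wake is possible */
};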
/*******************************************************************************
*
* FUNCTION: acpi_ev_install_xrupt_handlers
......@@ -120,7 +163,8 @@ acpi_status acpi_ev_install_xrupt_handlers(void)
status = acpi_ev_install_sci_handler();
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Unable to install System Control Interrupt Handler, %s\n", acpi_format_exception(status)));
ACPI_EXCEPTION((AE_INFO, status,
"Unable to install System Control Interrupt handler"));
return_ACPI_STATUS(status);
}
......@@ -128,7 +172,8 @@ acpi_status acpi_ev_install_xrupt_handlers(void)
status = acpi_ev_init_global_lock_handler();
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Unable to initialize Global Lock handler, %s\n", acpi_format_exception(status)));
ACPI_EXCEPTION((AE_INFO, status,
"Unable to initialize Global Lock handler"));
return_ACPI_STATUS(status);
}
......@@ -262,7 +307,9 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
enable_register_id, 0,
ACPI_MTX_DO_NOT_LOCK);
ACPI_REPORT_ERROR(("No installed handler for fixed event [%08X]\n", event));
ACPI_ERROR((AE_INFO,
"No installed handler for fixed event [%08X]",
event));
return (ACPI_INTERRUPT_NOT_HANDLED);
}
......
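acpi_ev_fixed_event_dispatch() above now reports a missing handler with ACPI_ERROR after disabling the event; the underlying dispatch pattern is a small per-event table of handler/context pairs. A hedged sketch of that pattern, using illustrative types rather than ACPICA's:

/* Generic sketch of fixed-event dispatch; names and sizes are illustrative. */
typedef unsigned int u32;
typedef u32 (*fixed_event_handler)(void *context);

struct fixed_event_slot {
        fixed_event_handler handler;
        void *context;
};

#define NUM_FIXED_EVENTS 5
static struct fixed_event_slot fixed_events[NUM_FIXED_EVENTS];

static u32 dispatch_fixed_event(u32 event)
{
        /* With no handler the event must be disabled (as the hunk above
         * does) or the level-triggered SCI would fire forever. */
        if (event >= NUM_FIXED_EVENTS || !fixed_events[event].handler)
                return 0;              /* not handled */

        return fixed_events[event].handler(fixed_events[event].context);
}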
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -372,14 +372,14 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
{
acpi_status status;
struct acpi_gpe_block_info *gpe_block;
struct acpi_gpe_register_info *gpe_register_info;
u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
u8 enabled_status_byte;
struct acpi_gpe_register_info *gpe_register_info;
u32 status_reg;
u32 enable_reg;
u32 flags;
acpi_status status;
struct acpi_gpe_block_info *gpe_block;
acpi_cpu_flags flags;
acpi_native_uint i;
acpi_native_uint j;
......@@ -546,7 +546,11 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
status = acpi_ns_evaluate_by_handle(&info);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("%s while evaluating method [%4.4s] for GPE[%2X]\n", acpi_format_exception(status), acpi_ut_get_node_name(local_gpe_event_info.dispatch.method_node), gpe_number));
ACPI_EXCEPTION((AE_INFO, status,
"While evaluating method [%4.4s] for GPE[%2X]",
acpi_ut_get_node_name
(local_gpe_event_info.dispatch.
method_node), gpe_number));
}
}
......@@ -599,8 +603,10 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
ACPI_GPE_EDGE_TRIGGERED) {
status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("acpi_ev_gpe_dispatch: %s, Unable to clear GPE[%2X]\n", acpi_format_exception(status), gpe_number));
return_VALUE(ACPI_INTERRUPT_NOT_HANDLED);
ACPI_EXCEPTION((AE_INFO, status,
"Unable to clear GPE[%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
}
......@@ -637,8 +643,10 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
ACPI_GPE_LEVEL_TRIGGERED) {
status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("acpi_ev_gpe_dispatch: %s, Unable to clear GPE[%2X]\n", acpi_format_exception(status), gpe_number));
return_VALUE(ACPI_INTERRUPT_NOT_HANDLED);
ACPI_EXCEPTION((AE_INFO, status,
"Unable to clear GPE[%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
}
break;
......@@ -651,8 +659,10 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
*/
status = acpi_ev_disable_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("acpi_ev_gpe_dispatch: %s, Unable to disable GPE[%2X]\n", acpi_format_exception(status), gpe_number));
return_VALUE(ACPI_INTERRUPT_NOT_HANDLED);
ACPI_EXCEPTION((AE_INFO, status,
"Unable to disable GPE[%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
/*
......@@ -663,7 +673,9 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
acpi_ev_asynch_execute_gpe_method,
gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("acpi_ev_gpe_dispatch: %s, Unable to queue handler for GPE[%2X] - event disabled\n", acpi_format_exception(status), gpe_number));
ACPI_EXCEPTION((AE_INFO, status,
"Unable to queue handler for GPE[%2X] - event disabled",
gpe_number));
}
break;
......@@ -671,7 +683,9 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
/* No handler or method to run! */
ACPI_REPORT_ERROR(("acpi_ev_gpe_dispatch: No handler or method for GPE[%2X], disabling event\n", gpe_number));
ACPI_ERROR((AE_INFO,
"No handler or method for GPE[%2X], disabling event",
gpe_number));
/*
* Disable the GPE. The GPE will remain disabled until the ACPI
......@@ -679,13 +693,15 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
*/
status = acpi_ev_disable_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("acpi_ev_gpe_dispatch: %s, Unable to disable GPE[%2X]\n", acpi_format_exception(status), gpe_number));
return_VALUE(ACPI_INTERRUPT_NOT_HANDLED);
ACPI_EXCEPTION((AE_INFO, status,
"Unable to disable GPE[%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
break;
}
return_VALUE(ACPI_INTERRUPT_HANDLED);
return_UINT32(ACPI_INTERRUPT_HANDLED);
}
#ifdef ACPI_GPE_NOTIFY_CHECK
......@@ -722,7 +738,9 @@ acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info)
acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE);
ACPI_REPORT_INFO(("GPE %p was updated from wake/run to wake-only\n", gpe_event_info));
ACPI_INFO((AE_INFO,
"GPE %p was updated from wake/run to wake-only",
gpe_event_info));
/* This was a wake-only GPE */
......
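In acpi_ev_gpe_dispatch() above, the GPE status bit is cleared before the work for edge-triggered GPEs and only after the handler or method completes for level-triggered ones; clearing a level GPE too early would simply re-latch it. A minimal sketch of that general ordering, with hypothetical helper callbacks:

/* Sketch of edge/level acknowledge ordering; the helpers are hypothetical. */
enum gpe_trigger { GPE_EDGE, GPE_LEVEL };

static void handle_gpe(enum gpe_trigger trigger,
                       void (*clear_status)(void),
                       void (*run_handler_or_method)(void))
{
        if (trigger == GPE_EDGE)
                clear_status();        /* ack first, the event is latched */

        run_handler_or_method();       /* may queue async AML method execution */

        if (trigger == GPE_LEVEL)
                clear_status();        /* ack only once the source is quiet */
}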
This diff is collapsed.
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -303,7 +303,8 @@ static void ACPI_SYSTEM_XFACE acpi_ev_global_lock_thread(void *context)
acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore,
acpi_gbl_global_lock_thread_count);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Could not signal Global Lock semaphore\n"));
ACPI_ERROR((AE_INFO,
"Could not signal Global Lock semaphore"));
}
}
}
......@@ -344,7 +345,8 @@ static u32 acpi_ev_global_lock_handler(void *context)
acpi_ev_global_lock_thread,
context);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Could not queue Global Lock thread, %s\n", acpi_format_exception(status)));
ACPI_EXCEPTION((AE_INFO, status,
"Could not queue Global Lock thread"));
return (ACPI_INTERRUPT_NOT_HANDLED);
}
......@@ -384,7 +386,8 @@ acpi_status acpi_ev_init_global_lock_handler(void)
* with an error.
*/
if (status == AE_NO_HARDWARE_RESPONSE) {
ACPI_REPORT_ERROR(("No response from Global Lock hardware, disabling lock\n"));
ACPI_ERROR((AE_INFO,
"No response from Global Lock hardware, disabling lock"));
acpi_gbl_global_lock_present = FALSE;
status = AE_OK;
......@@ -480,7 +483,8 @@ acpi_status acpi_ev_release_global_lock(void)
ACPI_FUNCTION_TRACE("ev_release_global_lock");
if (!acpi_gbl_global_lock_thread_count) {
ACPI_REPORT_WARNING(("Cannot release HW Global Lock, it has not been acquired\n"));
ACPI_WARNING((AE_INFO,
"Cannot release HW Global Lock, it has not been acquired"));
return_ACPI_STATUS(AE_NOT_ACQUIRED);
}
......@@ -542,9 +546,9 @@ void acpi_ev_terminate(void)
for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
status = acpi_disable_event((u32) i, 0);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Could not disable fixed event %d\n",
(u32) i));
ACPI_ERROR((AE_INFO,
"Could not disable fixed event %d",
(u32) i));
}
}
......@@ -556,8 +560,7 @@ void acpi_ev_terminate(void)
status = acpi_ev_remove_sci_handler();
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Could not remove SCI handler\n"));
ACPI_ERROR((AE_INFO, "Could not remove SCI handler"));
}
}
......@@ -570,8 +573,7 @@ void acpi_ev_terminate(void)
if (acpi_gbl_original_mode == ACPI_SYS_MODE_LEGACY) {
status = acpi_disable();
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN,
"acpi_disable failed\n"));
ACPI_WARNING((AE_INFO, "acpi_disable failed"));
}
}
return_VOID;
......
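The global-lock hunks above (acpi_ev_global_lock_handler/thread) deal with the case where the lock was busy and the firmware later signals release via an SCI. For context, the ACPI specification's acquire protocol on the FACS lock dword looks roughly like the sketch below; this is the spec algorithm in plain C, not the kernel's arch-specific ACPI_ACQUIRE_GLOBAL_LOCK macro.

/* Sketch of the ACPI global-lock acquire protocol (bit 0 = pending,
 * bit 1 = owned); the kernel implements this in an arch-specific macro. */
#include <stdbool.h>
#include <stdint.h>

#define GL_PENDING (1u << 0)
#define GL_OWNED   (1u << 1)

static bool try_acquire_global_lock(uint32_t *lock)    /* FACS lock dword */
{
        uint32_t old, new_val;

        do {
                old = __atomic_load_n(lock, __ATOMIC_SEQ_CST);
                new_val = (old & ~GL_PENDING) | GL_OWNED;
                if (old & GL_OWNED)
                        new_val |= GL_PENDING;  /* busy: ask the owner to signal us */
        } while (!__atomic_compare_exchange_n(lock, &old, new_val, false,
                                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));

        /* Owned outright unless the pending bit had to be set; in the
         * pending case the caller waits for the release SCI that the
         * handler in the hunks above services. */
        return (new_val & (GL_OWNED | GL_PENDING)) != (GL_OWNED | GL_PENDING);
}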
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -295,12 +295,12 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
handler_desc = region_obj->region.handler;
if (!handler_desc) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"No handler for Region [%4.4s] (%p) [%s]\n",
acpi_ut_get_node_name(region_obj->region.
node), region_obj,
acpi_ut_get_region_name(region_obj->region.
space_id)));
ACPI_ERROR((AE_INFO,
"No handler for Region [%4.4s] (%p) [%s]",
acpi_ut_get_node_name(region_obj->region.node),
region_obj,
acpi_ut_get_region_name(region_obj->region.
space_id)));
return_ACPI_STATUS(AE_NOT_EXIST);
}
......@@ -317,12 +317,11 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
if (!region_setup) {
/* No initialization routine, exit with error */
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"No init routine for region(%p) [%s]\n",
region_obj,
acpi_ut_get_region_name(region_obj->
region.
space_id)));
ACPI_ERROR((AE_INFO,
"No init routine for region(%p) [%s]",
region_obj,
acpi_ut_get_region_name(region_obj->region.
space_id)));
return_ACPI_STATUS(AE_NOT_EXIST);
}
......@@ -347,12 +346,11 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
/* Check for failure of the Region Setup */
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Region Init: %s [%s]\n",
acpi_format_exception(status),
acpi_ut_get_region_name(region_obj->
region.
space_id)));
ACPI_EXCEPTION((AE_INFO, status,
"During region initialization: [%s]",
acpi_ut_get_region_name(region_obj->
region.
space_id)));
return_ACPI_STATUS(status);
}
......@@ -406,10 +404,9 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
region_obj2->extra.region_context);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Handler for [%s] returned %s\n",
acpi_ut_get_region_name(region_obj->region.
space_id),
acpi_format_exception(status)));
ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]",
acpi_ut_get_region_name(region_obj->region.
space_id)));
}
if (!
......@@ -501,12 +498,10 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
status = acpi_ev_execute_reg_method(region_obj, 0);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"%s from region _REG, [%s]\n",
acpi_format_exception(status),
acpi_ut_get_region_name
(region_obj->region.
space_id)));
ACPI_EXCEPTION((AE_INFO, status,
"from region _REG, [%s]",
acpi_ut_get_region_name
(region_obj->region.space_id)));
}
if (acpi_ns_is_locked) {
......@@ -528,12 +523,10 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
/* Init routine may fail, Just ignore errors */
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"%s from region init, [%s]\n",
acpi_format_exception(status),
acpi_ut_get_region_name
(region_obj->region.
space_id)));
ACPI_EXCEPTION((AE_INFO, status,
"from region init, [%s]",
acpi_ut_get_region_name
(region_obj->region.space_id)));
}
region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE);
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -233,7 +233,11 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
*/
status = AE_OK;
} else {
ACPI_REPORT_ERROR(("Could not install pci_config handler for Root Bridge %4.4s, %s\n", acpi_ut_get_node_name(pci_root_node), acpi_format_exception(status)));
ACPI_EXCEPTION((AE_INFO,
status,
"Could not install pci_config handler for Root Bridge %4.4s",
acpi_ut_get_node_name
(pci_root_node)));
}
}
break;
......
......@@ -6,7 +6,7 @@
******************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -88,7 +88,7 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context)
*/
interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
return_VALUE(interrupt_handled);
return_UINT32(interrupt_handled);
}
/*******************************************************************************
......@@ -121,7 +121,7 @@ u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context)
*/
interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
return_VALUE(interrupt_handled);
return_UINT32(interrupt_handled);
}
/******************************************************************************
......
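The evsci.c hunks above switch return_VALUE to return_UINT32 because these interrupt handlers return a u32 and the trace-exit macro should log the value at the matching width. A hedged approximation of such a traced-return macro (not ACPICA's actual definition, which ties into its function-trace infrastructure):

/* Illustrative traced-return macro. */
#include <stdio.h>

typedef unsigned int u32;

#define return_UINT32(value)                                          \
        do {                                                          \
                u32 _ret = (value);                                   \
                printf("%s: exit, value %08X\n", __func__, _ret);     \
                return _ret;                                          \
        } while (0)

static u32 example_handler(void)
{
        return_UINT32(1);      /* logs the exit value, then returns it */
}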
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -143,8 +143,8 @@ acpi_install_fixed_event_handler(u32 event,
if (ACPI_SUCCESS(status))
status = acpi_enable_event(event, 0);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN,
"Could not enable fixed event.\n"));
ACPI_WARNING((AE_INFO, "Could not enable fixed event %X",
event));
/* Remove the handler */
......@@ -204,10 +204,11 @@ acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
acpi_gbl_fixed_event_handlers[event].context = NULL;
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN,
"Could not write to fixed event enable register.\n"));
ACPI_WARNING((AE_INFO,
"Could not write to fixed event enable register %X",
event));
} else {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X.\n",
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n",
event));
}
......@@ -434,7 +435,7 @@ acpi_remove_notify_handler(acpi_handle device,
if (device == ACPI_ROOT_OBJECT) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Removing notify handler for ROOT object.\n"));
"Removing notify handler for namespace root object\n"));
if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
!acpi_gbl_system_notify.handler) ||
......@@ -562,7 +563,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
struct acpi_gpe_event_info *gpe_event_info;
struct acpi_handler_info *handler;
acpi_status status;
u32 flags;
acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE("acpi_install_gpe_handler");
......@@ -653,7 +654,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
struct acpi_gpe_event_info *gpe_event_info;
struct acpi_handler_info *handler;
acpi_status status;
u32 flags;
acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE("acpi_remove_gpe_handler");
......
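The u32 flags to acpi_cpu_flags changes above matter because the interrupt state saved by spin_lock_irqsave() is an unsigned long on Linux, which is 64-bit on several architectures, so storing it in a u32 truncates it. A minimal kernel-style sketch of the pattern, with a hypothetical lock and wrapper names of my own:

/* Sketch only: hypothetical wrappers showing why the flags type must be
 * wide enough for the architecture's saved IRQ state. */
#include <linux/spinlock.h>

typedef unsigned long acpi_cpu_flags;      /* matches spin_lock_irqsave() */

static DEFINE_SPINLOCK(example_gpe_lock);

static acpi_cpu_flags example_acquire_lock(void)
{
        acpi_cpu_flags flags;

        spin_lock_irqsave(&example_gpe_lock, flags);
        return flags;
}

static void example_release_lock(acpi_cpu_flags flags)
{
        spin_unlock_irqrestore(&example_gpe_lock, flags);
}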
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -70,8 +70,7 @@ acpi_status acpi_enable(void)
/* Make sure we have the FADT */
if (!acpi_gbl_FADT) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN,
"No FADT information present!\n"));
ACPI_WARNING((AE_INFO, "No FADT information present!"));
return_ACPI_STATUS(AE_NO_ACPI_TABLES);
}
......@@ -83,7 +82,8 @@ acpi_status acpi_enable(void)
status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI);
if (ACPI_FAILURE(status)) {
ACPI_REPORT_ERROR(("Could not transition to ACPI mode.\n"));
ACPI_ERROR((AE_INFO,
"Could not transition to ACPI mode"));
return_ACPI_STATUS(status);
}
......@@ -113,8 +113,7 @@ acpi_status acpi_disable(void)
ACPI_FUNCTION_TRACE("acpi_disable");
if (!acpi_gbl_FADT) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN,
"No FADT information present!\n"));
ACPI_WARNING((AE_INFO, "No FADT information present!"));
return_ACPI_STATUS(AE_NO_ACPI_TABLES);
}
......@@ -127,8 +126,8 @@ acpi_status acpi_disable(void)
status = acpi_hw_set_mode(ACPI_SYS_MODE_LEGACY);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Could not exit ACPI mode to legacy mode"));
ACPI_ERROR((AE_INFO,
"Could not exit ACPI mode to legacy mode"));
return_ACPI_STATUS(status);
}
......@@ -185,9 +184,9 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
}
if (value != 1) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Could not enable %s event\n",
acpi_ut_get_event_name(event)));
ACPI_ERROR((AE_INFO,
"Could not enable %s event",
acpi_ut_get_event_name(event)));
return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
}
......@@ -384,9 +383,9 @@ acpi_status acpi_disable_event(u32 event, u32 flags)
}
if (value != 0) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Could not disable %s events\n",
acpi_ut_get_event_name(event)));
ACPI_ERROR((AE_INFO,
"Could not disable %s events",
acpi_ut_get_event_name(event)));
return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
}
......@@ -626,6 +625,13 @@ acpi_install_gpe_block(acpi_handle gpe_device,
goto unlock_and_exit;
}
/* Run the _PRW methods and enable the GPEs */
status = acpi_ev_initialize_gpe_block(node, gpe_block);
if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
}
/* Get the device_object attached to the node */
obj_desc = acpi_ns_get_attached_object(node);
......
......@@ -6,7 +6,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -413,9 +413,9 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
(!ACPI_STRNCMP(table_ptr->signature,
acpi_gbl_table_data[ACPI_TABLE_SSDT].signature,
acpi_gbl_table_data[ACPI_TABLE_SSDT].sig_length))) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Table has invalid signature [%4.4s], must be SSDT or PSDT\n",
table_ptr->signature));
ACPI_ERROR((AE_INFO,
"Table has invalid signature [%4.4s], must be SSDT or PSDT",
table_ptr->signature));
status = AE_BAD_SIGNATURE;
goto cleanup;
}
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -504,18 +504,12 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
}
/*
* Perform the conversion.
* Create a new string object and string buffer
* (-1 because of extra separator included in string_length from above)
*/
string_length--;
if (string_length > ACPI_MAX_STRING_CONVERSION) { /* ACPI limit */
return_ACPI_STATUS(AE_AML_STRING_LIMIT);
}
/* Create a new string object and string buffer */
return_desc =
acpi_ut_create_string_object((acpi_size) string_length);
acpi_ut_create_string_object((acpi_size)
(string_length - 1));
if (!return_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
......@@ -647,7 +641,9 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
break;
default:
ACPI_REPORT_ERROR(("Bad destination type during conversion: %X\n", destination_type));
ACPI_ERROR((AE_INFO,
"Bad destination type during conversion: %X",
destination_type));
status = AE_AML_INTERNAL;
break;
}
......@@ -660,17 +656,13 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
break;
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Unknown Target type ID 0x%X Op %s dest_type %s\n",
GET_CURRENT_ARG_TYPE(walk_state->op_info->
runtime_args),
walk_state->op_info->name,
acpi_ut_get_type_name(destination_type)));
ACPI_REPORT_ERROR(("Bad Target Type (ARGI): %X\n",
GET_CURRENT_ARG_TYPE(walk_state->op_info->
runtime_args)))
status = AE_AML_INTERNAL;
ACPI_ERROR((AE_INFO,
"Unknown Target type ID 0x%X aml_opcode %X dest_type %s",
GET_CURRENT_ARG_TYPE(walk_state->op_info->
runtime_args),
walk_state->opcode,
acpi_ut_get_type_name(destination_type)));
status = AE_AML_INTERNAL;
}
/*
......
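In the acpi_ex_convert_to_string hunk above, the computed string_length counts a separator after every element, so one position is subtracted before the string object is created, since the last element has no trailing separator. A small worked sketch of that length arithmetic, assuming an "AA,BB,CC" style rendering of two hex digits plus one separator per byte:

/* Worked example of the "-1 for the extra separator" adjustment. */
#include <stddef.h>

static size_t converted_length(size_t byte_count)
{
        size_t with_trailing_sep = byte_count * 3;   /* "AA," per byte */

        return with_trailing_sep ? with_trailing_sep - 1 : 0;
        /* e.g. 3 bytes -> 9 counted, 8 actually needed: "AA,BB,CC" */
}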
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -300,8 +300,8 @@ acpi_ex_create_region(u8 * aml_start,
*/
if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
(region_space < ACPI_USER_REGION_BEGIN)) {
ACPI_REPORT_ERROR(("Invalid address_space type %X\n",
region_space));
ACPI_ERROR((AE_INFO, "Invalid address_space type %X",
region_space));
return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
}
......
This diff is collapsed.
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -249,13 +249,18 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
* Source must be a buffer of sufficient size (ACPI_SMBUS_BUFFER_SIZE).
*/
if (ACPI_GET_OBJECT_TYPE(source_desc) != ACPI_TYPE_BUFFER) {
ACPI_REPORT_ERROR(("SMBus write requires Buffer, found type %s\n", acpi_ut_get_object_type_name(source_desc)));
ACPI_ERROR((AE_INFO,
"SMBus write requires Buffer, found type %s",
acpi_ut_get_object_type_name(source_desc)));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
if (source_desc->buffer.length < ACPI_SMBUS_BUFFER_SIZE) {
ACPI_REPORT_ERROR(("SMBus write requires Buffer of length %X, found length %X\n", ACPI_SMBUS_BUFFER_SIZE, source_desc->buffer.length));
ACPI_ERROR((AE_INFO,
"SMBus write requires Buffer of length %X, found length %X",
ACPI_SMBUS_BUFFER_SIZE,
source_desc->buffer.length));
return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
}
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -94,10 +94,9 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
/* We must have a valid region */
if (ACPI_GET_OBJECT_TYPE(rgn_desc) != ACPI_TYPE_REGION) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Needed Region, found type %X (%s)\n",
ACPI_GET_OBJECT_TYPE(rgn_desc),
acpi_ut_get_object_type_name(rgn_desc)));
ACPI_ERROR((AE_INFO, "Needed Region, found type %X (%s)",
ACPI_GET_OBJECT_TYPE(rgn_desc),
acpi_ut_get_object_type_name(rgn_desc)));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
......@@ -162,31 +161,28 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
* than the region itself. For example, a region of length one
* byte, and a field with Dword access specified.
*/
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Field [%4.4s] access width (%d bytes) too large for region [%4.4s] (length %X)\n",
acpi_ut_get_node_name(obj_desc->
common_field.
node),
obj_desc->common_field.
access_byte_width,
acpi_ut_get_node_name(rgn_desc->
region.node),
rgn_desc->region.length));
ACPI_ERROR((AE_INFO,
"Field [%4.4s] access width (%d bytes) too large for region [%4.4s] (length %X)",
acpi_ut_get_node_name(obj_desc->
common_field.node),
obj_desc->common_field.access_byte_width,
acpi_ut_get_node_name(rgn_desc->region.
node),
rgn_desc->region.length));
}
/*
* Offset rounded up to next multiple of field width
* exceeds region length, indicate an error
*/
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Field [%4.4s] Base+Offset+Width %X+%X+%X is beyond end of region [%4.4s] (length %X)\n",
acpi_ut_get_node_name(obj_desc->common_field.
node),
obj_desc->common_field.base_byte_offset,
field_datum_byte_offset,
obj_desc->common_field.access_byte_width,
acpi_ut_get_node_name(rgn_desc->region.node),
rgn_desc->region.length));
ACPI_ERROR((AE_INFO,
"Field [%4.4s] Base+Offset+Width %X+%X+%X is beyond end of region [%4.4s] (length %X)",
acpi_ut_get_node_name(obj_desc->common_field.node),
obj_desc->common_field.base_byte_offset,
field_datum_byte_offset,
obj_desc->common_field.access_byte_width,
acpi_ut_get_node_name(rgn_desc->region.node),
rgn_desc->region.length));
return_ACPI_STATUS(AE_AML_REGION_LIMIT);
}
......@@ -270,18 +266,17 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
if (ACPI_FAILURE(status)) {
if (status == AE_NOT_IMPLEMENTED) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Region %s(%X) not implemented\n",
acpi_ut_get_region_name(rgn_desc->
region.
space_id),
rgn_desc->region.space_id));
ACPI_ERROR((AE_INFO,
"Region %s(%X) not implemented",
acpi_ut_get_region_name(rgn_desc->region.
space_id),
rgn_desc->region.space_id));
} else if (status == AE_NOT_EXIST) {
ACPI_REPORT_ERROR(("Region %s(%X) has no handler\n",
acpi_ut_get_region_name(rgn_desc->
region.
space_id),
rgn_desc->region.space_id));
ACPI_ERROR((AE_INFO,
"Region %s(%X) has no handler",
acpi_ut_get_region_name(rgn_desc->region.
space_id),
rgn_desc->region.space_id));
}
}
......@@ -514,8 +509,8 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
default:
ACPI_REPORT_ERROR(("Wrong object type in field I/O %X\n",
ACPI_GET_OBJECT_TYPE(obj_desc)));
ACPI_ERROR((AE_INFO, "Wrong object type in field I/O %X",
ACPI_GET_OBJECT_TYPE(obj_desc)));
status = AE_AML_INTERNAL;
break;
}
......@@ -618,11 +613,11 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
default:
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"write_with_update_rule: Unknown update_rule setting: %X\n",
(obj_desc->common_field.
field_flags &
AML_FIELD_UPDATE_RULE_MASK)));
ACPI_ERROR((AE_INFO,
"Unknown update_rule value: %X",
(obj_desc->common_field.
field_flags &
AML_FIELD_UPDATE_RULE_MASK)));
return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
}
}
......@@ -677,10 +672,9 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
if (buffer_length <
ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Field size %X (bits) is too large for buffer (%X)\n",
obj_desc->common_field.bit_length,
buffer_length));
ACPI_ERROR((AE_INFO,
"Field size %X (bits) is too large for buffer (%X)",
obj_desc->common_field.bit_length, buffer_length));
return_ACPI_STATUS(AE_BUFFER_OVERFLOW);
}
......@@ -792,10 +786,9 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
if (buffer_length <
ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Field size %X (bits) is too large for buffer (%X)\n",
obj_desc->common_field.bit_length,
buffer_length));
ACPI_ERROR((AE_INFO,
"Field size %X (bits) is too large for buffer (%X)",
obj_desc->common_field.bit_length, buffer_length));
return_ACPI_STATUS(AE_BUFFER_OVERFLOW);
}
......
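The exfldio.c messages above spell out the check that triggers them: a field access must fit inside its operation region, i.e. the base offset plus the current datum offset plus the access width must not pass the region length. Expressed directly, as a sketch using plain integer types rather than the ACPICA structures:

/* The bounds check behind "Base+Offset+Width ... is beyond end of region". */
#include <stdbool.h>
#include <stdint.h>

static bool field_access_fits(uint32_t base_byte_offset,
                              uint32_t field_datum_byte_offset,
                              uint32_t access_byte_width,
                              uint32_t region_length)
{
        return base_byte_offset + field_datum_byte_offset + access_byte_width
               <= region_length;
}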
This diff is collapsed.
......@@ -6,7 +6,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -153,7 +153,9 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
/* Sanity check -- we must have a valid thread ID */
if (!walk_state->thread) {
ACPI_REPORT_ERROR(("Cannot acquire Mutex [%4.4s], null thread info\n", acpi_ut_get_node_name(obj_desc->mutex.node)));
ACPI_ERROR((AE_INFO,
"Cannot acquire Mutex [%4.4s], null thread info",
acpi_ut_get_node_name(obj_desc->mutex.node)));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
......@@ -162,7 +164,9 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
* mutex. This mechanism provides some deadlock prevention
*/
if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) {
ACPI_REPORT_ERROR(("Cannot acquire Mutex [%4.4s], incorrect sync_level\n", acpi_ut_get_node_name(obj_desc->mutex.node)));
ACPI_ERROR((AE_INFO,
"Cannot acquire Mutex [%4.4s], incorrect sync_level",
acpi_ut_get_node_name(obj_desc->mutex.node)));
return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
}
......@@ -237,14 +241,18 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
/* The mutex must have been previously acquired in order to release it */
if (!obj_desc->mutex.owner_thread) {
ACPI_REPORT_ERROR(("Cannot release Mutex [%4.4s], not acquired\n", acpi_ut_get_node_name(obj_desc->mutex.node)));
ACPI_ERROR((AE_INFO,
"Cannot release Mutex [%4.4s], not acquired",
acpi_ut_get_node_name(obj_desc->mutex.node)));
return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED);
}
/* Sanity check -- we must have a valid thread ID */
if (!walk_state->thread) {
ACPI_REPORT_ERROR(("Cannot release Mutex [%4.4s], null thread info\n", acpi_ut_get_node_name(obj_desc->mutex.node)));
ACPI_ERROR((AE_INFO,
"Cannot release Mutex [%4.4s], null thread info",
acpi_ut_get_node_name(obj_desc->mutex.node)));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
......@@ -255,7 +263,11 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
if ((obj_desc->mutex.owner_thread->thread_id !=
walk_state->thread->thread_id)
&& (obj_desc->mutex.semaphore != acpi_gbl_global_lock_semaphore)) {
ACPI_REPORT_ERROR(("Thread %X cannot release Mutex [%4.4s] acquired by thread %X\n", walk_state->thread->thread_id, acpi_ut_get_node_name(obj_desc->mutex.node), obj_desc->mutex.owner_thread->thread_id));
ACPI_ERROR((AE_INFO,
"Thread %X cannot release Mutex [%4.4s] acquired by thread %X",
walk_state->thread->thread_id,
acpi_ut_get_node_name(obj_desc->mutex.node),
obj_desc->mutex.owner_thread->thread_id));
return_ACPI_STATUS(AE_AML_NOT_OWNER);
}
......@@ -264,7 +276,9 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
* equal to the current sync level
*/
if (obj_desc->mutex.sync_level > walk_state->thread->current_sync_level) {
ACPI_REPORT_ERROR(("Cannot release Mutex [%4.4s], incorrect sync_level\n", acpi_ut_get_node_name(obj_desc->mutex.node)));
ACPI_ERROR((AE_INFO,
"Cannot release Mutex [%4.4s], incorrect sync_level",
acpi_ut_get_node_name(obj_desc->mutex.node)));
return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
}
......
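The exmutex.c hunks above enforce the AML mutex SyncLevel rules that provide the "some deadlock prevention" noted in the comment: a thread may only acquire a mutex whose SyncLevel is at or above its current level, and must release in the opposite order. The two checks reduce to the following sketch, with plain types in place of the walk-state and mutex objects:

/* The SyncLevel ordering checks behind the "incorrect sync_level" errors. */
#include <stdbool.h>
#include <stdint.h>

static bool may_acquire_mutex(uint8_t current_sync_level,
                              uint8_t mutex_sync_level)
{
        return current_sync_level <= mutex_sync_level;
}

static bool may_release_mutex(uint8_t current_sync_level,
                              uint8_t mutex_sync_level)
{
        return mutex_sync_level <= current_sync_level;
}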
......@@ -6,7 +6,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2005, R. Byron Moore
* Copyright (C) 2000 - 2006, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -99,7 +99,8 @@ static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs)
*/
name_string = ACPI_MEM_ALLOCATE(size_needed);
if (!name_string) {
ACPI_REPORT_ERROR(("ex_allocate_name_string: Could not allocate size %d\n", size_needed));
ACPI_ERROR((AE_INFO,
"Could not allocate size %d", size_needed));
return_PTR(NULL);
}
......@@ -167,8 +168,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
char_buf[0] = *aml_address;
if ('0' <= char_buf[0] && char_buf[0] <= '9') {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "leading digit: %c\n",
char_buf[0]));
ACPI_ERROR((AE_INFO, "Invalid leading digit: %c", char_buf[0]));
return_ACPI_STATUS(AE_CTRL_PENDING);
}
......@@ -191,10 +191,10 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
if (name_string) {
ACPI_STRCAT(name_string, char_buf);
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
"Appended to - %s \n", name_string));
"Appended to - %s\n", name_string));
} else {
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
"No Name string - %s \n", char_buf));
"No Name string - %s\n", char_buf));
}
} else if (index == 0) {
/*
......@@ -211,12 +211,12 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
* the required 4
*/
status = AE_AML_BAD_NAME;
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Bad character %02x in name, at %p\n",
*aml_address, aml_address));
ACPI_ERROR((AE_INFO,
"Bad character %02x in name, at %p",
*aml_address, aml_address));
}
*in_aml_address = (u8 *) aml_address;
*in_aml_address = ACPI_CAST_PTR(u8, aml_address);
return_ACPI_STATUS(status);
}
......@@ -412,8 +412,7 @@ acpi_ex_get_name_string(acpi_object_type data_type,
if (AE_CTRL_PENDING == status && has_prefix) {
/* Ran out of segments after processing a prefix */
ACPI_REPORT_ERROR(("ex_do_name: Malformed Name at %p\n",
name_string));
ACPI_ERROR((AE_INFO, "Malformed Name at %p", name_string));
status = AE_AML_BAD_NAME;
}
......
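acpi_ex_name_segment() above rejects a leading digit and any "bad character"; the rule from the ACPI naming grammar is that a NameSeg is exactly four characters, starting with an uppercase letter or underscore, followed by uppercase letters, digits, or underscores (short names are padded with '_'). A standalone sketch of that check:

/* NameSeg validity per the ACPI naming rules (sketch). */
#include <ctype.h>
#include <stdbool.h>

static bool valid_name_seg(const char seg[4])
{
        int i;

        if (!(isupper((unsigned char)seg[0]) || seg[0] == '_'))
                return false;          /* leading digit etc. rejected */

        for (i = 1; i < 4; i++) {
                unsigned char c = (unsigned char)seg[i];

                if (!(isupper(c) || isdigit(c) || c == '_'))
                        return false;
        }
        return true;
}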
This diff is collapsed.