Commit 57e1c5c8 authored by Len Brown

Pull test into release branch

parents 62d0cfcb 76a2e849
......@@ -274,6 +274,7 @@ Who: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
---------------------------
<<<<<<< test:Documentation/feature-removal-schedule.txt
What: ACPI hotkey driver (CONFIG_ACPI_HOTKEY)
When: 2.6.21
Why: hotkey.c was an attempt to consolidate multiple drivers that use
......@@ -306,11 +307,18 @@ Why: The ACPI namespace is effectively the symbol list for
the BIOS can be extracted and disassembled with acpidump
and iasl as documented in the pmtools package here:
http://ftp.kernel.org/pub/linux/kernel/people/lenb/acpi/utils
Who: Len Brown <len.brown@intel.com>
---------------------------
What: ACPI procfs interface
When: July 2007
Why: After ACPI sysfs conversion, ACPI attributes will be duplicated
in sysfs and the ACPI procfs interface should be removed.
Who: Zhang Rui <rui.zhang@intel.com>
---------------------------
What: /proc/acpi/button
When: August 2007
Why: /proc/acpi/button has been replaced by events to the input layer
......
Video Output Switcher Control
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2006 luming.yu@intel.com
The output sysfs class driver provides an abstract video output layer that
can be used to hook platform-specific methods to enable/disable a video output
device through a common sysfs interface. For example, on my IBM ThinkPad T42
laptop, the ACPI video driver registers its output devices and a read/write
method for 'state' with the output sysfs class. The user interface under sysfs is:
linux:/sys/class/video_output # tree .
.
|-- CRT0
| |-- device -> ../../../devices/pci0000:00/0000:00:01.0
| |-- state
| |-- subsystem -> ../../../class/video_output
| `-- uevent
|-- DVI0
| |-- device -> ../../../devices/pci0000:00/0000:00:01.0
| |-- state
| |-- subsystem -> ../../../class/video_output
| `-- uevent
|-- LCD0
| |-- device -> ../../../devices/pci0000:00/0000:00:01.0
| |-- state
| |-- subsystem -> ../../../class/video_output
| `-- uevent
`-- TV0
|-- device -> ../../../devices/pci0000:00/0000:00:01.0
|-- state
|-- subsystem -> ../../../class/video_output
`-- uevent
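A minimal sketch of how a platform driver might hook into this class follows.
It assumes the video_output_register()/video_output_unregister() interface and
the set_state/get_status callbacks provided by drivers/video/output.c; the
my_lcd_*() helpers, the my_lcd_hw_state variable and the reuse of the "LCD0"
name are illustrative placeholders, not part of any real driver.

#include <linux/err.h>
#include <linux/module.h>
#include <linux/video_output.h>

static int my_lcd_hw_state;	/* placeholder for real platform/ACPI state */

/* Called when userspace writes .../LCD0/state; apply the requested state. */
static int my_lcd_set_state(struct output_device *od)
{
	my_lcd_hw_state = od->request_state;	/* e.g. evaluate an ACPI method */
	return 0;
}

/* Called when userspace reads .../LCD0/state; report the current state. */
static int my_lcd_get_status(struct output_device *od)
{
	return my_lcd_hw_state;
}

static struct output_properties my_lcd_props = {
	.set_state  = my_lcd_set_state,
	.get_status = my_lcd_get_status,
};

static struct output_device *my_lcd_od;

static int __init my_output_init(void)
{
	/* Creates /sys/class/video_output/LCD0 with device/state/uevent entries
	 * (assumes the register call returns an ERR_PTR-encoded error on failure). */
	my_lcd_od = video_output_register("LCD0", NULL, NULL, &my_lcd_props);
	return IS_ERR(my_lcd_od) ? PTR_ERR(my_lcd_od) : 0;
}

static void __exit my_output_exit(void)
{
	video_output_unregister(my_lcd_od);
}

module_init(my_output_init);
module_exit(my_output_exit);
MODULE_LICENSE("GPL");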
......@@ -584,6 +584,14 @@ W: http://sourceforge.net/projects/acpi4asus
W: http://xf.iksaif.net/acpi4asus
S: Maintained
ASUS LAPTOP EXTRAS DRIVER
P: Corentin Chary
M: corentincj@iksaif.net
L: acpi4asus-user@lists.sourceforge.net
W: http://sourceforge.net/projects/acpi4asus
W: http://xf.iksaif.net/acpi4asus
S: Maintained
ATA OVER ETHERNET DRIVER
P: Ed L. Cashin
M: ecashin@coraid.com
......
......@@ -466,7 +466,8 @@ CONFIG_FW_LOADER=y
#
# Plug and Play support
#
# CONFIG_PNP is not set
CONFIG_PNP=y
CONFIG_PNPACPI=y
#
# Block devices
......
......@@ -16,7 +16,7 @@
static int nvidia_hpet_detected __initdata;
static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
static int __init nvidia_hpet_check(struct acpi_table_header *header)
{
nvidia_hpet_detected = 1;
return 0;
......@@ -30,7 +30,7 @@ static int __init check_bridge(int vendor, int device)
is enabled. */
if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) {
nvidia_hpet_detected = 0;
acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check);
if (nvidia_hpet_detected == 0) {
acpi_skip_timer_override = 1;
printk(KERN_INFO "Nvidia board "
......
......@@ -190,7 +190,7 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
/* Invoke C3 */
inb(cx_address);
/* Dummy op - must do something useless after P_LVL3 read */
t = inl(acpi_fadt.xpm_tmr_blk.address);
t = inl(acpi_gbl_FADT.xpm_timer_block.address);
}
/* Disable bus ratio bit */
local_irq_disable();
......@@ -250,8 +250,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
outb(3, 0x22);
} else if ((pr != NULL) && pr->flags.bm_control) {
/* Disable bus master arbitration */
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1,
ACPI_MTX_DO_NOT_LOCK);
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
}
switch (longhaul_version) {
......@@ -281,8 +280,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
case TYPE_POWERSAVER:
if (longhaul_flags & USE_ACPI_C3) {
/* Don't allow wakeup */
acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0,
ACPI_MTX_DO_NOT_LOCK);
acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
do_powersaver(cx->address, clock_ratio_index);
} else {
do_powersaver(0, clock_ratio_index);
......@@ -295,8 +293,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
outb(0, 0x22);
} else if ((pr != NULL) && pr->flags.bm_control) {
/* Enable bus master arbitration */
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0,
ACPI_MTX_DO_NOT_LOCK);
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
}
outb(pic2_mask,0xA1); /* restore mask */
outb(pic1_mask,0x21);
......
......@@ -1057,7 +1057,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
static int gsi_to_irq[MAX_GSI_NUM];
/* Don't set up the ACPI SCI because it's already set up */
if (acpi_fadt.sci_int == gsi)
if (acpi_gbl_FADT.sci_interrupt == gsi)
return gsi;
ioapic = mp_find_ioapic(gsi);
......@@ -1114,7 +1114,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
/*
* Don't assign IRQ used by ACPI SCI
*/
if (gsi == acpi_fadt.sci_int)
if (gsi == acpi_gbl_FADT.sci_interrupt)
gsi = pci_irq++;
gsi_to_irq[irq] = gsi;
} else {
......
......@@ -62,19 +62,19 @@ extern void * boot_ioremap(unsigned long, unsigned long);
/* Identify CPU proximity domains */
static void __init parse_cpu_affinity_structure(char *p)
{
struct acpi_table_processor_affinity *cpu_affinity =
(struct acpi_table_processor_affinity *) p;
struct acpi_srat_cpu_affinity *cpu_affinity =
(struct acpi_srat_cpu_affinity *) p;
if (!cpu_affinity->flags.enabled)
if ((cpu_affinity->flags & ACPI_SRAT_CPU_ENABLED) == 0)
return; /* empty entry */
/* mark this node as "seen" in node bitmap */
BMAP_SET(pxm_bitmap, cpu_affinity->proximity_domain);
BMAP_SET(pxm_bitmap, cpu_affinity->proximity_domain_lo);
apicid_to_pxm[cpu_affinity->apic_id] = cpu_affinity->proximity_domain;
apicid_to_pxm[cpu_affinity->apic_id] = cpu_affinity->proximity_domain_lo;
printk("CPU 0x%02X in proximity domain 0x%02X\n",
cpu_affinity->apic_id, cpu_affinity->proximity_domain);
cpu_affinity->apic_id, cpu_affinity->proximity_domain_lo);
}
/*
......@@ -87,25 +87,24 @@ static void __init parse_memory_affinity_structure (char *sratp)
unsigned long start_pfn, end_pfn;
u8 pxm;
struct node_memory_chunk_s *p, *q, *pend;
struct acpi_table_memory_affinity *memory_affinity =
(struct acpi_table_memory_affinity *) sratp;
struct acpi_srat_mem_affinity *memory_affinity =
(struct acpi_srat_mem_affinity *) sratp;
if (!memory_affinity->flags.enabled)
if ((memory_affinity->flags & ACPI_SRAT_MEM_ENABLED) == 0)
return; /* empty entry */
pxm = memory_affinity->proximity_domain & 0xff;
/* mark this node as "seen" in node bitmap */
BMAP_SET(pxm_bitmap, memory_affinity->proximity_domain);
BMAP_SET(pxm_bitmap, pxm);
/* calculate info for memory chunk structure */
paddr = memory_affinity->base_addr_hi;
paddr = (paddr << 32) | memory_affinity->base_addr_lo;
size = memory_affinity->length_hi;
size = (size << 32) | memory_affinity->length_lo;
paddr = memory_affinity->base_address;
size = memory_affinity->length;
start_pfn = paddr >> PAGE_SHIFT;
end_pfn = (paddr + size) >> PAGE_SHIFT;
pxm = memory_affinity->proximity_domain;
if (num_memory_chunks >= MAXCHUNKS) {
printk("Too many mem chunks in SRAT. Ignoring %lld MBytes at %llx\n",
......@@ -132,8 +131,8 @@ static void __init parse_memory_affinity_structure (char *sratp)
printk("Memory range 0x%lX to 0x%lX (type 0x%X) in proximity domain 0x%02X %s\n",
start_pfn, end_pfn,
memory_affinity->memory_type,
memory_affinity->proximity_domain,
(memory_affinity->flags.hot_pluggable ?
pxm,
((memory_affinity->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ?
"enabled and removable" : "enabled" ) );
}
......@@ -185,10 +184,10 @@ static int __init acpi20_parse_srat(struct acpi_table_srat *sratp)
num_memory_chunks = 0;
while (p < end) {
switch (*p) {
case ACPI_SRAT_PROCESSOR_AFFINITY:
case ACPI_SRAT_TYPE_CPU_AFFINITY:
parse_cpu_affinity_structure(p);
break;
case ACPI_SRAT_MEMORY_AFFINITY:
case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
parse_memory_affinity_structure(p);
break;
default:
......@@ -262,31 +261,30 @@ static int __init acpi20_parse_srat(struct acpi_table_srat *sratp)
return 0;
}
struct acpi_static_rsdt {
struct acpi_table_rsdt table;
u32 padding[7]; /* Allow for 7 more table entries */
};
int __init get_memcfg_from_srat(void)
{
struct acpi_table_header *header = NULL;
struct acpi_table_rsdp *rsdp = NULL;
struct acpi_table_rsdt *rsdt = NULL;
struct acpi_pointer *rsdp_address = NULL;
struct acpi_table_rsdt saved_rsdt;
acpi_native_uint rsdp_address = 0;
struct acpi_static_rsdt saved_rsdt;
int tables = 0;
int i = 0;
if (ACPI_FAILURE(acpi_find_root_pointer(ACPI_PHYSICAL_ADDRESSING,
rsdp_address))) {
rsdp_address = acpi_find_rsdp();
if (!rsdp_address) {
printk("%s: System description tables not found\n",
__FUNCTION__);
goto out_err;
}
if (rsdp_address->pointer_type == ACPI_PHYSICAL_POINTER) {
printk("%s: assigning address to rsdp\n", __FUNCTION__);
rsdp = (struct acpi_table_rsdp *)
(u32)rsdp_address->pointer.physical;
} else {
printk("%s: rsdp_address is not a physical pointer\n", __FUNCTION__);
goto out_err;
}
rsdp = (struct acpi_table_rsdp *)(u32)rsdp_address;
if (!rsdp) {
printk("%s: Didn't find ACPI root!\n", __FUNCTION__);
goto out_err;
......@@ -295,13 +293,13 @@ int __init get_memcfg_from_srat(void)
printk(KERN_INFO "%.8s v%d [%.6s]\n", rsdp->signature, rsdp->revision,
rsdp->oem_id);
if (strncmp(rsdp->signature, RSDP_SIG,strlen(RSDP_SIG))) {
if (strncmp(rsdp->signature, ACPI_SIG_RSDP,strlen(ACPI_SIG_RSDP))) {
printk(KERN_WARNING "%s: RSDP table signature incorrect\n", __FUNCTION__);
goto out_err;
}
rsdt = (struct acpi_table_rsdt *)
boot_ioremap(rsdp->rsdt_address, sizeof(struct acpi_table_rsdt));
boot_ioremap(rsdp->rsdt_physical_address, sizeof(struct acpi_table_rsdt));
if (!rsdt) {
printk(KERN_WARNING
......@@ -310,9 +308,9 @@ int __init get_memcfg_from_srat(void)
goto out_err;
}
header = & rsdt->header;
header = &rsdt->header;
if (strncmp(header->signature, RSDT_SIG, strlen(RSDT_SIG))) {
if (strncmp(header->signature, ACPI_SIG_RSDT, strlen(ACPI_SIG_RSDT))) {
printk(KERN_WARNING "ACPI: RSDT signature incorrect\n");
goto out_err;
}
......@@ -330,9 +328,9 @@ int __init get_memcfg_from_srat(void)
memcpy(&saved_rsdt, rsdt, sizeof(saved_rsdt));
if (saved_rsdt.header.length > sizeof(saved_rsdt)) {
if (saved_rsdt.table.header.length > sizeof(saved_rsdt)) {
printk(KERN_WARNING "ACPI: Too big length in RSDT: %d\n",
saved_rsdt.header.length);
saved_rsdt.table.header.length);
goto out_err;
}
......@@ -341,15 +339,15 @@ int __init get_memcfg_from_srat(void)
for (i = 0; i < tables; i++) {
/* Map in header, then map in full table length. */
header = (struct acpi_table_header *)
boot_ioremap(saved_rsdt.entry[i], sizeof(struct acpi_table_header));
boot_ioremap(saved_rsdt.table.table_offset_entry[i], sizeof(struct acpi_table_header));
if (!header)
break;
header = (struct acpi_table_header *)
boot_ioremap(saved_rsdt.entry[i], header->length);
boot_ioremap(saved_rsdt.table.table_offset_entry[i], header->length);
if (!header)
break;
if (strncmp((char *) &header->signature, "SRAT", 4))
if (strncmp((char *) &header->signature, ACPI_SIG_SRAT, 4))
continue;
/* we've found the srat table. don't need to look at any more tables */
......
......@@ -84,15 +84,6 @@ struct es7000_oem_table {
};
#ifdef CONFIG_ACPI
struct acpi_table_sdt {
unsigned long pa;
unsigned long count;
struct {
unsigned long pa;
enum acpi_table_id id;
unsigned long size;
} entry[50];
};
struct oem_table {
struct acpi_table_header Header;
......
......@@ -160,53 +160,16 @@ parse_unisys_oem (char *oemptr)
int __init
find_unisys_acpi_oem_table(unsigned long *oem_addr)
{
struct acpi_table_rsdp *rsdp = NULL;
unsigned long rsdp_phys = 0;
struct acpi_table_header *header = NULL;
int i;
struct acpi_table_sdt sdt;
rsdp_phys = acpi_find_rsdp();
rsdp = __va(rsdp_phys);
if (rsdp->rsdt_address) {
struct acpi_table_rsdt *mapped_rsdt = NULL;
sdt.pa = rsdp->rsdt_address;
header = (struct acpi_table_header *)
__acpi_map_table(sdt.pa, sizeof(struct acpi_table_header));
if (!header)
return -ENODEV;
sdt.count = (header->length - sizeof(struct acpi_table_header)) >> 3;
mapped_rsdt = (struct acpi_table_rsdt *)
__acpi_map_table(sdt.pa, header->length);
if (!mapped_rsdt)
return -ENODEV;
header = &mapped_rsdt->header;
for (i = 0; i < sdt.count; i++)
sdt.entry[i].pa = (unsigned long) mapped_rsdt->entry[i];
};
for (i = 0; i < sdt.count; i++) {
header = (struct acpi_table_header *)
__acpi_map_table(sdt.entry[i].pa,
sizeof(struct acpi_table_header));
if (!header)
continue;
if (!strncmp((char *) &header->signature, "OEM1", 4)) {
if (!strncmp((char *) &header->oem_id, "UNISYS", 6)) {
void *addr;
struct oem_table *t;
acpi_table_print(header, sdt.entry[i].pa);
t = (struct oem_table *) __acpi_map_table(sdt.entry[i].pa, header->length);
addr = (void *) __acpi_map_table(t->OEMTableAddr, t->OEMTableSize);
*oem_addr = (unsigned long) addr;
int i = 0;
while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) {
if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) {
struct oem_table *t = (struct oem_table *)header;
*oem_addr = (unsigned long)__acpi_map_table(t->OEMTableAddr,
t->OEMTableSize);
return 0;
}
}
}
return -1;
}
#endif
......
......@@ -36,7 +36,7 @@ static DECLARE_BITMAP(fallback_slots, MAX_CHECK_BUS*32);
static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
{
int cfg_num = -1;
struct acpi_table_mcfg_config *cfg;
struct acpi_mcfg_allocation *cfg;
if (seg == 0 && bus < MAX_CHECK_BUS &&
test_bit(PCI_SLOT(devfn) + 32*bus, fallback_slots))
......@@ -48,11 +48,11 @@ static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
break;
}
cfg = &pci_mmcfg_config[cfg_num];
if (cfg->pci_segment_group_number != seg)
if (cfg->pci_segment != seg)
continue;
if ((cfg->start_bus_number <= bus) &&
(cfg->end_bus_number >= bus))
return cfg->base_address;
return cfg->address;
}
/* Handle more broken MCFG tables on Asus etc.
......@@ -60,9 +60,9 @@ static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
this applies to all busses. */
cfg = &pci_mmcfg_config[0];
if (pci_mmcfg_config_num == 1 &&
cfg->pci_segment_group_number == 0 &&
cfg->pci_segment == 0 &&
(cfg->start_bus_number | cfg->end_bus_number) == 0)
return cfg->base_address;
return cfg->address;
/* Fall back to type 0 */
return 0;
......@@ -199,19 +199,19 @@ void __init pci_mmcfg_init(int type)
if ((pci_probe & PCI_PROBE_MMCONF) == 0)
return;
acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg);
acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);
if ((pci_mmcfg_config_num == 0) ||
(pci_mmcfg_config == NULL) ||
(pci_mmcfg_config[0].base_address == 0))
(pci_mmcfg_config[0].address == 0))
return;
/* Only do this check when type 1 works. If it doesn't work
assume we run on a Mac and always use MCFG */
if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].base_address,
pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN,
if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].address,
pci_mmcfg_config[0].address + MMCONFIG_APER_MIN,
E820_RESERVED)) {
printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n",
pci_mmcfg_config[0].base_address);
printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %lx is not E820-reserved\n",
(unsigned long)pci_mmcfg_config[0].address);
printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
return;
}
......
......@@ -26,14 +26,10 @@
#include <linux/acpi.h>
#include <asm/sn/sn2/sn_hwperf.h>
#include <asm/sn/acpi.h>
#include "acpi/acglobal.h"
extern void sn_init_cpei_timer(void);
extern void register_sn_procfs(void);
extern void sn_acpi_bus_fixup(struct pci_bus *);
extern void sn_bus_fixup(struct pci_bus *);
extern void sn_acpi_slot_fixup(struct pci_dev *, struct pcidev_info *);
extern void sn_more_slot_fixup(struct pci_dev *, struct pcidev_info *);
extern void sn_legacy_pci_window_fixup(struct pci_controller *, u64, u64);
extern void sn_io_acpi_init(void);
extern void sn_io_init(void);
......@@ -48,6 +44,9 @@ struct sysdata_el {
int sn_ioif_inited; /* SN I/O infrastructure initialized? */
int sn_acpi_rev; /* SN ACPI revision */
EXPORT_SYMBOL_GPL(sn_acpi_rev);
struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */
/*
......@@ -98,25 +97,6 @@ sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
return ret_stuff.status;
}
/*
* Retrieve the pci device information given the bus and device|function number.
*/
static inline u64
sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
u64 sn_irq_info)
{
struct ia64_sal_retval ret_stuff;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
SAL_CALL_NOLOCK(ret_stuff,
(u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
(u64) segment, (u64) bus_number, (u64) devfn,
(u64) pci_dev,
sn_irq_info, 0, 0);
return ret_stuff.v0;
}
/*
* sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified
* device.
......@@ -249,47 +229,22 @@ void sn_pci_unfixup_slot(struct pci_dev *dev)
}
/*
* sn_pci_fixup_slot() - This routine sets up a slot's resources consistent
* with the Linux PCI abstraction layer. Resources
* acquired from our PCI provider include PIO maps
* to BAR space and interrupt objects.
* sn_pci_fixup_slot()
*/
void sn_pci_fixup_slot(struct pci_dev *dev)
void sn_pci_fixup_slot(struct pci_dev *dev, struct pcidev_info *pcidev_info,
struct sn_irq_info *sn_irq_info)
{
int segment = pci_domain_nr(dev->bus);
int status = 0;
struct pcibus_bussoft *bs;
struct pci_bus *host_pci_bus;
struct pci_dev *host_pci_dev;
struct pcidev_info *pcidev_info;
struct sn_irq_info *sn_irq_info;
unsigned int bus_no, devfn;
pci_dev_get(dev); /* for the sysdata pointer */
pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
if (!pcidev_info)
BUG(); /* Cannot afford to run out of memory */
sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
if (!sn_irq_info)
BUG(); /* Cannot afford to run out of memory */
/* Call to retrieve pci device information needed by kernel. */
status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number,
dev->devfn,
(u64) __pa(pcidev_info),
(u64) __pa(sn_irq_info));
if (status)
BUG(); /* Cannot get platform pci device information */
/* Add pcidev_info to list in pci_controller.platform_data */
list_add_tail(&pcidev_info->pdi_list,
&(SN_PLATFORM_DATA(dev->bus)->pcidev_info));
if (SN_ACPI_BASE_SUPPORT())
sn_acpi_slot_fixup(dev, pcidev_info);
else
sn_more_slot_fixup(dev, pcidev_info);
/*
* Using the PROMs values for the PCI host bus, get the Linux
* PCI host_pci_dev struct and set up host bus linkages
......@@ -489,11 +444,6 @@ void sn_generate_path(struct pci_bus *pci_bus, char *address)
sprintf(address, "%s^%d", address, geo_slot(geoid));
}
/*
* sn_pci_fixup_bus() - Perform SN specific setup of software structs
* (pcibus_bussoft, pcidev_info) and hardware
* registers, for the specified bus and devices under it.
*/
void __devinit
sn_pci_fixup_bus(struct pci_bus *bus)
{
......@@ -519,6 +469,15 @@ sn_io_early_init(void)
if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
return 0;
/* we set the acpi revision to that of the DSDT table OEM rev. */
{
struct acpi_table_header *header = NULL;
acpi_get_table_by_index(ACPI_TABLE_INDEX_DSDT, &header);
BUG_ON(header == NULL);
sn_acpi_rev = header->oem_revision;
}
/*
* prime sn_pci_provider[]. Individual provider init routines will
* override their respective default entries.
......@@ -544,8 +503,12 @@ sn_io_early_init(void)
register_sn_procfs();
#endif
{
struct acpi_table_header *header;
(void)acpi_get_table_by_index(ACPI_TABLE_INDEX_DSDT, &header);
printk(KERN_INFO "ACPI DSDT OEM Rev 0x%x\n",
acpi_gbl_DSDT->oem_revision);
header->oem_revision);
}
if (SN_ACPI_BASE_SUPPORT())
sn_io_acpi_init();
else
......@@ -605,7 +568,6 @@ sn_io_late_init(void)
fs_initcall(sn_io_late_init);
EXPORT_SYMBOL(sn_pci_fixup_slot);
EXPORT_SYMBOL(sn_pci_unfixup_slot);
EXPORT_SYMBOL(sn_bus_store_sysdata);
EXPORT_SYMBOL(sn_bus_free_sysdata);
......
......@@ -56,6 +56,25 @@ static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
return ret_stuff.v0;
}
/*
* Retrieve the pci device information given the bus and device|function number.
*/
static inline u64
sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
u64 sn_irq_info)
{
struct ia64_sal_retval ret_stuff;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
SAL_CALL_NOLOCK(ret_stuff,
(u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
(u64) segment, (u64) bus_number, (u64) devfn,
(u64) pci_dev,
sn_irq_info, 0, 0);
return ret_stuff.v0;
}
/*
* sn_fixup_ionodes() - This routine initializes the HUB data structure for
......@@ -172,18 +191,40 @@ sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
}
/*
* sn_more_slot_fixup() - We are not running with an ACPI capable PROM,
* sn_io_slot_fixup() - We are not running with an ACPI capable PROM,
* and need to convert the pci_dev->resource
* 'start' and 'end' addresses to mapped addresses,
* and setup the pci_controller->window array entries.
*/
void
sn_more_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info)
sn_io_slot_fixup(struct pci_dev *dev)
{
unsigned int count = 0;
int idx;
s64 pci_addrs[PCI_ROM_RESOURCE + 1];
unsigned long addr, end, size, start;
struct pcidev_info *pcidev_info;
struct sn_irq_info *sn_irq_info;
int status;
pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
if (!pcidev_info)
panic("%s: Unable to alloc memory for pcidev_info", __FUNCTION__);
sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
if (!sn_irq_info)
panic("%s: Unable to alloc memory for sn_irq_info", __FUNCTION__);
/* Call to retrieve pci device information needed by kernel. */
status = sal_get_pcidev_info((u64) pci_domain_nr(dev),
(u64) dev->bus->number,
dev->devfn,
(u64) __pa(pcidev_info),
(u64) __pa(sn_irq_info));
if (status)
BUG(); /* Cannot get platform pci device information */
/* Copy over PIO Mapped Addresses */
for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
......@@ -219,8 +260,12 @@ sn_more_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info)
*/
if (count > 0)
sn_pci_window_fixup(dev, count, pci_addrs);
sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info);
}
EXPORT_SYMBOL(sn_io_slot_fixup);
/*
* sn_pci_controller_fixup() - This routine sets up a bus's resources
* consistent with the Linux PCI abstraction layer.
......@@ -272,9 +317,6 @@ sn_bus_fixup(struct pci_bus *bus)
{
struct pci_dev *pci_dev = NULL;
struct pcibus_bussoft *prom_bussoft_ptr;
extern void sn_common_bus_fixup(struct pci_bus *,
struct pcibus_bussoft *);
if (!bus->parent) { /* If root bus */
prom_bussoft_ptr = PCI_CONTROLLER(bus)->platform_data;
......@@ -291,7 +333,7 @@ sn_bus_fixup(struct pci_bus *bus)
prom_bussoft_ptr->bs_legacy_mem);
}
list_for_each_entry(pci_dev, &bus->devices, bus_list) {
sn_pci_fixup_slot(pci_dev);
sn_io_slot_fixup(pci_dev);
}
}
......
......@@ -29,6 +29,7 @@
* on IA64. This routine will convert a port number into a valid
* SN i/o address. Used by sn_in*() and sn_out*().
*/
void *sn_io_addr(unsigned long port)
{
if (!IS_RUNNING_ON_SIMULATOR()) {
......
......@@ -20,7 +20,8 @@
#include "xtalk/hubdev.h"
int
sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp)
sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp,
char **ssdt)
{
struct ia64_sal_retval ret_stuff;
u64 busnum;
......@@ -32,7 +33,8 @@ sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp)
segment = soft->pbi_buscommon.bs_persist_segment;
busnum = soft->pbi_buscommon.bs_persist_busnum;
SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, segment,
busnum, (u64) device, (u64) resp, 0, 0, 0);
busnum, (u64) device, (u64) resp, (u64)ia64_tpa(ssdt),
0, 0);
return (int)ret_stuff.v0;
}
......
......@@ -32,7 +32,7 @@ static void via_bugs(void)
static int nvidia_hpet_detected __initdata;
static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
static int __init nvidia_hpet_check(struct acpi_table_header *header)
{
nvidia_hpet_detected = 1;
return 0;
......@@ -53,7 +53,7 @@ static void nvidia_bugs(void)
return;
nvidia_hpet_detected = 0;
acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check);
if (nvidia_hpet_detected == 0) {
acpi_skip_timer_override = 1;
printk(KERN_INFO "Nvidia board "
......
......@@ -58,8 +58,8 @@ void __init clustered_apic_check(void)
* Some x86_64 machines use physical APIC mode regardless of how many
* procs/clusters are present (x86_64 ES7000 is an example).
*/
if (acpi_fadt.revision > FADT2_REVISION_ID)
if (acpi_fadt.force_apic_physical_destination_mode) {
if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID)
if (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) {
genapic = &apic_cluster;
goto print;
}
......
......@@ -798,7 +798,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
return gsi;
/* Don't set up the ACPI SCI because it's already set up */
if (acpi_fadt.sci_int == gsi)
if (acpi_gbl_FADT.sci_interrupt == gsi)
return gsi;
ioapic = mp_find_ioapic(gsi);
......
......@@ -498,7 +498,7 @@ static unsigned long get_cmos_time(void)
{
unsigned int year, mon, day, hour, min, sec;
unsigned long flags;
unsigned extyear = 0;
unsigned century = 0;
spin_lock_irqsave(&rtc_lock, flags);
......@@ -510,9 +510,9 @@ static unsigned long get_cmos_time(void)
mon = CMOS_READ(RTC_MONTH);
year = CMOS_READ(RTC_YEAR);
#ifdef CONFIG_ACPI
if (acpi_fadt.revision >= FADT2_REVISION_ID &&
acpi_fadt.century)
extyear = CMOS_READ(acpi_fadt.century);
if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
acpi_gbl_FADT.century)
century = CMOS_READ(acpi_gbl_FADT.century);
#endif
} while (sec != CMOS_READ(RTC_SECONDS));
......@@ -530,10 +530,10 @@ static unsigned long get_cmos_time(void)
BCD_TO_BIN(mon);
BCD_TO_BIN(year);
if (extyear) {
BCD_TO_BIN(extyear);
year += extyear;
printk(KERN_INFO "Extended CMOS year: %d\n", extyear);
if (century) {
BCD_TO_BIN(century);
year += century * 100;
printk(KERN_INFO "Extended CMOS year: %d\n", century * 100);
} else {
/*
* x86-64 systems have only existed since 2002.
......@@ -954,7 +954,7 @@ __cpuinit int unsynchronized_tsc(void)
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
#ifdef CONFIG_ACPI
/* But TSC doesn't tick in C3 so don't use it there */
if (acpi_fadt.length > 0 && acpi_fadt.plvl3_lat < 1000)
if (acpi_gbl_FADT.header.length > 0 && acpi_gbl_FADT.C3latency < 1000)
return 1;
#endif
return 0;
......
......@@ -101,7 +101,7 @@ static __init inline int srat_disabled(void)
static __init int slit_valid(struct acpi_table_slit *slit)
{
int i, j;
int d = slit->localities;
int d = slit->locality_count;
for (i = 0; i < d; i++) {
for (j = 0; j < d; j++) {
u8 val = slit->entry[d*i + j];
......@@ -127,18 +127,18 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
/* Callback for Proximity Domain -> LAPIC mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
int pxm, node;
if (srat_disabled())
return;
if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {
if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
bad_srat();
return;
}
if (pa->flags.enabled == 0)
if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
return;
pxm = pa->proximity_domain;
pxm = pa->proximity_domain_lo;
node = setup_node(pxm);
if (node < 0) {
printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
......@@ -279,7 +279,7 @@ static int reserve_hotadd(int node, unsigned long start, unsigned long end)
/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
void __init
acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
struct bootnode *nd, oldnode;
unsigned long start, end;
......@@ -288,16 +288,17 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
if (srat_disabled())
return;
if (ma->header.length != sizeof(struct acpi_table_memory_affinity)) {
if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
bad_srat();
return;
}
if (ma->flags.enabled == 0)
if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
return;
if (ma->flags.hot_pluggable && !save_add_info())
if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
return;
start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
end = start + (ma->length_lo | ((u64)ma->length_hi << 32));
start = ma->base_address;
end = start + ma->length;
pxm = ma->proximity_domain;
node = setup_node(pxm);
if (node < 0) {
......@@ -337,7 +338,8 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
push_node_boundaries(node, nd->start >> PAGE_SHIFT,
nd->end >> PAGE_SHIFT);
if (ma->flags.hot_pluggable && (reserve_hotadd(node, start, end) < 0)) {
if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) &&
(reserve_hotadd(node, start, end) < 0)) {
/* Ignore hotadd region. Undo damage */
printk(KERN_NOTICE "SRAT: Hotplug region ignored\n");
*nd = oldnode;
......@@ -461,7 +463,7 @@ int __node_distance(int a, int b)
if (!acpi_slit)
return a == b ? 10 : 20;
index = acpi_slit->localities * node_to_pxm(a);
index = acpi_slit->locality_count * node_to_pxm(a);
return acpi_slit->entry[index + node_to_pxm(b)];
}
......
......@@ -25,7 +25,7 @@ static DECLARE_BITMAP(fallback_slots, 32*MAX_CHECK_BUS);
/* Static virtual mapping of the MMCONFIG aperture */
struct mmcfg_virt {
struct acpi_table_mcfg_config *cfg;
struct acpi_mcfg_allocation *cfg;
char __iomem *virt;
};
static struct mmcfg_virt *pci_mmcfg_virt;
......@@ -33,14 +33,14 @@ static struct mmcfg_virt *pci_mmcfg_virt;
static char __iomem *get_virt(unsigned int seg, unsigned bus)
{
int cfg_num = -1;
struct acpi_table_mcfg_config *cfg;
struct acpi_mcfg_allocation *cfg;
while (1) {
++cfg_num;
if (cfg_num >= pci_mmcfg_config_num)
break;
cfg = pci_mmcfg_virt[cfg_num].cfg;
if (cfg->pci_segment_group_number != seg)
if (cfg->pci_segment != seg)
continue;
if ((cfg->start_bus_number <= bus) &&
(cfg->end_bus_number >= bus))
......@@ -52,7 +52,7 @@ static char __iomem *get_virt(unsigned int seg, unsigned bus)
this applies to all busses. */
cfg = &pci_mmcfg_config[0];
if (pci_mmcfg_config_num == 1 &&
cfg->pci_segment_group_number == 0 &&
cfg->pci_segment == 0 &&
(cfg->start_bus_number | cfg->end_bus_number) == 0)
return pci_mmcfg_virt[0].virt;
......@@ -170,19 +170,19 @@ void __init pci_mmcfg_init(int type)
if ((pci_probe & PCI_PROBE_MMCONF) == 0)
return;
acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg);
acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);
if ((pci_mmcfg_config_num == 0) ||
(pci_mmcfg_config == NULL) ||
(pci_mmcfg_config[0].base_address == 0))
(pci_mmcfg_config[0].address == 0))
return;
/* Only do this check when type 1 works. If it doesn't work
assume we run on a Mac and always use MCFG */
if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].base_address,
pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN,
if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].address,
pci_mmcfg_config[0].address + MMCONFIG_APER_MIN,
E820_RESERVED)) {
printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n",
pci_mmcfg_config[0].base_address);
printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %lx is not E820-reserved\n",
(unsigned long)pci_mmcfg_config[0].address);
printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
return;
}
......@@ -194,15 +194,16 @@ void __init pci_mmcfg_init(int type)
}
for (i = 0; i < pci_mmcfg_config_num; ++i) {
pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i];
pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].base_address,
pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].address,
MMCONFIG_APER_MAX);
if (!pci_mmcfg_virt[i].virt) {
printk(KERN_ERR "PCI: Cannot map mmconfig aperture for "
"segment %d\n",
pci_mmcfg_config[i].pci_segment_group_number);
pci_mmcfg_config[i].pci_segment);
return;
}
printk(KERN_INFO "PCI: Using MMCONFIG at %x\n", pci_mmcfg_config[i].base_address);
printk(KERN_INFO "PCI: Using MMCONFIG at %lx\n",
(unsigned long)pci_mmcfg_config[i].address);
}
unreachable_devices();
......
......@@ -3,6 +3,7 @@
#
menu "ACPI (Advanced Configuration and Power Interface) Support"
depends on !X86_NUMAQ
depends on !X86_VISWS
depends on !IA64_HP_SIM
depends on IA64 || X86
......@@ -77,6 +78,20 @@ config ACPI_SLEEP_PROC_SLEEP
Create /proc/acpi/sleep
Deprecated by /sys/power/state
config ACPI_PROCFS
bool "Procfs interface (deprecated)"
depends on ACPI
default y
---help---
The procfs interface for ACPI is made optional for backward compatibility.
As the same functions are duplicated in the sysfs interface
and this proc interface will be removed some time later,
it is marked as deprecated.
( /proc/acpi/debug_layer && debug_level are deprecated by
/sys/module/acpi/parameters/debug_layer && debug_level.
/proc/acpi/info is deprecated by
/sys/module/acpi/parameters/acpica_version )
config ACPI_AC
tristate "AC Adapter"
depends on X86
......@@ -107,7 +122,7 @@ config ACPI_BUTTON
config ACPI_VIDEO
tristate "Video"
depends on X86
depends on X86 && BACKLIGHT_CLASS_DEVICE
help
This driver implements the ACPI Extensions For Display Adapters
for integrated graphics devices on motherboard, as specified in
......@@ -139,6 +154,13 @@ config ACPI_DOCK
help
This driver adds support for ACPI controlled docking stations
config ACPI_BAY
tristate "Removable Drive Bay (EXPERIMENTAL)"
depends on EXPERIMENTAL
help
This driver adds support for ACPI controlled removable drive
bays such as the IBM ultrabay or the Dell Module Bay.
config ACPI_PROCESSOR
tristate "Processor"
default y
......@@ -197,7 +219,10 @@ config ACPI_ASUS
If you have an ACPI-compatible ASUS laptop, say Y or M here. This
driver is still under development, so if your laptop is unsupported or
something does not work quite as expected, please use the mailing list
available on the above page (acpi4asus-user@lists.sourceforge.net)
available on the above page (acpi4asus-user@lists.sourceforge.net).
NOTE: This driver is deprecated and will probably be removed soon;
use asus-laptop instead.
config ACPI_IBM
tristate "IBM ThinkPad Laptop Extras"
......
......@@ -37,12 +37,14 @@ endif
obj-y += sleep/
obj-y += bus.o glue.o
obj-y += scan.o
obj-$(CONFIG_ACPI_AC) += ac.o
obj-$(CONFIG_ACPI_BATTERY) += battery.o
obj-$(CONFIG_ACPI_BUTTON) += button.o
obj-$(CONFIG_ACPI_EC) += ec.o
obj-$(CONFIG_ACPI_FAN) += fan.o
obj-$(CONFIG_ACPI_DOCK) += dock.o
obj-$(CONFIG_ACPI_BAY) += bay.o
obj-$(CONFIG_ACPI_VIDEO) += video.o
obj-$(CONFIG_ACPI_HOTKEY) += hotkey.o
obj-y += pci_root.o pci_link.o pci_irq.o pci_bind.o
......@@ -56,7 +58,6 @@ obj-$(CONFIG_ACPI_NUMA) += numa.o
obj-$(CONFIG_ACPI_ASUS) += asus_acpi.o
obj-$(CONFIG_ACPI_IBM) += ibm_acpi.o
obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
obj-y += scan.o motherboard.o
obj-$(CONFIG_ACPI_HOTPLUG_MEMORY) += acpi_memhotplug.o
obj-y += cm_sbs.o
obj-$(CONFIG_ACPI_SBS) += i2c_ec.o sbs.o
......@@ -26,7 +26,7 @@
* Pontus Fuchs - Helper functions, cleanup
* Johann Wiesner - Small compile fixes
* John Belmonte - ACPI code for Toshiba laptop was a good starting point.
* ric Burghard - LED display support for W1N
* ic Burghard - LED display support for W1N
*
*/
......@@ -1128,7 +1128,6 @@ static int asus_model_match(char *model)
static int asus_hotk_get_info(void)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_buffer dsdt = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *model = NULL;
int bsts_result;
char *string = NULL;
......@@ -1142,11 +1141,9 @@ static int asus_hotk_get_info(void)
* HID), this bit will be moved. A global variable asus_info contains
* the DSDT header.
*/
status = acpi_get_table(ACPI_TABLE_ID_DSDT, 1, &dsdt);
status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info);
if (ACPI_FAILURE(status))
printk(KERN_WARNING " Couldn't get the DSDT table header\n");
else
asus_info = dsdt.pointer;
/* We have to write 0 on init this far for all ASUS models */
if (!write_acpi_int(hotk->handle, "INIT", 0, &buffer)) {
......@@ -1358,8 +1355,6 @@ static void __exit asus_acpi_exit(void)
acpi_bus_unregister_driver(&asus_hotk_driver);
remove_proc_entry(PROC_ASUS, acpi_root_dir);
kfree(asus_info);
return;
}
......
......@@ -64,7 +64,7 @@ extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
static int acpi_battery_add(struct acpi_device *device);
static int acpi_battery_remove(struct acpi_device *device, int type);
static int acpi_battery_resume(struct acpi_device *device, int status);
static int acpi_battery_resume(struct acpi_device *device);
static struct acpi_driver acpi_battery_driver = {
.name = ACPI_BATTERY_DRIVER_NAME,
......@@ -753,7 +753,7 @@ static int acpi_battery_remove(struct acpi_device *device, int type)
}
/* this is needed to learn about changes made in suspended state */
static int acpi_battery_resume(struct acpi_device *device, int state)
static int acpi_battery_resume(struct acpi_device *device)
{
struct acpi_battery *battery;
......
......@@ -44,7 +44,7 @@ struct acpi_blacklist_item {
char oem_id[7];
char oem_table_id[9];
u32 oem_revision;
acpi_table_type table;
char *table;
enum acpi_blacklist_predicates oem_revision_predicate;
char *reason;
u32 is_critical_error;
......@@ -56,18 +56,18 @@ struct acpi_blacklist_item {
*/
static struct acpi_blacklist_item acpi_blacklist[] __initdata = {
/* Compaq Presario 1700 */
{"PTLTD ", " DSDT ", 0x06040000, ACPI_DSDT, less_than_or_equal,
{"PTLTD ", " DSDT ", 0x06040000, ACPI_SIG_DSDT, less_than_or_equal,
"Multiple problems", 1},
/* Sony FX120, FX140, FX150? */
{"SONY ", "U0 ", 0x20010313, ACPI_DSDT, less_than_or_equal,
{"SONY ", "U0 ", 0x20010313, ACPI_SIG_DSDT, less_than_or_equal,
"ACPI driver problem", 1},
/* Compaq Presario 800, Insyde BIOS */
{"INT440", "SYSFexxx", 0x00001001, ACPI_DSDT, less_than_or_equal,
{"INT440", "SYSFexxx", 0x00001001, ACPI_SIG_DSDT, less_than_or_equal,
"Does not use _REG to protect EC OpRegions", 1},
/* IBM 600E - _ADR should return 7, but it returns 1 */
{"IBM ", "TP600E ", 0x00000105, ACPI_DSDT, less_than_or_equal,
{"IBM ", "TP600E ", 0x00000105, ACPI_SIG_DSDT, less_than_or_equal,
"Incorrect _ADR", 1},
{"ASUS\0\0", "P2B-S ", 0, ACPI_DSDT, all_versions,
{"ASUS\0\0", "P2B-S ", 0, ACPI_SIG_DSDT, all_versions,
"Bogus PCI routing", 1},
{""}
......@@ -103,22 +103,21 @@ int __init acpi_blacklisted(void)
{
int i = 0;
int blacklisted = 0;
struct acpi_table_header *table_header;
struct acpi_table_header table_header;
while (acpi_blacklist[i].oem_id[0] != '\0') {
if (acpi_get_table_header_early
(acpi_blacklist[i].table, &table_header)) {
if (acpi_get_table_header(acpi_blacklist[i].table, 0, &table_header)) {
i++;
continue;
}
if (strncmp(acpi_blacklist[i].oem_id, table_header->oem_id, 6)) {
if (strncmp(acpi_blacklist[i].oem_id, table_header.oem_id, 6)) {
i++;
continue;
}
if (strncmp
(acpi_blacklist[i].oem_table_id, table_header->oem_table_id,
(acpi_blacklist[i].oem_table_id, table_header.oem_table_id,
8)) {
i++;
continue;
......@@ -127,14 +126,14 @@ int __init acpi_blacklisted(void)
if ((acpi_blacklist[i].oem_revision_predicate == all_versions)
|| (acpi_blacklist[i].oem_revision_predicate ==
less_than_or_equal
&& table_header->oem_revision <=
&& table_header.oem_revision <=
acpi_blacklist[i].oem_revision)
|| (acpi_blacklist[i].oem_revision_predicate ==
greater_than_or_equal
&& table_header->oem_revision >=
&& table_header.oem_revision >=
acpi_blacklist[i].oem_revision)
|| (acpi_blacklist[i].oem_revision_predicate == equal
&& table_header->oem_revision ==
&& table_header.oem_revision ==
acpi_blacklist[i].oem_revision)) {
printk(KERN_ERR PREFIX
......
......@@ -44,9 +44,6 @@ ACPI_MODULE_NAME("acpi_bus")
extern void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger);
#endif
struct fadt_descriptor acpi_fadt;
EXPORT_SYMBOL(acpi_fadt);
struct acpi_device *acpi_root;
struct proc_dir_entry *acpi_root_dir;
EXPORT_SYMBOL(acpi_root_dir);
......@@ -195,7 +192,7 @@ int acpi_bus_set_power(acpi_handle handle, int state)
if (!device->flags.power_manageable) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable\n",
device->kobj.name));
device->dev.kobj.name));
return -ENODEV;
}
/*
......@@ -582,11 +579,12 @@ static int __init acpi_bus_init_irq(void)
return 0;
}
acpi_native_uint acpi_gbl_permanent_mmap;
void __init acpi_early_init(void)
{
acpi_status status = AE_OK;
struct acpi_buffer buffer = { sizeof(acpi_fadt), &acpi_fadt };
if (acpi_disabled)
return;
......@@ -597,6 +595,15 @@ void __init acpi_early_init(void)
if (!acpi_strict)
acpi_gbl_enable_interpreter_slack = TRUE;
acpi_gbl_permanent_mmap = 1;
status = acpi_reallocate_root_table();
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX
"Unable to reallocate ACPI tables\n");
goto error0;
}
status = acpi_initialize_subsystem();
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX
......@@ -611,32 +618,25 @@ void __init acpi_early_init(void)
goto error0;
}
/*
* Get a separate copy of the FADT for use by other drivers.
*/
status = acpi_get_table(ACPI_TABLE_ID_FADT, 1, &buffer);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX "Unable to get the FADT\n");
goto error0;
}
#ifdef CONFIG_X86
if (!acpi_ioapic) {
extern acpi_interrupt_flags acpi_sci_flags;
extern u8 acpi_sci_flags;
/* compatible (0) means level (3) */
if (acpi_sci_flags.trigger == 0)
acpi_sci_flags.trigger = 3;
if (!(acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)) {
acpi_sci_flags &= ~ACPI_MADT_TRIGGER_MASK;
acpi_sci_flags |= ACPI_MADT_TRIGGER_LEVEL;
}
/* Set PIC-mode SCI trigger type */
acpi_pic_sci_set_trigger(acpi_fadt.sci_int,
acpi_sci_flags.trigger);
acpi_pic_sci_set_trigger(acpi_gbl_FADT.sci_interrupt,
(acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2);
} else {
extern int acpi_sci_override_gsi;
/*
* now that acpi_fadt is initialized,
* now that acpi_gbl_FADT is initialized,
* update it with result from INT_SRC_OVR parsing
*/
acpi_fadt.sci_int = acpi_sci_override_gsi;
acpi_gbl_FADT.sci_interrupt = acpi_sci_override_gsi;
}
#endif
......
......@@ -75,7 +75,7 @@ static int acpi_button_state_open_fs(struct inode *inode, struct file *file);
static struct acpi_driver acpi_button_driver = {
.name = ACPI_BUTTON_DRIVER_NAME,
.class = ACPI_BUTTON_CLASS,
.ids = "ACPI_FPB,ACPI_FSB,PNP0C0D,PNP0C0C,PNP0C0E",
.ids = "button_power,button_sleep,PNP0C0D,PNP0C0C,PNP0C0E",
.ops = {
.add = acpi_button_add,
.remove = acpi_button_remove,
......
......@@ -167,7 +167,7 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
if (ACPI_FAILURE(status) || !device) {
result = container_device_add(&device, handle);
if (!result)
kobject_uevent(&device->kobj,
kobject_uevent(&device->dev.kobj,
KOBJ_ONLINE);
else
printk("Failed to add container\n");
......@@ -175,13 +175,13 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
} else {
if (ACPI_SUCCESS(status)) {
/* device exist and this is a remove request */
kobject_uevent(&device->kobj, KOBJ_OFFLINE);
kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
}
}
break;
case ACPI_NOTIFY_EJECT_REQUEST:
if (!acpi_bus_get_device(handle, &device) && device) {
kobject_uevent(&device->kobj, KOBJ_OFFLINE);
kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
}
break;
default:
......
......@@ -13,14 +13,11 @@
#define _COMPONENT ACPI_SYSTEM_COMPONENT
ACPI_MODULE_NAME("debug")
#define ACPI_SYSTEM_FILE_DEBUG_LAYER "debug_layer"
#define ACPI_SYSTEM_FILE_DEBUG_LEVEL "debug_level"
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX
module_param(acpi_dbg_layer, uint, 0400);
module_param(acpi_dbg_level, uint, 0400);
#define MODULE_PARAM_PREFIX "acpi."
struct acpi_dlayer {
const char *name;
......@@ -86,6 +83,60 @@ static const struct acpi_dlevel acpi_debug_levels[] = {
ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
};
/* --------------------------------------------------------------------------
FS Interface (/sys)
-------------------------------------------------------------------------- */
static int param_get_debug_layer(char *buffer, struct kernel_param *kp) {
int result = 0;
int i;
result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
for(i = 0; i <ARRAY_SIZE(acpi_debug_layers); i++) {
result += sprintf(buffer+result, "%-25s\t0x%08lX [%c]\n",
acpi_debug_layers[i].name,
acpi_debug_layers[i].value,
(acpi_dbg_layer & acpi_debug_layers[i].value) ? '*' : ' ');
}
result += sprintf(buffer+result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
ACPI_ALL_DRIVERS,
(acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer &
ACPI_ALL_DRIVERS) == 0 ? ' ' : '-');
result += sprintf(buffer+result, "--\ndebug_layer = 0x%08X ( * = enabled)\n", acpi_dbg_layer);
return result;
}
static int param_get_debug_level(char *buffer, struct kernel_param *kp) {
int result = 0;
int i;
result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
result += sprintf(buffer+result, "%-25s\t0x%08lX [%c]\n",
acpi_debug_levels[i].name,
acpi_debug_levels[i].value,
(acpi_dbg_level & acpi_debug_levels[i].
value) ? '*' : ' ');
}
result += sprintf(buffer+result, "--\ndebug_level = 0x%08X (* = enabled)\n",
acpi_dbg_level);
return result;
}
module_param_call(debug_layer, param_set_uint, param_get_debug_layer, &acpi_dbg_layer, 0644);
module_param_call(debug_level, param_set_uint, param_get_debug_level, &acpi_dbg_level, 0644);
/* --------------------------------------------------------------------------
FS Interface (/proc)
-------------------------------------------------------------------------- */
#ifdef CONFIG_ACPI_PROCFS
#define ACPI_SYSTEM_FILE_DEBUG_LAYER "debug_layer"
#define ACPI_SYSTEM_FILE_DEBUG_LEVEL "debug_level"
static int
acpi_system_read_debug(char *page,
char **start, off_t off, int count, int *eof, void *data)
......@@ -221,3 +272,4 @@ static int __init acpi_debug_init(void)
}
subsys_initcall(acpi_debug_init);
#endif
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -133,7 +133,8 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
}
}
/* We could put the returned object (Node) on the object stack for later,
/*
* We could put the returned object (Node) on the object stack for later,
* but for now, we will put it in the "op" object that the parser uses,
* so we can get it again at the end of this scope
*/
......@@ -514,8 +515,33 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
/* Third arg is the bank_value */
/* TBD: This arg is a term_arg, not a constant, and must be evaluated */
arg = arg->common.next;
/* Currently, only the following constants are supported */
switch (arg->common.aml_opcode) {
case AML_ZERO_OP:
info.bank_value = 0;
break;
case AML_ONE_OP:
info.bank_value = 1;
break;
case AML_BYTE_OP:
case AML_WORD_OP:
case AML_DWORD_OP:
case AML_QWORD_OP:
info.bank_value = (u32) arg->common.value.integer;
break;
default:
info.bank_value = 0;
ACPI_ERROR((AE_INFO,
"Non-constant BankValue for BankField is not implemented"));
}
/* Fourth arg is the field flags */
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -44,6 +44,7 @@
#include <acpi/acpi.h>
#include <acpi/acdispat.h>
#include <acpi/acnamesp.h>
#include <acpi/actables.h>
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsinit")
......@@ -90,7 +91,7 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
* We are only interested in NS nodes owned by the table that
* was just loaded
*/
if (node->owner_id != info->table_desc->owner_id) {
if (node->owner_id != info->owner_id) {
return (AE_OK);
}
......@@ -150,14 +151,21 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
******************************************************************************/
acpi_status
acpi_ds_initialize_objects(struct acpi_table_desc * table_desc,
acpi_ds_initialize_objects(acpi_native_uint table_index,
struct acpi_namespace_node * start_node)
{
acpi_status status;
struct acpi_init_walk_info info;
struct acpi_table_header *table;
acpi_owner_id owner_id;
ACPI_FUNCTION_TRACE(ds_initialize_objects);
status = acpi_tb_get_owner_id(table_index, &owner_id);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"**** Starting initialization of namespace objects ****\n"));
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "Parsing all Control Methods:"));
......@@ -166,7 +174,8 @@ acpi_ds_initialize_objects(struct acpi_table_desc * table_desc,
info.op_region_count = 0;
info.object_count = 0;
info.device_count = 0;
info.table_desc = table_desc;
info.table_index = table_index;
info.owner_id = owner_id;
/* Walk entire namespace from the supplied root */
......@@ -176,10 +185,14 @@ acpi_ds_initialize_objects(struct acpi_table_desc * table_desc,
ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
}
status = acpi_get_table_by_index(table_index, &table);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
"\nTable [%4.4s](id %4.4X) - %hd Objects with %hd Devices %hd Methods %hd Regions\n",
table_desc->pointer->signature,
table_desc->owner_id, info.object_count,
table->signature, owner_id, info.object_count,
info.device_count, info.method_count,
info.op_region_count));
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -327,7 +327,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"Execute method %p, currentstate=%p\n",
"Calling method %p, currentstate=%p\n",
this_walk_state->prev_op, this_walk_state));
/*
......@@ -351,49 +351,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
return_ACPI_STATUS(status);
}
/*
* 1) Parse the method. All "normal" methods are parsed for each execution.
* Internal methods (_OSI, etc.) do not require parsing.
*/
if (!(obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY)) {
/* Create a new walk state for the parse */
next_walk_state =
acpi_ds_create_walk_state(obj_desc->method.owner_id, op,
obj_desc, NULL);
if (!next_walk_state) {
status = AE_NO_MEMORY;
goto cleanup;
}
/* Create and init a parse tree root */
op = acpi_ps_create_scope_op();
if (!op) {
status = AE_NO_MEMORY;
goto cleanup;
}
status = acpi_ds_init_aml_walk(next_walk_state, op, method_node,
obj_desc->method.aml_start,
obj_desc->method.aml_length,
NULL, 1);
if (ACPI_FAILURE(status)) {
acpi_ps_delete_parse_tree(op);
goto cleanup;
}
/* Begin AML parse (deletes next_walk_state) */
status = acpi_ps_parse_aml(next_walk_state);
acpi_ps_delete_parse_tree(op);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
}
/* 2) Begin method execution. Create a new walk state */
/* Begin method parse/execution. Create a new walk state */
next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id,
NULL, obj_desc, thread);
......@@ -424,7 +382,8 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
obj_desc->method.aml_start,
obj_desc->method.aml_length, info, 3);
obj_desc->method.aml_length, info,
ACPI_IMODE_EXECUTE);
ACPI_FREE(info);
if (ACPI_FAILURE(status)) {
......@@ -445,8 +404,8 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
this_walk_state->num_operands = 0;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"Starting nested execution, newstate=%p\n",
next_walk_state));
"**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
method_node->name.ascii, next_walk_state));
/* Invoke an internal method if necessary */
......
......@@ -5,7 +5,7 @@
******************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -260,7 +260,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
}
obj_desc->buffer.flags |= AOPOBJ_DATA_VALID;
op->common.node = (struct acpi_namespace_node *)obj_desc;
op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
return_ACPI_STATUS(AE_OK);
}
......@@ -270,7 +270,8 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
*
* PARAMETERS: walk_state - Current walk state
* Op - Parser object to be translated
* package_length - Number of elements in the package
* element_count - Number of elements in the package - this is
* the num_elements argument to Package()
* obj_desc_ptr - Where the ACPI internal object is returned
*
* RETURN: Status
......@@ -278,18 +279,29 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
* DESCRIPTION: Translate a parser Op package object to the equivalent
* namespace object
*
* NOTE: The number of elements in the package will always be the num_elements
* count, regardless of the number of elements in the package list. If
* num_elements is smaller, only that many package list elements are used.
* If num_elements is larger, the Package object is padded out with
* objects of type Uninitialized (as per the ACPI spec.)
*
* Even though the ASL compilers do not allow num_elements to be smaller
* than the Package list length (for the fixed length package opcode), some
* BIOS code modifies the AML on the fly to adjust the num_elements, and
* this code compensates for that. This also provides compatibility with
* other AML interpreters.
*
******************************************************************************/
acpi_status
acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
union acpi_parse_object *op,
u32 package_length,
u32 element_count,
union acpi_operand_object **obj_desc_ptr)
{
union acpi_parse_object *arg;
union acpi_parse_object *parent;
union acpi_operand_object *obj_desc = NULL;
u32 package_list_length;
acpi_status status = AE_OK;
acpi_native_uint i;
......@@ -318,32 +330,13 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
obj_desc->package.node = parent->common.node;
}
obj_desc->package.count = package_length;
/* Count the number of items in the package list */
arg = op->common.value.arg;
arg = arg->common.next;
for (package_list_length = 0; arg; package_list_length++) {
arg = arg->common.next;
}
/*
* The package length (number of elements) will be the greater
* of the specified length and the length of the initializer list
*/
if (package_list_length > package_length) {
obj_desc->package.count = package_list_length;
}
/*
* Allocate the pointer array (array of pointers to the
* individual objects). Add an extra pointer slot so
* that the list is always null terminated.
* Allocate the element array (array of pointers to the individual
* objects) based on the num_elements parameter. Add an extra pointer slot
* so that the list is always null terminated.
*/
obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
obj_desc->package.
count +
element_count +
1) * sizeof(void *));
if (!obj_desc->package.elements) {
......@@ -351,15 +344,20 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(AE_NO_MEMORY);
}
obj_desc->package.count = element_count;
/*
* Initialize all elements of the package
* Initialize the elements of the package, up to the num_elements count.
* Package is automatically padded with uninitialized (NULL) elements
* if num_elements is greater than the package list length. Likewise,
* Package is truncated if num_elements is less than the list length.
*/
arg = op->common.value.arg;
arg = arg->common.next;
for (i = 0; arg; i++) {
for (i = 0; arg && (i < element_count); i++) {
if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
/* Object (package or buffer) is already built */
/* This package element is already built, just get it */
obj_desc->package.elements[i] =
ACPI_CAST_PTR(union acpi_operand_object,
......@@ -373,8 +371,14 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
arg = arg->common.next;
}
if (!arg) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Package List length larger than NumElements count (%X), truncated\n",
element_count));
}
obj_desc->package.flags |= AOPOBJ_DATA_VALID;
op->common.node = (struct acpi_namespace_node *)obj_desc;
op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
return_ACPI_STATUS(status);
}
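For readers unfamiliar with the behavior documented in the NOTE above, a minimal standalone sketch (plain C, hypothetical helper names, not ACPICA code) of the pad/truncate rule could look like this:

/*
 * Illustrative sketch only: num_elements controls the final package size;
 * missing entries stay NULL ("uninitialized"), extra initializers are dropped.
 */
#include <stdio.h>
#include <stdlib.h>

static const char **build_package(const char **init_list, size_t list_len,
				  size_t num_elements)
{
	/* One extra slot so the element array is always NULL terminated */
	const char **elements = calloc(num_elements + 1, sizeof(*elements));
	size_t i;

	if (!elements)
		return NULL;

	/* Copy at most num_elements initializers; the remainder stay NULL */
	for (i = 0; i < num_elements && i < list_len; i++)
		elements[i] = init_list[i];

	return elements;
}

int main(void)
{
	const char *init[] = { "A", "B", "C" };
	const char **pkg = build_package(init, 3, 5);	/* padded with two NULLs */
	size_t i;

	for (i = 0; pkg && i < 5; i++)
		printf("element %zu: %s\n", i, pkg[i] ? pkg[i] : "(uninitialized)");

	free(pkg);
	return 0;
}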
......@@ -488,8 +492,9 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
/*
* Defer evaluation of Buffer term_arg operand
*/
obj_desc->buffer.node = (struct acpi_namespace_node *)
walk_state->operands[0];
obj_desc->buffer.node =
ACPI_CAST_PTR(struct acpi_namespace_node,
walk_state->operands[0]);
obj_desc->buffer.aml_start = op->named.data;
obj_desc->buffer.aml_length = op->named.length;
break;
......@@ -499,8 +504,9 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
/*
* Defer evaluation of Package term_arg operand
*/
obj_desc->package.node = (struct acpi_namespace_node *)
walk_state->operands[0];
obj_desc->package.node =
ACPI_CAST_PTR(struct acpi_namespace_node,
walk_state->operands[0]);
obj_desc->package.aml_start = op->named.data;
obj_desc->package.aml_length = op->named.length;
break;
......
......@@ -6,7 +6,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -114,7 +114,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
}
status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
aml_length, NULL, 1);
aml_length, NULL, ACPI_IMODE_LOAD_PASS1);
if (ACPI_FAILURE(status)) {
acpi_ds_delete_walk_state(walk_state);
goto cleanup;
......@@ -157,7 +157,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
/* Execute the opcode and arguments */
status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
aml_length, NULL, 3);
aml_length, NULL, ACPI_IMODE_EXECUTE);
if (ACPI_FAILURE(status)) {
acpi_ds_delete_walk_state(walk_state);
goto cleanup;
......
......@@ -5,7 +5,7 @@
******************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......
......@@ -6,7 +6,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -219,7 +219,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
if (!op) {
status = acpi_ds_load2_begin_op(walk_state, out_op);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
goto error_exit;
}
op = *out_op;
......@@ -238,7 +238,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
status = acpi_ds_scope_stack_pop(walk_state);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
goto error_exit;
}
}
}
......@@ -287,7 +287,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
status = acpi_ds_result_stack_push(walk_state);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
goto error_exit;
}
status = acpi_ds_exec_begin_control_op(walk_state, op);
......@@ -328,6 +328,10 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
/* Nothing to do here during method execution */
return_ACPI_STATUS(status);
error_exit:
status = acpi_ds_method_error(status, walk_state);
return_ACPI_STATUS(status);
}
/*****************************************************************************
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -196,6 +196,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
* one of the opcodes that actually opens a scope
*/
switch (node->type) {
case ACPI_TYPE_ANY:
case ACPI_TYPE_LOCAL_SCOPE: /* Scope */
case ACPI_TYPE_DEVICE:
case ACPI_TYPE_POWER:
......@@ -546,6 +547,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
acpi_status status;
acpi_object_type object_type;
char *buffer_ptr;
u32 flags;
ACPI_FUNCTION_TRACE(ds_load2_begin_op);
......@@ -669,6 +671,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
* one of the opcodes that actually opens a scope
*/
switch (node->type) {
case ACPI_TYPE_ANY:
case ACPI_TYPE_LOCAL_SCOPE: /* Scope */
case ACPI_TYPE_DEVICE:
case ACPI_TYPE_POWER:
......@@ -750,12 +753,20 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
break;
}
/* Add new entry into namespace */
flags = ACPI_NS_NO_UPSEARCH;
if (walk_state->pass_number == ACPI_IMODE_EXECUTE) {
/* Execution mode, node cannot already exist, node is temporary */
flags |= (ACPI_NS_ERROR_IF_FOUND | ACPI_NS_TEMPORARY);
}
/* Add new entry or lookup existing entry */
status =
acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
object_type, ACPI_IMODE_LOAD_PASS2,
ACPI_NS_NO_UPSEARCH, walk_state, &(node));
object_type, ACPI_IMODE_LOAD_PASS2, flags,
walk_state, &node);
break;
}
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......
......@@ -615,20 +615,28 @@ static acpi_status
find_dock_devices(acpi_handle handle, u32 lvl, void *context, void **rv)
{
acpi_status status;
acpi_handle tmp;
acpi_handle tmp, parent;
struct dock_station *ds = context;
struct dock_dependent_device *dd;
status = acpi_bus_get_ejd(handle, &tmp);
if (ACPI_FAILURE(status)) {
/* try the parent device as well */
status = acpi_get_parent(handle, &parent);
if (ACPI_FAILURE(status))
return AE_OK;
goto fdd_out;
/* see if parent is dependent on dock */
status = acpi_bus_get_ejd(parent, &tmp);
if (ACPI_FAILURE(status))
goto fdd_out;
}
if (tmp == ds->handle) {
dd = alloc_dock_dependent_device(handle);
if (dd)
add_dock_dependent_device(ds, dd);
}
fdd_out:
return AE_OK;
}
......
......@@ -872,9 +872,8 @@ static int __init acpi_ec_get_real_ecdt(void)
acpi_status status;
struct acpi_table_ecdt *ecdt_ptr;
status = acpi_get_firmware_table("ECDT", 1, ACPI_LOGICAL_ADDRESSING,
(struct acpi_table_header **)
&ecdt_ptr);
status = acpi_get_table(ACPI_SIG_ECDT, 1,
(struct acpi_table_header **)&ecdt_ptr);
if (ACPI_FAILURE(status))
return -ENODEV;
......@@ -891,14 +890,14 @@ static int __init acpi_ec_get_real_ecdt(void)
if (acpi_ec_mode == EC_INTR) {
init_waitqueue_head(&ec_ecdt->wait);
}
ec_ecdt->command_addr = ecdt_ptr->ec_control.address;
ec_ecdt->data_addr = ecdt_ptr->ec_data.address;
ec_ecdt->gpe = ecdt_ptr->gpe_bit;
ec_ecdt->command_addr = ecdt_ptr->control.address;
ec_ecdt->data_addr = ecdt_ptr->data.address;
ec_ecdt->gpe = ecdt_ptr->gpe;
/* use the GL just to be safe */
ec_ecdt->global_lock = TRUE;
ec_ecdt->uid = ecdt_ptr->uid;
status = acpi_get_handle(NULL, ecdt_ptr->ec_id, &ec_ecdt->handle);
status = acpi_get_handle(NULL, ecdt_ptr->id, &ec_ecdt->handle);
if (ACPI_FAILURE(status)) {
goto error;
}
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -70,13 +70,6 @@ acpi_status acpi_ev_initialize_events(void)
ACPI_FUNCTION_TRACE(ev_initialize_events);
/* Make sure we have ACPI tables */
if (!acpi_gbl_DSDT) {
ACPI_WARNING((AE_INFO, "No ACPI tables present!"));
return_ACPI_STATUS(AE_NO_ACPI_TABLES);
}
/*
* Initialize the Fixed and General Purpose Events. This is done prior to
* enabling SCIs to prevent interrupts from occurring before the handlers are
......@@ -211,8 +204,7 @@ static acpi_status acpi_ev_fixed_event_initialize(void)
if (acpi_gbl_fixed_event_info[i].enable_register_id != 0xFF) {
status =
acpi_set_register(acpi_gbl_fixed_event_info[i].
enable_register_id, 0,
ACPI_MTX_LOCK);
enable_register_id, 0);
if (ACPI_FAILURE(status)) {
return (status);
}
......@@ -298,7 +290,7 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
/* Clear the status bit */
(void)acpi_set_register(acpi_gbl_fixed_event_info[event].
status_register_id, 1, ACPI_MTX_DO_NOT_LOCK);
status_register_id, 1);
/*
* Make sure we've got a handler. If not, report an error.
......@@ -306,8 +298,7 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
*/
if (NULL == acpi_gbl_fixed_event_handlers[event].handler) {
(void)acpi_set_register(acpi_gbl_fixed_event_info[event].
enable_register_id, 0,
ACPI_MTX_DO_NOT_LOCK);
enable_register_id, 0);
ACPI_ERROR((AE_INFO,
"No installed handler for fixed event [%08X]",
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -121,7 +121,9 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
if (!gpe_register_info) {
return_ACPI_STATUS(AE_NOT_EXIST);
}
register_bit = gpe_event_info->register_bit;
register_bit = (u8)
(1 <<
(gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));
/* 1) Disable case. Simply clear all enable bits */
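A hedged aside on the change above: the per-GPE bit is now computed as a shift of the GPE's offset within its 8-bit register rather than read from a decode table. A tiny standalone sketch of that mapping:

/* Illustration only: derive a GPE's bit mask within its register
 * from its offset relative to the register's base GPE number. */
#include <stdio.h>

static unsigned char gpe_bit_mask(unsigned int gpe_number,
				  unsigned int base_gpe_number)
{
	return (unsigned char)(1 << (gpe_number - base_gpe_number));
}

int main(void)
{
	/* GPE 0x13 in a register whose base GPE number is 0x10 -> bit 3 */
	printf("mask: 0x%02X\n", gpe_bit_mask(0x13, 0x10));	/* prints 0x08 */
	return 0;
}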
......@@ -458,8 +460,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
/* Examine one GPE bit */
if (enabled_status_byte &
acpi_gbl_decode_to8bit[j]) {
if (enabled_status_byte & (1 << j)) {
/*
* Found an active GPE. Dispatch the event to a handler
* or method.
......@@ -570,7 +571,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"While evaluating GPE method [%4.4s]",
"while evaluating GPE method [%4.4s]",
acpi_ut_get_node_name
(local_gpe_event_info.dispatch.
method_node)));
......@@ -618,6 +619,8 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
acpi_gpe_count++;
/*
* If edge-triggered, clear the GPE status bit now. Note that
* level-triggered events are cleared after the GPE is serviced.
......@@ -633,20 +636,23 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
}
}
/* Save current system state */
if (acpi_gbl_system_awake_and_running) {
ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_SYSTEM_RUNNING);
} else {
ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_SYSTEM_RUNNING);
if (!acpi_gbl_system_awake_and_running) {
/*
* We just woke up because of a wake GPE. Disable any further GPEs
* until we are fully up and running (Only wake GPEs should be enabled
* at this time, but we just brute-force disable them all.)
* 1) We must disable this particular wake GPE so it won't fire again
* 2) We want to disable all wake GPEs, since we are now awake
*/
(void)acpi_hw_disable_all_gpes();
}
/*
* Dispatch the GPE to either an installed handler, or the control
* method associated with this GPE (_Lxx or _Exx).
* If a handler exists, we invoke it and do not attempt to run the method.
* If there is neither a handler nor a method, we disable the level to
* prevent further events from coming in here.
* Dispatch the GPE to either an installed handler, or the control method
* associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke
* it and do not attempt to run the method. If there is neither a handler
* nor a method, we disable this GPE to prevent further such pointless
* events from firing.
*/
switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
case ACPI_GPE_DISPATCH_HANDLER:
......@@ -677,8 +683,8 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
case ACPI_GPE_DISPATCH_METHOD:
/*
* Disable GPE, so it doesn't keep firing before the method has a
* chance to run.
* Disable the GPE, so it doesn't keep firing before the method has a
* chance to run (it runs asynchronously with interrupts enabled).
*/
status = acpi_ev_disable_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
......@@ -726,50 +732,3 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
return_UINT32(ACPI_INTERRUPT_HANDLED);
}
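As context for the _Lxx/_Exx dispatch described in the comments above, the control method name is conventionally derived from the trigger type and the GPE number in hex. A minimal sketch of that convention (general ACPI behavior, not code from this commit):

/* Illustration only: _Lxx for level-triggered, _Exx for edge-triggered,
 * where xx is the GPE number in hexadecimal. */
#include <stdio.h>

int main(void)
{
	unsigned int gpe_number = 0x13;
	int level_triggered = 1;
	char name[5];

	snprintf(name, sizeof(name), "_%c%02X",
		 level_triggered ? 'L' : 'E', gpe_number);

	printf("GPE 0x%02X -> control method %s\n", gpe_number, name);	/* _L13 */
	return 0;
}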
#ifdef ACPI_GPE_NOTIFY_CHECK
/*******************************************************************************
* TBD: NOT USED, PROTOTYPE ONLY AND WILL PROBABLY BE REMOVED
*
* FUNCTION: acpi_ev_check_for_wake_only_gpe
*
* PARAMETERS: gpe_event_info - info for this GPE
*
* RETURN: Status
*
 * DESCRIPTION: Determine if a GPE is "wake-only".
*
* Called from Notify() code in interpreter when a "DeviceWake"
* Notify comes in.
*
******************************************************************************/
acpi_status
acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status;
ACPI_FUNCTION_TRACE(ev_check_for_wake_only_gpe);
if ((gpe_event_info) && /* Only >0 for _Lxx/_Exx */
((gpe_event_info->flags & ACPI_GPE_SYSTEM_MASK) == ACPI_GPE_SYSTEM_RUNNING)) { /* System state at GPE time */
/* This must be a wake-only GPE, disable it */
status = acpi_ev_disable_gpe(gpe_event_info);
/* Set GPE to wake-only. Do not change wake disabled/enabled status */
acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE);
ACPI_INFO((AE_INFO,
"GPE %p was updated from wake/run to wake-only",
gpe_event_info));
/* This was a wake-only GPE */
return_ACPI_STATUS(AE_WAKE_ONLY_GPE);
}
return_ACPI_STATUS(AE_OK);
}
#endif
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -529,7 +529,7 @@ static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
/* Install new interrupt handler if not SCI_INT */
if (interrupt_number != acpi_gbl_FADT->sci_int) {
if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
status = acpi_os_install_interrupt_handler(interrupt_number,
acpi_ev_gpe_xrupt_handler,
gpe_xrupt);
......@@ -567,7 +567,7 @@ acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
/* We never want to remove the SCI interrupt handler */
if (gpe_xrupt->interrupt_number == acpi_gbl_FADT->sci_int) {
if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
gpe_xrupt->gpe_block_list_head = NULL;
return_ACPI_STATUS(AE_OK);
}
......@@ -796,30 +796,31 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
(u8) (gpe_block->block_base_number +
(i * ACPI_GPE_REGISTER_WIDTH));
ACPI_STORE_ADDRESS(this_register->status_address.address,
(gpe_block->block_address.address + i));
this_register->status_address.address =
gpe_block->block_address.address + i;
ACPI_STORE_ADDRESS(this_register->enable_address.address,
(gpe_block->block_address.address
+ i + gpe_block->register_count));
this_register->enable_address.address =
gpe_block->block_address.address + i +
gpe_block->register_count;
this_register->status_address.address_space_id =
gpe_block->block_address.address_space_id;
this_register->enable_address.address_space_id =
gpe_block->block_address.address_space_id;
this_register->status_address.register_bit_width =
this_register->status_address.space_id =
gpe_block->block_address.space_id;
this_register->enable_address.space_id =
gpe_block->block_address.space_id;
this_register->status_address.bit_width =
ACPI_GPE_REGISTER_WIDTH;
this_register->enable_address.register_bit_width =
this_register->enable_address.bit_width =
ACPI_GPE_REGISTER_WIDTH;
this_register->status_address.register_bit_offset =
this_register->status_address.bit_offset =
ACPI_GPE_REGISTER_WIDTH;
this_register->enable_address.register_bit_offset =
this_register->enable_address.bit_offset =
ACPI_GPE_REGISTER_WIDTH;
/* Init the event_info for each GPE within this register */
for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
this_event->register_bit = acpi_gbl_decode_to8bit[j];
this_event->gpe_number =
(u8) (this_register->base_gpe_number + j);
this_event->register_info = this_register;
this_event++;
}
......@@ -1109,11 +1110,12 @@ acpi_status acpi_ev_gpe_initialize(void)
 * If EITHER the register length OR the block address is zero, then that
* particular block is not supported.
*/
if (acpi_gbl_FADT->gpe0_blk_len && acpi_gbl_FADT->xgpe0_blk.address) {
if (acpi_gbl_FADT.gpe0_block_length &&
acpi_gbl_FADT.xgpe0_block.address) {
/* GPE block 0 exists (has both length and address > 0) */
register_count0 = (u16) (acpi_gbl_FADT->gpe0_blk_len / 2);
register_count0 = (u16) (acpi_gbl_FADT.gpe0_block_length / 2);
gpe_number_max =
(register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
......@@ -1121,9 +1123,9 @@ acpi_status acpi_ev_gpe_initialize(void)
/* Install GPE Block 0 */
status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
&acpi_gbl_FADT->xgpe0_blk,
&acpi_gbl_FADT.xgpe0_block,
register_count0, 0,
acpi_gbl_FADT->sci_int,
acpi_gbl_FADT.sci_interrupt,
&acpi_gbl_gpe_fadt_blocks[0]);
if (ACPI_FAILURE(status)) {
......@@ -1132,20 +1134,21 @@ acpi_status acpi_ev_gpe_initialize(void)
}
}
if (acpi_gbl_FADT->gpe1_blk_len && acpi_gbl_FADT->xgpe1_blk.address) {
if (acpi_gbl_FADT.gpe1_block_length &&
acpi_gbl_FADT.xgpe1_block.address) {
/* GPE block 1 exists (has both length and address > 0) */
register_count1 = (u16) (acpi_gbl_FADT->gpe1_blk_len / 2);
register_count1 = (u16) (acpi_gbl_FADT.gpe1_block_length / 2);
/* Check for GPE0/GPE1 overlap (if both banks exist) */
if ((register_count0) &&
(gpe_number_max >= acpi_gbl_FADT->gpe1_base)) {
(gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
ACPI_ERROR((AE_INFO,
"GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1",
gpe_number_max, acpi_gbl_FADT->gpe1_base,
acpi_gbl_FADT->gpe1_base +
gpe_number_max, acpi_gbl_FADT.gpe1_base,
acpi_gbl_FADT.gpe1_base +
((register_count1 *
ACPI_GPE_REGISTER_WIDTH) - 1)));
......@@ -1157,10 +1160,11 @@ acpi_status acpi_ev_gpe_initialize(void)
status =
acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
&acpi_gbl_FADT->xgpe1_blk,
&acpi_gbl_FADT.xgpe1_block,
register_count1,
acpi_gbl_FADT->gpe1_base,
acpi_gbl_FADT->sci_int,
acpi_gbl_FADT.gpe1_base,
acpi_gbl_FADT.
sci_interrupt,
&acpi_gbl_gpe_fadt_blocks
[1]);
......@@ -1173,7 +1177,7 @@ acpi_status acpi_ev_gpe_initialize(void)
* GPE0 and GPE1 do not have to be contiguous in the GPE number
* space. However, GPE0 always starts at GPE number zero.
*/
gpe_number_max = acpi_gbl_FADT->gpe1_base +
gpe_number_max = acpi_gbl_FADT.gpe1_base +
((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
}
}
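A hedged worked example of the arithmetic used above (assuming the usual 8-bit GPE register width): the FADT GPE block length covers both the status and enable register banks, so the register count is half the block length and the highest GPE number follows directly.

/* Illustration only, not patch code. */
#include <stdio.h>

int main(void)
{
	unsigned int gpe0_block_length = 8;	/* bytes, example FADT value */
	unsigned int register_count = gpe0_block_length / 2;	/* status + enable banks */
	unsigned int gpe_number_max = (register_count * 8) - 1;	/* 8 GPEs per register */

	printf("GPE0: %u registers, GPE numbers 0x00..0x%02X\n",
	       register_count, gpe_number_max);	/* 4 registers, 0x00..0x1F */
	return 0;
}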
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -291,7 +291,6 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
u32 bit_width, acpi_integer * value)
{
acpi_status status;
acpi_status status2;
acpi_adr_space_handler handler;
acpi_adr_space_setup region_setup;
union acpi_operand_object *handler_desc;
......@@ -345,7 +344,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
* setup will potentially execute control methods
* (e.g., _REG method for this region)
*/
acpi_ex_exit_interpreter();
acpi_ex_relinquish_interpreter();
status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
handler_desc->address_space.context,
......@@ -353,10 +352,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
/* Re-enter the interpreter */
status2 = acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status2)) {
return_ACPI_STATUS(status2);
}
acpi_ex_reacquire_interpreter();
/* Check for failure of the Region Setup */
......@@ -409,7 +405,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
* exit the interpreter because the handler *might* block -- we don't
 * know what it will do, so we can't hold the lock on the interpreter.
*/
acpi_ex_exit_interpreter();
acpi_ex_relinquish_interpreter();
}
/* Call the handler */
......@@ -430,10 +426,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
* We just returned from a non-default handler, we must re-enter the
* interpreter
*/
status2 = acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status2)) {
return_ACPI_STATUS(status2);
}
acpi_ex_reacquire_interpreter();
}
return_ACPI_STATUS(status);
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -48,6 +48,11 @@
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evrgnini")
/* Local prototypes */
static u8 acpi_ev_match_pci_root_bridge(char *id);
static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
/*******************************************************************************
*
* FUNCTION: acpi_ev_system_memory_region_setup
......@@ -62,6 +67,7 @@ ACPI_MODULE_NAME("evrgnini")
* DESCRIPTION: Setup a system_memory operation region
*
******************************************************************************/
acpi_status
acpi_ev_system_memory_region_setup(acpi_handle handle,
u32 function,
......@@ -168,9 +174,9 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
union acpi_operand_object *handler_obj;
struct acpi_namespace_node *parent_node;
struct acpi_namespace_node *pci_root_node;
struct acpi_namespace_node *pci_device_node;
union acpi_operand_object *region_obj =
(union acpi_operand_object *)handle;
struct acpi_device_id object_hID;
ACPI_FUNCTION_TRACE(ev_pci_config_region_setup);
......@@ -215,27 +221,14 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
pci_root_node = parent_node;
while (pci_root_node != acpi_gbl_root_node) {
status =
acpi_ut_execute_HID(pci_root_node, &object_hID);
if (ACPI_SUCCESS(status)) {
/*
* Got a valid _HID string, check if this is a PCI root.
* New for ACPI 3.0: check for a PCI Express root also.
*/
if (!
(ACPI_STRNCMP
(object_hID.value, PCI_ROOT_HID_STRING,
sizeof(PCI_ROOT_HID_STRING)))
||
!(ACPI_STRNCMP
(object_hID.value,
PCI_EXPRESS_ROOT_HID_STRING,
sizeof(PCI_EXPRESS_ROOT_HID_STRING)))) {
/* Get the _HID/_CID in order to detect a root_bridge */
if (acpi_ev_is_pci_root_bridge(pci_root_node)) {
/* Install a handler for this PCI root bridge */
status =
acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
status = acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
if (ACPI_FAILURE(status)) {
if (status == AE_SAME_HANDLER) {
/*
......@@ -245,8 +238,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
*/
status = AE_OK;
} else {
ACPI_EXCEPTION((AE_INFO,
status,
ACPI_EXCEPTION((AE_INFO, status,
"Could not install PciConfig handler for Root Bridge %4.4s",
acpi_ut_get_node_name
(pci_root_node)));
......@@ -254,7 +246,6 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
}
break;
}
}
pci_root_node = acpi_ns_get_parent_node(pci_root_node);
}
......@@ -282,14 +273,25 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
/*
* For PCI_Config space access, we need the segment, bus,
* device and function numbers. Acquire them here.
*
* Find the parent device object. (This allows the operation region to be
* within a subscope under the device, such as a control method.)
*/
pci_device_node = region_obj->region.node;
while (pci_device_node && (pci_device_node->type != ACPI_TYPE_DEVICE)) {
pci_device_node = acpi_ns_get_parent_node(pci_device_node);
}
if (!pci_device_node) {
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
/*
* Get the PCI device and function numbers from the _ADR object
* contained in the parent's scope.
*/
status =
acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, parent_node,
acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, pci_device_node,
&pci_value);
/*
......@@ -327,6 +329,91 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
return_ACPI_STATUS(AE_OK);
}
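For context on the _ADR evaluation above (a hedged aside; the packing convention comes from the ACPI specification, not this patch): the returned integer encodes the PCI device number in the upper 16 bits and the function number in the lower 16 bits, which the setup code then splits.

/* Illustration only. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t adr = 0x001F0003;	/* example _ADR value */
	unsigned int device   = (unsigned int)((adr >> 16) & 0xFFFF);
	unsigned int function = (unsigned int)(adr & 0xFFFF);

	printf("device 0x%02X, function %u\n", device, function);
	return 0;
}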
/*******************************************************************************
*
* FUNCTION: acpi_ev_match_pci_root_bridge
*
* PARAMETERS: Id - The HID/CID in string format
*
* RETURN: TRUE if the Id is a match for a PCI/PCI-Express Root Bridge
*
* DESCRIPTION: Determine if the input ID is a PCI Root Bridge ID.
*
******************************************************************************/
static u8 acpi_ev_match_pci_root_bridge(char *id)
{
/*
* Check if this is a PCI root.
* ACPI 3.0+: check for a PCI Express root also.
*/
if (!(ACPI_STRNCMP(id,
PCI_ROOT_HID_STRING,
sizeof(PCI_ROOT_HID_STRING))) ||
!(ACPI_STRNCMP(id,
PCI_EXPRESS_ROOT_HID_STRING,
sizeof(PCI_EXPRESS_ROOT_HID_STRING)))) {
return (TRUE);
}
return (FALSE);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_is_pci_root_bridge
*
* PARAMETERS: Node - Device node being examined
*
* RETURN: TRUE if device is a PCI/PCI-Express Root Bridge
*
* DESCRIPTION: Determine if the input device represents a PCI Root Bridge by
* examining the _HID and _CID for the device.
*
******************************************************************************/
static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
{
acpi_status status;
struct acpi_device_id hid;
struct acpi_compatible_id_list *cid;
acpi_native_uint i;
/*
* Get the _HID and check for a PCI Root Bridge
*/
status = acpi_ut_execute_HID(node, &hid);
if (ACPI_FAILURE(status)) {
return (FALSE);
}
if (acpi_ev_match_pci_root_bridge(hid.value)) {
return (TRUE);
}
/*
* The _HID did not match.
* Get the _CID and check for a PCI Root Bridge
*/
status = acpi_ut_execute_CID(node, &cid);
if (ACPI_FAILURE(status)) {
return (FALSE);
}
/* Check all _CIDs in the returned list */
for (i = 0; i < cid->count; i++) {
if (acpi_ev_match_pci_root_bridge(cid->id[i].value)) {
ACPI_FREE(cid);
return (TRUE);
}
}
ACPI_FREE(cid);
return (FALSE);
}
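To see how the two helpers above combine (a hedged standalone sketch; the HID string values are assumed from the ACPICA headers), the ID match reduces to two exact string comparisons, since the sizeof() bound includes the terminating NUL:

#include <stdio.h>
#include <string.h>

#define PCI_ROOT_HID_STRING		"PNP0A03"
#define PCI_EXPRESS_ROOT_HID_STRING	"PNP0A08"

static int is_pci_root_bridge_id(const char *id)
{
	return !strncmp(id, PCI_ROOT_HID_STRING,
			sizeof(PCI_ROOT_HID_STRING)) ||
	       !strncmp(id, PCI_EXPRESS_ROOT_HID_STRING,
			sizeof(PCI_EXPRESS_ROOT_HID_STRING));
}

int main(void)
{
	printf("%d %d\n", is_pci_root_bridge_id("PNP0A08"),
	       is_pci_root_bridge_id("PNP0C0A"));	/* prints "1 0" */
	return 0;
}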
/*******************************************************************************
*
* FUNCTION: acpi_ev_pci_bar_region_setup
......@@ -432,6 +519,9 @@ acpi_ev_default_region_setup(acpi_handle handle,
* a PCI address in the scope of the definition. This address is
* required to perform an access to PCI config space.
*
* MUTEX: Interpreter should be unlocked, because we may run the _REG
* method for this region.
*
******************************************************************************/
acpi_status
......
......@@ -6,7 +6,7 @@
******************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -142,7 +142,8 @@ u32 acpi_ev_install_sci_handler(void)
ACPI_FUNCTION_TRACE(ev_install_sci_handler);
status = acpi_os_install_interrupt_handler((u32) acpi_gbl_FADT->sci_int,
status =
acpi_os_install_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
acpi_ev_sci_xrupt_handler,
acpi_gbl_gpe_xrupt_list_head);
return_ACPI_STATUS(status);
......@@ -175,7 +176,8 @@ acpi_status acpi_ev_remove_sci_handler(void)
/* Just let the OS remove the handler and disable the level */
status = acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT->sci_int,
status =
acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
acpi_ev_sci_xrupt_handler);
return_ACPI_STATUS(status);
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -768,11 +768,9 @@ acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
return (AE_BAD_PARAMETER);
}
status = acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status)) {
return (status);
}
/* Must lock interpreter to prevent race conditions */
acpi_ex_enter_interpreter();
status = acpi_ev_acquire_global_lock(timeout);
acpi_ex_exit_interpreter();
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -44,6 +44,7 @@
#include <acpi/acpi.h>
#include <acpi/acevents.h>
#include <acpi/acnamesp.h>
#include <acpi/actables.h>
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evxfevnt")
......@@ -65,13 +66,14 @@ acpi_status acpi_enable(void)
ACPI_FUNCTION_TRACE(acpi_enable);
/* Make sure we have the FADT */
/* ACPI tables must be present */
if (!acpi_gbl_FADT) {
ACPI_WARNING((AE_INFO, "No FADT information present!"));
if (!acpi_tb_tables_loaded()) {
return_ACPI_STATUS(AE_NO_ACPI_TABLES);
}
/* Check current mode */
if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
"System is already in ACPI mode\n"));
......@@ -111,11 +113,6 @@ acpi_status acpi_disable(void)
ACPI_FUNCTION_TRACE(acpi_disable);
if (!acpi_gbl_FADT) {
ACPI_WARNING((AE_INFO, "No FADT information present!"));
return_ACPI_STATUS(AE_NO_ACPI_TABLES);
}
if (acpi_hw_get_mode() == ACPI_SYS_MODE_LEGACY) {
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
"System is already in legacy (non-ACPI) mode\n"));
......@@ -169,7 +166,7 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
*/
status =
acpi_set_register(acpi_gbl_fixed_event_info[event].
enable_register_id, 1, ACPI_MTX_LOCK);
enable_register_id, 1);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
......@@ -178,7 +175,7 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
status =
acpi_get_register(acpi_gbl_fixed_event_info[event].
enable_register_id, &value, ACPI_MTX_LOCK);
enable_register_id, &value);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
......@@ -368,14 +365,14 @@ acpi_status acpi_disable_event(u32 event, u32 flags)
*/
status =
acpi_set_register(acpi_gbl_fixed_event_info[event].
enable_register_id, 0, ACPI_MTX_LOCK);
enable_register_id, 0);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
status =
acpi_get_register(acpi_gbl_fixed_event_info[event].
enable_register_id, &value, ACPI_MTX_LOCK);
enable_register_id, &value);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
......@@ -421,7 +418,7 @@ acpi_status acpi_clear_event(u32 event)
*/
status =
acpi_set_register(acpi_gbl_fixed_event_info[event].
status_register_id, 1, ACPI_MTX_LOCK);
status_register_id, 1);
return_ACPI_STATUS(status);
}
......@@ -510,7 +507,7 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
status =
acpi_get_register(acpi_gbl_fixed_event_info[event].
status_register_id, event_status, ACPI_MTX_LOCK);
status_register_id, event_status);
return_ACPI_STATUS(status);
}
......
......@@ -6,7 +6,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -359,8 +359,9 @@ acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state)
union acpi_operand_object **operand = &walk_state->operands[0];
union acpi_operand_object *obj_desc;
struct acpi_namespace_node *node;
struct acpi_table_header *table;
union acpi_operand_object *region_obj2;
acpi_native_uint table_index;
struct acpi_table_header *table;
ACPI_FUNCTION_TRACE(ex_create_table_region);
......@@ -380,7 +381,7 @@ acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state)
status = acpi_tb_find_table(operand[1]->string.pointer,
operand[2]->string.pointer,
operand[3]->string.pointer, &table);
operand[3]->string.pointer, &table_index);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
......@@ -395,6 +396,11 @@ acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state)
region_obj2 = obj_desc->common.next_object;
region_obj2->extra.region_context = NULL;
status = acpi_get_table_by_index(table_index, &table);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Init the region from the operands */
obj_desc->region.space_id = REGION_DATA_TABLE;
......@@ -553,7 +559,8 @@ acpi_ex_create_method(u8 * aml_start,
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_METHOD);
if (!obj_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
status = AE_NO_MEMORY;
goto exit;
}
/* Save the method's AML pointer and length */
......@@ -576,10 +583,7 @@ acpi_ex_create_method(u8 * aml_start,
* Get the sync_level. If method is serialized, a mutex will be
* created for this method when it is parsed.
*/
if (acpi_gbl_all_methods_serialized) {
obj_desc->method.sync_level = 0;
obj_desc->method.method_flags |= AML_METHOD_SERIALIZED;
} else if (method_flags & AML_METHOD_SERIALIZED) {
if (method_flags & AML_METHOD_SERIALIZED) {
/*
* ACPI 1.0: sync_level = 0
* ACPI 2.0: sync_level = sync_level in method declaration
......@@ -597,6 +601,7 @@ acpi_ex_create_method(u8 * aml_start,
acpi_ut_remove_reference(obj_desc);
exit:
/* Remove a reference to the operand */
acpi_ut_remove_reference(operand[1]);
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -257,14 +257,13 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD,
" Region [%s:%X], Width %X, ByteBase %X, Offset %X at %8.8X%8.8X\n",
" Region [%s:%X], Width %X, ByteBase %X, Offset %X at %p\n",
acpi_ut_get_region_name(rgn_desc->region.
space_id),
rgn_desc->region.space_id,
obj_desc->common_field.access_byte_width,
obj_desc->common_field.base_byte_offset,
field_datum_byte_offset,
ACPI_FORMAT_UINT64(address)));
field_datum_byte_offset, (void *)address));
/* Invoke the appropriate address_space/op_region handler */
......
......@@ -6,7 +6,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......
......@@ -6,7 +6,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......
......@@ -5,7 +5,7 @@
*****************************************************************************/
/*
* Copyright (C) 2000 - 2006, R. Byron Moore
* Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......