Commit 9a90e098 authored by Linus Torvalds

Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6: (27 commits)
  ACPI: Don't let acpi_pad needlessly mark TSC unstable
  drivers/acpi/sleep.h: Checkpatch cleanup
  ACPI: Minor cleanup eliminating redundant PMTIMER_TICKS to NS conversion
  ACPI: delete unused c-state promotion/demotion data structures
  ACPI: video: fix acpi_backlight=video
  ACPI: EC: Use kmemdup
  drivers/acpi: use kasprintf
  ACPI, APEI, EINJ injection parameters support
  Add x64 support to debugfs
  ACPI, APEI, Use ERST for persistent storage of MCE
  ACPI, APEI, Error Record Serialization Table (ERST) support
  ACPI, APEI, Generic Hardware Error Source memory error support
  ACPI, APEI, UEFI Common Platform Error Record (CPER) header
  Unified UUID/GUID definition
  ACPI Hardware Error Device (PNP0C33) support
  ACPI, APEI, PCIE AER, use general HEST table parsing in AER firmware_first setup
  ACPI, APEI, Document for APEI
  ACPI, APEI, EINJ support
  ACPI, APEI, HEST table parsing
  ACPI, APEI, APEI supporting infrastructure
  ...
parents d372e7fe d3b38333
APEI Error INJection
~~~~~~~~~~~~~~~~~~~~
EINJ provides a hardware error injection mechanism.
It is very useful for debugging and testing other APEI and RAS features.
To use EINJ, make sure the following are enabled in your kernel
configuration:
CONFIG_DEBUG_FS
CONFIG_ACPI_APEI
CONFIG_ACPI_APEI_EINJ
The user interface of EINJ is the debug file system, under the
directory apei/einj. The following files are provided.
- available_error_type
Reading this file returns the error injection capability of the
platform, that is, which error types are supported. The error type
definitions are as follows; the left field is the error type value and
the right field is the error description.
0x00000001 Processor Correctable
0x00000002 Processor Uncorrectable non-fatal
0x00000004 Processor Uncorrectable fatal
0x00000008 Memory Correctable
0x00000010 Memory Uncorrectable non-fatal
0x00000020 Memory Uncorrectable fatal
0x00000040 PCI Express Correctable
0x00000080 PCI Express Uncorrectable fatal
0x00000100 PCI Express Uncorrectable non-fatal
0x00000200 Platform Correctable
0x00000400 Platform Uncorrectable non-fatal
0x00000800 Platform Uncorrectable fatal
The format of the file contents is as above, except that only the
available error type lines are listed.
- error_type
This file is used to set the error type value. The error type value
is defined in "available_error_type" description.
- error_inject
Write any integer to this file to trigger the error
injection. Before doing so, please specify all necessary error
parameters.
- param1
This file is used to set the first error parameter value. The effect of
the parameter depends on the error_type specified. For memory errors,
this is the physical memory address.
- param2
This file is used to set the second error parameter value. The effect of
the parameter depends on the error_type specified. For memory errors,
this is the physical memory address mask.
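An example: assuming the platform reports 0x00000008 (Memory Correctable)
in available_error_type, a correctable memory error can be injected by
writing 0x8 to error_type, the target physical address to param1, an
address mask to param2, and finally any integer (for example 1) to
error_inject.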
For more information about EINJ, please refer to ACPI specification
version 4.0, section 17.5.
......@@ -145,11 +145,10 @@ and is between 256 and 4096 characters. It is defined in the file
acpi= [HW,ACPI,X86]
Advanced Configuration and Power Interface
Format: { force | off | ht | strict | noirq | rsdt }
Format: { force | off | strict | noirq | rsdt }
force -- enable ACPI if default was off
off -- disable ACPI if default was on
noirq -- do not use ACPI for IRQ routing
ht -- run only enough ACPI to enable Hyper Threading
strict -- Be less tolerant of platforms that are not
strictly ACPI specification compliant.
rsdt -- prefer RSDT over (default) XSDT
......@@ -758,6 +757,10 @@ and is between 256 and 4096 characters. It is defined in the file
Default value is 0.
Value can be changed at runtime via /selinux/enforce.
erst_disable [ACPI]
Disable Error Record Serialization Table (ERST)
support.
ether= [HW,NET] Ethernet cards parameters
This option is obsoleted by the "netdev=" option, which
has equivalent usage. See its documentation for details.
......@@ -852,6 +855,11 @@ and is between 256 and 4096 characters. It is defined in the file
hd= [EIDE] (E)IDE hard drive subsystem geometry
Format: <cyl>,<head>,<sect>
hest_disable [ACPI]
Disable Hardware Error Source Table (HEST) support;
corresponding firmware-first mode error processing
logic will be disabled.
highmem=nn[KMG] [KNL,BOOT] forces the highmem zone to have an exact
size of <nn>. This works even on boxes that have no
highmem otherwise. This also works to reduce highmem
......
......@@ -94,7 +94,6 @@ ia64_acpi_release_global_lock (unsigned int *lock)
#define acpi_noirq 0 /* ACPI always enabled on IA64 */
#define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
#define acpi_strict 1 /* no ACPI spec workarounds on IA64 */
#define acpi_ht 0 /* no HT-only mode on IA64 */
#endif
#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
static inline void disable_acpi(void) { }
......
......@@ -335,8 +335,11 @@ pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl)
}
struct pci_bus * __devinit
pci_acpi_scan_root(struct acpi_device *device, int domain, int bus)
pci_acpi_scan_root(struct acpi_pci_root *root)
{
struct acpi_device *device = root->device;
int domain = root->segment;
int bus = root->secondary.start;
struct pci_controller *controller;
unsigned int windows = 0;
struct pci_bus *pbus;
......
......@@ -85,7 +85,6 @@ extern int acpi_ioapic;
extern int acpi_noirq;
extern int acpi_strict;
extern int acpi_disabled;
extern int acpi_ht;
extern int acpi_pci_disabled;
extern int acpi_skip_timer_override;
extern int acpi_use_timer_override;
......@@ -97,7 +96,6 @@ void acpi_pic_sci_set_trigger(unsigned int, u16);
static inline void disable_acpi(void)
{
acpi_disabled = 1;
acpi_ht = 0;
acpi_pci_disabled = 1;
acpi_noirq = 1;
}
......
......@@ -225,5 +225,13 @@ extern void mcheck_intel_therm_init(void);
static inline void mcheck_intel_therm_init(void) { }
#endif
/*
* Used by APEI to report memory error via /dev/mcelog
*/
struct cper_sec_mem_err;
extern void apei_mce_report_mem_error(int corrected,
struct cper_sec_mem_err *mem_err);
#endif /* __KERNEL__ */
#endif /* _ASM_X86_MCE_H */
......@@ -63,7 +63,6 @@ EXPORT_SYMBOL(acpi_disabled);
int acpi_noirq; /* skip ACPI IRQ initialization */
int acpi_pci_disabled; /* skip ACPI PCI scan and IRQ initialization */
EXPORT_SYMBOL(acpi_pci_disabled);
int acpi_ht __initdata = 1; /* enable HT */
int acpi_lapic;
int acpi_ioapic;
......@@ -1501,9 +1500,8 @@ void __init acpi_boot_table_init(void)
/*
* If acpi_disabled, bail out
* One exception: acpi=ht continues far enough to enumerate LAPICs
*/
if (acpi_disabled && !acpi_ht)
if (acpi_disabled)
return;
/*
......@@ -1534,9 +1532,8 @@ int __init early_acpi_boot_init(void)
{
/*
* If acpi_disabled, bail out
* One exception: acpi=ht continues far enough to enumerate LAPICs
*/
if (acpi_disabled && !acpi_ht)
if (acpi_disabled)
return 1;
/*
......@@ -1554,9 +1551,8 @@ int __init acpi_boot_init(void)
/*
* If acpi_disabled, bail out
* One exception: acpi=ht continues far enough to enumerate LAPICs
*/
if (acpi_disabled && !acpi_ht)
if (acpi_disabled)
return 1;
acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
......@@ -1591,21 +1587,12 @@ static int __init parse_acpi(char *arg)
/* acpi=force to over-ride black-list */
else if (strcmp(arg, "force") == 0) {
acpi_force = 1;
acpi_ht = 1;
acpi_disabled = 0;
}
/* acpi=strict disables out-of-spec workarounds */
else if (strcmp(arg, "strict") == 0) {
acpi_strict = 1;
}
/* Limit ACPI just to boot-time to enable HT */
else if (strcmp(arg, "ht") == 0) {
if (!acpi_force) {
printk(KERN_WARNING "acpi=ht will be removed in Linux-2.6.35\n");
disable_acpi();
}
acpi_ht = 1;
}
/* acpi=rsdt use RSDT instead of XSDT */
else if (strcmp(arg, "rsdt") == 0) {
acpi_rsdt_forced = 1;
......
......@@ -162,8 +162,6 @@ static int __init acpi_sleep_setup(char *str)
#endif
if (strncmp(str, "old_ordering", 12) == 0)
acpi_old_suspend_ordering();
if (strncmp(str, "sci_force_enable", 16) == 0)
acpi_set_sci_en_on_resume();
str = strchr(str, ',');
if (str != NULL)
str += strspn(str, ", \t");
......
......@@ -7,3 +7,5 @@ obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o
obj-$(CONFIG_X86_MCE_INJECT) += mce-inject.o
obj-$(CONFIG_X86_THERMAL_VECTOR) += therm_throt.o
obj-$(CONFIG_ACPI_APEI) += mce-apei.o
/*
* Bridge between MCE and APEI
*
* On some machines, corrected memory errors are reported via APEI
* generic hardware error source (GHES) instead of corrected Machine
* Check. These corrected memory errors can be reported to user space
* through /dev/mcelog by faking a corrected Machine Check, so that
* the error memory page can be offlined by /sbin/mcelog if the error
* count for one page is beyond the threshold.
*
* For fatal MCE, save MCE record into persistent storage via ERST, so
* that the MCE record can be logged after reboot via ERST.
*
* Copyright 2010 Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/cper.h>
#include <acpi/apei.h>
#include <asm/mce.h>
#include "mce-internal.h"
void apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err)
{
struct mce m;
/* Only corrected MC is reported */
if (!corrected)
return;
mce_setup(&m);
m.bank = 1;
/* Fake a memory read corrected error with unknown channel */
m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | 0x9f;
m.addr = mem_err->physical_addr;
mce_log(&m);
mce_notify_irq();
}
EXPORT_SYMBOL_GPL(apei_mce_report_mem_error);
#define CPER_CREATOR_MCE \
UUID_LE(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c, \
0x64, 0x90, 0xb8, 0x9d)
#define CPER_SECTION_TYPE_MCE \
UUID_LE(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96, \
0x04, 0x4a, 0x38, 0xfc)
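/*
 * These UUIDs mark the creator and the section type of the records
 * written by apei_write_mce() below; apei_read_mce() uses the creator
 * ID to recognize MCE records among other records stored in ERST.
 */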
/*
* CPER specification (in UEFI specification 2.3 appendix N) requires
* byte-packed.
*/
struct cper_mce_record {
struct cper_record_header hdr;
struct cper_section_descriptor sec_hdr;
struct mce mce;
} __packed;
int apei_write_mce(struct mce *m)
{
struct cper_mce_record rcd;
memset(&rcd, 0, sizeof(rcd));
memcpy(rcd.hdr.signature, CPER_SIG_RECORD, CPER_SIG_SIZE);
rcd.hdr.revision = CPER_RECORD_REV;
rcd.hdr.signature_end = CPER_SIG_END;
rcd.hdr.section_count = 1;
rcd.hdr.error_severity = CPER_SER_FATAL;
/* timestamp, platform_id, partition_id are all invalid */
rcd.hdr.validation_bits = 0;
rcd.hdr.record_length = sizeof(rcd);
rcd.hdr.creator_id = CPER_CREATOR_MCE;
rcd.hdr.notification_type = CPER_NOTIFY_MCE;
rcd.hdr.record_id = cper_next_record_id();
rcd.hdr.flags = CPER_HW_ERROR_FLAGS_PREVERR;
rcd.sec_hdr.section_offset = (void *)&rcd.mce - (void *)&rcd;
rcd.sec_hdr.section_length = sizeof(rcd.mce);
rcd.sec_hdr.revision = CPER_SEC_REV;
/* fru_id and fru_text are invalid */
rcd.sec_hdr.validation_bits = 0;
rcd.sec_hdr.flags = CPER_SEC_PRIMARY;
rcd.sec_hdr.section_type = CPER_SECTION_TYPE_MCE;
rcd.sec_hdr.section_severity = CPER_SER_FATAL;
memcpy(&rcd.mce, m, sizeof(*m));
return erst_write(&rcd.hdr);
}
ssize_t apei_read_mce(struct mce *m, u64 *record_id)
{
struct cper_mce_record rcd;
ssize_t len;
len = erst_read_next(&rcd.hdr, sizeof(rcd));
if (len <= 0)
return len;
/* Cannot skip other records in ERST storage without clearing them */
else if (len != sizeof(rcd) ||
uuid_le_cmp(rcd.hdr.creator_id, CPER_CREATOR_MCE)) {
if (printk_ratelimit())
pr_warning(
"MCE-APEI: Can not skip the unknown record in ERST");
return -EIO;
}
memcpy(m, &rcd.mce, sizeof(*m));
*record_id = rcd.hdr.record_id;
return sizeof(*m);
}
/* Check whether there is a record in ERST */
int apei_check_mce(void)
{
return erst_get_record_count();
}
int apei_clear_mce(u64 record_id)
{
return erst_clear(record_id);
}
......@@ -28,3 +28,26 @@ extern int mce_ser;
extern struct mce_bank *mce_banks;
#ifdef CONFIG_ACPI_APEI
int apei_write_mce(struct mce *m);
ssize_t apei_read_mce(struct mce *m, u64 *record_id);
int apei_check_mce(void);
int apei_clear_mce(u64 record_id);
#else
static inline int apei_write_mce(struct mce *m)
{
return -EINVAL;
}
static inline ssize_t apei_read_mce(struct mce *m, u64 *record_id)
{
return 0;
}
static inline int apei_check_mce(void)
{
return 0;
}
static inline int apei_clear_mce(u64 record_id)
{
return -EINVAL;
}
#endif
......@@ -264,7 +264,7 @@ static void wait_for_panic(void)
static void mce_panic(char *msg, struct mce *final, char *exp)
{
int i;
int i, apei_err = 0;
if (!fake_panic) {
/*
......@@ -287,8 +287,11 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
struct mce *m = &mcelog.entry[i];
if (!(m->status & MCI_STATUS_VAL))
continue;
if (!(m->status & MCI_STATUS_UC))
if (!(m->status & MCI_STATUS_UC)) {
print_mce(m);
if (!apei_err)
apei_err = apei_write_mce(m);
}
}
/* Now print uncorrected but with the final one last */
for (i = 0; i < MCE_LOG_LEN; i++) {
......@@ -297,11 +300,17 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
continue;
if (!(m->status & MCI_STATUS_UC))
continue;
if (!final || memcmp(m, final, sizeof(struct mce)))
if (!final || memcmp(m, final, sizeof(struct mce))) {
print_mce(m);
if (!apei_err)
apei_err = apei_write_mce(m);
}
if (final)
}
if (final) {
print_mce(final);
if (!apei_err)
apei_err = apei_write_mce(final);
}
if (cpu_missing)
printk(KERN_EMERG "Some CPUs didn't answer in synchronization\n");
print_mce_tail();
......@@ -1493,6 +1502,43 @@ static void collect_tscs(void *data)
rdtscll(cpu_tsc[smp_processor_id()]);
}
static int mce_apei_read_done;
/* Collect MCE record of previous boot in persistent storage via APEI ERST. */
static int __mce_read_apei(char __user **ubuf, size_t usize)
{
int rc;
u64 record_id;
struct mce m;
if (usize < sizeof(struct mce))
return -EINVAL;
rc = apei_read_mce(&m, &record_id);
/* Error or no more MCE record */
if (rc <= 0) {
mce_apei_read_done = 1;
return rc;
}
rc = -EFAULT;
if (copy_to_user(*ubuf, &m, sizeof(struct mce)))
return rc;
/*
* In fact, we should have cleared the record after it has
* been flushed to disk or sent over the network by
* /sbin/mcelog, but we have no interface to support that now,
* so just clear it to avoid duplication.
*/
rc = apei_clear_mce(record_id);
if (rc) {
mce_apei_read_done = 1;
return rc;
}
*ubuf += sizeof(struct mce);
return 0;
}
static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
loff_t *off)
{
......@@ -1506,15 +1552,19 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
return -ENOMEM;
mutex_lock(&mce_read_mutex);
if (!mce_apei_read_done) {
err = __mce_read_apei(&buf, usize);
if (err || buf != ubuf)
goto out;
}
next = rcu_dereference_check_mce(mcelog.next);
/* Only supports full reads right now */
if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
mutex_unlock(&mce_read_mutex);
kfree(cpu_tsc);
return -EINVAL;
}
err = -EINVAL;
if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
goto out;
err = 0;
prev = 0;
......@@ -1562,10 +1612,15 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
memset(&mcelog.entry[i], 0, sizeof(struct mce));
}
}
if (err)
err = -EFAULT;
out:
mutex_unlock(&mce_read_mutex);
kfree(cpu_tsc);
return err ? -EFAULT : buf - ubuf;
return err ? err : buf - ubuf;
}
static unsigned int mce_poll(struct file *file, poll_table *wait)
......@@ -1573,6 +1628,8 @@ static unsigned int mce_poll(struct file *file, poll_table *wait)
poll_wait(file, &mce_wait, wait);
if (rcu_dereference_check_mce(mcelog.next))
return POLLIN | POLLRDNORM;
if (!mce_apei_read_done && apei_check_mce())
return POLLIN | POLLRDNORM;
return 0;
}
......
......@@ -1390,7 +1390,6 @@ __init void lguest_init(void)
#endif
#ifdef CONFIG_ACPI
acpi_disabled = 1;
acpi_ht = 0;
#endif
/*
......
......@@ -224,8 +224,11 @@ get_current_resources(struct acpi_device *device, int busnum,
return;
}
struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int domain, int busnum)
struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
{
struct acpi_device *device = root->device;
int domain = root->segment;
int busnum = root->secondary.start;
struct pci_bus *bus;
struct pci_sysdata *sd;
int node;
......
......@@ -360,4 +360,13 @@ config ACPI_SBS
To compile this driver as a module, choose M here:
the modules will be called sbs and sbshc.
config ACPI_HED
tristate "Hardware Error Device"
help
This driver supports the Hardware Error Device (PNP0C33),
which is used to report hardware errors that are notified via
SCI, mainly corrected errors.
source "drivers/acpi/apei/Kconfig"
endif # ACPI
......@@ -19,7 +19,7 @@ obj-y += acpi.o \
# All the builtin files are in the "acpi." module_param namespace.
acpi-y += osl.o utils.o reboot.o
acpi-y += hest.o
acpi-y += atomicio.o
# sleep related files
acpi-y += wakeup.o
......@@ -59,6 +59,7 @@ obj-$(CONFIG_ACPI_BATTERY) += battery.o
obj-$(CONFIG_ACPI_SBS) += sbshc.o
obj-$(CONFIG_ACPI_SBS) += sbs.o
obj-$(CONFIG_ACPI_POWER_METER) += power_meter.o
obj-$(CONFIG_ACPI_HED) += hed.o
# processor has its own "processor." module_param namespace
processor-y := processor_driver.o processor_throttling.o
......@@ -66,3 +67,5 @@ processor-y += processor_idle.o processor_thermal.o
processor-$(CONFIG_CPU_FREQ) += processor_perflib.o
obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
obj-$(CONFIG_ACPI_APEI) += apei/
......@@ -43,6 +43,10 @@ static DEFINE_MUTEX(isolated_cpus_lock);
#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
#define CPUID5_ECX_INTERRUPT_BREAK (0x2)
static unsigned long power_saving_mwait_eax;
static unsigned char tsc_detected_unstable;
static unsigned char tsc_marked_unstable;
static void power_saving_mwait_init(void)
{
unsigned int eax, ebx, ecx, edx;
......@@ -87,8 +91,8 @@ static void power_saving_mwait_init(void)
/*FALL THROUGH*/
default:
/* TSC could halt in idle, so notify users */
mark_tsc_unstable("TSC halts in idle");
/* TSC could halt in idle */
tsc_detected_unstable = 1;
}
#endif
}
......@@ -178,6 +182,11 @@ static int power_saving_thread(void *data)
expire_time = jiffies + HZ * (100 - idle_pct) / 100;
while (!need_resched()) {
if (tsc_detected_unstable && !tsc_marked_unstable) {
/* TSC could halt in idle, so notify users */
mark_tsc_unstable("TSC halts in idle");
tsc_marked_unstable = 1;
}
local_irq_disable();
cpu = smp_processor_id();
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
......
......@@ -69,7 +69,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
acpi_status acpi_enable(void)
{
acpi_status status = AE_OK;
acpi_status status;
ACPI_FUNCTION_TRACE(acpi_enable);
......@@ -84,7 +84,9 @@ acpi_status acpi_enable(void)
if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
"System is already in ACPI mode\n"));
} else {
return_ACPI_STATUS(AE_OK);
}
/* Transition to ACPI mode */
status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI);
......@@ -94,11 +96,18 @@ acpi_status acpi_enable(void)
return_ACPI_STATUS(status);
}
/* Sanity check that transition succeeded */
if (acpi_hw_get_mode() != ACPI_SYS_MODE_ACPI) {
ACPI_ERROR((AE_INFO,
"Hardware did not enter ACPI mode"));
return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
}
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
"Transition to ACPI mode successful\n"));
}
return_ACPI_STATUS(status);
return_ACPI_STATUS(AE_OK);
}
ACPI_EXPORT_SYMBOL(acpi_enable)
......
......@@ -63,7 +63,6 @@ acpi_status acpi_hw_set_mode(u32 mode)
{
acpi_status status;
u32 retry;
ACPI_FUNCTION_TRACE(hw_set_mode);
......@@ -125,24 +124,7 @@ acpi_status acpi_hw_set_mode(u32 mode)
return_ACPI_STATUS(status);
}
/*
* Some hardware takes a LONG time to switch modes. Give them 3 sec to
* do so, but allow faster systems to proceed more quickly.
*/
retry = 3000;
while (retry) {
if (acpi_hw_get_mode() == mode) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Mode %X successfully enabled\n",
mode));
return_ACPI_STATUS(AE_OK);
}
acpi_os_stall(1000);
retry--;
}
ACPI_ERROR((AE_INFO, "Hardware did not change modes"));
return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
}
/*******************************************************************************
......
config ACPI_APEI
bool "ACPI Platform Error Interface (APEI)"
depends on X86
help
APEI allows errors (for example from the chipset) to be
reported to the operating system. This especially improves
NMI handling. In addition it supports error serialization and
error injection.
config ACPI_APEI_GHES
tristate "APEI Generic Hardware Error Source"
depends on ACPI_APEI && X86
select ACPI_HED
help
Generic Hardware Error Source provides a way to report
platform hardware errors (such as those from the chipset). It
works in the so-called "Firmware First" mode; that is, hardware
errors are reported to the firmware first and then reported to
Linux by the firmware. This way, some non-standard hardware
error registers or non-standard hardware links can be checked
by the firmware to produce more valuable hardware error
information for Linux.
config ACPI_APEI_EINJ
tristate "APEI Error INJection (EINJ)"
depends on ACPI_APEI && DEBUG_FS
help
EINJ provides a hardware error injection mechanism; it is
mainly used for debugging and testing the other parts of
APEI and some other RAS features.
obj-$(CONFIG_ACPI_APEI) += apei.o
obj-$(CONFIG_ACPI_APEI_GHES) += ghes.o
obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o
apei-y := apei-base.o hest.o cper.o erst.o
/*
* apei-base.c - ACPI Platform Error Interface (APEI) supporting
* infrastructure
*
* APEI allows errors (for example from the chipset) to be reported to
* the operating system. This especially improves NMI handling. In
* addition it supports error serialization and error injection.
*
* For more information about APEI, please refer to ACPI Specification
* version 4.0, chapter 17.
*
* This file has common functions used by more than one APEI table,
* including the framework of the interpreter for ERST and EINJ, and
* resource management for APEI registers.
*
* Copyright (C) 2009, Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <acpi/atomicio.h>
#include "apei-internal.h"
#define APEI_PFX "APEI: "
/*
* APEI ERST (Error Record Serialization Table) and EINJ (Error
* INJection) interpreter framework.
*/
#define APEI_EXEC_PRESERVE_REGISTER 0x1
void apei_exec_ctx_init(struct apei_exec_context *ctx,
struct apei_exec_ins_type *ins_table,
u32 instructions,
struct acpi_whea_header *action_table,
u32 entries)
{
ctx->ins_table = ins_table;
ctx->instructions = instructions;
ctx->action_table = action_table;
ctx->entries = entries;
}
EXPORT_SYMBOL_GPL(apei_exec_ctx_init);
int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
{
int rc;
rc = acpi_atomic_read(val, &entry->register_region);
if (rc)
return rc;
*val >>= entry->register_region.bit_offset;
*val &= entry->mask;
return 0;
}
int apei_exec_read_register(struct apei_exec_context *ctx,
struct acpi_whea_header *entry)
{
int rc;
u64 val = 0;
rc = __apei_exec_read_register(entry, &val);
if (rc)
return rc;
ctx->value = val;
return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_read_register);
int apei_exec_read_register_value(struct apei_exec_context *ctx,
struct acpi_whea_header *entry)
{
int rc;
rc = apei_exec_read_register(ctx, entry);
if (rc)
return rc;
ctx->value = (ctx->value == entry->value);
return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_read_register_value);
int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
{
int rc;
val &= entry->mask;
val <<= entry->register_region.bit_offset;
if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
u64 valr = 0;
rc = acpi_atomic_read(&valr, &entry->register_region);
if (rc)
return rc;
valr &= ~(entry->mask << entry->register_region.bit_offset);
val |= valr;
}
rc = acpi_atomic_write(val, &entry->register_region);
return rc;
}
int apei_exec_write_register(struct apei_exec_context *ctx,
struct acpi_whea_header *entry)
{
return __apei_exec_write_register(entry, ctx->value);
}
EXPORT_SYMBOL_GPL(apei_exec_write_register);
int apei_exec_write_register_value(struct apei_exec_context *ctx,
struct acpi_whea_header *entry)
{
int rc;
ctx->value = entry->value;
rc = apei_exec_write_register(ctx, entry);
return rc;
}
EXPORT_SYMBOL_GPL(apei_exec_write_register_value);
int apei_exec_noop(struct apei_exec_context *ctx,
struct acpi_whea_header *entry)
{
return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_noop);
/*
* Interpret the specified action. Go through the whole action table and
* execute all instructions belonging to the action.
*/
int apei_exec_run(struct apei_exec_context *ctx, u8 action)
{
int rc;
u32 i, ip;
struct acpi_whea_header *entry;
apei_exec_ins_func_t run;
ctx->ip = 0;
/*
* "ip" is the instruction pointer of current instruction,
* "ctx->ip" specifies the next instruction to executed,
* instruction "run" function may change the "ctx->ip" to
* implement "goto" semantics.
*/
rewind:
ip = 0;
for (i = 0; i < ctx->entries; i++) {
entry = &ctx->action_table[i];
if (entry->action != action)
continue;
if (ip == ctx->ip) {
if (entry->instruction >= ctx->instructions ||
!ctx->ins_table[entry->instruction].run) {
pr_warning(FW_WARN APEI_PFX
"Invalid action table, unknown instruction type: %d\n",
entry->instruction);
return -EINVAL;
}
run = ctx->ins_table[entry->instruction].run;
rc = run(ctx, entry);
if (rc < 0)
return rc;
else if (rc != APEI_EXEC_SET_IP)
ctx->ip++;
}
ip++;
if (ctx->ip < ip)
goto rewind;
}
return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_run);
typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
struct acpi_whea_header *entry,
void *data);
static int apei_exec_for_each_entry(struct apei_exec_context *ctx,
apei_exec_entry_func_t func,
void *data,
int *end)
{
u8 ins;
int i, rc;
struct acpi_whea_header *entry;
struct apei_exec_ins_type *ins_table = ctx->ins_table;
for (i = 0; i < ctx->entries; i++) {
entry = ctx->action_table + i;
ins = entry->instruction;
if (end)
*end = i;
if (ins >= ctx->instructions || !ins_table[ins].run) {
pr_warning(FW_WARN APEI_PFX
"Invalid action table, unknown instruction type: %d\n",
ins);
return -EINVAL;
}
rc = func(ctx, entry, data);
if (rc)
return rc;
}
return 0;
}
static int pre_map_gar_callback(struct apei_exec_context *ctx,
struct acpi_whea_header *entry,
void *data)
{
u8 ins = entry->instruction;
if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
return acpi_pre_map_gar(&entry->register_region);
return 0;
}
/*
* Pre-map all GARs in the action table to make it possible to access them
* in the NMI handler.
*/
int apei_exec_pre_map_gars(struct apei_exec_context *ctx)
{
int rc, end;
rc = apei_exec_for_each_entry(ctx, pre_map_gar_callback,
NULL, &end);
if (rc) {
struct apei_exec_context ctx_unmap;
memcpy(&ctx_unmap, ctx, sizeof(*ctx));
ctx_unmap.entries = end;
apei_exec_post_unmap_gars(&ctx_unmap);
}
return rc;
}
EXPORT_SYMBOL_GPL(apei_exec_pre_map_gars);
static int post_unmap_gar_callback(struct apei_exec_context *ctx,
struct acpi_whea_header *entry,
void *data)
{
u8 ins = entry->instruction;
if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
acpi_post_unmap_gar(&entry->register_region);
return 0;
}
/* Post-unmap all GARs in the action table. */
int apei_exec_post_unmap_gars(struct apei_exec_context *ctx)
{
return apei_exec_for_each_entry(ctx, post_unmap_gar_callback,
NULL, NULL);
}
EXPORT_SYMBOL_GPL(apei_exec_post_unmap_gars);
/*
* Resource management for GARs in APEI
*/
struct apei_res {
struct list_head list;
unsigned long start;
unsigned long end;
};
/* Collect all resources requested, to avoid conflict */
struct apei_resources apei_resources_all = {
.iomem = LIST_HEAD_INIT(apei_resources_all.iomem),
.ioport = LIST_HEAD_INIT(apei_resources_all.ioport),
};
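/*
 * Illustrative note: apei_res_add() below merges overlapping or
 * adjacent requests into a single range. For example, adding
 * [0x1000, 0x1008) and then [0x1004, 0x1010) to an empty list leaves
 * one entry covering [0x1000, 0x1010).
 */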
static int apei_res_add(struct list_head *res_list,
unsigned long start, unsigned long size)
{
struct apei_res *res, *resn, *res_ins = NULL;
unsigned long end = start + size;
if (end <= start)
return 0;
repeat:
list_for_each_entry_safe(res, resn, res_list, list) {
if (res->start > end || res->end < start)
continue;
else if (end <= res->end && start >= res->start) {
kfree(res_ins);
return 0;
}
list_del(&res->list);
res->start = start = min(res->start, start);
res->end = end = max(res->end, end);
kfree(res_ins);
res_ins = res;
goto repeat;
}
if (res_ins)
list_add(&res_ins->list, res_list);
else {
res_ins = kmalloc(sizeof(*res), GFP_KERNEL);
if (!res_ins)
return -ENOMEM;
res_ins->start = start;
res_ins->end = end;
list_add(&res_ins->list, res_list);
}
return 0;
}
static int apei_res_sub(struct list_head *res_list1,
struct list_head *res_list2)
{
struct apei_res *res1, *resn1, *res2, *res;
res1 = list_entry(res_list1->next, struct apei_res, list);
resn1 = list_entry(res1->list.next, struct apei_res, list);
while (&res1->list != res_list1) {
list_for_each_entry(res2, res_list2, list) {
if (res1->start >= res2->end ||
res1->end <= res2->start)
continue;
else if (res1->end <= res2->end &&
res1->start >= res2->start) {
list_del(&res1->list);
kfree(res1);
break;
} else if (res1->end > res2->end &&
res1->start < res2->start) {
res = kmalloc(sizeof(*res), GFP_KERNEL);
if (!res)
return -ENOMEM;
res->start = res2->end;
res->end = res1->end;
res1->end = res2->start;
list_add(&res->list, &res1->list);
resn1 = res;
} else {
if (res1->start < res2->start)
res1->end = res2->start;
else
res1->start = res2->end;
}
}
res1 = resn1;
resn1 = list_entry(resn1->list.next, struct apei_res, list);
}
return 0;
}
static void apei_res_clean(struct list_head *res_list)
{
struct apei_res *res, *resn;
list_for_each_entry_safe(res, resn, res_list, list) {
list_del(&res->list);
kfree(res);
}
}
void apei_resources_fini(struct apei_resources *resources)
{
apei_res_clean(&resources->iomem);
apei_res_clean(&resources->ioport);
}
EXPORT_SYMBOL_GPL(apei_resources_fini);
static int apei_resources_merge(struct apei_resources *resources1,
struct apei_resources *resources2)
{
int rc;
struct apei_res *res;
list_for_each_entry(res, &resources2->iomem, list) {
rc = apei_res_add(&resources1->iomem, res->start,
res->end - res->start);
if (rc)
return rc;
}
list_for_each_entry(res, &resources2->ioport, list) {
rc = apei_res_add(&resources1->ioport, res->start,
res->end - res->start);
if (rc)
return rc;
}
return 0;
}
/*
* EINJ has two groups of GARs (EINJ table entry and trigger table
* entry), so common resources are subtracted from the trigger table
* resources before the second request.
*/
int apei_resources_sub(struct apei_resources *resources1,
struct apei_resources *resources2)
{
int rc;
rc = apei_res_sub(&resources1->iomem, &resources2->iomem);
if (rc)
return rc;
return apei_res_sub(&resources1->ioport, &resources2->ioport);
}
EXPORT_SYMBOL_GPL(apei_resources_sub);
/*
* The IO memory/port resource management mechanism is used to check
* whether the memory/port areas used by GARs conflict with normal memory
* or with the IO memory/port of devices.
*/
int apei_resources_request(struct apei_resources *resources,
const char *desc)
{
struct apei_res *res, *res_bak;
struct resource *r;
apei_resources_sub(resources, &apei_resources_all);
list_for_each_entry(res, &resources->iomem, list) {
r = request_mem_region(res->start, res->end - res->start,
desc);
if (!r) {
pr_err(APEI_PFX
"Can not request iomem region <%016llx-%016llx> for GARs.\n",
(unsigned long long)res->start,
(unsigned long long)res->end);
res_bak = res;
goto err_unmap_iomem;
}
}
list_for_each_entry(res, &resources->ioport, list) {
r = request_region(res->start, res->end - res->start, desc);
if (!r) {
pr_err(APEI_PFX
"Can not request ioport region <%016llx-%016llx> for GARs.\n",
(unsigned long long)res->start,
(unsigned long long)res->end);
res_bak = res;
goto err_unmap_ioport;
}
}
apei_resources_merge(&apei_resources_all, resources);
return 0;
err_unmap_ioport:
list_for_each_entry(res, &resources->ioport, list) {
if (res == res_bak)
break;
release_region(res->start, res->end - res->start);
}
res_bak = NULL;
err_unmap_iomem:
list_for_each_entry(res, &resources->iomem, list) {
if (res == res_bak)
break;
release_mem_region(res->start, res->end - res->start);
}
return -EINVAL;
}
EXPORT_SYMBOL_GPL(apei_resources_request);
void apei_resources_release(struct apei_resources *resources)
{
struct apei_res *res;
list_for_each_entry(res, &resources->iomem, list)
release_mem_region(res->start, res->end - res->start);
list_for_each_entry(res, &resources->ioport, list)
release_region(res->start, res->end - res->start);
apei_resources_sub(&apei_resources_all, resources);
}
EXPORT_SYMBOL_GPL(apei_resources_release);
static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr)
{
u32 width, space_id;
width = reg->bit_width;
space_id = reg->space_id;
/* Handle possible alignment issues */
memcpy(paddr, &reg->address, sizeof(*paddr));
if (!*paddr) {
pr_warning(FW_BUG APEI_PFX
"Invalid physical address in GAR [0x%llx/%u/%u]\n",
*paddr, width, space_id);
return -EINVAL;
}
if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
pr_warning(FW_BUG APEI_PFX
"Invalid bit width in GAR [0x%llx/%u/%u]\n",
*paddr, width, space_id);
return -EINVAL;
}
if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
pr_warning(FW_BUG APEI_PFX
"Invalid address space type in GAR [0x%llx/%u/%u]\n",
*paddr, width, space_id);
return -EINVAL;
}
return 0;
}
static int collect_res_callback(struct apei_exec_context *ctx,
struct acpi_whea_header *entry,
void *data)
{
struct apei_resources *resources = data;
struct acpi_generic_address *reg = &entry->register_region;
u8 ins = entry->instruction;
u64 paddr;
int rc;
if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER))
return 0;
rc = apei_check_gar(reg, &paddr);
if (rc)
return rc;
switch (reg->space_id) {
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
return apei_res_add(&resources->iomem, paddr,
reg->bit_width / 8);
case ACPI_ADR_SPACE_SYSTEM_IO:
return apei_res_add(&resources->ioport, paddr,
reg->bit_width / 8);
default:
return -EINVAL;
}
}
/*
* Same register may be used by multiple instructions in GARs, so
* resources are collected before requesting.
*/
int apei_exec_collect_resources(struct apei_exec_context *ctx,
struct apei_resources *resources)
{
return apei_exec_for_each_entry(ctx, collect_res_callback,
resources, NULL);
}
EXPORT_SYMBOL_GPL(apei_exec_collect_resources);
struct dentry *apei_get_debugfs_dir(void)
{
static struct dentry *dapei;
if (!dapei)
dapei = debugfs_create_dir("apei", NULL);
return dapei;
}
EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);
/*
* apei-internal.h - ACPI Platform Error Interface internal
* definitions.
*/
#ifndef APEI_INTERNAL_H
#define APEI_INTERNAL_H
#include <linux/cper.h>
struct apei_exec_context;
typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
struct acpi_whea_header *entry);
#define APEI_EXEC_INS_ACCESS_REGISTER 0x0001
struct apei_exec_ins_type {
u32 flags;
apei_exec_ins_func_t run;
};
struct apei_exec_context {
u32 ip;
u64 value;
u64 var1;
u64 var2;
u64 src_base;
u64 dst_base;
struct apei_exec_ins_type *ins_table;
u32 instructions;
struct acpi_whea_header *action_table;
u32 entries;
};
void apei_exec_ctx_init(struct apei_exec_context *ctx,
struct apei_exec_ins_type *ins_table,
u32 instructions,
struct acpi_whea_header *action_table,
u32 entries);
static inline void apei_exec_ctx_set_input(struct apei_exec_context *ctx,
u64 input)
{
ctx->value = input;
}
static inline u64 apei_exec_ctx_get_output(struct apei_exec_context *ctx)
{
return ctx->value;
}
int apei_exec_run(struct apei_exec_context *ctx, u8 action);
/* Common instruction implementation */
/* IP has been set in instruction function */
#define APEI_EXEC_SET_IP 1
int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val);
int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val);
int apei_exec_read_register(struct apei_exec_context *ctx,
struct acpi_whea_header *entry);
int apei_exec_read_register_value(struct apei_exec_context *ctx,
struct acpi_whea_header *entry);
int apei_exec_write_register(struct apei_exec_context *ctx,
struct acpi_whea_header *entry);
int apei_exec_write_register_value(struct apei_exec_context *ctx,
struct acpi_whea_header *entry);
int apei_exec_noop(struct apei_exec_context *ctx,
struct acpi_whea_header *entry);
int apei_exec_pre_map_gars(struct apei_exec_context *ctx);
int apei_exec_post_unmap_gars(struct apei_exec_context *ctx);
struct apei_resources {
struct list_head iomem;
struct list_head ioport;
};
static inline void apei_resources_init(struct apei_resources *resources)
{
INIT_LIST_HEAD(&resources->iomem);
INIT_LIST_HEAD(&resources->ioport);
}
void apei_resources_fini(struct apei_resources *resources);
int apei_resources_sub(struct apei_resources *resources1,
struct apei_resources *resources2);
int apei_resources_request(struct apei_resources *resources,
const char *desc);
void apei_resources_release(struct apei_resources *resources);
int apei_exec_collect_resources(struct apei_exec_context *ctx,
struct apei_resources *resources);
struct dentry;
struct dentry *apei_get_debugfs_dir(void);
#define apei_estatus_for_each_section(estatus, section) \
for (section = (struct acpi_hest_generic_data *)(estatus + 1); \
(void *)section - (void *)estatus < estatus->data_length; \
section = (void *)(section+1) + section->error_data_length)
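/*
 * Example (illustrative sketch, helper name is hypothetical): walk all
 * generic data sections of an error status block with the iterator
 * above, e.g. to count them before processing.
 */
static inline int apei_estatus_section_count(
	struct acpi_hest_generic_status *estatus)
{
	struct acpi_hest_generic_data *section;
	int count = 0;

	apei_estatus_for_each_section(estatus, section)
		count++;
	return count;
}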
static inline u32 apei_estatus_len(struct acpi_hest_generic_status *estatus)
{
if (estatus->raw_data_length)
return estatus->raw_data_offset + \
estatus->raw_data_length;
else
return sizeof(*estatus) + estatus->data_length;
}
int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus);
int apei_estatus_check(const struct acpi_hest_generic_status *estatus);
#endif
/*
* UEFI Common Platform Error Record (CPER) support
*
* Copyright (C) 2010, Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
*
* CPER is the format used to describe platform hardware errors by
* various APEI tables, such as ERST, BERT and HEST.
*
* For more information about CPER, please refer to Appendix N of UEFI
* Specification version 2.3.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/cper.h>
#include <linux/acpi.h>
/*
* CPER record IDs need to be unique even across reboots, because the
* record ID is used as an index for ERST storage, and CPER records from
* multiple boots may co-exist in ERST.
*/
u64 cper_next_record_id(void)
{
static atomic64_t seq;
if (!atomic64_read(&seq))
atomic64_set(&seq, ((u64)get_seconds()) << 32);
return atomic64_inc_return(&seq);
}
EXPORT_SYMBOL_GPL(cper_next_record_id);
int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus)
{
if (estatus->data_length &&
estatus->data_length < sizeof(struct acpi_hest_generic_data))
return -EINVAL;
if (estatus->raw_data_length &&
estatus->raw_data_offset < sizeof(*estatus) + estatus->data_length)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL_GPL(apei_estatus_check_header);
int apei_estatus_check(const struct acpi_hest_generic_status *estatus)
{
struct acpi_hest_generic_data *gdata;
unsigned int data_len, gedata_len;
int rc;
rc = apei_estatus_check_header(estatus);
if (rc)
return rc;
data_len = estatus->data_length;
gdata = (struct acpi_hest_generic_data *)(estatus + 1);
while (data_len > sizeof(*gdata)) {
gedata_len = gdata->error_data_length;
if (gedata_len > data_len - sizeof(*gdata))
return -EINVAL;
data_len -= gedata_len + sizeof(*gdata);
/* Advance to the next generic data section */
gdata = (void *)(gdata + 1) + gedata_len;
}
if (data_len)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL_GPL(apei_estatus_check);
/*
* APEI Error INJection support
*
* EINJ provides a hardware error injection mechanism, which is useful
* for debugging and testing of other APEI and RAS features.
*
* For more information about EINJ, please refer to ACPI Specification
* version 4.0, section 17.5.
*
* Copyright 2009-2010 Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/nmi.h>
#include <linux/delay.h>
#include <acpi/acpi.h>
#include "apei-internal.h"
#define EINJ_PFX "EINJ: "
#define SPIN_UNIT 100 /* 100ns */
/* Firmware should respond within 1 millisecond */
#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
/*
* Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
* EINJ table through an unpublished extension. Use with caution as
* most will ignore the parameter and make their own choice of address
* for error injection.
*/
struct einj_parameter {
u64 type;
u64 reserved1;
u64 reserved2;
u64 param1;
u64 param2;
};
#define EINJ_OP_BUSY 0x1
#define EINJ_STATUS_SUCCESS 0x0
#define EINJ_STATUS_FAIL 0x1
#define EINJ_STATUS_INVAL 0x2
#define EINJ_TAB_ENTRY(tab) \
((struct acpi_whea_header *)((char *)(tab) + \
sizeof(struct acpi_table_einj)))
static struct acpi_table_einj *einj_tab;
static struct apei_resources einj_resources;
static struct apei_exec_ins_type einj_ins_type[] = {
[ACPI_EINJ_READ_REGISTER] = {
.flags = APEI_EXEC_INS_ACCESS_REGISTER,
.run = apei_exec_read_register,
},
[ACPI_EINJ_READ_REGISTER_VALUE] = {
.flags = APEI_EXEC_INS_ACCESS_REGISTER,
.run = apei_exec_read_register_value,
},
[ACPI_EINJ_WRITE_REGISTER] = {
.flags = APEI_EXEC_INS_ACCESS_REGISTER,
.run = apei_exec_write_register,
},
[ACPI_EINJ_WRITE_REGISTER_VALUE] = {
.flags = APEI_EXEC_INS_ACCESS_REGISTER,
.run = apei_exec_write_register_value,
},
[ACPI_EINJ_NOOP] = {
.flags = 0,
.run = apei_exec_noop,
},
};
/*
* Prevent the EINJ interpreter from running concurrently, because the
* corresponding firmware implementation may not work properly when
* invoked concurrently.
*/
static DEFINE_MUTEX(einj_mutex);
static struct einj_parameter *einj_param;
static void einj_exec_ctx_init(struct apei_exec_context *ctx)
{
apei_exec_ctx_init(ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type),
EINJ_TAB_ENTRY(einj_tab), einj_tab->entries);
}
static int __einj_get_available_error_type(u32 *type)
{
struct apei_exec_context ctx;
int rc;
einj_exec_ctx_init(&ctx);
rc = apei_exec_run(&ctx, ACPI_EINJ_GET_ERROR_TYPE);
if (rc)
return rc;
*type = apei_exec_ctx_get_output(&ctx);
return 0;
}
/* Get error injection capabilities of the platform */
static int einj_get_available_error_type(u32 *type)
{
int rc;
mutex_lock(&einj_mutex);
rc = __einj_get_available_error_type(type);
mutex_unlock(&einj_mutex);
return rc;
}
static int einj_timedout(u64 *t)
{
if ((s64)*t < SPIN_UNIT) {
pr_warning(FW_WARN EINJ_PFX
"Firmware does not respond in time\n");
return 1;
}
*t -= SPIN_UNIT;
ndelay(SPIN_UNIT);
touch_nmi_watchdog();
return 0;
}
static u64 einj_get_parameter_address(void)
{
int i;
u64 paddr = 0;
struct acpi_whea_header *entry;
entry = EINJ_TAB_ENTRY(einj_tab);
for (i = 0; i < einj_tab->entries; i++) {
if (entry->action == ACPI_EINJ_SET_ERROR_TYPE &&
entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
entry->register_region.space_id ==
ACPI_ADR_SPACE_SYSTEM_MEMORY)
memcpy(&paddr, &entry->register_region.address,
sizeof(paddr));
entry++;
}
return paddr;
}
/* Do a sanity check on the trigger table */
static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab)
{
if (trigger_tab->header_size != sizeof(struct acpi_einj_trigger))
return -EINVAL;
if (trigger_tab->table_size > PAGE_SIZE ||
trigger_tab->table_size <= trigger_tab->header_size)
return -EINVAL;
if (trigger_tab->entry_count !=
(trigger_tab->table_size - trigger_tab->header_size) /
sizeof(struct acpi_einj_entry))
return -EINVAL;
return 0;
}
/* Execute instructions in trigger error action table */
static int __einj_error_trigger(u64 trigger_paddr)
{
struct acpi_einj_trigger *trigger_tab = NULL;
struct apei_exec_context trigger_ctx;
struct apei_resources trigger_resources;
struct acpi_whea_header *trigger_entry;
struct resource *r;
u32 table_size;
int rc = -EIO;
r = request_mem_region(trigger_paddr, sizeof(*trigger_tab),
"APEI EINJ Trigger Table");
if (!r) {
pr_err(EINJ_PFX
"Can not request iomem region <%016llx-%016llx> for Trigger table.\n",
(unsigned long long)trigger_paddr,
(unsigned long long)trigger_paddr+sizeof(*trigger_tab));
goto out;
}
trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab));
if (!trigger_tab) {
pr_err(EINJ_PFX "Failed to map trigger table!\n");
goto out_rel_header;
}
rc = einj_check_trigger_header(trigger_tab);
if (rc) {
pr_warning(FW_BUG EINJ_PFX
"The trigger error action table is invalid\n");
goto out_rel_header;
}
rc = -EIO;
table_size = trigger_tab->table_size;
r = request_mem_region(trigger_paddr + sizeof(*trigger_tab),
table_size - sizeof(*trigger_tab),
"APEI EINJ Trigger Table");
if (!r) {
pr_err(EINJ_PFX
"Can not request iomem region <%016llx-%016llx> for Trigger Table Entry.\n",
(unsigned long long)trigger_paddr+sizeof(*trigger_tab),
(unsigned long long)trigger_paddr + table_size);
goto out_rel_header;
}
iounmap(trigger_tab);
trigger_tab = ioremap_cache(trigger_paddr, table_size);
if (!trigger_tab) {
pr_err(EINJ_PFX "Failed to map trigger table!\n");
goto out_rel_entry;
}
trigger_entry = (struct acpi_whea_header *)
((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
apei_resources_init(&trigger_resources);
apei_exec_ctx_init(&trigger_ctx, einj_ins_type,
ARRAY_SIZE(einj_ins_type),
trigger_entry, trigger_tab->entry_count);
rc = apei_exec_collect_resources(&trigger_ctx, &trigger_resources);
if (rc)
goto out_fini;
rc = apei_resources_sub(&trigger_resources, &einj_resources);
if (rc)
goto out_fini;
rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger");
if (rc)
goto out_fini;
rc = apei_exec_pre_map_gars(&trigger_ctx);
if (rc)
goto out_release;
rc = apei_exec_run(&trigger_ctx, ACPI_EINJ_TRIGGER_ERROR);
apei_exec_post_unmap_gars(&trigger_ctx);
out_release:
apei_resources_release(&trigger_resources);
out_fini:
apei_resources_fini(&trigger_resources);
out_rel_entry:
release_mem_region(trigger_paddr + sizeof(*trigger_tab),
table_size - sizeof(*trigger_tab));
out_rel_header:
release_mem_region(trigger_paddr, sizeof(*trigger_tab));
out:
if (trigger_tab)
iounmap(trigger_tab);
return rc;
}
static int __einj_error_inject(u32 type, u64 param1, u64 param2)
{
struct apei_exec_context ctx;
u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT;
int rc;
einj_exec_ctx_init(&ctx);
rc = apei_exec_run(&ctx, ACPI_EINJ_BEGIN_OPERATION);
if (rc)
return rc;
apei_exec_ctx_set_input(&ctx, type);
rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
if (rc)
return rc;
if (einj_param) {
writeq(param1, &einj_param->param1);
writeq(param2, &einj_param->param2);
}
rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION);
if (rc)
return rc;
for (;;) {
rc = apei_exec_run(&ctx, ACPI_EINJ_CHECK_BUSY_STATUS);
if (rc)
return rc;
val = apei_exec_ctx_get_output(&ctx);
if (!(val & EINJ_OP_BUSY))
break;
if (einj_timedout(&timeout))
return -EIO;
}
rc = apei_exec_run(&ctx, ACPI_EINJ_GET_COMMAND_STATUS);
if (rc)
return rc;
val = apei_exec_ctx_get_output(&ctx);
if (val != EINJ_STATUS_SUCCESS)
return -EBUSY;
rc = apei_exec_run(&ctx, ACPI_EINJ_GET_TRIGGER_TABLE);
if (rc)
return rc;
trigger_paddr = apei_exec_ctx_get_output(&ctx);
rc = __einj_error_trigger(trigger_paddr);
if (rc)
return rc;
rc = apei_exec_run(&ctx, ACPI_EINJ_END_OPERATION);
return rc;
}
/* Inject the specified hardware error */
static int einj_error_inject(u32 type, u64 param1, u64 param2)
{
int rc;
mutex_lock(&einj_mutex);
rc = __einj_error_inject(type, param1, param2);
mutex_unlock(&einj_mutex);
return rc;
}
static u32 error_type;
static u64 error_param1;
static u64 error_param2;
static struct dentry *einj_debug_dir;
static int available_error_type_show(struct seq_file *m, void *v)
{
int rc;
u32 available_error_type = 0;
rc = einj_get_available_error_type(&available_error_type);
if (rc)
return rc;
if (available_error_type & 0x0001)
seq_printf(m, "0x00000001\tProcessor Correctable\n");
if (available_error_type & 0x0002)
seq_printf(m, "0x00000002\tProcessor Uncorrectable non-fatal\n");
if (available_error_type & 0x0004)
seq_printf(m, "0x00000004\tProcessor Uncorrectable fatal\n");
if (available_error_type & 0x0008)
seq_printf(m, "0x00000008\tMemory Correctable\n");
if (available_error_type & 0x0010)
seq_printf(m, "0x00000010\tMemory Uncorrectable non-fatal\n");
if (available_error_type & 0x0020)
seq_printf(m, "0x00000020\tMemory Uncorrectable fatal\n");
if (available_error_type & 0x0040)
seq_printf(m, "0x00000040\tPCI Express Correctable\n");
if (available_error_type & 0x0080)
seq_printf(m, "0x00000080\tPCI Express Uncorrectable non-fatal\n");
if (available_error_type & 0x0100)
seq_printf(m, "0x00000100\tPCI Express Uncorrectable fatal\n");
if (available_error_type & 0x0200)
seq_printf(m, "0x00000200\tPlatform Correctable\n");
if (available_error_type & 0x0400)
seq_printf(m, "0x00000400\tPlatform Uncorrectable non-fatal\n");
if (available_error_type & 0x0800)
seq_printf(m, "0x00000800\tPlatform Uncorrectable fatal\n");
return 0;
}
static int available_error_type_open(struct inode *inode, struct file *file)
{
return single_open(file, available_error_type_show, NULL);
}
static const struct file_operations available_error_type_fops = {
.open = available_error_type_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int error_type_get(void *data, u64 *val)
{
*val = error_type;
return 0;
}
static int error_type_set(void *data, u64 val)
{
int rc;
u32 available_error_type = 0;
/* Only one error type can be specified */
if (val & (val - 1))
return -EINVAL;
rc = einj_get_available_error_type(&available_error_type);
if (rc)
return rc;
if (!(val & available_error_type))
return -EINVAL;
error_type = val;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(error_type_fops, error_type_get,
error_type_set, "0x%llx\n");
static int error_inject_set(void *data, u64 val)
{
if (!error_type)
return -EINVAL;
return einj_error_inject(error_type, error_param1, error_param2);
}
DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL,
error_inject_set, "%llu\n");
static int einj_check_table(struct acpi_table_einj *einj_tab)
{
if (einj_tab->header_length != sizeof(struct acpi_table_einj))
return -EINVAL;
if (einj_tab->header.length < sizeof(struct acpi_table_einj))
return -EINVAL;
if (einj_tab->entries !=
(einj_tab->header.length - sizeof(struct acpi_table_einj)) /
sizeof(struct acpi_einj_entry))
return -EINVAL;
return 0;
}
static int __init einj_init(void)
{
int rc;
u64 param_paddr;
acpi_status status;
struct dentry *fentry;
struct apei_exec_context ctx;
if (acpi_disabled)
return -ENODEV;
status = acpi_get_table(ACPI_SIG_EINJ, 0,
(struct acpi_table_header **)&einj_tab);
if (status == AE_NOT_FOUND) {
pr_info(EINJ_PFX "Table is not found!\n");
return -ENODEV;
} else if (ACPI_FAILURE(status)) {
const char *msg = acpi_format_exception(status);
pr_err(EINJ_PFX "Failed to get table, %s\n", msg);
return -EINVAL;
}
rc = einj_check_table(einj_tab);
if (rc) {
pr_warning(FW_BUG EINJ_PFX "EINJ table is invalid\n");
return -EINVAL;
}
rc = -ENOMEM;
einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir());
if (!einj_debug_dir)
goto err_cleanup;
fentry = debugfs_create_file("available_error_type", S_IRUSR,
einj_debug_dir, NULL,
&available_error_type_fops);
if (!fentry)
goto err_cleanup;
fentry = debugfs_create_file("error_type", S_IRUSR | S_IWUSR,
einj_debug_dir, NULL, &error_type_fops);
if (!fentry)
goto err_cleanup;
fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
einj_debug_dir, &error_param1);
if (!fentry)
goto err_cleanup;
fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
einj_debug_dir, &error_param2);
if (!fentry)
goto err_cleanup;
fentry = debugfs_create_file("error_inject", S_IWUSR,
einj_debug_dir, NULL, &error_inject_fops);
if (!fentry)
goto err_cleanup;
apei_resources_init(&einj_resources);
einj_exec_ctx_init(&ctx);
rc = apei_exec_collect_resources(&ctx, &einj_resources);
if (rc)
goto err_fini;
rc = apei_resources_request(&einj_resources, "APEI EINJ");
if (rc)
goto err_fini;
rc = apei_exec_pre_map_gars(&ctx);
if (rc)
goto err_release;
param_paddr = einj_get_parameter_address();
if (param_paddr) {
einj_param = ioremap(param_paddr, sizeof(*einj_param));
rc = -ENOMEM;
if (!einj_param)
goto err_unmap;
}
pr_info(EINJ_PFX "Error INJection is initialized.\n");
return 0;
err_unmap:
apei_exec_post_unmap_gars(&ctx);
err_release:
apei_resources_release(&einj_resources);
err_fini:
apei_resources_fini(&einj_resources);
err_cleanup:
debugfs_remove_recursive(einj_debug_dir);
return rc;
}
static void __exit einj_exit(void)
{
struct apei_exec_context ctx;
if (einj_param)
iounmap(einj_param);
einj_exec_ctx_init(&ctx);
apei_exec_post_unmap_gars(&ctx);
apei_resources_release(&einj_resources);
apei_resources_fini(&einj_resources);
debugfs_remove_recursive(einj_debug_dir);
}
module_init(einj_init);
module_exit(einj_exit);
MODULE_AUTHOR("Huang Ying");
MODULE_DESCRIPTION("APEI Error INJection support");
MODULE_LICENSE("GPL");
/*
* APEI Error Record Serialization Table support
*
* ERST is a way provided by APEI to save and retrieve hardware error
* information to and from a persistent store.
*
* For more information about ERST, please refer to ACPI Specification
* version 4.0, section 17.4.
*
* Copyright 2010 Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/acpi.h>
#include <linux/uaccess.h>
#include <linux/cper.h>
#include <linux/nmi.h>
#include <acpi/apei.h>
#include "apei-internal.h"
#define ERST_PFX "ERST: "
/* ERST command status */
#define ERST_STATUS_SUCCESS 0x0
#define ERST_STATUS_NOT_ENOUGH_SPACE 0x1
#define ERST_STATUS_HARDWARE_NOT_AVAILABLE 0x2
#define ERST_STATUS_FAILED 0x3
#define ERST_STATUS_RECORD_STORE_EMPTY 0x4
#define ERST_STATUS_RECORD_NOT_FOUND 0x5
#define ERST_TAB_ENTRY(tab) \
((struct acpi_whea_header *)((char *)(tab) + \
sizeof(struct acpi_table_erst)))
#define SPIN_UNIT 100 /* 100ns */
/* Firmware should respond within 1 millisecond */
#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
#define FIRMWARE_MAX_STALL 50 /* 50us */
int erst_disable;
EXPORT_SYMBOL_GPL(erst_disable);
static struct acpi_table_erst *erst_tab;
/* ERST Error Log Address Range attributes */
#define ERST_RANGE_RESERVED 0x0001
#define ERST_RANGE_NVRAM 0x0002
#define ERST_RANGE_SLOW 0x0004
/*
 * ERST Error Log Address Range, used as a buffer for reading/writing
 * error records.
*/
static struct erst_erange {
u64 base;
u64 size;
void __iomem *vaddr;
u32 attr;
} erst_erange;
/*
 * Prevent the ERST interpreter from running simultaneously, because
 * the corresponding firmware implementation may not work properly when
 * invoked concurrently.
 *
 * This lock also provides exclusive access to the ERST Error Log
 * Address Range.
 */
static DEFINE_SPINLOCK(erst_lock);
static inline int erst_errno(int command_status)
{
switch (command_status) {
case ERST_STATUS_SUCCESS:
return 0;
case ERST_STATUS_HARDWARE_NOT_AVAILABLE:
return -ENODEV;
case ERST_STATUS_NOT_ENOUGH_SPACE:
return -ENOSPC;
case ERST_STATUS_RECORD_STORE_EMPTY:
case ERST_STATUS_RECORD_NOT_FOUND:
return -ENOENT;
default:
return -EINVAL;
}
}
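/*
 * Spin-wait bookkeeping: once the remaining timeout budget *t is
 * exhausted, warn and return 1. Otherwise consume one spin_unit
 * (nanoseconds) from the budget, delay for that long and poke the
 * NMI watchdog.
 */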
static int erst_timedout(u64 *t, u64 spin_unit)
{
if ((s64)*t < spin_unit) {
pr_warning(FW_WARN ERST_PFX
"Firmware does not respond in time\n");
return 1;
}
*t -= spin_unit;
ndelay(spin_unit);
touch_nmi_watchdog();
return 0;
}
static int erst_exec_load_var1(struct apei_exec_context *ctx,
struct acpi_whea_header *entry)
{
return __apei_exec_read_register(entry, &ctx->var1);
}
static int erst_exec_load_var2(struct apei_exec_context *ctx,
struct acpi_whea_header *entry)
{
return __apei_exec_read_register(entry, &ctx->var2);
}
static int erst_exec_store_var1(struct apei_exec_context *ctx,
struct acpi_whea_header *entry)
{
return __apei_exec_write_register(entry, ctx->var1);
}
static int erst_exec_add(struct apei_exec_context *ctx,
struct acpi_whea_header *entry)
{
ctx->var1 += ctx->var2;
return 0;
}
static int erst_exec_subtract(struct apei_exec_context *ctx,
struct acpi_whea_header *entry)
{
ctx->var1 -= ctx->var2;
return 0;
}
static int erst_exec_add_value(struct apei_exec_context *ctx,
struct acpi_whea_header *entry)
{
int rc;
u64 val;
rc = __apei_exec_read_register(entry, &val);
if (rc)
return rc;
val += ctx->value;
rc = __apei_exec_write_register(entry, val);
return rc;
}
static int erst_exec_subtract_value(struct apei_exec_context *ctx,
struct acpi_whea_header *entry)
{
int rc;
u64 val;
rc = __apei_exec_read_register(entry, &val);
if (rc)
return rc;
val -= ctx->value;
rc = __apei_exec_write_register(entry, val);
return rc;
}
static int erst_exec_stall(struct apei_exec_context *ctx,
struct acpi_whea_header *entry)
{
u64 stall_time;
if (ctx->value > FIRMWARE_MAX_STALL) {
if (!in_nmi())
pr_warning(FW_WARN ERST_PFX
"Too long stall time for stall instruction: %llx.\n",
ctx->value);
stall_time = FIRMWARE_MAX_STALL;
} else
stall_time = ctx->value;
udelay(stall_time);
return 0;
}
static int erst_exec_stall_while_true(struct apei_exec_context *ctx,
struct acpi_whea_header *entry)
{
int rc;
u64 val;
u64 timeout = FIRMWARE_TIMEOUT;
u64 stall_time;
if (ctx->var1 > FIRMWARE_MAX_STALL) {
if (!in_nmi())
pr_warning(FW_WARN ERST_PFX
"Too long stall time for stall while true instruction: %llx.\n",
ctx->var1);
stall_time = FIRMWARE_MAX_STALL;
} else
stall_time = ctx->var1;
for (;;) {
rc = __apei_exec_read_register(entry, &val);
if (rc)
return rc;
if (val != ctx->value)
break;
if (erst_timedout(&timeout, stall_time * NSEC_PER_USEC))
return -EIO;
}
return 0;
}
static int erst_exec_skip_next_instruction_if_true(
struct apei_exec_context *ctx,
struct acpi_whea_header *entry)
{
int rc;
u64 val;
rc = __apei_exec_read_register(entry, &val);
if (rc)
return rc;
if (val == ctx->value) {
ctx->ip += 2;
return APEI_EXEC_SET_IP;
}
return 0;
}
static int erst_exec_goto(struct apei_exec_context *ctx,
struct acpi_whea_header *entry)
{
ctx->ip = ctx->value;
return APEI_EXEC_SET_IP;
}
static int erst_exec_set_src_address_base(struct apei_exec_context *ctx,
struct acpi_whea_header *entry)
{
return __apei_exec_read_register(entry, &ctx->src_base);
}
static int erst_exec_set_dst_address_base(struct apei_exec_context *ctx,
struct acpi_whea_header *entry)
{
return __apei_exec_read_register(entry, &ctx->dst_base);
}
static int erst_exec_move_data(struct apei_exec_context *ctx,
struct acpi_whea_header *entry)
{
int rc;
u64 offset;
rc = __apei_exec_read_register(entry, &offset);
if (rc)
return rc;
memmove((void *)ctx->dst_base + offset,
(void *)ctx->src_base + offset,
ctx->var2);
return 0;
}
static struct apei_exec_ins_type erst_ins_type[] = {
[ACPI_ERST_READ_REGISTER] = {
.flags = APEI_EXEC_INS_ACCESS_REGISTER,
.run = apei_exec_read_register,
},
[ACPI_ERST_READ_REGISTER_VALUE] = {
.flags = APEI_EXEC_INS_ACCESS_REGISTER,
.run = apei_exec_read_register_value,
},
[ACPI_ERST_WRITE_REGISTER] = {
.flags = APEI_EXEC_INS_ACCESS_REGISTER,
.run = apei_exec_write_register,
},
[ACPI_ERST_WRITE_REGISTER_VALUE] = {
.flags = APEI_EXEC_INS_ACCESS_REGISTER,
.run = apei_exec_write_register_value,
},
[ACPI_ERST_NOOP] = {
.flags = 0,
.run = apei_exec_noop,
},
[ACPI_ERST_LOAD_VAR1] = {
.flags = APEI_EXEC_INS_ACCESS_REGISTER,
.run = erst_exec_load_var1,
},
[ACPI_ERST_LOAD_VAR2] = {
.flags = APEI_EXEC_INS_ACCESS_REGISTER,
.run = erst_exec_load_var2,
},
[ACPI_ERST_STORE_VAR1] = {
.flags = APEI_EXEC_INS_ACCESS_REGISTER,
.run = erst_exec_store_var1,
},
[ACPI_ERST_ADD] = {
.flags = 0,
.run = erst_exec_add,
},
[ACPI_ERST_SUBTRACT] = {
.flags = 0,
.run = erst_exec_subtract,
},
[ACPI_ERST_ADD_VALUE] = {
.flags = APEI_EXEC_INS_ACCESS_REGISTER,
.run = erst_exec_add_value,
},
[ACPI_ERST_SUBTRACT_VALUE] = {
.flags = APEI_EXEC_INS_ACCESS_REGISTER,
.run = erst_exec_subtract_value,
},
[ACPI_ERST_STALL] = {
.flags = 0,
.run = erst_exec_stall,
},
[ACPI_ERST_STALL_WHILE_TRUE] = {
.flags = APEI_EXEC_INS_ACCESS_REGISTER,
.run = erst_exec_stall_while_true,
},
[ACPI_ERST_SKIP_NEXT_IF_TRUE] = {
.flags = APEI_EXEC_INS_ACCESS_REGISTER,
.run = erst_exec_skip_next_instruction_if_true,
},
[ACPI_ERST_GOTO] = {
.flags = 0,
.run = erst_exec_goto,
},
[ACPI_ERST_SET_SRC_ADDRESS_BASE] = {
.flags = APEI_EXEC_INS_ACCESS_REGISTER,
.run = erst_exec_set_src_address_base,
},
[ACPI_ERST_SET_DST_ADDRESS_BASE] = {
.flags = APEI_EXEC_INS_ACCESS_REGISTER,
.run = erst_exec_set_dst_address_base,
},
[ACPI_ERST_MOVE_DATA] = {
.flags = APEI_EXEC_INS_ACCESS_REGISTER,
.run = erst_exec_move_data,
},
};
static inline void erst_exec_ctx_init(struct apei_exec_context *ctx)
{
apei_exec_ctx_init(ctx, erst_ins_type, ARRAY_SIZE(erst_ins_type),
ERST_TAB_ENTRY(erst_tab), erst_tab->entries);
}
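/*
 * Query the Error Log Address Range from the firmware: its base
 * address, size and attributes, via the corresponding ERST actions.
 */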
static int erst_get_erange(struct erst_erange *range)
{
struct apei_exec_context ctx;
int rc;
erst_exec_ctx_init(&ctx);
rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_RANGE);
if (rc)
return rc;
range->base = apei_exec_ctx_get_output(&ctx);
rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_LENGTH);
if (rc)
return rc;
range->size = apei_exec_ctx_get_output(&ctx);
rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_ATTRIBUTES);
if (rc)
return rc;
range->attr = apei_exec_ctx_get_output(&ctx);
return 0;
}
static ssize_t __erst_get_record_count(void)
{
struct apei_exec_context ctx;
int rc;
erst_exec_ctx_init(&ctx);
rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_COUNT);
if (rc)
return rc;
return apei_exec_ctx_get_output(&ctx);
}
ssize_t erst_get_record_count(void)
{
ssize_t count;
unsigned long flags;
if (erst_disable)
return -ENODEV;
spin_lock_irqsave(&erst_lock, flags);
count = __erst_get_record_count();
spin_unlock_irqrestore(&erst_lock, flags);
return count;
}
EXPORT_SYMBOL_GPL(erst_get_record_count);
static int __erst_get_next_record_id(u64 *record_id)
{
struct apei_exec_context ctx;
int rc;
erst_exec_ctx_init(&ctx);
rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_ID);
if (rc)
return rc;
*record_id = apei_exec_ctx_get_output(&ctx);
return 0;
}
/*
 * Get the record ID of an existing error record in persistent
 * storage. If there is no error record in persistent storage, the
 * returned record_id is APEI_ERST_INVALID_RECORD_ID.
*/
int erst_get_next_record_id(u64 *record_id)
{
int rc;
unsigned long flags;
if (erst_disable)
return -ENODEV;
spin_lock_irqsave(&erst_lock, flags);
rc = __erst_get_next_record_id(record_id);
spin_unlock_irqrestore(&erst_lock, flags);
return rc;
}
EXPORT_SYMBOL_GPL(erst_get_next_record_id);
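/*
 * Firmware handshake shared by the write/read/clear helpers below:
 * BEGIN_* -> set parameters -> EXECUTE_OPERATION -> poll
 * CHECK_BUSY_STATUS (bounded by FIRMWARE_TIMEOUT) -> GET_COMMAND_STATUS
 * -> END, then translate the command status into an errno via
 * erst_errno().
 */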
static int __erst_write_to_storage(u64 offset)
{
struct apei_exec_context ctx;
u64 timeout = FIRMWARE_TIMEOUT;
u64 val;
int rc;
erst_exec_ctx_init(&ctx);
rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_WRITE);
if (rc)
return rc;
apei_exec_ctx_set_input(&ctx, offset);
rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET);
if (rc)
return rc;
rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
if (rc)
return rc;
for (;;) {
rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
if (rc)
return rc;
val = apei_exec_ctx_get_output(&ctx);
if (!val)
break;
if (erst_timedout(&timeout, SPIN_UNIT))
return -EIO;
}
rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
if (rc)
return rc;
val = apei_exec_ctx_get_output(&ctx);
rc = apei_exec_run(&ctx, ACPI_ERST_END);
if (rc)
return rc;
return erst_errno(val);
}
static int __erst_read_from_storage(u64 record_id, u64 offset)
{
struct apei_exec_context ctx;
u64 timeout = FIRMWARE_TIMEOUT;
u64 val;
int rc;
erst_exec_ctx_init(&ctx);
rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_READ);
if (rc)
return rc;
apei_exec_ctx_set_input(&ctx, offset);
rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET);
if (rc)
return rc;
apei_exec_ctx_set_input(&ctx, record_id);
rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID);
if (rc)
return rc;
rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
if (rc)
return rc;
for (;;) {
rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
if (rc)
return rc;
val = apei_exec_ctx_get_output(&ctx);
if (!val)
break;
if (erst_timedout(&timeout, SPIN_UNIT))
return -EIO;
	}
rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
if (rc)
return rc;
val = apei_exec_ctx_get_output(&ctx);
rc = apei_exec_run(&ctx, ACPI_ERST_END);
if (rc)
return rc;
return erst_errno(val);
}
static int __erst_clear_from_storage(u64 record_id)
{
struct apei_exec_context ctx;
u64 timeout = FIRMWARE_TIMEOUT;
u64 val;
int rc;
erst_exec_ctx_init(&ctx);
rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_CLEAR);
if (rc)
return rc;
apei_exec_ctx_set_input(&ctx, record_id);
rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID);
if (rc)
return rc;
rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
if (rc)
return rc;
for (;;) {
rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
if (rc)
return rc;
val = apei_exec_ctx_get_output(&ctx);
if (!val)
break;
if (erst_timedout(&timeout, SPIN_UNIT))
return -EIO;
}
rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
if (rc)
return rc;
val = apei_exec_ctx_get_output(&ctx);
rc = apei_exec_run(&ctx, ACPI_ERST_END);
if (rc)
return rc;
return erst_errno(val);
}
/* NVRAM ERST Error Log Address Range is not supported yet */
static void pr_unimpl_nvram(void)
{
if (printk_ratelimit())
pr_warning(ERST_PFX
"NVRAM ERST Log Address Range is not implemented yet\n");
}
static int __erst_write_to_nvram(const struct cper_record_header *record)
{
	/* Do not print a message here: printk() is not NMI-safe */
return -ENOSYS;
}
static int __erst_read_to_erange_from_nvram(u64 record_id, u64 *offset)
{
pr_unimpl_nvram();
return -ENOSYS;
}
static int __erst_clear_from_nvram(u64 record_id)
{
pr_unimpl_nvram();
return -ENOSYS;
}
int erst_write(const struct cper_record_header *record)
{
int rc;
unsigned long flags;
struct cper_record_header *rcd_erange;
if (erst_disable)
return -ENODEV;
if (memcmp(record->signature, CPER_SIG_RECORD, CPER_SIG_SIZE))
return -EINVAL;
if (erst_erange.attr & ERST_RANGE_NVRAM) {
if (!spin_trylock_irqsave(&erst_lock, flags))
return -EBUSY;
rc = __erst_write_to_nvram(record);
spin_unlock_irqrestore(&erst_lock, flags);
return rc;
}
if (record->record_length > erst_erange.size)
return -EINVAL;
if (!spin_trylock_irqsave(&erst_lock, flags))
return -EBUSY;
memcpy(erst_erange.vaddr, record, record->record_length);
rcd_erange = erst_erange.vaddr;
/* signature for serialization system */
memcpy(&rcd_erange->persistence_information, "ER", 2);
rc = __erst_write_to_storage(0);
spin_unlock_irqrestore(&erst_lock, flags);
return rc;
}
EXPORT_SYMBOL_GPL(erst_write);
static int __erst_read_to_erange(u64 record_id, u64 *offset)
{
int rc;
if (erst_erange.attr & ERST_RANGE_NVRAM)
return __erst_read_to_erange_from_nvram(
record_id, offset);
rc = __erst_read_from_storage(record_id, 0);
if (rc)
return rc;
*offset = 0;
return 0;
}
static ssize_t __erst_read(u64 record_id, struct cper_record_header *record,
size_t buflen)
{
int rc;
u64 offset, len = 0;
struct cper_record_header *rcd_tmp;
rc = __erst_read_to_erange(record_id, &offset);
if (rc)
return rc;
rcd_tmp = erst_erange.vaddr + offset;
len = rcd_tmp->record_length;
if (len <= buflen)
memcpy(record, rcd_tmp, len);
return len;
}
/*
 * Return value > buflen: the buffer is not big enough (the value is
 * the required record length); return value < 0: an error occurred;
 * otherwise the record was copied and the return value is its length.
 */
ssize_t erst_read(u64 record_id, struct cper_record_header *record,
size_t buflen)
{
ssize_t len;
unsigned long flags;
if (erst_disable)
return -ENODEV;
spin_lock_irqsave(&erst_lock, flags);
len = __erst_read(record_id, record, buflen);
spin_unlock_irqrestore(&erst_lock, flags);
return len;
}
EXPORT_SYMBOL_GPL(erst_read);
/*
 * Return value > buflen: the buffer is not big enough; return value
 * == 0: there are no more records to read; return value < 0: an error
 * occurred; otherwise the record was copied and the return value is
 * its length.
 */
ssize_t erst_read_next(struct cper_record_header *record, size_t buflen)
{
int rc;
ssize_t len;
unsigned long flags;
u64 record_id;
if (erst_disable)
return -ENODEV;
spin_lock_irqsave(&erst_lock, flags);
rc = __erst_get_next_record_id(&record_id);
if (rc) {
spin_unlock_irqrestore(&erst_lock, flags);
return rc;
}
/* no more record */
if (record_id == APEI_ERST_INVALID_RECORD_ID) {
spin_unlock_irqrestore(&erst_lock, flags);
return 0;
}
len = __erst_read(record_id, record, buflen);
spin_unlock_irqrestore(&erst_lock, flags);
return len;
}
EXPORT_SYMBOL_GPL(erst_read_next);
int erst_clear(u64 record_id)
{
int rc;
unsigned long flags;
if (erst_disable)
return -ENODEV;
spin_lock_irqsave(&erst_lock, flags);
if (erst_erange.attr & ERST_RANGE_NVRAM)
rc = __erst_clear_from_nvram(record_id);
else
rc = __erst_clear_from_storage(record_id);
spin_unlock_irqrestore(&erst_lock, flags);
return rc;
}
EXPORT_SYMBOL_GPL(erst_clear);
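/*
 * Illustrative sketch (not part of the original patch): one way a
 * kernel client could drain the ERST store using the exported helpers
 * above. The function name and the fixed 4KiB buffer are assumptions
 * made here for illustration; real callers should size their buffer
 * from the length returned by erst_read_next().
 */
static void __maybe_unused erst_drain_example(void)
{
	static u8 buf[4096] __aligned(8);
	struct cper_record_header *rcd = (struct cper_record_header *)buf;
	ssize_t len;

	while ((len = erst_read_next(rcd, sizeof(buf))) > 0) {
		if (len > (ssize_t)sizeof(buf))
			break;	/* record does not fit into our buffer */
		/* process *rcd here, then remove it from storage */
		erst_clear(rcd->record_id);
	}
}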
static int __init setup_erst_disable(char *str)
{
erst_disable = 1;
return 0;
}
__setup("erst_disable", setup_erst_disable);
static int erst_check_table(struct acpi_table_erst *erst_tab)
{
if (erst_tab->header_length != sizeof(struct acpi_table_erst))
return -EINVAL;
if (erst_tab->header.length < sizeof(struct acpi_table_erst))
return -EINVAL;
if (erst_tab->entries !=
(erst_tab->header.length - sizeof(struct acpi_table_erst)) /
sizeof(struct acpi_erst_entry))
return -EINVAL;
return 0;
}
static int __init erst_init(void)
{
int rc = 0;
acpi_status status;
struct apei_exec_context ctx;
struct apei_resources erst_resources;
struct resource *r;
if (acpi_disabled)
goto err;
if (erst_disable) {
pr_info(ERST_PFX
"Error Record Serialization Table (ERST) support is disabled.\n");
goto err;
}
status = acpi_get_table(ACPI_SIG_ERST, 0,
(struct acpi_table_header **)&erst_tab);
if (status == AE_NOT_FOUND) {
pr_err(ERST_PFX "Table is not found!\n");
goto err;
} else if (ACPI_FAILURE(status)) {
const char *msg = acpi_format_exception(status);
pr_err(ERST_PFX "Failed to get table, %s\n", msg);
rc = -EINVAL;
goto err;
}
rc = erst_check_table(erst_tab);
if (rc) {
pr_err(FW_BUG ERST_PFX "ERST table is invalid\n");
goto err;
}
apei_resources_init(&erst_resources);
erst_exec_ctx_init(&ctx);
rc = apei_exec_collect_resources(&ctx, &erst_resources);
if (rc)
goto err_fini;
rc = apei_resources_request(&erst_resources, "APEI ERST");
if (rc)
goto err_fini;
rc = apei_exec_pre_map_gars(&ctx);
if (rc)
goto err_release;
rc = erst_get_erange(&erst_erange);
if (rc) {
if (rc == -ENODEV)
pr_info(ERST_PFX
"The corresponding hardware device or firmware implementation "
"is not available.\n");
else
pr_err(ERST_PFX
"Failed to get Error Log Address Range.\n");
goto err_unmap_reg;
}
r = request_mem_region(erst_erange.base, erst_erange.size, "APEI ERST");
if (!r) {
pr_err(ERST_PFX
"Can not request iomem region <0x%16llx-0x%16llx> for ERST.\n",
(unsigned long long)erst_erange.base,
(unsigned long long)erst_erange.base + erst_erange.size);
rc = -EIO;
goto err_unmap_reg;
}
rc = -ENOMEM;
erst_erange.vaddr = ioremap_cache(erst_erange.base,
erst_erange.size);
if (!erst_erange.vaddr)
goto err_release_erange;
pr_info(ERST_PFX
"Error Record Serialization Table (ERST) support is initialized.\n");
return 0;
err_release_erange:
release_mem_region(erst_erange.base, erst_erange.size);
err_unmap_reg:
apei_exec_post_unmap_gars(&ctx);
err_release:
apei_resources_release(&erst_resources);
err_fini:
apei_resources_fini(&erst_resources);
err:
erst_disable = 1;
return rc;
}
device_initcall(erst_init);
/*
* APEI Generic Hardware Error Source support
*
 * Generic Hardware Error Source provides a way to report platform
 * hardware errors (such as those from the chipset). It works in the
 * so-called "Firmware First" mode: hardware errors are reported to the
 * firmware first, and the firmware then reports them to Linux. This
 * way, the firmware can check non-standard hardware error registers or
 * non-standard hardware links to produce richer error information for
 * Linux.
*
* For more information about Generic Hardware Error Source, please
* refer to ACPI Specification version 4.0, section 17.3.2.6
*
 * Currently only the SCI notification type and memory errors are
 * supported. More notification types and hardware error types will be
 * added later.
*
* Copyright 2010 Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/cper.h>
#include <linux/kdebug.h>
#include <acpi/apei.h>
#include <acpi/atomicio.h>
#include <acpi/hed.h>
#include <asm/mce.h>
#include "apei-internal.h"
#define GHES_PFX "GHES: "
#define GHES_ESTATUS_MAX_SIZE 65536
/*
* One struct ghes is created for each generic hardware error
* source.
*
 * It provides the context for the APEI hardware error timer/IRQ/SCI/NMI
 * handler. The handler for a given generic hardware error source is
 * only triggered after the previous invocation has completed, so the
 * handler can use struct ghes without locking.
*
* estatus: memory buffer for error status block, allocated during
* HEST parsing.
*/
#define GHES_TO_CLEAR 0x0001
struct ghes {
struct acpi_hest_generic *generic;
struct acpi_hest_generic_status *estatus;
struct list_head list;
u64 buffer_paddr;
unsigned long flags;
};
/*
 * Error source lists, one list per notification method. The list
 * members are struct ghes.
 *
 * Members are only added during HEST parsing and removed during
 * module_exit, both single-threaded, so no lock is needed for that.
 *
 * However, mutual exclusion is needed between adding/removing members
 * and the timer/IRQ/SCI/NMI handlers, which may traverse the lists;
 * RCU is used for that.
*/
static LIST_HEAD(ghes_sci);
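/*
 * Allocate a struct ghes for one generic error source: pre-map its
 * Error Status Address register and allocate the estatus buffer,
 * clamped to GHES_ESTATUS_MAX_SIZE.
 */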
static struct ghes *ghes_new(struct acpi_hest_generic *generic)
{
struct ghes *ghes;
unsigned int error_block_length;
int rc;
ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
if (!ghes)
return ERR_PTR(-ENOMEM);
ghes->generic = generic;
INIT_LIST_HEAD(&ghes->list);
rc = acpi_pre_map_gar(&generic->error_status_address);
if (rc)
goto err_free;
error_block_length = generic->error_block_length;
if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
pr_warning(FW_WARN GHES_PFX
"Error status block length is too long: %u for "
"generic hardware error source: %d.\n",
error_block_length, generic->header.source_id);
error_block_length = GHES_ESTATUS_MAX_SIZE;
}
ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
if (!ghes->estatus) {
rc = -ENOMEM;
goto err_unmap;
}
return ghes;
err_unmap:
acpi_post_unmap_gar(&generic->error_status_address);
err_free:
kfree(ghes);
return ERR_PTR(rc);
}
static void ghes_fini(struct ghes *ghes)
{
kfree(ghes->estatus);
acpi_post_unmap_gar(&ghes->generic->error_status_address);
}
enum {
GHES_SER_NO = 0x0,
GHES_SER_CORRECTED = 0x1,
GHES_SER_RECOVERABLE = 0x2,
GHES_SER_PANIC = 0x3,
};
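/* Map a CPER error severity onto the GHES severity levels above */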
static inline int ghes_severity(int severity)
{
switch (severity) {
case CPER_SER_INFORMATIONAL:
return GHES_SER_NO;
case CPER_SER_CORRECTED:
return GHES_SER_CORRECTED;
case CPER_SER_RECOVERABLE:
return GHES_SER_RECOVERABLE;
case CPER_SER_FATAL:
return GHES_SER_PANIC;
default:
		/* Unknown severity, treat as panic */
return GHES_SER_PANIC;
}
}
/* The SCI handler runs in process context (a work queue), so ioremap can be used here */
static int ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
int from_phys)
{
void *vaddr;
vaddr = ioremap_cache(paddr, len);
if (!vaddr)
return -ENOMEM;
if (from_phys)
memcpy(buffer, vaddr, len);
else
memcpy(vaddr, buffer, len);
iounmap(vaddr);
return 0;
}
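/*
 * Read the error status block in two steps: first the fixed-size
 * header, so that the total length can be found and validated, then
 * the remainder of the block. GHES_TO_CLEAR is set so that the block
 * status can be cleared afterwards.
 */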
static int ghes_read_estatus(struct ghes *ghes, int silent)
{
struct acpi_hest_generic *g = ghes->generic;
u64 buf_paddr;
u32 len;
int rc;
rc = acpi_atomic_read(&buf_paddr, &g->error_status_address);
if (rc) {
if (!silent && printk_ratelimit())
pr_warning(FW_WARN GHES_PFX
"Failed to read error status block address for hardware error source: %d.\n",
g->header.source_id);
return -EIO;
}
if (!buf_paddr)
return -ENOENT;
rc = ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
sizeof(*ghes->estatus), 1);
if (rc)
return rc;
if (!ghes->estatus->block_status)
return -ENOENT;
ghes->buffer_paddr = buf_paddr;
ghes->flags |= GHES_TO_CLEAR;
rc = -EIO;
len = apei_estatus_len(ghes->estatus);
if (len < sizeof(*ghes->estatus))
goto err_read_block;
if (len > ghes->generic->error_block_length)
goto err_read_block;
if (apei_estatus_check_header(ghes->estatus))
goto err_read_block;
rc = ghes_copy_tofrom_phys(ghes->estatus + 1,
buf_paddr + sizeof(*ghes->estatus),
len - sizeof(*ghes->estatus), 1);
if (rc)
return rc;
if (apei_estatus_check(ghes->estatus))
goto err_read_block;
rc = 0;
err_read_block:
if (rc && !silent)
pr_warning(FW_WARN GHES_PFX
"Failed to read error status block!\n");
return rc;
}
static void ghes_clear_estatus(struct ghes *ghes)
{
ghes->estatus->block_status = 0;
if (!(ghes->flags & GHES_TO_CLEAR))
return;
ghes_copy_tofrom_phys(ghes->estatus, ghes->buffer_paddr,
sizeof(ghes->estatus->block_status), 0);
ghes->flags &= ~GHES_TO_CLEAR;
}
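/*
 * Walk the sections of the error status block and hand recognized
 * ones off; currently only platform memory errors are reported,
 * through apei_mce_report_mem_error() when CONFIG_X86_MCE is enabled.
 */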
static void ghes_do_proc(struct ghes *ghes)
{
int ser, processed = 0;
struct acpi_hest_generic_data *gdata;
ser = ghes_severity(ghes->estatus->error_severity);
apei_estatus_for_each_section(ghes->estatus, gdata) {
#ifdef CONFIG_X86_MCE
if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
CPER_SEC_PLATFORM_MEM)) {
apei_mce_report_mem_error(
ser == GHES_SER_CORRECTED,
(struct cper_sec_mem_err *)(gdata+1));
processed = 1;
}
#endif
}
if (!processed && printk_ratelimit())
pr_warning(GHES_PFX
"Unknown error record from generic hardware error source: %d\n",
ghes->generic->header.source_id);
}
static int ghes_proc(struct ghes *ghes)
{
int rc;
rc = ghes_read_estatus(ghes, 0);
if (rc)
goto out;
ghes_do_proc(ghes);
out:
ghes_clear_estatus(ghes);
return 0;
}
static int ghes_notify_sci(struct notifier_block *this,
unsigned long event, void *data)
{
struct ghes *ghes;
int ret = NOTIFY_DONE;
rcu_read_lock();
list_for_each_entry_rcu(ghes, &ghes_sci, list) {
if (!ghes_proc(ghes))
ret = NOTIFY_OK;
}
rcu_read_unlock();
return ret;
}
static struct notifier_block ghes_notifier_sci = {
.notifier_call = ghes_notify_sci,
};
static int hest_ghes_parse(struct acpi_hest_header *hest_hdr, void *data)
{
struct acpi_hest_generic *generic;
struct ghes *ghes = NULL;
int rc = 0;
if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
return 0;
generic = (struct acpi_hest_generic *)hest_hdr;
if (!generic->enabled)
return 0;
if (generic->error_block_length <
sizeof(struct acpi_hest_generic_status)) {
pr_warning(FW_BUG GHES_PFX
"Invalid error block length: %u for generic hardware error source: %d\n",
generic->error_block_length,
generic->header.source_id);
goto err;
}
if (generic->records_to_preallocate == 0) {
pr_warning(FW_BUG GHES_PFX
"Invalid records to preallocate: %u for generic hardware error source: %d\n",
generic->records_to_preallocate,
generic->header.source_id);
goto err;
}
ghes = ghes_new(generic);
if (IS_ERR(ghes)) {
rc = PTR_ERR(ghes);
ghes = NULL;
goto err;
}
switch (generic->notify.type) {
case ACPI_HEST_NOTIFY_POLLED:
pr_warning(GHES_PFX
"Generic hardware error source: %d notified via POLL is not supported!\n",
generic->header.source_id);
break;
case ACPI_HEST_NOTIFY_EXTERNAL:
case ACPI_HEST_NOTIFY_LOCAL:
pr_warning(GHES_PFX
"Generic hardware error source: %d notified via IRQ is not supported!\n",
generic->header.source_id);
break;
case ACPI_HEST_NOTIFY_SCI:
if (list_empty(&ghes_sci))
register_acpi_hed_notifier(&ghes_notifier_sci);
list_add_rcu(&ghes->list, &ghes_sci);
break;
case ACPI_HEST_NOTIFY_NMI:
pr_warning(GHES_PFX
"Generic hardware error source: %d notified via NMI is not supported!\n",
generic->header.source_id);
break;
default:
pr_warning(FW_WARN GHES_PFX
"Unknown notification type: %u for generic hardware error source: %d\n",
generic->notify.type, generic->header.source_id);
break;
}
return 0;
err:
if (ghes)
ghes_fini(ghes);
return rc;
}
static void ghes_cleanup(void)
{
struct ghes *ghes, *nghes;
if (!list_empty(&ghes_sci))
unregister_acpi_hed_notifier(&ghes_notifier_sci);
synchronize_rcu();
list_for_each_entry_safe(ghes, nghes, &ghes_sci, list) {
list_del(&ghes->list);
ghes_fini(ghes);
kfree(ghes);
}
}
static int __init ghes_init(void)
{
int rc;
if (acpi_disabled)
return -ENODEV;
if (hest_disable) {
pr_info(GHES_PFX "HEST is not enabled!\n");
return -EINVAL;
}
rc = apei_hest_parse(hest_ghes_parse, NULL);
if (rc) {
pr_err(GHES_PFX
"Error during parsing HEST generic hardware error sources.\n");
goto err_cleanup;
}
if (list_empty(&ghes_sci)) {
pr_info(GHES_PFX
"No functional generic hardware error sources.\n");
rc = -ENODEV;
goto err_cleanup;
}
pr_info(GHES_PFX
"Generic Hardware Error Source support is initialized.\n");
return 0;
err_cleanup:
ghes_cleanup();
return rc;
}
static void __exit ghes_exit(void)
{
ghes_cleanup();
}
module_init(ghes_init);
module_exit(ghes_exit);
MODULE_AUTHOR("Huang Ying");
MODULE_DESCRIPTION("APEI Generic Hardware Error Source support");
MODULE_LICENSE("GPL");
/*
 * APEI Hardware Error Source Table support
 *
 * HEST describes error sources in detail and communicates operational
 * parameters (i.e. severity levels, masking bits, and threshold
 * values) to Linux as necessary. It also allows the BIOS to report
* non-standard error sources to Linux (for example, chipset-specific
* error registers).
*
* For more information about HEST, please refer to ACPI Specification
* version 4.0, section 17.3.2.
*
* Copyright 2009 Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/kdebug.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <acpi/apei.h>
#include "apei-internal.h"
#define HEST_PFX "HEST: "
int hest_disable;
EXPORT_SYMBOL_GPL(hest_disable);
/* HEST table parsing */
static struct acpi_table_hest *hest_tab;
static int hest_void_parse(struct acpi_hest_header *hest_hdr, void *data)
{
return 0;
}
static int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = {
[ACPI_HEST_TYPE_IA32_CHECK] = -1, /* need further calculation */
[ACPI_HEST_TYPE_IA32_CORRECTED_CHECK] = -1,
[ACPI_HEST_TYPE_IA32_NMI] = sizeof(struct acpi_hest_ia_nmi),
[ACPI_HEST_TYPE_AER_ROOT_PORT] = sizeof(struct acpi_hest_aer_root),
[ACPI_HEST_TYPE_AER_ENDPOINT] = sizeof(struct acpi_hest_aer),
[ACPI_HEST_TYPE_AER_BRIDGE] = sizeof(struct acpi_hest_aer_bridge),
[ACPI_HEST_TYPE_GENERIC_ERROR] = sizeof(struct acpi_hest_generic),
};
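/*
 * Length of one error source descriptor; IA32 machine check entries
 * additionally carry a variable number of error bank structures.
 */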
static int hest_esrc_len(struct acpi_hest_header *hest_hdr)
{
u16 hest_type = hest_hdr->type;
int len;
if (hest_type >= ACPI_HEST_TYPE_RESERVED)
return 0;
len = hest_esrc_len_tab[hest_type];
if (hest_type == ACPI_HEST_TYPE_IA32_CORRECTED_CHECK) {
struct acpi_hest_ia_corrected *cmc;
cmc = (struct acpi_hest_ia_corrected *)hest_hdr;
len = sizeof(*cmc) + cmc->num_hardware_banks *
sizeof(struct acpi_hest_ia_error_bank);
} else if (hest_type == ACPI_HEST_TYPE_IA32_CHECK) {
struct acpi_hest_ia_machine_check *mc;
mc = (struct acpi_hest_ia_machine_check *)hest_hdr;
len = sizeof(*mc) + mc->num_hardware_banks *
sizeof(struct acpi_hest_ia_error_bank);
}
BUG_ON(len == -1);
return len;
}
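/*
 * Walk every error source descriptor in the HEST, bounds-checking each
 * entry against the table length, and invoke func(hdr, data) on it;
 * a non-zero return from func aborts the walk.
 */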
int apei_hest_parse(apei_hest_func_t func, void *data)
{
struct acpi_hest_header *hest_hdr;
int i, rc, len;
if (hest_disable)
return -EINVAL;
hest_hdr = (struct acpi_hest_header *)(hest_tab + 1);
for (i = 0; i < hest_tab->error_source_count; i++) {
len = hest_esrc_len(hest_hdr);
if (!len) {
pr_warning(FW_WARN HEST_PFX
"Unknown or unused hardware error source "
"type: %d for hardware error source: %d.\n",
hest_hdr->type, hest_hdr->source_id);
return -EINVAL;
}
if ((void *)hest_hdr + len >
(void *)hest_tab + hest_tab->header.length) {
pr_warning(FW_BUG HEST_PFX
"Table contents overflow for hardware error source: %d.\n",
hest_hdr->source_id);
return -EINVAL;
}
rc = func(hest_hdr, data);
if (rc)
return rc;
hest_hdr = (void *)hest_hdr + len;
}
return 0;
}
EXPORT_SYMBOL_GPL(apei_hest_parse);
static int __init setup_hest_disable(char *str)
{
hest_disable = 1;
return 0;
}
__setup("hest_disable", setup_hest_disable);
static int __init hest_init(void)
{
acpi_status status;
int rc = -ENODEV;
if (acpi_disabled)
goto err;
if (hest_disable) {
		pr_info(HEST_PFX "HEST table parsing is disabled.\n");
goto err;
}
status = acpi_get_table(ACPI_SIG_HEST, 0,
(struct acpi_table_header **)&hest_tab);
if (status == AE_NOT_FOUND) {
pr_info(HEST_PFX "Table is not found!\n");
goto err;
} else if (ACPI_FAILURE(status)) {
const char *msg = acpi_format_exception(status);
pr_err(HEST_PFX "Failed to get table, %s\n", msg);
rc = -EINVAL;
goto err;
}
rc = apei_hest_parse(hest_void_parse, NULL);
if (rc)
goto err;
pr_info(HEST_PFX "HEST table parsing is initialized.\n");
return 0;
err:
hest_disable = 1;
return rc;
}
subsys_initcall(hest_init);
/*
* atomicio.c - ACPI IO memory pre-mapping/post-unmapping, then
* accessing in atomic context.
*
 * This is used by NMI handlers to access IO memory areas, because
 * ioremap/iounmap cannot be used in an NMI handler. The IO memory area
 * is pre-mapped in process context and then accessed in the NMI handler.
*
* Copyright (C) 2009-2010, Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <acpi/atomicio.h>
#define ACPI_PFX "ACPI: "
static LIST_HEAD(acpi_iomaps);
/*
 * Used for mutual exclusion between writers of the acpi_iomaps list;
 * RCU is used for synchronization between readers and the writer.
*/
static DEFINE_SPINLOCK(acpi_iomaps_lock);
struct acpi_iomap {
struct list_head list;
void __iomem *vaddr;
unsigned long size;
phys_addr_t paddr;
struct kref ref;
};
/* acpi_iomaps_lock or RCU read lock must be held before calling */
static struct acpi_iomap *__acpi_find_iomap(phys_addr_t paddr,
unsigned long size)
{
struct acpi_iomap *map;
list_for_each_entry_rcu(map, &acpi_iomaps, list) {
if (map->paddr + map->size >= paddr + size &&
map->paddr <= paddr)
return map;
}
return NULL;
}
/*
 * Atomic "ioremap" used by the NMI handler. If the specified IO memory
 * area is not pre-mapped, NULL is returned.
*
* acpi_iomaps_lock or RCU read lock must be held before calling
*/
static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr,
unsigned long size)
{
struct acpi_iomap *map;
map = __acpi_find_iomap(paddr, size);
if (map)
return map->vaddr + (paddr - map->paddr);
else
return NULL;
}
/* acpi_iomaps_lock must be held before calling */
static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
unsigned long size)
{
struct acpi_iomap *map;
map = __acpi_find_iomap(paddr, size);
if (map) {
kref_get(&map->ref);
return map->vaddr + (paddr - map->paddr);
} else
return NULL;
}
/*
 * Used to pre-map the specified IO memory area. First check whether
 * the area is already pre-mapped; if it is, increase the reference
 * count (in __acpi_try_ioremap) and return. Otherwise, do the real
 * ioremap and add the mapping to the acpi_iomaps list.
*/
static void __iomem *acpi_pre_map(phys_addr_t paddr,
unsigned long size)
{
void __iomem *vaddr;
struct acpi_iomap *map;
unsigned long pg_sz, flags;
phys_addr_t pg_off;
spin_lock_irqsave(&acpi_iomaps_lock, flags);
vaddr = __acpi_try_ioremap(paddr, size);
spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
if (vaddr)
return vaddr;
pg_off = paddr & PAGE_MASK;
pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
vaddr = ioremap(pg_off, pg_sz);
if (!vaddr)
return NULL;
map = kmalloc(sizeof(*map), GFP_KERNEL);
if (!map)
goto err_unmap;
INIT_LIST_HEAD(&map->list);
map->paddr = pg_off;
map->size = pg_sz;
map->vaddr = vaddr;
kref_init(&map->ref);
spin_lock_irqsave(&acpi_iomaps_lock, flags);
vaddr = __acpi_try_ioremap(paddr, size);
if (vaddr) {
spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
iounmap(map->vaddr);
kfree(map);
return vaddr;
}
list_add_tail_rcu(&map->list, &acpi_iomaps);
spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
return vaddr + (paddr - pg_off);
err_unmap:
iounmap(vaddr);
return NULL;
}
/* acpi_iomaps_lock must be held before calling */
static void __acpi_kref_del_iomap(struct kref *ref)
{
struct acpi_iomap *map;
map = container_of(ref, struct acpi_iomap, ref);
list_del_rcu(&map->list);
}
/*
 * Used to post-unmap the specified IO memory area. The iounmap is
 * done only when the reference count drops to zero.
*/
static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
{
struct acpi_iomap *map;
unsigned long flags;
int del;
spin_lock_irqsave(&acpi_iomaps_lock, flags);
map = __acpi_find_iomap(paddr, size);
BUG_ON(!map);
del = kref_put(&map->ref, __acpi_kref_del_iomap);
spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
if (!del)
return;
synchronize_rcu();
iounmap(map->vaddr);
kfree(map);
}
/* When called from an NMI handler, silent should be set to 1 */
static int acpi_check_gar(struct acpi_generic_address *reg,
u64 *paddr, int silent)
{
u32 width, space_id;
width = reg->bit_width;
space_id = reg->space_id;
/* Handle possible alignment issues */
memcpy(paddr, &reg->address, sizeof(*paddr));
if (!*paddr) {
if (!silent)
pr_warning(FW_BUG ACPI_PFX
"Invalid physical address in GAR [0x%llx/%u/%u]\n",
*paddr, width, space_id);
return -EINVAL;
}
if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
if (!silent)
pr_warning(FW_BUG ACPI_PFX
"Invalid bit width in GAR [0x%llx/%u/%u]\n",
*paddr, width, space_id);
return -EINVAL;
}
if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
if (!silent)
pr_warning(FW_BUG ACPI_PFX
"Invalid address space type in GAR [0x%llx/%u/%u]\n",
*paddr, width, space_id);
return -EINVAL;
}
return 0;
}
/* Pre-map, working on GAR */
int acpi_pre_map_gar(struct acpi_generic_address *reg)
{
u64 paddr;
void __iomem *vaddr;
int rc;
if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
return 0;
rc = acpi_check_gar(reg, &paddr, 0);
if (rc)
return rc;
vaddr = acpi_pre_map(paddr, reg->bit_width / 8);
if (!vaddr)
return -EIO;
return 0;
}
EXPORT_SYMBOL_GPL(acpi_pre_map_gar);
/* Post-unmap, working on GAR */
int acpi_post_unmap_gar(struct acpi_generic_address *reg)
{
u64 paddr;
int rc;
if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
return 0;
rc = acpi_check_gar(reg, &paddr, 0);
if (rc)
return rc;
acpi_post_unmap(paddr, reg->bit_width / 8);
return 0;
}
EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);
/*
 * Can be used in atomic (including NMI) or process context. The RCU
 * read lock may only be released after the IO memory access completes.
*/
static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
{
void __iomem *addr;
rcu_read_lock();
addr = __acpi_ioremap_fast(paddr, width);
switch (width) {
case 8:
*val = readb(addr);
break;
case 16:
*val = readw(addr);
break;
case 32:
*val = readl(addr);
break;
case 64:
*val = readq(addr);
break;
default:
return -EINVAL;
}
rcu_read_unlock();
return 0;
}
static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
{
void __iomem *addr;
rcu_read_lock();
addr = __acpi_ioremap_fast(paddr, width);
switch (width) {
case 8:
writeb(val, addr);
break;
case 16:
writew(val, addr);
break;
case 32:
writel(val, addr);
break;
case 64:
writeq(val, addr);
break;
default:
return -EINVAL;
}
rcu_read_unlock();
return 0;
}
/* GAR access in atomic (including NMI) or process context */
int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg)
{
u64 paddr;
int rc;
rc = acpi_check_gar(reg, &paddr, 1);
if (rc)
return rc;
*val = 0;
switch (reg->space_id) {
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
return acpi_atomic_read_mem(paddr, val, reg->bit_width);
case ACPI_ADR_SPACE_SYSTEM_IO:
return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width);
default:
return -EINVAL;
}
}
EXPORT_SYMBOL_GPL(acpi_atomic_read);
int acpi_atomic_write(u64 val, struct acpi_generic_address *reg)
{
u64 paddr;
int rc;
rc = acpi_check_gar(reg, &paddr, 1);
if (rc)
return rc;
switch (reg->space_id) {
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
return acpi_atomic_write_mem(paddr, val, reg->bit_width);
case ACPI_ADR_SPACE_SYSTEM_IO:
return acpi_os_write_port(paddr, val, reg->bit_width);
default:
return -EINVAL;
}
}
EXPORT_SYMBOL_GPL(acpi_atomic_write);
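/*
 * Illustrative sketch (not part of the original patch): the intended
 * calling pattern for the helpers above. The function name is an
 * assumption for illustration only; "reg" would typically come from a
 * firmware table entry such as an ERST or HEST register descriptor.
 */
static int __maybe_unused acpi_atomicio_example(struct acpi_generic_address *reg)
{
	u64 val;
	int rc;

	rc = acpi_pre_map_gar(reg);		/* process context only */
	if (rc)
		return rc;
	rc = acpi_atomic_read(&val, reg);	/* safe even in NMI context */
	acpi_post_unmap_gar(reg);		/* process context only */
	return rc;
}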
......@@ -1027,10 +1027,9 @@ int __init acpi_ec_ecdt_probe(void)
/* Don't trust ECDT, which comes from ASUSTek */
if (!EC_FLAGS_VALIDATE_ECDT)
goto install;
saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL);
saved_ec = kmemdup(boot_ec, sizeof(struct acpi_ec), GFP_KERNEL);
if (!saved_ec)
return -ENOMEM;
memcpy(saved_ec, boot_ec, sizeof(struct acpi_ec));
/* fall through */
}
......
/*
* ACPI Hardware Error Device (PNP0C33) Driver
*
* Copyright (C) 2010, Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
*
 * The ACPI Hardware Error Device is used to report hardware errors
 * that are signalled via SCI, mainly corrected errors.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/hed.h>
static struct acpi_device_id acpi_hed_ids[] = {
{"PNP0C33", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, acpi_hed_ids);
static acpi_handle hed_handle;
static BLOCKING_NOTIFIER_HEAD(acpi_hed_notify_list);
int register_acpi_hed_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&acpi_hed_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_acpi_hed_notifier);
void unregister_acpi_hed_notifier(struct notifier_block *nb)
{
blocking_notifier_chain_unregister(&acpi_hed_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_acpi_hed_notifier);
/*
 * An SCI reporting a hardware error is forwarded to the listeners of
 * HED; this is used by HEST Generic Hardware Error Sources with
 * notification type SCI.
*/
static void acpi_hed_notify(struct acpi_device *device, u32 event)
{
blocking_notifier_call_chain(&acpi_hed_notify_list, 0, NULL);
}
static int __devinit acpi_hed_add(struct acpi_device *device)
{
/* Only one hardware error device */
if (hed_handle)
return -EINVAL;
hed_handle = device->handle;
return 0;
}
static int __devexit acpi_hed_remove(struct acpi_device *device, int type)
{
hed_handle = NULL;
return 0;
}
static struct acpi_driver acpi_hed_driver = {
.name = "hardware_error_device",
.class = "hardware_error",
.ids = acpi_hed_ids,
.ops = {
.add = acpi_hed_add,
.remove = acpi_hed_remove,
.notify = acpi_hed_notify,
},
};
static int __init acpi_hed_init(void)
{
if (acpi_disabled)
return -ENODEV;
if (acpi_bus_register_driver(&acpi_hed_driver) < 0)
return -ENODEV;
return 0;
}
static void __exit acpi_hed_exit(void)
{
acpi_bus_unregister_driver(&acpi_hed_driver);
}
module_init(acpi_hed_init);
module_exit(acpi_hed_exit);
ACPI_MODULE_NAME("hed");
MODULE_AUTHOR("Huang Ying");
MODULE_DESCRIPTION("ACPI Hardware Error Device Driver");
MODULE_LICENSE("GPL");
#include <linux/acpi.h>
#include <linux/pci.h>
#define PREFIX "ACPI: "
static inline unsigned long parse_acpi_hest_ia_machine_check(struct acpi_hest_ia_machine_check *p)
{
return sizeof(*p) +
(sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks);
}
static inline unsigned long parse_acpi_hest_ia_corrected(struct acpi_hest_ia_corrected *p)
{
return sizeof(*p) +
(sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks);
}
static inline unsigned long parse_acpi_hest_ia_nmi(struct acpi_hest_ia_nmi *p)
{
return sizeof(*p);
}
static inline unsigned long parse_acpi_hest_generic(struct acpi_hest_generic *p)
{
return sizeof(*p);
}
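/*
 * Match a HEST AER source entry against a specific PCI device
 * (PCI domain 0 only) by bus/device/function.
 */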
static inline unsigned int hest_match_pci(struct acpi_hest_aer_common *p, struct pci_dev *pci)
{
return (0 == pci_domain_nr(pci->bus) &&
p->bus == pci->bus->number &&
p->device == PCI_SLOT(pci->devfn) &&
p->function == PCI_FUNC(pci->devfn));
}
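/*
 * Return the size of this AER-related HEST entry and, when the entry
 * applies to the given PCI device (either a matching global entry or
 * an exact bus/device/function match), record whether the firmware
 * handles its AER errors first ("firmware first").
 */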
static unsigned long parse_acpi_hest_aer(void *hdr, int type, struct pci_dev *pci, int *firmware_first)
{
struct acpi_hest_aer_common *p = hdr + sizeof(struct acpi_hest_header);
unsigned long rc=0;
u8 pcie_type = 0;
u8 bridge = 0;
switch (type) {
case ACPI_HEST_TYPE_AER_ROOT_PORT:
rc = sizeof(struct acpi_hest_aer_root);
pcie_type = PCI_EXP_TYPE_ROOT_PORT;
break;
case ACPI_HEST_TYPE_AER_ENDPOINT:
rc = sizeof(struct acpi_hest_aer);
pcie_type = PCI_EXP_TYPE_ENDPOINT;
break;
case ACPI_HEST_TYPE_AER_BRIDGE:
rc = sizeof(struct acpi_hest_aer_bridge);
if ((pci->class >> 16) == PCI_BASE_CLASS_BRIDGE)
bridge = 1;
break;
}
if (p->flags & ACPI_HEST_GLOBAL) {
if ((pci->is_pcie && (pci->pcie_type == pcie_type)) || bridge)
*firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
	} else if (hest_match_pci(p, pci))
		*firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
return rc;
}
static int acpi_hest_firmware_first(struct acpi_table_header *stdheader, struct pci_dev *pci)
{
struct acpi_table_hest *hest = (struct acpi_table_hest *)stdheader;
void *p = (void *)hest + sizeof(*hest); /* defined by the ACPI 4.0 spec */
struct acpi_hest_header *hdr = p;
int i;
int firmware_first = 0;
static unsigned char printed_unused = 0;
static unsigned char printed_reserved = 0;
for (i=0, hdr=p; p < (((void *)hest) + hest->header.length) && i < hest->error_source_count; i++) {
switch (hdr->type) {
case ACPI_HEST_TYPE_IA32_CHECK:
p += parse_acpi_hest_ia_machine_check(p);
break;
case ACPI_HEST_TYPE_IA32_CORRECTED_CHECK:
p += parse_acpi_hest_ia_corrected(p);
break;
case ACPI_HEST_TYPE_IA32_NMI:
p += parse_acpi_hest_ia_nmi(p);
break;
/* These three should never appear */
case ACPI_HEST_TYPE_NOT_USED3:
case ACPI_HEST_TYPE_NOT_USED4:
case ACPI_HEST_TYPE_NOT_USED5:
if (!printed_unused) {
printk(KERN_DEBUG PREFIX
"HEST Error Source list contains an obsolete type (%d).\n", hdr->type);
printed_unused = 1;
}
break;
case ACPI_HEST_TYPE_AER_ROOT_PORT:
case ACPI_HEST_TYPE_AER_ENDPOINT:
case ACPI_HEST_TYPE_AER_BRIDGE:
p += parse_acpi_hest_aer(p, hdr->type, pci, &firmware_first);
break;
case ACPI_HEST_TYPE_GENERIC_ERROR:
p += parse_acpi_hest_generic(p);
break;
/* These should never appear either */
case ACPI_HEST_TYPE_RESERVED:
default:
if (!printed_reserved) {
printk(KERN_DEBUG PREFIX
"HEST Error Source list contains a reserved type (%d).\n", hdr->type);
printed_reserved = 1;
}
break;
}
}
return firmware_first;
}
int acpi_hest_firmware_first_pci(struct pci_dev *pci)
{
acpi_status status = AE_NOT_FOUND;
struct acpi_table_header *hest = NULL;
if (acpi_disabled)
return 0;
status = acpi_get_table(ACPI_SIG_HEST, 1, &hest);
if (ACPI_SUCCESS(status)) {
if (acpi_hest_firmware_first(hest, pci)) {
return 1;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(acpi_hest_firmware_first_pci);
......@@ -120,7 +120,8 @@ acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus)
struct acpi_pci_root *root;
list_for_each_entry(root, &acpi_pci_roots, node)
if ((root->segment == (u16) seg) && (root->bus_nr == (u16) bus))
if ((root->segment == (u16) seg) &&
(root->secondary.start == (u16) bus))
return root->device->handle;
return NULL;
}
......@@ -154,7 +155,7 @@ EXPORT_SYMBOL_GPL(acpi_is_root_bridge);
static acpi_status
get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
{
int *busnr = data;
struct resource *res = data;
struct acpi_resource_address64 address;
if (resource->type != ACPI_RESOURCE_TYPE_ADDRESS16 &&
......@@ -164,28 +165,27 @@ get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
acpi_resource_to_address64(resource, &address);
if ((address.address_length > 0) &&
(address.resource_type == ACPI_BUS_NUMBER_RANGE))
*busnr = address.minimum;
(address.resource_type == ACPI_BUS_NUMBER_RANGE)) {
res->start = address.minimum;
res->end = address.minimum + address.address_length - 1;
}
return AE_OK;
}
static acpi_status try_get_root_bridge_busnr(acpi_handle handle,
unsigned long long *bus)
struct resource *res)
{
acpi_status status;
int busnum;
busnum = -1;
res->start = -1;
status =
acpi_walk_resources(handle, METHOD_NAME__CRS,
get_root_bridge_busnr_callback, &busnum);
get_root_bridge_busnr_callback, res);
if (ACPI_FAILURE(status))
return status;
/* Check if we really get a bus number from _CRS */
if (busnum == -1)
if (res->start == -1)
return AE_ERROR;
*bus = busnum;
return AE_OK;
}
......@@ -429,34 +429,47 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
struct acpi_device *child;
u32 flags, base_flags;
root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
if (!root)
return -ENOMEM;
segment = 0;
status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL,
&segment);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
printk(KERN_ERR PREFIX "can't evaluate _SEG\n");
return -ENODEV;
result = -ENODEV;
goto end;
}
/* Check _CRS first, then _BBN. If no _BBN, default to zero. */
bus = 0;
status = try_get_root_bridge_busnr(device->handle, &bus);
root->secondary.flags = IORESOURCE_BUS;
status = try_get_root_bridge_busnr(device->handle, &root->secondary);
if (ACPI_FAILURE(status)) {
/*
* We need both the start and end of the downstream bus range
* to interpret _CBA (MMCONFIG base address), so it really is
* supposed to be in _CRS. If we don't find it there, all we
* can do is assume [_BBN-0xFF] or [0-0xFF].
*/
root->secondary.end = 0xFF;
printk(KERN_WARNING FW_BUG PREFIX
"no secondary bus range in _CRS\n");
status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
printk(KERN_ERR PREFIX
"no bus number in _CRS and can't evaluate _BBN\n");
return -ENODEV;
if (ACPI_SUCCESS(status))
root->secondary.start = bus;
else if (status == AE_NOT_FOUND)
root->secondary.start = 0;
else {
printk(KERN_ERR PREFIX "can't evaluate _BBN\n");
result = -ENODEV;
goto end;
}
}
root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
if (!root)
return -ENOMEM;
INIT_LIST_HEAD(&root->node);
root->device = device;
root->segment = segment & 0xFFFF;
root->bus_nr = bus & 0xFF;
strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
device->driver_data = root;
......@@ -475,9 +488,9 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
/* TBD: Locking */
list_add_tail(&root->node, &acpi_pci_roots);
printk(KERN_INFO PREFIX "%s [%s] (%04x:%02x)\n",
printk(KERN_INFO PREFIX "%s [%s] (domain %04x %pR)\n",
acpi_device_name(device), acpi_device_bid(device),
root->segment, root->bus_nr);
root->segment, &root->secondary);
/*
* Scan the Root Bridge
......@@ -486,11 +499,11 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
* PCI namespace does not get created until this call is made (and
* thus the root bridge's pci_dev does not exist).
*/
root->bus = pci_acpi_scan_root(device, segment, bus);
root->bus = pci_acpi_scan_root(root);
if (!root->bus) {
printk(KERN_ERR PREFIX
"Bus %04x:%02x not present in PCI namespace\n",
root->segment, root->bus_nr);
root->segment, (unsigned int)root->secondary.start);
result = -ENODEV;
goto end;
}
......
......@@ -727,18 +727,8 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
break;
}
if (pr->power.states[i].promotion.state)
seq_printf(seq, "promotion[C%zd] ",
(pr->power.states[i].promotion.state -
pr->power.states));
else
seq_puts(seq, "promotion[--] ");
if (pr->power.states[i].demotion.state)
seq_printf(seq, "demotion[C%zd] ",
(pr->power.states[i].demotion.state -
pr->power.states));
else
seq_puts(seq, "demotion[--] ");
seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
......@@ -869,6 +859,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
struct acpi_processor *pr;
struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
ktime_t kt1, kt2;
s64 idle_time_ns;
s64 idle_time;
s64 sleep_ticks = 0;
......@@ -910,12 +901,14 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
sched_clock_idle_sleep_event();
acpi_idle_do_entry(cx);
kt2 = ktime_get_real();
idle_time = ktime_to_us(ktime_sub(kt2, kt1));
idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
idle_time = idle_time_ns;
do_div(idle_time, NSEC_PER_USEC);
sleep_ticks = us_to_pm_timer_ticks(idle_time);
/* Tell the scheduler how much we idled: */
sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
sched_clock_idle_wakeup_event(idle_time_ns);
local_irq_enable();
current_thread_info()->status |= TS_POLLING;
......@@ -943,6 +936,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
struct acpi_processor *pr;
struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
ktime_t kt1, kt2;
s64 idle_time_ns;
s64 idle_time;
s64 sleep_ticks = 0;
......@@ -1025,11 +1019,13 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
spin_unlock(&c3_lock);
}
kt2 = ktime_get_real();
idle_time = ktime_to_us(ktime_sub(kt2, kt1));
	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
idle_time = idle_time_ns;
do_div(idle_time, NSEC_PER_USEC);
sleep_ticks = us_to_pm_timer_ticks(idle_time);
/* Tell the scheduler how much we idled: */
sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
sched_clock_idle_wakeup_event(idle_time_ns);
local_irq_enable();
current_thread_info()->status |= TS_POLLING;
......
......@@ -80,22 +80,6 @@ static int acpi_sleep_prepare(u32 acpi_state)
#ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;
/*
* According to the ACPI specification the BIOS should make sure that ACPI is
* enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still,
* some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
* on such systems during resume. Unfortunately that doesn't help in
* particularly pathological cases in which SCI_EN has to be set directly on
* resume, although the specification states very clearly that this flag is
* owned by the hardware. The set_sci_en_on_resume variable will be set in such
* cases.
*/
static bool set_sci_en_on_resume;
void __init acpi_set_sci_en_on_resume(void)
{
set_sci_en_on_resume = true;
}
/*
* ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
......@@ -253,11 +237,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
break;
}
/* If ACPI is not enabled by the BIOS, we need to enable it here. */
if (set_sci_en_on_resume)
/* This violates the spec but is required for bug compatibility. */
acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
else
acpi_enable();
/* Reprogram control registers and execute _BFS */
acpi_leave_sleep_state_prep(acpi_state);
......@@ -346,12 +327,6 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
return 0;
}
static int __init init_set_sci_en_on_resume(const struct dmi_system_id *d)
{
set_sci_en_on_resume = true;
return 0;
}
static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
{
.callback = init_old_suspend_ordering,
......@@ -370,22 +345,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
},
},
{
.callback = init_set_sci_en_on_resume,
.ident = "Apple MacBook 1,1",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
},
},
{
.callback = init_set_sci_en_on_resume,
.ident = "Apple MacMini 1,1",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
},
},
{
.callback = init_old_suspend_ordering,
.ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
.matches = {
......@@ -394,94 +353,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
},
},
{
.callback = init_set_sci_en_on_resume,
.ident = "Toshiba Satellite L300",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"),
},
},
{
.callback = init_set_sci_en_on_resume,
.ident = "Hewlett-Packard HP G7000 Notebook PC",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP G7000 Notebook PC"),
},
},
{
.callback = init_set_sci_en_on_resume,
.ident = "Hewlett-Packard HP Pavilion dv3 Notebook PC",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv3 Notebook PC"),
},
},
{
.callback = init_set_sci_en_on_resume,
.ident = "Hewlett-Packard Pavilion dv4",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4"),
},
},
{
.callback = init_set_sci_en_on_resume,
.ident = "Hewlett-Packard Pavilion dv7",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv7"),
},
},
{
.callback = init_set_sci_en_on_resume,
.ident = "Hewlett-Packard Compaq Presario C700 Notebook PC",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario C700 Notebook PC"),
},
},
{
.callback = init_set_sci_en_on_resume,
.ident = "Hewlett-Packard Compaq Presario CQ40 Notebook PC",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario CQ40 Notebook PC"),
},
},
{
.callback = init_set_sci_en_on_resume,
.ident = "Lenovo ThinkPad T410",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
},
},
{
.callback = init_set_sci_en_on_resume,
.ident = "Lenovo ThinkPad T510",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
},
},
{
.callback = init_set_sci_en_on_resume,
.ident = "Lenovo ThinkPad W510",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
},
},
{
.callback = init_set_sci_en_on_resume,
.ident = "Lenovo ThinkPad X201[s]",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
},
},
{
.callback = init_old_suspend_ordering,
.ident = "Panasonic CF51-2L",
.matches = {
......@@ -490,30 +361,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
},
},
{
.callback = init_set_sci_en_on_resume,
.ident = "Dell Studio 1558",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1558"),
},
},
{
.callback = init_set_sci_en_on_resume,
.ident = "Dell Studio 1557",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1557"),
},
},
{
.callback = init_set_sci_en_on_resume,
.ident = "Dell Studio 1555",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1555"),
},
},
{},
};
#endif /* CONFIG_SUSPEND */
......
extern u8 sleep_states[];
extern int acpi_suspend (u32 state);
extern int acpi_suspend(u32 state);
extern void acpi_enable_wakeup_device_prep(u8 sleep_state);
extern void acpi_enable_wakeup_device(u8 sleep_state);
......
......@@ -213,7 +213,7 @@ acpi_table_parse_entries(char *id,
unsigned long table_end;
acpi_size tbl_size;
if (acpi_disabled && !acpi_ht)
if (acpi_disabled)
return -ENODEV;
if (!handler)
......@@ -280,7 +280,7 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler)
struct acpi_table_header *table = NULL;
acpi_size tbl_size;
if (acpi_disabled && !acpi_ht)
if (acpi_disabled)
return -ENODEV;
if (!handler)
......
......@@ -45,6 +45,7 @@
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <linux/suspend.h>
#include <acpi/video.h>
#define PREFIX "ACPI: "
......@@ -65,11 +66,6 @@
#define MAX_NAME_LEN 20
#define ACPI_VIDEO_DISPLAY_CRT 1
#define ACPI_VIDEO_DISPLAY_TV 2
#define ACPI_VIDEO_DISPLAY_DVI 3
#define ACPI_VIDEO_DISPLAY_LCD 4
#define _COMPONENT ACPI_VIDEO_COMPONENT
ACPI_MODULE_NAME("video");
......@@ -1007,11 +1003,11 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
result = acpi_video_init_brightness(device);
if (result)
return;
name = kzalloc(MAX_NAME_LEN, GFP_KERNEL);
name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
if (!name)
return;
count++;
sprintf(name, "acpi_video%d", count++);
memset(&props, 0, sizeof(struct backlight_properties));
props.max_brightness = device->brightness->count - 3;
device->backlight = backlight_device_register(name, NULL, device,
......@@ -1067,10 +1063,10 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
if (device->cap._DCS && device->cap._DSS) {
static int count;
char *name;
name = kzalloc(MAX_NAME_LEN, GFP_KERNEL);
name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
if (!name)
return;
sprintf(name, "acpi_video%d", count++);
count++;
device->output_dev = video_output_register(name,
NULL, device, &acpi_output_properties);
kfree(name);
......@@ -1747,12 +1743,28 @@ acpi_video_get_device_attr(struct acpi_video_bus *video, unsigned long device_id
return NULL;
}
static int
acpi_video_get_device_type(struct acpi_video_bus *video,
unsigned long device_id)
{
struct acpi_video_enumerated_device *ids;
int i;
for (i = 0; i < video->attached_count; i++) {
ids = &video->attached_array[i];
if ((ids->value.int_val & 0xffff) == device_id)
return ids->value.int_val;
}
return 0;
}
static int
acpi_video_bus_get_one_device(struct acpi_device *device,
struct acpi_video_bus *video)
{
unsigned long long device_id;
int status;
int status, device_type;
struct acpi_video_device *data;
struct acpi_video_device_attrib* attribute;
......@@ -1797,8 +1809,25 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
}
if(attribute->bios_can_detect)
data->flags.bios = 1;
} else
} else {
/* Check for legacy IDs */
device_type = acpi_video_get_device_type(video,
device_id);
/* Ignore bits 16 and 18-20 */
switch (device_type & 0xffe2ffff) {
case ACPI_VIDEO_DISPLAY_LEGACY_MONITOR:
data->flags.crt = 1;
break;
case ACPI_VIDEO_DISPLAY_LEGACY_PANEL:
data->flags.lcd = 1;
break;
case ACPI_VIDEO_DISPLAY_LEGACY_TV:
data->flags.tvout = 1;
break;
default:
data->flags.unknown = 1;
}
}
acpi_video_device_bind(video, data);
acpi_video_device_find_cap(data);
......@@ -2032,6 +2061,71 @@ acpi_video_switch_brightness(struct acpi_video_device *device, int event)
return result;
}
int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
void **edid)
{
struct acpi_video_bus *video;
struct acpi_video_device *video_device;
union acpi_object *buffer = NULL;
acpi_status status;
int i, length;
if (!device || !acpi_driver_data(device))
return -EINVAL;
video = acpi_driver_data(device);
for (i = 0; i < video->attached_count; i++) {
video_device = video->attached_array[i].bind_info;
length = 256;
if (!video_device)
continue;
if (type) {
switch (type) {
case ACPI_VIDEO_DISPLAY_CRT:
if (!video_device->flags.crt)
continue;
break;
case ACPI_VIDEO_DISPLAY_TV:
if (!video_device->flags.tvout)
continue;
break;
case ACPI_VIDEO_DISPLAY_DVI:
if (!video_device->flags.dvi)
continue;
break;
case ACPI_VIDEO_DISPLAY_LCD:
if (!video_device->flags.lcd)
continue;
break;
}
} else if (video_device->device_id != device_id) {
continue;
}
status = acpi_video_device_EDID(video_device, &buffer, length);
if (ACPI_FAILURE(status) || !buffer ||
buffer->type != ACPI_TYPE_BUFFER) {
length = 128;
status = acpi_video_device_EDID(video_device, &buffer,
length);
if (ACPI_FAILURE(status) || !buffer ||
buffer->type != ACPI_TYPE_BUFFER) {
continue;
}
}
*edid = buffer->buffer.pointer;
return length;
}
return -ENODEV;
}
EXPORT_SYMBOL(acpi_video_get_edid);
static int
acpi_video_bus_get_devices(struct acpi_video_bus *video,
struct acpi_device *device)
......
......@@ -250,7 +250,7 @@ static int __init acpi_backlight(char *str)
ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR;
if (!strcmp("video", str))
acpi_video_support |=
ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO;
ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO;
}
return 1;
}
......
......@@ -130,4 +130,21 @@ static inline int aer_osc_setup(struct pcie_device *pciedev)
}
#endif
#ifdef CONFIG_ACPI_APEI
extern int pcie_aer_get_firmware_first(struct pci_dev *pci_dev);
#else
static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
{
if (pci_dev->__aer_firmware_first_valid)
return pci_dev->__aer_firmware_first;
return 0;
}
#endif
static inline void pcie_aer_force_firmware_first(struct pci_dev *pci_dev,
int enable)
{
pci_dev->__aer_firmware_first = !!enable;
pci_dev->__aer_firmware_first_valid = 1;
}
#endif /* _AERDRV_H_ */
......@@ -16,6 +16,7 @@
#include <linux/acpi.h>
#include <linux/pci-acpi.h>
#include <linux/delay.h>
#include <acpi/apei.h>
#include "aerdrv.h"
/**
......@@ -53,3 +54,79 @@ int aer_osc_setup(struct pcie_device *pciedev)
return 0;
}
#ifdef CONFIG_ACPI_APEI
static inline int hest_match_pci(struct acpi_hest_aer_common *p,
struct pci_dev *pci)
{
return (0 == pci_domain_nr(pci->bus) &&
p->bus == pci->bus->number &&
p->device == PCI_SLOT(pci->devfn) &&
p->function == PCI_FUNC(pci->devfn));
}
struct aer_hest_parse_info {
struct pci_dev *pci_dev;
int firmware_first;
};
static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data)
{
struct aer_hest_parse_info *info = data;
struct acpi_hest_aer_common *p;
u8 pcie_type = 0;
u8 bridge = 0;
int ff = 0;
switch (hest_hdr->type) {
case ACPI_HEST_TYPE_AER_ROOT_PORT:
pcie_type = PCI_EXP_TYPE_ROOT_PORT;
break;
case ACPI_HEST_TYPE_AER_ENDPOINT:
pcie_type = PCI_EXP_TYPE_ENDPOINT;
break;
case ACPI_HEST_TYPE_AER_BRIDGE:
if ((info->pci_dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
bridge = 1;
break;
default:
return 0;
}
p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
if (p->flags & ACPI_HEST_GLOBAL) {
if ((info->pci_dev->is_pcie &&
info->pci_dev->pcie_type == pcie_type) || bridge)
ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
} else
if (hest_match_pci(p, info->pci_dev))
ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
info->firmware_first = ff;
return 0;
}
static void aer_set_firmware_first(struct pci_dev *pci_dev)
{
int rc;
struct aer_hest_parse_info info = {
.pci_dev = pci_dev,
.firmware_first = 0,
};
rc = apei_hest_parse(aer_hest_parse, &info);
if (rc)
pci_dev->__aer_firmware_first = 0;
else
pci_dev->__aer_firmware_first = info.firmware_first;
pci_dev->__aer_firmware_first_valid = 1;
}
int pcie_aer_get_firmware_first(struct pci_dev *dev)
{
if (!dev->__aer_firmware_first_valid)
aer_set_firmware_first(dev);
return dev->__aer_firmware_first;
}
#endif
......@@ -36,7 +36,7 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev)
u16 reg16 = 0;
int pos;
if (dev->aer_firmware_first)
if (pcie_aer_get_firmware_first(dev))
return -EIO;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
......@@ -63,7 +63,7 @@ int pci_disable_pcie_error_reporting(struct pci_dev *dev)
u16 reg16 = 0;
int pos;
if (dev->aer_firmware_first)
if (pcie_aer_get_firmware_first(dev))
return -EIO;
pos = pci_pcie_cap(dev);
......@@ -771,7 +771,7 @@ void aer_isr(struct work_struct *work)
*/
int aer_init(struct pcie_device *dev)
{
if (dev->port->aer_firmware_first) {
if (pcie_aer_get_firmware_first(dev->port)) {
dev_printk(KERN_DEBUG, &dev->device,
"PCIe errors handled by platform firmware.\n");
goto out;
......@@ -785,7 +785,7 @@ int aer_init(struct pcie_device *dev)
if (forceload) {
dev_printk(KERN_DEBUG, &dev->device,
"aerdrv forceload requested.\n");
dev->port->aer_firmware_first = 0;
pcie_aer_force_firmware_first(dev->port, 0);
return 0;
}
return -ENXIO;
......
......@@ -10,7 +10,6 @@
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
#include <acpi/acpi_hest.h>
#include "pci.h"
#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
......@@ -904,12 +903,6 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev)
pdev->is_hotplug_bridge = 1;
}
static void set_pci_aer_firmware_first(struct pci_dev *pdev)
{
if (acpi_hest_firmware_first_pci(pdev))
pdev->aer_firmware_first = 1;
}
#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
/**
......@@ -939,7 +932,6 @@ int pci_setup_device(struct pci_dev *dev)
dev->multifunction = !!(hdr_type & 0x80);
dev->error_state = pci_channel_io_normal;
set_pcie_port_type(dev);
set_pci_aer_firmware_first(dev);
list_for_each_entry(slot, &dev->bus->slots, list)
if (PCI_SLOT(dev->devfn) == slot->number)
......
......@@ -277,8 +277,10 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_x32, debugfs_u32_get, debugfs_u32_set, "0x%08llx\n"
DEFINE_SIMPLE_ATTRIBUTE(fops_x32_ro, debugfs_u32_get, NULL, "0x%08llx\n");
DEFINE_SIMPLE_ATTRIBUTE(fops_x32_wo, NULL, debugfs_u32_set, "0x%08llx\n");
DEFINE_SIMPLE_ATTRIBUTE(fops_x64, debugfs_u64_get, debugfs_u64_set, "0x%016llx\n");
/*
* debugfs_create_x{8,16,32} - create a debugfs file that is used to read and write an unsigned {8,16,32}-bit value
* debugfs_create_x{8,16,32,64} - create a debugfs file that is used to read and write an unsigned {8,16,32,64}-bit value
*
* These functions are exactly the same as the above functions (but use a hex
* output for the decimal challenged). For details look at the above unsigned
......@@ -357,6 +359,23 @@ struct dentry *debugfs_create_x32(const char *name, mode_t mode,
}
EXPORT_SYMBOL_GPL(debugfs_create_x32);
/**
* debugfs_create_x64 - create a debugfs file that is used to read and write an unsigned 64-bit value
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the debugfs filesystem.
* @value: a pointer to the variable that the file should read to and write
* from.
*/
struct dentry *debugfs_create_x64(const char *name, mode_t mode,
struct dentry *parent, u64 *value)
{
return debugfs_create_file(name, mode, parent, value, &fops_x64);
}
EXPORT_SYMBOL_GPL(debugfs_create_x64);
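For reference, a minimal usage sketch of the new debugfs_create_x64() helper. It is not part of this commit; the directory name, file name, and variable names are illustrative only:

	#include <linux/debugfs.h>
	#include <linux/module.h>

	static u64 example_value;		/* hypothetical 64-bit value to expose */
	static struct dentry *example_dir;

	static int __init example_debugfs_init(void)
	{
		example_dir = debugfs_create_dir("example", NULL);
		if (!example_dir)
			return -ENOMEM;
		/* read/write hex file backed directly by example_value */
		debugfs_create_x64("value", 0600, example_dir, &example_value);
		return 0;
	}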
static int debugfs_size_t_set(void *data, u64 val)
{
......
......@@ -373,7 +373,7 @@ struct acpi_pci_root {
struct acpi_pci_id id;
struct pci_bus *bus;
u16 segment;
u8 bus_nr;
struct resource secondary; /* downstream bus range */
u32 osc_support_set; /* _OSC state of support bits */
u32 osc_control_set; /* _OSC state of control bits */
......
......@@ -104,8 +104,7 @@ int acpi_pci_bind_root(struct acpi_device *device);
/* Arch-defined function to add a bus to the system */
struct pci_bus *pci_acpi_scan_root(struct acpi_device *device, int domain,
int bus);
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root);
void pci_acpi_crs_quirks(void);
/* --------------------------------------------------------------------------
......
#ifndef __ACPI_HEST_H
#define __ACPI_HEST_H
#include <linux/pci.h>
#ifdef CONFIG_ACPI
extern int acpi_hest_firmware_first_pci(struct pci_dev *pci);
#else
static inline int acpi_hest_firmware_first_pci(struct pci_dev *pci) { return 0; }
#endif
#endif
/*
* apei.h - ACPI Platform Error Interface
*/
#ifndef ACPI_APEI_H
#define ACPI_APEI_H
#include <linux/acpi.h>
#include <linux/cper.h>
#include <asm/ioctls.h>
#define APEI_ERST_INVALID_RECORD_ID 0xffffffffffffffffULL
#define APEI_ERST_CLEAR_RECORD _IOW('E', 1, u64)
#define APEI_ERST_GET_RECORD_COUNT _IOR('E', 2, u32)
#ifdef __KERNEL__
extern int hest_disable;
extern int erst_disable;
typedef int (*apei_hest_func_t)(struct acpi_hest_header *hest_hdr, void *data);
int apei_hest_parse(apei_hest_func_t func, void *data);
int erst_write(const struct cper_record_header *record);
ssize_t erst_get_record_count(void);
int erst_get_next_record_id(u64 *record_id);
ssize_t erst_read(u64 record_id, struct cper_record_header *record,
size_t buflen);
ssize_t erst_read_next(struct cper_record_header *record, size_t buflen);
int erst_clear(u64 record_id);
#endif
#endif
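As a rough illustration of how the ERST interface declared above is meant to be consumed, the sketch below reads one record. It assumes a zero return from erst_get_next_record_id() means success; the function name and the 4096-byte buffer size are arbitrary choices, not part of this commit:

	#include <linux/kernel.h>
	#include <linux/slab.h>
	#include <acpi/apei.h>

	static void example_dump_one_erst_record(void)
	{
		struct cper_record_header *rcd;
		u64 record_id;
		ssize_t len;

		rcd = kmalloc(4096, GFP_KERNEL);	/* arbitrary buffer size */
		if (!rcd)
			return;
		if (!erst_get_next_record_id(&record_id) &&
		    record_id != APEI_ERST_INVALID_RECORD_ID) {
			len = erst_read(record_id, rcd, 4096);
			if (len > 0)
				printk(KERN_INFO "ERST record %llu: %zd bytes\n",
				       (unsigned long long)record_id, len);
		}
		kfree(rcd);
	}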
#ifndef ACPI_ATOMIC_IO_H
#define ACPI_ATOMIC_IO_H
int acpi_pre_map_gar(struct acpi_generic_address *reg);
int acpi_post_unmap_gar(struct acpi_generic_address *reg);
int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg);
int acpi_atomic_write(u64 val, struct acpi_generic_address *reg);
#endif
/*
* hed.h - ACPI Hardware Error Device
*
* Copyright (C) 2009, Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
*
* This file is released under the GPLv2.
*/
#ifndef ACPI_HED_H
#define ACPI_HED_H
#include <linux/notifier.h>
int register_acpi_hed_notifier(struct notifier_block *nb);
void unregister_acpi_hed_notifier(struct notifier_block *nb);
#endif
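A minimal sketch of hooking the Hardware Error Device notifier chain exported above; the handler name and its (empty) body are placeholders, not code from this series:

	#include <linux/notifier.h>
	#include <acpi/hed.h>

	static int example_hed_notify(struct notifier_block *nb,
				      unsigned long val, void *data)
	{
		/* a Hardware Error Device SCI fired; poll error sources here */
		return NOTIFY_OK;
	}

	static struct notifier_block example_hed_nb = {
		.notifier_call = example_hed_notify,
	};

	/* call register_acpi_hed_notifier(&example_hed_nb) at init time and
	 * unregister_acpi_hed_notifier(&example_hed_nb) on teardown */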
......@@ -52,17 +52,6 @@ struct acpi_power_register {
u64 address;
} __attribute__ ((packed));
struct acpi_processor_cx_policy {
u32 count;
struct acpi_processor_cx *state;
struct {
u32 time;
u32 ticks;
u32 count;
u32 bm;
} threshold;
};
struct acpi_processor_cx {
u8 valid;
u8 type;
......@@ -74,8 +63,6 @@ struct acpi_processor_cx {
u32 power;
u32 usage;
u64 time;
struct acpi_processor_cx_policy promotion;
struct acpi_processor_cx_policy demotion;
char desc[ACPI_CX_DESC_LEN];
};
......
#ifndef __ACPI_VIDEO_H
#define __ACPI_VIDEO_H
#define ACPI_VIDEO_DISPLAY_CRT 1
#define ACPI_VIDEO_DISPLAY_TV 2
#define ACPI_VIDEO_DISPLAY_DVI 3
#define ACPI_VIDEO_DISPLAY_LCD 4
#define ACPI_VIDEO_DISPLAY_LEGACY_MONITOR 0x0100
#define ACPI_VIDEO_DISPLAY_LEGACY_PANEL 0x0110
#define ACPI_VIDEO_DISPLAY_LEGACY_TV 0x0200
#if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE)
extern int acpi_video_register(void);
extern void acpi_video_unregister(void);
extern int acpi_video_get_edid(struct acpi_device *device, int type,
int device_id, void **edid);
#else
static inline int acpi_video_register(void) { return 0; }
static inline void acpi_video_unregister(void) { return; }
static inline int acpi_video_get_edid(struct acpi_device *device, int type,
int device_id, void **edid)
{
return -ENODEV;
}
#endif
#endif
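A hedged example of calling the new acpi_video_get_edid() export from a graphics driver: a type-based lookup ignores device_id, so 0 is passed for it here. The function name is illustrative and the acpi_device pointer is assumed to have been obtained elsewhere:

	#include <linux/kernel.h>
	#include <acpi/acpi_bus.h>
	#include <acpi/video.h>

	static void example_fetch_lcd_edid(struct acpi_device *adev)
	{
		void *edid;
		int len;

		/* type-based lookup: first attached LCD panel */
		len = acpi_video_get_edid(adev, ACPI_VIDEO_DISPLAY_LCD, 0, &edid);
		if (len > 0)
			printk(KERN_INFO "EDID: %d bytes\n", len);
	}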
......
......@@ -254,7 +254,6 @@ int acpi_resources_are_enforced(void);
void __init acpi_no_s4_hw_signature(void);
void __init acpi_old_suspend_ordering(void);
void __init acpi_s4_no_nvs(void);
void __init acpi_set_sci_en_on_resume(void);
#endif /* CONFIG_PM_SLEEP */
struct acpi_osc_context {
......
/*
* UEFI Common Platform Error Record
*
* Copyright (C) 2010, Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef LINUX_CPER_H
#define LINUX_CPER_H
#include <linux/uuid.h>
/* CPER record signature and the size */
#define CPER_SIG_RECORD "CPER"
#define CPER_SIG_SIZE 4
/* Used in signature_end field in struct cper_record_header */
#define CPER_SIG_END 0xffffffff
/*
* CPER record header revision, used in revision field in struct
* cper_record_header
*/
#define CPER_RECORD_REV 0x0100
/*
* Severity definition for error_severity in struct cper_record_header
* and section_severity in struct cper_section_descriptor
*/
#define CPER_SER_RECOVERABLE 0x0
#define CPER_SER_FATAL 0x1
#define CPER_SER_CORRECTED 0x2
#define CPER_SER_INFORMATIONAL 0x3
/*
* Validation bits definition for validation_bits in struct
* cper_record_header. If set, corresponding fields in struct
* cper_record_header contain valid information.
*
* corresponds to platform_id
*/
#define CPER_VALID_PLATFORM_ID 0x0001
/* corresponds to timestamp */
#define CPER_VALID_TIMESTAMP 0x0002
/* corresponds to partition_id */
#define CPER_VALID_PARTITION_ID 0x0004
/*
* Notification type used to generate error record, used in
* notification_type in struct cper_record_header
*
* Corrected Machine Check
*/
#define CPER_NOTIFY_CMC \
UUID_LE(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \
0xEB, 0xD4, 0xF8, 0x90)
/* Corrected Platform Error */
#define CPER_NOTIFY_CPE \
UUID_LE(0x4E292F96, 0xD843, 0x4a55, 0xA8, 0xC2, 0xD4, 0x81, \
0xF2, 0x7E, 0xBE, 0xEE)
/* Machine Check Exception */
#define CPER_NOTIFY_MCE \
UUID_LE(0xE8F56FFE, 0x919C, 0x4cc5, 0xBA, 0x88, 0x65, 0xAB, \
0xE1, 0x49, 0x13, 0xBB)
/* PCI Express Error */
#define CPER_NOTIFY_PCIE \
UUID_LE(0xCF93C01F, 0x1A16, 0x4dfc, 0xB8, 0xBC, 0x9C, 0x4D, \
0xAF, 0x67, 0xC1, 0x04)
/* INIT Record (for IPF) */
#define CPER_NOTIFY_INIT \
UUID_LE(0xCC5263E8, 0x9308, 0x454a, 0x89, 0xD0, 0x34, 0x0B, \
0xD3, 0x9B, 0xC9, 0x8E)
/* Non-Maskable Interrupt */
#define CPER_NOTIFY_NMI \
UUID_LE(0x5BAD89FF, 0xB7E6, 0x42c9, 0x81, 0x4A, 0xCF, 0x24, \
0x85, 0xD6, 0xE9, 0x8A)
/* BOOT Error Record */
#define CPER_NOTIFY_BOOT \
UUID_LE(0x3D61A466, 0xAB40, 0x409a, 0xA6, 0x98, 0xF3, 0x62, \
0xD4, 0x64, 0xB3, 0x8F)
/* DMA Remapping Error */
#define CPER_NOTIFY_DMAR \
UUID_LE(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, \
0x72, 0x2D, 0xEB, 0x41)
/*
* Flags bits definitions for flags in struct cper_record_header
* If set, the error has been recovered
*/
#define CPER_HW_ERROR_FLAGS_RECOVERED 0x1
/* If set, the error is for previous boot */
#define CPER_HW_ERROR_FLAGS_PREVERR 0x2
/* If set, the error is injected for testing */
#define CPER_HW_ERROR_FLAGS_SIMULATED 0x4
/*
* CPER section header revision, used in revision field in struct
* cper_section_descriptor
*/
#define CPER_SEC_REV 0x0100
/*
* Validation bits definition for validation_bits in struct
* cper_section_descriptor. If set, corresponding fields in struct
* cper_section_descriptor contain valid information.
*
* corresponds to fru_id
*/
#define CPER_SEC_VALID_FRU_ID 0x1
/* corresponds to fru_text */
#define CPER_SEC_VALID_FRU_TEXT 0x2
/*
* Flags bits definitions for flags in struct cper_section_descriptor
*
* If set, the section is associated with the error condition
* directly, and should be focused on
*/
#define CPER_SEC_PRIMARY 0x0001
/*
* If set, the error was not contained within the processor or memory
* hierarchy and the error may have propagated to persistent storage
* or network
*/
#define CPER_SEC_CONTAINMENT_WARNING 0x0002
/* If set, the component must be re-initialized or re-enabled prior to use */
#define CPER_SEC_RESET 0x0004
/* If set, Linux may choose to discontinue use of the resource */
#define CPER_SEC_ERROR_THRESHOLD_EXCEEDED 0x0008
/*
* If set, resource could not be queried for error information due to
* conflicts with other system software or resources. Some fields of
* the section will be invalid
*/
#define CPER_SEC_RESOURCE_NOT_ACCESSIBLE 0x0010
/*
* If set, action has been taken to ensure error containment (such as
* poisoning data), but the error has not been fully corrected and the
* data has not been consumed. Linux may choose to take further
* corrective action before the data is consumed
*/
#define CPER_SEC_LATENT_ERROR 0x0020
/*
* Section type definitions, used in section_type field in struct
* cper_section_descriptor
*
* Processor Generic
*/
#define CPER_SEC_PROC_GENERIC \
UUID_LE(0x9876CCAD, 0x47B4, 0x4bdb, 0xB6, 0x5E, 0x16, 0xF1, \
0x93, 0xC4, 0xF3, 0xDB)
/* Processor Specific: X86/X86_64 */
#define CPER_SEC_PROC_IA \
UUID_LE(0xDC3EA0B0, 0xA144, 0x4797, 0xB9, 0x5B, 0x53, 0xFA, \
0x24, 0x2B, 0x6E, 0x1D)
/* Processor Specific: IA64 */
#define CPER_SEC_PROC_IPF \
UUID_LE(0xE429FAF1, 0x3CB7, 0x11D4, 0xBC, 0xA7, 0x00, 0x80, \
0xC7, 0x3C, 0x88, 0x81)
/* Platform Memory */
#define CPER_SEC_PLATFORM_MEM \
UUID_LE(0xA5BC1114, 0x6F64, 0x4EDE, 0xB8, 0x63, 0x3E, 0x83, \
0xED, 0x7C, 0x83, 0xB1)
#define CPER_SEC_PCIE \
UUID_LE(0xD995E954, 0xBBC1, 0x430F, 0xAD, 0x91, 0xB4, 0x4D, \
0xCB, 0x3C, 0x6F, 0x35)
/* Firmware Error Record Reference */
#define CPER_SEC_FW_ERR_REC_REF \
UUID_LE(0x81212A96, 0x09ED, 0x4996, 0x94, 0x71, 0x8D, 0x72, \
0x9C, 0x8E, 0x69, 0xED)
/* PCI/PCI-X Bus */
#define CPER_SEC_PCI_X_BUS \
UUID_LE(0xC5753963, 0x3B84, 0x4095, 0xBF, 0x78, 0xED, 0xDA, \
0xD3, 0xF9, 0xC9, 0xDD)
/* PCI Component/Device */
#define CPER_SEC_PCI_DEV \
UUID_LE(0xEB5E4685, 0xCA66, 0x4769, 0xB6, 0xA2, 0x26, 0x06, \
0x8B, 0x00, 0x13, 0x26)
#define CPER_SEC_DMAR_GENERIC \
UUID_LE(0x5B51FEF7, 0xC79D, 0x4434, 0x8F, 0x1B, 0xAA, 0x62, \
0xDE, 0x3E, 0x2C, 0x64)
/* Intel VT for Directed I/O specific DMAr */
#define CPER_SEC_DMAR_VT \
UUID_LE(0x71761D37, 0x32B2, 0x45cd, 0xA7, 0xD0, 0xB0, 0xFE, \
0xDD, 0x93, 0xE8, 0xCF)
/* IOMMU specific DMAr */
#define CPER_SEC_DMAR_IOMMU \
UUID_LE(0x036F84E1, 0x7F37, 0x428c, 0xA7, 0x9E, 0x57, 0x5F, \
0xDF, 0xAA, 0x84, 0xEC)
/*
* All tables and structs must be byte-packed to match CPER
* specification, since the tables are provided by the system BIOS
*/
#pragma pack(1)
struct cper_record_header {
char signature[CPER_SIG_SIZE]; /* must be CPER_SIG_RECORD */
__u16 revision; /* must be CPER_RECORD_REV */
__u32 signature_end; /* must be CPER_SIG_END */
__u16 section_count;
__u32 error_severity;
__u32 validation_bits;
__u32 record_length;
__u64 timestamp;
uuid_le platform_id;
uuid_le partition_id;
uuid_le creator_id;
uuid_le notification_type;
__u64 record_id;
__u32 flags;
__u64 persistence_information;
__u8 reserved[12]; /* must be zero */
};
struct cper_section_descriptor {
__u32 section_offset; /* Offset in bytes of the
* section body from the base
* of the record header */
__u32 section_length;
__u16 revision; /* must be CPER_RECORD_REV */
__u8 validation_bits;
__u8 reserved; /* must be zero */
__u32 flags;
uuid_le section_type;
uuid_le fru_id;
__u32 section_severity;
__u8 fru_text[20];
};
/* Generic Processor Error Section */
struct cper_sec_proc_generic {
__u64 validation_bits;
__u8 proc_type;
__u8 proc_isa;
__u8 proc_error_type;
__u8 operation;
__u8 flags;
__u8 level;
__u16 reserved;
__u64 cpu_version;
char cpu_brand[128];
__u64 proc_id;
__u64 target_addr;
__u64 requestor_id;
__u64 responder_id;
__u64 ip;
};
/* IA32/X64 Processor Error Section */
struct cper_sec_proc_ia {
__u64 validation_bits;
__u8 lapic_id;
__u8 cpuid[48];
};
/* IA32/X64 Processor Error Information Structure */
struct cper_ia_err_info {
uuid_le err_type;
__u64 validation_bits;
__u64 check_info;
__u64 target_id;
__u64 requestor_id;
__u64 responder_id;
__u64 ip;
};
/* IA32/X64 Processor Context Information Structure */
struct cper_ia_proc_ctx {
__u16 reg_ctx_type;
__u16 reg_arr_size;
__u32 msr_addr;
__u64 mm_reg_addr;
};
/* Memory Error Section */
struct cper_sec_mem_err {
__u64 validation_bits;
__u64 error_status;
__u64 physical_addr;
__u64 physical_addr_mask;
__u16 node;
__u16 card;
__u16 module;
__u16 bank;
__u16 device;
__u16 row;
__u16 column;
__u16 bit_pos;
__u64 requestor_id;
__u64 responder_id;
__u64 target_id;
__u8 error_type;
};
/* Reset to default packing */
#pragma pack()
u64 cper_next_record_id(void);
#endif
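To make the signature, revision, and severity constants above concrete, here is a sketch of initializing a record header before handing the record to erst_write(). Only the mandated constants come from this header; the section count, severity, notification type, and the helper name are illustrative:

	#include <linux/string.h>
	#include <linux/cper.h>

	static void example_fill_cper_header(struct cper_record_header *hdr,
					     u32 record_length)
	{
		memset(hdr, 0, sizeof(*hdr));
		memcpy(hdr->signature, CPER_SIG_RECORD, CPER_SIG_SIZE);
		hdr->revision = CPER_RECORD_REV;
		hdr->signature_end = CPER_SIG_END;
		hdr->section_count = 1;
		hdr->error_severity = CPER_SER_CORRECTED;
		hdr->record_length = record_length;
		hdr->notification_type = CPER_NOTIFY_MCE;	/* illustrative */
		hdr->record_id = cper_next_record_id();
	}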
......@@ -63,6 +63,8 @@ struct dentry *debugfs_create_x16(const char *name, mode_t mode,
struct dentry *parent, u16 *value);
struct dentry *debugfs_create_x32(const char *name, mode_t mode,
struct dentry *parent, u32 *value);
struct dentry *debugfs_create_x64(const char *name, mode_t mode,
struct dentry *parent, u64 *value);
struct dentry *debugfs_create_size_t(const char *name, mode_t mode,
struct dentry *parent, size_t *value);
struct dentry *debugfs_create_bool(const char *name, mode_t mode,
......
......@@ -311,7 +311,8 @@ struct pci_dev {
unsigned int is_virtfn:1;
unsigned int reset_fn:1;
unsigned int is_hotplug_bridge:1;
unsigned int aer_firmware_first:1;
unsigned int __aer_firmware_first_valid:1;
unsigned int __aer_firmware_first:1;
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
......
/*
* UUID/GUID definition
*
* Copyright (C) 2010, Intel Corp.
* Huang Ying <ying.huang@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _LINUX_UUID_H_
#define _LINUX_UUID_H_
#include <linux/types.h>
#include <linux/string.h>
typedef struct {
__u8 b[16];
} uuid_le;
typedef struct {
__u8 b[16];
} uuid_be;
#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
((uuid_le) \
{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
(b) & 0xff, ((b) >> 8) & 0xff, \
(c) & 0xff, ((c) >> 8) & 0xff, \
(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
#define UUID_BE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
((uuid_be) \
{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \
((b) >> 8) & 0xff, (b) & 0xff, \
((c) >> 8) & 0xff, (c) & 0xff, \
(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
#define NULL_UUID_LE \
UUID_LE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x00, 0x00, 0x00)
#define NULL_UUID_BE \
UUID_BE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x00, 0x00, 0x00)
static inline int uuid_le_cmp(const uuid_le u1, const uuid_le u2)
{
return memcmp(&u1, &u2, sizeof(uuid_le));
}
static inline int uuid_be_cmp(const uuid_be u1, const uuid_be u2)
{
return memcmp(&u1, &u2, sizeof(uuid_be));
}
extern void uuid_le_gen(uuid_le *u);
extern void uuid_be_gen(uuid_be *u);
#endif
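A short sketch of the new UUID helpers in use; the GUID value and all names below are made up for illustration and do not correspond to any firmware-defined GUID:

	#include <linux/uuid.h>

	/* illustrative GUID, not a real firmware-defined one */
	#define EXAMPLE_GUID \
		UUID_LE(0x12345678, 0x9abc, 0xdef0, 0x01, 0x23, 0x45, 0x67, \
			0x89, 0xab, 0xcd, 0xef)

	static int example_is_example_guid(const uuid_le *u)
	{
		return !uuid_le_cmp(*u, EXAMPLE_GUID);
	}

	static void example_generate(uuid_le *u)
	{
		uuid_le_gen(u);		/* random, version-4, little-endian UUID */
	}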
......@@ -21,7 +21,7 @@ lib-y += kobject.o kref.o klist.o
obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
string_helpers.o gcd.o lcm.o list_sort.o
string_helpers.o gcd.o lcm.o list_sort.o uuid.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
......
/*
* Unified UUID/GUID definition
*
* Copyright (C) 2009, Intel Corp.
* Huang Ying <ying.huang@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uuid.h>
#include <linux/random.h>
static void __uuid_gen_common(__u8 b[16])
{
int i;
u32 r;
for (i = 0; i < 4; i++) {
r = random32();
memcpy(b + i * 4, &r, 4);
}
/* variant bits: 0b10 (RFC 4122) */
b[8] = (b[8] & 0x3F) | 0x80;
}
void uuid_le_gen(uuid_le *lu)
{
__uuid_gen_common(lu->b);
/* version 4 : random generation */
lu->b[7] = (lu->b[7] & 0x0F) | 0x40;
}
EXPORT_SYMBOL_GPL(uuid_le_gen);
void uuid_be_gen(uuid_be *bu)
{
__uuid_gen_common(bu->b);
/* version 4 : random generation */
bu->b[6] = (bu->b[6] & 0x0F) | 0x40;
}
EXPORT_SYMBOL_GPL(uuid_be_gen);