Commit e54c378a authored by Anton Blanchard

ppc64: EEH cleanup from Todd Inglett

parent 84f2d288
......@@ -141,11 +141,12 @@ chrp_setup_arch(void)
fwnmi_init();
#ifndef CONFIG_PPC_ISERIES
/* Find and initialize PCI host bridges */
/* iSeries needs to be done much later. */
#ifndef CONFIG_PPC_ISERIES
eeh_init();
find_and_init_phbs();
#endif
#endif
/* Find the Open PIC if present */
root = find_path_device("/");
......
......@@ -43,25 +43,25 @@ static int ibm_set_eeh_option;
static int ibm_set_slot_reset;
static int ibm_read_slot_reset_state;
int eeh_implemented;
static int eeh_implemented;
#define EEH_MAX_OPTS 4096
static char *eeh_opts;
static int eeh_opts_last;
static int eeh_check_opts_config(struct pci_dev *dev, int default_state);
pte_t *find_linux_pte(pgd_t *pgdir, unsigned long va); /* from htab.c */
static int eeh_check_opts_config(struct device_node *dn,
int class_code, int vendor_id, int device_id,
int default_state);
unsigned long eeh_token(unsigned long phb, unsigned long bus, unsigned long devfn, unsigned long offset)
unsigned long eeh_token_to_phys(unsigned long token)
{
if (phb > 0xff)
panic("eeh_token: phb 0x%lx is too large\n", phb);
if (offset & 0x0fffffff00000000)
panic("eeh_token: offset 0x%lx is out of range\n", offset);
return ((IO_UNMAPPED_REGION_ID << 60) | (phb << 48UL) | ((bus & 0xff) << 40UL) | (devfn << 32UL) | (offset & 0xffffffff));
}
int eeh_get_state(unsigned long ea)
{
return 0;
if (REGION_ID(token) == EEH_REGION_ID) {
unsigned long vaddr = IO_TOKEN_TO_ADDR(token);
pte_t *ptep = find_linux_pte(ioremap_mm.pgd, vaddr);
unsigned long pa = pte_pfn(*ptep) << PAGE_SHIFT;
return pa | (vaddr & (PAGE_SIZE-1));
} else
return token;
}
/* Check for an eeh failure at the given token address.
......@@ -73,40 +73,54 @@ int eeh_get_state(unsigned long ea)
*/
unsigned long eeh_check_failure(void *token, unsigned long val)
{
unsigned long config_addr = (unsigned long)token >> 24; /* PPBBDDRR */
unsigned long phbidx = (config_addr >> 24) & 0xff;
struct pci_controller *phb;
unsigned long addr;
struct pci_dev *dev;
struct device_node *dn;
unsigned long ret, rets[2];
config_addr &= 0xffff00; /* 00BBDD00 */
/* An IO BAR access could get us here, or we could get here because EEH
* was forced on manually even though the hardware won't support it.
*/
if (!eeh_implemented || ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE)
return val;
if (phbidx >= global_phb_number) {
panic("EEH: checking token %p phb index of %ld is greater than max of %d\n", token, phbidx, global_phb_number-1);
/* Finding the phys addr + pci device is quite expensive.
* However, the RTAS call is MUCH slower.... :(
*/
addr = eeh_token_to_phys((unsigned long)token);
dev = pci_find_dev_by_addr(addr);
if (!dev) {
printk("EEH: no pci dev found for addr=0x%lx\n", addr);
return val;
}
dn = pci_device_to_OF_node(dev);
if (!dn) {
printk("EEH: no pci dn found for addr=0x%lx\n", addr);
return val;
}
phb = phbtab[phbidx];
/* Access to IO BARs might get this far and still not want checking. */
if (!(dn->eeh_mode & EEH_MODE_SUPPORTED) || dn->eeh_mode & EEH_MODE_NOCHECK)
return val;
/* Now test for an EEH failure. This is VERY expensive.
* Note that the eeh_config_addr may be a parent device
* in the case of a device behind a bridge, or it may be
* function zero of a multi-function device.
* In any case they must share a common PHB.
*/
if (dn->eeh_config_addr) {
ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets,
config_addr, BUID_HI(phb->buid), BUID_LO(phb->buid));
dn->eeh_config_addr, BUID_HI(dn->phb->buid), BUID_LO(dn->phb->buid));
if (ret == 0 && rets[1] == 1 && rets[0] >= 2) {
struct pci_dev *dev;
int bus = ((unsigned long)token >> 40) & 0xffff; /* include PHB# in bus */
int devfn = (config_addr >> 8) & 0xff;
dev = pci_find_slot(bus, devfn);
if (dev) {
printk(KERN_ERR "EEH: MMIO failure (%ld) on device:\n %s %s\n",
rets[0], dev->slot_name, dev->dev.name);
PPCDBG_ENTER_DEBUGGER();
panic("EEH: MMIO failure (%ld) on device:\n %s %s\n",
rets[0], dev->slot_name, dev->dev.name);
} else {
printk(KERN_ERR "EEH: MMIO failure (%ld) on device buid %lx, config_addr %lx\n", rets[0], phb->buid, config_addr);
PPCDBG_ENTER_DEBUGGER();
panic("EEH: MMIO failure (%ld) on device buid %lx, config_addr %lx\n", rets[0], phb->buid, config_addr);
}
}
eeh_false_positives++;
return val; /* good case */
}
struct eeh_early_enable_info {
......@@ -120,13 +134,69 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
{
struct eeh_early_enable_info *info = data;
long ret;
char *status = get_property(dn, "status", 0);
u32 *class_code = (u32 *)get_property(dn, "class-code", 0);
u32 *vendor_id =(u32 *) get_property(dn, "vendor-id", 0);
u32 *device_id = (u32 *)get_property(dn, "device-id", 0);
u32 *regs;
int enable;
if (status && strcmp(status, "ok") != 0)
return NULL; /* ignore devices with bad status */
/* Weed out PHBs or other bad nodes. */
if (!class_code || !vendor_id || !device_id)
return NULL;
/* Ignore known PHBs and EADs bridges */
if (*vendor_id == PCI_VENDOR_ID_IBM &&
(*device_id == 0x0102 || *device_id == 0x008b ||
*device_id == 0x0188 || *device_id == 0x0302))
return NULL;
/* Now decide if we are going to "Disable" EEH checking
* for this device. We still run with the EEH hardware active,
* but we won't be checking for ff's. This means a driver
* could return bad data (very bad!), an interrupt handler could
* hang waiting on status bits that won't change, etc.
* But there are a few cases, like display devices, where this makes sense.
*/
enable = 1; /* i.e. we will do checking */
if ((*class_code >> 16) == PCI_BASE_CLASS_DISPLAY)
enable = 0;
if (!eeh_check_opts_config(dn, *class_code, *vendor_id, *device_id, enable)) {
if (enable) {
printk(KERN_INFO "EEH: %s user requested to run without EEH.\n", dn->full_name);
enable = 0;
}
}
if (!enable)
dn->eeh_mode = EEH_MODE_NOCHECK;
/* This device may already have an EEH parent. */
if (dn->parent && (dn->parent->eeh_mode & EEH_MODE_SUPPORTED)) {
/* Parent supports EEH. */
dn->eeh_mode |= EEH_MODE_SUPPORTED;
dn->eeh_config_addr = dn->parent->eeh_config_addr;
return NULL;
}
/* Ok..see if this device supports EEH. */
regs = (u32 *)get_property(dn, "reg", 0);
if (regs) {
/* First register entry is addr (00BBSS00) */
/* Try to enable eeh */
ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
CONFIG_ADDR(dn->busno, dn->devfn),
info->buid_hi, info->buid_lo, EEH_ENABLE);
if (ret == 0)
regs[0], info->buid_hi, info->buid_lo,
EEH_ENABLE);
if (ret == 0) {
info->adapters_enabled++;
dn->eeh_mode |= EEH_MODE_SUPPORTED;
dn->eeh_config_addr = regs[0];
}
}
return NULL;
}
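early_enable_eeh() is written as a traverse_func callback; the per-PHB walk that drives it sits in the collapsed part of eeh_init() below. A hedged sketch of what that invocation presumably looks like follows -- eeh_enable_phb is a made-up helper name, and the eeh_early_enable_info fields are inferred from their use above, so treat this as an illustration rather than the patch's own code.

/* Sketch only -- not the literal code from this patch. */
static void eeh_enable_phb(struct device_node *phb_node, unsigned long buid)
{
	struct eeh_early_enable_info info;

	info.buid_hi = BUID_HI(buid);
	info.buid_lo = BUID_LO(buid);
	info.adapters_enabled = 0;

	/* Visit every PCI device node under this PHB; early_enable_eeh()
	 * sets each node's eeh_mode/eeh_config_addr and issues the RTAS
	 * ibm,set-eeh-option call for it. */
	traverse_pci_devices(phb_node, early_enable_eeh, NULL, &info);

	printk(KERN_INFO "EEH: %d adapters enabled under %s\n",
	       info.adapters_enabled, phb_node->full_name);
}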
......@@ -157,7 +227,13 @@ void eeh_init(void)
ibm_set_slot_reset = rtas_token("ibm,set-slot-reset");
ibm_read_slot_reset_state = rtas_token("ibm,read-slot-reset-state");
if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE)
/* Allow user to force eeh mode on or off -- even if the hardware
* doesn't exist. This allows driver writers to at least test use
* of I/O macros even if we can't actually test for EEH failure.
*/
if (eeh_force_on > eeh_force_off)
eeh_implemented = 1;
else if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE)
return;
if (eeh_force_off > eeh_force_on) {
......@@ -168,8 +244,6 @@ void eeh_init(void)
return;
}
if (eeh_force_on > eeh_force_off)
eeh_implemented = 1; /* User is forcing it on. */
/* Enable EEH for all adapters. Note that eeh requires buid's */
info.adapters_enabled = 0;
......@@ -197,44 +271,6 @@ void eeh_init(void)
}
/* Given a PCI device check if eeh should be configured or not.
* This may look at firmware properties and/or kernel cmdline options.
*/
int is_eeh_configured(struct pci_dev *dev)
{
struct device_node *dn = pci_device_to_OF_node(dev);
struct pci_controller *phb = PCI_GET_PHB_PTR(dev);
unsigned long ret, rets[2];
int eeh_capable;
int default_state = 1; /* default enable EEH if we can. */
if (dn == NULL || phb == NULL || !eeh_implemented)
return 0;
/* Hack: turn off eeh for display class devices by default.
* This fixes matrox accel framebuffer.
*/
if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
default_state = 0;
/* Ignore known PHBs and EADs bridges */
if (dev->vendor == PCI_VENDOR_ID_IBM &&
(dev->device == 0x0102 || dev->device == 0x008b))
default_state = 0;
if (!eeh_check_opts_config(dev, default_state)) {
if (default_state)
printk(KERN_INFO "EEH: %s %s user requested to run without EEH.\n", dev->slot_name, dev->dev.name);
return 0;
}
ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets,
CONFIG_ADDR(dn->busno, dn->devfn),
BUID_HI(phb->buid), BUID_LO(phb->buid));
eeh_capable = (ret == 0 && rets[1] == 1);
return eeh_capable;
}
int eeh_set_option(struct pci_dev *dev, int option)
{
struct device_node *dn = pci_device_to_OF_node(dev);
......@@ -249,6 +285,30 @@ int eeh_set_option(struct pci_dev *dev, int option)
}
/* If EEH is implemented, find the PCI device using the given phys addr
* and check to see if eeh failure checking is disabled.
* Remap the addr (trivially) to the EEH region if not.
* For addresses not known to PCI the vaddr is simply returned unchanged.
*/
void *eeh_ioremap(unsigned long addr, void *vaddr)
{
struct pci_dev *dev;
struct device_node *dn;
if (!eeh_implemented)
return vaddr;
dev = pci_find_dev_by_addr(addr);
if (!dev)
return vaddr;
dn = pci_device_to_OF_node(dev);
if (!dn)
return vaddr;
if (dn->eeh_mode & EEH_MODE_NOCHECK)
return vaddr;
return (void *)IO_ADDR_TO_TOKEN(vaddr);
}
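From a driver's point of view nothing changes at the call site: ioremap() hands back either a normal mapping or an EEH token, and the ordinary readl()/writel() macros do the right thing with both. A hedged illustration of that flow (my_driver_probe, my_dev and the 0x10 register offset are invented for the example):

/* Hypothetical driver fragment, for illustration only. */
static int my_driver_probe(struct pci_dev *my_dev)
{
	void *regs = ioremap(pci_resource_start(my_dev, 0),
			     pci_resource_len(my_dev, 0));
	u32 status;

	if (!regs)
		return -ENOMEM;
	/* For an EEH-checked device "regs" is an EEH-region token rather than
	 * a mapped virtual address; readl() converts it back with
	 * IO_TOKEN_TO_ADDR() and screens the value it reads for all-ones. */
	status = readl(regs + 0x10);
	return status == ~0u ? -EIO : 0;
}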
static int eeh_proc_falsepositive_read(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
......@@ -279,28 +339,24 @@ static int __init eeh_init_proc(void)
* This lets the user specify stupid combinations of options,
* but at least the result should be very predictable.
*/
static int eeh_check_opts_config(struct pci_dev *dev, int default_state)
static int eeh_check_opts_config(struct device_node *dn,
int class_code, int vendor_id, int device_id,
int default_state)
{
struct device_node *dn = pci_device_to_OF_node(dev);
struct pci_controller *phb = PCI_GET_PHB_PTR(dev);
char devname[32], classname[32], phbname[32];
char devname[32], classname[32];
char *strs[8], *s;
int nstrs, i;
int ret = default_state;
if (dn == NULL || phb == NULL)
return 0;
/* Build list of strings to match */
nstrs = 0;
s = (char *)get_property(dn, "ibm,loc-code", 0);
if (s)
strs[nstrs++] = s;
sprintf(devname, "dev%04x:%04x", dev->vendor, dev->device);
sprintf(devname, "dev%04x:%04x", vendor_id, device_id);
strs[nstrs++] = devname;
sprintf(classname, "class%04x", dev->class);
sprintf(classname, "class%04x", class_code);
strs[nstrs++] = classname;
sprintf(phbname, "pci@%lx", phb->buid);
strs[nstrs++] = phbname;
strs[nstrs++] = ""; /* yes, this matches the empty string */
/* Now see if any string matches the eeh_opts list.
......@@ -332,7 +388,6 @@ static int eeh_check_opts_config(struct pci_dev *dev, int default_state)
*
* dev#:# vendor:device id in hex (e.g. dev1022:2000)
* class# class id in hex (e.g. class0200)
* pci@buid all devices under phb (e.g. pci@fef00000)
*
* If no location code is specified, all devices are assumed
* so eeh-off means eeh by default is off.
......@@ -340,13 +395,11 @@ static int eeh_check_opts_config(struct pci_dev *dev, int default_state)
/* This is implemented as a null separated list of strings.
* Each string looks like this: "+X" or "-X"
* where X is a loc code, dev, class or pci string (as shown above)
* where X is a loc code, vendor:device, class (as shown above)
* or empty which is used to indicate all.
*
* We interpret this option string list during the buswalk
* so that it will literally behave left-to-right even if
* some combinations don't make sense. Give the user exactly
* what they want! :)
* We interpret this option string list so that it will literally
* behave left-to-right even if some combinations don't make sense.
*/
static int __init eeh_parm(char *str, int state)
......
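The option buffer described above is a flat, null-separated sequence of "+match" / "-match" entries that is scanned strictly left to right, so later entries win. A minimal sketch of that scan, assuming the match strings for a device have already been built as in eeh_check_opts_config() above (scan_eeh_opts is a hypothetical stand-in, not a function from this patch):

/* Sketch only: walk a null-separated list of "+str" / "-str" entries,
 * letting later entries override earlier ones.  An entry whose text is
 * empty (a bare "+" or "-") matches every device. */
static int scan_eeh_opts(const char *opts, int opts_len,
			 char **strs, int nstrs, int state)
{
	int i = 0;

	while (i < opts_len) {
		int enable = (opts[i] == '+');
		const char *arg = opts + i + 1;
		int j;

		for (j = 0; j < nstrs; j++)
			if (strcmp(arg, strs[j]) == 0)
				state = enable;

		i += strlen(opts + i) + 1;	/* step past this entry and its NUL */
	}
	return state;
}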
......@@ -393,7 +393,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
mm = &init_mm;
vsid = get_kernel_vsid(ea);
break;
case IO_UNMAPPED_REGION_ID:
case EEH_REGION_ID:
/*
* Should only be hit if there is an access to MMIO space
* which is protected by EEH.
......
......@@ -39,7 +39,6 @@
#include <asm/ppcdebug.h>
#include <asm/naca.h>
#include <asm/pci_dma.h>
#include <asm/eeh.h>
#include "xics.h"
#include "open_pic.h"
......@@ -250,8 +249,6 @@ find_and_init_phbs(void)
ibm_read_pci_config = rtas_token("ibm,read-pci-config");
ibm_write_pci_config = rtas_token("ibm,write-pci-config");
eeh_init();
if (naca->interrupt_controller == IC_OPEN_PIC) {
opprop = (unsigned int *)get_property(find_path_device("/"),
"platform-open-pic", NULL);
......@@ -350,24 +347,16 @@ find_and_init_phbs(void)
res = &phb->io_resource;
res->name = Pci_Node->full_name;
res->flags = IORESOURCE_IO;
if (is_eeh_implemented()) {
if (!isa_io_base && has_isa) {
/* map a page for ISA ports. Not EEH protected. */
isa_io_base = (unsigned long)__ioremap(phb->io_base_phys, PAGE_SIZE, _PAGE_NO_CACHE);
}
res->start = phb->io_base_virt = eeh_token(index, 0, 0, 0);
res->end = eeh_token(index, 0xff, 0xff, 0xffffffff);
} else {
phb->io_base_virt = ioremap(phb->io_base_phys, range.size);
phb->io_base_virt = __ioremap(phb->io_base_phys, range.size, _PAGE_NO_CACHE);
printk("back\n");
if (!pci_io_base) {
pci_io_base = (unsigned long)phb->io_base_virt;
if (has_isa)
isa_io_base = pci_io_base;
}
res->start = ((((unsigned long) range.child_addr.a_mid) << 32) | (range.child_addr.a_lo));
res->start += (unsigned long)phb->io_base_virt;
res->start += (unsigned long)phb->io_base_virt - pci_io_base;
res->end = res->start + range.size - 1;
}
res->parent = NULL;
res->sibling = NULL;
res->child = NULL;
......@@ -391,13 +380,8 @@ find_and_init_phbs(void)
++memno;
res->name = Pci_Node->full_name;
res->flags = IORESOURCE_MEM;
if (is_eeh_implemented()) {
res->start = eeh_token(index, 0, 0, 0);
res->end = eeh_token(index, 0xff, 0xff, 0xffffffff);
} else {
res->start = range.parent_addr;
res->end = range.parent_addr + range.size - 1;
}
res->parent = NULL;
res->sibling = NULL;
res->child = NULL;
......@@ -574,7 +558,6 @@ fixup_resources(struct pci_dev *dev)
int i;
struct pci_controller *phb = PCI_GET_PHB_PTR(dev);
struct device_node *dn;
unsigned long eeh_disable_bit;
/* Add IBM loc code (slot) as a prefix to the device names for service */
dn = pci_device_to_OF_node(dev);
......@@ -591,20 +574,6 @@ fixup_resources(struct pci_dev *dev)
}
}
if (is_eeh_implemented()) {
if (is_eeh_configured(dev)) {
eeh_disable_bit = 0;
if (eeh_set_option(dev, EEH_ENABLE) != 0) {
printk("PCI: failed to enable EEH for %s %s\n", dev->slot_name, dev->dev.name);
eeh_disable_bit = EEH_TOKEN_DISABLED;
}
} else {
/* Assume device is by default EEH_DISABLE'd */
printk("PCI: eeh NOT configured for %s %s\n", dev->slot_name, dev->dev.name);
eeh_disable_bit = EEH_TOKEN_DISABLED;
}
}
PPCDBG(PPCDBG_PHBINIT, "fixup_resources:\n");
PPCDBG(PPCDBG_PHBINIT, "\tphb = 0x%016LX\n", phb);
PPCDBG(PPCDBG_PHBINIT, "\tphb->pci_io_offset = 0x%016LX\n", phb->pci_io_offset);
......@@ -633,39 +602,19 @@ fixup_resources(struct pci_dev *dev)
}
if (dev->resource[i].flags & IORESOURCE_IO) {
if (is_eeh_implemented()) {
unsigned int busno = dev->bus ? dev->bus->number : 0;
unsigned long size = dev->resource[i].end - dev->resource[i].start;
unsigned long addr = (unsigned long)__ioremap(dev->resource[i].start + phb->io_base_phys, size, _PAGE_NO_CACHE);
if (!addr)
panic("fixup_resources: ioremap failed!\n");
dev->resource[i].start = eeh_token(phb->global_number, busno, dev->devfn, addr) | eeh_disable_bit;
dev->resource[i].end = dev->resource[i].start + size;
} else {
unsigned long offset = (unsigned long)phb->io_base_virt;
unsigned long offset = (unsigned long)phb->io_base_virt - pci_io_base;
dev->resource[i].start += offset;
dev->resource[i].end += offset;
}
PPCDBG(PPCDBG_PHBINIT, "\t\t-> now [%lx .. %lx]\n",
dev->resource[i].start, dev->resource[i].end);
} else if (dev->resource[i].flags & IORESOURCE_MEM) {
if (dev->resource[i].start == 0) {
/* Bogus. Probably an unused bridge. */
dev->resource[i].end = 0;
} else {
if (is_eeh_implemented()) {
unsigned int busno = dev->bus ? dev->bus->number : 0;
unsigned long size = dev->resource[i].end - dev->resource[i].start;
unsigned long addr = (unsigned long)__ioremap(dev->resource[i].start + phb->pci_mem_offset, size, _PAGE_NO_CACHE);
if (!addr)
panic("fixup_resources: ioremap failed!\n");
dev->resource[i].start = eeh_token(phb->global_number, busno, dev->devfn, addr) | eeh_disable_bit;
dev->resource[i].end = dev->resource[i].start + size;
} else {
dev->resource[i].start += phb->pci_mem_offset;
dev->resource[i].end += phb->pci_mem_offset;
}
}
PPCDBG(PPCDBG_PHBINIT, "\t\t-> now [%lx..%lx]\n",
dev->resource[i].start, dev->resource[i].end);
......
......@@ -31,7 +31,6 @@
#include <asm/naca.h>
#include <asm/pci_dma.h>
#include <asm/machdep.h>
#include <asm/eeh.h>
#include "pci.h"
......@@ -121,6 +120,43 @@ static void fixup_windbond_82c105(struct pci_dev* dev)
}
/* Given an mmio phys address, find a pci device that implements
* this address. This is of course expensive, but only used
* for device initialization or error paths.
* For io BARs it is assumed the pci_io_base has already been added
* into addr.
*
* Bridges are ignored although they could be used to optimize the search.
*/
struct pci_dev *pci_find_dev_by_addr(unsigned long addr)
{
struct pci_dev *dev;
int i;
unsigned long ioaddr;
ioaddr = (addr > _IO_BASE) ? addr - _IO_BASE : 0;
pci_for_each_dev(dev) {
if ((dev->class >> 8) == PCI_BASE_CLASS_BRIDGE)
continue;
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
unsigned long start = pci_resource_start(dev,i);
unsigned long end = pci_resource_end(dev,i);
unsigned int flags = pci_resource_flags(dev,i);
if (start == 0 || ~start == 0 ||
end == 0 || ~end == 0)
continue;
if ((flags & IORESOURCE_IO) &&
(ioaddr >= start && ioaddr <= end))
return dev;
else if ((flags & IORESOURCE_MEM) &&
(addr >= start && addr <= end))
return dev;
}
}
return NULL;
}
void __devinit pcibios_fixup_pbus_ranges(struct pci_bus *pbus,
struct pbus_set_ranges_data *pranges)
{
......@@ -486,15 +522,10 @@ void __init pcibios_fixup_bus(struct pci_bus *bus)
/* Transparent resource -- don't try to "fix" it. */
continue;
}
if (is_eeh_implemented()) {
if (res->flags & (IORESOURCE_IO|IORESOURCE_MEM)) {
res->start = eeh_token(phb->global_number, bus->number, 0, 0);
res->end = eeh_token(phb->global_number, bus->number, 0xff, 0xffffffff);
}
} else {
if (res->flags & IORESOURCE_IO) {
res->start += (unsigned long)phb->io_base_virt;
res->end += (unsigned long)phb->io_base_virt;
unsigned long offset = (unsigned long)phb->io_base_virt - pci_io_base;
res->start += offset;
res->end += offset;
} else if (phb->pci_mem_offset
&& (res->flags & IORESOURCE_MEM)) {
if (res->start < phb->pci_mem_offset) {
......@@ -504,7 +535,6 @@ void __init pcibios_fixup_bus(struct pci_bus *bus)
}
}
}
}
#endif
if ( ppc_md.pcibios_fixup_bus )
ppc_md.pcibios_fixup_bus(bus);
......
......@@ -56,6 +56,7 @@ typedef void *(*traverse_func)(struct device_node *me, void *data);
void *traverse_pci_devices(struct device_node *start, traverse_func pre, traverse_func post, void *data);
void *traverse_all_pci_devices(traverse_func pre);
struct pci_dev *pci_find_dev_by_addr(unsigned long addr);
void pci_devs_phb_init(void);
void pci_fix_bus_sysdata(void);
struct device_node *fetch_dev_dn(struct pci_dev *dev);
......
......@@ -132,12 +132,10 @@ ioremap(unsigned long addr, unsigned long size)
#ifdef CONFIG_PPC_ISERIES
return (void*)addr;
#else
if(mem_init_done && (addr >> 60UL)) {
if (IS_EEH_TOKEN_DISABLED(addr))
return (void *)IO_TOKEN_TO_ADDR(addr);
return (void*)addr; /* already mapped address or EEH token. */
}
return __ioremap(addr, size, _PAGE_NO_CACHE);
void *ret = __ioremap(addr, size, _PAGE_NO_CACHE);
if(mem_init_done)
return eeh_ioremap(addr, ret); /* may remap the addr */
return ret;
#endif
}
......
......@@ -27,25 +27,26 @@
struct pci_dev;
#define IO_UNMAPPED_REGION_ID 0xaUL
#define IO_TOKEN_TO_ADDR(token) ((((unsigned long)(token)) & 0xFFFFFFFF) | (0xEUL << 60))
/* Flag bits encoded in the 3 unused function bits of devfn */
#define EEH_TOKEN_DISABLED (1UL << 34UL) /* eeh is disabled for this token */
#define IS_EEH_TOKEN_DISABLED(token) ((unsigned long)(token) & EEH_TOKEN_DISABLED)
/* I/O addresses are converted to EEH "tokens" such that a driver will cause
* a bad page fault if the address is used directly (i.e. these addresses are
* never actually mapped). Translation between IO <-> EEH region is 1 to 1.
*/
#define IO_TOKEN_TO_ADDR(token) (((unsigned long)(token) & ~(0xfUL << REGION_SHIFT)) | \
(IO_REGION_ID << REGION_SHIFT))
#define IO_ADDR_TO_TOKEN(addr) (((unsigned long)(addr) & ~(0xfUL << REGION_SHIFT)) | \
(EEH_REGION_ID << REGION_SHIFT))
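Since the page.h hunk below makes the region id the top nibble of the address (0xE for the IO region, 0xA for the EEH region, so REGION_SHIFT is presumably 60), these two macros simply swap that nibble and leave the low bits alone. A small userspace check of the round trip, with the constants copied in under that assumption (and assuming a 64-bit unsigned long, as on ppc64):

/* Standalone illustration, not kernel code; assumes REGION_SHIFT == 60. */
#include <assert.h>

#define REGION_SHIFT	60
#define IO_REGION_ID	0xEUL
#define EEH_REGION_ID	0xAUL
#define IO_TOKEN_TO_ADDR(token)	((((unsigned long)(token)) & ~(0xfUL << REGION_SHIFT)) | \
				 (IO_REGION_ID << REGION_SHIFT))
#define IO_ADDR_TO_TOKEN(addr)	((((unsigned long)(addr)) & ~(0xfUL << REGION_SHIFT)) | \
				 (EEH_REGION_ID << REGION_SHIFT))

int main(void)
{
	unsigned long vaddr = 0xE000000012345678UL;	/* ioremap'd IO-region address */
	unsigned long token = IO_ADDR_TO_TOKEN(vaddr);	/* becomes 0xA000000012345678 */

	assert(token == 0xA000000012345678UL);
	assert(IO_TOKEN_TO_ADDR(token) == vaddr);	/* round-trips exactly */
	return 0;
}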
#define EEH_STATE_OVERRIDE 1 /* IOA does not require eeh traps */
#define EEH_STATE_FAILURE 16 /* */
/* Values for eeh_mode bits in device_node */
#define EEH_MODE_SUPPORTED (1<<0)
#define EEH_MODE_NOCHECK (1<<1)
/* This is for profiling only */
extern unsigned long eeh_total_mmio_ffs;
extern int eeh_implemented;
void eeh_init(void);
static inline int is_eeh_implemented(void) { return eeh_implemented; }
int eeh_get_state(unsigned long ea);
unsigned long eeh_check_failure(void *token, unsigned long val);
void *eeh_ioremap(unsigned long addr, void *vaddr);
#define EEH_DISABLE 0
#define EEH_ENABLE 1
......@@ -58,15 +59,11 @@ int eeh_set_option(struct pci_dev *dev, int options);
*/
int is_eeh_configured(struct pci_dev *dev);
/* Generate an EEH token.
* The high nibble of the offset is cleared, otherwise bounds checking is performed.
* Use IO_TOKEN_TO_ADDR(token) to translate this token back to a mapped virtual addr.
* Do NOT do this to perform IO -- use the read/write macros!
/* Translate a (possible) eeh token to a physical addr.
* If "token" is not an eeh token it is simply returned under
* the assumption that it is already a physical addr.
*/
unsigned long eeh_token(unsigned long phb,
unsigned long bus,
unsigned long devfn,
unsigned long offset);
unsigned long eeh_token_to_phys(unsigned long token);
extern void *memcpy(void *, const void *, unsigned long);
extern void *memset(void *,int, unsigned long);
......@@ -77,15 +74,16 @@ extern void *memset(void *,int, unsigned long);
* If EEH is off for a device and it is a memory BAR, ioremap will
* map it to the IOREGION. In this case addr == vaddr and since these
* should be in registers we compare them first. Next we check for
* all ones which is perhaps fastest as ~val. Finally we weed out
* EEH disabled IO BARs.
* ff's which indicates a (very) possible failure.
*
* If this macro yields TRUE, the caller relays to eeh_check_failure()
* which does further tests out of line.
*/
/* #define EEH_POSSIBLE_ERROR(addr, vaddr, val) ((vaddr) != (addr) && ~(val) == 0 && !IS_EEH_TOKEN_DISABLED(addr)) */
/* #define EEH_POSSIBLE_IO_ERROR(val) (~(val) == 0) */
/* #define EEH_POSSIBLE_ERROR(addr, vaddr, val) ((vaddr) != (addr) && EEH_POSSIBLE_IO_ERROR(val) */
/* This version is rearranged to collect some profiling data */
#define EEH_POSSIBLE_ERROR(addr, vaddr, val) (~(val) == 0 && (++eeh_total_mmio_ffs, (vaddr) != (addr) && !IS_EEH_TOKEN_DISABLED(addr)))
#define EEH_POSSIBLE_IO_ERROR(val) (~(val) == 0 && ++eeh_total_mmio_ffs)
#define EEH_POSSIBLE_ERROR(addr, vaddr, val) (EEH_POSSIBLE_IO_ERROR(val) && (vaddr) != (addr))
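The eeh_read*/eeh_write* helpers that consume these macros sit in the collapsed hunk below; here is a hedged sketch of the read-side pattern they presumably follow (eeh_readb_sketch is named that way deliberately -- it is an illustration, not the patch's eeh_readb):

/* Sketch of the read-side pattern, for illustration only. */
static inline u8 eeh_readb_sketch(void *addr)
{
	volatile u8 *vaddr = (volatile u8 *)IO_TOKEN_TO_ADDR(addr);
	u8 val = in_8(vaddr);

	/* All-ones is the only value that can come from an isolated slot,
	 * so only then do we pay for the RTAS query in eeh_check_failure(). */
	if (EEH_POSSIBLE_ERROR(addr, vaddr, val))
		return eeh_check_failure(addr, val);
	return val;
}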
/*
* MMIO read/write operations with EEH support.
......@@ -149,38 +147,56 @@ static inline void eeh_memcpy_toio(void *dest, void *src, unsigned long n) {
memcpy(vdest, src, n);
}
static inline void eeh_insb(volatile u8 *addr, void *buf, int n) {
volatile u8 *vaddr = (volatile u8 *)IO_TOKEN_TO_ADDR(addr);
_insb(vaddr, buf, n);
/* ToDo: look for ff's in buf[n] */
/* The I/O macros must handle ISA ports as well as PCI I/O bars.
* ISA does not implement EEH and ISA may not exist in the system.
* For PCI we check for EEH failures.
*/
#define _IO_IS_ISA(port) ((port) < 0x10000)
#define _IO_HAS_ISA_BUS (isa_io_base != 0)
static inline u8 eeh_inb(unsigned long port) {
u8 val;
if (_IO_IS_ISA(port) && !_IO_HAS_ISA_BUS)
return ~0;
val = in_8((u8 *)(port+pci_io_base));
if (!_IO_IS_ISA(port) && EEH_POSSIBLE_IO_ERROR(val))
return eeh_check_failure((void*)(port+pci_io_base), val);
return val;
}
static inline void eeh_outsb(volatile u8 *addr, const void *buf, int n) {
volatile u8 *vaddr = (volatile u8 *)IO_TOKEN_TO_ADDR(addr);
_outsb(vaddr, buf, n);
static inline void eeh_outb(u8 val, unsigned long port) {
if (!_IO_IS_ISA(port) || _IO_HAS_ISA_BUS)
return out_8((u8 *)(port+pci_io_base), val);
}
static inline void eeh_insw_ns(volatile u16 *addr, void *buf, int n) {
volatile u16 *vaddr = (volatile u16 *)IO_TOKEN_TO_ADDR(addr);
_insw_ns(vaddr, buf, n);
/* ToDo: look for ffff's in buf[n] */
static inline u16 eeh_inw(unsigned long port) {
u16 val;
if (_IO_IS_ISA(port) && !_IO_HAS_ISA_BUS)
return ~0;
val = in_le16((u16 *)(port+pci_io_base));
if (!_IO_IS_ISA(port) && EEH_POSSIBLE_IO_ERROR(val))
return eeh_check_failure((void*)(port+pci_io_base), val);
return val;
}
static inline void eeh_outsw_ns(volatile u16 *addr, const void *buf, int n) {
volatile u16 *vaddr = (volatile u16 *)IO_TOKEN_TO_ADDR(addr);
_outsw_ns(vaddr, buf, n);
static inline void eeh_outw(u16 val, unsigned long port) {
if (!_IO_IS_ISA(port) || _IO_HAS_ISA_BUS)
return out_le16((u16 *)(port+pci_io_base), val);
}
static inline void eeh_insl_ns(volatile u32 *addr, void *buf, int n) {
volatile u32 *vaddr = (volatile u32 *)IO_TOKEN_TO_ADDR(addr);
_insl_ns(vaddr, buf, n);
/* ToDo: look for ffffffff's in buf[n] */
static inline u32 eeh_inl(unsigned long port) {
u32 val;
if (_IO_IS_ISA(port) && !_IO_HAS_ISA_BUS)
return ~0;
val = in_le32((u32 *)(port+pci_io_base));
if (!_IO_IS_ISA(port) && EEH_POSSIBLE_IO_ERROR(val))
return eeh_check_failure((void*)(port+pci_io_base), val);
return val;
}
static inline void eeh_outsl_ns(volatile u32 *addr, const void *buf, int n) {
volatile u32 *vaddr = (volatile u32 *)IO_TOKEN_TO_ADDR(addr);
_outsl_ns(vaddr, buf, n);
static inline void eeh_outl(u32 val, unsigned long port) {
if (!_IO_IS_ISA(port) || _IO_HAS_ISA_BUS)
return out_le32((u32 *)(port+pci_io_base), val);
}
#endif /* _EEH_H */
......@@ -50,36 +50,45 @@ extern int have_print;
#define outw(data,addr) writew(data,((unsigned long)(addr)))
#define outl(data,addr) writel(data,((unsigned long)(addr)))
#else
#define IS_MAPPED_VADDR(port) ((unsigned long)(port) >> 60UL)
#define readb(addr) eeh_readb((void*)(addr))
#define readw(addr) eeh_readw((void*)(addr))
#define readl(addr) eeh_readl((void*)(addr))
#define writeb(data, addr) eeh_writeb((data), ((void*)(addr)))
#define writew(data, addr) eeh_writew((data), ((void*)(addr)))
#define writel(data, addr) eeh_writel((data), ((void*)(addr)))
#define memset_io(a,b,c) eeh_memset((void *)(a),(b),(c))
#define memset_io(a,b,c) eeh_memset_io((void *)(a),(b),(c))
#define memcpy_fromio(a,b,c) eeh_memcpy_fromio((a),(void *)(b),(c))
#define memcpy_toio(a,b,c) eeh_memcpy_toio((void *)(a),(b),(c))
#define inb(port) _inb((unsigned long)port)
#define outb(val, port) _outb(val, (unsigned long)port)
#define inw(port) _inw((unsigned long)port)
#define outw(val, port) _outw(val, (unsigned long)port)
#define inl(port) _inl((unsigned long)port)
#define outl(val, port) _outl(val, (unsigned long)port)
#define inb(port) eeh_inb((unsigned long)port)
#define outb(val, port) eeh_outb(val, (unsigned long)port)
#define inw(port) eeh_inw((unsigned long)port)
#define outw(val, port) eeh_outw(val, (unsigned long)port)
#define inl(port) eeh_inl((unsigned long)port)
#define outl(val, port) eeh_outl(val, (unsigned long)port)
/*
* The insw/outsw/insl/outsl macros don't do byte-swapping.
* They are only used in practice for transferring buffers which
* are arrays of bytes, and byte-swapping is not appropriate in
* that case. - paulus */
#define insb(port, buf, ns) eeh_insb((u8 *)(port), (buf), (ns))
#define outsb(port, buf, ns) eeh_outsb((u8 *)(port), (buf), (ns))
#define insw(port, buf, ns) eeh_insw_ns((u16 *)(port), (buf), (ns))
#define outsw(port, buf, ns) eeh_outsw_ns((u16 *)(port), (buf), (ns))
#define insl(port, buf, nl) eeh_insl_ns((u32 *)(port), (buf), (nl))
#define outsl(port, buf, nl) eeh_outsl_ns((u32 *)(port), (buf), (nl))
#define insb(port, buf, ns) _insb((u8 *)((port)+pci_io_base), (buf), (ns))
#define outsb(port, buf, ns) _outsb((u8 *)((port)+pci_io_base), (buf), (ns))
#define insw(port, buf, ns) _insw_ns((u16 *)((port)+pci_io_base), (buf), (ns))
#define outsw(port, buf, ns) _outsw_ns((u16 *)((port)+pci_io_base), (buf), (ns))
#define insl(port, buf, nl) _insl_ns((u32 *)((port)+pci_io_base), (buf), (nl))
#define outsl(port, buf, nl) _outsl_ns((u32 *)((port)+pci_io_base), (buf), (nl))
#endif
extern void _insb(volatile u8 *port, void *buf, int ns);
extern void _outsb(volatile u8 *port, const void *buf, int ns);
extern void _insw(volatile u16 *port, void *buf, int ns);
extern void _outsw(volatile u16 *port, const void *buf, int ns);
extern void _insl(volatile u32 *port, void *buf, int nl);
extern void _outsl(volatile u32 *port, const void *buf, int nl);
extern void _insw_ns(volatile u16 *port, void *buf, int ns);
extern void _outsw_ns(volatile u16 *port, const void *buf, int ns);
extern void _insl_ns(volatile u32 *port, void *buf, int nl);
extern void _outsl_ns(volatile u32 *port, const void *buf, int nl);
/*
* output pause versions need a delay at least for the
......@@ -92,27 +101,15 @@ extern int have_print;
#define inl_p(port) inl(port)
#define outl_p(val, port) (udelay(1), outl((val, (port)))
extern void _insb(volatile u8 *port, void *buf, int ns);
extern void _outsb(volatile u8 *port, const void *buf, int ns);
extern void _insw(volatile u16 *port, void *buf, int ns);
extern void _outsw(volatile u16 *port, const void *buf, int ns);
extern void _insl(volatile u32 *port, void *buf, int nl);
extern void _outsl(volatile u32 *port, const void *buf, int nl);
extern void _insw_ns(volatile u16 *port, void *buf, int ns);
extern void _outsw_ns(volatile u16 *port, const void *buf, int ns);
extern void _insl_ns(volatile u32 *port, void *buf, int nl);
extern void _outsl_ns(volatile u32 *port, const void *buf, int nl);
/*
* The *_ns versions below don't do byte-swapping.
* Neither do the standard versions now; these are just here
* for older code.
*/
#define insw_ns(port, buf, ns) insw(port, buf, ns)
#define outsw_ns(port, buf, ns) outsw(port, buf, ns)
#define insl_ns(port, buf, nl) insl(port, buf, nl)
#define outsl_ns(port, buf, nl) outsl(port, buf, nl)
#define insw_ns(port, buf, ns) _insw_ns((u16 *)((port)+pci_io_base), (buf), (ns))
#define outsw_ns(port, buf, ns) _outsw_ns((u16 *)((port)+pci_io_base), (buf), (ns))
#define insl_ns(port, buf, nl) _insl_ns((u32 *)((port)+pci_io_base), (buf), (nl))
#define outsl_ns(port, buf, nl) _outsl_ns((u32 *)((port)+pci_io_base), (buf), (nl))
#define IO_SPACE_LIMIT ~(0UL)
......@@ -249,49 +246,6 @@ static inline void out_be32(volatile unsigned *addr, int val)
#ifndef CONFIG_PPC_ISERIES
#include <asm/eeh.h>
static inline u8 _inb(unsigned long port) {
if (IS_MAPPED_VADDR(port))
return readb((void *)port);
else if (_IO_BASE)
return in_8((u8 *)((port)+_IO_BASE));
else
return 0xff;
}
static inline void _outb(u8 val, unsigned long port) {
if (IS_MAPPED_VADDR(port))
return writeb(val, (void *)port);
else if (_IO_BASE)
out_8((u8 *)((port)+_IO_BASE), val);
}
static inline u16 _inw(unsigned long port) {
if (IS_MAPPED_VADDR(port))
return readw((void *)port);
else if (_IO_BASE)
return in_le16((u16 *)((port)+_IO_BASE));
else
return 0xffff;
}
static inline void _outw(u16 val, unsigned long port) {
if (IS_MAPPED_VADDR(port))
return writew(val, (void *)port);
else if (_IO_BASE)
out_le16((u16 *)((port)+_IO_BASE), val);
}
static inline u32 _inl(unsigned long port) {
if (IS_MAPPED_VADDR(port))
return readl((void *)port);
else if (_IO_BASE)
return in_le32((u32 *)((port)+_IO_BASE));
else
return 0xffffffff;
}
static inline void _outl(u32 val, unsigned long port) {
if (IS_MAPPED_VADDR(port))
return writel(val, (void *)port);
else if (_IO_BASE)
out_le32((u32 *)((port)+_IO_BASE), val);
}
#endif
#ifdef __KERNEL__
......
......@@ -168,8 +168,10 @@ static inline int get_order(unsigned long size)
#define KERNELBASE PAGE_OFFSET
#define VMALLOCBASE 0xD000000000000000
#define IOREGIONBASE 0xE000000000000000
#define EEHREGIONBASE 0xA000000000000000
#define IO_REGION_ID (IOREGIONBASE>>REGION_SHIFT)
#define EEH_REGION_ID (EEHREGIONBASE>>REGION_SHIFT)
#define VMALLOC_REGION_ID (VMALLOCBASE>>REGION_SHIFT)
#define KERNEL_REGION_ID (KERNELBASE>>REGION_SHIFT)
#define USER_REGION_ID (0UL)
......
......@@ -125,12 +125,18 @@ struct device_node {
int n_intrs;
struct interrupt_info *intrs;
char *full_name;
/* PCI stuff probably doesn't belong here */
int busno; /* for pci devices */
int bussubno; /* for pci devices */
int devfn; /* for pci devices */
#define DN_STATUS_BIST_FAILED (1<<0)
int status; /* Current device status (non-zero is bad) */
int eeh_mode; /* See eeh.h for possible EEH_MODEs */
int eeh_config_addr;
struct pci_controller *phb; /* for pci devices */
struct TceTable *tce_table; /* for phb's or bridges */
#define DN_STATUS_BIST_FAILED (1<<0)
struct property *properties;
struct device_node *parent;
struct device_node *child;
......