Commit 4e15eda4 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] Altix update: various, mainly cleanups

From: Pat Gefre <pfg@sgi.com>

arch/ia64/sn/io/machvec/pci_bus_cvlink.c
    Changes for new pcireg_ interfaces
    pcibr reorg
    Some code cleanup/reorg

arch/ia64/sn/io/machvec/pci_dma.c
    IS_PCIA64() not needed

arch/ia64/sn/io/sn2/ml_iograph.c
    new pcireg_ interface

arch/ia64/sn/io/sn2/pcibr/pcibr_ate.c
    code reorg/clean up

arch/ia64/sn/io/sn2/pcibr/pcibr_config.c
    code reorg/cleanup

arch/ia64/sn/io/sn2/pcibr/pcibr_dvr.c
    reorg/cleanup

arch/ia64/sn/io/sn2/pcibr/pcibr_error.c
    reorg/cleanup

arch/ia64/sn/io/sn2/pcibr/pcibr_intr.c
    reorg/cleanup

arch/ia64/sn/io/sn2/pcibr/pcibr_reg.c
    Fixed the interface to these functions - one call/data type

arch/ia64/sn/io/sn2/pcibr/pcibr_rrb.c
    reorg/cleanup

arch/ia64/sn/io/sn2/pcibr/pcibr_slot.c
    reorg/cleanup

arch/ia64/sn/io/sn2/pciio.c
    removed unused functions

arch/ia64/sn/io/sn2/pic.c
    reorg/cleanup

arch/ia64/sn/kernel/irq.c
    IS_PIC_SOFT not needed
    mod for new pcireg_ interfaces

include/asm-ia64/sn/module.h
    nodes/geoid[] -> MAX_SLABS

include/asm-ia64/sn/pci/bridge.h
    IS_[X]BRIDGE not needed

include/asm-ia64/sn/pci/pci_bus_cvlink.h
    SET_PCIA64 and IS_PCIA64 not needed
    isa64, dma_buf_sync, xbow_buf_sync gone

include/asm-ia64/sn/pci/pcibr.h
    mostly cleanup
    some reorg mods

include/asm-ia64/sn/pci/pcibr_private.h
    some reorg code
    protos for new pcireg_ interfaces

include/asm-ia64/sn/pci/pciio.h
    cleanup

include/asm-ia64/sn/pci/pic.h
    cleanup

include/asm-ia64/sn/sn2/intr.h
    changed IA64_SN2_FIRST_DEVICE_VECTOR and IA64_SN2_LAST_DEVICE_VECTOR
parent 839401aa
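The recurring change below is the move from dereferencing the bridge_t register struct directly to going through pcireg_ accessor calls that hide the ASIC's register layout. A minimal before/after sketch of that style, distilled from the hunks that follow (only the pcireg_ names come from this patch; the surrounding lines are illustrative):

        /* old style: dereference the bridge_t register struct directly */
        bridge_t *b = pcibr_soft->bs_base;
        b->b_int_device |= (1 << 18);
        b->b_wid_tflush;                        /* PIO read; wait until Bridge PIO complete */

        /* new style: the base is kept as an opaque void * and every access
         * goes through a pcireg_ helper (names as they appear in the hunks below) */
        void *bridge = pcibr_soft->bs_base;
        pcireg_bridge_intr_device_bit_set(bridge, (1 << 18));
        pcireg_tflush_get(pcibr_soft);          /* wait until Bridge PIO complete */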
@@ -22,13 +22,18 @@ unsigned char num_bridges;
static int done_probing;
extern irqpda_t *irqpdaindr;
static int pci_bus_map_create(vertex_hdl_t xtalk, char * io_moduleid);
static int pci_bus_map_create(struct pcibr_list_s *softlistp, moduleid_t io_moduleid);
vertex_hdl_t devfn_to_vertex(unsigned char busnum, unsigned int devfn);
extern void register_pcibr_intr(int irq, pcibr_intr_t intr);
void sn_dma_flush_init(unsigned long start, unsigned long end, int idx, int pin, int slot);
extern int cbrick_type_get_nasid(nasid_t);
extern void ioconfig_bus_new_entries(void);
extern void ioconfig_get_busnum(char *, int *);
extern int iomoduleid_get(nasid_t);
extern int pcibr_widget_to_bus(vertex_hdl_t);
extern int isIO9(int);
#define IS_OPUS(nasid) (cbrick_type_get_nasid(nasid) == MODULE_OPUSBRICK)
#define IS_ALTIX(nasid) (cbrick_type_get_nasid(nasid) == MODULE_CBRICK)
@@ -158,15 +163,14 @@ struct sn_flush_nasid_entry flush_nasid_list[MAX_NASIDS];
* DMA writes.
*/
void
sn_dma_flush_init(unsigned long start, unsigned long end, int idx, int pin, int slot) {
sn_dma_flush_init(unsigned long start, unsigned long end, int idx, int pin, int slot)
{
nasid_t nasid;
unsigned long dnasid;
int wid_num;
int bus;
struct sn_flush_device_list *p;
bridge_t *b;
bridgereg_t dev_sel;
extern int isIO9(int);
void *b;
int bwin;
int i;
@@ -243,7 +247,7 @@ sn_dma_flush_init(unsigned long start, unsigned long end, int idx, int pin, int
break;
}
}
b = (bridge_t *)(NODE_SWIN_BASE(nasid, wid_num) | (bus << 23) );
b = (void *)(NODE_SWIN_BASE(nasid, wid_num) | (bus << 23) );
/* If it's IO9, then slot 2 maps to slot 7 and slot 6 maps to slot 8.
* To see this is non-trivial. By drawing pictures and reading manuals and talking
@@ -268,38 +272,30 @@ sn_dma_flush_init(unsigned long start, unsigned long end, int idx, int pin, int
|| (IS_OPUS(nasid) && wid_num == 0xf) )
&& bus == 0) {
if (slot == 2) {
p->force_int_addr = (unsigned long)&b->b_force_always[6].intr;
dev_sel = b->b_int_device;
dev_sel |= (1<<18);
b->b_int_device = dev_sel;
p->force_int_addr = (unsigned long)pcireg_bridge_force_always_addr_get(b, 6);
pcireg_bridge_intr_device_bit_set(b, (1<<18));
dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
b->p_int_addr_64[6] = (virt_to_phys(&p->flush_addr) & 0xfffffffff) |
(dnasid << 36) | (0xfUL << 48);
pcireg_bridge_intr_addr_set(b, 6, ((virt_to_phys(&p->flush_addr) & 0xfffffffff) |
(dnasid << 36) | (0xfUL << 48)));
} else if (slot == 3) { /* 12160 SCSI device in IO9 */
p->force_int_addr = (unsigned long)&b->b_force_always[4].intr;
dev_sel = b->b_int_device;
dev_sel |= (2<<12);
b->b_int_device = dev_sel;
p->force_int_addr = (unsigned long)pcireg_bridge_force_always_addr_get(b, 4);
pcireg_bridge_intr_device_bit_set(b, (2<<12));
dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
b->p_int_addr_64[4] = (virt_to_phys(&p->flush_addr) & 0xfffffffff) |
(dnasid << 36) | (0xfUL << 48);
pcireg_bridge_intr_addr_set(b, 4, ((virt_to_phys(&p->flush_addr) & 0xfffffffff) |
(dnasid << 36) | (0xfUL << 48)));
} else { /* slot == 6 */
p->force_int_addr = (unsigned long)&b->b_force_always[7].intr;
dev_sel = b->b_int_device;
dev_sel |= (5<<21);
b->b_int_device = dev_sel;
p->force_int_addr = (unsigned long)pcireg_bridge_force_always_addr_get(b, 7);
pcireg_bridge_intr_device_bit_set(b, (5<<21));
dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
b->p_int_addr_64[7] = (virt_to_phys(&p->flush_addr) & 0xfffffffff) |
(dnasid << 36) | (0xfUL << 48);
pcireg_bridge_intr_addr_set(b, 7, ((virt_to_phys(&p->flush_addr) & 0xfffffffff) |
(dnasid << 36) | (0xfUL << 48)));
}
} else {
p->force_int_addr = (unsigned long)&b->b_force_always[pin + 2].intr;
dev_sel = b->b_int_device;
dev_sel |= ((slot - 1) << ( pin * 3) );
b->b_int_device = dev_sel;
p->force_int_addr = (unsigned long)pcireg_bridge_force_always_addr_get(b, (pin +2));
pcireg_bridge_intr_device_bit_set(b, ((slot - 1) << ( pin * 3)));
dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
b->p_int_addr_64[pin + 2] = (virt_to_phys(&p->flush_addr) & 0xfffffffff) |
(dnasid << 36) | (0xfUL << 48);
pcireg_bridge_intr_addr_set(b, (pin + 2), ((virt_to_phys(&p->flush_addr) & 0xfffffffff) |
(dnasid << 36) | (0xfUL << 48)));
}
}
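For readability: the 64-bit value handed to pcireg_bridge_intr_addr_set() above packs three fields. A sketch of that packing as a helper (hypothetical function name; the bit layout is read directly from the code above):

        static uint64_t intr_flush_addr(void *flush_addr, nasid_t dnasid)
        {
                /* bits  0..35: node offset of the flush word,
                 * bits 36..  : destination NASID,
                 * bits 48..  : constant 0xf, exactly as written above */
                return (virt_to_phys(flush_addr) & 0xfffffffffUL) |
                       ((uint64_t)dnasid << 36) | (0xfUL << 48);
        }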
@@ -400,7 +396,6 @@ sn_pci_fixup(int arg)
}
device_sysdata->vhdl = devfn_to_vertex(device_dev->bus->number, device_dev->devfn);
device_sysdata->isa64 = 0;
device_dev->sysdata = (void *) device_sysdata;
set_pci_provider(device_sysdata);
@@ -548,238 +543,132 @@ linux_bus_cvlink(void)
* pci_bus_map_create() - Called by pci_bus_to_hcl_cvlink() to finish the job.
*
* Linux PCI Bus numbers are assigned from lowest module_id numbers
* (rack/slot etc.) starting from HUB_WIDGET_ID_MAX down to
* HUB_WIDGET_ID_MIN:
* widgetnum 15 gets lower Bus Number than widgetnum 14 etc.
*
* Given 2 modules 001c01 and 001c02 we get the following mappings:
* 001c01, widgetnum 15 = Bus number 0
* 001c01, widgetnum 14 = Bus number 1
* 001c02, widgetnum 15 = Bus number 3
* 001c02, widgetnum 14 = Bus number 4
* etc.
*
* The rationale for starting Bus Number 0 with Widget number 15 is that
* the system boot disks are always connected via Widget 15 Slot 0 of the
* I-brick. Linux creates /dev/sd* device names starting from Bus Number 0.
* Therefore, /dev/sda1 will be the first disk, on Widget 15 of the lowest
* module id (Master Cnode) of the system.
*
* (rack/slot etc.)
*/
static int
pci_bus_map_create(vertex_hdl_t xtalk, char * io_moduleid)
pci_bus_map_create(struct pcibr_list_s *softlistp, moduleid_t moduleid)
{
int basebus_num, bus_number;
vertex_hdl_t pci_bus = softlistp->bl_vhdl;
char moduleid_str[16];
vertex_hdl_t master_node_vertex = NULL;
vertex_hdl_t xwidget = NULL;
vertex_hdl_t pci_bus = NULL;
hubinfo_t hubinfo = NULL;
xwidgetnum_t widgetnum;
char pathname[128];
graph_error_t rv;
int bus;
int basebus_num;
extern void ioconfig_get_busnum(char *, int *);
int bus_number;
memset(moduleid_str, 0, 16);
format_module_id(moduleid_str, moduleid, MODULE_FORMAT_BRIEF);
(void) ioconfig_get_busnum((char *)moduleid_str, &basebus_num);
/*
* Loop through this vertex and get the Xwidgets ..
* Assign the correct bus number and also the nasid of this
* pci Xwidget.
*/
/* PCI devices */
for (widgetnum = HUB_WIDGET_ID_MAX; widgetnum >= HUB_WIDGET_ID_MIN; widgetnum--) {
sprintf(pathname, "%d", widgetnum);
xwidget = NULL;
/*
* Example - /hw/module/001c16/Pbrick/xtalk/8 is the xwidget
* /hw/module/001c16/Pbrick/xtalk/8/pci/1 is device
*/
rv = hwgraph_traverse(xtalk, pathname, &xwidget);
if ( (rv != GRAPH_SUCCESS) ) {
if (!xwidget) {
continue;
}
}
sprintf(pathname, "%d/"EDGE_LBL_PCI, widgetnum);
pci_bus = NULL;
if (hwgraph_traverse(xtalk, pathname, &pci_bus) != GRAPH_SUCCESS)
if (!pci_bus) {
continue;
}
/*
* Assign the correct bus number and also the nasid of this
* pci Xwidget.
*
* Should not be any race here ...
*/
num_bridges++;
busnum_to_pcibr_vhdl[num_bridges - 1] = pci_bus;
/*
* Get the master node and from there get the NASID.
*/
master_node_vertex = device_master_get(xwidget);
if (!master_node_vertex) {
printk("WARNING: pci_bus_map_create: Unable to get .master for vertex 0x%p\n", (void *)xwidget);
}
hubinfo_get(master_node_vertex, &hubinfo);
if (!hubinfo) {
printk("WARNING: pci_bus_map_create: Unable to get hubinfo for master node vertex 0x%p\n", (void *)master_node_vertex);
return(1);
} else {
busnum_to_nid[num_bridges - 1] = hubinfo->h_nasid;
}
/*
* Pre assign DMA maps needed for 32 Bits Page Map DMA.
*/
busnum_to_atedmamaps[num_bridges - 1] = (void *) kmalloc(
sizeof(struct pcibr_dmamap_s) * MAX_ATE_MAPS, GFP_KERNEL);
if (!busnum_to_atedmamaps[num_bridges - 1])
printk("WARNING: pci_bus_map_create: Unable to precreate ATE DMA Maps for busnum %d vertex 0x%p\n", num_bridges - 1, (void *)xwidget);
memset(busnum_to_atedmamaps[num_bridges - 1], 0x0,
sizeof(struct pcibr_dmamap_s) * MAX_ATE_MAPS);
bus_number = basebus_num + pcibr_widget_to_bus(pci_bus);
#ifdef DEBUG
{
char hwpath[MAXDEVNAME] = "\0";
extern int hwgraph_vertex_name_get(vertex_hdl_t, char *, uint);
pcibr_soft_t pcibr_soft = softlistp->bl_soft;
hwgraph_vertex_name_get(pci_bus, hwpath, MAXDEVNAME);
printk("%s:\n\tbus_num %d, basebus_num %d, brick_bus %d, "
"bus_vhdl 0x%lx, brick_type %d\n", hwpath, bus_number,
basebus_num, pcibr_widget_to_bus(pci_bus),
(uint64_t)pci_bus, pcibr_soft->bs_bricktype);
}
#endif
busnum_to_pcibr_vhdl[bus_number] = pci_bus;
/*
* PCIX devices
* We number busses differently for PCI-X devices.
* We start from Lowest Widget on up ..
* Pre assign DMA maps needed for 32 Bits Page Map DMA.
*/
(void) ioconfig_get_busnum((char *)io_moduleid, &basebus_num);
for (widgetnum = HUB_WIDGET_ID_MIN; widgetnum <= HUB_WIDGET_ID_MAX; widgetnum++) {
/* Do both buses */
for ( bus = 0; bus < 2; bus++ ) {
sprintf(pathname, "%d", widgetnum);
xwidget = NULL;
/*
* Example - /hw/module/001c16/Pbrick/xtalk/8 is the xwidget
* /hw/module/001c16/Pbrick/xtalk/8/pci-x/0 is the bus
* /hw/module/001c16/Pbrick/xtalk/8/pci-x/0/1 is device
*/
rv = hwgraph_traverse(xtalk, pathname, &xwidget);
if ( (rv != GRAPH_SUCCESS) ) {
if (!xwidget) {
continue;
}
}
if ( bus == 0 )
sprintf(pathname, "%d/"EDGE_LBL_PCIX_0, widgetnum);
else
sprintf(pathname, "%d/"EDGE_LBL_PCIX_1, widgetnum);
pci_bus = NULL;
if (hwgraph_traverse(xtalk, pathname, &pci_bus) != GRAPH_SUCCESS)
if (!pci_bus) {
continue;
}
/*
* Assign the correct bus number and also the nasid of this
* pci Xwidget.
*
* Should not be any race here ...
*/
bus_number = basebus_num + bus + io_brick_map_widget(MODULE_PXBRICK, widgetnum);
#ifdef DEBUG
printk("bus_number %d basebus_num %d bus %d io %d\n",
bus_number, basebus_num, bus,
io_brick_map_widget(MODULE_PXBRICK, widgetnum));
#endif
busnum_to_pcibr_vhdl[bus_number] = pci_bus;
/*
* Pre assign DMA maps needed for 32 Bits Page Map DMA.
*/
busnum_to_atedmamaps[bus_number] = (void *) kmalloc(
sizeof(struct pcibr_dmamap_s) * MAX_ATE_MAPS, GFP_KERNEL);
if (!busnum_to_atedmamaps[bus_number])
printk("WARNING: pci_bus_map_create: Unable to precreate ATE DMA Maps for busnum %d vertex 0x%p\n", num_bridges - 1, (void *)xwidget);
memset(busnum_to_atedmamaps[bus_number], 0x0,
sizeof(struct pcibr_dmamap_s) * MAX_ATE_MAPS);
}
busnum_to_atedmamaps[bus_number] = (void *) vmalloc(
sizeof(struct pcibr_dmamap_s)*MAX_ATE_MAPS);
if (busnum_to_atedmamaps[bus_number] <= 0) {
printk("pci_bus_map_create: Cannot allocate memory for ate maps\n");
return -1;
}
return(0);
memset(busnum_to_atedmamaps[bus_number], 0x0,
sizeof(struct pcibr_dmamap_s) * MAX_ATE_MAPS);
return(0);
}
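Worked example of the numbering done here, with made-up values: ioconfig_get_busnum() supplies the module's base bus number, and pcibr_widget_to_bus() supplies the bus offset within the brick.

        /* hypothetical values, for illustration only */
        basebus_num = 2;        /* ioconfig_get_busnum("001c01", &basebus_num) */
        bus_number  = basebus_num + pcibr_widget_to_bus(pci_bus);      /* e.g. 2 + 1 = 3 */
        busnum_to_pcibr_vhdl[bus_number] = pci_bus;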
/*
* pci_bus_to_hcl_cvlink() - This routine is called after SGI IO Infrastructure
* initialization has completed to set up the mappings between Xbridge
* and logical pci bus numbers. We also set up the NASID for each of these
* xbridges.
* pci_bus_to_hcl_cvlink() - This routine is called after SGI IO Infrastructure
* initialization has completed to set up the mappings between PCI BRIDGE
* ASIC and logical pci bus numbers.
*
* Must be called before pci_init() is invoked.
*/
int
pci_bus_to_hcl_cvlink(void)
{
int i;
extern pcibr_list_p pcibr_list;
vertex_hdl_t devfs_hdl = NULL;
vertex_hdl_t xtalk = NULL;
int rv = 0;
char name[256];
char tmp_name[256];
int i, ii, j;
char *brick_name;
extern void ioconfig_bus_new_entries(void);
/*
* Figure out which IO Brick is connected to the Compute Bricks.
*/
for (i = 0; i < nummodules; i++) {
extern int iomoduleid_get(nasid_t);
moduleid_t iobrick_id;
nasid_t nasid = -1;
int nodecnt;
int n = 0;
nodecnt = modules[i]->nodecnt;
for ( n = 0; n < nodecnt; n++ ) {
nasid = cnodeid_to_nasid(modules[i]->nodes[n]);
iobrick_id = iomoduleid_get(nasid);
if ((int)iobrick_id > 0) { /* Valid module id */
char name[12];
memset(name, 0, 12);
format_module_id((char *)&(modules[i]->io[n].moduleid), iobrick_id, MODULE_FORMAT_BRIEF);
struct pcibr_list_s *softlistp = pcibr_list;
struct pcibr_list_s *first_in_list = NULL;
struct pcibr_list_s *last_in_list = NULL;
/* Walk the list of pcibr_soft structs looking for matches */
while (softlistp) {
struct pcibr_soft_s *pcibr_soft = softlistp->bl_soft;
moduleid_t moduleid;
/* Is this PCI bus associated with this moduleid? */
moduleid = NODE_MODULEID(
NASID_TO_COMPACT_NODEID(pcibr_soft->bs_nasid));
if (modules[i]->id == moduleid) {
struct pcibr_list_s *new_element;
new_element = kmalloc(sizeof (struct pcibr_soft_s), GFP_KERNEL);
if (new_element == NULL) {
printk("%s: Couldn't allocate memory\n",__FUNCTION__);
return -ENOMEM;
}
new_element->bl_soft = softlistp->bl_soft;
new_element->bl_vhdl = softlistp->bl_vhdl;
new_element->bl_next = NULL;
/* list empty so just put it on the list */
if (first_in_list == NULL) {
first_in_list = new_element;
last_in_list = new_element;
softlistp = softlistp->bl_next;
continue;
}
/*
* BASEIO IObricks attached to a module have
* a higher priority than non-BASEIO IOBricks
* when it comes to persistent pci bus
* numbering, so put them on the front of the
* list.
*/
if (isIO9(pcibr_soft->bs_nasid)) {
new_element->bl_next = first_in_list;
first_in_list = new_element;
} else {
last_in_list->bl_next = new_element;
last_in_list = new_element;
}
}
softlistp = softlistp->bl_next;
}
}
devfs_hdl = hwgraph_path_to_vertex("hw/module");
for (i = 0; i < nummodules ; i++) {
for ( j = 0; j < 2; j++ ) {
if ( j == 0 )
brick_name = EDGE_LBL_PXBRICK;
else
brick_name = EDGE_LBL_IXBRICK;
for ( ii = 0; ii < 2 ; ii++ ) {
memset(name, 0, 256);
memset(tmp_name, 0, 256);
format_module_id(name, modules[i]->id, MODULE_FORMAT_BRIEF);
sprintf(tmp_name, "/slab/%d/%s/xtalk", geo_slab(modules[i]->geoid[ii]), brick_name);
strcat(name, tmp_name);
xtalk = NULL;
rv = hwgraph_edge_get(devfs_hdl, name, &xtalk);
if ( rv == 0 )
pci_bus_map_create(xtalk, (char *)&(modules[i]->io[ii].moduleid));
/*
* We now have a list of all the pci bridges associated with
* the module_id, modules[i]. Call pci_bus_map_create() for
* each pci bridge
*/
softlistp = first_in_list;
while (softlistp) {
moduleid_t iobrick;
struct pcibr_list_s *next = softlistp->bl_next;
iobrick = iomoduleid_get(softlistp->bl_soft->bs_nasid);
pci_bus_map_create(softlistp, iobrick);
kfree(softlistp);
softlistp = next;
}
}
}
/*
@@ -247,18 +247,6 @@ sn_pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int dire
for (i = 0; i < nents; i++, sg++) {
phys_addr = __pa((unsigned long)page_address(sg->page) + sg->offset);
/*
* Handle the most common case: 64 bit cards. This
* call should always succeed.
*/
if (IS_PCIA64(hwdev)) {
sg->dma_address = pcibr_dmatrans_addr(vhdl, NULL, phys_addr,
sg->length,
PCIIO_DMA_DATA | PCIIO_DMA_A64);
sg->dma_length = sg->length;
continue;
}
/*
* Handle 32-63 bit cards via direct mapping
*/
@@ -385,13 +373,6 @@ sn_pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
dma_addr = 0;
phys_addr = __pa(ptr);
if (IS_PCIA64(hwdev)) {
/* This device supports 64 bit DMA addresses. */
dma_addr = pcibr_dmatrans_addr(vhdl, NULL, phys_addr, size,
PCIIO_DMA_DATA | PCIIO_DMA_A64);
return dma_addr;
}
/*
* Devices that support 32 bit to 63 bit DMA addresses get
* 32 bit DMA addresses.
@@ -584,10 +584,9 @@ io_init_node(cnodeid_t cnodeid)
} else {
void *bridge;
extern uint64_t pcireg_control_get(void *);
bridge = (void *)NODE_SWIN_BASE(COMPACT_TO_NASID_NODEID(cnodeid), 0);
npdap->basew_id = pcireg_control_get(bridge) & WIDGET_WIDGET_ID;
npdap->basew_id = pcireg_bridge_control_get(bridge) & WIDGET_WIDGET_ID;
printk(" ****io_init_node: Unknown Widget Part Number 0x%x Widget ID 0x%x attached to Hubv 0x%p ****\n", widget_partnum, npdap->basew_id, (void *)hubv);
return;
@@ -17,107 +17,13 @@
/*
* functions
*/
int pcibr_init_ext_ate_ram(bridge_t *);
int pcibr_ate_alloc(pcibr_soft_t, int);
void pcibr_ate_free(pcibr_soft_t, int, int);
bridge_ate_t pcibr_flags_to_ate(unsigned);
bridge_ate_p pcibr_ate_addr(pcibr_soft_t, int);
unsigned ate_freeze(pcibr_dmamap_t pcibr_dmamap,
#if PCIBR_FREEZE_TIME
unsigned *freeze_time_ptr,
#endif
unsigned *cmd_regs);
void ate_write(pcibr_soft_t pcibr_soft, bridge_ate_p ate_ptr, int ate_count, bridge_ate_t ate);
void ate_thaw(pcibr_dmamap_t pcibr_dmamap,
int ate_index,
#if PCIBR_FREEZE_TIME
bridge_ate_t ate,
int ate_total,
unsigned freeze_time_start,
#endif
unsigned *cmd_regs,
unsigned s);
int pcibr_ate_alloc(pcibr_soft_t, int, struct resource *);
void pcibr_ate_free(pcibr_soft_t, int, int, struct resource *);
bridge_ate_t pcibr_flags_to_ate(pcibr_soft_t, unsigned);
bridge_ate_p pcibr_ate_addr(pcibr_soft_t, int);
void ate_write(pcibr_soft_t, int, int, bridge_ate_t);
/* Convert from ssram_bits in control register to number of SSRAM entries */
#define ATE_NUM_ENTRIES(n) _ate_info[n]
/* Possible choices for number of ATE entries in Bridge's SSRAM */
static int _ate_info[] =
{
0, /* 0 entries */
8 * 1024, /* 8K entries */
16 * 1024, /* 16K entries */
64 * 1024 /* 64K entries */
};
#define ATE_NUM_SIZES (sizeof(_ate_info) / sizeof(int))
#define ATE_PROBE_VALUE 0x0123456789abcdefULL
/*
* Determine the size of this bridge's external mapping SSRAM, and set
* the control register appropriately to reflect this size, and initialize
* the external SSRAM.
*/
int
pcibr_init_ext_ate_ram(bridge_t *bridge)
{
int largest_working_size = 0;
int num_entries, entry;
int i, j;
bridgereg_t old_enable, new_enable;
/* Probe SSRAM to determine its size. */
old_enable = bridge->b_int_enable;
new_enable = old_enable & ~BRIDGE_IMR_PCI_MST_TIMEOUT;
bridge->b_int_enable = new_enable;
for (i = 1; i < ATE_NUM_SIZES; i++) {
/* Try writing a value */
bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] = ATE_PROBE_VALUE;
/* Guard against wrap */
for (j = 1; j < i; j++)
bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(j) - 1] = 0;
/* See if value was written */
if (bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] == ATE_PROBE_VALUE)
largest_working_size = i;
}
bridge->b_int_enable = old_enable;
bridge->b_wid_tflush; /* wait until Bridge PIO complete */
/*
* ensure that we write and read without any interruption.
* The read following the write is required for the Bridge war
*/
bridge->b_wid_control = (bridge->b_wid_control
& ~BRIDGE_CTRL_SSRAM_SIZE_MASK)
| BRIDGE_CTRL_SSRAM_SIZE(largest_working_size);
bridge->b_wid_control; /* inval addr bug war */
num_entries = ATE_NUM_ENTRIES(largest_working_size);
if (pcibr_debug_mask & PCIBR_DEBUG_ATE) {
if (num_entries) {
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATE, NULL,
"bridge at 0x%x: clearing %d external ATEs\n",
bridge, num_entries));
} else {
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATE, NULL,
"bridge at 0x%x: no external ATE RAM found\n",
bridge));
}
}
/* Initialize external mapping entries */
for (entry = 0; entry < num_entries; entry++)
bridge->b_ext_ate_ram[entry] = 0;
return (num_entries);
}
/*
* Allocate "count" contiguous Bridge Address Translation Entries
* on the specified bridge to be used for PCI to XTALK mappings.
@@ -127,56 +33,46 @@ pcibr_init_ext_ate_ram(bridge_t *bridge)
* Return the start index on success, -1 on failure.
*/
int
pcibr_ate_alloc(pcibr_soft_t pcibr_soft, int count)
pcibr_ate_alloc(pcibr_soft_t pcibr_soft, int count, struct resource *res)
{
int status = 0;
struct resource *new_res;
struct resource **allocated_res;
unsigned long flag;
new_res = (struct resource *) kmalloc( sizeof(struct resource), GFP_ATOMIC);
memset(new_res, 0, sizeof(*new_res));
status = allocate_resource( &pcibr_soft->bs_int_ate_resource, new_res,
memset(res, 0, sizeof(struct resource));
flag = pcibr_lock(pcibr_soft);
status = allocate_resource( &pcibr_soft->bs_int_ate_resource, res,
count, pcibr_soft->bs_int_ate_resource.start,
pcibr_soft->bs_int_ate_resource.end, 1,
NULL, NULL);
if ( status && (pcibr_soft->bs_ext_ate_resource.end != 0) ) {
status = allocate_resource( &pcibr_soft->bs_ext_ate_resource, new_res,
count, pcibr_soft->bs_ext_ate_resource.start,
pcibr_soft->bs_ext_ate_resource.end, 1,
NULL, NULL);
if (status) {
new_res->start = -1;
}
}
if (status) {
/* Failed to allocate */
kfree(new_res);
pcibr_unlock(pcibr_soft, flag);
return -1;
}
/* Save the resource for freeing */
allocated_res = (struct resource **)(((unsigned long)pcibr_soft->bs_allocated_ate_res) + new_res->start * sizeof( unsigned long));
*allocated_res = new_res;
pcibr_unlock(pcibr_soft, flag);
return new_res->start;
return res->start;
}
void
pcibr_ate_free(pcibr_soft_t pcibr_soft, int index, int count)
/* Who says there's no such thing as a free meal? :-) */
pcibr_ate_free(pcibr_soft_t pcibr_soft, int index, int count, struct resource *res)
{
struct resource **allocated_res;
bridge_ate_t ate;
int status = 0;
unsigned long flags;
allocated_res = (struct resource **)(((unsigned long)pcibr_soft->bs_allocated_ate_res) + index * sizeof(unsigned long));
/* For debugging purposes, clear the valid bit in the ATE */
ate = *pcibr_ate_addr(pcibr_soft, index);
ate_write(pcibr_soft, index, count, ate & ~ATE_V);
status = release_resource(*allocated_res);
flags = pcibr_lock(pcibr_soft);
status = release_resource(res);
pcibr_unlock(pcibr_soft, flags);
if (status)
BUG(); /* Ouch .. */
kfree(*allocated_res);
}
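With the reworked interface the caller owns the struct resource: pcibr_ate_alloc() fills it in instead of kmalloc'ing its own, and pcibr_ate_free() releases the same one. A minimal usage sketch based only on the signatures above (the caller, count, and surrounding code are hypothetical):

        struct resource ate_res;
        int ate_index;

        ate_index = pcibr_ate_alloc(pcibr_soft, ate_count, &ate_res);
        if (ate_index < 0)
                return -1;              /* no contiguous ATEs available */

        /* ... program the entries with ate_write(), use the mapping ... */

        pcibr_ate_free(pcibr_soft, ate_index, ate_count, &ate_res);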
@@ -185,7 +81,7 @@ pcibr_ate_free(pcibr_soft_t pcibr_soft, int index, int count)
* into Bridge-specific Address Translation Entry attribute bits.
*/
bridge_ate_t
pcibr_flags_to_ate(unsigned flags)
pcibr_flags_to_ate(pcibr_soft_t pcibr_soft, unsigned flags)
{
bridge_ate_t attributes;
@@ -232,6 +128,11 @@ pcibr_flags_to_ate(unsigned flags)
if (flags & PCIBR_NOPRECISE)
attributes &= ~ATE_PREC;
/* In PCI-X mode, Prefetch & Precise not supported */
if (IS_PCIX(pcibr_soft)) {
attributes &= ~(ATE_PREC | ATE_PREF);
}
return (attributes);
}
@@ -243,189 +144,33 @@ bridge_ate_p
pcibr_ate_addr(pcibr_soft_t pcibr_soft,
int ate_index)
{
bridge_t *bridge = pcibr_soft->bs_base;
return (ate_index < pcibr_soft->bs_int_ate_size)
? &(bridge->b_int_ate_ram[ate_index].wr)
: &(bridge->b_ext_ate_ram[ate_index]);
}
/* We are starting to get more complexity
* surrounding writing ATEs, so pull
* the writing code into this new function.
*/
#if PCIBR_FREEZE_TIME
#define ATE_FREEZE() s = ate_freeze(pcibr_dmamap, &freeze_time, cmd_regs)
#else
#define ATE_FREEZE() s = ate_freeze(pcibr_dmamap, cmd_regs)
#endif
unsigned
ate_freeze(pcibr_dmamap_t pcibr_dmamap,
#if PCIBR_FREEZE_TIME
unsigned *freeze_time_ptr,
#endif
unsigned *cmd_regs)
{
pcibr_soft_t pcibr_soft = pcibr_dmamap->bd_soft;
#ifdef LATER
int dma_slot = pcibr_dmamap->bd_slot;
#endif
int ext_ates = pcibr_dmamap->bd_flags & PCIBR_DMAMAP_SSRAM;
int slot;
unsigned long s;
unsigned cmd_reg;
volatile unsigned *cmd_lwa;
unsigned cmd_lwd;
if (!ext_ates)
return 0;
/* Bridge Hardware Bug WAR #484930:
* Bridge can't handle updating External ATEs
* while DMA is occurring that uses External ATEs,
* even if the particular ATEs involved are disjoint.
*/
/* need to prevent anyone else from
* unfreezing the grant while we
* are working; also need to prevent
* this thread from being interrupted
* to keep PCI grant freeze time
* at an absolute minimum.
*/
s = pcibr_lock(pcibr_soft);
#ifdef LATER
/* just in case pcibr_dmamap_done was not called */
if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_BUSY) {
pcibr_dmamap->bd_flags &= ~PCIBR_DMAMAP_BUSY;
if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_SSRAM)
atomic_dec(&(pcibr_soft->bs_slot[dma_slot]. bss_ext_ates_active));
xtalk_dmamap_done(pcibr_dmamap->bd_xtalk);
}
#endif /* LATER */
#if PCIBR_FREEZE_TIME
*freeze_time_ptr = get_timestamp();
#endif
cmd_lwa = 0;
for (slot = pcibr_soft->bs_min_slot;
slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
if (atomic_read(&pcibr_soft->bs_slot[slot].bss_ext_ates_active)) {
cmd_reg = pcibr_soft->
bs_slot[slot].
bss_cmd_shadow;
if (cmd_reg & PCI_CMD_BUS_MASTER) {
cmd_lwa = pcibr_soft->
bs_slot[slot].
bss_cmd_pointer;
cmd_lwd = cmd_reg ^ PCI_CMD_BUS_MASTER;
cmd_lwa[0] = cmd_lwd;
}
cmd_regs[slot] = cmd_reg;
} else
cmd_regs[slot] = 0;
if (cmd_lwa) {
bridge_t *bridge = pcibr_soft->bs_base;
/* Read the last master bit that has been cleared. This PIO read
* on the PCI bus is to ensure the completion of any DMAs that
* are due to bus requests issued by PCI devices before the
* clearing of master bits.
*/
cmd_lwa[0];
/* Flush all the write buffers in the bridge */
for (slot = pcibr_soft->bs_min_slot;
slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
if (atomic_read(&pcibr_soft->bs_slot[slot].bss_ext_ates_active)) {
/* Flush the write buffer associated with this
* PCI device which might be using dma map RAM.
*/
bridge->b_wr_req_buf[slot].reg;
}
}
if (ate_index < pcibr_soft->bs_int_ate_size) {
return (pcireg_int_ate_addr(pcibr_soft, ate_index));
} else {
printk("pcibr_ate_addr(): INVALID ate_index 0x%x", ate_index);
return (bridge_ate_p)0;
}
return s;
}
void
ate_write(pcibr_soft_t pcibr_soft,
bridge_ate_p ate_ptr,
int ate_count,
bridge_ate_t ate)
{
while (ate_count-- > 0) {
*ate_ptr++ = ate;
ate += IOPGSIZE;
}
}
#if PCIBR_FREEZE_TIME
#define ATE_THAW() ate_thaw(pcibr_dmamap, ate_index, ate, ate_total, freeze_time, cmd_regs, s)
#else
#define ATE_THAW() ate_thaw(pcibr_dmamap, ate_index, cmd_regs, s)
#endif
/*
* Write the ATE.
*/
void
ate_thaw(pcibr_dmamap_t pcibr_dmamap,
int ate_index,
#if PCIBR_FREEZE_TIME
bridge_ate_t ate,
int ate_total,
unsigned freeze_time_start,
#endif
unsigned *cmd_regs,
unsigned s)
ate_write(pcibr_soft_t pcibr_soft, int ate_index, int count, bridge_ate_t ate)
{
pcibr_soft_t pcibr_soft = pcibr_dmamap->bd_soft;
int dma_slot = pcibr_dmamap->bd_slot;
int slot;
bridge_t *bridge = pcibr_soft->bs_base;
int ext_ates = pcibr_dmamap->bd_flags & PCIBR_DMAMAP_SSRAM;
unsigned cmd_reg;
#if PCIBR_FREEZE_TIME
unsigned freeze_time;
static unsigned max_freeze_time = 0;
static unsigned max_ate_total;
#endif
if (!ext_ates)
return;
/* restore cmd regs */
for (slot = pcibr_soft->bs_min_slot;
slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
if ((cmd_reg = cmd_regs[slot]) & PCI_CMD_BUS_MASTER) {
pcibr_slot_config_set(bridge, slot, PCI_CFG_COMMAND/4, cmd_reg);
while (count-- > 0) {
if (ate_index < pcibr_soft->bs_int_ate_size) {
pcireg_int_ate_set(pcibr_soft, ate_index, ate);
PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_soft->bs_vhdl,
"ate_write(): ate_index=0x%x, ate=0x%lx\n",
ate_index, (uint64_t)ate));
} else {
printk("ate_write(): INVALID ate_index 0x%x", ate_index);
return;
}
ate_index++;
ate += IOPGSIZE;
}
pcibr_dmamap->bd_flags |= PCIBR_DMAMAP_BUSY;
atomic_inc(&(pcibr_soft->bs_slot[dma_slot]. bss_ext_ates_active));
#if PCIBR_FREEZE_TIME
freeze_time = get_timestamp() - freeze_time_start;
if ((max_freeze_time < freeze_time) ||
(max_ate_total < ate_total)) {
if (max_freeze_time < freeze_time)
max_freeze_time = freeze_time;
if (max_ate_total < ate_total)
max_ate_total = ate_total;
pcibr_unlock(pcibr_soft, s);
printk( "%s: pci freeze time %d usec for %d ATEs\n"
"\tfirst ate: %R\n",
pcibr_soft->bs_name,
freeze_time * 1000 / 1250,
ate_total,
ate, ate_bits);
} else
#endif
pcibr_unlock(pcibr_soft, s);
pcireg_tflush_get(pcibr_soft); /* wait until Bridge PIO complete */
}
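ate_write() now takes the soft state plus an index and count rather than a raw ATE pointer, bumping the target address by IOPGSIZE per entry, so the old ATE_WRITE()/ate_freeze()/ate_thaw() dance collapses into a single call. Sketch of a caller, with hypothetical locals:

        /* attribute bits come from pcibr_flags_to_ate(); the page address part
         * of the ATE is assumed to be or'ed in by the caller (not shown) */
        bridge_ate_t ate = pcibr_flags_to_ate(pcibr_soft, flags);

        ate_write(pcibr_soft, ate_index, ate_count, ate);       /* writes ate_count entries */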
@@ -34,22 +34,21 @@ void do_pcibr_config_set(cfg_p, unsigned, unsigned, uint64_t);
* the 32bit word that contains the "offset" byte.
*/
cfg_p
pcibr_func_config_addr(bridge_t *bridge, pciio_bus_t bus, pciio_slot_t slot,
pcibr_func_config_addr(pcibr_soft_t soft, pciio_bus_t bus, pciio_slot_t slot,
pciio_function_t func, int offset)
{
/*
* Type 1 config space
*/
if (bus > 0) {
bridge->b_pci_cfg = ((bus << 16) | (slot << 11));
return &bridge->b_type1_cfg.f[func].l[(offset)];
pcireg_type1_cntr_set(soft, ((bus << 16) | (slot << 11)));
return (pcireg_type1_cfg_addr(soft, func, offset));
}
/*
* Type 0 config space
*/
slot++;
return &bridge->b_type0_cfg_dev[slot].f[func].l[offset];
return (pcireg_type0_cfg_addr(soft, slot, func, offset));
}
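The selector written through pcireg_type1_cntr_set() above is the usual PCI type 1 bus/device encoding; a worked example with made-up numbers:

        /* bus 2, device (slot) 3:  (2 << 16) | (3 << 11) == 0x21800 */
        pcireg_type1_cntr_set(soft, (2 << 16) | (3 << 11));
        cfg = pcireg_type1_cfg_addr(soft, func, offset);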
/*
@@ -58,59 +57,21 @@ pcibr_func_config_addr(bridge_t *bridge, pciio_bus_t bus, pciio_slot_t slot,
* 32bit word that contains the "offset" byte.
*/
cfg_p
pcibr_slot_config_addr(bridge_t *bridge, pciio_slot_t slot, int offset)
pcibr_slot_config_addr(pcibr_soft_t soft, pciio_slot_t slot, int offset)
{
return pcibr_func_config_addr(bridge, 0, slot, 0, offset);
}
/*
* Return config space data for given slot / offset
*/
unsigned
pcibr_slot_config_get(bridge_t *bridge, pciio_slot_t slot, int offset)
{
cfg_p cfg_base;
cfg_base = pcibr_slot_config_addr(bridge, slot, 0);
return (do_pcibr_config_get(cfg_base, offset, sizeof(unsigned)));
}
/*
* Return config space data for given slot / func / offset
*/
unsigned
pcibr_func_config_get(bridge_t *bridge, pciio_slot_t slot,
pciio_function_t func, int offset)
{
cfg_p cfg_base;
cfg_base = pcibr_func_config_addr(bridge, 0, slot, func, 0);
return (do_pcibr_config_get(cfg_base, offset, sizeof(unsigned)));
}
/*
* Set config space data for given slot / offset
*/
void
pcibr_slot_config_set(bridge_t *bridge, pciio_slot_t slot,
int offset, unsigned val)
{
cfg_p cfg_base;
cfg_base = pcibr_slot_config_addr(bridge, slot, 0);
do_pcibr_config_set(cfg_base, offset, sizeof(unsigned), val);
return pcibr_func_config_addr(soft, 0, slot, 0, offset);
}
/*
* Set config space data for given slot / func / offset
*/
void
pcibr_func_config_set(bridge_t *bridge, pciio_slot_t slot,
pcibr_func_config_set(pcibr_soft_t soft, pciio_slot_t slot,
pciio_function_t func, int offset, unsigned val)
{
cfg_p cfg_base;
cfg_base = pcibr_func_config_addr(bridge, 0, slot, func, 0);
cfg_base = pcibr_func_config_addr(soft, 0, slot, func, 0);
do_pcibr_config_set(cfg_base, offset, sizeof(unsigned), val);
}
@@ -124,8 +85,6 @@ pcibr_config_addr(vertex_hdl_t conn,
pciio_bus_t pciio_bus;
pciio_slot_t pciio_slot;
pciio_function_t pciio_func;
pcibr_soft_t pcibr_soft;
bridge_t *bridge;
cfg_p cfgbase = (cfg_p)0;
pciio_info_t pciio_info;
@@ -164,11 +123,7 @@ pcibr_config_addr(vertex_hdl_t conn,
pciio_func = PCI_TYPE1_FUNC(reg);
}
pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
bridge = pcibr_soft->bs_base;
cfgbase = pcibr_func_config_addr(bridge,
cfgbase = pcibr_func_config_addr((pcibr_soft_t) pcibr_info->f_mfast,
pciio_bus, pciio_slot, pciio_func, 0);
return cfgbase;
@@ -30,68 +30,17 @@
* based bricks use the corelet id.
* -pcibr_debug_slot is the pci slot you want to trace.
*/
uint32_t pcibr_debug_mask = 0x0; /* 0x00000000 to disable */
uint32_t pcibr_debug_mask; /* 0x00000000 to disable */
static char *pcibr_debug_module = "all"; /* 'all' for all modules */
static int pcibr_debug_widget = -1; /* '-1' for all widgets */
static int pcibr_debug_slot = -1; /* '-1' for all slots */
/* kbrick widgetnum-to-bus layout */
int p_busnum[MAX_PORT_NUM] = { /* widget# */
0, 0, 0, 0, 0, 0, 0, 0, /* 0x0 - 0x7 */
2, /* 0x8 */
1, /* 0x9 */
0, 0, /* 0xa - 0xb */
5, /* 0xc */
6, /* 0xd */
4, /* 0xe */
3, /* 0xf */
};
char *pci_space[] = {"NONE",
"ROM",
"IO",
"",
"MEM",
"MEM32",
"MEM64",
"CFG",
"WIN0",
"WIN1",
"WIN2",
"WIN3",
"WIN4",
"WIN5",
"",
"BAD"};
#if PCIBR_SOFT_LIST
pcibr_list_p pcibr_list = 0;
#endif
extern int hwgraph_vertex_name_get(vertex_hdl_t vhdl, char *buf, uint buflen);
extern long atoi(register char *p);
extern cnodeid_t nodevertex_to_cnodeid(vertex_hdl_t vhdl);
extern char *dev_to_name(vertex_hdl_t dev, char *buf, uint buflen);
extern struct map *atemapalloc(uint64_t);
extern void atefree(struct map *, size_t, uint64_t);
extern void atemapfree(struct map *);
extern pciio_dmamap_t get_free_pciio_dmamap(vertex_hdl_t);
extern void free_pciio_dmamap(pcibr_dmamap_t);
extern void xwidget_error_register(vertex_hdl_t, error_handler_f *, error_handler_arg_t);
#define ATE_WRITE() ate_write(pcibr_soft, ate_ptr, ate_count, ate)
#if PCIBR_FREEZE_TIME
#define ATE_FREEZE() s = ate_freeze(pcibr_dmamap, &freeze_time, cmd_regs)
#else
#define ATE_FREEZE() s = ate_freeze(pcibr_dmamap, cmd_regs)
#endif /* PCIBR_FREEZE_TIME */
#if PCIBR_FREEZE_TIME
#define ATE_THAW() ate_thaw(pcibr_dmamap, ate_index, ate, ate_total, freeze_time, cmd_regs, s)
#else
#define ATE_THAW() ate_thaw(pcibr_dmamap, ate_index, cmd_regs, s)
#endif
extern char *pci_space[];
/* =====================================================================
* Function Table of Contents
@@ -102,46 +51,33 @@ extern void xwidget_error_register(vertex_hdl_t, error_handler_f *, error_handl
* perhaps bust this file into smaller chunks.
*/
extern int do_pcibr_rrb_free_all(pcibr_soft_t, bridge_t *, pciio_slot_t);
extern void do_pcibr_rrb_free_all(pcibr_soft_t, pciio_slot_t);
extern void do_pcibr_rrb_autoalloc(pcibr_soft_t, int, int, int);
extern int pcibr_wrb_flush(vertex_hdl_t);
extern int pcibr_rrb_alloc(vertex_hdl_t, int *, int *);
extern void pcibr_rrb_flush(vertex_hdl_t);
void pcibr_rrb_alloc_more(pcibr_soft_t, int, int, int);
static int pcibr_try_set_device(pcibr_soft_t, pciio_slot_t, unsigned, bridgereg_t);
void pcibr_release_device(pcibr_soft_t, pciio_slot_t, bridgereg_t);
extern void pcibr_rrb_flush(vertex_hdl_t);
extern void pcibr_setwidint(xtalk_intr_t);
extern void pcibr_clearwidint(bridge_t *);
static int pcibr_try_set_device(pcibr_soft_t, pciio_slot_t, unsigned, uint64_t);
void pcibr_release_device(pcibr_soft_t, pciio_slot_t, uint64_t);
extern iopaddr_t pcibr_bus_addr_alloc(pcibr_soft_t, pciio_win_info_t,
pciio_space_t, int, int, int);
extern int hwgraph_vertex_name_get(vertex_hdl_t vhdl, char *buf,
uint buflen);
int pcibr_attach(vertex_hdl_t);
int pcibr_attach2(vertex_hdl_t, bridge_t *, vertex_hdl_t,
int, pcibr_soft_t *);
int pcibr_detach(vertex_hdl_t);
void pcibr_directmap_init(pcibr_soft_t);
int pcibr_pcix_rbars_calc(pcibr_soft_t);
extern int pcibr_init_ext_ate_ram(bridge_t *);
extern int pcibr_ate_alloc(pcibr_soft_t, int);
extern void pcibr_ate_free(pcibr_soft_t, int, int);
extern int pcibr_ate_alloc(pcibr_soft_t, int, struct resource *);
extern void pcibr_ate_free(pcibr_soft_t, int, int, struct resource *);
extern pciio_dmamap_t get_free_pciio_dmamap(vertex_hdl_t);
extern void free_pciio_dmamap(pcibr_dmamap_t);
extern int pcibr_widget_to_bus(vertex_hdl_t pcibr_vhdl);
extern unsigned ate_freeze(pcibr_dmamap_t pcibr_dmamap,
#if PCIBR_FREEZE_TIME
unsigned *freeze_time_ptr,
#endif
unsigned *cmd_regs);
extern void ate_write(pcibr_soft_t pcibr_soft, bridge_ate_p ate_ptr, int ate_count, bridge_ate_t ate);
extern void ate_thaw(pcibr_dmamap_t pcibr_dmamap, int ate_index,
#if PCIBR_FREEZE_TIME
bridge_ate_t ate,
int ate_total,
unsigned freeze_time_start,
#endif
unsigned *cmd_regs,
unsigned s);
extern void ate_write(pcibr_soft_t, int, int, bridge_ate_t);
pcibr_info_t pcibr_info_get(vertex_hdl_t);
@@ -156,7 +92,7 @@ iopaddr_t pcibr_piospace_alloc(vertex_hdl_t, device_desc_t, pciio_
void pcibr_piospace_free(vertex_hdl_t, pciio_space_t, iopaddr_t, size_t);
static iopaddr_t pcibr_flags_to_d64(unsigned, pcibr_soft_t);
extern bridge_ate_t pcibr_flags_to_ate(unsigned);
extern bridge_ate_t pcibr_flags_to_ate(pcibr_soft_t, unsigned);
pcibr_dmamap_t pcibr_dmamap_alloc(vertex_hdl_t, device_desc_t, size_t, unsigned);
void pcibr_dmamap_free(pcibr_dmamap_t);
@@ -171,74 +107,18 @@ void pcibr_dmaaddr_drain(vertex_hdl_t, paddr_t, size_t);
void pcibr_dmalist_drain(vertex_hdl_t, alenlist_t);
iopaddr_t pcibr_dmamap_pciaddr_get(pcibr_dmamap_t);
extern unsigned pcibr_intr_bits(pciio_info_t info,
pciio_intr_line_t lines, int nslots);
extern pcibr_intr_t pcibr_intr_alloc(vertex_hdl_t, device_desc_t, pciio_intr_line_t, vertex_hdl_t);
extern void pcibr_intr_free(pcibr_intr_t);
extern void pcibr_setpciint(xtalk_intr_t);
extern int pcibr_intr_connect(pcibr_intr_t, intr_func_t, intr_arg_t);
extern void pcibr_intr_disconnect(pcibr_intr_t);
extern vertex_hdl_t pcibr_intr_cpu_get(pcibr_intr_t);
extern void pcibr_intr_func(intr_arg_t);
extern void print_bridge_errcmd(uint32_t, char *);
extern void pcibr_error_dump(pcibr_soft_t);
extern uint32_t pcibr_errintr_group(uint32_t);
extern void pcibr_pioerr_check(pcibr_soft_t);
extern void pcibr_error_intr_handler(int, void *, struct pt_regs *);
extern int pcibr_addr_toslot(pcibr_soft_t, iopaddr_t, pciio_space_t *, iopaddr_t *, pciio_function_t *);
extern void pcibr_error_cleanup(pcibr_soft_t, int);
extern void pcibr_device_disable(pcibr_soft_t, int);
extern int pcibr_pioerror(pcibr_soft_t, int, ioerror_mode_t, ioerror_t *);
extern int pcibr_dmard_error(pcibr_soft_t, int, ioerror_mode_t, ioerror_t *);
extern int pcibr_dmawr_error(pcibr_soft_t, int, ioerror_mode_t, ioerror_t *);
extern int pcibr_error_handler(error_handler_arg_t, int, ioerror_mode_t, ioerror_t *);
extern int pcibr_error_handler_wrapper(error_handler_arg_t, int, ioerror_mode_t, ioerror_t *);
void pcibr_provider_startup(vertex_hdl_t);
void pcibr_provider_shutdown(vertex_hdl_t);
int pcibr_reset(vertex_hdl_t);
pciio_endian_t pcibr_endian_set(vertex_hdl_t, pciio_endian_t, pciio_endian_t);
int pcibr_priority_bits_set(pcibr_soft_t, pciio_slot_t, pciio_priority_t);
pciio_priority_t pcibr_priority_set(vertex_hdl_t, pciio_priority_t);
int pcibr_device_flags_set(vertex_hdl_t, pcibr_device_flags_t);
extern cfg_p pcibr_config_addr(vertex_hdl_t, unsigned);
extern uint64_t pcibr_config_get(vertex_hdl_t, unsigned, unsigned);
extern void pcibr_config_set(vertex_hdl_t, unsigned, unsigned, uint64_t);
extern pcibr_hints_t pcibr_hints_get(vertex_hdl_t, int);
extern void pcibr_hints_fix_rrbs(vertex_hdl_t);
extern void pcibr_hints_dualslot(vertex_hdl_t, pciio_slot_t, pciio_slot_t);
extern void pcibr_hints_intr_bits(vertex_hdl_t, pcibr_intr_bits_f *);
extern void pcibr_set_rrb_callback(vertex_hdl_t, rrb_alloc_funct_t);
extern void pcibr_hints_handsoff(vertex_hdl_t);
extern void pcibr_hints_subdevs(vertex_hdl_t, pciio_slot_t, uint64_t);
extern int pcibr_slot_info_init(vertex_hdl_t,pciio_slot_t);
extern int pcibr_slot_info_free(vertex_hdl_t,pciio_slot_t);
extern int pcibr_slot_info_return(pcibr_soft_t, pciio_slot_t,
pcibr_slot_info_resp_t);
extern void pcibr_slot_func_info_return(pcibr_info_h, int,
pcibr_slot_func_info_resp_t);
extern int pcibr_slot_addr_space_init(vertex_hdl_t,pciio_slot_t);
extern int pcibr_slot_pcix_rbar_init(pcibr_soft_t, pciio_slot_t);
extern int pcibr_slot_device_init(vertex_hdl_t, pciio_slot_t);
extern int pcibr_slot_guest_info_init(vertex_hdl_t,pciio_slot_t);
extern int pcibr_slot_call_device_attach(vertex_hdl_t,
pciio_slot_t, int);
extern int pcibr_slot_call_device_detach(vertex_hdl_t,
pciio_slot_t, int);
extern int pcibr_slot_attach(vertex_hdl_t, pciio_slot_t, int,
char *, int *);
extern int pcibr_slot_detach(vertex_hdl_t, pciio_slot_t, int,
char *, int *);
extern int pcibr_slot_initial_rrb_alloc(vertex_hdl_t, pciio_slot_t);
extern int pcibr_initial_rrb(vertex_hdl_t, pciio_slot_t, pciio_slot_t);
pciio_businfo_t pcibr_businfo_get(vertex_hdl_t);
/* =====================================================================
* Device(x) register management
@@ -256,33 +136,23 @@ static int
pcibr_try_set_device(pcibr_soft_t pcibr_soft,
pciio_slot_t slot,
unsigned flags,
bridgereg_t mask)
uint64_t mask)
{
bridge_t *bridge;
pcibr_soft_slot_t slotp;
bridgereg_t old;
bridgereg_t new;
bridgereg_t chg;
bridgereg_t bad;
bridgereg_t badpmu;
bridgereg_t badd32;
bridgereg_t badd64;
bridgereg_t fix;
unsigned long s;
bridgereg_t xmask;
xmask = mask;
if (mask == BRIDGE_DEV_PMU_BITS)
xmask = XBRIDGE_DEV_PMU_BITS;
if (mask == BRIDGE_DEV_D64_BITS)
xmask = XBRIDGE_DEV_D64_BITS;
uint64_t old;
uint64_t new;
uint64_t chg;
uint64_t bad;
uint64_t badpmu;
uint64_t badd32;
uint64_t badd64;
uint64_t fix;
unsigned long s;
slotp = &pcibr_soft->bs_slot[slot];
s = pcibr_lock(pcibr_soft);
bridge = pcibr_soft->bs_base;
old = slotp->bss_device;
/* figure out what the desired
@@ -390,21 +260,21 @@ pcibr_try_set_device(pcibr_soft_t pcibr_soft,
* PIC, can cause problems for 32-bit devices.
*/
if (mask == BRIDGE_DEV_D64_BITS &&
PCIBR_WAR_ENABLED(PV855271, pcibr_soft)) {
if (flags & PCIBR_VCHAN1) {
new |= BRIDGE_DEV_VIRTUAL_EN;
xmask |= BRIDGE_DEV_VIRTUAL_EN;
}
PCIBR_WAR_ENABLED(PV855271, pcibr_soft)) {
if (flags & PCIBR_VCHAN1) {
new |= BRIDGE_DEV_VIRTUAL_EN;
mask |= BRIDGE_DEV_VIRTUAL_EN;
}
}
/* PIC BRINGUP WAR (PV# 878674): Don't allow 64bit PIO accesses */
if (IS_PIC_SOFT(pcibr_soft) && (flags & PCIBR_64BIT) &&
if ((flags & PCIBR_64BIT) &&
PCIBR_WAR_ENABLED(PV878674, pcibr_soft)) {
new &= ~(1ull << 22);
}
chg = old ^ new; /* what are we changing, */
chg &= xmask; /* of the interesting bits */
chg &= mask; /* of the interesting bits */
if (chg) {
@@ -476,9 +346,10 @@ pcibr_try_set_device(pcibr_soft_t pcibr_soft,
pcibr_unlock(pcibr_soft, s);
return 0;
}
bridge->b_device[slot].reg = new;
pcireg_device_set(pcibr_soft, slot, new);
slotp->bss_device = new;
bridge->b_wid_tflush; /* wait until Bridge PIO complete */
pcireg_tflush_get(pcibr_soft); /* wait until Bridge PIO complete */
pcibr_unlock(pcibr_soft, s);
PCIBR_DEBUG((PCIBR_DEBUG_DEVREG, pcibr_soft->bs_vhdl,
@@ -489,7 +360,7 @@ pcibr_try_set_device(pcibr_soft_t pcibr_soft,
void
pcibr_release_device(pcibr_soft_t pcibr_soft,
pciio_slot_t slot,
bridgereg_t mask)
uint64_t mask)
{
pcibr_soft_slot_t slotp;
unsigned long s;
@@ -508,22 +379,6 @@ pcibr_release_device(pcibr_soft_t pcibr_soft,
pcibr_unlock(pcibr_soft, s);
}
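pcibr_try_set_device() above keeps a software shadow of each Device(x) register and touches the hardware only when bits covered by the caller's mask actually change. Condensed from the hunks above, the core of the update now reads:

        old  = slotp->bss_device;               /* software shadow */
        /* ... derive 'new' from the requested flags ... */
        chg  = old ^ new;                       /* what are we changing, */
        chg &= mask;                            /* of the interesting bits */
        if (chg) {
                /* reject conflicting requests; otherwise commit */
                pcireg_device_set(pcibr_soft, slot, new);
                slotp->bss_device = new;
                pcireg_tflush_get(pcibr_soft);  /* wait until Bridge PIO complete */
        }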
/*
* flush write gather buffer for slot
*/
static void
pcibr_device_write_gather_flush(pcibr_soft_t pcibr_soft,
pciio_slot_t slot)
{
bridge_t *bridge;
unsigned long s;
volatile uint32_t wrf;
s = pcibr_lock(pcibr_soft);
bridge = pcibr_soft->bs_base;
wrf = bridge->b_wr_req_buf[slot].reg;
pcibr_unlock(pcibr_soft, s);
}
/* =====================================================================
* Bridge (pcibr) "Device Driver" entry points
@@ -535,7 +390,7 @@ pcibr_mmap(struct file * file, struct vm_area_struct * vma)
{
vertex_hdl_t pcibr_vhdl = file->f_dentry->d_fsdata;
pcibr_soft_t pcibr_soft;
bridge_t *bridge;
void *bridge;
unsigned long phys_addr;
int error = 0;
@@ -683,7 +538,6 @@ pcibr_device_unregister(vertex_hdl_t pconn_vhdl)
vertex_hdl_t pcibr_vhdl;
pciio_slot_t slot;
pcibr_soft_t pcibr_soft;
bridge_t *bridge;
int count_vchan0, count_vchan1;
unsigned long s;
int error_call;
@@ -695,7 +549,6 @@ pcibr_device_unregister(vertex_hdl_t pconn_vhdl)
slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
pcibr_soft = pcibr_soft_get(pcibr_vhdl);
bridge = pcibr_soft->bs_base;
/* Clear all the hardware xtalk resources for this device */
xtalk_widgetdev_shutdown(pcibr_soft->bs_conn, slot);
@@ -718,7 +571,7 @@ pcibr_device_unregister(vertex_hdl_t pconn_vhdl)
pcibr_soft->bs_rrb_valid[slot][VCHAN3];
/* Free the rrbs allocated to this slot, both the normal & virtual */
do_pcibr_rrb_free_all(pcibr_soft, bridge, slot);
do_pcibr_rrb_free_all(pcibr_soft, slot);
count_vchan0 = pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN0];
count_vchan1 = pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN1];
@@ -823,951 +676,6 @@ pcibr_driver_unreg_callback(vertex_hdl_t pconn_vhdl,
}
}
/*
* build a convenience link path in the
* form of ".../<iobrick>/bus/<busnum>"
*
* returns 1 on success, 0 otherwise
*
* depends on hwgraph separator == '/'
*/
int
pcibr_bus_cnvlink(vertex_hdl_t f_c)
{
char dst[MAXDEVNAME];
char *dp = dst;
char *cp, *xp;
int widgetnum;
char pcibus[8];
vertex_hdl_t nvtx, svtx;
int rv;
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, f_c, "pcibr_bus_cnvlink\n"));
if (GRAPH_SUCCESS != hwgraph_vertex_name_get(f_c, dst, MAXDEVNAME))
return 0;
/* dst example == /hw/module/001c02/Pbrick/xtalk/8/pci/direct */
/* find the widget number */
xp = strstr(dst, "/"EDGE_LBL_XTALK"/");
if (xp == NULL)
return 0;
widgetnum = simple_strtoul(xp+7, NULL, 0);
if (widgetnum < XBOW_PORT_8 || widgetnum > XBOW_PORT_F)
return 0;
/* remove "/pci/direct" from path */
cp = strstr(dst, "/" EDGE_LBL_PCI "/" EDGE_LBL_DIRECT);
if (cp == NULL)
return 0;
*cp = (char)NULL;
/* get the vertex for the widget */
if (GRAPH_SUCCESS != hwgraph_traverse(NULL, dp, &svtx))
return 0;
*xp = (char)NULL; /* remove "/xtalk/..." from path */
/* dst example now == /hw/module/001c02/Pbrick */
/* get the bus number */
strcat(dst, "/");
strcat(dst, EDGE_LBL_BUS);
sprintf(pcibus, "%d", p_busnum[widgetnum]);
/* link to bus to widget */
rv = hwgraph_path_add(NULL, dp, &nvtx);
if (GRAPH_SUCCESS == rv)
rv = hwgraph_edge_add(nvtx, svtx, pcibus);
return (rv == GRAPH_SUCCESS);
}
/*
* pcibr_attach: called every time the crosstalk
* infrastructure is asked to initialize a widget
* that matches the part number we handed to the
* registration routine above.
*/
/*ARGSUSED */
int
pcibr_attach(vertex_hdl_t xconn_vhdl)
{
/* REFERENCED */
graph_error_t rc;
vertex_hdl_t pcibr_vhdl;
bridge_t *bridge;
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, xconn_vhdl, "pcibr_attach\n"));
bridge = (bridge_t *)
xtalk_piotrans_addr(xconn_vhdl, NULL,
0, sizeof(bridge_t), 0);
/*
* Create the vertex for the PCI bus, which we
* will also use to hold the pcibr_soft and
* which will be the "master" vertex for all the
* pciio connection points we will hang off it.
* This needs to happen before we call nic_bridge_vertex_info
* as some of the *_vmc functions need access to the edges.
*
* Opening this vertex will provide access to
* the Bridge registers themselves.
*/
rc = hwgraph_path_add(xconn_vhdl, EDGE_LBL_PCI, &pcibr_vhdl);
ASSERT(rc == GRAPH_SUCCESS);
pciio_provider_register(pcibr_vhdl, &pcibr_provider);
pciio_provider_startup(pcibr_vhdl);
return pcibr_attach2(xconn_vhdl, bridge, pcibr_vhdl, 0, NULL);
}
/*ARGSUSED */
int
pcibr_attach2(vertex_hdl_t xconn_vhdl, bridge_t *bridge,
vertex_hdl_t pcibr_vhdl, int busnum, pcibr_soft_t *ret_softp)
{
/* REFERENCED */
vertex_hdl_t ctlr_vhdl;
bridgereg_t id;
int rev;
pcibr_soft_t pcibr_soft;
pcibr_info_t pcibr_info;
xwidget_info_t info;
xtalk_intr_t xtalk_intr;
int slot;
int ibit;
vertex_hdl_t noslot_conn;
char devnm[MAXDEVNAME], *s;
pcibr_hints_t pcibr_hints;
uint64_t int_enable;
picreg_t int_enable_64;
unsigned rrb_fixed = 0;
#if PCI_FBBE
int fast_back_to_back_enable;
#endif
nasid_t nasid;
int iobrick_type_get_nasid(nasid_t nasid);
int iomoduleid_get(nasid_t nasid);
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
"pcibr_attach2: bridge=0x%p, busnum=%d\n", bridge, busnum));
ctlr_vhdl = NULL;
ctlr_vhdl = hwgraph_register(pcibr_vhdl, EDGE_LBL_CONTROLLER, 0,
0, 0, 0,
S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
(struct file_operations *)&pcibr_fops, (void *)pcibr_vhdl);
ASSERT(ctlr_vhdl != NULL);
/*
* Get the hint structure; if some NIC callback
* marked this vertex as "hands-off" then we
* just return here, before doing anything else.
*/
pcibr_hints = pcibr_hints_get(xconn_vhdl, 0);
if (pcibr_hints && pcibr_hints->ph_hands_off)
return -1; /* generic operations disabled */
id = bridge->b_wid_id;
rev = XWIDGET_PART_REV_NUM(id);
hwgraph_info_add_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, (arbitrary_info_t) rev);
/*
* allocate soft state structure, fill in some
* fields, and hook it up to our vertex.
*/
pcibr_soft = kmalloc(sizeof(*(pcibr_soft)), GFP_KERNEL);
if (ret_softp)
*ret_softp = pcibr_soft;
if (!pcibr_soft)
return -1;
memset(pcibr_soft, 0, sizeof *pcibr_soft);
pcibr_soft_set(pcibr_vhdl, pcibr_soft);
pcibr_soft->bs_conn = xconn_vhdl;
pcibr_soft->bs_vhdl = pcibr_vhdl;
pcibr_soft->bs_base = bridge;
pcibr_soft->bs_rev_num = rev;
pcibr_soft->bs_intr_bits = (pcibr_intr_bits_f *)pcibr_intr_bits;
pcibr_soft->bs_min_slot = 0; /* lowest possible slot# */
pcibr_soft->bs_max_slot = 7; /* highest possible slot# */
pcibr_soft->bs_busnum = busnum;
pcibr_soft->bs_bridge_type = PCIBR_BRIDGETYPE_PIC;
switch(pcibr_soft->bs_bridge_type) {
case PCIBR_BRIDGETYPE_BRIDGE:
pcibr_soft->bs_int_ate_size = BRIDGE_INTERNAL_ATES;
pcibr_soft->bs_bridge_mode = 0; /* speed is not available in bridge */
break;
case PCIBR_BRIDGETYPE_PIC:
pcibr_soft->bs_min_slot = 0;
pcibr_soft->bs_max_slot = 3;
pcibr_soft->bs_int_ate_size = XBRIDGE_INTERNAL_ATES;
pcibr_soft->bs_bridge_mode =
(((bridge->p_wid_stat_64 & PIC_STAT_PCIX_SPEED) >> 33) |
((bridge->p_wid_stat_64 & PIC_STAT_PCIX_ACTIVE) >> 33));
/* We have to clear PIC's write request buffer to avoid parity
* errors. See PV#854845.
*/
{
int i;
for (i=0; i < PIC_WR_REQ_BUFSIZE; i++) {
bridge->p_wr_req_lower[i] = 0;
bridge->p_wr_req_upper[i] = 0;
bridge->p_wr_req_parity[i] = 0;
}
}
break;
case PCIBR_BRIDGETYPE_XBRIDGE:
pcibr_soft->bs_int_ate_size = XBRIDGE_INTERNAL_ATES;
pcibr_soft->bs_bridge_mode =
((bridge->b_wid_control & BRIDGE_CTRL_PCI_SPEED) >> 3);
break;
}
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
"pcibr_attach2: pcibr_soft=0x%x, mode=0x%x\n",
pcibr_soft, pcibr_soft->bs_bridge_mode));
pcibr_soft->bsi_err_intr = 0;
/* Bridges up through REV C
* are unable to set the direct
* byteswappers to BYTE_STREAM.
*/
if (pcibr_soft->bs_rev_num <= BRIDGE_PART_REV_C) {
pcibr_soft->bs_pio_end_io = PCIIO_WORD_VALUES;
pcibr_soft->bs_pio_end_mem = PCIIO_WORD_VALUES;
}
#if PCIBR_SOFT_LIST
/*
* link all the pcibr_soft structs
*/
{
pcibr_list_p self;
self = kmalloc(sizeof(*(self)), GFP_KERNEL);
if (!self)
return -1;
memset(self, 0, sizeof(*(self)));
self->bl_soft = pcibr_soft;
self->bl_vhdl = pcibr_vhdl;
self->bl_next = pcibr_list;
pcibr_list = self;
}
#endif /* PCIBR_SOFT_LIST */
/*
* get the name of this bridge vertex and keep the info. Use this
* only where it is really needed now: like error interrupts.
*/
s = dev_to_name(pcibr_vhdl, devnm, MAXDEVNAME);
pcibr_soft->bs_name = kmalloc(strlen(s) + 1, GFP_KERNEL);
strcpy(pcibr_soft->bs_name, s);
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
"pcibr_attach2: %s ASIC: rev %s (code=0x%x)\n",
"PIC",
(rev == BRIDGE_PART_REV_A) ? "A" :
(rev == BRIDGE_PART_REV_B) ? "B" :
(rev == BRIDGE_PART_REV_C) ? "C" :
(rev == BRIDGE_PART_REV_D) ? "D" :
(rev == XBRIDGE_PART_REV_A) ? "A" :
(rev == XBRIDGE_PART_REV_B) ? "B" :
(IS_PIC_PART_REV_A(rev)) ? "A" :
"unknown", rev, pcibr_soft->bs_name));
info = xwidget_info_get(xconn_vhdl);
pcibr_soft->bs_xid = xwidget_info_id_get(info);
pcibr_soft->bs_master = xwidget_info_master_get(info);
pcibr_soft->bs_mxid = xwidget_info_masterid_get(info);
pcibr_soft->bs_first_slot = pcibr_soft->bs_min_slot;
pcibr_soft->bs_last_slot = pcibr_soft->bs_max_slot;
/*
* Bridge can only reset slots 0, 1, 2, and 3. Ibrick internal
* slots 4, 5, 6, and 7 must be reset as a group, so do not
* reset them.
*/
pcibr_soft->bs_last_reset = 3;
nasid = NASID_GET(bridge);
if ((pcibr_soft->bs_bricktype = iobrick_type_get_nasid(nasid)) < 0)
printk(KERN_WARNING "0x%p: Unknown bricktype : 0x%x\n", (void *)xconn_vhdl,
(unsigned int)pcibr_soft->bs_bricktype);
pcibr_soft->bs_moduleid = iomoduleid_get(nasid);
if (pcibr_soft->bs_bricktype > 0) {
switch (pcibr_soft->bs_bricktype) {
case MODULE_PXBRICK:
case MODULE_IXBRICK:
pcibr_soft->bs_first_slot = 0;
pcibr_soft->bs_last_slot = 1;
pcibr_soft->bs_last_reset = 1;
/* If Bus 1 has IO9 then there are 4 devices in that bus. Note
* we figure this out from klconfig since the kernel has yet to
* probe
*/
if (pcibr_widget_to_bus(pcibr_vhdl) == 1) {
lboard_t *brd = (lboard_t *)KL_CONFIG_INFO(nasid);
while (brd) {
if (brd->brd_flags & LOCAL_MASTER_IO6) {
pcibr_soft->bs_last_slot = 3;
pcibr_soft->bs_last_reset = 3;
}
brd = KLCF_NEXT(brd);
}
}
break;
case MODULE_PBRICK:
pcibr_soft->bs_first_slot = 1;
pcibr_soft->bs_last_slot = 2;
pcibr_soft->bs_last_reset = 2;
break;
case MODULE_IBRICK:
/*
* Here's the current baseio layout for SN1 style systems:
*
* 0 1 2 3 4 5 6 7 slot#
*
* x scsi x x ioc3 usb x x O300 Ibrick
*
* x == never occupied
* E == external (add-in) slot
*
*/
pcibr_soft->bs_first_slot = 1; /* Ibrick first slot == 1 */
if (pcibr_soft->bs_xid == 0xe) {
pcibr_soft->bs_last_slot = 2;
pcibr_soft->bs_last_reset = 2;
} else {
pcibr_soft->bs_last_slot = 6;
}
break;
default:
break;
}
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
"pcibr_attach2: %cbrick, slots %d-%d\n",
MODULE_GET_BTCHAR(pcibr_soft->bs_moduleid),
pcibr_soft->bs_first_slot, pcibr_soft->bs_last_slot));
}
/*
* Initialize bridge and bus locks
*/
spin_lock_init(&pcibr_soft->bs_lock);
/*
* If we have one, process the hints structure.
*/
if (pcibr_hints) {
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_HINTS, pcibr_vhdl,
"pcibr_attach2: pcibr_hints=0x%x\n", pcibr_hints));
rrb_fixed = pcibr_hints->ph_rrb_fixed;
pcibr_soft->bs_rrb_fixed = rrb_fixed;
if (pcibr_hints->ph_intr_bits) {
pcibr_soft->bs_intr_bits = pcibr_hints->ph_intr_bits;
}
for (slot = pcibr_soft->bs_min_slot;
slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
int hslot = pcibr_hints->ph_host_slot[slot] - 1;
if (hslot < 0) {
pcibr_soft->bs_slot[slot].host_slot = slot;
} else {
pcibr_soft->bs_slot[slot].has_host = 1;
pcibr_soft->bs_slot[slot].host_slot = hslot;
}
}
}
/*
* Set-up initial values for state fields
*/
for (slot = pcibr_soft->bs_min_slot;
slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
pcibr_soft->bs_slot[slot].bss_devio.bssd_space = PCIIO_SPACE_NONE;
pcibr_soft->bs_slot[slot].bss_devio.bssd_ref_cnt = 0;
pcibr_soft->bs_slot[slot].bss_d64_base = PCIBR_D64_BASE_UNSET;
pcibr_soft->bs_slot[slot].bss_d32_base = PCIBR_D32_BASE_UNSET;
pcibr_soft->bs_slot[slot].bss_ext_ates_active = ATOMIC_INIT(0);
pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN0] = -1;
}
for (ibit = 0; ibit < 8; ++ibit) {
pcibr_soft->bs_intr[ibit].bsi_xtalk_intr = 0;
pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_soft = pcibr_soft;
pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_list = NULL;
pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_stat =
&(bridge->b_int_status);
pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_ibit = ibit;
pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_hdlrcnt = 0;
pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_shared = 0;
pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_connected = 0;
}
/*
* connect up our error handler. PIC has 2 busses (thus resulting in 2
* pcibr_soft structs under 1 widget), so only register a xwidget error
* handler for PIC's bus0. NOTE: for PIC pcibr_error_handler_wrapper()
* is a wrapper routine we register that will call the real error handler
* pcibr_error_handler() with the correct pcibr_soft struct.
*/
if (busnum == 0) {
xwidget_error_register(xconn_vhdl, pcibr_error_handler_wrapper, pcibr_soft);
}
/*
* Initialize various Bridge registers.
*/
/*
* On pre-Rev.D bridges, set the PCI_RETRY_CNT
* to zero to avoid dropping stores. (#475347)
*/
if (rev < BRIDGE_PART_REV_D)
bridge->b_bus_timeout &= ~BRIDGE_BUS_PCI_RETRY_MASK;
/*
* Clear all pending interrupts.
*/
bridge->b_int_rst_stat = (BRIDGE_IRR_ALL_CLR);
/* Initialize some PIC specific registers. */
{
picreg_t pic_ctrl_reg = bridge->p_wid_control_64;
/* Bridges Requester ID: bus = busnum, dev = 0, func = 0 */
pic_ctrl_reg &= ~PIC_CTRL_BUS_NUM_MASK;
pic_ctrl_reg |= PIC_CTRL_BUS_NUM(busnum);
pic_ctrl_reg &= ~PIC_CTRL_DEV_NUM_MASK;
pic_ctrl_reg &= ~PIC_CTRL_FUN_NUM_MASK;
pic_ctrl_reg &= ~PIC_CTRL_NO_SNOOP;
pic_ctrl_reg &= ~PIC_CTRL_RELAX_ORDER;
/* enable parity checking on PICs internal RAM */
pic_ctrl_reg |= PIC_CTRL_PAR_EN_RESP;
pic_ctrl_reg |= PIC_CTRL_PAR_EN_ATE;
/* PIC BRINGUP WAR (PV# 862253): dont enable write request
* parity checking.
*/
if (!PCIBR_WAR_ENABLED(PV862253, pcibr_soft)) {
pic_ctrl_reg |= PIC_CTRL_PAR_EN_REQ;
}
bridge->p_wid_control_64 = pic_ctrl_reg;
}
bridge->b_int_device = (uint32_t) 0x006db6db;
{
bridgereg_t dirmap;
paddr_t paddr;
iopaddr_t xbase;
xwidgetnum_t xport;
iopaddr_t offset;
int num_entries = 0;
int entry;
cnodeid_t cnodeid;
nasid_t nasid;
/* Set the Bridge's 32-bit PCI to XTalk
* Direct Map register to the most useful
* value we can determine. Note that we
* must use a single xid for all of:
* direct-mapped 32-bit DMA accesses
* direct-mapped 64-bit DMA accesses
* DMA accesses through the PMU
* interrupts
* This is the only way to guarantee that
* completion interrupts will reach a CPU
* after all DMA data has reached memory.
* (Of course, there may be a few special
         * drivers/controllers that explicitly manage
* this ordering problem.)
*/
cnodeid = 0; /* default node id */
nasid = COMPACT_TO_NASID_NODEID(cnodeid);
paddr = NODE_OFFSET(nasid) + 0;
/* currently, we just assume that if we ask
* for a DMA mapping to "zero" the XIO
* host will transmute this into a request
* for the lowest hunk of memory.
*/
xbase = xtalk_dmatrans_addr(xconn_vhdl, 0,
paddr, PAGE_SIZE, 0);
if (xbase != XIO_NOWHERE) {
if (XIO_PACKED(xbase)) {
xport = XIO_PORT(xbase);
xbase = XIO_ADDR(xbase);
} else
xport = pcibr_soft->bs_mxid;
offset = xbase & ((1ull << BRIDGE_DIRMAP_OFF_ADDRSHFT) - 1ull);
xbase >>= BRIDGE_DIRMAP_OFF_ADDRSHFT;
dirmap = xport << BRIDGE_DIRMAP_W_ID_SHFT;
if (xbase)
dirmap |= BRIDGE_DIRMAP_OFF & xbase;
else if (offset >= (512 << 20))
dirmap |= BRIDGE_DIRMAP_ADD512;
bridge->b_dir_map = dirmap;
}
/*
* Set bridge's idea of page size according to the system's
* idea of "IO page size". TBD: The idea of IO page size
* should really go away.
*/
/*
* ensure that we write and read without any interruption.
* The read following the write is required for the Bridge war
*/
#if IOPGSIZE == 4096
bridge->p_wid_control_64 &= ~BRIDGE_CTRL_PAGE_SIZE;
#elif IOPGSIZE == 16384
bridge->p_wid_control_64 |= BRIDGE_CTRL_PAGE_SIZE;
#else
<<<Unable to deal with IOPGSIZE >>>;
#endif
bridge->b_wid_control; /* inval addr bug war */
/* Initialize internal mapping entries */
for (entry = 0; entry < pcibr_soft->bs_int_ate_size; entry++) {
bridge->b_int_ate_ram[entry].wr = 0;
}
/*
* Determine if there's external mapping SSRAM on this
* bridge. Set up Bridge control register appropriately,
         * initialize SSRAM, and set software up to manage RAM
* entries as an allocatable resource.
*
* Currently, we just use the rm* routines to manage ATE
* allocation. We should probably replace this with a
* Best Fit allocator.
*
* For now, if we have external SSRAM, avoid using
* the internal ssram: we can't turn PREFETCH on
* when we use the internal SSRAM; and besides,
* this also guarantees that no allocation will
* straddle the internal/external line, so we
* can increment ATE write addresses rather than
* recomparing against BRIDGE_INTERNAL_ATES every
* time.
*/
num_entries = 0;
/* we always have 128 ATEs (512 for Xbridge) inside the chip
* even if disabled for debugging.
*/
pcibr_soft->bs_int_ate_resource.start = 0;
pcibr_soft->bs_int_ate_resource.end = pcibr_soft->bs_int_ate_size - 1;
if (num_entries > pcibr_soft->bs_int_ate_size) {
#if PCIBR_ATE_NOTBOTH /* for debug -- forces us to use external ates */
printk("pcibr_attach: disabling internal ATEs.\n");
pcibr_ate_alloc(pcibr_soft, pcibr_soft->bs_int_ate_size);
#endif
pcibr_soft->bs_ext_ate_resource.start = pcibr_soft->bs_int_ate_size;
pcibr_soft->bs_ext_ate_resource.end = num_entries;
}
pcibr_soft->bs_allocated_ate_res = (void *) kmalloc(pcibr_soft->bs_int_ate_size * sizeof(unsigned long), GFP_KERNEL);
memset(pcibr_soft->bs_allocated_ate_res, 0x0, pcibr_soft->bs_int_ate_size * sizeof(unsigned long));
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATE, pcibr_vhdl,
"pcibr_attach2: %d ATEs, %d internal & %d external\n",
num_entries ? num_entries : pcibr_soft->bs_int_ate_size,
pcibr_soft->bs_int_ate_size,
num_entries ? num_entries-pcibr_soft->bs_int_ate_size : 0));
}
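The bookkeeping set up above (bs_int_ate_resource plus one zeroed unsigned long per internal ATE in bs_allocated_ate_res) is consumed by pcibr_ate_alloc()/pcibr_ate_free() in pcibr_ate.c, which are outside this hunk. The standalone sketch below shows one plausible first-fit scheme over such an array; the names ate_alloc/ate_free and the convention of recording the run length in the first word are assumptions for illustration, not taken from the source.

#include <stdio.h>
#include <string.h>

#define NUM_ATES 128                    /* 128 internal ATEs on Bridge, 512 on XBridge */

static unsigned long ate_res[NUM_ATES]; /* 0 = free, else run length at the base */

/* First-fit allocation of 'count' consecutive ATEs; returns index or -1. */
static int ate_alloc(int count)
{
        int i, j;

        for (i = 0; i + count <= NUM_ATES; i++) {
                for (j = 0; j < count; j++)
                        if (ate_res[i + j])
                                break;
                if (j == count) {
                        for (j = 1; j < count; j++)
                                ate_res[i + j] = 1;
                        ate_res[i] = count;     /* run length kept at the base entry */
                        return i;
                }
                i += j;                         /* skip past the busy entry */
        }
        return -1;
}

static void ate_free(int index)
{
        memset(&ate_res[index], 0, ate_res[index] * sizeof(ate_res[0]));
}

int main(void)
{
        int a = ate_alloc(4), b = ate_alloc(8);

        printf("a=%d b=%d\n", a, b);            /* expect a=0 b=4 */
        ate_free(a);
        printf("re-alloc=%d\n", ate_alloc(2));  /* reuses the freed range */
        return 0;
}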
{
bridgereg_t dirmap;
iopaddr_t xbase;
/*
* now figure the *real* xtalk base address
* that dirmap sends us to.
*/
dirmap = bridge->b_dir_map;
if (dirmap & BRIDGE_DIRMAP_OFF)
xbase = (iopaddr_t)(dirmap & BRIDGE_DIRMAP_OFF)
<< BRIDGE_DIRMAP_OFF_ADDRSHFT;
else if (dirmap & BRIDGE_DIRMAP_ADD512)
xbase = 512 << 20;
else
xbase = 0;
pcibr_soft->bs_dir_xbase = xbase;
/* it is entirely possible that we may, at this
* point, have our dirmap pointing somewhere
* other than our "master" port.
*/
pcibr_soft->bs_dir_xport =
(dirmap & BRIDGE_DIRMAP_W_ID) >> BRIDGE_DIRMAP_W_ID_SHFT;
}
/* pcibr sources an error interrupt;
* figure out where to send it.
*
* If any interrupts are enabled in bridge,
* then the prom set us up and our interrupt
* has already been reconnected in mlreset
* above.
*
* Need to set the D_INTR_ISERR flag
* in the dev_desc used for allocating the
* error interrupt, so our interrupt will
* be properly routed and prioritized.
*
* If our crosstalk provider wants to
* fix widget error interrupts to specific
* destinations, D_INTR_ISERR is how it
* knows to do this.
*/
xtalk_intr = xtalk_intr_alloc(xconn_vhdl, (device_desc_t)0, pcibr_vhdl);
{
int irq = ((hub_intr_t)xtalk_intr)->i_bit;
int cpu = ((hub_intr_t)xtalk_intr)->i_cpuid;
intr_unreserve_level(cpu, irq);
((hub_intr_t)xtalk_intr)->i_bit = SGI_PCIBR_ERROR;
}
ASSERT(xtalk_intr != NULL);
pcibr_soft->bsi_err_intr = xtalk_intr;
/*
* On IP35 with XBridge, we do some extra checks in pcibr_setwidint
* in order to work around some addressing limitations. In order
* for that fire wall to work properly, we need to make sure we
* start from a known clean state.
*/
pcibr_clearwidint(bridge);
xtalk_intr_connect(xtalk_intr, (intr_func_t) pcibr_error_intr_handler,
(intr_arg_t) pcibr_soft, (xtalk_intr_setfunc_t)pcibr_setwidint, (void *)bridge);
request_irq(SGI_PCIBR_ERROR, (void *)pcibr_error_intr_handler, SA_SHIRQ, "PCIBR error",
(intr_arg_t) pcibr_soft);
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_vhdl,
"pcibr_setwidint: b_wid_int_upper=0x%x, b_wid_int_lower=0x%x\n",
bridge->b_wid_int_upper, bridge->b_wid_int_lower));
/*
* now we can start handling error interrupts;
* enable all of them.
* NOTE: some PCI ints may already be enabled.
*/
int_enable_64 = bridge->p_int_enable_64 | BRIDGE_ISR_ERRORS;
int_enable = (uint64_t)int_enable_64;
#if BRIDGE_ERROR_INTR_WAR
if (pcibr_soft->bs_rev_num == BRIDGE_PART_REV_A) {
/*
* We commonly get master timeouts when talking to ql.
* We also see RESP_XTALK_ERROR and LLP_TX_RETRY interrupts.
         * Ensure that these are all disabled for now.
*/
int_enable &= ~(BRIDGE_IMR_PCI_MST_TIMEOUT |
BRIDGE_ISR_RESP_XTLK_ERR |
BRIDGE_ISR_LLP_TX_RETRY);
}
if (pcibr_soft->bs_rev_num < BRIDGE_PART_REV_C) {
int_enable &= ~BRIDGE_ISR_BAD_XRESP_PKT;
}
#endif /* BRIDGE_ERROR_INTR_WAR */
#ifdef QL_SCSI_CTRL_WAR /* for IP30 only */
/* Really a QL rev A issue, but all newer hearts have newer QLs.
* Forces all IO6/MSCSI to be new.
*/
if (heart_rev() == HEART_REV_A)
int_enable &= ~BRIDGE_IMR_PCI_MST_TIMEOUT;
#endif
#ifdef BRIDGE1_TIMEOUT_WAR
if (pcibr_soft->bs_rev_num == BRIDGE_PART_REV_A) {
/*
* Turn off these interrupts. They can't be trusted in bridge 1
*/
int_enable &= ~(BRIDGE_IMR_XREAD_REQ_TIMEOUT |
BRIDGE_IMR_UNEXP_RESP);
}
#endif
/* PIC BRINGUP WAR (PV# 856864 & 856865): allow the tnums that are
* locked out to be freed up sooner (by timing out) so that the
* read tnums are never completely used up.
*/
if (PCIBR_WAR_ENABLED(PV856864, pcibr_soft)) {
int_enable &= ~PIC_ISR_PCIX_REQ_TOUT;
int_enable &= ~BRIDGE_ISR_XREAD_REQ_TIMEOUT;
bridge->b_wid_req_timeout = 0x750;
}
/*
* PIC BRINGUP WAR (PV# 856866, 859504, 861476, 861478): Don't use
* RRB0, RRB8, RRB1, and RRB9. Assign them to DEVICE[2|3]--VCHAN3
* so they are not used
*/
if (PCIBR_WAR_ENABLED(PV856866, pcibr_soft)) {
bridge->b_even_resp |= 0x000f000f;
bridge->b_odd_resp |= 0x000f000f;
}
bridge->p_int_enable_64 = (picreg_t)int_enable;
bridge->b_int_mode = 0; /* do not send "clear interrupt" packets */
bridge->b_wid_tflush; /* wait until Bridge PIO complete */
/*
* Depending on the rev of bridge, disable certain features.
* Easiest way seems to be to force the PCIBR_NOwhatever
* flag to be on for all DMA calls, which overrides any
* PCIBR_whatever flag or even the setting of whatever
* from the PCIIO_DMA_class flags (or even from the other
* PCIBR flags, since NO overrides YES).
*/
pcibr_soft->bs_dma_flags = 0;
/* PREFETCH:
* Always completely disabled for REV.A;
* at "pcibr_prefetch_enable_rev", anyone
* asking for PCIIO_PREFETCH gets it.
* Between these two points, you have to ask
* for PCIBR_PREFETCH, which promises that
* your driver knows about known Bridge WARs.
*/
if (pcibr_soft->bs_rev_num < BRIDGE_PART_REV_B)
pcibr_soft->bs_dma_flags |= PCIBR_NOPREFETCH;
else if (pcibr_soft->bs_rev_num <
(BRIDGE_WIDGET_PART_NUM << 4))
pcibr_soft->bs_dma_flags |= PCIIO_NOPREFETCH;
/* WRITE_GATHER: Disabled */
if (pcibr_soft->bs_rev_num <
(BRIDGE_WIDGET_PART_NUM << 4))
pcibr_soft->bs_dma_flags |= PCIBR_NOWRITE_GATHER;
/* PIC only supports 64-bit direct mapping in PCI-X mode. Since
* all PCI-X devices that initiate memory transactions must be
     * capable of generating 64-bit addresses, we force 64-bit DMAs.
*/
if (IS_PCIX(pcibr_soft)) {
pcibr_soft->bs_dma_flags |= PCIIO_DMA_A64;
}
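The "NO overrides YES" rule from the comment above is the whole point of bs_dma_flags: the bus-wide NO bits are merged into whatever a caller requests, and a NO bit in the merged value always wins. A tiny model of that policy (the flag values here are invented; the real ones come from pciio.h/pcibr.h):

#include <stdio.h>

#define F_PREFETCH      0x1     /* illustrative stand-in for PCIIO_PREFETCH */
#define F_NOPREFETCH    0x2     /* illustrative stand-in for PCIBR_NOPREFETCH */

/* Resolve a feature request: any NO flag wins over any YES flag. */
static int prefetch_enabled(unsigned bus_flags, unsigned call_flags)
{
        unsigned merged = bus_flags | call_flags;

        if (merged & F_NOPREFETCH)              /* NO overrides YES */
                return 0;
        return (merged & F_PREFETCH) != 0;
}

int main(void)
{
        unsigned bus_flags = F_NOPREFETCH;      /* e.g. a pre-Rev.B bridge */

        printf("%d\n", prefetch_enabled(bus_flags, F_PREFETCH));   /* 0 */
        printf("%d\n", prefetch_enabled(0, F_PREFETCH));           /* 1 */
        return 0;
}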
{
iopaddr_t prom_base_addr = pcibr_soft->bs_xid << 24;
int prom_base_size = 0x1000000;
int status;
struct resource *res;
/* Allocate resource maps based on bus page size; for I/O and memory
* space, free all pages except those in the base area and in the
* range set by the PROM.
*
* PROM creates BAR addresses in this format: 0x0ws00000 where w is
* the widget number and s is the device register offset for the slot.
*/
/* Setup the Bus's PCI IO Root Resource. */
pcibr_soft->bs_io_win_root_resource.start = PCIBR_BUS_IO_BASE;
pcibr_soft->bs_io_win_root_resource.end = 0xffffffff;
res = (struct resource *) kmalloc( sizeof(struct resource), GFP_KERNEL);
if (!res)
panic("PCIBR:Unable to allocate resource structure\n");
/* Block off the range used by PROM. */
res->start = prom_base_addr;
res->end = prom_base_addr + (prom_base_size - 1);
status = request_resource(&pcibr_soft->bs_io_win_root_resource, res);
if (status)
panic("PCIBR:Unable to request_resource()\n");
/* Setup the Small Window Root Resource */
pcibr_soft->bs_swin_root_resource.start = PAGE_SIZE;
pcibr_soft->bs_swin_root_resource.end = 0x000FFFFF;
/* Setup the Bus's PCI Memory Root Resource */
pcibr_soft->bs_mem_win_root_resource.start = 0x200000;
pcibr_soft->bs_mem_win_root_resource.end = 0xffffffff;
res = (struct resource *) kmalloc( sizeof(struct resource), GFP_KERNEL);
if (!res)
panic("PCIBR:Unable to allocate resource structure\n");
/* Block off the range used by PROM. */
res->start = prom_base_addr;
        res->end = prom_base_addr + (prom_base_size - 1);
status = request_resource(&pcibr_soft->bs_mem_win_root_resource, res);
if (status)
panic("PCIBR:Unable to request_resource()\n");
}
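The PROM BAR convention noted above (addresses of the form 0x0ws00000) puts the PROM-owned window for widget w at w << 24 and makes it 16 MB long, which is exactly the range blocked off from both root resources. A quick standalone check of that arithmetic, with an example widget number:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t widget_id = 0xd;               /* example widget number */
        uint32_t prom_base = widget_id << 24;   /* 0x0d000000 */
        uint32_t prom_size = 0x1000000;         /* 16 MB reserved for PROM BARs */

        /* The I/O and memory root resources run up to 0xffffffff with this
         * window carved out, so later BAR assignments never collide with
         * addresses the PROM already handed out. */
        printf("PROM window: 0x%08x - 0x%08x\n",
               prom_base, prom_base + prom_size - 1);
        return 0;
}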
/* build "no-slot" connection point
*/
pcibr_info = pcibr_device_info_new
(pcibr_soft, PCIIO_SLOT_NONE, PCIIO_FUNC_NONE,
PCIIO_VENDOR_ID_NONE, PCIIO_DEVICE_ID_NONE);
noslot_conn = pciio_device_info_register
(pcibr_vhdl, &pcibr_info->f_c);
/* Remember the no slot connection point info for tearing it
* down during detach.
*/
pcibr_soft->bs_noslot_conn = noslot_conn;
pcibr_soft->bs_noslot_info = pcibr_info;
#if PCI_FBBE
fast_back_to_back_enable = 1;
#endif
#if PCI_FBBE
if (fast_back_to_back_enable) {
/*
* All devices on the bus are capable of fast back to back, so
* we need to set the fast back to back bit in all devices on
* the bus that are capable of doing such accesses.
*/
}
#endif
for (slot = pcibr_soft->bs_min_slot;
slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
/* Find out what is out there */
(void)pcibr_slot_info_init(pcibr_vhdl,slot);
}
for (slot = pcibr_soft->bs_min_slot;
slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
/* Set up the address space for this slot in the PCI land */
(void)pcibr_slot_addr_space_init(pcibr_vhdl, slot);
for (slot = pcibr_soft->bs_min_slot;
slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
/* Setup the device register */
(void)pcibr_slot_device_init(pcibr_vhdl, slot);
if (IS_PCIX(pcibr_soft)) {
pcibr_soft->bs_pcix_rbar_inuse = 0;
pcibr_soft->bs_pcix_rbar_avail = NUM_RBAR;
pcibr_soft->bs_pcix_rbar_percent_allowed =
pcibr_pcix_rbars_calc(pcibr_soft);
for (slot = pcibr_soft->bs_min_slot;
slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
/* Setup the PCI-X Read Buffer Attribute Registers (RBARs) */
(void)pcibr_slot_pcix_rbar_init(pcibr_soft, slot);
}
/* Set up convenience links */
pcibr_bus_cnvlink(pcibr_soft->bs_vhdl);
for (slot = pcibr_soft->bs_min_slot;
slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
/* Setup host/guest relations */
(void)pcibr_slot_guest_info_init(pcibr_vhdl, slot);
/* Handle initial RRB management for Bridge and Xbridge */
pcibr_initial_rrb(pcibr_vhdl,
pcibr_soft->bs_first_slot, pcibr_soft->bs_last_slot);
{ /* Before any drivers get called that may want to re-allocate
* RRB's, let's get some special cases pre-allocated. Drivers
* may override these pre-allocations, but by doing pre-allocations
* now we're assured not to step all over what the driver intended.
*
* Note: Someday this should probably be moved over to pcibr_rrb.c
*/
/*
* Each Pbrick PCI bus only has slots 1 and 2. Similarly for
* widget 0xe on Ibricks. Allocate RRB's accordingly.
*/
if (pcibr_soft->bs_bricktype > 0) {
switch (pcibr_soft->bs_bricktype) {
case MODULE_PBRICK:
do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 8);
break;
case MODULE_IBRICK:
/* port 0xe on the Ibrick only has slots 1 and 2 */
if (pcibr_soft->bs_xid == 0xe) {
do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 8);
}
else {
/* allocate one RRB for the serial port */
do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 1);
}
break;
case MODULE_PXBRICK:
case MODULE_IXBRICK:
/*
* If the IO9 is in the PXBrick (bus1, slot1) allocate
* RRBs to all the devices
*/
if ((pcibr_widget_to_bus(pcibr_vhdl) == 1) &&
(pcibr_soft->bs_slot[0].bss_vendor_id == 0x10A9) &&
(pcibr_soft->bs_slot[0].bss_device_id == 0x100A)) {
do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 4);
do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 4);
do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 4);
do_pcibr_rrb_autoalloc(pcibr_soft, 3, VCHAN0, 4);
} else {
do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 8);
do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
}
break;
} /* switch */
}
#ifdef LATER
if (strstr(nicinfo, XTALK_PCI_PART_NUM)) {
do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
}
#endif
} /* OK Special RRB allocations are done. */
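For reference, the brick-specific pre-allocations in the switch above can be restated as a table. The sketch below is data-equivalent to the switch (RRB counts per slot, all on VCHAN0); the table names are invented and the real allocator call is stubbed out as a comment.

#include <stdio.h>

struct rrb_prealloc { int slot; int nrrbs; };

/* Same counts as the switch in pcibr_attach2(). */
static const struct rrb_prealloc pbrick_tab[]     = { {1, 8}, {2, 8}, {-1, 0} };
static const struct rrb_prealloc ibrick_0xe_tab[] = { {1, 8}, {2, 8}, {-1, 0} };
static const struct rrb_prealloc ibrick_tab[]     = { {0, 1}, {-1, 0} };
static const struct rrb_prealloc io9_tab[]        = { {0, 4}, {1, 4}, {2, 4}, {3, 4}, {-1, 0} };
static const struct rrb_prealloc pxbrick_tab[]    = { {0, 8}, {1, 8}, {-1, 0} };

static void apply(const struct rrb_prealloc *tab)
{
        for (; tab->slot >= 0; tab++) {
                /* real code: do_pcibr_rrb_autoalloc(soft, tab->slot, VCHAN0, tab->nrrbs); */
                printf("  slot %d: %d RRBs\n", tab->slot, tab->nrrbs);
        }
}

int main(void)
{
        printf("PBRICK:\n");                  apply(pbrick_tab);
        printf("IBRICK, widget 0xe:\n");      apply(ibrick_0xe_tab);
        printf("IBRICK, other widgets:\n");   apply(ibrick_tab);
        printf("PX/IXBRICK with IO9:\n");     apply(io9_tab);
        printf("PX/IXBRICK without IO9:\n");  apply(pxbrick_tab);
        return 0;
}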
for (slot = pcibr_soft->bs_min_slot;
slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
/* Call the device attach */
(void)pcibr_slot_call_device_attach(pcibr_vhdl, slot, 0);
pciio_device_attach(noslot_conn, (int)0);
return 0;
}
/*
* pcibr_detach:
* Detach the bridge device from the hwgraph after cleaning out all the
......@@ -1777,11 +685,10 @@ pcibr_attach2(vertex_hdl_t xconn_vhdl, bridge_t *bridge,
int
pcibr_detach(vertex_hdl_t xconn)
{
pciio_slot_t slot;
vertex_hdl_t pcibr_vhdl;
pcibr_soft_t pcibr_soft;
bridge_t *bridge;
unsigned s;
pciio_slot_t slot;
vertex_hdl_t pcibr_vhdl;
pcibr_soft_t pcibr_soft;
unsigned long s;
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DETACH, xconn, "pcibr_detach\n"));
......@@ -1790,12 +697,10 @@ pcibr_detach(vertex_hdl_t xconn)
return(1);
pcibr_soft = pcibr_soft_get(pcibr_vhdl);
bridge = pcibr_soft->bs_base;
s = pcibr_lock(pcibr_soft);
/* Disable the interrupts from the bridge */
bridge->p_int_enable_64 = 0;
s = pcibr_lock(pcibr_soft);
pcireg_intr_enable_set(pcibr_soft, 0);
pcibr_unlock(pcibr_soft, s);
/* Detach all the PCI devices talking to this bridge */
......@@ -1831,18 +736,60 @@ pcibr_detach(vertex_hdl_t xconn)
return(0);
}
/*
* Set the Bridge's 32-bit PCI to XTalk Direct Map register to the most useful
* value we can determine. Note that we must use a single xid for all of:
* -direct-mapped 32-bit DMA accesses
* -direct-mapped 64-bit DMA accesses
* -DMA accesses through the PMU
* -interrupts
* This is the only way to guarantee that completion interrupts will reach a
* CPU after all DMA data has reached memory.
*/
void
pcibr_directmap_init(pcibr_soft_t pcibr_soft)
{
paddr_t paddr;
iopaddr_t xbase;
uint64_t diroff;
cnodeid_t cnodeid = 0; /* We need api for diroff api */
nasid_t nasid;
nasid = COMPACT_TO_NASID_NODEID(cnodeid);
paddr = NODE_OFFSET(nasid) + 0;
/* Assume that if we ask for a DMA mapping to zero the XIO host will
* transmute this into a request for the lowest hunk of memory.
*/
xbase = xtalk_dmatrans_addr(pcibr_soft->bs_conn, 0, paddr, PAGE_SIZE, 0);
diroff = xbase >> BRIDGE_DIRMAP_OFF_ADDRSHFT;
pcireg_dirmap_diroff_set(pcibr_soft, diroff);
pcireg_dirmap_wid_set(pcibr_soft, pcibr_soft->bs_mxid);
pcibr_soft->bs_dir_xport = pcibr_soft->bs_mxid;
if (xbase == (512 << 20)) { /* 512Meg */
pcireg_dirmap_add512_set(pcibr_soft);
pcibr_soft->bs_dir_xbase = (512 << 20);
} else {
pcireg_dirmap_add512_clr(pcibr_soft);
pcibr_soft->bs_dir_xbase = diroff << BRIDGE_DIRMAP_OFF_ADDRSHFT;
}
}
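A standalone restatement of the arithmetic in pcibr_directmap_init(): the xtalk base shifted down by the DIR_OFF address shift becomes the diroff field, and a base of exactly 512 MB is instead expressed with the ADD512 bit. The shift value below is assumed for illustration only; the authoritative constant is BRIDGE_DIRMAP_OFF_ADDRSHFT in the bridge headers.

#include <stdio.h>
#include <stdint.h>

#define DIRMAP_OFF_ADDRSHFT 31  /* assumed here; see BRIDGE_DIRMAP_OFF_ADDRSHFT */

struct dirmap { uint64_t diroff; int add512; };

/* Compute the direct-map settings for a given xtalk base address. */
static struct dirmap dirmap_for(uint64_t xbase)
{
        struct dirmap d;

        d.diroff = xbase >> DIRMAP_OFF_ADDRSHFT;   /* upper bits -> DIR_OFF field */
        d.add512 = (xbase == (512ULL << 20));      /* exactly 512 MB -> ADD512 bit */
        return d;
}

int main(void)
{
        struct dirmap d = dirmap_for(512ULL << 20);

        printf("512MB base: diroff=0x%llx add512=%d\n",
               (unsigned long long)d.diroff, d.add512);
        d = dirmap_for(4ULL << 30);
        printf("4GB base:   diroff=0x%llx add512=%d\n",
               (unsigned long long)d.diroff, d.add512);
        return 0;
}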
int
pcibr_asic_rev(vertex_hdl_t pconn_vhdl)
{
vertex_hdl_t pcibr_vhdl;
int tmp_vhdl;
vertex_hdl_t pcibr_vhdl;
int rc;
arbitrary_info_t ainfo;
if (GRAPH_SUCCESS !=
hwgraph_traverse(pconn_vhdl, EDGE_LBL_MASTER, &pcibr_vhdl))
return -1;
tmp_vhdl = hwgraph_info_get_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, &ainfo);
rc = hwgraph_info_get_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, &ainfo);
/*
     * Any hwgraph function that returns a vertex handle will implicitly
......@@ -1855,20 +802,10 @@ pcibr_asic_rev(vertex_hdl_t pconn_vhdl)
*/
hwgraph_vertex_unref(pcibr_vhdl);
if (tmp_vhdl != GRAPH_SUCCESS)
if (rc != GRAPH_SUCCESS)
return -1;
return (int) ainfo;
}
int
pcibr_write_gather_flush(vertex_hdl_t pconn_vhdl)
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
pciio_slot_t slot;
slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
pcibr_device_write_gather_flush(pcibr_soft, slot);
return 0;
return (int) ainfo;
}
/* =====================================================================
......@@ -1884,14 +821,12 @@ pcibr_addr_pci_to_xio(vertex_hdl_t pconn_vhdl,
unsigned flags)
{
pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
pciio_info_t pciio_info = &pcibr_info->f_c;
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
bridge_t *bridge = pcibr_soft->bs_base;
unsigned bar; /* which BASE reg on device is decoding */
iopaddr_t xio_addr = XIO_NOWHERE;
iopaddr_t base; /* base of devio(x) mapped area on PCI */
iopaddr_t limit; /* base of devio(x) mapped area on PCI */
iopaddr_t base = 0;
iopaddr_t limit = 0;
pciio_space_t wspace; /* which space device is decoding */
iopaddr_t wbase; /* base of device decode on PCI */
......@@ -2012,7 +947,7 @@ pcibr_addr_pci_to_xio(vertex_hdl_t pconn_vhdl,
maxtry = PCIBR_NUM_SLOTS(pcibr_soft) * 2;
halftry = PCIBR_NUM_SLOTS(pcibr_soft) - 1;
for (try = 0; try < maxtry; ++try) {
bridgereg_t devreg;
uint64_t devreg;
unsigned offset;
/* calculate win based on slot, attempt, and max possible
......@@ -2080,14 +1015,13 @@ pcibr_addr_pci_to_xio(vertex_hdl_t pconn_vhdl,
devreg &= ~BRIDGE_DEV_DEV_SWAP;
if (pcibr_soft->bs_slot[win].bss_device != devreg) {
bridge->b_device[win].reg = devreg;
pcireg_device_set(pcibr_soft, win, devreg);
pcibr_soft->bs_slot[win].bss_device = devreg;
bridge->b_wid_tflush; /* wait until Bridge PIO complete */
#ifdef PCI_LATER
pcireg_tflush_get(pcibr_soft);
PCIBR_DEBUG((PCIBR_DEBUG_DEVREG, pconn_vhdl,
"pcibr_addr_pci_to_xio: Device(%d): 0x%x\n",
win, devreg));
#endif
}
pcibr_soft->bs_slot[win].bss_devio.bssd_space = space;
pcibr_soft->bs_slot[win].bss_devio.bssd_base = mbase;
......@@ -2200,7 +1134,7 @@ pcibr_addr_pci_to_xio(vertex_hdl_t pconn_vhdl,
if (xio_addr != XIO_NOWHERE) {
unsigned bst; /* nonzero to set bytestream */
unsigned *bfp; /* addr of record of how swapper is set */
unsigned swb; /* which control bit to mung */
uint64_t swb; /* which control bit to mung */
unsigned bfo; /* current swapper setting */
unsigned bfn; /* desired swapper setting */
......@@ -2226,13 +1160,13 @@ pcibr_addr_pci_to_xio(vertex_hdl_t pconn_vhdl,
bfn & PCIIO_WORD_VALUES ? " WORD_VALUES" : ""));
xio_addr = XIO_NOWHERE;
} else { /* OK to make the change. */
picreg_t octl, nctl;
swb = (space == PCIIO_SPACE_IO) ? BRIDGE_CTRL_IO_SWAP : BRIDGE_CTRL_MEM_SWAP;
octl = bridge->p_wid_control_64;
nctl = bst ? octl | (uint64_t)swb : octl & ((uint64_t)~swb);
swb = (space == PCIIO_SPACE_IO) ? 0: BRIDGE_CTRL_MEM_SWAP;
if (bst) {
pcireg_control_bit_set(pcibr_soft, swb);
} else {
pcireg_control_bit_clr(pcibr_soft, swb);
}
if (octl != nctl) /* make the change if any */
bridge->b_wid_control = nctl;
*bfp = bfn; /* record the assignment */
PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
......@@ -2324,7 +1258,7 @@ pcibr_piomap_alloc(vertex_hdl_t pconn_vhdl,
pcibr_piomap->bp_pciaddr = pci_addr;
pcibr_piomap->bp_mapsz = req_size;
pcibr_piomap->bp_soft = pcibr_soft;
pcibr_piomap->bp_toc[0] = ATOMIC_INIT(0);
pcibr_piomap->bp_toc = ATOMIC_INIT(0);
if (mapptr) {
s = pcibr_lock(pcibr_soft);
......@@ -2546,6 +1480,7 @@ pcibr_piospace_alloc(vertex_hdl_t pconn_vhdl,
return start_addr;
}
#define ERR_MSG "!Device %s freeing size (0x%lx) different than allocated (0x%lx)"
/*ARGSUSED */
void
pcibr_piospace_free(vertex_hdl_t pconn_vhdl,
......@@ -2775,7 +1710,7 @@ pcibr_dmamap_alloc(vertex_hdl_t pconn_vhdl,
else
min_rrbs = 1;
if (have_rrbs < min_rrbs)
do_pcibr_rrb_autoalloc(pcibr_soft, slot, vchan,
pcibr_rrb_alloc_more(pcibr_soft, slot, vchan,
min_rrbs - have_rrbs);
}
}
......@@ -2845,7 +1780,7 @@ pcibr_dmamap_alloc(vertex_hdl_t pconn_vhdl,
- 1) + 1; /* round UP */
}
ate_index = pcibr_ate_alloc(pcibr_soft, ate_count);
ate_index = pcibr_ate_alloc(pcibr_soft, ate_count, &pcibr_dmamap->resource);
if (ate_index != -1) {
if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_PMU_BITS)) {
......@@ -2857,7 +1792,7 @@ pcibr_dmamap_alloc(vertex_hdl_t pconn_vhdl,
"pcibr_dmamap_alloc: using PMU, ate_index=%d, "
"pcibr_dmamap=0x%lx\n", ate_index, pcibr_dmamap));
ate_proto = pcibr_flags_to_ate(flags);
ate_proto = pcibr_flags_to_ate(pcibr_soft, flags);
pcibr_dmamap->bd_flags = flags;
pcibr_dmamap->bd_pci_addr =
......@@ -2890,7 +1825,7 @@ pcibr_dmamap_alloc(vertex_hdl_t pconn_vhdl,
else
min_rrbs = 1;
if (have_rrbs < min_rrbs)
do_pcibr_rrb_autoalloc(pcibr_soft, slot, vchan,
pcibr_rrb_alloc_more(pcibr_soft, slot, vchan,
min_rrbs - have_rrbs);
}
}
......@@ -2900,7 +1835,7 @@ pcibr_dmamap_alloc(vertex_hdl_t pconn_vhdl,
"pcibr_dmamap_alloc: PMU use failed, ate_index=%d\n",
ate_index));
pcibr_ate_free(pcibr_soft, ate_index, ate_count);
pcibr_ate_free(pcibr_soft, ate_index, ate_count, &pcibr_dmamap->resource);
}
/* total failure: sorry, you just can't
* get from here to there that way.
......@@ -2920,16 +1855,6 @@ pcibr_dmamap_free(pcibr_dmamap_t pcibr_dmamap)
pciio_slot_t slot = PCIBR_SLOT_TO_DEVICE(pcibr_soft,
pcibr_dmamap->bd_slot);
unsigned flags = pcibr_dmamap->bd_flags;
/* Make sure that bss_ext_ates_active
* is properly kept up to date.
*/
if (PCIBR_DMAMAP_BUSY & flags)
if (PCIBR_DMAMAP_SSRAM & flags)
atomic_dec(&(pcibr_soft->bs_slot[slot]. bss_ext_ates_active));
xtalk_dmamap_free(pcibr_dmamap->bd_xtalk);
if (pcibr_dmamap->bd_flags & PCIIO_DMA_A64) {
......@@ -2938,8 +1863,9 @@ pcibr_dmamap_free(pcibr_dmamap_t pcibr_dmamap)
if (pcibr_dmamap->bd_ate_count) {
pcibr_ate_free(pcibr_dmamap->bd_soft,
pcibr_dmamap->bd_ate_index,
pcibr_dmamap->bd_ate_count);
pcibr_release_device(pcibr_soft, slot, BRIDGE_DEV_PMU_BITS);
pcibr_dmamap->bd_ate_count,
&pcibr_dmamap->resource);
pcibr_release_device(pcibr_soft, slot, XBRIDGE_DEV_PMU_BITS);
}
PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
......@@ -2992,7 +1918,7 @@ pcibr_addr_xio_to_pci(pcibr_soft_t soft,
for (slot = soft->bs_min_slot; slot < PCIBR_NUM_SLOTS(soft); ++slot)
if ((xio_addr >= PCIBR_BRIDGE_DEVIO(soft, slot)) &&
(xio_lim < PCIBR_BRIDGE_DEVIO(soft, slot + 1))) {
bridgereg_t dev;
uint64_t dev;
dev = soft->bs_slot[slot].bss_device;
pci_addr = dev & BRIDGE_DEV_OFF_MASK;
......@@ -3091,33 +2017,14 @@ pcibr_dmamap_addr(pcibr_dmamap_t pcibr_dmamap,
paddr, paddr + req_size - 1, xio_port, xio_addr, pci_addr));
} else {
bridge_t *bridge = pcibr_soft->bs_base;
iopaddr_t offset = IOPGOFF(xio_addr);
bridge_ate_t ate_proto = pcibr_dmamap->bd_ate_proto;
int ate_count = IOPG(offset + req_size - 1) + 1;
int ate_index = pcibr_dmamap->bd_ate_index;
unsigned cmd_regs[8];
unsigned s;
#if PCIBR_FREEZE_TIME
int ate_total = ate_count;
unsigned freeze_time;
#endif
bridge_ate_p ate_ptr = pcibr_dmamap->bd_ate_ptr;
bridge_ate_t ate;
/* Bridge Hardware WAR #482836:
* If the transfer is not cache aligned
* and the Bridge Rev is <= B, force
* prefetch to be off.
*/
if (flags & PCIBR_NOPREFETCH)
ate_proto &= ~ATE_PREF;
ate = ate_proto
| (xio_port << ATE_TIDSHIFT)
| (xio_addr - offset);
ate = ate_proto | (xio_addr - offset);
ate |= (xio_port << ATE_TIDSHIFT);
pci_addr = pcibr_dmamap->bd_pci_addr + offset;
......@@ -3128,10 +2035,8 @@ pcibr_dmamap_addr(pcibr_dmamap_t pcibr_dmamap,
ASSERT(ate_count > 0);
if (ate_count <= pcibr_dmamap->bd_ate_count) {
ATE_FREEZE();
ATE_WRITE();
ATE_THAW();
bridge->b_wid_tflush; /* wait until Bridge PIO complete */
ate_write(pcibr_soft, ate_index, ate_count, ate);
PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
"pcibr_dmamap_addr (PMU) : wanted paddr "
"[0x%lx..0x%lx] returning PCI 0x%lx\n",
......@@ -3163,19 +2068,6 @@ pcibr_dmamap_addr(pcibr_dmamap_t pcibr_dmamap,
void
pcibr_dmamap_done(pcibr_dmamap_t pcibr_dmamap)
{
/*
* We could go through and invalidate ATEs here;
* for performance reasons, we don't.
* We also don't enforce the strict alternation
* between _addr/_list and _done, but Hub does.
*/
if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_BUSY) {
pcibr_dmamap->bd_flags &= ~PCIBR_DMAMAP_BUSY;
if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_SSRAM)
atomic_dec(&(pcibr_dmamap->bd_soft->bs_slot[pcibr_dmamap->bd_slot]. bss_ext_ates_active));
}
xtalk_dmamap_done(pcibr_dmamap->bd_xtalk);
PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
......@@ -3281,16 +2173,12 @@ pcibr_dmatrans_addr(vertex_hdl_t pconn_vhdl,
if ((pci_addr != PCIBR_D64_BASE_UNSET) &&
(flags == slotp->bss_d64_flags)) {
pci_addr |= xio_addr
| ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
#if HWG_PERF_CHECK
if (xio_addr != 0x20000000)
#endif
PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
"pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
"xio_port=0x%x, direct64: pci_addr=0x%x\n",
paddr, paddr + req_size - 1, xio_addr, pci_addr));
pci_addr |= xio_addr |
((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
"pcibr_dmatrans_addr: wanted paddr [0x%lx..0x%lx], "
"xio_port=0x%x, direct64: pci_addr=0x%lx\n",
paddr, paddr + req_size - 1, xio_addr, pci_addr));
return (pci_addr);
}
if (!pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D64_BITS)) {
......@@ -3313,7 +2201,7 @@ pcibr_dmatrans_addr(vertex_hdl_t pconn_vhdl,
else
min_rrbs = 1;
if (have_rrbs < min_rrbs)
do_pcibr_rrb_autoalloc(pcibr_soft, pciio_slot, vchan,
pcibr_rrb_alloc_more(pcibr_soft, pciio_slot, vchan,
min_rrbs - have_rrbs);
}
}
......@@ -3394,7 +2282,7 @@ pcibr_dmatrans_addr(vertex_hdl_t pconn_vhdl,
else
min_rrbs = 1;
if (have_rrbs < min_rrbs)
do_pcibr_rrb_autoalloc(pcibr_soft, pciio_slot,
pcibr_rrb_alloc_more(pcibr_soft, pciio_slot,
vchan, min_rrbs - have_rrbs);
}
}
......@@ -3494,8 +2382,8 @@ pcibr_endian_set(vertex_hdl_t pconn_vhdl,
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
bridgereg_t devreg;
unsigned long s;
uint64_t devreg;
unsigned long s;
/*
* Bridge supports hardware swapping; so we can always
......@@ -3514,102 +2402,17 @@ pcibr_endian_set(vertex_hdl_t pconn_vhdl,
* have to change the logic here.
*/
if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
bridge_t *bridge = pcibr_soft->bs_base;
bridge->b_device[pciio_slot].reg = devreg;
pcireg_device_set(pcibr_soft, pciio_slot, devreg);
pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
bridge->b_wid_tflush; /* wait until Bridge PIO complete */
pcireg_tflush_get(pcibr_soft);
}
pcibr_unlock(pcibr_soft, s);
printk("pcibr_endian_set: Device(%d): %x\n", pciio_slot, devreg);
return desired_end;
}
/* This (re)sets the GBR and REALTIME bits and also keeps track of how
* many sets are outstanding. Reset succeeds only if the number of outstanding
* sets == 1.
*/
int
pcibr_priority_bits_set(pcibr_soft_t pcibr_soft,
pciio_slot_t pciio_slot,
pciio_priority_t device_prio)
{
unsigned long s;
int *counter;
bridgereg_t rtbits = 0;
bridgereg_t devreg;
int rc = PRIO_SUCCESS;
/* in dual-slot configurations, the host and the
* guest have separate DMA resources, so they
* have separate requirements for priority bits.
*/
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DEVREG, pconn_vhdl,
"pcibr_endian_set: Device(%d): 0x%x\n",
pciio_slot, devreg));
counter = &(pcibr_soft->bs_slot[pciio_slot].bss_pri_uctr);
/*
* Bridge supports PCI notions of LOW and HIGH priority
* arbitration rings via a "REAL_TIME" bit in the per-device
* Bridge register. The "GBR" bit controls access to the GBR
* ring on the xbow. These two bits are (re)set together.
*
* XXX- Bug in Rev B Bridge Si:
* Symptom: Prefetcher starts operating incorrectly. This happens
* due to corruption of the address storage ram in the prefetcher
* when a non-real time PCI request is pulled and a real-time one is
     * put in its place. Workaround: Use only a single arbitration ring
* on PCI bus. GBR and RR can still be uniquely used per
* device. NETLIST MERGE DONE, WILL BE FIXED IN REV C.
*/
if (pcibr_soft->bs_rev_num != BRIDGE_PART_REV_B)
rtbits |= BRIDGE_DEV_RT;
/* NOTE- if we ever put DEV_RT or DEV_GBR on
* the disabled list, we will have to take
* it into account here.
*/
s = pcibr_lock(pcibr_soft);
devreg = pcibr_soft->bs_slot[pciio_slot].bss_device;
if (device_prio == PCI_PRIO_HIGH) {
if ((++*counter == 1)) {
if (rtbits)
devreg |= rtbits;
else
rc = PRIO_FAIL;
}
} else if (device_prio == PCI_PRIO_LOW) {
if (*counter <= 0)
rc = PRIO_FAIL;
else if (--*counter == 0)
if (rtbits)
devreg &= ~rtbits;
}
if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
bridge_t *bridge = pcibr_soft->bs_base;
bridge->b_device[pciio_slot].reg = devreg;
pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
bridge->b_wid_tflush; /* wait until Bridge PIO complete */
}
pcibr_unlock(pcibr_soft, s);
return rc;
}
pciio_priority_t
pcibr_priority_set(vertex_hdl_t pconn_vhdl,
pciio_priority_t device_prio)
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
(void) pcibr_priority_bits_set(pcibr_soft, pciio_slot, device_prio);
return device_prio;
return desired_end;
}
/*
......@@ -3630,8 +2433,8 @@ pcibr_device_flags_set(vertex_hdl_t pconn_vhdl,
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
bridgereg_t set = 0;
bridgereg_t clr = 0;
uint64_t set = 0;
uint64_t clr = 0;
ASSERT((flags & PCIBR_DEVICE_FLAGS) == flags);
......@@ -3640,11 +2443,6 @@ pcibr_device_flags_set(vertex_hdl_t pconn_vhdl,
if (flags & PCIBR_NOWRITE_GATHER)
clr |= BRIDGE_DEV_PMU_WRGA_EN;
if (flags & PCIBR_WRITE_GATHER)
set |= BRIDGE_DEV_DIR_WRGA_EN;
if (flags & PCIBR_NOWRITE_GATHER)
clr |= BRIDGE_DEV_DIR_WRGA_EN;
if (flags & PCIBR_PREFETCH)
set |= BRIDGE_DEV_PREF;
if (flags & PCIBR_NOPREFETCH)
......@@ -3665,19 +2463,22 @@ pcibr_device_flags_set(vertex_hdl_t pconn_vhdl,
if (flags & PCIBR_NO64BIT)
clr |= BRIDGE_DEV_DEV_SIZE;
/* PIC BRINGUP WAR (PV# 878674): Don't allow 64bit PIO accesses */
if ((flags & PCIBR_64BIT) && PCIBR_WAR_ENABLED(PV878674, pcibr_soft)) {
set &= ~BRIDGE_DEV_DEV_SIZE;
}
if (set || clr) {
bridgereg_t devreg;
unsigned long s;
uint64_t devreg;
unsigned long s;
s = pcibr_lock(pcibr_soft);
devreg = pcibr_soft->bs_slot[pciio_slot].bss_device;
devreg = (devreg & ~clr) | set;
if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
bridge_t *bridge = pcibr_soft->bs_base;
bridge->b_device[pciio_slot].reg = devreg;
pcireg_device_set(pcibr_soft, pciio_slot, devreg);
pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
bridge->b_wid_tflush; /* wait until Bridge PIO complete */
pcireg_tflush_get(pcibr_soft);
}
pcibr_unlock(pcibr_soft, s);
......@@ -3711,7 +2512,7 @@ pcibr_pcix_rbars_calc(pcibr_soft_t pcibr_soft)
printk(KERN_WARNING
"%lx: Must oversubscribe Read Buffer Attribute Registers"
"(RBAR). Bus has %d RBARs but %d funcs need them.\n",
(unsigned long)pcibr_soft->bs_vhdl, NUM_RBAR, pcibr_soft->bs_pcix_num_funcs);
pcibr_soft->bs_name, NUM_RBAR, pcibr_soft->bs_pcix_num_funcs);
percent_allowed = 0;
} else {
percent_allowed = (((NUM_RBAR-pcibr_soft->bs_pcix_num_funcs)*100) /
......@@ -3734,45 +2535,6 @@ pcibr_pcix_rbars_calc(pcibr_soft_t pcibr_soft)
return(percent_allowed);
}
pciio_provider_t pcibr_provider =
{
(pciio_piomap_alloc_f *) pcibr_piomap_alloc,
(pciio_piomap_free_f *) pcibr_piomap_free,
(pciio_piomap_addr_f *) pcibr_piomap_addr,
(pciio_piomap_done_f *) pcibr_piomap_done,
(pciio_piotrans_addr_f *) pcibr_piotrans_addr,
(pciio_piospace_alloc_f *) pcibr_piospace_alloc,
(pciio_piospace_free_f *) pcibr_piospace_free,
(pciio_dmamap_alloc_f *) pcibr_dmamap_alloc,
(pciio_dmamap_free_f *) pcibr_dmamap_free,
(pciio_dmamap_addr_f *) pcibr_dmamap_addr,
(pciio_dmamap_done_f *) pcibr_dmamap_done,
(pciio_dmatrans_addr_f *) pcibr_dmatrans_addr,
(pciio_dmamap_drain_f *) pcibr_dmamap_drain,
(pciio_dmaaddr_drain_f *) pcibr_dmaaddr_drain,
(pciio_dmalist_drain_f *) pcibr_dmalist_drain,
(pciio_intr_alloc_f *) pcibr_intr_alloc,
(pciio_intr_free_f *) pcibr_intr_free,
(pciio_intr_connect_f *) pcibr_intr_connect,
(pciio_intr_disconnect_f *) pcibr_intr_disconnect,
(pciio_intr_cpu_get_f *) pcibr_intr_cpu_get,
(pciio_provider_startup_f *) pcibr_provider_startup,
(pciio_provider_shutdown_f *) pcibr_provider_shutdown,
(pciio_reset_f *) pcibr_reset,
(pciio_write_gather_flush_f *) pcibr_write_gather_flush,
(pciio_endian_set_f *) pcibr_endian_set,
(pciio_priority_set_f *) pcibr_priority_set,
(pciio_config_get_f *) pcibr_config_get,
(pciio_config_set_f *) pcibr_config_set,
(pciio_error_extract_f *) 0,
(pciio_driver_reg_callback_f *) 0,
(pciio_driver_unreg_callback_f *) 0,
(pciio_device_unregister_f *) pcibr_device_unregister,
};
/*
* pcibr_debug() is used to print pcibr debug messages to the console. A
* user enables tracing by setting the following global variables:
......@@ -3794,6 +2556,7 @@ pcibr_debug(uint32_t type, vertex_hdl_t vhdl, char *format, ...)
{
char hwpath[MAXDEVNAME] = "\0";
char copy_of_hwpath[MAXDEVNAME];
char *buffer;
char *module = "all";
short widget = -1;
short slot = -1;
......@@ -3836,29 +2599,46 @@ pcibr_debug(uint32_t type, vertex_hdl_t vhdl, char *format, ...)
(!strcmp(module, pcibr_debug_module) &&
(widget == pcibr_debug_widget) &&
(slot == pcibr_debug_slot))) {
#ifdef LATER
printk("PCIBR_DEBUG<%d>\t: %s :", cpuid(), hwpath);
#else
printk("PCIBR_DEBUG\t: %s :", hwpath);
#endif
/*
* Kernel printk translates to this 3 line sequence.
* Since we have a variable length argument list, we
* need to call printk this way rather than directly
*/
{
char buffer[500];
buffer = kmalloc(1024, GFP_KERNEL);
if (buffer) {
printk("PCIBR_DEBUG<%d>\t: %s :", smp_processor_id(), hwpath);
/*
* KERN_MSG translates to this 3 line sequence. Since
* we have a variable length argument list, we need to
* call KERN_MSG this way rather than directly
*/
va_start(ap, format);
vsnprintf(buffer, 500, format, ap);
memset(buffer, 0, 1024);
vsnprintf(buffer, 1024, format, ap);
va_end(ap);
buffer[499] = (char)0; /* just to be safe */
printk("%s", buffer);
printk("", "%s", buffer);
kfree(buffer);
}
}
}
}
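The gating applied above boils down to: the message type must be enabled in pcibr_debug_mask, and the module/widget/slot extracted from the hardware path must either be unfiltered or match the pcibr_debug_* globals. Part of the condition sits across a hunk boundary here, so the standalone model below is only an approximation, with example values standing in for the globals.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Example filter state mirroring the pcibr_debug_* globals. */
static uint32_t dbg_mask      = 0x2;    /* which PCIBR_DEBUG_* types to print */
static char     dbg_module[8] = "all";  /* "all" means any module */
static short    dbg_widget    = -1;     /* -1 means any widget */
static short    dbg_slot      = -1;     /* -1 means any slot */

static int should_print(uint32_t type, const char *module, short widget, short slot)
{
        if (!(dbg_mask & type))
                return 0;
        if (!strcmp(dbg_module, "all"))
                return 1;
        return !strcmp(module, dbg_module) &&
               widget == dbg_widget && slot == dbg_slot;
}

int main(void)
{
        printf("%d\n", should_print(0x2, "001c01", 0xd, 1));   /* 1: enabled type */
        printf("%d\n", should_print(0x4, "001c01", 0xd, 1));   /* 0: type filtered */
        return 0;
}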
/*
* given a xconn_vhdl and a bus number under that widget, return a
* bridge_t pointer.
*/
void *
pcibr_bridge_ptr_get(vertex_hdl_t widget_vhdl, int bus_num)
{
void *bridge;
bridge = (void *)xtalk_piotrans_addr(widget_vhdl, 0, 0,
sizeof(bridge), 0);
/* PIC ASIC has two bridges (ie. two buses) under a single widget */
if (bus_num == 1) {
bridge = (void *)((char *)bridge + PIC_BUS1_OFFSET);
}
return bridge;
}
int
isIO9(nasid_t nasid) {
lboard_t *brd = (lboard_t *)KL_CONFIG_INFO(nasid);
......
......@@ -45,7 +45,7 @@ uint64_t bridge_errors_to_dump = BRIDGE_ISR_ERROR_FATAL |
BRIDGE_ISR_PCIBUS_PIOERR;
#endif
int pcibr_llp_control_war_cnt; /* PCIBR_LLP_CONTROL_WAR */
int pcibr_pioerr_dump = 1; /* always dump pio errors */
/*
* register values
......@@ -107,29 +107,22 @@ static struct reg_desc xtalk_cmd_bits[] =
#define F(s,n) { 1l<<(s),-(s), n }
static struct reg_values space_v[] =
{
{PCIIO_SPACE_NONE, "none"},
{PCIIO_SPACE_ROM, "ROM"},
{PCIIO_SPACE_IO, "I/O"},
{PCIIO_SPACE_MEM, "MEM"},
{PCIIO_SPACE_MEM32, "MEM(32)"},
{PCIIO_SPACE_MEM64, "MEM(64)"},
{PCIIO_SPACE_CFG, "CFG"},
{PCIIO_SPACE_WIN(0), "WIN(0)"},
{PCIIO_SPACE_WIN(1), "WIN(1)"},
{PCIIO_SPACE_WIN(2), "WIN(2)"},
{PCIIO_SPACE_WIN(3), "WIN(3)"},
{PCIIO_SPACE_WIN(4), "WIN(4)"},
{PCIIO_SPACE_WIN(5), "WIN(5)"},
{PCIIO_SPACE_BAD, "BAD"},
{0}
};
struct reg_desc space_desc[] =
{
{0xFF, 0, "space", 0, space_v},
{0}
};
char *pci_space[] = {"NONE",
"ROM",
"IO",
"",
"MEM",
"MEM32",
"MEM64",
"CFG",
"WIN0",
"WIN1",
"WIN2",
"WIN3",
"WIN4",
"WIN5",
"",
"BAD"};
static char *pcibr_isr_errs[] =
{
......@@ -261,7 +254,7 @@ print_register(unsigned long long reg, struct reg_desc *addr)
static void
pcibr_show_dir_state(paddr_t paddr, char *prefix)
{
#ifdef LATER
#ifdef PCIBR_LATER
int state;
uint64_t vec_ptr;
hubreg_t elo;
......@@ -270,15 +263,18 @@ pcibr_show_dir_state(paddr_t paddr, char *prefix)
get_dir_ent(paddr, &state, &vec_ptr, &elo);
printk("%saddr 0x%lx: state 0x%x owner 0x%lx (%s)\n",
prefix, paddr, state, vec_ptr, dir_state_str[state]);
#endif
printf("%saddr 0x%lx: state 0x%x owner 0x%lx (%s)\n",
prefix, (uint64_t)paddr, state, (uint64_t)vec_ptr,
dir_state_str[state]);
#endif /* PCIBR_LATER */
}
static void
print_bridge_errcmd(uint32_t cmdword, char *errtype)
void
print_bridge_errcmd(pcibr_soft_t pcibr_soft, uint32_t cmdword, char *errtype)
{
printk("\t Bridge %s Error Command Word Register ", errtype);
printk(
"\t Bridge %sError Command Word Register ", errtype);
print_register(cmdword, xtalk_cmd_bits);
}
......@@ -290,20 +286,12 @@ print_bridge_errcmd(uint32_t cmdword, char *errtype)
void
pcibr_error_dump(pcibr_soft_t pcibr_soft)
{
bridge_t *bridge = pcibr_soft->bs_base;
uint64_t int_status;
picreg_t int_status_64;
uint64_t mult_int;
picreg_t mult_int_64;
uint64_t bit;
int number_bits;
int i;
char *reg_desc;
paddr_t addr = (paddr_t)0;
int_status_64 = (bridge->p_int_status_64 & ~BRIDGE_ISR_INT_MSK);
int_status = (uint64_t)int_status_64;
number_bits = PCIBR_ISR_MAX_ERRS_PIC;
int_status = (pcireg_intr_status_get(pcibr_soft) & ~BRIDGE_ISR_INT_MSK);
if (!int_status) {
/* No error bits set */
......@@ -320,21 +308,11 @@ pcibr_error_dump(pcibr_soft_t pcibr_soft)
int_status, pcibr_soft->bs_name,
"PIC");
for (i = PCIBR_ISR_ERR_START; i < number_bits; i++) {
for (i = PCIBR_ISR_ERR_START; i < 64; i++) {
bit = 1ull << i;
/*
* A number of int_status bits are only defined for Bridge.
* Ignore them in the case of an XBridge or PIC.
*/
if (((bit == BRIDGE_ISR_MULTI_ERR) ||
(bit == BRIDGE_ISR_SSRAM_PERR) ||
(bit == BRIDGE_ISR_GIO_B_ENBL_ERR))) {
continue;
}
/* A number of int_status bits are only valid for PIC's bus0 */
if (((pcibr_soft->bs_busnum != 0)) &&
if ((pcibr_soft->bs_busnum != 0) &&
((bit == BRIDGE_ISR_UNSUPPORTED_XOP) ||
(bit == BRIDGE_ISR_LLP_REC_SNERR) ||
(bit == BRIDGE_ISR_LLP_REC_CBERR) ||
......@@ -351,14 +329,14 @@ pcibr_error_dump(pcibr_soft_t pcibr_soft)
case PIC_ISR_INT_RAM_PERR: /* bit41 INT_RAM_PERR */
/* XXX: should breakdown meaning of bits in reg */
printk( "\t Internal RAM Parity Error: 0x%lx\n",
bridge->p_ate_parity_err_64);
printk("\t Internal RAM Parity Error: 0x%lx\n",
pcireg_parity_err_get(pcibr_soft));
break;
case PIC_ISR_PCIX_ARB_ERR: /* bit40 PCI_X_ARB_ERR */
/* XXX: should breakdown meaning of bits in reg */
printk( "\t Arbitration Reg: 0x%lx\n",
bridge->b_arb);
printk("\t Arbitration Reg: 0x%lx\n",
pcireg_arbitration_get(pcibr_soft));
break;
case PIC_ISR_PCIX_REQ_TOUT: /* bit39 PCI_X_REQ_TOUT */
......@@ -366,8 +344,8 @@ pcibr_error_dump(pcibr_soft_t pcibr_soft)
printk(
"\t PCI-X DMA Request Error Address Reg: 0x%lx\n"
"\t PCI-X DMA Request Error Attribute Reg: 0x%lx\n",
bridge->p_pcix_dma_req_err_addr_64,
bridge->p_pcix_dma_req_err_attr_64);
pcireg_pcix_req_err_addr_get(pcibr_soft),
pcireg_pcix_req_err_attr_get(pcibr_soft));
break;
case PIC_ISR_PCIX_SPLIT_MSG_PE: /* bit45 PCI_X_SPLIT_MES_PE */
......@@ -377,8 +355,8 @@ pcibr_error_dump(pcibr_soft_t pcibr_soft)
printk(
"\t PCI-X Split Request Address Reg: 0x%lx\n"
"\t PCI-X Split Request Attribute Reg: 0x%lx\n",
bridge->p_pcix_pio_split_addr_64,
bridge->p_pcix_pio_split_attr_64);
pcireg_pcix_pio_split_addr_get(pcibr_soft),
pcireg_pcix_pio_split_attr_get(pcibr_soft));
/* FALL THRU */
case PIC_ISR_PCIX_UNEX_COMP: /* bit42 PCI_X_UNEX_COMP */
......@@ -394,20 +372,19 @@ pcibr_error_dump(pcibr_soft_t pcibr_soft)
"\t PCI-X Bus Error Address Reg: 0x%lx\n"
"\t PCI-X Bus Error Attribute Reg: 0x%lx\n"
"\t PCI-X Bus Error Data Reg: 0x%lx\n",
bridge->p_pcix_bus_err_addr_64,
bridge->p_pcix_bus_err_attr_64,
bridge->p_pcix_bus_err_data_64);
pcireg_pcix_bus_err_addr_get(pcibr_soft),
pcireg_pcix_bus_err_attr_get(pcibr_soft),
pcireg_pcix_bus_err_data_get(pcibr_soft));
break;
case BRIDGE_ISR_PAGE_FAULT: /* bit30 PMU_PAGE_FAULT */
reg_desc = "Map Fault Address";
printk( "\t %s Register: 0x%x\n", reg_desc,
bridge->b_ram_perr_or_map_fault);
case BRIDGE_ISR_PAGE_FAULT: /* bit30 PMU_PAGE_FAULT */
printk("\t Map Fault Address Reg: 0x%lx\n",
pcireg_map_fault_get(pcibr_soft));
break;
case BRIDGE_ISR_UNEXP_RESP: /* bit29 UNEXPECTED_RESP */
print_bridge_errcmd(bridge->b_wid_aux_err, "Aux ");
case BRIDGE_ISR_UNEXP_RESP: /* bit29 UNEXPECTED_RESP */
print_bridge_errcmd(pcibr_soft,
pcireg_linkside_err_get(pcibr_soft), "Aux ");
/* PIC in PCI-X mode, dump the PCIX DMA Request registers */
if (IS_PCIX(pcibr_soft)) {
......@@ -415,96 +392,98 @@ pcibr_error_dump(pcibr_soft_t pcibr_soft)
printk(
"\t PCI-X DMA Request Error Addr Reg: 0x%lx\n"
"\t PCI-X DMA Request Error Attr Reg: 0x%lx\n",
bridge->p_pcix_dma_req_err_addr_64,
bridge->p_pcix_dma_req_err_attr_64);
pcireg_pcix_req_err_addr_get(pcibr_soft),
pcireg_pcix_req_err_attr_get(pcibr_soft));
}
break;
case BRIDGE_ISR_BAD_XRESP_PKT: /* bit28 BAD_RESP_PACKET */
case BRIDGE_ISR_RESP_XTLK_ERR: /* bit26 RESP_XTALK_ERROR */
print_bridge_errcmd(bridge->b_wid_aux_err, "Aux ");
case BRIDGE_ISR_BAD_XRESP_PKT: /* bit28 BAD_RESP_PACKET */
case BRIDGE_ISR_RESP_XTLK_ERR: /* bit26 RESP_XTALK_ERROR */
print_bridge_errcmd(pcibr_soft,
pcireg_linkside_err_get(pcibr_soft), "Aux ");
/* XXX: should breakdown meaning of attribute bit */
printk(
            /* In PCI-X mode, the DMA Request Error registers are valid, but
             * in PCI mode, the Response Buffer Address register is valid.
*/
if (IS_PCIX(pcibr_soft)) {
/* XXX: should breakdown meaning of attribute bit */
printk(
"\t PCI-X DMA Request Error Addr Reg: 0x%lx\n"
"\t PCI-X DMA Request Error Attribute Reg: 0x%lx\n",
bridge->p_pcix_dma_req_err_addr_64,
bridge->p_pcix_dma_req_err_attr_64);
if (bit == BRIDGE_ISR_RESP_XTLK_ERR) {
pcireg_pcix_req_err_addr_get(pcibr_soft),
pcireg_pcix_req_err_attr_get(pcibr_soft));
} else {
printk(
"\t Bridge Response Buf Error Addr Reg: 0x%lx\n"
"\t dev-num %d buff-num %d addr 0x%lx\n",
pcireg_resp_err_get(pcibr_soft),
(int)pcireg_resp_err_dev_get(pcibr_soft),
(int)pcireg_resp_err_buf_get(pcibr_soft),
pcireg_resp_err_addr_get(pcibr_soft));
if (bit == BRIDGE_ISR_RESP_XTLK_ERR) {
/* display memory directory associated with cacheline */
pcibr_show_dir_state(addr, "\t ");
pcibr_show_dir_state(
pcireg_resp_err_get(pcibr_soft), "\t ");
}
}
break;
case BRIDGE_ISR_BAD_XREQ_PKT: /* bit27 BAD_XREQ_PACKET */
case BRIDGE_ISR_REQ_XTLK_ERR: /* bit25 REQ_XTALK_ERROR */
case BRIDGE_ISR_INVLD_ADDR: /* bit24 INVALID_ADDRESS */
print_bridge_errcmd(bridge->b_wid_err_cmdword, "");
printk(
"\t Bridge Error Upper Address Register: 0x%lx\n"
"\t Bridge Error Lower Address Register: 0x%lx\n"
case BRIDGE_ISR_BAD_XREQ_PKT: /* bit27 BAD_XREQ_PACKET */
case BRIDGE_ISR_REQ_XTLK_ERR: /* bit25 REQ_XTALK_ERROR */
case BRIDGE_ISR_INVLD_ADDR: /* bit24 INVALID_ADDRESS */
print_bridge_errcmd(pcibr_soft,
pcireg_cmdword_err_get(pcibr_soft), "");
printk(
"\t Bridge Error Address Register: 0x%lx\n"
"\t Bridge Error Address: 0x%lx\n",
(uint64_t) bridge->b_wid_err_upper,
(uint64_t) bridge->b_wid_err_lower,
(((uint64_t) bridge->b_wid_err_upper << 32) |
bridge->b_wid_err_lower));
pcireg_bus_err_get(pcibr_soft),
pcireg_bus_err_get(pcibr_soft));
break;
case BRIDGE_ISR_UNSUPPORTED_XOP:/* bit23 UNSUPPORTED_XOP */
print_bridge_errcmd(bridge->b_wid_aux_err, "Aux ");
printk(
"\t Address Holding Link Side Error Reg: 0x%lx\n",
bridge->p_addr_lkerr_64);
case BRIDGE_ISR_UNSUPPORTED_XOP: /* bit23 UNSUPPORTED_XOP */
print_bridge_errcmd(pcibr_soft,
pcireg_linkside_err_get(pcibr_soft), "Aux ");
printk("\t Address Holding Link Side Error Reg: 0x%lx\n",
pcireg_linkside_err_addr_get(pcibr_soft));
break;
case BRIDGE_ISR_XREQ_FIFO_OFLOW:/* bit22 XREQ_FIFO_OFLOW */
print_bridge_errcmd(bridge->b_wid_aux_err, "Aux ");
printk(
"\t Address Holding Link Side Error Reg: 0x%lx\n",
bridge->p_addr_lkerr_64);
case BRIDGE_ISR_XREQ_FIFO_OFLOW: /* bit22 XREQ_FIFO_OFLOW */
print_bridge_errcmd(pcibr_soft,
pcireg_linkside_err_get(pcibr_soft), "Aux ");
printk("\t Address Holding Link Side Error Reg: 0x%lx\n",
pcireg_linkside_err_addr_get(pcibr_soft));
break;
case BRIDGE_ISR_PCI_ABORT: /* bit15 PCI_ABORT */
case BRIDGE_ISR_PCI_PARITY: /* bit14 PCI_PARITY */
case BRIDGE_ISR_PCI_SERR: /* bit13 PCI_SERR */
case BRIDGE_ISR_PCI_PERR: /* bit12 PCI_PERR */
case BRIDGE_ISR_PCI_MST_TIMEOUT:/* bit11 PCI_MASTER_TOUT */
case BRIDGE_ISR_PCI_RETRY_CNT: /* bit10 PCI_RETRY_CNT */
case BRIDGE_ISR_GIO_B_ENBL_ERR: /* bit08 GIO BENABLE_ERR */
printk(
"\t PCI Error Upper Address Register: 0x%lx\n"
"\t PCI Error Lower Address Register: 0x%lx\n"
case BRIDGE_ISR_PCI_ABORT: /* bit15 PCI_ABORT */
case BRIDGE_ISR_PCI_PARITY: /* bit14 PCI_PARITY */
case BRIDGE_ISR_PCI_SERR: /* bit13 PCI_SERR */
case BRIDGE_ISR_PCI_PERR: /* bit12 PCI_PERR */
case BRIDGE_ISR_PCI_MST_TIMEOUT: /* bit11 PCI_MASTER_TOUT */
case BRIDGE_ISR_PCI_RETRY_CNT: /* bit10 PCI_RETRY_CNT */
printk("\t PCI Error Address Register: 0x%lx\n"
"\t PCI Error Address: 0x%lx\n",
(uint64_t) bridge->b_pci_err_upper,
(uint64_t) bridge->b_pci_err_lower,
(((uint64_t) bridge->b_pci_err_upper << 32) |
bridge->b_pci_err_lower));
pcireg_pci_bus_addr_get(pcibr_soft),
pcireg_pci_bus_addr_addr_get(pcibr_soft));
break;
case BRIDGE_ISR_XREAD_REQ_TIMEOUT: /* bit09 XREAD_REQ_TOUT */
addr = (((uint64_t)(bridge->b_wid_resp_upper & 0xFFFF) << 32)
| bridge->b_wid_resp_lower);
printk(
"\t Bridge Response Buf Error Upper Addr Reg: 0x%x\n"
"\t Bridge Response Buf Error Lower Addr Reg: 0x%x\n"
case BRIDGE_ISR_XREAD_REQ_TIMEOUT: /* bit09 XREAD_REQ_TOUT */
printk("\t Bridge Response Buf Error Addr Reg: 0x%lx\n"
"\t dev-num %d buff-num %d addr 0x%lx\n",
bridge->b_wid_resp_upper, bridge->b_wid_resp_lower,
((bridge->b_wid_resp_upper >> 20) & 0x3),
((bridge->b_wid_resp_upper >> 16) & 0xF),
addr);
pcireg_resp_err_get(pcibr_soft),
(int)pcireg_resp_err_dev_get(pcibr_soft),
(int)pcireg_resp_err_buf_get(pcibr_soft),
pcireg_resp_err_get(pcibr_soft));
break;
}
}
}
mult_int_64 = (bridge->p_mult_int_64 & ~BRIDGE_ISR_INT_MSK);
mult_int = (uint64_t)mult_int_64;
number_bits = PCIBR_ISR_MAX_ERRS_PIC;
mult_int = pcireg_intr_multiple_get(pcibr_soft);
if (mult_int & ~BRIDGE_ISR_INT_MSK) {
printk( " %s Multiple Interrupt Register is 0x%lx\n",
"PIC", mult_int);
for (i = PCIBR_ISR_ERR_START; i < number_bits; i++) {
printk(" %s Multiple Interrupt Register is 0x%lx\n",
pcibr_soft->bs_asic_name, mult_int);
for (i = PCIBR_ISR_ERR_START; i < 64; i++) {
if (mult_int & (1ull << i))
printk( "\t%s\n", pcibr_isr_errs[i]);
}
......@@ -519,11 +498,7 @@ pcibr_error_dump(pcibr_soft_t pcibr_soft)
static void
pcibr_pioerr_check(pcibr_soft_t soft)
{
bridge_t *bridge;
uint64_t int_status;
picreg_t int_status_64;
bridgereg_t pci_err_lower;
bridgereg_t pci_err_upper;
uint64_t int_status;
iopaddr_t pci_addr;
pciio_slot_t slot;
pcibr_piomap_t map;
......@@ -532,16 +507,10 @@ pcibr_pioerr_check(pcibr_soft_t soft)
unsigned win;
int func;
bridge = soft->bs_base;
int_status_64 = (bridge->p_int_status_64 & ~BRIDGE_ISR_INT_MSK);
int_status = (uint64_t)int_status_64;
int_status = pcireg_intr_status_get(soft);
if (int_status & BRIDGE_ISR_PCIBUS_PIOERR) {
pci_err_lower = bridge->b_pci_err_lower;
pci_err_upper = bridge->b_pci_err_upper;
pci_addr = pci_err_upper & BRIDGE_ERRUPPR_ADDRMASK;
pci_addr = (pci_addr << 32) | pci_err_lower;
pci_addr = pcireg_pci_bus_addr_get(soft);
slot = PCIBR_NUM_SLOTS(soft);
while (slot-- > 0) {
......@@ -564,7 +533,7 @@ pcibr_pioerr_check(pcibr_soft_t soft)
else if (map->bp_space == PCIIO_SPACE_ROM)
base += pcibr_info->f_rbase;
if ((pci_addr >= base) && (pci_addr < (base + size)))
atomic_inc(&map->bp_toc[0]);
atomic_inc(&map->bp_toc);
}
}
}
......@@ -595,11 +564,9 @@ irqreturn_t
pcibr_error_intr_handler(int irq, void *arg, struct pt_regs *ep)
{
pcibr_soft_t pcibr_soft;
bridge_t *bridge;
uint64_t int_status;
uint64_t err_status;
picreg_t int_status_64;
int number_bits;
void *bridge;
uint64_t int_status;
uint64_t err_status;
int i;
uint64_t disable_errintr_mask = 0;
nasid_t nasid;
......@@ -662,9 +629,7 @@ pcibr_error_intr_handler(int irq, void *arg, struct pt_regs *ep)
return(pcibr_error_intr_handler(irq, arg, ep));
}
int_status_64 = (bridge->p_int_status_64 & ~BRIDGE_ISR_INT_MSK);
int_status = (uint64_t)int_status_64;
number_bits = PCIBR_ISR_MAX_ERRS_PIC;
int_status = pcireg_intr_status_get(pcibr_soft);
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ERROR, pcibr_soft->bs_conn,
"pcibr_error_intr_handler: int_status=0x%lx\n", int_status));
......@@ -672,7 +637,7 @@ pcibr_error_intr_handler(int irq, void *arg, struct pt_regs *ep)
/* int_status is which bits we have to clear;
* err_status is the bits we haven't handled yet.
*/
err_status = int_status & ~BRIDGE_ISR_MULTI_ERR;
err_status = int_status;
if (!(int_status & ~BRIDGE_ISR_INT_MSK)) {
/*
......@@ -688,9 +653,10 @@ pcibr_error_intr_handler(int irq, void *arg, struct pt_regs *ep)
}
if (err_status) {
struct bs_errintr_stat_s *bs_estat = pcibr_soft->bs_errintr_stat;
        struct bs_errintr_stat_s *bs_estat;
bs_estat = &pcibr_soft->bs_errintr_stat[PCIBR_ISR_ERR_START];
for (i = PCIBR_ISR_ERR_START; i < number_bits; i++, bs_estat++) {
for (i = PCIBR_ISR_ERR_START; i < 64; i++, bs_estat++) {
if (err_status & (1ull << i)) {
uint32_t errrate = 0;
uint32_t errcount = 0;
......@@ -827,7 +793,7 @@ pcibr_error_intr_handler(int irq, void *arg, struct pt_regs *ep)
* could eat up too much cpu time.
*/
s = pcibr_lock(pcibr_soft);
bridge->p_int_enable_64 &= (picreg_t)(~disable_errintr_mask);
pcireg_intr_enable_bit_clr(pcibr_soft, disable_errintr_mask);
pcibr_unlock(pcibr_soft, s);
}
/*
......@@ -836,31 +802,22 @@ pcibr_error_intr_handler(int irq, void *arg, struct pt_regs *ep)
* which will cause a BRIDGE_ISR_INVLD_ADDR.
*/
if ((err_status & BRIDGE_ISR_INVLD_ADDR) &&
(0x00000000 == bridge->b_wid_err_upper) &&
(0x00C00000 == (0xFFC00000 & bridge->b_wid_err_lower)) &&
(0x00402000 == (0x00F07F00 & bridge->b_wid_err_cmdword))) {
(0x00C00000 == (pcireg_bus_err_get(pcibr_soft) & 0xFFFFFFFFFFC00000)) &&
(0x00402000 == (0x00F07F00 & pcireg_cmdword_err_get(pcibr_soft)))) {
err_status &= ~BRIDGE_ISR_INVLD_ADDR;
}
/*
* The bridge bug (PCIBR_LLP_CONTROL_WAR), where the llp_config or control registers
* need to be read back after being written, affects an MP
* system since there could be small windows between writing
* the register and reading it back on one cpu while another
* cpu is fielding an interrupt. If we run into this scenario,
* workaround the problem by ignoring the error. (bug 454474)
* pcibr_llp_control_war_cnt keeps an approximate number of
* times we saw this problem on a system.
     * pcibr_pioerr_dump is a systune that may be used to suppress
     * printing bridge registers for interrupts generated by PIO errors.
* Some customers do early probes and expect a lot of failed
* pios.
*/
if ((err_status & BRIDGE_ISR_INVLD_ADDR) &&
((((uint64_t) bridge->b_wid_err_upper << 32) | (bridge->b_wid_err_lower))
== (BRIDGE_INT_RST_STAT & 0xff0))) {
pcibr_llp_control_war_cnt++;
err_status &= ~BRIDGE_ISR_INVLD_ADDR;
if (!pcibr_pioerr_dump) {
bridge_errors_to_dump &= ~BRIDGE_ISR_PCIBUS_PIOERR;
} else {
bridge_errors_to_dump |= BRIDGE_ISR_PCIBUS_PIOERR;
}
bridge_errors_to_dump |= BRIDGE_ISR_PCIBUS_PIOERR;
/* Dump/Log Bridge error interrupt info */
if (err_status & bridge_errors_to_dump) {
printk("BRIDGE ERR_STATUS 0x%lx\n", err_status);
......@@ -873,10 +830,11 @@ pcibr_error_intr_handler(int irq, void *arg, struct pt_regs *ep)
* has only been seen in simulation
*/
if (PCIBR_WAR_ENABLED(PV867308, pcibr_soft) &&
(err_status & (BRIDGE_ISR_LLP_REC_SNERR | BRIDGE_ISR_LLP_REC_CBERR))) {
printk("BRIDGE ERR_STATUS 0x%lx\n", err_status);
pcibr_error_dump(pcibr_soft);
panic("PCI Bridge Error interrupt killed the system");
(err_status & (BRIDGE_ISR_LLP_REC_SNERR | BRIDGE_ISR_LLP_REC_CBERR))) {
printk("BRIDGE ERR_STATUS 0x%lx\n", err_status);
pcibr_error_dump(pcibr_soft);
/* machine_error_dump(""); */
panic("PCI Bridge Error interrupt killed the system");
}
if (err_status & BRIDGE_ISR_ERROR_FATAL) {
......@@ -893,7 +851,7 @@ pcibr_error_intr_handler(int irq, void *arg, struct pt_regs *ep)
*
* PIC doesn't require groups of interrupts to be cleared...
*/
bridge->p_int_rst_stat_64 = (picreg_t)(int_status | BRIDGE_IRR_MULTI_CLR);
pcireg_intr_reset_set(pcibr_soft, (int_status | BRIDGE_IRR_MULTI_CLR));
/* PIC BRINGUP WAR (PV# 856155):
* On a PCI_X_ARB_ERR error interrupt clear the DEV_BROKE bits from
......@@ -901,7 +859,7 @@ pcibr_error_intr_handler(int irq, void *arg, struct pt_regs *ep)
*/
if ((err_status & PIC_ISR_PCIX_ARB_ERR) &&
PCIBR_WAR_ENABLED(PV856155, pcibr_soft)) {
bridge->b_arb |= (0xf << 20);
pcireg_arbitration_bit_set(pcibr_soft, (0xf << 20));
}
/* Zero out bserr_intstat field */
......@@ -909,18 +867,196 @@ pcibr_error_intr_handler(int irq, void *arg, struct pt_regs *ep)
return IRQ_HANDLED;
}
/*
* pcibr_addr_toslot
* Given the 'pciaddr' find out which slot this address is
* allocated to, and return the slot number.
* While we have the info handy, construct the
* function number, space code and offset as well.
*
* NOTE: if this routine is called, we don't know whether
* the address is in CFG, MEM, or I/O space. We have to guess.
* This will be the case on PIO stores, where the only way
* we have of getting the address is to check the Bridge, which
* stores the PCI address but not the space and not the xtalk
* address (from which we could get it).
*/
static int
pcibr_addr_toslot(pcibr_soft_t pcibr_soft,
iopaddr_t pciaddr,
pciio_space_t *spacep,
iopaddr_t *offsetp,
pciio_function_t *funcp)
{
int s, f = 0, w;
iopaddr_t base;
size_t size;
pciio_piospace_t piosp;
/*
* Check if the address is in config space
*/
if ((pciaddr >= BRIDGE_CONFIG_BASE) && (pciaddr < BRIDGE_CONFIG_END)) {
if (pciaddr >= BRIDGE_CONFIG1_BASE)
pciaddr -= BRIDGE_CONFIG1_BASE;
else
pciaddr -= BRIDGE_CONFIG_BASE;
s = pciaddr / BRIDGE_CONFIG_SLOT_SIZE;
pciaddr %= BRIDGE_CONFIG_SLOT_SIZE;
if (funcp) {
f = pciaddr / 0x100;
pciaddr %= 0x100;
}
if (spacep)
*spacep = PCIIO_SPACE_CFG;
if (offsetp)
*offsetp = pciaddr;
if (funcp)
*funcp = f;
return s;
}
for (s = pcibr_soft->bs_min_slot; s < PCIBR_NUM_SLOTS(pcibr_soft); ++s) {
int nf = pcibr_soft->bs_slot[s].bss_ninfo;
pcibr_info_h pcibr_infoh = pcibr_soft->bs_slot[s].bss_infos;
for (f = 0; f < nf; f++) {
pcibr_info_t pcibr_info = pcibr_infoh[f];
if (!pcibr_info)
continue;
for (w = 0; w < 6; w++) {
if (pcibr_info->f_window[w].w_space
== PCIIO_SPACE_NONE) {
continue;
}
base = pcibr_info->f_window[w].w_base;
size = pcibr_info->f_window[w].w_size;
if ((pciaddr >= base) && (pciaddr < (base + size))) {
if (spacep)
*spacep = PCIIO_SPACE_WIN(w);
if (offsetp)
*offsetp = pciaddr - base;
if (funcp)
*funcp = f;
return s;
} /* endif match */
} /* next window */
} /* next func */
} /* next slot */
/*
* Check if the address was allocated as part of the
* pcibr_piospace_alloc calls.
*/
for (s = pcibr_soft->bs_min_slot; s < PCIBR_NUM_SLOTS(pcibr_soft); ++s) {
int nf = pcibr_soft->bs_slot[s].bss_ninfo;
pcibr_info_h pcibr_infoh = pcibr_soft->bs_slot[s].bss_infos;
for (f = 0; f < nf; f++) {
pcibr_info_t pcibr_info = pcibr_infoh[f];
if (!pcibr_info)
continue;
piosp = pcibr_info->f_piospace;
while (piosp) {
if ((piosp->start <= pciaddr) &&
((piosp->count + piosp->start) > pciaddr)) {
if (spacep)
*spacep = piosp->space;
if (offsetp)
*offsetp = pciaddr - piosp->start;
return s;
} /* endif match */
piosp = piosp->next;
} /* next piosp */
} /* next func */
} /* next slot */
/*
* Some other random address on the PCI bus ...
* we have no way of knowing whether this was
* a MEM or I/O access; so, for now, we just
* assume that the low 1G is MEM, the next
* 3G is I/O, and anything above the 4G limit
* is obviously MEM.
*/
if (spacep)
*spacep = ((pciaddr < (1ul << 30)) ? PCIIO_SPACE_MEM :
(pciaddr < (4ul << 30)) ? PCIIO_SPACE_IO :
PCIIO_SPACE_MEM);
if (offsetp)
*offsetp = pciaddr;
return PCIIO_SLOT_NONE;
}
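The catch-all classification at the end of pcibr_addr_toslot() (below 1 GB and above 4 GB guessed as MEM, the 1 GB-4 GB range guessed as I/O) restated on its own:

#include <stdio.h>
#include <stdint.h>

enum pci_space { SPACE_MEM, SPACE_IO };

/* Same guess as pcibr_addr_toslot()'s final fallback. */
static enum pci_space guess_space(uint64_t pciaddr)
{
        if (pciaddr < (1ULL << 30))        /* below 1 GB */
                return SPACE_MEM;
        if (pciaddr < (4ULL << 30))        /* 1 GB .. 4 GB */
                return SPACE_IO;
        return SPACE_MEM;                  /* above the 4 GB limit */
}

int main(void)
{
        printf("%d %d %d\n",
               guess_space(0x10000000),         /* 0 = MEM */
               guess_space(0x80000000),         /* 1 = IO  */
               guess_space(0x180000000ULL));    /* 0 = MEM */
        return 0;
}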
void
pcibr_error_cleanup(pcibr_soft_t pcibr_soft, int error_code)
{
bridge_t *bridge = pcibr_soft->bs_base;
uint64_t clr_bits = BRIDGE_IRR_ALL_CLR;
ASSERT(error_code & IOECODE_PIO);
error_code = error_code;
bridge->p_int_rst_stat_64 = BRIDGE_IRR_PCI_GRP_CLR |
PIC_PCIX_GRP_CLR |
BRIDGE_IRR_MULTI_CLR;
(void) bridge->b_wid_tflush; /* flushbus */
pcireg_intr_reset_set(pcibr_soft, clr_bits);
pcireg_tflush_get(pcibr_soft); /* flushbus */
}
/*
* pcibr_error_extract
* Given the 'pcibr vertex handle', find out which slot
* the bridge status error address (from the pcibr_soft info
* hanging off the vertex) is allocated to, and return
* the slot number.
* While we have the info handy, construct the
* space code and offset as well.
*
* NOTE: if this routine is called, we don't know whether
* the address is in CFG, MEM, or I/O space. We have to guess.
* This will be the case on PIO stores, where the only way
* we have of getting the address is to check the Bridge, which
* stores the PCI address but not the space and not the xtalk
* address (from which we could get it).
*
* XXX- this interface has no way to return the function
* number on a multifunction card, even though that data
* is available.
*/
pciio_slot_t
pcibr_error_extract(vertex_hdl_t pcibr_vhdl,
pciio_space_t *spacep,
iopaddr_t *offsetp)
{
pcibr_soft_t pcibr_soft = 0;
iopaddr_t bserr_addr;
pciio_slot_t slot = PCIIO_SLOT_NONE;
arbitrary_info_t rev;
/* Do a sanity check as to whether we really got a
* bridge vertex handle.
*/
if (hwgraph_info_get_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, &rev) !=
GRAPH_SUCCESS)
return(slot);
pcibr_soft = pcibr_soft_get(pcibr_vhdl);
if (pcibr_soft) {
bserr_addr = pcireg_pci_bus_addr_get(pcibr_soft);
slot = pcibr_addr_toslot(pcibr_soft, bserr_addr,
spacep, offsetp, NULL);
}
return slot;
}
/*ARGSUSED */
......@@ -953,6 +1089,28 @@ pcibr_device_disable(pcibr_soft_t pcibr_soft, int devnum)
* to handle the error, it expects the bus-interface to disable that
* device, and takes any steps needed here to take away any resources
* associated with this device.
*
* A note about slots:
*
* PIC-based bridges use zero-based device numbering when mapping devices to
* internal registers. However, the physical slots are numbered using a
* one-based scheme because in PCI-X, device 0 is reserved (see comments
* in pcibr_private.h for a better description).
*
* When building up the hwgraph, we use the external (one-based) number
* scheme when numbering slot components so that the hwgraph more accurately
* reflects what is silkscreened on the bricks.
*
* Since pciio_error_handler() needs to ultimately be able to do a hwgraph
* lookup, the ioerror that gets built up in pcibr_pioerror() encodes the
* external (one-based) slot number. However, loops in pcibr_pioerror()
* which attempt to translate the virtual address into the correct
* PCI physical address use the device (zero-based) numbering when
* walking through bridge structures.
*
* To that end, pcibr_pioerror() uses device to denote the
* zero-based device number, and external_slot to denote the corresponding
* one-based slot number. Loop counters (e.g. cs) are always device-based.
*/
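Assuming the macros referenced above perform a plain off-by-one shift, a minimal sketch of the device/slot conversion reads as follows (the real PCIBR_DEVICE_TO_SLOT()/PCIBR_SLOT_TO_DEVICE() definitions live in pcibr_private.h and may do more; this is illustrative only, not part of the patch):
/* Assumed mapping: zero-based device number <-> one-based external slot. */
static inline int device_to_external_slot(int device)
{
	return device + 1;	/* e.g. device 0 is silkscreened as slot 1 */
}
static inline int external_slot_to_device(int ext_slot)
{
	return ext_slot - 1;
}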
/* BEM_ADD_IOE doesn't dump the whole ioerror, it just
......@@ -1017,7 +1175,8 @@ pcibr_pioerror(
iopaddr_t raw_paddr; /* raw PCI address */
pciio_space_t space; /* final PCI space */
pciio_slot_t slot; /* final PCI slot, if appropriate */
pciio_slot_t device; /* final PCI device if appropriate */
pciio_slot_t external_slot;/* external slot for device */
pciio_function_t func; /* final PCI func, if appropriate */
iopaddr_t offset; /* final PCI offset */
......@@ -1039,16 +1198,16 @@ pcibr_pioerror(
"pcibr_pioerror: pcibr_soft=0x%lx, bad_xaddr=0x%lx\n",
pcibr_soft, bad_xaddr));
slot = PCIIO_SLOT_NONE;
device = PCIIO_SLOT_NONE;
func = PCIIO_FUNC_NONE;
raw_space = PCIIO_SPACE_NONE;
raw_paddr = 0;
if ((bad_xaddr >= PCIBR_BUS_TYPE0_CFG_DEV0(pcibr_soft)) &&
if ((bad_xaddr >= PCIBR_BUS_TYPE0_CFG_DEV(pcibr_soft, 0)) &&
(bad_xaddr < PCIBR_TYPE1_CFG(pcibr_soft))) {
raw_paddr = bad_xaddr - PCIBR_BUS_TYPE0_CFG_DEV0(pcibr_soft);
slot = raw_paddr / BRIDGE_TYPE0_CFG_SLOT_OFF;
raw_paddr = raw_paddr % BRIDGE_TYPE0_CFG_SLOT_OFF;
raw_paddr = bad_xaddr - PCIBR_BUS_TYPE0_CFG_DEV(pcibr_soft, 0);
device = raw_paddr / BRIDGE_CONFIG_SLOT_SIZE;
raw_paddr = raw_paddr % BRIDGE_CONFIG_SLOT_SIZE;
raw_space = PCIIO_SPACE_CFG;
}
if ((bad_xaddr >= PCIBR_TYPE1_CFG(pcibr_soft)) &&
......@@ -1060,11 +1219,11 @@ pcibr_pioerror(
raw_paddr = bad_xaddr - PCIBR_TYPE1_CFG(pcibr_soft);
raw_space = PCIIO_SPACE_CFG;
}
if ((bad_xaddr >= PCIBR_BRIDGE_DEVIO0(pcibr_soft)) &&
if ((bad_xaddr >= PCIBR_BRIDGE_DEVIO(pcibr_soft, 0)) &&
(bad_xaddr < PCIBR_BRIDGE_DEVIO(pcibr_soft, BRIDGE_DEV_CNT))) {
int x;
raw_paddr = bad_xaddr - PCIBR_BRIDGE_DEVIO0(pcibr_soft);
raw_paddr = bad_xaddr - PCIBR_BRIDGE_DEVIO(pcibr_soft, 0);
x = raw_paddr / BRIDGE_DEVIO_OFF;
raw_paddr %= BRIDGE_DEVIO_OFF;
/* first two devio windows are double-sized */
......@@ -1101,25 +1260,37 @@ pcibr_pioerror(
} else
raw_paddr += pcibr_soft->bs_slot[x].bss_devio.bssd_base;
}
if ((bad_xaddr >= BRIDGE_PCI_MEM32_BASE) &&
(bad_xaddr <= BRIDGE_PCI_MEM32_LIMIT)) {
raw_space = PCIIO_SPACE_MEM32;
raw_paddr = bad_xaddr - BRIDGE_PCI_MEM32_BASE;
}
if ((bad_xaddr >= BRIDGE_PCI_MEM64_BASE) &&
(bad_xaddr <= BRIDGE_PCI_MEM64_LIMIT)) {
raw_space = PCIIO_SPACE_MEM64;
raw_paddr = bad_xaddr - BRIDGE_PCI_MEM64_BASE;
}
if ((bad_xaddr >= BRIDGE_PCI_IO_BASE) &&
(bad_xaddr <= BRIDGE_PCI_IO_LIMIT)) {
raw_space = PCIIO_SPACE_IO;
raw_paddr = bad_xaddr - BRIDGE_PCI_IO_BASE;
if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 0)) {
if ((bad_xaddr >= PICBRIDGE0_PCI_MEM32_BASE) &&
(bad_xaddr <= PICBRIDGE0_PCI_MEM32_LIMIT)) {
raw_space = PCIIO_SPACE_MEM32;
raw_paddr = bad_xaddr - PICBRIDGE0_PCI_MEM32_BASE;
}
if ((bad_xaddr >= PICBRIDGE0_PCI_MEM64_BASE) &&
(bad_xaddr <= PICBRIDGE0_PCI_MEM64_LIMIT)) {
raw_space = PCIIO_SPACE_MEM64;
raw_paddr = bad_xaddr - PICBRIDGE0_PCI_MEM64_BASE;
}
} else if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 1)) {
if ((bad_xaddr >= PICBRIDGE1_PCI_MEM32_BASE) &&
(bad_xaddr <= PICBRIDGE1_PCI_MEM32_LIMIT)) {
raw_space = PCIIO_SPACE_MEM32;
raw_paddr = bad_xaddr - PICBRIDGE1_PCI_MEM32_BASE;
}
if ((bad_xaddr >= PICBRIDGE1_PCI_MEM64_BASE) &&
(bad_xaddr <= PICBRIDGE1_PCI_MEM64_LIMIT)) {
raw_space = PCIIO_SPACE_MEM64;
raw_paddr = bad_xaddr - PICBRIDGE1_PCI_MEM64_BASE;
}
} else {
printk("pcibr_pioerror(): unknown bridge type");
return IOERROR_UNHANDLED;
}
space = raw_space;
offset = raw_paddr;
if ((slot == PCIIO_SLOT_NONE) && (space != PCIIO_SPACE_NONE)) {
if ((device == PCIIO_SLOT_NONE) && (space != PCIIO_SPACE_NONE)) {
/* we've got a space/offset but not which
* PCI slot decodes it. Check through our
* notions of which devices decode where.
......@@ -1133,16 +1304,16 @@ pcibr_pioerror(
for (cs = pcibr_soft->bs_min_slot;
(cs < PCIBR_NUM_SLOTS(pcibr_soft)) &&
(slot == PCIIO_SLOT_NONE); cs++) {
(device == PCIIO_SLOT_NONE); cs++) {
int nf = pcibr_soft->bs_slot[cs].bss_ninfo;
pcibr_info_h pcibr_infoh = pcibr_soft->bs_slot[cs].bss_infos;
for (cf = 0; (cf < nf) && (slot == PCIIO_SLOT_NONE); cf++) {
for (cf = 0; (cf < nf) && (device == PCIIO_SLOT_NONE); cf++) {
pcibr_info_t pcibr_info = pcibr_infoh[cf];
if (!pcibr_info)
continue;
for (cw = 0; (cw < 6) && (slot == PCIIO_SLOT_NONE); ++cw) {
for (cw = 0; (cw < 6) && (device == PCIIO_SLOT_NONE); ++cw) {
if (((wx = pcibr_info->f_window[cw].w_space) != PCIIO_SPACE_NONE) &&
((wb = pcibr_info->f_window[cw].w_base) != 0) &&
((ws = pcibr_info->f_window[cw].w_size) != 0) &&
......@@ -1158,7 +1329,7 @@ pcibr_pioerror(
((space == PCIIO_SPACE_MEM) ||
(space == PCIIO_SPACE_MEM32) ||
(space == PCIIO_SPACE_MEM64)))) {
slot = cs;
device = cs;
func = cf;
space = PCIIO_SPACE_WIN(cw);
offset -= wb;
......@@ -1211,7 +1382,7 @@ pcibr_pioerror(
wb = map->bp_pciaddr;
ws = map->bp_mapsz;
cw = wx - PCIIO_SPACE_WIN(0);
if (cw < 6) {
if (cw >= 0 && cw < 6) {
wb += pcibr_soft->bs_slot[cs].bss_window[cw].bssw_base;
wx = pcibr_soft->bs_slot[cs].bss_window[cw].bssw_space;
}
......@@ -1224,32 +1395,35 @@ pcibr_pioerror(
wx = PCIIO_SPACE_MEM;
wl = wb + ws;
if ((wx == raw_space) && (raw_paddr >= wb) && (raw_paddr < wl)) {
atomic_inc(&map->bp_toc[0]);
if (slot == PCIIO_SLOT_NONE) {
slot = cs;
atomic_inc(&map->bp_toc);
if (device == PCIIO_SLOT_NONE) {
device = cs;
func = cf;
space = map->bp_space;
if (cw < 6)
offset -= pcibr_soft->bs_slot[cs].bss_window[cw].bssw_base;
if (cw >= 0 && cw < 6)
offset -= pcibr_soft->bs_slot[device].bss_window[cw].bssw_base;
}
break;
}
}
}
}
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ERROR_HDLR, pcibr_soft->bs_conn,
"pcibr_pioerror: offset=0x%x, slot=0x%x, func=0x%x\n",
offset, slot, func));
"pcibr_pioerror: space=%d, offset=0x%lx, dev=0x%x, func=0x%x\n",
space, offset, device, func));
if (space != PCIIO_SPACE_NONE) {
if (slot != PCIIO_SLOT_NONE) {
if (func != PCIIO_FUNC_NONE) {
if (device != PCIIO_SLOT_NONE) {
external_slot = PCIBR_DEVICE_TO_SLOT(pcibr_soft, device);
if (func != PCIIO_FUNC_NONE)
IOERROR_SETVALUE(ioe, widgetdev,
pciio_widgetdev_create(slot,func));
}
else {
pciio_widgetdev_create(external_slot,func));
else
IOERROR_SETVALUE(ioe, widgetdev,
pciio_widgetdev_create(slot,0));
}
pciio_widgetdev_create(external_slot,0));
}
IOERROR_SETVALUE(ioe, busspace, space);
IOERROR_SETVALUE(ioe, busaddr, offset);
......@@ -1265,7 +1439,7 @@ pcibr_pioerror(
/* if appropriate, give the error handler for this slot
* a shot at this probe access as well.
*/
return (slot == PCIIO_SLOT_NONE) ? IOERROR_HANDLED :
return (device == PCIIO_SLOT_NONE) ? IOERROR_HANDLED :
pciio_error_handler(pcibr_vhdl, error_code, mode, ioe);
}
/*
......@@ -1355,10 +1529,11 @@ pcibr_pioerror(
* other errors.
*/
if (IOERROR_FIELDVALID(ioe, widgetdev)) {
short widdev;
IOERROR_GETVALUE(widdev, ioe, widgetdev);
pcibr_device_disable(pcibr_soft,
pciio_widgetdev_slot_get(widdev));
short widdev;
IOERROR_GETVALUE(widdev, ioe, widgetdev);
external_slot = pciio_widgetdev_slot_get(widdev);
device = PCIBR_SLOT_TO_DEVICE(pcibr_soft, external_slot);
pcibr_device_disable(pcibr_soft, device);
}
if (mode == MODE_DEVUSERERROR)
pcibr_error_cleanup(pcibr_soft, error_code);
......@@ -1381,10 +1556,8 @@ pcibr_dmard_error(
ioerror_t *ioe)
{
vertex_hdl_t pcibr_vhdl = pcibr_soft->bs_vhdl;
bridge_t *bridge = pcibr_soft->bs_base;
bridgereg_t bus_lowaddr, bus_uppraddr;
int retval = 0;
int bufnum;
int bufnum, device;
/*
* In case of DMA errors, bridge should have logged the
......@@ -1401,19 +1574,10 @@ pcibr_dmard_error(
/*
* read error log registers
*/
bus_lowaddr = bridge->b_wid_resp_lower;
bus_uppraddr = bridge->b_wid_resp_upper;
bufnum = BRIDGE_RESP_ERRUPPR_BUFNUM(bus_uppraddr);
IOERROR_SETVALUE(ioe, widgetdev,
pciio_widgetdev_create(
BRIDGE_RESP_ERRUPPR_DEVICE(bus_uppraddr),
0));
IOERROR_SETVALUE(ioe, busaddr,
(bus_lowaddr |
((iopaddr_t)
(bus_uppraddr &
BRIDGE_ERRUPPR_ADDRMASK) << 32)));
bufnum = pcireg_resp_err_buf_get(pcibr_soft);
device = pcireg_resp_err_dev_get(pcibr_soft);
IOERROR_SETVALUE(ioe, widgetdev, pciio_widgetdev_create(device, 0));
IOERROR_SETVALUE(ioe, busaddr, pcireg_resp_err_get(pcibr_soft));
/*
* need to ensure that the xtalk address in ioe
......@@ -1436,7 +1600,7 @@ pcibr_dmard_error(
* not is dependent on INT_ENABLE register. This write just makes sure
* that if the interrupt was enabled, we do get the interrupt.
*/
bridge->b_int_rst_stat = BRIDGE_IRR_RESP_BUF_GRP_CLR;
pcireg_intr_reset_set(pcibr_soft, BRIDGE_IRR_RESP_BUF_GRP_CLR);
/*
* Also, release the "bufnum" back to buffer pool that could be re-used.
......@@ -1445,19 +1609,13 @@ pcibr_dmard_error(
*/
{
reg_p regp;
bridgereg_t regv;
bridgereg_t mask;
regp = (bufnum & 1)
? &bridge->b_odd_resp
: &bridge->b_even_resp;
uint64_t rrb_reg;
uint64_t mask;
rrb_reg = pcireg_rrb_get(pcibr_soft, (bufnum & 1));
mask = 0xF << ((bufnum >> 1) * 4);
regv = *regp;
*regp = regv & ~mask;
*regp = regv;
pcireg_rrb_set(pcibr_soft, (bufnum & 1), (rrb_reg & ~mask));
pcireg_rrb_set(pcibr_soft, (bufnum & 1), rrb_reg);
}
return retval;
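The bufnum arithmetic in the block above treats each even/odd RRB map register as eight 4-bit fields; a standalone sketch of that indexing (illustrative only, using just the relationships already visible above):
#include <stdint.h>
/* For response buffer 'bufnum': the low bit selects the even or odd RRB map
 * register, the remaining bits select the 4-bit field within that register. */
static void
rrb_field_for_bufnum(int bufnum, int *even_odd, uint64_t *mask)
{
	*even_odd = bufnum & 1;
	*mask = 0xFULL << ((bufnum >> 1) * 4);
}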
......@@ -1705,9 +1863,9 @@ pcibr_error_handler_wrapper(
*/
if ((pio_retval == -1) && (dma_retval == -1)) {
return IOERROR_BADERRORCODE;
} else if (dma_retval != IOERROR_HANDLED) {
} else if ((dma_retval != IOERROR_HANDLED) && (dma_retval != -1)) {
return dma_retval;
} else if (pio_retval != IOERROR_HANDLED) {
} else if ((pio_retval != IOERROR_HANDLED) && (pio_retval != -1)) {
return pio_retval;
} else {
return IOERROR_HANDLED;
......
......@@ -40,7 +40,7 @@ int pcibr_intr_connect(pcibr_intr_t, intr_func_t, intr_arg_t
void pcibr_intr_disconnect(pcibr_intr_t);
vertex_hdl_t pcibr_intr_cpu_get(pcibr_intr_t);
void pcibr_xintr_preset(void *, int, xwidgetnum_t, iopaddr_t, xtalk_intr_vector_t);
void pcibr_intr_func(intr_arg_t);
extern pcibr_info_t pcibr_info_get(vertex_hdl_t);
......@@ -100,7 +100,8 @@ pcibr_intr_bits(pciio_info_t info,
extern struct sn_flush_nasid_entry flush_nasid_list[MAX_NASIDS];
void
sn_dma_flush(unsigned long addr) {
sn_dma_flush(unsigned long addr)
{
nasid_t nasid;
int wid_num;
volatile struct sn_flush_device_list *p;
......@@ -144,7 +145,7 @@ sn_dma_flush(unsigned long addr) {
/* force an interrupt. */
*(bridgereg_t *)(p->force_int_addr) = 1;
*(volatile uint32_t *)(p->force_int_addr) = 1;
/* wait for the interrupt to come back. */
......@@ -152,8 +153,6 @@ sn_dma_flush(unsigned long addr) {
/* okay, everything is synched up. */
spin_unlock_irqrestore(&p->flush_lock, flags);
return;
}
EXPORT_SYMBOL(sn_dma_flush);
......@@ -200,7 +199,6 @@ pcibr_force_interrupt(pcibr_intr_t intr)
unsigned bit;
unsigned bits;
pcibr_soft_t pcibr_soft = intr->bi_soft;
bridge_t *bridge = pcibr_soft->bs_base;
bits = intr->bi_ibits;
for (bit = 0; bit < 8; bit++) {
......@@ -209,7 +207,7 @@ pcibr_force_interrupt(pcibr_intr_t intr)
PCIBR_DEBUG((PCIBR_DEBUG_INTR, pcibr_soft->bs_vhdl,
"pcibr_force_interrupt: bit=0x%x\n", bit));
bridge->b_force_pin[bit].intr = 1;
pcireg_force_intr_set(pcibr_soft, bit);
}
}
}
......@@ -225,7 +223,6 @@ pcibr_intr_alloc(vertex_hdl_t pconn_vhdl,
pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pcibr_info);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
bridge_t *bridge = pcibr_soft->bs_base;
int is_threaded = 0;
xtalk_intr_t *xtalk_intr_p;
......@@ -239,8 +236,6 @@ pcibr_intr_alloc(vertex_hdl_t pconn_vhdl,
pcibr_intr_t pcibr_intr;
pcibr_intr_list_t intr_entry;
pcibr_intr_list_t intr_list;
bridgereg_t int_dev;
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
"pcibr_intr_alloc: %s%s%s%s%s\n",
......@@ -266,9 +261,9 @@ pcibr_intr_alloc(vertex_hdl_t pconn_vhdl,
pcibr_intr->bi_ibuf.ib_in = 0;
pcibr_intr->bi_ibuf.ib_out = 0;
spin_lock_init(&pcibr_intr->bi_ibuf.ib_lock);
pcibr_int_bits = pcibr_soft->bs_intr_bits((pciio_info_t)pcibr_info, lines,
PCIBR_NUM_SLOTS(pcibr_soft));
pcibr_int_bits = pcibr_soft->bs_intr_bits((pciio_info_t)pcibr_info,
lines, PCIBR_NUM_SLOTS(pcibr_soft));
/*
* For each PCI interrupt line requested, figure
......@@ -336,10 +331,10 @@ pcibr_intr_alloc(vertex_hdl_t pconn_vhdl,
* now tell the bridge which slot is
* using this interrupt line.
*/
int_dev = bridge->b_int_device;
int_dev &= ~BRIDGE_INT_DEV_MASK(pcibr_int_bit);
int_dev |= pciio_slot << BRIDGE_INT_DEV_SHFT(pcibr_int_bit);
bridge->b_int_device = int_dev; /* XXXMP */
pcireg_intr_device_bit_clr(pcibr_soft,
BRIDGE_INT_DEV_MASK(pcibr_int_bit));
pcireg_intr_device_bit_set(pcibr_soft,
(pciio_slot << BRIDGE_INT_DEV_SHFT(pcibr_int_bit)));
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
"bridge intr bit %d clears my wrb\n",
......@@ -367,7 +362,8 @@ pcibr_intr_alloc(vertex_hdl_t pconn_vhdl,
intr_entry->il_next = NULL;
intr_entry->il_intr = pcibr_intr;
intr_entry->il_wrbf = &(bridge->b_wr_req_buf[pciio_slot].reg);
intr_entry->il_soft = pcibr_soft;
intr_entry->il_slot = pciio_slot;
intr_list_p =
&pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_list;
......@@ -479,19 +475,14 @@ pcibr_intr_free(pcibr_intr_t pcibr_intr)
if ((!intr_shared) && (*xtalk_intrp)) {
bridge_t *bridge = pcibr_soft->bs_base;
bridgereg_t int_dev;
xtalk_intr_free(*xtalk_intrp);
*xtalk_intrp = 0;
/* Clear the PCI device interrupt to bridge interrupt pin
* mapping.
*/
int_dev = bridge->b_int_device;
int_dev &= ~BRIDGE_INT_DEV_MASK(pcibr_int_bit);
bridge->b_int_device = int_dev;
pcireg_intr_device_bit_clr(pcibr_soft,
BRIDGE_INT_DEV_MASK(pcibr_int_bit));
}
}
}
......@@ -504,17 +495,21 @@ pcibr_setpciint(xtalk_intr_t xtalk_intr)
iopaddr_t addr;
xtalk_intr_vector_t vect;
vertex_hdl_t vhdl;
bridge_t *bridge;
picreg_t *int_addr;
int bus_num;
int pcibr_int_bit;
void *bridge;
addr = xtalk_intr_addr_get(xtalk_intr);
vect = xtalk_intr_vector_get(xtalk_intr);
vhdl = xtalk_intr_dev_get(xtalk_intr);
bridge = (bridge_t *)xtalk_piotrans_addr(vhdl, 0, 0, sizeof(bridge_t), 0);
int_addr = (picreg_t *)xtalk_intr_sfarg_get(xtalk_intr);
*int_addr = ((PIC_INT_ADDR_FLD & ((uint64_t)vect << 48)) |
(PIC_INT_ADDR_HOST & addr));
/* bus and int bits are stored in sfarg: bus in bit 3, int bits in bits 2:0 */
pcibr_int_bit = *((int *)xtalk_intr_sfarg_get(xtalk_intr)) & 0x7;
bus_num = ((*((int *)xtalk_intr_sfarg_get(xtalk_intr)) & 0x8) >> 3);
bridge = pcibr_bridge_ptr_get(vhdl, bus_num);
pcireg_bridge_intr_addr_vect_set(bridge, pcibr_int_bit, vect);
pcireg_bridge_intr_addr_addr_set(bridge, pcibr_int_bit, addr);
}
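A short sketch of the sfarg encoding that pcibr_setpciint() unpacks above, matching the value built in pcibr_intr_connect() further down ((busnum << 3) | int_bit); illustrative only, not part of the patch:
/* Assumed sfarg layout: bit 3 = bus number, bits 2:0 = interrupt bit. */
static inline int sfarg_encode(int bus_num, int int_bit)
{
	return ((bus_num & 0x1) << 3) | (int_bit & 0x7);
}
static inline int sfarg_bus(int sfarg)
{
	return (sfarg >> 3) & 0x1;
}
static inline int sfarg_int_bit(int sfarg)
{
	return sfarg & 0x7;
}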
/*ARGSUSED */
......@@ -522,11 +517,9 @@ int
pcibr_intr_connect(pcibr_intr_t pcibr_intr, intr_func_t intr_func, intr_arg_t intr_arg)
{
pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
bridge_t *bridge = pcibr_soft->bs_base;
unsigned pcibr_int_bits = pcibr_intr->bi_ibits;
unsigned pcibr_int_bit;
uint64_t int_enable;
unsigned long s;
unsigned long s;
if (pcibr_intr == NULL)
return -1;
......@@ -566,37 +559,27 @@ pcibr_intr_connect(pcibr_intr_t pcibr_intr, intr_func_t intr_func, intr_arg_t in
* Use the pcibr wrapper function to handle all Bridge interrupts
* regardless of whether the interrupt line is shared or not.
*/
int_addr = (void *)&(bridge->p_int_addr_64[pcibr_int_bit]);
xtalk_intr_connect(xtalk_intr, pcibr_intr_func, (intr_arg_t) intr_wrap,
(xtalk_intr_setfunc_t) pcibr_setpciint,
(void *)int_addr);
int_addr = pcireg_intr_addr_addr(pcibr_soft, pcibr_int_bit);
pcibr_soft->bs_intr[pcibr_int_bit].bsi_int_bit =
((pcibr_soft->bs_busnum << 3) | pcibr_int_bit);
xtalk_intr_connect(xtalk_intr,
pcibr_intr_func,
(intr_arg_t) intr_wrap,
(xtalk_intr_setfunc_t) pcibr_setpciint,
&pcibr_soft->bs_intr[pcibr_int_bit].bsi_int_bit);
pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_connected = 1;
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
"pcibr_setpciint: int_addr=0x%x, *int_addr=0x%x, "
"pcibr_int_bit=0x%x\n", int_addr,
*(picreg_t *)int_addr,
"pcibr_setpciint: int_addr=0x%lx, *int_addr=0x%lx, "
"pcibr_int_bit=0x%x\n", int_addr,
pcireg_intr_addr_get(pcibr_soft, pcibr_int_bit),
pcibr_int_bit));
}
/* PIC WAR. PV# 854697
* On PIC we must write 64-bit MMRs with 64-bit stores
*/
s = pcibr_lock(pcibr_soft);
if (PCIBR_WAR_ENABLED(PV854697, pcibr_soft)) {
int_enable = bridge->p_int_enable_64;
int_enable |= pcibr_int_bits;
bridge->p_int_enable_64 = int_enable;
} else {
bridgereg_t int_enable;
int_enable = bridge->b_int_enable;
int_enable |= pcibr_int_bits;
bridge->b_int_enable = int_enable;
}
bridge->b_wid_tflush; /* wait until Bridge PIO complete */
pcireg_intr_enable_bit_set(pcibr_soft, pcibr_int_bits);
pcireg_tflush_get(pcibr_soft);
pcibr_unlock(pcibr_soft, s);
return 0;
......@@ -607,12 +590,10 @@ void
pcibr_intr_disconnect(pcibr_intr_t pcibr_intr)
{
pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
bridge_t *bridge = pcibr_soft->bs_base;
unsigned pcibr_int_bits = pcibr_intr->bi_ibits;
unsigned pcibr_int_bit;
pcibr_intr_wrap_t intr_wrap;
uint64_t int_enable;
unsigned long s;
pcibr_intr_wrap_t intr_wrap;
unsigned long s;
/* Stop calling the function. Now.
*/
......@@ -636,16 +617,8 @@ pcibr_intr_disconnect(pcibr_intr_t pcibr_intr)
return;
s = pcibr_lock(pcibr_soft);
if (PCIBR_WAR_ENABLED(PV854697, pcibr_soft)) {
int_enable = bridge->p_int_enable_64;
int_enable &= ~pcibr_int_bits;
bridge->p_int_enable_64 = int_enable;
} else {
int_enable = (uint64_t)bridge->b_int_enable;
int_enable &= ~pcibr_int_bits;
bridge->b_int_enable = (bridgereg_t)int_enable;
}
bridge->b_wid_tflush; /* wait until Bridge PIO complete */
pcireg_intr_enable_bit_clr(pcibr_soft, pcibr_int_bits);
pcireg_tflush_get(pcibr_soft); /* wait until Bridge PIO complete */
pcibr_unlock(pcibr_soft, s);
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
......@@ -654,7 +627,6 @@ pcibr_intr_disconnect(pcibr_intr_t pcibr_intr)
for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
if (pcibr_int_bits & (1 << pcibr_int_bit)) {
void *int_addr;
/* if the interrupt line is now shared,
* do not disconnect it.
......@@ -674,19 +646,18 @@ pcibr_intr_disconnect(pcibr_intr_t pcibr_intr)
* where the another pcibr_intr_alloc()
* was in progress as we disconnected.
*/
intr_wrap = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap;
if (!pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared)
continue;
intr_wrap = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap;
if (!pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared)
continue;
int_addr = (void *)&(bridge->p_int_addr_64[pcibr_int_bit]);
pcibr_soft->bs_intr[pcibr_int_bit].bsi_int_bit =
((pcibr_soft->bs_busnum << 3) | pcibr_int_bit);
xtalk_intr_connect(pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr,
pcibr_intr_func, (intr_arg_t) intr_wrap,
(xtalk_intr_setfunc_t)pcibr_setpciint,
(void *)(long)pcibr_int_bit);
pcibr_intr_func,
(intr_arg_t) intr_wrap,
(xtalk_intr_setfunc_t) pcibr_setpciint,
&pcibr_soft->bs_intr[pcibr_int_bit].bsi_int_bit);
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
"pcibr_intr_disconnect: now-sharing int_bits=0x%x\n",
pcibr_int_bit));
......@@ -711,10 +682,9 @@ pcibr_intr_cpu_get(pcibr_intr_t pcibr_intr)
* INTERRUPT HANDLING
*/
void
pcibr_clearwidint(bridge_t *bridge)
pcibr_clearwidint(pcibr_soft_t pcibr_soft)
{
bridge->b_wid_int_upper = 0;
bridge->b_wid_int_lower = 0;
pcireg_intr_dst_set(pcibr_soft, 0);
}
......@@ -724,100 +694,12 @@ pcibr_setwidint(xtalk_intr_t intr)
xwidgetnum_t targ = xtalk_intr_target_get(intr);
iopaddr_t addr = xtalk_intr_addr_get(intr);
xtalk_intr_vector_t vect = xtalk_intr_vector_get(intr);
widgetreg_t NEW_b_wid_int_upper, NEW_b_wid_int_lower;
widgetreg_t OLD_b_wid_int_upper, OLD_b_wid_int_lower;
bridge_t *bridge = (bridge_t *)xtalk_intr_sfarg_get(intr);
NEW_b_wid_int_upper = ( (0x000F0000 & (targ << 16)) |
XTALK_ADDR_TO_UPPER(addr));
NEW_b_wid_int_lower = XTALK_ADDR_TO_LOWER(addr);
OLD_b_wid_int_upper = bridge->b_wid_int_upper;
OLD_b_wid_int_lower = bridge->b_wid_int_lower;
/* Verify that all interrupts from this Bridge are using a single PI */
if ((OLD_b_wid_int_upper != 0) && (OLD_b_wid_int_lower != 0)) {
/*
* Once set, these registers shouldn't change; they should
* be set multiple times with the same values.
*
* If we're attempting to change these registers, it means
* that our heuristics for allocating interrupts in a way
* appropriate for IP35 have failed, and the admin needs to
* explicitly direct some interrupts (or we need to make the
* heuristics more clever).
*
* In practice, we hope this doesn't happen very often, if
* at all.
*/
if ((OLD_b_wid_int_upper != NEW_b_wid_int_upper) ||
(OLD_b_wid_int_lower != NEW_b_wid_int_lower)) {
printk(KERN_WARNING "Interrupt allocation is too complex.\n");
printk(KERN_WARNING "Use explicit administrative interrupt targetting.\n");
printk(KERN_WARNING "bridge=0x%lx targ=0x%x\n", (unsigned long)bridge, targ);
printk(KERN_WARNING "NEW=0x%x/0x%x OLD=0x%x/0x%x\n",
NEW_b_wid_int_upper, NEW_b_wid_int_lower,
OLD_b_wid_int_upper, OLD_b_wid_int_lower);
panic("PCI Bridge interrupt targetting error\n");
}
}
bridge->b_wid_int_upper = NEW_b_wid_int_upper;
bridge->b_wid_int_lower = NEW_b_wid_int_lower;
bridge->b_int_host_err = vect;
}
/*
* pcibr_intr_preset: called during mlreset time
* if the platform specific code needs to route
* one of the Bridge's xtalk interrupts before the
* xtalk infrastructure is available.
*/
void
pcibr_xintr_preset(void *which_widget,
int which_widget_intr,
xwidgetnum_t targ,
iopaddr_t addr,
xtalk_intr_vector_t vect)
{
bridge_t *bridge = (bridge_t *) which_widget;
if (which_widget_intr == -1) {
/* bridge widget error interrupt */
bridge->b_wid_int_upper = ( (0x000F0000 & (targ << 16)) |
XTALK_ADDR_TO_UPPER(addr));
bridge->b_wid_int_lower = XTALK_ADDR_TO_LOWER(addr);
bridge->b_int_host_err = vect;
printk("pcibr_xintr_preset: b_wid_int_upper 0x%lx b_wid_int_lower 0x%lx b_int_host_err 0x%x\n",
( (0x000F0000 & (targ << 16)) | XTALK_ADDR_TO_UPPER(addr)),
XTALK_ADDR_TO_LOWER(addr), vect);
/* turn on all interrupts except
* the PCI interrupt requests,
* at least at heart.
*/
bridge->b_int_enable |= ~BRIDGE_IMR_INT_MSK;
pcibr_soft_t bridge = (pcibr_soft_t)xtalk_intr_sfarg_get(intr);
} else {
/* routing a PCI device interrupt.
* targ and low 38 bits of addr must
* be the same as the already set
* value for the widget error interrupt.
*/
bridge->b_int_addr[which_widget_intr].addr =
((BRIDGE_INT_ADDR_HOST & (addr >> 30)) |
(BRIDGE_INT_ADDR_FLD & vect));
/*
* now bridge can let it through;
* NB: still should be blocked at
* xtalk provider end, until the service
* function is set.
*/
bridge->b_int_enable |= 1 << vect;
}
bridge->b_wid_tflush; /* wait until Bridge PIO complete */
pcireg_intr_dst_target_id_set(bridge, targ);
pcireg_intr_dst_addr_set(bridge, addr);
pcireg_intr_host_err_set(bridge, vect);
}
......@@ -834,17 +716,13 @@ void
pcibr_intr_func(intr_arg_t arg)
{
pcibr_intr_wrap_t wrap = (pcibr_intr_wrap_t) arg;
reg_p wrbf;
intr_func_t func;
pcibr_intr_t intr;
pcibr_intr_list_t list;
int clearit;
int do_nonthreaded = 1;
int is_threaded = 0;
int x = 0;
pcibr_soft_t pcibr_soft = wrap->iw_soft;
bridge_t *bridge = pcibr_soft->bs_base;
uint64_t p_enable = pcibr_soft->bs_int_enable;
int bit = wrap->iw_ibit;
/*
......@@ -854,16 +732,13 @@ pcibr_intr_func(intr_arg_t arg)
* this device.
*/
if (PCIBR_WAR_ENABLED(PV855272, pcibr_soft)) {
unsigned s;
unsigned long s;
/* disable-enable interrupts for this bridge pin */
p_enable &= ~(1 << bit);
s = pcibr_lock(pcibr_soft);
bridge->p_int_enable_64 = p_enable;
p_enable |= (1 << bit);
bridge->p_int_enable_64 = p_enable;
pcibr_unlock(pcibr_soft, s);
s = pcibr_lock(pcibr_soft);
pcireg_intr_enable_bit_clr(pcibr_soft, (1 << bit));
pcireg_intr_enable_bit_set(pcibr_soft, (1 << bit));
pcibr_unlock(pcibr_soft, s);
}
/*
......@@ -886,7 +761,8 @@ pcibr_intr_func(intr_arg_t arg)
clearit = 1;
while (do_nonthreaded) {
for (list = wrap->iw_list; list != NULL; list = list->il_next) {
if ((intr = list->il_intr) && (intr->bi_flags & PCIIO_INTR_CONNECTED)) {
if ((intr = list->il_intr) &&
(intr->bi_flags & PCIIO_INTR_CONNECTED)) {
/*
......@@ -914,17 +790,8 @@ pcibr_intr_func(intr_arg_t arg)
/* Non-threaded - Call the interrupt handler at interrupt level */
/* Only need to flush write buffers if sharing */
if ((wrap->iw_shared) && (wrbf = list->il_wrbf)) {
if ((x = *wrbf)) /* write request buffer flush */
#ifdef SUPPORT_PRINTING_V_FORMAT
printk(KERN_ALERT "pcibr_intr_func %v: \n"
"write buffer flush failed, wrbf=0x%x\n",
list->il_intr->bi_dev, wrbf);
#else
printk(KERN_ALERT "pcibr_intr_func %p: \n"
"write buffer flush failed, wrbf=0x%lx\n",
(void *)list->il_intr->bi_dev, (long) wrbf);
#endif
if (wrap->iw_shared) {
pcireg_wrb_flush_get(list->il_soft, list->il_slot);
}
func = intr->bi_func;
if ( func )
......@@ -954,26 +821,12 @@ pcibr_intr_func(intr_arg_t arg)
* list forever.
*/
if (clearit) {
pcibr_soft_t pcibr_soft = wrap->iw_soft;
bridge_t *bridge = pcibr_soft->bs_base;
bridgereg_t int_enable;
bridgereg_t mask = 1 << wrap->iw_ibit;
unsigned long s;
/* PIC BRINUGP WAR (PV# 854697):
* On PIC we must write 64-bit MMRs with 64-bit stores
*/
uint64_t mask = 1 << wrap->iw_ibit;
unsigned long s;
s = pcibr_lock(pcibr_soft);
if (PCIBR_WAR_ENABLED(PV854697, pcibr_soft)) {
int_enable = bridge->p_int_enable_64;
int_enable &= ~mask;
bridge->p_int_enable_64 = int_enable;
} else {
int_enable = (uint64_t)bridge->b_int_enable;
int_enable &= ~mask;
bridge->b_int_enable = (bridgereg_t)int_enable;
}
bridge->b_wid_tflush; /* wait until Bridge PIO complete */
pcireg_intr_enable_bit_clr(pcibr_soft, mask);
pcireg_tflush_get(pcibr_soft);
pcibr_unlock(pcibr_soft, s);
return;
}
......
......@@ -14,94 +14,867 @@
#include <asm/sn/pci/pcibr_private.h>
#include <asm/sn/pci/pci_defs.h>
#define IS_IOADDR(ptr) (!(((uint64_t)(ptr) & CAC_BASE) == CAC_BASE))
/*
* Control Register Access -- Read/Write 0000_0020
* Identification Register Access -- Read Only 0000_0000
*/
static uint64_t
__pcireg_id_get(pic_t *bridge)
{
return bridge->p_wid_id;
}
uint64_t
pcireg_control_get(void *ptr)
pcireg_bridge_id_get(void *ptr)
{
uint64_t ret = 0;
pic_t *bridge;
return __pcireg_id_get((pic_t *)ptr);
}
if (IS_IOADDR(ptr))
bridge = (pic_t *) ptr;
else
bridge = (pic_t *) ((pcibr_soft_t) (ptr))->bs_base;
uint64_t
pcireg_id_get(pcibr_soft_t ptr)
{
return __pcireg_id_get((pic_t *)ptr->bs_base);
}
ret = ((pic_t *) bridge)->p_wid_control;
return ret;
/*
* Address Bus Side Holding Register Access -- Read Only 0000_0010
*/
uint64_t
pcireg_bus_err_get(pcibr_soft_t ptr)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
return bridge->p_wid_err;
}
/*
* Interrupt Status Register Access -- Read Only 0000_0100
* Control Register Access -- Read/Write 0000_0020
*/
static uint64_t
__pcireg_control_get(pic_t *bridge)
{
return bridge->p_wid_control;
}
uint64_t
pcireg_bridge_control_get(void *ptr)
{
return __pcireg_control_get((pic_t *)ptr);
}
uint64_t
pcireg_intr_status_get(void *ptr)
pcireg_control_get(pcibr_soft_t ptr)
{
return __pcireg_control_get((pic_t *)ptr->bs_base);
}
void
pcireg_control_set(pcibr_soft_t ptr, uint64_t val)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
/* WAR for PV 439897 & 454474. Add a readback of the control
* register. Lock to protect against MP accesses to this
* register along with other write-only registers (See PVs).
* This register isn't accessed in the "hot path" so the splhi
* shouldn't be a bottleneck
*/
bridge->p_wid_control = val;
bridge->p_wid_control; /* WAR */
}
void
pcireg_control_bit_clr(pcibr_soft_t ptr, uint64_t bits)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
/* WAR for PV 439897 & 454474. Add a readback of the control
* register. Lock to protect against MP accesses to this
* register along with other write-only registers (See PVs).
* This register isn't accessed in the "hot path" so the splhi
* shouldn't be a bottleneck
*/
bridge->p_wid_control &= ~bits;
bridge->p_wid_control; /* WAR */
}
void
pcireg_control_bit_set(pcibr_soft_t ptr, uint64_t bits)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
/* WAR for PV 439897 & 454474. Add a readback of the control
* register. Lock to protect against MP accesses to this
* register along with other write-only registers (See PVs).
* This register isn't accessed in the "hot path" so the splhi
* shouldn't be a bottleneck
*/
bridge->p_wid_control |= bits;
bridge->p_wid_control; /* WAR */
}
/*
* Bus Speed (from control register); -- Read Only access 0000_0020
* 0x0 == 33MHz, 0x1 == 66MHz, 0x2 == 100MHz, 0x3 == 133MHz
*/
uint64_t
pcireg_speed_get(pcibr_soft_t ptr)
{
uint64_t speedbits;
pic_t *bridge = (pic_t *)ptr->bs_base;
speedbits = bridge->p_wid_control & PIC_CTRL_PCI_SPEED;
return (speedbits >> 4);
}
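A small helper sketch (illustrative only, not part of the patch) that maps the two-bit field returned by pcireg_speed_get() to MHz, per the encoding noted in the comment above:
static int
pci_bus_speed_mhz(uint64_t speedbits)
{
	/* 0 -> 33MHz, 1 -> 66MHz, 2 -> 100MHz, 3 -> 133MHz */
	static const int mhz[4] = { 33, 66, 100, 133 };
	return mhz[speedbits & 0x3];
}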
/*
* Bus Mode (i.e. PCI-X or PCI) (from Status register); 0000_0008
* 0x0 == PCI, 0x1 == PCI-X
*/
uint64_t
pcireg_mode_get(pcibr_soft_t ptr)
{
uint64_t pcix_active_bit;
pic_t *bridge = (pic_t *)ptr->bs_base;
pcix_active_bit = bridge->p_wid_stat & PIC_STAT_PCIX_ACTIVE;
return (pcix_active_bit >> PIC_STAT_PCIX_ACTIVE_SHFT);
}
void
pcireg_req_timeout_set(pcibr_soft_t ptr, uint64_t val)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
bridge->p_wid_req_timeout = val;
}
/*
* Interrupt Destination Addr Register Access -- Read/Write 0000_0038
*/
void
pcireg_intr_dst_set(pcibr_soft_t ptr, uint64_t val)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
bridge->p_wid_int = val;
}
/*
* Intr Destination Addr Reg Access (target_id) -- Read/Write 0000_0038
*/
uint64_t
pcireg_intr_dst_target_id_get(pcibr_soft_t ptr)
{
uint64_t tid_bits;
pic_t *bridge = (pic_t *)ptr->bs_base;
tid_bits = (bridge->p_wid_int & PIC_INTR_DEST_TID);
return (tid_bits >> PIC_INTR_DEST_TID_SHFT);
}
void
pcireg_intr_dst_target_id_set(pcibr_soft_t ptr, uint64_t target_id)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
bridge->p_wid_int &= ~PIC_INTR_DEST_TID;
bridge->p_wid_int |=
((target_id << PIC_INTR_DEST_TID_SHFT) & PIC_INTR_DEST_TID);
}
/*
* Intr Destination Addr Register Access (addr) -- Read/Write 0000_0038
*/
uint64_t
pcireg_intr_dst_addr_get(pcibr_soft_t ptr)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
return bridge->p_wid_int & PIC_XTALK_ADDR_MASK;
}
void
pcireg_intr_dst_addr_set(pcibr_soft_t ptr, uint64_t addr)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
bridge->p_wid_int &= ~PIC_XTALK_ADDR_MASK;
bridge->p_wid_int |= (addr & PIC_XTALK_ADDR_MASK);
}
/*
* Cmd Word Holding Bus Side Error Register Access -- Read Only 0000_0040
*/
uint64_t
pcireg_cmdword_err_get(pcibr_soft_t ptr)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
return bridge->p_wid_err_cmdword;
}
/*
* PCI/PCIX Target Flush Register Access -- Read Only 0000_0050
*/
uint64_t
pcireg_tflush_get(pcibr_soft_t ptr)
{
short bridge_type;
pic_t *bridge;
uint64_t ret = 0;
pic_t *bridge = (pic_t *)ptr->bs_base;
if (IS_IOADDR(ptr))
bridge = (pic_t *) ptr;
else
bridge = (pic_t *) ((pcibr_soft_t) (ptr))->bs_base;
ret = bridge->p_wid_tflush;
ret = ((pic_t *) bridge)->p_int_status;
/* Read of the Target Flush should always return zero */
ASSERT_ALWAYS(ret == 0);
return ret;
}
/*
* Cmd Word Holding Link Side Error Register Access -- Read Only 0000_0058
*/
uint64_t
pcireg_linkside_err_get(pcibr_soft_t ptr)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
return bridge->p_wid_aux_err;
}
/*
* PCI Response Buffer Address Holding Register -- Read Only 0000_0068
*/
uint64_t
pcireg_resp_err_get(pcibr_soft_t ptr)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
return bridge->p_wid_resp;
}
/*
* PCI Resp Buffer Address Holding Reg (Address) -- Read Only 0000_0068
*/
uint64_t
pcireg_resp_err_addr_get(pcibr_soft_t ptr)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
return bridge->p_wid_resp & PIC_RSP_BUF_ADDR;
}
/*
* PCI Resp Buffer Address Holding Register (Buffer)-- Read Only 0000_0068
*/
uint64_t
pcireg_resp_err_buf_get(pcibr_soft_t ptr)
{
uint64_t bufnum_bits;
pic_t *bridge = (pic_t *)ptr->bs_base;
bufnum_bits = (bridge->p_wid_resp_upper & PIC_RSP_BUF_NUM);
return (bufnum_bits >> PIC_RSP_BUF_NUM_SHFT);
}
/*
* PCI Resp Buffer Address Holding Register (Device)-- Read Only 0000_0068
*/
uint64_t
pcireg_resp_err_dev_get(pcibr_soft_t ptr)
{
uint64_t devnum_bits;
pic_t *bridge = (pic_t *)ptr->bs_base;
devnum_bits = (bridge->p_wid_resp_upper & PIC_RSP_BUF_DEV_NUM);
return (devnum_bits >> PIC_RSP_BUF_DEV_NUM_SHFT);
}
/*
* Address Holding Register Link Side Errors -- Read Only 0000_0078
*/
uint64_t
pcireg_linkside_err_addr_get(pcibr_soft_t ptr)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
return bridge->p_wid_addr_lkerr;
}
void
pcireg_dirmap_wid_set(pcibr_soft_t ptr, uint64_t target)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
bridge->p_dir_map &= ~PIC_DIRMAP_WID;
bridge->p_dir_map |=
((target << PIC_DIRMAP_WID_SHFT) & PIC_DIRMAP_WID);
}
void
pcireg_dirmap_diroff_set(pcibr_soft_t ptr, uint64_t dir_off)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
bridge->p_dir_map &= ~PIC_DIRMAP_DIROFF;
bridge->p_dir_map |= (dir_off & PIC_DIRMAP_DIROFF);
}
void
pcireg_dirmap_add512_set(pcibr_soft_t ptr)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
bridge->p_dir_map |= PIC_DIRMAP_ADD512;
}
void
pcireg_dirmap_add512_clr(pcibr_soft_t ptr)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
bridge->p_dir_map &= ~PIC_DIRMAP_ADD512;
}
/*
* PCI Page Map Fault Address Register Access -- Read Only 0000_0090
*/
uint64_t
pcireg_map_fault_get(pcibr_soft_t ptr)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
return bridge->p_map_fault;
}
/*
* Arbitration Register Access -- Read/Write 0000_00A0
*/
uint64_t
pcireg_arbitration_get(pcibr_soft_t ptr)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
return bridge->p_arb;
}
void
pcireg_arbitration_bit_set(pcibr_soft_t ptr, uint64_t bits)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
bridge->p_arb |= bits;
}
/*
* Internal Ram Parity Error Register Access -- Read Only 0000_00B0
*/
uint64_t
pcireg_parity_err_get(pcibr_soft_t ptr)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
return bridge->p_ate_parity_err;
}
/*
* Type 1 Configuration Register Access -- Read/Write 0000_00C8
*/
void
pcireg_type1_cntr_set(pcibr_soft_t ptr, uint64_t val)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
bridge->p_pci_cfg = val;
}
/*
* PCI Bus Error Lower Addr Holding Reg Access -- Read Only 0000_00D8
*/
uint64_t
pcireg_pci_bus_addr_get(pcibr_soft_t ptr)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
return bridge->p_pci_err;
}
/*
* PCI Bus Error Addr Holding Reg Access (Address) -- Read Only 0000_00D8
*/
uint64_t
pcireg_pci_bus_addr_addr_get(pcibr_soft_t ptr)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
return bridge->p_pci_err & PIC_XTALK_ADDR_MASK;
}
/*
* Interrupt Status Register Access -- Read Only 0000_0100
*/
uint64_t
pcireg_intr_status_get(pcibr_soft_t ptr)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
return bridge->p_int_status;
}
/*
* Interrupt Enable Register Access -- Read/Write 0000_0108
*/
uint64_t
pcireg_intr_enable_get(pcibr_soft_t ptr)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
return bridge->p_int_enable;
}
void
pcireg_intr_enable_set(pcibr_soft_t ptr, uint64_t val)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
bridge->p_int_enable = val;
}
void
pcireg_intr_enable_bit_clr(void *ptr, uint64_t bits)
pcireg_intr_enable_bit_clr(pcibr_soft_t ptr, uint64_t bits)
{
pic_t *bridge;
pic_t *bridge = (pic_t *)ptr->bs_base;
if (IS_IOADDR(ptr))
bridge = (pic_t *) ptr;
else
bridge = (pic_t *) ((pcibr_soft_t) (ptr))->bs_base;
bridge->p_int_enable &= ~bits;
}
void
pcireg_intr_enable_bit_set(void *ptr, uint64_t bits)
pcireg_intr_enable_bit_set(pcibr_soft_t ptr, uint64_t bits)
{
pic_t *bridge;
pic_t *bridge = (pic_t *)ptr->bs_base;
if (IS_IOADDR(ptr))
bridge = (pic_t *) ptr;
else
bridge = (pic_t *) ((pcibr_soft_t) (ptr))->bs_base;
bridge->p_int_enable |= bits;
}
/*
* Interrupt Reset Register Access -- Write Only 0000_0110
*/
void
pcireg_intr_reset_set(pcibr_soft_t ptr, uint64_t val)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
bridge->p_int_rst_stat = val;
}
void
pcireg_intr_mode_set(pcibr_soft_t ptr, uint64_t val)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
bridge->p_int_mode = val;
}
void
pcireg_intr_addr_addr_set(void *ptr, int int_n, uint64_t addr)
pcireg_intr_device_set(pcibr_soft_t ptr, uint64_t val)
{
pic_t *bridge;
pic_t *bridge = (pic_t *)ptr->bs_base;
if (IS_IOADDR(ptr))
bridge = (pic_t *) ptr;
else
bridge = (pic_t *) ((pcibr_soft_t) (ptr))->bs_base;
bridge->p_int_addr[int_n] &= ~(0x0000FFFFFFFFFFFF);
bridge->p_int_addr[int_n] |= (addr & 0x0000FFFFFFFFFFFF);
bridge->p_int_device = val;
}
static void
__pcireg_intr_device_bit_set(pic_t *bridge, uint64_t bits)
{
bridge->p_int_device |= bits;
}
void
pcireg_bridge_intr_device_bit_set(void *ptr, uint64_t bits)
{
__pcireg_intr_device_bit_set((pic_t *)ptr, bits);
}
void
pcireg_intr_device_bit_set(pcibr_soft_t ptr, uint64_t bits)
{
__pcireg_intr_device_bit_set((pic_t *)ptr->bs_base, bits);
}
void
pcireg_intr_device_bit_clr(pcibr_soft_t ptr, uint64_t bits)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
bridge->p_int_device &= ~bits;
}
/*
* Force Interrupt Register Access -- Write Only 0000_01C0 - 0000_01F8
* Host Error Interrupt Field Register Access -- Read/Write 0000_0128
*/
void
pcireg_force_intr_set(void *ptr, int int_n)
pcireg_intr_host_err_set(pcibr_soft_t ptr, uint64_t val)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
bridge->p_int_host_err = val;
}
/*
* Interrupt Host Address Register -- Read/Write 0000_0130 - 0000_0168
*/
uint64_t
pcireg_intr_addr_get(pcibr_soft_t ptr, int int_n)
{
pic_t *bridge;
pic_t *bridge = (pic_t *)ptr->bs_base;
return bridge->p_int_addr[int_n];
}
static void
__pcireg_intr_addr_set(pic_t *bridge, int int_n, uint64_t val)
{
bridge->p_int_addr[int_n] = val;
}
void
pcireg_bridge_intr_addr_set(void *ptr, int int_n, uint64_t val)
{
__pcireg_intr_addr_set((pic_t *)ptr, int_n, val);
}
void
pcireg_intr_addr_set(pcibr_soft_t ptr, int int_n, uint64_t val)
{
__pcireg_intr_addr_set((pic_t *)ptr->bs_base, int_n, val);
}
void *
pcireg_intr_addr_addr(pcibr_soft_t ptr, int int_n)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
return (void *)&(bridge->p_int_addr[int_n]);
}
static void
__pcireg_intr_addr_vect_set(pic_t *bridge, int int_n, uint64_t vect)
{
bridge->p_int_addr[int_n] &= ~PIC_HOST_INTR_FLD;
bridge->p_int_addr[int_n] |=
((vect << PIC_HOST_INTR_FLD_SHFT) & PIC_HOST_INTR_FLD);
}
void
pcireg_bridge_intr_addr_vect_set(void *ptr, int int_n, uint64_t vect)
{
__pcireg_intr_addr_vect_set((pic_t *)ptr, int_n, vect);
}
void
pcireg_intr_addr_vect_set(pcibr_soft_t ptr, int int_n, uint64_t vect)
{
__pcireg_intr_addr_vect_set((pic_t *)ptr->bs_base, int_n, vect);
}
/*
* Intr Host Address Register (int_addr) -- Read/Write 0000_0130 - 0000_0168
*/
static void
__pcireg_intr_addr_addr_set(pic_t *bridge, int int_n, uint64_t addr)
{
bridge->p_int_addr[int_n] &= ~PIC_HOST_INTR_ADDR;
bridge->p_int_addr[int_n] |= (addr & PIC_HOST_INTR_ADDR);
}
void
pcireg_bridge_intr_addr_addr_set(void *ptr, int int_n, uint64_t addr)
{
__pcireg_intr_addr_addr_set((pic_t *)ptr, int_n, addr);
}
void
pcireg_intr_addr_addr_set(pcibr_soft_t ptr, int int_n, uint64_t addr)
{
__pcireg_intr_addr_addr_set((pic_t *)ptr->bs_base, int_n, addr);
}
/*
* Multiple Interrupt Register Access -- Read Only 0000_0178
*/
uint64_t
pcireg_intr_multiple_get(pcibr_soft_t ptr)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
return bridge->p_mult_int;
}
/*
* Force Always Intr Register Access -- Write Only 0000_0180 - 0000_01B8
*/
static void *
__pcireg_force_always_addr_get(pic_t *bridge, int int_n)
{
return (void *)&(bridge->p_force_always[int_n]);
}
void *
pcireg_bridge_force_always_addr_get(void *ptr, int int_n)
{
return __pcireg_force_always_addr_get((pic_t *)ptr, int_n);
}
void *
pcireg_force_always_addr_get(pcibr_soft_t ptr, int int_n)
{
return __pcireg_force_always_addr_get((pic_t *)ptr->bs_base, int_n);
}
/*
* Force Interrupt Register Access -- Write Only 0000_01C0 - 0000_01F8
*/
void
pcireg_force_intr_set(pcibr_soft_t ptr, int int_n)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
if (IS_IOADDR(ptr))
bridge = (pic_t *) ptr;
else
bridge = (pic_t *) ((pcibr_soft_t) (ptr))->bs_base;
bridge->p_force_pin[int_n] = 1;
}
/*
* Device(x) Register Access -- Read/Write 0000_0200 - 0000_0218
*/
uint64_t
pcireg_device_get(pcibr_soft_t ptr, int device)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
ASSERT_ALWAYS((device >= 0) && (device <= 3));
return bridge->p_device[device];
}
void
pcireg_device_set(pcibr_soft_t ptr, int device, uint64_t val)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
ASSERT_ALWAYS((device >= 0) && (device <= 3));
bridge->p_device[device] = val;
}
/*
* Device(x) Write Buffer Flush Reg Access -- Read Only 0000_0240 - 0000_0258
*/
uint64_t
pcireg_wrb_flush_get(pcibr_soft_t ptr, int device)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
uint64_t ret = 0;
ASSERT_ALWAYS((device >= 0) && (device <= 3));
ret = bridge->p_wr_req_buf[device];
/* Read of the Write Buffer Flush should always return zero */
ASSERT_ALWAYS(ret == 0);
return ret;
}
/*
* Even/Odd RRB Register Access -- Read/Write 0000_0280 - 0000_0288
*/
uint64_t
pcireg_rrb_get(pcibr_soft_t ptr, int even_odd)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
return bridge->p_rrb_map[even_odd];
}
void
pcireg_rrb_set(pcibr_soft_t ptr, int even_odd, uint64_t val)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
bridge->p_rrb_map[even_odd] = val;
}
void
pcireg_rrb_bit_set(pcibr_soft_t ptr, int even_odd, uint64_t bits)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
bridge->p_rrb_map[even_odd] |= bits;
}
/*
* RRB Status Register Access -- Read Only 0000_0290
*/
uint64_t
pcireg_rrb_status_get(pcibr_soft_t ptr)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
return bridge->p_resp_status;
}
/*
* RRB Clear Register Access -- Write Only 0000_0298
*/
void
pcireg_rrb_clear_set(pcibr_soft_t ptr, uint64_t val)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
bridge->p_resp_clear = val;
}
/*
* PCIX Bus Error Address Register Access -- Read Only 0000_0600
*/
uint64_t
pcireg_pcix_bus_err_addr_get(pcibr_soft_t ptr)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
return bridge->p_pcix_bus_err_addr;
}
/*
* PCIX Bus Error Attribute Register Access -- Read Only 0000_0608
*/
uint64_t
pcireg_pcix_bus_err_attr_get(pcibr_soft_t ptr)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
return bridge->p_pcix_bus_err_attr;
}
/*
* PCIX Bus Error Data Register Access -- Read Only 0000_0610
*/
uint64_t
pcireg_pcix_bus_err_data_get(pcibr_soft_t ptr)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
return bridge->p_pcix_bus_err_data;
}
/*
* PCIX PIO Split Request Address Register Access -- Read Only 0000_0618
*/
uint64_t
pcireg_pcix_pio_split_addr_get(pcibr_soft_t ptr)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
return bridge->p_pcix_pio_split_addr;
}
/*
* PCIX PIO Split Request Attribute Register Access -- Read Only 0000_0620
*/
uint64_t
pcireg_pcix_pio_split_attr_get(pcibr_soft_t ptr)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
return bridge->p_pcix_pio_split_attr;
}
/*
* PCIX DMA Request Error Attribute Register Access -- Read Only 0000_0628
*/
uint64_t
pcireg_pcix_req_err_attr_get(pcibr_soft_t ptr)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
return bridge->p_pcix_dma_req_err_attr;
}
/*
* PCIX DMA Request Error Address Register Access -- Read Only 0000_0630
*/
uint64_t
pcireg_pcix_req_err_addr_get(pcibr_soft_t ptr)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
return bridge->p_pcix_dma_req_err_addr;
}
/*
* Type 0 Configuration Space Access -- Read/Write
*/
cfg_p
pcireg_type0_cfg_addr(pcibr_soft_t ptr, uint8_t slot, uint8_t func, int off)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
/* Type 0 Config space accesses on PIC are 1-4, not 0-3 since
* it is a PCIX Bridge. See sys/PCI/pic.h for explanation.
*/
slot++;
ASSERT_ALWAYS(((int) slot >= 1) && ((int) slot <= 4));
return &(bridge->p_type0_cfg_dev[slot].f[func].l[(off / 4)]);
}
/*
* Type 1 Configuration Space Access -- Read/Write
*/
cfg_p
pcireg_type1_cfg_addr(pcibr_soft_t ptr, uint8_t func, int offset)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
/*
* Return a config space address for the given slot/func/offset.
* Note the returned pointer is a 32-bit word aligned pointer (i.e. cfg_p)
* pointing to the 32-bit word that contains the "offset" byte.
*/
return &(bridge->p_type1_cfg.f[func].l[(offset / 4)]);
}
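A hedged, caller-side sketch of how one config byte might be extracted from the 32-bit word pointer returned above; the byte-lane ordering shown is an assumption, not something stated by this patch:
#include <stdint.h>
static uint8_t
cfg_word_read8(volatile uint32_t *word, int offset)
{
	/* assumed: byte 'offset & 3' sits in the corresponding 8-bit lane */
	return (uint8_t)(*word >> ((offset & 3) * 8));
}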
/*
* Internal ATE SSRAM Access -- Read/Write
*/
bridge_ate_t
pcireg_int_ate_get(pcibr_soft_t ptr, int ate_index)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
ASSERT_ALWAYS((ate_index >= 0) && (ate_index <= 1024));
return bridge->p_int_ate_ram[ate_index];
}
void
pcireg_int_ate_set(pcibr_soft_t ptr, int ate_index, bridge_ate_t val)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
ASSERT_ALWAYS((ate_index >= 0) && (ate_index <= 1024));
bridge->p_int_ate_ram[ate_index] = (picate_t) val;
}
bridge_ate_p
pcireg_int_ate_addr(pcibr_soft_t ptr, int ate_index)
{
pic_t *bridge = (pic_t *)ptr->bs_base;
ASSERT_ALWAYS((ate_index >= 0) && (ate_index <= 1024));
return &(bridge->p_int_ate_ram[ate_index]);
}
......@@ -14,23 +14,19 @@
#include <asm/sn/pci/pcibr_private.h>
#include <asm/sn/pci/pci_defs.h>
void do_pcibr_rrb_clear(bridge_t *, int);
void do_pcibr_rrb_flush(bridge_t *, int);
int do_pcibr_rrb_count_valid(bridge_t *, pciio_slot_t, int);
int do_pcibr_rrb_count_avail(bridge_t *, pciio_slot_t);
int do_pcibr_rrb_alloc(bridge_t *, pciio_slot_t, int, int);
int do_pcibr_rrb_free(bridge_t *, pciio_slot_t, int, int);
void do_pcibr_rrb_free_all(pcibr_soft_t, bridge_t *, pciio_slot_t);
void pcibr_rrb_alloc_init(pcibr_soft_t, int, int, int);
void pcibr_rrb_alloc_more(pcibr_soft_t, int, int, int);
void do_pcibr_rrb_autoalloc(pcibr_soft_t, int, int, int);
int pcibr_wrb_flush(vertex_hdl_t);
int pcibr_rrb_alloc(vertex_hdl_t, int *, int *);
int pcibr_rrb_check(vertex_hdl_t, int *, int *, int *, int *);
int pcibr_alloc_all_rrbs(vertex_hdl_t, int, int, int, int,
int, int, int, int, int);
void pcibr_rrb_flush(vertex_hdl_t);
int pcibr_slot_initial_rrb_alloc(vertex_hdl_t,pciio_slot_t);
int pcibr_wrb_flush(vertex_hdl_t);
int pcibr_rrb_alloc(vertex_hdl_t, int *, int *);
int pcibr_rrb_check(vertex_hdl_t, int *, int *, int *, int *);
void pcibr_rrb_flush(vertex_hdl_t);
int pcibr_slot_initial_rrb_alloc(vertex_hdl_t,pciio_slot_t);
void pcibr_rrb_debug(char *, pcibr_soft_t);
void pcibr_rrb_debug(char *, pcibr_soft_t);
/*
* RRB Management
......@@ -52,23 +48,27 @@ void pcibr_rrb_debug(char *, pcibr_soft_t);
#define RRB_MASK (0xf) /* mask a single rrb within reg */
#define RRB_SIZE (4) /* sizeof rrb within reg (bits) */
#define RRB_ENABLE_BIT(bridge) (0x8) /* [BRIDGE | PIC]_RRB_EN */
#define NUM_PDEV_BITS(bridge) (1)
#define NUM_VDEV_BITS(bridge) (2)
#define NUMBER_VCHANNELS(bridge) (4)
#define SLOT_2_PDEV(bridge, slot) ((slot) >> 1)
#define SLOT_2_RRB_REG(bridge, slot) ((slot) & 0x1)
#define RRB_ENABLE_BIT (0x8) /* [BRIDGE | PIC]_RRB_EN */
#define NUM_PDEV_BITS (1)
#define NUMBER_VCHANNELS (4)
#define SLOT_2_PDEV(slot) ((slot) >> 1)
#define SLOT_2_RRB_REG(slot) ((slot) & 0x1)
#define RRB_VALID(rrb) (0x00010000 << (rrb))
#define RRB_INUSE(rrb) (0x00000001 << (rrb))
#define RRB_CLEAR(rrb) (0x00000001 << (rrb))
/* validate that the slot and virtual channel are valid for a given bridge */
#define VALIDATE_SLOT_n_VCHAN(bridge, s, v) \
(((((s) != PCIIO_SLOT_NONE) && ((s) <= (pciio_slot_t)3)) && (((v) >= 0) && ((v) <= 3))) ? 1 : 0)
/* validate that the slot and virtual channel are valid */
#define VALIDATE_SLOT_n_VCHAN(s, v) \
(((((s) != PCIIO_SLOT_NONE) && ((s) <= (pciio_slot_t)3)) && \
(((v) >= 0) && ((v) <= 3))) ? 1 : 0)
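For reference, the 4-bit RRB map field that the routines below search for and install can be built directly from the macros just defined; a minimal sketch (illustrative only, not part of the patch):
/* enable bit | virtual channel bits | PCI device bit, as packed into each
 * 4-bit field of the even/odd RRB map registers */
static inline unsigned int
rrb_field(pciio_slot_t slot, int vchan)
{
	return RRB_ENABLE_BIT | (vchan << NUM_PDEV_BITS) | SLOT_2_PDEV(slot);
}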
/*
* Count how many RRBs are marked valid for the specified PCI slot
* and virtual channel. Return the count.
*/
int
do_pcibr_rrb_count_valid(bridge_t *bridge,
static int
do_pcibr_rrb_count_valid(pcibr_soft_t pcibr_soft,
pciio_slot_t slot,
int vchan)
{
......@@ -76,18 +76,18 @@ do_pcibr_rrb_count_valid(bridge_t *bridge,
uint16_t enable_bit, vchan_bits, pdev_bits, rrb_bits;
int rrb_index, cnt=0;
if (!VALIDATE_SLOT_n_VCHAN(bridge, slot, vchan)) {
if (!VALIDATE_SLOT_n_VCHAN(slot, vchan)) {
printk(KERN_WARNING "do_pcibr_rrb_count_valid() invalid slot/vchan [%d/%d]\n", slot, vchan);
return 0;
}
enable_bit = RRB_ENABLE_BIT(bridge);
vchan_bits = vchan << NUM_PDEV_BITS(bridge);
pdev_bits = SLOT_2_PDEV(bridge, slot);
enable_bit = RRB_ENABLE_BIT;
vchan_bits = vchan << NUM_PDEV_BITS;
pdev_bits = SLOT_2_PDEV(slot);
rrb_bits = enable_bit | vchan_bits | pdev_bits;
tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
tmp = pcireg_rrb_get(pcibr_soft, SLOT_2_RRB_REG(slot));
for (rrb_index = 0; rrb_index < 8; rrb_index++) {
if ((tmp & RRB_MASK) == rrb_bits)
cnt++;
......@@ -101,23 +101,23 @@ do_pcibr_rrb_count_valid(bridge_t *bridge,
* Count how many RRBs are available to be allocated to the specified
* slot. Return the count.
*/
int
do_pcibr_rrb_count_avail(bridge_t *bridge,
static int
do_pcibr_rrb_count_avail(pcibr_soft_t pcibr_soft,
pciio_slot_t slot)
{
uint64_t tmp;
uint16_t enable_bit;
int rrb_index, cnt=0;
if (!VALIDATE_SLOT_n_VCHAN(bridge, slot, 0)) {
if (!VALIDATE_SLOT_n_VCHAN(slot, 0)) {
printk(KERN_WARNING "do_pcibr_rrb_count_avail() invalid slot/vchan");
return 0;
}
enable_bit = RRB_ENABLE_BIT(bridge);
tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
enable_bit = RRB_ENABLE_BIT;
tmp = pcireg_rrb_get(pcibr_soft, SLOT_2_RRB_REG(slot));
for (rrb_index = 0; rrb_index < 8; rrb_index++) {
if ((tmp & enable_bit) != enable_bit)
cnt++;
......@@ -135,8 +135,8 @@ do_pcibr_rrb_count_avail(bridge_t *bridge,
* Note that if a request can be partially filled, it will be, even if
* we return failure.
*/
int
do_pcibr_rrb_alloc(bridge_t *bridge,
static int
do_pcibr_rrb_alloc(pcibr_soft_t pcibr_soft,
pciio_slot_t slot,
int vchan,
int more)
......@@ -145,18 +145,18 @@ do_pcibr_rrb_alloc(bridge_t *bridge,
uint16_t enable_bit, vchan_bits, pdev_bits, rrb_bits;
int rrb_index;
if (!VALIDATE_SLOT_n_VCHAN(bridge, slot, vchan)) {
if (!VALIDATE_SLOT_n_VCHAN(slot, vchan)) {
printk(KERN_WARNING "do_pcibr_rrb_alloc() invalid slot/vchan");
return -1;
}
enable_bit = RRB_ENABLE_BIT(bridge);
vchan_bits = vchan << NUM_PDEV_BITS(bridge);
pdev_bits = SLOT_2_PDEV(bridge, slot);
enable_bit = RRB_ENABLE_BIT;
vchan_bits = vchan << NUM_PDEV_BITS;
pdev_bits = SLOT_2_PDEV(slot);
rrb_bits = enable_bit | vchan_bits | pdev_bits;
reg = tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
reg = tmp = pcireg_rrb_get(pcibr_soft, SLOT_2_RRB_REG(slot));
for (rrb_index = 0; ((rrb_index < 8) && (more > 0)); rrb_index++) {
if ((tmp & enable_bit) != enable_bit) {
/* clear the rrb and OR in the new rrb into 'reg' */
......@@ -166,11 +166,41 @@ do_pcibr_rrb_alloc(bridge_t *bridge,
}
tmp = (tmp >> RRB_SIZE);
}
bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg = reg;
pcireg_rrb_set(pcibr_soft, SLOT_2_RRB_REG(slot), reg);
return (more ? -1 : 0);
}
/*
* Wait for the specified rrb to have no outstanding XIO pkts
* and for all data to be drained. Mark the rrb as no longer being
* valid.
*/
static void
do_pcibr_rrb_clear(pcibr_soft_t pcibr_soft, int rrb)
{
uint64_t status;
/* bridge_lock must be held; this RRB must be disabled. */
/* wait until RRB has no outstanding XIO packets. */
status = pcireg_rrb_status_get(pcibr_soft);
while (status & RRB_INUSE(rrb)) {
status = pcireg_rrb_status_get(pcibr_soft);
}
/* if the RRB has data, drain it. */
if (status & RRB_VALID(rrb)) {
pcireg_rrb_clear_set(pcibr_soft, RRB_CLEAR(rrb));
/* wait until RRB is no longer valid. */
status = pcireg_rrb_status_get(pcibr_soft);
while (status & RRB_VALID(rrb)) {
status = pcireg_rrb_status_get(pcibr_soft);
}
}
}
/*
* Release some of the RRBs that have been allocated for the specified
......@@ -180,8 +210,8 @@ do_pcibr_rrb_alloc(bridge_t *bridge,
* Note that if a request can be partially fulfilled, it will be, even
* if we return failure.
*/
int
do_pcibr_rrb_free(bridge_t *bridge,
static int
do_pcibr_rrb_free(pcibr_soft_t pcibr_soft,
pciio_slot_t slot,
int vchan,
int less)
......@@ -190,18 +220,18 @@ do_pcibr_rrb_free(bridge_t *bridge,
uint16_t enable_bit, vchan_bits, pdev_bits, rrb_bits;
int rrb_index;
if (!VALIDATE_SLOT_n_VCHAN(bridge, slot, vchan)) {
if (!VALIDATE_SLOT_n_VCHAN(slot, vchan)) {
printk(KERN_WARNING "do_pcibr_rrb_free() invalid slot/vchan");
return -1;
}
enable_bit = RRB_ENABLE_BIT(bridge);
vchan_bits = vchan << NUM_PDEV_BITS(bridge);
pdev_bits = SLOT_2_PDEV(bridge, slot);
enable_bit = RRB_ENABLE_BIT;
vchan_bits = vchan << NUM_PDEV_BITS;
pdev_bits = SLOT_2_PDEV(slot);
rrb_bits = enable_bit | vchan_bits | pdev_bits;
reg = tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
reg = tmp = pcireg_rrb_get(pcibr_soft, SLOT_2_RRB_REG(slot));
for (rrb_index = 0; ((rrb_index < 8) && (less > 0)); rrb_index++) {
if ((tmp & RRB_MASK) == rrb_bits) {
/*
......@@ -210,132 +240,144 @@ do_pcibr_rrb_free(bridge_t *bridge,
* reg = reg & ~(RRB_MASK << (RRB_SIZE * rrb_index));
* But to be compatible with old code we'll only clear enable.
*/
reg = reg & ~(RRB_ENABLE_BIT(bridge) << (RRB_SIZE * rrb_index));
reg = reg & ~(RRB_ENABLE_BIT << (RRB_SIZE * rrb_index));
clr = clr | (enable_bit << (RRB_SIZE * rrb_index));
less--;
}
tmp = (tmp >> RRB_SIZE);
}
bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg = reg;
pcireg_rrb_set(pcibr_soft, SLOT_2_RRB_REG(slot), reg);
/* call do_pcibr_rrb_clear() for all the rrbs we've freed */
for (rrb_index = 0; rrb_index < 8; rrb_index++) {
int evn_odd = SLOT_2_RRB_REG(bridge, slot);
int evn_odd = SLOT_2_RRB_REG(slot);
if (clr & (enable_bit << (RRB_SIZE * rrb_index)))
do_pcibr_rrb_clear(bridge, (2 * rrb_index) + evn_odd);
do_pcibr_rrb_clear(pcibr_soft, (2 * rrb_index) + evn_odd);
}
return (less ? -1 : 0);
}
/*
* Flush the specified rrb by calling do_pcibr_rrb_clear(). This
* routine is just a wrapper to make sure the rrb is disabled
* before calling do_pcibr_rrb_clear().
*/
static void
do_pcibr_rrb_flush(pcibr_soft_t pcibr_soft, int rrbn)
{
uint64_t rrbv;
int shft = (RRB_SIZE * (rrbn >> 1));
uint64_t ebit = RRB_ENABLE_BIT << shft;
rrbv = pcireg_rrb_get(pcibr_soft, (rrbn & 1));
if (rrbv & ebit) {
pcireg_rrb_set(pcibr_soft, (rrbn & 1), (rrbv & ~ebit));
}
do_pcibr_rrb_clear(pcibr_soft, rrbn);
if (rrbv & ebit) {
pcireg_rrb_set(pcibr_soft, (rrbn & 1), rrbv);
}
}
/*
* free all the rrbs (both the normal and virtual channels) for the
* specified slot.
*/
void
do_pcibr_rrb_free_all(pcibr_soft_t pcibr_soft,
bridge_t *bridge,
pciio_slot_t slot)
{
int vchan;
int vchan_total = NUMBER_VCHANNELS(bridge);
int vchan_total = NUMBER_VCHANNELS;
/* pretend we own all 8 rrbs and just ignore the return value */
for (vchan = 0; vchan < vchan_total; vchan++) {
(void)do_pcibr_rrb_free(bridge, slot, vchan, 8);
do_pcibr_rrb_free(pcibr_soft, slot, vchan, 8);
pcibr_soft->bs_rrb_valid[slot][vchan] = 0;
}
}
/*
* Wait for the the specified rrb to have no outstanding XIO pkts
* and for all data to be drained. Mark the rrb as no longer being
* valid.
* Initialize a slot with a given number of RRBs. (this routine
* will also give back RRBs if the slot has more than we want).
*/
void
do_pcibr_rrb_clear(bridge_t *bridge, int rrb)
pcibr_rrb_alloc_init(pcibr_soft_t pcibr_soft,
int slot,
int vchan,
int init_rrbs)
{
uint64_t status;
int had = pcibr_soft->bs_rrb_valid[slot][vchan];
int have = had;
int added = 0;
/* bridge_lock must be held;
* this RRB must be disabled.
*/
/* wait until RRB has no outstanding XIO packets. */
while ((status = bridge->b_resp_status) & BRIDGE_RRB_INUSE(rrb)) {
; /* XXX- beats on bridge. bad idea? */
}
/* if the RRB has data, drain it. */
if (status & BRIDGE_RRB_VALID(rrb)) {
bridge->b_resp_clear = BRIDGE_RRB_CLEAR(rrb);
for (added = 0; have < init_rrbs; ++added, ++have) {
if (pcibr_soft->bs_rrb_res[slot] > 0)
pcibr_soft->bs_rrb_res[slot]--;
else if (pcibr_soft->bs_rrb_avail[slot & 1] > 0)
pcibr_soft->bs_rrb_avail[slot & 1]--;
else
break;
if (do_pcibr_rrb_alloc(pcibr_soft, slot, vchan, 1) < 0)
break;
/* wait until RRB is no longer valid. */
while ((status = bridge->b_resp_status) & BRIDGE_RRB_VALID(rrb)) {
; /* XXX- beats on bridge. bad idea? */
}
pcibr_soft->bs_rrb_valid[slot][vchan]++;
}
}
/*
* Flush the specified rrb by calling do_pcibr_rrb_clear(). This
* routine is just a wrapper to make sure the rrb is disabled
* before calling do_pcibr_rrb_clear().
*/
void
do_pcibr_rrb_flush(bridge_t *bridge, int rrbn)
{
reg_p rrbp = &bridge->b_rrb_map[rrbn & 1].reg;
bridgereg_t rrbv;
int shft = (RRB_SIZE * (rrbn >> 1));
unsigned long ebit = RRB_ENABLE_BIT(bridge) << shft;
rrbv = *rrbp;
if (rrbv & ebit) {
*rrbp = rrbv & ~ebit;
/* Free any extra RRBs that the slot may have allocated to it */
while (have > init_rrbs) {
pcibr_soft->bs_rrb_avail[slot & 1]++;
pcibr_soft->bs_rrb_valid[slot][vchan]--;
do_pcibr_rrb_free(pcibr_soft, slot, vchan, 1);
added--;
have--;
}
do_pcibr_rrb_clear(bridge, rrbn);
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_RRB, pcibr_soft->bs_vhdl,
"pcibr_rrb_alloc_init: had %d, added/removed %d, "
"(of requested %d) RRBs "
"to slot %d, vchan %d\n", had, added, init_rrbs,
PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot), vchan));
if (rrbv & ebit) {
*rrbp = rrbv;
}
pcibr_rrb_debug("pcibr_rrb_alloc_init", pcibr_soft);
}
/*
* Allocate more RRBs to a given slot (if the RRBs are available).
*/
void
do_pcibr_rrb_autoalloc(pcibr_soft_t pcibr_soft,
int slot,
int vchan,
int more_rrbs)
pcibr_rrb_alloc_more(pcibr_soft_t pcibr_soft,
int slot,
int vchan,
int more_rrbs)
{
bridge_t *bridge = pcibr_soft->bs_base;
int got;
int added;
for (got = 0; got < more_rrbs; ++got) {
for (added = 0; added < more_rrbs; ++added) {
if (pcibr_soft->bs_rrb_res[slot] > 0)
pcibr_soft->bs_rrb_res[slot]--;
else if (pcibr_soft->bs_rrb_avail[slot & 1] > 0)
pcibr_soft->bs_rrb_avail[slot & 1]--;
else
break;
if (do_pcibr_rrb_alloc(bridge, slot, vchan, 1) < 0)
if (do_pcibr_rrb_alloc(pcibr_soft, slot, vchan, 1) < 0)
break;
pcibr_soft->bs_rrb_valid[slot][vchan]++;
}
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_RRB, pcibr_soft->bs_vhdl,
"do_pcibr_rrb_autoalloc: added %d (of %d requested) RRBs "
"to slot %d, vchan %d\n", got, more_rrbs,
"pcibr_rrb_alloc_more: added %d (of %d requested) RRBs "
"to slot %d, vchan %d\n", added, more_rrbs,
PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot), vchan));
pcibr_rrb_debug("do_pcibr_rrb_autoalloc", pcibr_soft);
pcibr_rrb_debug("pcibr_rrb_alloc_more", pcibr_soft);
}
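The allocation helpers above always draw RRBs in the same order: the slot's reserved pool first, then the shared even/odd pool, stopping once both are empty. A hypothetical helper restating that order (the field names are those of pcibr_soft_t shown above):
/* Sketch: take one RRB credit for 'slot', preferring its reserved pool and
 * falling back to the shared pool for that slot parity.  Returns 0 on
 * success, -1 when neither pool has anything left.
 */
static int
rrb_pool_take_one(pcibr_soft_t pcibr_soft, int slot)
{
    if (pcibr_soft->bs_rrb_res[slot] > 0) {
        pcibr_soft->bs_rrb_res[slot]--;
        return 0;
    }
    if (pcibr_soft->bs_rrb_avail[slot & 1] > 0) {
        pcibr_soft->bs_rrb_avail[slot & 1]--;
        return 0;
    }
    return -1;
}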
......@@ -348,25 +390,24 @@ pcibr_rrb_flush(vertex_hdl_t pconn_vhdl)
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
pcibr_soft_t pcibr_soft = (pcibr_soft_t)pciio_info_mfast_get(pciio_info);
pciio_slot_t slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
bridge_t *bridge = pcibr_soft->bs_base;
uint64_t tmp;
uint16_t enable_bit, pdev_bits, rrb_bits, rrb_mask;
int rrb_index;
unsigned long s;
enable_bit = RRB_ENABLE_BIT(bridge);
pdev_bits = SLOT_2_PDEV(bridge, slot);
enable_bit = RRB_ENABLE_BIT;
pdev_bits = SLOT_2_PDEV(slot);
rrb_bits = enable_bit | pdev_bits;
rrb_mask = enable_bit | ((NUM_PDEV_BITS(bridge) << 1) - 1);
rrb_mask = enable_bit | ((NUM_PDEV_BITS << 1) - 1);
tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
tmp = pcireg_rrb_get(pcibr_soft, SLOT_2_RRB_REG(slot));
s = pcibr_lock(pcibr_soft);
for (rrb_index = 0; rrb_index < 8; rrb_index++) {
int evn_odd = SLOT_2_RRB_REG(bridge, slot);
int evn_odd = SLOT_2_RRB_REG(slot);
if ((tmp & rrb_mask) == rrb_bits)
do_pcibr_rrb_flush(bridge, (2 * rrb_index) + evn_odd);
do_pcibr_rrb_flush(pcibr_soft, (2 * rrb_index) + evn_odd);
tmp = (tmp >> RRB_SIZE);
}
pcibr_unlock(pcibr_soft, s);
......@@ -383,12 +424,9 @@ pcibr_wrb_flush(vertex_hdl_t pconn_vhdl)
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
bridge_t *bridge = pcibr_soft->bs_base;
volatile bridgereg_t *wrb_flush;
wrb_flush = &(bridge->b_wr_req_buf[pciio_slot].reg);
while (*wrb_flush)
;
pcireg_wrb_flush_get(pcibr_soft, pciio_slot);
return(0);
}
......@@ -411,7 +449,6 @@ pcibr_rrb_alloc(vertex_hdl_t pconn_vhdl,
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
bridge_t *bridge = pcibr_soft->bs_base;
int desired_vchan0;
int desired_vchan1;
int orig_vchan0;
......@@ -441,7 +478,7 @@ pcibr_rrb_alloc(vertex_hdl_t pconn_vhdl,
s = pcibr_lock(pcibr_soft);
vchan_total = NUMBER_VCHANNELS(bridge);
vchan_total = NUMBER_VCHANNELS;
/* Save the boot-time RRB configuration for this slot */
if (pcibr_soft->bs_rrb_valid_dflt[pciio_slot][VCHAN0] < 0) {
......@@ -507,14 +544,14 @@ pcibr_rrb_alloc(vertex_hdl_t pconn_vhdl,
/* Commit the allocations: free, then alloc.
*/
if (delta_vchan0 < 0)
(void) do_pcibr_rrb_free(bridge, pciio_slot, VCHAN0, -delta_vchan0);
do_pcibr_rrb_free(pcibr_soft, pciio_slot, VCHAN0, -delta_vchan0);
if (delta_vchan1 < 0)
(void) do_pcibr_rrb_free(bridge, pciio_slot, VCHAN1, -delta_vchan1);
do_pcibr_rrb_free(pcibr_soft, pciio_slot, VCHAN1, -delta_vchan1);
if (delta_vchan0 > 0)
(void) do_pcibr_rrb_alloc(bridge, pciio_slot, VCHAN0, delta_vchan0);
do_pcibr_rrb_alloc(pcibr_soft, pciio_slot, VCHAN0, delta_vchan0);
if (delta_vchan1 > 0)
(void) do_pcibr_rrb_alloc(bridge, pciio_slot, VCHAN1, delta_vchan1);
do_pcibr_rrb_alloc(pcibr_soft, pciio_slot, VCHAN1, delta_vchan1);
/* Return final values to caller.
*/
......@@ -666,7 +703,6 @@ pcibr_slot_initial_rrb_alloc(vertex_hdl_t pcibr_vhdl,
pcibr_soft_t pcibr_soft;
pcibr_info_h pcibr_infoh;
pcibr_info_t pcibr_info;
bridge_t *bridge;
int vchan_total;
int vchan;
int chan[4];
......@@ -679,12 +715,10 @@ pcibr_slot_initial_rrb_alloc(vertex_hdl_t pcibr_vhdl,
if (!PCIBR_VALID_SLOT(pcibr_soft, slot))
return(-EINVAL);
bridge = pcibr_soft->bs_base;
/* How many RRBs are on this slot? */
vchan_total = NUMBER_VCHANNELS(bridge);
vchan_total = NUMBER_VCHANNELS;
for (vchan = 0; vchan < vchan_total; vchan++)
chan[vchan] = do_pcibr_rrb_count_valid(bridge, slot, vchan);
chan[vchan] = do_pcibr_rrb_count_valid(pcibr_soft, slot, vchan);
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_RRB, pcibr_vhdl,
"pcibr_slot_initial_rrb_alloc: slot %d started with %d+%d+%d+%d\n",
......@@ -695,25 +729,35 @@ pcibr_slot_initial_rrb_alloc(vertex_hdl_t pcibr_vhdl,
*/
pcibr_infoh = pcibr_soft->bs_slot[slot].bss_infos;
pcibr_info = pcibr_infoh[0];
/*
* PIC BRINGUP WAR (PV# 856866, 859504, 861476, 861478):
* Don't free RRBs we allocated to device[2|3]--vchan3 as
* a WAR to those PVs mentioned above. In pcibr_attach2
* we allocate RRB0,8,1,9 to device[2|3]--vchan3.
*/
if (PCIBR_WAR_ENABLED(PV856866, pcibr_soft) &&
(slot == 2 || slot == 3) &&
(pcibr_info->f_vendor == PCIIO_VENDOR_ID_NONE) &&
!pcibr_soft->bs_slot[slot].has_host) {
if (PCIBR_WAR_ENABLED(PV856866, pcibr_soft) &&
(slot == 2 || slot == 3) &&
(pcibr_info->f_vendor == PCIIO_VENDOR_ID_NONE) &&
!pcibr_soft->bs_slot[slot].has_host) {
for (vchan = 0; vchan < 2; vchan++) {
do_pcibr_rrb_free(bridge, slot, vchan, 8);
pcibr_soft->bs_rrb_valid[slot][vchan] = 0;
}
for (vchan = 0; vchan < 2; vchan++) {
do_pcibr_rrb_free(pcibr_soft, slot, vchan, 8);
pcibr_soft->bs_rrb_valid[slot][vchan] = 0;
}
pcibr_soft->bs_rrb_valid[slot][3] = chan[3];
return(-ENODEV);
}
/* Give back any assigned to empty slots */
if ((pcibr_info->f_vendor == PCIIO_VENDOR_ID_NONE) && !pcibr_soft->bs_slot[slot].has_host) {
do_pcibr_rrb_free_all(pcibr_soft, bridge, slot);
if ((pcibr_info->f_vendor == PCIIO_VENDOR_ID_NONE) &&
!pcibr_soft->bs_slot[slot].has_host) {
do_pcibr_rrb_free_all(pcibr_soft, slot);
/* Reserve RRBs for this empty slot for hot-plug */
for (vchan = 0; vchan < vchan_total; vchan++)
pcibr_soft->bs_rrb_valid[slot][vchan] = 0;
return(-ENODEV);
}
......@@ -723,16 +767,6 @@ pcibr_slot_initial_rrb_alloc(vertex_hdl_t pcibr_vhdl,
return(0);
}
void
rrb_reserved_free(pcibr_soft_t pcibr_soft, int slot)
{
int res = pcibr_soft->bs_rrb_res[slot];
if (res) {
pcibr_soft->bs_rrb_avail[slot & 1] += res;
pcibr_soft->bs_rrb_res[slot] = 0;
}
}
/*
* pcibr_initial_rrb
......@@ -750,7 +784,6 @@ pcibr_initial_rrb(vertex_hdl_t pcibr_vhdl,
pciio_slot_t first, pciio_slot_t last)
{
pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
bridge_t *bridge = pcibr_soft->bs_base;
pciio_slot_t slot;
int rrb_total;
int vchan_total;
......@@ -763,12 +796,12 @@ pcibr_initial_rrb(vertex_hdl_t pcibr_vhdl,
have[1][0] = have[1][1] = have[1][2] = 0;
res[0] = res[1] = 0;
vchan_total = NUMBER_VCHANNELS(bridge);
vchan_total = NUMBER_VCHANNELS;
for (slot = pcibr_soft->bs_min_slot;
slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
/* Initial RRB management; give back RRBs in all non-existent slots */
(void) pcibr_slot_initial_rrb_alloc(pcibr_vhdl, slot);
pcibr_slot_initial_rrb_alloc(pcibr_vhdl, slot);
/* Base calculations only on existing slots */
if ((slot >= first) && (slot <= last)) {
......@@ -782,8 +815,8 @@ pcibr_initial_rrb(vertex_hdl_t pcibr_vhdl,
}
/* Initialize even/odd slot available RRB counts */
pcibr_soft->bs_rrb_avail[0] = do_pcibr_rrb_count_avail(bridge, 0);
pcibr_soft->bs_rrb_avail[1] = do_pcibr_rrb_count_avail(bridge, 1);
pcibr_soft->bs_rrb_avail[0] = do_pcibr_rrb_count_avail(pcibr_soft, 0);
pcibr_soft->bs_rrb_avail[1] = do_pcibr_rrb_count_avail(pcibr_soft, 1);
/*
* Calculate reserved RRBs for slots based on current RRB usage
......@@ -804,6 +837,9 @@ pcibr_initial_rrb(vertex_hdl_t pcibr_vhdl,
for (slot = first; slot <= last; ++slot) {
int r;
if (pcibr_soft->bs_unused_slot & (1 << slot))
continue;
rrb_total = 0;
for (vchan = 0; vchan < vchan_total; vchan++)
rrb_total += pcibr_soft->bs_rrb_valid[slot][vchan];
......@@ -829,7 +865,6 @@ void
pcibr_rrb_debug(char *calling_func, pcibr_soft_t pcibr_soft)
{
pciio_slot_t slot;
char tmp_str[256];
if (pcibr_debug_mask & PCIBR_DEBUG_RRB) {
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_RRB, pcibr_soft->bs_vhdl,
......@@ -837,23 +872,17 @@ pcibr_rrb_debug(char *calling_func, pcibr_soft_t pcibr_soft)
pcibr_soft->bs_rrb_avail[0], pcibr_soft->bs_rrb_avail[1]));
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_RRB, pcibr_soft->bs_vhdl,
"\tslot\tvchan0\tvchan1\tvchan2\tvchan3\treserved\n"));
"\tslot\tvchan0\tvchan1\tvchan2\tvchan3\treserved\n"));
for (slot=0; slot < PCIBR_NUM_SLOTS(pcibr_soft); slot++) {
/*
* The kernel only allows functions to have so many variable args,
* attempting to call PCIBR_DEBUG_ALWAYS() with more than 5 printf
* arguments fails so sprintf() it into a temporary string.
*/
sprintf(tmp_str, "\t %d\t %d\t %d\t %d\t %d\t %d\n",
PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot),
0xFFF & pcibr_soft->bs_rrb_valid[slot][VCHAN0],
0xFFF & pcibr_soft->bs_rrb_valid[slot][VCHAN1],
0xFFF & pcibr_soft->bs_rrb_valid[slot][VCHAN2],
0xFFF & pcibr_soft->bs_rrb_valid[slot][VCHAN3],
pcibr_soft->bs_rrb_res[slot]);
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_RRB, pcibr_soft->bs_vhdl,
"%s", tmp_str));
"\t %d\t %d\t %d\t %d\t %d\t %d\n",
PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot),
0xFFF & pcibr_soft->bs_rrb_valid[slot][VCHAN0],
0xFFF & pcibr_soft->bs_rrb_valid[slot][VCHAN1],
0xFFF & pcibr_soft->bs_rrb_valid[slot][VCHAN2],
0xFFF & pcibr_soft->bs_rrb_valid[slot][VCHAN3],
pcibr_soft->bs_rrb_res[slot]));
}
}
}
......@@ -37,11 +37,12 @@ int pcibr_slot_call_device_detach(vertex_hdl_t pcibr_vhdl,
pciio_slot_t slot, int drv_flags);
int pcibr_slot_detach(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot,
int drv_flags, char *l1_msg, int *sub_errorp);
static int pcibr_probe_slot(bridge_t *, cfg_p, unsigned int *);
static int pcibr_probe_slot(pcibr_soft_t, cfg_p, unsigned int *);
static int pcibr_probe_work(pcibr_soft_t pcibr_soft, void *addr, int len, void *valp);
void pcibr_device_info_free(vertex_hdl_t, pciio_slot_t);
iopaddr_t pcibr_bus_addr_alloc(pcibr_soft_t, pciio_win_info_t,
pciio_space_t, int, int, int);
void pciibr_bus_addr_free(pcibr_soft_t, pciio_win_info_t);
void pcibr_bus_addr_free(pciio_win_info_t);
cfg_p pcibr_find_capability(cfg_p, unsigned);
extern uint64_t do_pcibr_config_get(cfg_p, unsigned, unsigned);
void do_pcibr_config_set(cfg_p, unsigned, unsigned, uint64_t);
......@@ -57,22 +58,6 @@ void do_pcibr_config_set(cfg_p, unsigned, unsigned, uint64_t);
int max_splittrans_to_numbuf[MAX_SPLIT_TABLE] = {1, 2, 3, 4, 8, 12, 16, 32};
int max_readcount_to_bufsize[MAX_READCNT_TABLE] = {512, 1024, 2048, 4096 };
char *pci_space_name[] = {"NONE",
"ROM",
"IO",
"",
"MEM",
"MEM32",
"MEM64",
"CFG",
"WIN0",
"WIN1",
"WIN2",
"WIN3",
"WIN4",
"WIN5",
"",
"BAD"};
/*
* pcibr_slot_info_init
......@@ -87,7 +72,6 @@ pcibr_slot_info_init(vertex_hdl_t pcibr_vhdl,
pcibr_soft_t pcibr_soft;
pcibr_info_h pcibr_infoh;
pcibr_info_t pcibr_info;
bridge_t *bridge;
cfg_p cfgw;
unsigned idword;
unsigned pfail;
......@@ -106,13 +90,13 @@ pcibr_slot_info_init(vertex_hdl_t pcibr_vhdl,
int func;
vertex_hdl_t conn_vhdl;
pcibr_soft_slot_t slotp;
uint64_t device_reg;
/* Get the basic software information required to proceed */
pcibr_soft = pcibr_soft_get(pcibr_vhdl);
if (!pcibr_soft)
return(EINVAL);
bridge = pcibr_soft->bs_base;
if (!PCIBR_VALID_SLOT(pcibr_soft, slot))
return(EINVAL);
......@@ -124,9 +108,9 @@ pcibr_slot_info_init(vertex_hdl_t pcibr_vhdl,
}
/* Try to read the device-id/vendor-id from the config space */
cfgw = pcibr_slot_config_addr(bridge, slot, 0);
cfgw = pcibr_slot_config_addr(pcibr_soft, slot, 0);
if (pcibr_probe_slot(bridge, cfgw, &idword))
if (pcibr_probe_slot(pcibr_soft, cfgw, &idword))
return(ENODEV);
slotp = &pcibr_soft->bs_slot[slot];
......@@ -157,8 +141,8 @@ pcibr_slot_info_init(vertex_hdl_t pcibr_vhdl,
if (htype & 0x80) { /* MULTIFUNCTION */
for (func = 1; func < 8; ++func) {
cfgw = pcibr_func_config_addr(bridge, 0, slot, func, 0);
if (pcibr_probe_slot(bridge, cfgw, &idwords[func])) {
cfgw = pcibr_func_config_addr(pcibr_soft, 0, slot, func, 0);
if (pcibr_probe_slot(pcibr_soft, cfgw, &idwords[func])) {
pfail |= 1 << func;
continue;
}
......@@ -170,7 +154,7 @@ pcibr_slot_info_init(vertex_hdl_t pcibr_vhdl,
nfunc = func + 1;
rfunc = 0;
}
cfgw = pcibr_slot_config_addr(bridge, slot, 0);
cfgw = pcibr_slot_config_addr(pcibr_soft, slot, 0);
}
pcibr_infoh = kmalloc(nfunc*sizeof (*(pcibr_infoh)), GFP_KERNEL);
if ( !pcibr_infoh ) {
......@@ -189,7 +173,7 @@ pcibr_slot_info_init(vertex_hdl_t pcibr_vhdl,
continue;
idword = idwords[func];
cfgw = pcibr_func_config_addr(bridge, 0, slot, func, 0);
cfgw = pcibr_func_config_addr(pcibr_soft, 0, slot, func, 0);
device = 0xFFFF & (idword >> 16);
htype = do_pcibr_config_get(cfgw, PCI_CFG_HEADER_TYPE, 1);
......@@ -223,9 +207,8 @@ pcibr_slot_info_init(vertex_hdl_t pcibr_vhdl,
*/
lt_time = do_pcibr_config_get(cfgw, PCI_CFG_LATENCY_TIMER, 1);
if ((lt_time == 0) && !(bridge->b_device[slot].reg & BRIDGE_DEV_RT) &&
(device == 0x5 /* RAD_DEV */)) {
device_reg = pcireg_device_get(pcibr_soft, slot);
if ((lt_time == 0) && !(device_reg & BRIDGE_DEV_RT)) {
unsigned min_gnt;
unsigned min_gnt_mult;
......@@ -272,12 +255,14 @@ pcibr_slot_info_init(vertex_hdl_t pcibr_vhdl,
"func=%d, to 0x20\n",
PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot), func));
}
}
/* Get the PCI-X capability if running in PCI-X mode. If the func
 * doesn't have a pcix capability, allocate a PCIIO_VENDOR_ID_NONE
* pcibr_info struct so the device driver for that function is not
* called.
*/
/* Get the PCI-X capability if running in PCI-X mode. If the func
 * doesn't have a pcix capability, allocate a PCIIO_VENDOR_ID_NONE
* pcibr_info struct so the device driver for that function is not
* called.
*/
if (IS_PCIX(pcibr_soft)) {
if (!(pcix_cap = pcibr_find_capability(cfgw, PCI_CAP_PCIX))) {
printk(KERN_WARNING
"%s: Bus running in PCI-X mode, But card in slot %d, "
......@@ -398,7 +383,31 @@ pcibr_slot_info_init(vertex_hdl_t pcibr_vhdl,
}
if (base != 0) { /* estimate size */
pciio_space_t tmp_space = space;
iopaddr_t tmp_base;
size = base & -base;
/*
 * Reserve this space in the relevant address map. Don't
* care about the return code from pcibr_bus_addr_alloc().
*/
if (space == PCIIO_SPACE_MEM && code != PCI_BA_MEM_1MEG) {
tmp_space = PCIIO_SPACE_MEM32;
}
tmp_base = pcibr_bus_addr_alloc(pcibr_soft,
&pcibr_info->f_window[win],
tmp_space,
base, size, 0);
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_BAR, pcibr_vhdl,
"pcibr_slot_info_init: slot=%d, func=%d win %d "
"reserving space %s [0x%lx..0x%lx], tmp_base 0x%lx\n",
PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot), func, win,
pci_space[tmp_space], (uint64_t)base,
(uint64_t)(base + size - 1), (uint64_t)tmp_base));
} else { /* calculate size */
do_pcibr_config_set(wptr, (win * 4), 4, ~0); /* write 1's */
size = do_pcibr_config_get(wptr, (win * 4), 4); /* read back */
......@@ -602,14 +611,13 @@ pcibr_slot_addr_space_init(vertex_hdl_t pcibr_vhdl,
pcibr_soft_t pcibr_soft;
pcibr_info_h pcibr_infoh;
pcibr_info_t pcibr_info;
bridge_t *bridge;
iopaddr_t mask;
int nbars;
int nfunc;
int func;
int win;
int rc = 0;
int align;
int align = 0;
int align_slot;
pcibr_soft = pcibr_soft_get(pcibr_vhdl);
......@@ -620,8 +628,6 @@ pcibr_slot_addr_space_init(vertex_hdl_t pcibr_vhdl,
if (!PCIBR_VALID_SLOT(pcibr_soft, slot))
return(EINVAL);
bridge = pcibr_soft->bs_base;
/* allocate address space,
* for windows that have not been
* previously assigned.
......@@ -668,7 +674,7 @@ pcibr_slot_addr_space_init(vertex_hdl_t pcibr_vhdl,
if (pcibr_info->f_vendor == PCIIO_VENDOR_ID_NONE)
continue;
cfgw = pcibr_func_config_addr(bridge, 0, slot, func, 0);
cfgw = pcibr_func_config_addr(pcibr_soft, 0, slot, func, 0);
wptr = cfgw + PCI_CFG_BASE_ADDR_0 / 4;
if ((do_pcibr_config_get(cfgw, PCI_CFG_HEADER_TYPE, 1) & 0x7f) != 0)
......@@ -834,8 +840,7 @@ pcibr_slot_device_init(vertex_hdl_t pcibr_vhdl,
pciio_slot_t slot)
{
pcibr_soft_t pcibr_soft;
bridge_t *bridge;
bridgereg_t devreg;
uint64_t devreg;
pcibr_soft = pcibr_soft_get(pcibr_vhdl);
......@@ -845,28 +850,34 @@ pcibr_slot_device_init(vertex_hdl_t pcibr_vhdl,
if (!PCIBR_VALID_SLOT(pcibr_soft, slot))
return(EINVAL);
bridge = pcibr_soft->bs_base;
/*
* Adjustments to Device(x)
* and init of bss_device shadow
* Adjustments to Device(x) and init of bss_device shadow
*/
devreg = bridge->b_device[slot].reg;
devreg = pcireg_device_get(pcibr_soft, slot);
devreg &= ~BRIDGE_DEV_PAGE_CHK_DIS;
/*
* PIC WAR. PV# 855271
* Don't enable virtual channels in the PIC by default.
* Can cause problems with 32-bit devices. (The bit is only intended
* for 64-bit devices). We set the bit in pcibr_try_set_device()
* if we're 64-bit and requesting virtual channels.
* Enable virtual channels by default (exception: see PIC WAR below)
*/
if (PCIBR_WAR_ENABLED(PV855271, pcibr_soft))
devreg |= BRIDGE_DEV_COH;
else
devreg |= BRIDGE_DEV_COH | BRIDGE_DEV_VIRTUAL_EN;
devreg |= BRIDGE_DEV_VIRTUAL_EN;
/*
* PIC WAR. PV# 855271: Disable virtual channels in the PIC since
* it can cause problems with 32-bit devices. We'll set the bit in
* pcibr_try_set_device() iff we're 64-bit and requesting virtual
* channels.
*/
if (PCIBR_WAR_ENABLED(PV855271, pcibr_soft)) {
devreg &= ~BRIDGE_DEV_VIRTUAL_EN;
}
devreg |= BRIDGE_DEV_COH;
pcibr_soft->bs_slot[slot].bss_device = devreg;
bridge->b_device[slot].reg = devreg;
pcireg_device_set(pcibr_soft, slot, devreg);
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DEVREG, pcibr_vhdl,
"pcibr_slot_device_init: Device(%d): 0x%x\n",
slot, devreg));
return(0);
}
......@@ -1108,44 +1119,6 @@ pcibr_slot_call_device_detach(vertex_hdl_t pcibr_vhdl,
return(error);
}
/*
* pcibr_slot_attach
* This is a place holder routine to keep track of all the
* slot-specific initialization that needs to be done.
* This is usually called when we want to initialize a new
* PCI card on the bus.
*/
int
pcibr_slot_attach(vertex_hdl_t pcibr_vhdl,
pciio_slot_t slot,
int drv_flags,
char *l1_msg,
int *sub_errorp)
{
pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
int error;
/* Do not allow a multi-function card to be hot-plug inserted */
if (pcibr_soft->bs_slot[slot].bss_ninfo > 1) {
if (sub_errorp)
*sub_errorp = EPERM;
return(PCI_MULTI_FUNC_ERR);
}
/* Call the device attach */
error = pcibr_slot_call_device_attach(pcibr_vhdl, slot, drv_flags);
if (error) {
if (sub_errorp)
*sub_errorp = error;
if (error == EUNATCH)
return(PCI_NO_DRIVER);
else
return(PCI_SLOT_DRV_ATTACH_ERR);
}
return(0);
}
/*
* pcibr_slot_detach
* This is a place holder routine to keep track of all the
......@@ -1166,6 +1139,8 @@ pcibr_slot_detach(vertex_hdl_t pcibr_vhdl,
if (error) {
if (sub_errorp)
*sub_errorp = error;
if (l1_msg)
;
return(PCI_SLOT_DRV_DETACH_ERR);
}
......@@ -1197,33 +1172,35 @@ pcibr_slot_detach(vertex_hdl_t pcibr_vhdl,
* through the valp parameter.
*/
static int
pcibr_probe_slot_pic(bridge_t *bridge,
cfg_p cfg,
unsigned *valp)
pcibr_probe_slot(pcibr_soft_t pcibr_soft,
cfg_p cfg,
unsigned *valp)
{
int rv;
picreg_t p_old_enable = (picreg_t)0, p_new_enable;
extern int snia_badaddr_val(volatile void *, int, volatile void *);
p_old_enable = bridge->p_int_enable_64;
p_new_enable = p_old_enable & ~(BRIDGE_IMR_PCI_MST_TIMEOUT | PIC_ISR_PCIX_MTOUT);
bridge->p_int_enable_64 = p_new_enable;
return pcibr_probe_work(pcibr_soft, (void *)cfg, 4, (void *)valp);
}
if (bridge->p_err_int_view_64 & (BRIDGE_ISR_PCI_MST_TIMEOUT | PIC_ISR_PCIX_MTOUT))
bridge->p_int_rst_stat_64 = BRIDGE_IRR_MULTI_CLR;
/*
* Probe an offset within a piomap with errors disabled.
* len must be 1, 2, 4, or 8. The probed address must be a multiple of
* len.
*
* Returns: 0 if the offset was probed and put valid data in valp
* -1 if there was a usage error such as improper alignment
* or out of bounds offset/len combination. In this
* case, the map was not probed
* 1 if the offset was probed but resulted in an error
* such as device not responding, bus error, etc.
*/
if (bridge->p_int_status_64 & (BRIDGE_IRR_PCI_GRP | PIC_PCIX_GRP_CLR)) {
bridge->p_int_rst_stat_64 = (BRIDGE_IRR_PCI_GRP_CLR | PIC_PCIX_GRP_CLR);
(void) bridge->b_wid_tflush; /* flushbus */
}
rv = snia_badaddr_val((void *) cfg, 4, valp);
if (bridge->p_err_int_view_64 & (BRIDGE_ISR_PCI_MST_TIMEOUT | PIC_ISR_PCIX_MTOUT)) {
bridge->p_int_rst_stat_64 = BRIDGE_IRR_MULTI_CLR;
rv = 1; /* unoccupied slot */
int
pcibr_piomap_probe(pcibr_piomap_t piomap, off_t offset, int len, void *valp)
{
if (offset + len > piomap->bp_mapsz) {
return -1;
}
bridge->p_int_enable_64 = p_old_enable;
bridge->b_wid_tflush; /* wait until Bridge PIO complete */
return(rv);
return pcibr_probe_work(piomap->bp_soft,
piomap->bp_kvaddr + offset, len, valp);
}
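A hypothetical caller showing the return convention documented above (0 = value read, -1 = usage error, 1 = the probe itself faulted); the function name and the errno choices are illustrative only:
/* Read the vendor/device id word through a piomap, mapping the
 * pcibr_piomap_probe() return codes onto errno values.
 */
static int
probe_vendor_id(pcibr_piomap_t piomap, uint32_t *vendor_dev)
{
    int rv = pcibr_piomap_probe(piomap, 0 /* config offset */, 4, vendor_dev);

    if (rv < 0)
        return -EINVAL;     /* bad offset/len -- map was not probed */
    if (rv > 0)
        return -ENODEV;     /* probe faulted, e.g. empty slot */
    return 0;               /* *vendor_dev now holds the config word */
}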
/*
......@@ -1234,11 +1211,31 @@ pcibr_probe_slot_pic(bridge_t *bridge,
* through the valp parameter.
*/
static int
pcibr_probe_slot(bridge_t *bridge,
cfg_p cfg,
unsigned *valp)
pcibr_probe_work(pcibr_soft_t pcibr_soft,
void *addr,
int len,
void *valp)
{
return(pcibr_probe_slot_pic(bridge, cfg, valp));
int rv;
/*
* Sanity checks ...
*/
if (len != 1 && len != 2 && len != 4 && len != 8) {
return -1; /* invalid len */
}
if ((uint64_t)addr & (len-1)) {
return -1; /* invalid alignment */
}
rv = snia_badaddr_val((void *)addr, len, valp);
/* Clear the int_view register in case it was set */
pcireg_intr_reset_set(pcibr_soft, BRIDGE_IRR_MULTI_CLR);
return (rv ? 1 : 0); /* return 1 for snia_badaddr_val error, 0 if ok */
}
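The alignment test above works because the accepted lengths are powers of two, so (len - 1) is a mask of the address bits that must be zero: len = 4 masks 0x3, len = 8 masks 0x7. A standalone restatement of the same check (illustrative helper only):
static inline int
probe_args_ok(uint64_t addr, int len)
{
    return (len == 1 || len == 2 || len == 4 || len == 8) &&
           ((addr & (len - 1)) == 0);
}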
......@@ -1249,7 +1246,6 @@ pcibr_device_info_free(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot)
pcibr_info_t pcibr_info;
pciio_function_t func;
pcibr_soft_slot_t slotp = &pcibr_soft->bs_slot[slot];
bridge_t *bridge = pcibr_soft->bs_base;
cfg_p cfgw;
int nfunc = slotp->bss_ninfo;
int bar;
......@@ -1267,7 +1263,7 @@ pcibr_device_info_free(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot)
s = pcibr_lock(pcibr_soft);
/* Disable memory and I/O BARs */
cfgw = pcibr_func_config_addr(bridge, 0, slot, func, 0);
cfgw = pcibr_func_config_addr(pcibr_soft, 0, slot, func, 0);
cmd_reg = do_pcibr_config_get(cfgw, PCI_CFG_COMMAND, 4);
cmd_reg &= (PCI_CMD_MEM_SPACE | PCI_CMD_IO_SPACE);
do_pcibr_config_set(cfgw, PCI_CFG_COMMAND, 4, cmd_reg);
......@@ -1277,7 +1273,7 @@ pcibr_device_info_free(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot)
continue;
/* Free the PCI bus space */
pciibr_bus_addr_free(pcibr_soft, &pcibr_info->f_window[bar]);
pcibr_bus_addr_free(&pcibr_info->f_window[bar]);
/* Get index of the DevIO(x) register used to access this BAR */
devio_index = pcibr_info->f_window[bar].w_devio_index;
......@@ -1295,7 +1291,7 @@ pcibr_device_info_free(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot)
/* Free the Expansion ROM PCI bus space */
if(pcibr_info->f_rbase && pcibr_info->f_rsize) {
pciibr_bus_addr_free(pcibr_soft, &pcibr_info->f_rwindow);
pcibr_bus_addr_free(&pcibr_info->f_rwindow);
}
pcibr_unlock(pcibr_soft, s);
......@@ -1317,12 +1313,6 @@ pcibr_device_info_free(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot)
slotp->bss_d64_flags = 0;
slotp->bss_d32_base = PCIBR_D32_BASE_UNSET;
slotp->bss_d32_flags = 0;
/* Clear out shadow info necessary for the external SSRAM workaround */
slotp->bss_ext_ates_active = ATOMIC_INIT(0);
slotp->bss_cmd_pointer = 0;
slotp->bss_cmd_shadow = 0;
}
......@@ -1365,7 +1355,7 @@ pcibr_bus_addr_alloc(pcibr_soft_t pcibr_soft, pciio_win_info_t win_info_p,
void
pciibr_bus_addr_free(pcibr_soft_t pcibr_soft, pciio_win_info_t win_info_p)
pcibr_bus_addr_free(pciio_win_info_t win_info_p)
{
pciio_device_win_free(&win_info_p->w_win_alloc);
}
......@@ -1381,17 +1371,16 @@ pcibr_widget_to_bus(vertex_hdl_t pcibr_vhdl)
pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
xwidgetnum_t widget = pcibr_soft->bs_xid;
int bricktype = pcibr_soft->bs_bricktype;
int bus = pcibr_soft->bs_busnum;
int bus;
/*
* For PIC there are 2 busses per widget and pcibr_soft->bs_busnum
* will be 0 or 1. For [X]BRIDGE there is 1 bus per widget and
* pcibr_soft->bs_busnum will always be zero. So we add bs_busnum
* to what io_brick_map_widget returns to get the bus number.
*/
if ((bus += io_brick_map_widget(bricktype, widget)) > 0) {
return bus;
} else {
if ((bus = io_brick_map_widget(bricktype, widget)) <= 0) {
printk(KERN_WARNING "pcibr_widget_to_bus() bad bricktype %d\n", bricktype);
return 0;
}
/* For PIC there are 2 busses per widget and pcibr_soft->bs_busnum
* will be 0 or 1. Add in the correct PIC bus offset.
*/
bus += pcibr_soft->bs_busnum;
return bus;
}
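For the bus-number calculation above: io_brick_map_widget() returns the base external bus for the widget, and bs_busnum (0 or 1 on PIC) selects which of the widget's two buses this is. A hypothetical restatement, using a made-up base of 3 so PIC bus 0 reports 3 and PIC bus 1 reports 4:
static int
external_bus_number(int bricktype, xwidgetnum_t widget, int pic_busnum)
{
    int base = io_brick_map_widget(bricktype, widget);  /* e.g. 3 */

    return (base <= 0) ? 0 : base + pic_busnum;         /* 3 or 4 */
}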
......@@ -703,30 +703,6 @@ pciio_info_pops_get(pciio_info_t pciio_info)
return (pciio_info->c_pops);
}
int
pciio_businfo_multi_master_get(pciio_businfo_t businfo)
{
return businfo->bi_multi_master;
}
pciio_asic_type_t
pciio_businfo_asic_type_get(pciio_businfo_t businfo)
{
return businfo->bi_asic_type;
}
pciio_bus_type_t
pciio_businfo_bus_type_get(pciio_businfo_t businfo)
{
return businfo->bi_bus_type;
}
pciio_bus_speed_t
pciio_businfo_bus_speed_get(pciio_businfo_t businfo)
{
return businfo->bi_bus_speed;
}
/* =====================================================================
* GENERIC PCI INITIALIZATION FUNCTIONS
*/
......@@ -1036,12 +1012,3 @@ pciio_info_type1_get(pciio_info_t pci_info)
{
return (pci_info->c_type1);
}
pciio_businfo_t
pciio_businfo_get(vertex_hdl_t conn)
{
pciio_info_t info;
info = pciio_info_get(conn);
return DEV_FUNC(conn, businfo_get)(conn);
}
......@@ -17,12 +17,41 @@
#include <asm/sn/pci/pic.h>
#include <asm/sn/sn_private.h>
extern struct file_operations pcibr_fops;
extern pcibr_list_p pcibr_list;
static int pic_attach2(vertex_hdl_t, void *, vertex_hdl_t,
int, pcibr_soft_t *);
extern int isIO9(nasid_t);
extern char *dev_to_name(vertex_hdl_t dev, char *buf, uint buflen);
extern int pcibr_widget_to_bus(vertex_hdl_t pcibr_vhdl);
extern pcibr_hints_t pcibr_hints_get(vertex_hdl_t, int);
extern unsigned pcibr_intr_bits(pciio_info_t info,
pciio_intr_line_t lines, int nslots);
extern void pcibr_setwidint(xtalk_intr_t);
extern int pcibr_error_handler_wrapper(error_handler_arg_t, int,
ioerror_mode_t, ioerror_t *);
extern void pcibr_error_intr_handler(intr_arg_t);
extern void pcibr_directmap_init(pcibr_soft_t);
extern int pcibr_slot_info_init(vertex_hdl_t,pciio_slot_t);
extern int pcibr_slot_addr_space_init(vertex_hdl_t,pciio_slot_t);
extern int pcibr_slot_device_init(vertex_hdl_t, pciio_slot_t);
extern int pcibr_slot_pcix_rbar_init(pcibr_soft_t, pciio_slot_t);
extern int pcibr_slot_guest_info_init(vertex_hdl_t,pciio_slot_t);
extern int pcibr_slot_call_device_attach(vertex_hdl_t,
pciio_slot_t, int);
extern void pcibr_rrb_alloc_init(pcibr_soft_t, int, int, int);
extern int pcibr_pcix_rbars_calc(pcibr_soft_t);
extern pcibr_info_t pcibr_device_info_new(pcibr_soft_t, pciio_slot_t,
pciio_function_t, pciio_vendor_id_t,
pciio_device_id_t);
extern int pcibr_initial_rrb(vertex_hdl_t, pciio_slot_t,
pciio_slot_t);
extern void xwidget_error_register(vertex_hdl_t, error_handler_f *,
error_handler_arg_t);
extern void pcibr_clearwidint(pcibr_soft_t);
#define PCI_BUS_NO_1 1
extern int pcibr_attach2(vertex_hdl_t, bridge_t *, vertex_hdl_t, int, pcibr_soft_t *);
extern void pcibr_driver_reg_callback(vertex_hdl_t, int, int, int);
extern void pcibr_driver_unreg_callback(vertex_hdl_t, int, int, int);
/*
......@@ -30,10 +59,9 @@ extern void pcibr_driver_unreg_callback(vertex_hdl_t, int, int, int);
*/
static int
pic_bus1_widget_info_dup(vertex_hdl_t conn_v, vertex_hdl_t peer_conn_v,
cnodeid_t xbow_peer)
cnodeid_t xbow_peer, char *peer_path)
{
xwidget_info_t widget_info, peer_widget_info;
char peer_path[256];
vertex_hdl_t peer_hubv;
hubinfo_t peer_hub_info;
......@@ -96,7 +124,7 @@ pic_bus1_redist(nasid_t nasid, vertex_hdl_t conn_v)
char pathname[256], peer_path[256], tmpbuf[256];
char *p;
int rc;
vertex_hdl_t peer_conn_v;
vertex_hdl_t peer_conn_v, hubv;
int pos;
slabid_t slab;
......@@ -141,9 +169,15 @@ pic_bus1_redist(nasid_t nasid, vertex_hdl_t conn_v)
* vertex but that should be safe and we don't
* really expect the additions to fail anyway.
*/
if (!pic_bus1_widget_info_dup(conn_v, peer_conn_v, xbow_peer))
if (!pic_bus1_widget_info_dup(conn_v, peer_conn_v,
xbow_peer, peer_path))
return 0;
hubv = cnodeid_to_vertex(xbow_peer);
ASSERT(hubv != GRAPH_VERTEX_NONE);
device_master_set(peer_conn_v, hubv);
xtalk_provider_register(hubv, &hub_provider);
xtalk_provider_startup(hubv);
return peer_conn_v;
}
}
......@@ -151,12 +185,15 @@ pic_bus1_redist(nasid_t nasid, vertex_hdl_t conn_v)
return 0;
}
/*
* PIC has two buses under a single widget. pic_attach() calls pic_attach2()
* to attach each of those buses.
*/
int
pic_attach(vertex_hdl_t conn_v)
{
int rc;
bridge_t *bridge0, *bridge1 = (bridge_t *)0;
void *bridge0, *bridge1 = (void *)0;
vertex_hdl_t pcibr_vhdl0, pcibr_vhdl1 = (vertex_hdl_t)0;
pcibr_soft_t bus0_soft, bus1_soft = (pcibr_soft_t)0;
vertex_hdl_t conn_v0, conn_v1, peer_conn_v;
......@@ -165,9 +202,8 @@ pic_attach(vertex_hdl_t conn_v)
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v, "pic_attach()\n"));
bridge0 = (bridge_t *) xtalk_piotrans_addr(conn_v, NULL,
0, sizeof(bridge_t), 0);
bridge1 = (bridge_t *)((char *)bridge0 + PIC_BUS1_OFFSET);
bridge0 = pcibr_bridge_ptr_get(conn_v, 0);
bridge1 = pcibr_bridge_ptr_get(conn_v, 1);
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v,
"pic_attach: bridge0=0x%lx, bridge1=0x%lx\n",
......@@ -215,8 +251,23 @@ pic_attach(vertex_hdl_t conn_v)
pciio_provider_startup(pcibr_vhdl0);
pciio_provider_startup(pcibr_vhdl1);
pcibr_attach2(conn_v0, bridge0, pcibr_vhdl0, 0, &bus0_soft);
pcibr_attach2(conn_v1, bridge1, pcibr_vhdl1, 1, &bus1_soft);
pic_attach2(conn_v0, bridge0, pcibr_vhdl0, 0, &bus0_soft);
pic_attach2(conn_v1, bridge1, pcibr_vhdl1, 1, &bus1_soft);
{
/* If we're dual-ported finish duplicating the peer info structure.
* The error handler and arg are done in pic_attach2().
*/
xwidget_info_t info0, info1;
if (conn_v0 != conn_v1) { /* dual ported */
info0 = xwidget_info_get(conn_v0);
info1 = xwidget_info_get(conn_v1);
if (info1->w_efunc == (error_handler_f *)NULL)
info1->w_efunc = info0->w_efunc;
if (info1->w_einfo == (error_handler_arg_t)0)
info1->w_einfo = bus1_soft;
}
}
/* save a pointer to the PIC's other bus's soft struct */
bus0_soft->bs_peers_soft = bus1_soft;
......@@ -229,6 +280,506 @@ pic_attach(vertex_hdl_t conn_v)
return 0;
}
/*
* PIC has two buses under a single widget. pic_attach() calls pic_attach2()
* to attach each of those buses.
*/
static int
pic_attach2(vertex_hdl_t xconn_vhdl, void *bridge,
vertex_hdl_t pcibr_vhdl, int busnum, pcibr_soft_t *ret_softp)
{
vertex_hdl_t ctlr_vhdl;
pcibr_soft_t pcibr_soft;
pcibr_info_t pcibr_info;
xwidget_info_t info;
xtalk_intr_t xtalk_intr;
pcibr_list_p self;
int entry, slot, ibit, i;
vertex_hdl_t noslot_conn;
char devnm[MAXDEVNAME], *s;
pcibr_hints_t pcibr_hints;
picreg_t id;
picreg_t int_enable;
picreg_t pic_ctrl_reg;
int iobrick_type_get_nasid(nasid_t nasid);
int iomoduleid_get(nasid_t nasid);
int irq;
int cpu;
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
"pic_attach2: bridge=0x%lx, busnum=%d\n", bridge, busnum));
ctlr_vhdl = NULL;
ctlr_vhdl = hwgraph_register(pcibr_vhdl, EDGE_LBL_CONTROLLER, 0,
0, 0, 0,
S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
(struct file_operations *)&pcibr_fops, (void *)pcibr_vhdl);
ASSERT(ctlr_vhdl != NULL);
id = pcireg_bridge_id_get(bridge);
hwgraph_info_add_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV,
(arbitrary_info_t)XWIDGET_PART_REV_NUM(id));
/*
* Get the hint structure; if some NIC callback marked this vertex as
* "hands-off" then we just return here, before doing anything else.
*/
pcibr_hints = pcibr_hints_get(xconn_vhdl, 0);
if (pcibr_hints && pcibr_hints->ph_hands_off)
return -1;
/* allocate soft structure to hang off the vertex. Link the new soft
* structure to the pcibr_list linked list
*/
pcibr_soft = kmalloc(sizeof (*(pcibr_soft)), GFP_KERNEL);
if ( !pcibr_soft )
return -1;
self = kmalloc(sizeof (*(self)), GFP_KERNEL);
if ( !self ) {
kfree(pcibr_soft);
return -1;
}
memset(pcibr_soft, 0, sizeof (*(pcibr_soft)));
memset(self, 0, sizeof (*(self)));
self->bl_soft = pcibr_soft;
self->bl_vhdl = pcibr_vhdl;
self->bl_next = pcibr_list;
pcibr_list = self;
if (ret_softp)
*ret_softp = pcibr_soft;
memset(pcibr_soft, 0, sizeof *pcibr_soft);
pcibr_soft_set(pcibr_vhdl, pcibr_soft);
s = dev_to_name(pcibr_vhdl, devnm, MAXDEVNAME);
pcibr_soft->bs_name = kmalloc(strlen(s) + 1, GFP_KERNEL);
strcpy(pcibr_soft->bs_name, s);
pcibr_soft->bs_conn = xconn_vhdl;
pcibr_soft->bs_vhdl = pcibr_vhdl;
pcibr_soft->bs_base = (void *)bridge;
pcibr_soft->bs_rev_num = XWIDGET_PART_REV_NUM(id);
pcibr_soft->bs_intr_bits = (pcibr_intr_bits_f *)pcibr_intr_bits;
pcibr_soft->bsi_err_intr = 0;
pcibr_soft->bs_min_slot = 0;
pcibr_soft->bs_max_slot = 3;
pcibr_soft->bs_busnum = busnum;
pcibr_soft->bs_bridge_type = PCIBR_BRIDGETYPE_PIC;
pcibr_soft->bs_int_ate_size = PIC_INTERNAL_ATES;
/* Make sure this is called after setting the bs_base and bs_bridge_type */
pcibr_soft->bs_bridge_mode = (pcireg_speed_get(pcibr_soft) << 1) |
pcireg_mode_get(pcibr_soft);
info = xwidget_info_get(xconn_vhdl);
pcibr_soft->bs_xid = xwidget_info_id_get(info);
pcibr_soft->bs_master = xwidget_info_master_get(info);
pcibr_soft->bs_mxid = xwidget_info_masterid_get(info);
strcpy(pcibr_soft->bs_asic_name, "PIC");
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
"pic_attach2: pcibr_soft=0x%lx, mode=0x%x\n",
pcibr_soft, pcibr_soft->bs_bridge_mode));
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
"pic_attach2: %s ASIC: rev %s (code=0x%x)\n",
pcibr_soft->bs_asic_name,
(IS_PIC_PART_REV_A(pcibr_soft->bs_rev_num)) ? "A" :
(IS_PIC_PART_REV_B(pcibr_soft->bs_rev_num)) ? "B" :
(IS_PIC_PART_REV_C(pcibr_soft->bs_rev_num)) ? "C" :
"unknown", pcibr_soft->bs_rev_num));
/* PV854845: Must clear write request buffer to avoid parity errors */
for (i=0; i < PIC_WR_REQ_BUFSIZE; i++) {
((pic_t *)bridge)->p_wr_req_lower[i] = 0;
((pic_t *)bridge)->p_wr_req_upper[i] = 0;
((pic_t *)bridge)->p_wr_req_parity[i] = 0;
}
pcibr_soft->bs_nasid = NASID_GET(bridge);
pcibr_soft->bs_bricktype = iobrick_type_get_nasid(pcibr_soft->bs_nasid);
if (pcibr_soft->bs_bricktype < 0)
printk(KERN_WARNING "%s: bricktype was unknown by L1 (ret val = 0x%x)\n",
pcibr_soft->bs_name, pcibr_soft->bs_bricktype);
pcibr_soft->bs_moduleid = iomoduleid_get(pcibr_soft->bs_nasid);
if (pcibr_soft->bs_bricktype > 0) {
switch (pcibr_soft->bs_bricktype) {
case MODULE_PXBRICK:
case MODULE_IXBRICK:
case MODULE_OPUSBRICK:
pcibr_soft->bs_first_slot = 0;
pcibr_soft->bs_last_slot = 1;
pcibr_soft->bs_last_reset = 1;
/* Bus 1 of IXBrick has an IO9, so there are 4 devices, not 2 */
if ((pcibr_widget_to_bus(pcibr_vhdl) == 1)
&& isIO9(pcibr_soft->bs_nasid)) {
pcibr_soft->bs_last_slot = 3;
pcibr_soft->bs_last_reset = 3;
}
break;
case MODULE_CGBRICK:
pcibr_soft->bs_first_slot = 0;
pcibr_soft->bs_last_slot = 0;
pcibr_soft->bs_last_reset = 0;
break;
default:
printk(KERN_WARNING "%s: Unknown bricktype: 0x%x\n",
pcibr_soft->bs_name, pcibr_soft->bs_bricktype);
break;
}
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
"pic_attach2: bricktype=%d, brickbus=%d, "
"slots %d-%d\n", pcibr_soft->bs_bricktype,
pcibr_widget_to_bus(pcibr_vhdl),
pcibr_soft->bs_first_slot, pcibr_soft->bs_last_slot));
}
/*
* Initialize bridge and bus locks
*/
spin_lock_init(&pcibr_soft->bs_lock);
/*
* If we have one, process the hints structure.
*/
if (pcibr_hints) {
unsigned rrb_fixed;
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_HINTS, pcibr_vhdl,
"pic_attach2: pcibr_hints=0x%lx\n", pcibr_hints));
rrb_fixed = pcibr_hints->ph_rrb_fixed;
pcibr_soft->bs_rrb_fixed = rrb_fixed;
if (pcibr_hints->ph_intr_bits)
pcibr_soft->bs_intr_bits = pcibr_hints->ph_intr_bits;
for (slot = pcibr_soft->bs_min_slot;
slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
int hslot = pcibr_hints->ph_host_slot[slot] - 1;
if (hslot < 0) {
pcibr_soft->bs_slot[slot].host_slot = slot;
} else {
pcibr_soft->bs_slot[slot].has_host = 1;
pcibr_soft->bs_slot[slot].host_slot = hslot;
}
}
}
/*
* Set-up initial values for state fields
*/
for (slot = pcibr_soft->bs_min_slot;
slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
pcibr_soft->bs_slot[slot].bss_devio.bssd_space = PCIIO_SPACE_NONE;
pcibr_soft->bs_slot[slot].bss_devio.bssd_ref_cnt = 0;
pcibr_soft->bs_slot[slot].bss_d64_base = PCIBR_D64_BASE_UNSET;
pcibr_soft->bs_slot[slot].bss_d32_base = PCIBR_D32_BASE_UNSET;
pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN0] = -1;
}
for (ibit = 0; ibit < 8; ++ibit) {
pcibr_soft->bs_intr[ibit].bsi_xtalk_intr = 0;
pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_soft = pcibr_soft;
pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_list = NULL;
pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_ibit = ibit;
pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_hdlrcnt = 0;
pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_shared = 0;
pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_connected = 0;
}
/*
* connect up our error handler. PIC has 2 busses (thus resulting in 2
* pcibr_soft structs under 1 widget), so only register a xwidget error
* handler for PIC's bus0. NOTE: for PIC pcibr_error_handler_wrapper()
* is a wrapper routine we register that will call the real error handler
* pcibr_error_handler() with the correct pcibr_soft struct.
*/
if (busnum == 0) {
xwidget_error_register(xconn_vhdl,
pcibr_error_handler_wrapper, pcibr_soft);
}
/*
* Clear all pending interrupts. Assume all interrupts are from slot 3
 * until otherwise set up.
*/
pcireg_intr_reset_set(pcibr_soft, PIC_IRR_ALL_CLR);
pcireg_intr_device_set(pcibr_soft, 0x006db6db);
/* Setup the mapping register used for direct mapping */
pcibr_directmap_init(pcibr_soft);
/*
* Initialize the PICs control register.
*/
pic_ctrl_reg = pcireg_control_get(pcibr_soft);
/* Bridges Requester ID: bus = busnum, dev = 0, func = 0 */
pic_ctrl_reg &= ~PIC_CTRL_BUS_NUM_MASK;
pic_ctrl_reg |= PIC_CTRL_BUS_NUM(busnum);
pic_ctrl_reg &= ~PIC_CTRL_DEV_NUM_MASK;
pic_ctrl_reg &= ~PIC_CTRL_FUN_NUM_MASK;
pic_ctrl_reg &= ~PIC_CTRL_NO_SNOOP;
pic_ctrl_reg &= ~PIC_CTRL_RELAX_ORDER;
/* enable parity checking on PICs internal RAM */
pic_ctrl_reg |= PIC_CTRL_PAR_EN_RESP;
pic_ctrl_reg |= PIC_CTRL_PAR_EN_ATE;
/* PIC BRINGUP WAR (PV# 862253): don't enable write request parity */
if (!PCIBR_WAR_ENABLED(PV862253, pcibr_soft)) {
pic_ctrl_reg |= PIC_CTRL_PAR_EN_REQ;
}
pic_ctrl_reg |= PIC_CTRL_PAGE_SIZE;
pcireg_control_set(pcibr_soft, pic_ctrl_reg);
/* Initialize internal mapping entries (ie. the ATEs) */
for (entry = 0; entry < pcibr_soft->bs_int_ate_size; entry++)
pcireg_int_ate_set(pcibr_soft, entry, 0);
pcibr_soft->bs_int_ate_resource.start = 0;
pcibr_soft->bs_int_ate_resource.end = pcibr_soft->bs_int_ate_size - 1;
/* Setup the PICs error interrupt handler. */
xtalk_intr = xtalk_intr_alloc(xconn_vhdl, (device_desc_t)0, pcibr_vhdl);
ASSERT(xtalk_intr != NULL);
irq = ((hub_intr_t)xtalk_intr)->i_bit;
cpu = ((hub_intr_t)xtalk_intr)->i_cpuid;
intr_unreserve_level(cpu, irq);
((hub_intr_t)xtalk_intr)->i_bit = SGI_PCIBR_ERROR;
xtalk_intr->xi_vector = SGI_PCIBR_ERROR;
pcibr_soft->bsi_err_intr = xtalk_intr;
/*
* On IP35 with XBridge, we do some extra checks in pcibr_setwidint
* in order to work around some addressing limitations. In order
* for that fire wall to work properly, we need to make sure we
* start from a known clean state.
*/
pcibr_clearwidint(pcibr_soft);
xtalk_intr_connect(xtalk_intr,
(intr_func_t) pcibr_error_intr_handler,
(intr_arg_t) pcibr_soft,
(xtalk_intr_setfunc_t) pcibr_setwidint,
(void *) pcibr_soft);
request_irq(SGI_PCIBR_ERROR, (void *)pcibr_error_intr_handler, SA_SHIRQ,
"PCIBR error", (intr_arg_t) pcibr_soft);
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_vhdl,
"pcibr_setwidint: target_id=0x%lx, int_addr=0x%lx\n",
pcireg_intr_dst_target_id_get(pcibr_soft),
pcireg_intr_dst_addr_get(pcibr_soft)));
/* now we can start handling error interrupts */
int_enable = pcireg_intr_enable_get(pcibr_soft);
int_enable |= PIC_ISR_ERRORS;
/* PIC BRINGUP WAR (PV# 856864 & 856865): allow the tnums that are
* locked out to be freed up sooner (by timing out) so that the
* read tnums are never completely used up.
*/
if (PCIBR_WAR_ENABLED(PV856864, pcibr_soft)) {
int_enable &= ~PIC_ISR_PCIX_REQ_TOUT;
int_enable &= ~PIC_ISR_XREAD_REQ_TIMEOUT;
pcireg_req_timeout_set(pcibr_soft, 0x750);
}
pcireg_intr_enable_set(pcibr_soft, int_enable);
pcireg_intr_mode_set(pcibr_soft, 0); /* don't send 'clear interrupt' pkts */
pcireg_tflush_get(pcibr_soft); /* wait until Bridge PIO complete */
/*
* PIC BRINGUP WAR (PV# 856866, 859504, 861476, 861478): Don't use
* RRB0, RRB8, RRB1, and RRB9. Assign them to DEVICE[2|3]--VCHAN3
* so they are not used. This works since there is currently no
 * API to enable VCHAN3.
*/
if (PCIBR_WAR_ENABLED(PV856866, pcibr_soft)) {
pcireg_rrb_bit_set(pcibr_soft, 0, 0x000f000f); /* even rrb reg */
pcireg_rrb_bit_set(pcibr_soft, 1, 0x000f000f); /* odd rrb reg */
}
/* PIC only supports 64-bit direct mapping in PCI-X mode. Since
* all PCI-X devices that initiate memory transactions must be
 * capable of generating 64-bit addresses, we force 64-bit DMAs.
*/
pcibr_soft->bs_dma_flags = 0;
if (IS_PCIX(pcibr_soft)) {
pcibr_soft->bs_dma_flags |= PCIIO_DMA_A64;
}
{
iopaddr_t prom_base_addr = pcibr_soft->bs_xid << 24;
int prom_base_size = 0x1000000;
int status;
struct resource *res;
/* Allocate resource maps based on bus page size; for I/O and memory
* space, free all pages except those in the base area and in the
* range set by the PROM.
*
* PROM creates BAR addresses in this format: 0x0ws00000 where w is
* the widget number and s is the device register offset for the slot.
*/
/* Setup the Bus's PCI IO Root Resource. */
pcibr_soft->bs_io_win_root_resource.start = PCIBR_BUS_IO_BASE;
pcibr_soft->bs_io_win_root_resource.end = 0xffffffff;
res = (struct resource *) kmalloc( sizeof(struct resource), GFP_KERNEL);
if (!res)
panic("PCIBR:Unable to allocate resource structure\n");
/* Block off the range used by PROM. */
res->start = prom_base_addr;
res->end = prom_base_addr + (prom_base_size - 1);
status = request_resource(&pcibr_soft->bs_io_win_root_resource, res);
if (status)
panic("PCIBR:Unable to request_resource()\n");
/* Setup the Small Window Root Resource */
pcibr_soft->bs_swin_root_resource.start = PAGE_SIZE;
pcibr_soft->bs_swin_root_resource.end = 0x000FFFFF;
/* Setup the Bus's PCI Memory Root Resource */
pcibr_soft->bs_mem_win_root_resource.start = 0x200000;
pcibr_soft->bs_mem_win_root_resource.end = 0xffffffff;
res = (struct resource *) kmalloc( sizeof(struct resource), GFP_KERNEL);
if (!res)
panic("PCIBR:Unable to allocate resource structure\n");
/* Block off the range used by PROM. */
res->start = prom_base_addr;
res->end = prom_base_addr + (prom_base_size - 1);
status = request_resource(&pcibr_soft->bs_mem_win_root_resource, res);
if (status)
panic("PCIBR:Unable to request_resource()\n");
}
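A worked example of the PROM BAR layout noted in the comment above (0x0ws00000, widget id shifted up to bit 24), with an illustrative widget id of 0xC:
/*
 *     prom_base_addr            = 0xC << 24             = 0x0C000000
 *     prom_base_addr + size - 1 = 0x0C000000 + 0xFFFFFF = 0x0CFFFFFF
 *
 * i.e. a 16 MB window per widget is blocked off in both the PCI I/O and
 * PCI memory root resources.  (Widget 0xC is only an example.)
 */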
/* build "no-slot" connection point */
pcibr_info = pcibr_device_info_new(pcibr_soft, PCIIO_SLOT_NONE,
PCIIO_FUNC_NONE, PCIIO_VENDOR_ID_NONE, PCIIO_DEVICE_ID_NONE);
noslot_conn = pciio_device_info_register(pcibr_vhdl, &pcibr_info->f_c);
/* Store no slot connection point info for tearing it down during detach. */
pcibr_soft->bs_noslot_conn = noslot_conn;
pcibr_soft->bs_noslot_info = pcibr_info;
for (slot = pcibr_soft->bs_min_slot;
slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
/* Find out what is out there */
(void)pcibr_slot_info_init(pcibr_vhdl, slot);
}
for (slot = pcibr_soft->bs_min_slot;
slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
/* Set up the address space for this slot in the PCI land */
(void)pcibr_slot_addr_space_init(pcibr_vhdl, slot);
}
for (slot = pcibr_soft->bs_min_slot;
slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
/* Setup the device register */
(void)pcibr_slot_device_init(pcibr_vhdl, slot);
}
if (IS_PCIX(pcibr_soft)) {
pcibr_soft->bs_pcix_rbar_inuse = 0;
pcibr_soft->bs_pcix_rbar_avail = NUM_RBAR;
pcibr_soft->bs_pcix_rbar_percent_allowed =
pcibr_pcix_rbars_calc(pcibr_soft);
for (slot = pcibr_soft->bs_min_slot;
slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
/* Setup the PCI-X Read Buffer Attribute Registers (RBARs) */
(void)pcibr_slot_pcix_rbar_init(pcibr_soft, slot);
}
}
for (slot = pcibr_soft->bs_min_slot;
slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
/* Setup host/guest relations */
(void)pcibr_slot_guest_info_init(pcibr_vhdl, slot);
}
/* Handle initial RRB management */
pcibr_initial_rrb(pcibr_vhdl,
pcibr_soft->bs_first_slot, pcibr_soft->bs_last_slot);
/* Before any drivers get called that may want to re-allocate RRB's,
* let's get some special cases pre-allocated. Drivers may override
* these pre-allocations, but by doing pre-allocations now we're
* assured not to step all over what the driver intended.
*/
if (pcibr_soft->bs_bricktype > 0) {
switch (pcibr_soft->bs_bricktype) {
case MODULE_PXBRICK:
case MODULE_IXBRICK:
case MODULE_OPUSBRICK:
/*
* If IO9 in bus 1, allocate RRBs to all the IO9 devices
*/
if ((pcibr_widget_to_bus(pcibr_vhdl) == 1) &&
(pcibr_soft->bs_slot[0].bss_vendor_id == 0x10A9) &&
(pcibr_soft->bs_slot[0].bss_device_id == 0x100A)) {
pcibr_rrb_alloc_init(pcibr_soft, 0, VCHAN0, 4);
pcibr_rrb_alloc_init(pcibr_soft, 1, VCHAN0, 4);
pcibr_rrb_alloc_init(pcibr_soft, 2, VCHAN0, 4);
pcibr_rrb_alloc_init(pcibr_soft, 3, VCHAN0, 4);
} else {
pcibr_rrb_alloc_init(pcibr_soft, 0, VCHAN0, 4);
pcibr_rrb_alloc_init(pcibr_soft, 1, VCHAN0, 4);
}
break;
case MODULE_CGBRICK:
pcibr_rrb_alloc_init(pcibr_soft, 0, VCHAN0, 8);
break;
} /* switch */
}
for (slot = pcibr_soft->bs_min_slot;
slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
/* Call the device attach */
(void)pcibr_slot_call_device_attach(pcibr_vhdl, slot, 0);
}
pciio_device_attach(noslot_conn, 0);
return 0;
}
/*
* pci provider functions
*
......@@ -237,6 +788,8 @@ pic_attach(vertex_hdl_t conn_v)
*/
pciio_provider_t pci_pic_provider =
{
PCIIO_ASIC_TYPE_PIC,
(pciio_piomap_alloc_f *) pcibr_piomap_alloc,
(pciio_piomap_free_f *) pcibr_piomap_free,
(pciio_piomap_addr_f *) pcibr_piomap_addr,
......@@ -263,12 +816,12 @@ pciio_provider_t pci_pic_provider =
(pciio_provider_startup_f *) pcibr_provider_startup,
(pciio_provider_shutdown_f *) pcibr_provider_shutdown,
(pciio_reset_f *) pcibr_reset,
(pciio_write_gather_flush_f *) pcibr_write_gather_flush,
(pciio_endian_set_f *) pcibr_endian_set,
(pciio_priority_set_f *) pcibr_priority_set,
(pciio_config_get_f *) pcibr_config_get,
(pciio_config_set_f *) pcibr_config_set,
(pciio_error_extract_f *) 0,
(pciio_error_extract_f *) pcibr_error_extract,
(pciio_driver_reg_callback_f *) pcibr_driver_reg_callback,
(pciio_driver_unreg_callback_f *) pcibr_driver_unreg_callback,
(pciio_device_unregister_f *) pcibr_device_unregister,
......
......@@ -133,11 +133,7 @@ sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
return;
cpu = first_cpu(mask);
if (IS_PIC_SOFT(intr->bi_soft) ) {
sn_shub_redirect_intr(intr, cpu);
} else {
return;
}
sn_shub_redirect_intr(intr, cpu);
(void) set_irq_affinity_info(irq, cpu_physical_id(intr->bi_cpu), redir);
#endif /* CONFIG_SMP */
}
......@@ -262,7 +258,7 @@ sn_check_intr(int irq, pcibr_intr_t intr) {
unsigned long irr_reg;
regval = pcireg_intr_status_get(intr->bi_soft->bs_base);
regval = pcireg_intr_status_get(intr->bi_soft);
irr_reg_num = irq_to_vector(irq) / 64;
irr_bit = irq_to_vector(irq) % 64;
switch (irr_reg_num) {
......
......@@ -158,12 +158,9 @@ struct module_s {
spinlock_t lock; /* Lock for this structure */
/* List of nodes in this module */
cnodeid_t nodes[MODULE_MAX_NODES];
geoid_t geoid[MODULE_MAX_NODES];
struct {
char moduleid[8];
} io[MODULE_MAX_NODES];
int nodecnt; /* Number of nodes in array */
cnodeid_t nodes[MAX_SLABS + 1];
geoid_t geoid[MAX_SLABS + 1];
/* Fields for Module System Controller */
int mesgpend; /* Message pending */
int shutdown; /* Shutdown in progress */
......
......@@ -918,6 +918,10 @@ typedef volatile struct bridge_s {
#define PCIBR_TYPE0_CFG_DEV(ps, s) PCIBRIDGE_TYPE0_CFG_DEV((ps)->bs_busnum, s+1)
#define PCIBR_BUS_TYPE0_CFG_DEVF(ps,s,f) PCIBRIDGE_TYPE0_CFG_DEVF((ps)->bs_busnum,(s+1),f)
/* NOTE: 's' is the internal device number, not the external slot number */
#define PCIBR_BUS_TYPE0_CFG_DEV(ps, s) \
PCIBRIDGE_TYPE0_CFG_DEV((ps)->bs_busnum, s+1)
#endif /* LANGUAGE_C */
#define BRIDGE_EXTERNAL_FLASH 0x00C00000 /* External Flash PROMS */
......@@ -943,10 +947,6 @@ typedef volatile struct bridge_s {
#define XBRIDGE_REV_B 0x2
/* macros to determine bridge type. 'wid' == widget identification */
#define IS_BRIDGE(wid) (XWIDGET_PART_NUM(wid) == BRIDGE_WIDGET_PART_NUM && \
XWIDGET_MFG_NUM(wid) == BRIDGE_WIDGET_MFGR_NUM)
#define IS_XBRIDGE(wid) (XWIDGET_PART_NUM(wid) == XBRIDGE_WIDGET_PART_NUM && \
XWIDGET_MFG_NUM(wid) == XBRIDGE_WIDGET_MFGR_NUM)
#define IS_PIC_BUS0(wid) (XWIDGET_PART_NUM(wid) == PIC_WIDGET_PART_NUM_BUS0 && \
XWIDGET_MFG_NUM(wid) == PIC_WIDGET_MFGR_NUM)
#define IS_PIC_BUS1(wid) (XWIDGET_PART_NUM(wid) == PIC_WIDGET_PART_NUM_BUS1 && \
......
......@@ -31,10 +31,6 @@
#define MAX_PCI_XWIDGET 256
#define MAX_ATE_MAPS 1024
#define SET_PCIA64(dev) \
(((struct sn_device_sysdata *)((dev)->sysdata))->isa64) = 1
#define IS_PCIA64(dev) (((dev)->dma_mask == 0xffffffffffffffffUL) || \
(((struct sn_device_sysdata *)((dev)->sysdata))->isa64))
#define IS_PCI32G(dev) ((dev)->dma_mask >= 0xffffffff)
#define IS_PCI32L(dev) ((dev)->dma_mask < 0xffffffff)
......@@ -50,9 +46,6 @@ struct sn_widget_sysdata {
struct sn_device_sysdata {
vertex_hdl_t vhdl;
int isa64;
volatile unsigned int *dma_buf_sync;
volatile unsigned int *xbow_buf_sync;
pciio_provider_t *pci_provider;
};
......
......@@ -40,26 +40,6 @@ typedef struct pcibr_piomap_s *pcibr_piomap_t;
typedef struct pcibr_dmamap_s *pcibr_dmamap_t;
typedef struct pcibr_intr_s *pcibr_intr_t;
/* =====================================================================
* primary entry points: Bridge (pcibr) device driver
*
* These functions are normal device driver entry points
* and are called along with the similar entry points from
* other device drivers. They are included here as documentation
* of their existence and purpose.
*
* pcibr_init() is called to inform us that there is a pcibr driver
* configured into the kernel; it is responsible for registering
* as a crosstalk widget and providing a routine to be called
* when a widget with the proper part number is observed.
*
* pcibr_attach() is called for each vertex in the hardware graph
* corresponding to a crosstalk widget with the manufacturer
* code and part number registered by pcibr_init().
*/
extern int pcibr_attach(vertex_hdl_t);
/* =====================================================================
* bus provider function table
*
......@@ -72,7 +52,6 @@ extern int pcibr_attach(vertex_hdl_t);
* pcibr, we can go directly to this ops table.
*/
extern pciio_provider_t pcibr_provider;
extern pciio_provider_t pci_pic_provider;
/* =====================================================================
......@@ -107,6 +86,11 @@ extern caddr_t pcibr_piomap_addr(pcibr_piomap_t piomap,
extern void pcibr_piomap_done(pcibr_piomap_t piomap);
extern int pcibr_piomap_probe(pcibr_piomap_t piomap,
off_t offset,
int len,
void *valp);
extern caddr_t pcibr_piotrans_addr(vertex_hdl_t dev,
device_desc_t dev_desc,
pciio_space_t space,
......@@ -193,15 +177,10 @@ extern void pcibr_provider_shutdown(vertex_hdl_t pcibr);
extern int pcibr_reset(vertex_hdl_t dev);
extern int pcibr_write_gather_flush(vertex_hdl_t dev);
extern pciio_endian_t pcibr_endian_set(vertex_hdl_t dev,
pciio_endian_t device_end,
pciio_endian_t desired_end);
extern pciio_priority_t pcibr_priority_set(vertex_hdl_t dev,
pciio_priority_t device_prio);
extern uint64_t pcibr_config_get(vertex_hdl_t conn,
unsigned reg,
unsigned size);
......@@ -211,6 +190,10 @@ extern void pcibr_config_set(vertex_hdl_t conn,
unsigned size,
uint64_t value);
extern pciio_slot_t pcibr_error_extract(vertex_hdl_t pcibr_vhdl,
pciio_space_t *spacep,
iopaddr_t *addrp);
extern int pcibr_wrb_flush(vertex_hdl_t pconn_vhdl);
extern int pcibr_rrb_check(vertex_hdl_t pconn_vhdl,
int *count_vchan0,
......@@ -234,6 +217,12 @@ void pcibr_set_rrb_callback(vertex_hdl_t xconn_vhdl,
rrb_alloc_funct_f *func);
extern int pcibr_device_unregister(vertex_hdl_t);
extern void pcibr_driver_reg_callback(vertex_hdl_t, int, int, int);
extern void pcibr_driver_unreg_callback(vertex_hdl_t,
int, int, int);
extern void * pcibr_bridge_ptr_get(vertex_hdl_t, int);
/*
* Bridge-specific flags that can be set via pcibr_device_flags_set
......@@ -324,9 +313,6 @@ extern int pcibr_rrb_alloc(vertex_hdl_t pconn_vhdl,
* the allocation time in the current implementation of PCI bridge.
*/
extern iopaddr_t pcibr_dmamap_pciaddr_get(pcibr_dmamap_t);
extern xwidget_intr_preset_f pcibr_xintr_preset;
extern void pcibr_hints_fix_rrbs(vertex_hdl_t);
extern void pcibr_hints_dualslot(vertex_hdl_t, pciio_slot_t, pciio_slot_t);
extern void pcibr_hints_subdevs(vertex_hdl_t, pciio_slot_t, ulong);
......@@ -426,7 +412,6 @@ struct pcibr_slot_info_resp_s {
unsigned resp_bss_d64_flags;
iopaddr_t resp_bss_d32_base;
unsigned resp_bss_d32_flags;
atomic_t resp_bss_ext_ates_active;
volatile unsigned *resp_bss_cmd_pointer;
unsigned resp_bss_cmd_shadow;
int resp_bs_rrb_valid;
......@@ -438,8 +423,6 @@ struct pcibr_slot_info_resp_s {
uint64_t resp_b_int_device;
uint64_t resp_b_int_enable;
uint64_t resp_b_int_host;
picreg_t resp_p_int_enable;
picreg_t resp_p_int_host;
struct pcibr_slot_func_info_resp_s {
int resp_f_status;
char resp_f_slot_name[MAXDEVNAME];
......
......@@ -39,18 +39,119 @@ typedef volatile bridgereg_t *reg_p;
/*
* extern functions
*/
cfg_p pcibr_slot_config_addr(bridge_t *, pciio_slot_t, int);
cfg_p pcibr_func_config_addr(bridge_t *, pciio_bus_t bus, pciio_slot_t, pciio_function_t, int);
unsigned pcibr_slot_config_get(bridge_t *, pciio_slot_t, int);
unsigned pcibr_func_config_get(bridge_t *, pciio_slot_t, pciio_function_t, int);
extern void pcireg_intr_enable_bit_clr(void *, uint64_t);
extern void pcireg_intr_enable_bit_set(void *, uint64_t);
extern void pcireg_intr_addr_addr_set(void *, int, uint64_t);
extern void pcireg_force_intr_set(void *, int);
cfg_p pcibr_slot_config_addr(pcibr_soft_t, pciio_slot_t, int);
cfg_p pcibr_func_config_addr(pcibr_soft_t, pciio_bus_t bus, pciio_slot_t, pciio_function_t, int);
void pcibr_debug(uint32_t, vertex_hdl_t, char *, ...);
void pcibr_slot_config_set(bridge_t *, pciio_slot_t, int, unsigned);
void pcibr_func_config_set(bridge_t *, pciio_slot_t, pciio_function_t, int,
unsigned);
void pcibr_func_config_set(pcibr_soft_t, pciio_slot_t, pciio_function_t, int, unsigned);
/*
* pcireg_ externs
*/
extern uint64_t pcireg_id_get(pcibr_soft_t);
extern uint64_t pcireg_bridge_id_get(void *);
extern uint64_t pcireg_bus_err_get(pcibr_soft_t);
extern uint64_t pcireg_control_get(pcibr_soft_t);
extern uint64_t pcireg_bridge_control_get(void *);
extern void pcireg_control_set(pcibr_soft_t, uint64_t);
extern void pcireg_control_bit_clr(pcibr_soft_t, uint64_t);
extern void pcireg_control_bit_set(pcibr_soft_t, uint64_t);
extern void pcireg_req_timeout_set(pcibr_soft_t, uint64_t);
extern void pcireg_intr_dst_set(pcibr_soft_t, uint64_t);
extern uint64_t pcireg_intr_dst_target_id_get(pcibr_soft_t);
extern void pcireg_intr_dst_target_id_set(pcibr_soft_t, uint64_t);
extern uint64_t pcireg_intr_dst_addr_get(pcibr_soft_t);
extern void pcireg_intr_dst_addr_set(pcibr_soft_t, uint64_t);
extern uint64_t pcireg_cmdword_err_get(pcibr_soft_t);
extern uint64_t pcireg_llp_cfg_get(pcibr_soft_t);
extern void pcireg_llp_cfg_set(pcibr_soft_t, uint64_t);
extern uint64_t pcireg_tflush_get(pcibr_soft_t);
extern uint64_t pcireg_linkside_err_get(pcibr_soft_t);
extern uint64_t pcireg_resp_err_get(pcibr_soft_t);
extern uint64_t pcireg_resp_err_addr_get(pcibr_soft_t);
extern uint64_t pcireg_resp_err_buf_get(pcibr_soft_t);
extern uint64_t pcireg_resp_err_dev_get(pcibr_soft_t);
extern uint64_t pcireg_linkside_err_addr_get(pcibr_soft_t);
extern uint64_t pcireg_dirmap_get(pcibr_soft_t);
extern void pcireg_dirmap_set(pcibr_soft_t, uint64_t);
extern void pcireg_dirmap_wid_set(pcibr_soft_t, uint64_t);
extern void pcireg_dirmap_diroff_set(pcibr_soft_t, uint64_t);
extern void pcireg_dirmap_add512_set(pcibr_soft_t);
extern void pcireg_dirmap_add512_clr(pcibr_soft_t);
extern uint64_t pcireg_map_fault_get(pcibr_soft_t);
extern uint64_t pcireg_arbitration_get(pcibr_soft_t);
extern void pcireg_arbitration_set(pcibr_soft_t, uint64_t);
extern void pcireg_arbitration_bit_clr(pcibr_soft_t, uint64_t);
extern void pcireg_arbitration_bit_set(pcibr_soft_t, uint64_t);
extern uint64_t pcireg_parity_err_get(pcibr_soft_t);
extern uint64_t pcireg_type1_cntr_get(pcibr_soft_t);
extern void pcireg_type1_cntr_set(pcibr_soft_t, uint64_t);
extern uint64_t pcireg_timeout_get(pcibr_soft_t);
extern void pcireg_timeout_set(pcibr_soft_t, uint64_t);
extern void pcireg_timeout_bit_clr(pcibr_soft_t, uint64_t);
extern void pcireg_timeout_bit_set(pcibr_soft_t, uint64_t);
extern uint64_t pcireg_pci_bus_addr_get(pcibr_soft_t);
extern uint64_t pcireg_pci_bus_addr_addr_get(pcibr_soft_t);
extern uint64_t pcireg_intr_status_get(pcibr_soft_t);
extern uint64_t pcireg_intr_enable_get(pcibr_soft_t);
extern void pcireg_intr_enable_set(pcibr_soft_t, uint64_t);
extern void pcireg_intr_enable_bit_clr(pcibr_soft_t, uint64_t);
extern void pcireg_intr_enable_bit_set(pcibr_soft_t, uint64_t);
extern void pcireg_intr_reset_set(pcibr_soft_t, uint64_t);
extern void pcireg_intr_reset_bit_set(pcibr_soft_t, uint64_t);
extern uint64_t pcireg_intr_mode_get(pcibr_soft_t);
extern void pcireg_intr_mode_set(pcibr_soft_t, uint64_t);
extern void pcireg_intr_mode_bit_clr(pcibr_soft_t, uint64_t);
extern uint64_t pcireg_intr_device_get(pcibr_soft_t);
extern void pcireg_intr_device_set(pcibr_soft_t, uint64_t);
extern void pcireg_intr_device_bit_set(pcibr_soft_t, uint64_t);
extern void pcireg_bridge_intr_device_bit_set(void *, uint64_t);
extern void pcireg_intr_device_bit_clr(pcibr_soft_t, uint64_t);
extern uint64_t pcireg_intr_host_err_get(pcibr_soft_t);
extern void pcireg_intr_host_err_set(pcibr_soft_t, uint64_t);
extern uint64_t pcireg_intr_addr_get(pcibr_soft_t, int);
extern void pcireg_intr_addr_set(pcibr_soft_t, int, uint64_t);
extern void pcireg_bridge_intr_addr_set(void *, int, uint64_t);
extern void * pcireg_intr_addr_addr(pcibr_soft_t, int);
extern void pcireg_intr_addr_vect_set(pcibr_soft_t, int, uint64_t);
extern void pcireg_bridge_intr_addr_vect_set(void *, int, uint64_t);
extern uint64_t pcireg_intr_addr_addr_get(pcibr_soft_t, int);
extern void pcireg_intr_addr_addr_set(pcibr_soft_t, int, uint64_t);
extern void pcireg_bridge_intr_addr_addr_set(void *, int, uint64_t);
extern uint64_t pcireg_intr_view_get(pcibr_soft_t);
extern uint64_t pcireg_intr_multiple_get(pcibr_soft_t);
extern void pcireg_force_always_set(pcibr_soft_t, int);
extern void * pcireg_bridge_force_always_addr_get(void *, int);
extern void * pcireg_force_always_addr_get(pcibr_soft_t, int);
extern void pcireg_force_intr_set(pcibr_soft_t, int);
extern uint64_t pcireg_device_get(pcibr_soft_t, int);
extern void pcireg_device_set(pcibr_soft_t, int, uint64_t);
extern void pcireg_device_bit_set(pcibr_soft_t, int, uint64_t);
extern void pcireg_device_bit_clr(pcibr_soft_t, int, uint64_t);
extern uint64_t pcireg_rrb_get(pcibr_soft_t, int);
extern void pcireg_rrb_set(pcibr_soft_t, int, uint64_t);
extern void pcireg_rrb_bit_set(pcibr_soft_t, int, uint64_t);
extern void pcireg_rrb_bit_clr(pcibr_soft_t, int, uint64_t);
extern uint64_t pcireg_rrb_status_get(pcibr_soft_t);
extern void pcireg_rrb_clear_set(pcibr_soft_t, uint64_t);
extern uint64_t pcireg_wrb_flush_get(pcibr_soft_t, int);
extern uint64_t pcireg_pcix_bus_err_addr_get(pcibr_soft_t);
extern uint64_t pcireg_pcix_bus_err_attr_get(pcibr_soft_t);
extern uint64_t pcireg_pcix_bus_err_data_get(pcibr_soft_t);
extern uint64_t pcireg_pcix_req_err_attr_get(pcibr_soft_t);
extern uint64_t pcireg_pcix_req_err_addr_get(pcibr_soft_t);
extern uint64_t pcireg_pcix_pio_split_addr_get(pcibr_soft_t);
extern uint64_t pcireg_pcix_pio_split_attr_get(pcibr_soft_t);
extern cfg_p pcireg_type1_cfg_addr(pcibr_soft_t, pciio_function_t,
int);
extern cfg_p pcireg_type0_cfg_addr(pcibr_soft_t, pciio_slot_t,
pciio_function_t, int);
extern bridge_ate_t pcireg_int_ate_get(pcibr_soft_t, int);
extern void pcireg_int_ate_set(pcibr_soft_t, int, bridge_ate_t);
extern bridge_ate_p pcireg_int_ate_addr(pcibr_soft_t, int);
extern uint64_t pcireg_speed_get(pcibr_soft_t);
extern uint64_t pcireg_mode_get(pcibr_soft_t);
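[Editor's note] These accessors all take the bus soft structure (pcibr_soft_t) instead of a raw bridge pointer, which is the "one call/data type" interface mentioned in the commit message for pcibr_reg.c. A minimal, purely illustrative sketch of how a caller moves from direct bridge_t pokes to the new helpers; the function name and arguments below are placeholders, not code from this patch:

        /* Illustrative only: enable a device interrupt bit through the new
         * pcireg_ accessor instead of dereferencing bridge_t fields.
         * 'pcibr_soft' and 'bits' are assumed to come from the caller. */
        static void
        example_intr_bit_enable(pcibr_soft_t pcibr_soft, uint64_t bits)
        {
                /* old style (removed): bridge->b_int_enable |= bits; */
                pcireg_intr_enable_bit_set(pcibr_soft, bits);
        }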
/*
* PCIBR_DEBUG() macro and debug bitmask defines
*/
......@@ -117,7 +218,7 @@ struct pcibr_piomap_s {
xtalk_piomap_t bp_xtalk_pio; /* corresponding xtalk resource */
pcibr_piomap_t bp_next; /* Next piomap on the list */
pcibr_soft_t bp_soft; /* backpointer to bridge soft data */
atomic_t bp_toc[1]; /* PCI timeout counter */
atomic_t bp_toc; /* PCI timeout counter */
};
......@@ -143,6 +244,7 @@ struct pcibr_dmamap_s {
bridge_ate_t bd_ate_proto; /* prototype ATE (for xioaddr=0) */
bridge_ate_t bd_ate_prime; /* value of 1st ATE written */
dma_addr_t bd_dma_addr; /* Linux dma handle */
struct resource resource;
};
#define IBUFSIZE 5 /* size of circular buffer (holds 4) */
......@@ -245,7 +347,8 @@ struct pcibr_info_s {
struct pcibr_intr_list_s {
pcibr_intr_list_t il_next;
pcibr_intr_t il_intr;
volatile bridgereg_t *il_wrbf; /* ptr to b_wr_req_buf[] */
pcibr_soft_t il_soft;
pciio_slot_t il_slot;
};
/* =====================================================================
......@@ -271,7 +374,7 @@ struct pcibr_intr_wrap_s {
* To reduce the size of the internal resource mapping structures, do
* not use the entire PCI bus I/O address space
*/
#define PCIBR_BUS_IO_BASE 0x100000
#define PCIBR_BUS_IO_BASE 0x200000
#define PCIBR_BUS_IO_MAX 0x0FFFFFFF
#define PCIBR_BUS_IO_PAGE 0x100000
......@@ -284,8 +387,6 @@ struct pcibr_intr_wrap_s {
#define PCIBR_BUS_MEM_PAGE 0x100000
/* defines for pcibr_soft_s->bs_bridge_type */
#define PCIBR_BRIDGETYPE_BRIDGE 0
#define PCIBR_BRIDGETYPE_XBRIDGE 1
#define PCIBR_BRIDGETYPE_PIC 2
#define IS_PIC_BUSNUM_SOFT(ps, bus) ((ps)->bs_busnum == (bus))
......@@ -310,10 +411,6 @@ struct pcibr_intr_wrap_s {
#define PV862253 (1 << 1) /* PIC: don't enable write req RAM parity checking */
#define PV867308 (3 << 1) /* PIC: make LLP error interrupts FATAL for PIC */
/* Bridgetype macros given a pcibr_soft structure */
#define IS_PIC_SOFT(ps) (ps->bs_bridge_type == PCIBR_BRIDGETYPE_PIC)
/* defines for pcibr_soft_s->bs_bridge_mode */
#define PCIBR_BRIDGEMODE_PCI_33 0x0
#define PCIBR_BRIDGEMODE_PCI_66 0x2
......@@ -349,14 +446,16 @@ struct pcibr_soft_s {
vertex_hdl_t bs_conn; /* xtalk connection point */
vertex_hdl_t bs_vhdl; /* vertex owned by pcibr */
uint64_t bs_int_enable; /* Mask of enabled intrs */
bridge_t *bs_base; /* PIO pointer to Bridge chip */
void *bs_base; /* PIO pointer to Bridge chip */
char *bs_name; /* hw graph name */
char bs_asic_name[16]; /* ASIC name */
xwidgetnum_t bs_xid; /* Bridge's xtalk ID number */
vertex_hdl_t bs_master; /* xtalk master vertex */
xwidgetnum_t bs_mxid; /* master's xtalk ID number */
pciio_slot_t bs_first_slot; /* first existing slot */
pciio_slot_t bs_last_slot; /* last existing slot */
pciio_slot_t bs_last_reset; /* last slot to reset */
uint32_t bs_unused_slot; /* unavailable slots bitmask */
pciio_slot_t bs_min_slot; /* lowest possible slot */
pciio_slot_t bs_max_slot; /* highest possible slot */
pcibr_soft_t bs_peers_soft; /* PICs other bus's soft */
......@@ -474,14 +573,6 @@ struct pcibr_soft_s {
unsigned bss_d64_flags;
iopaddr_t bss_d32_base;
unsigned bss_d32_flags;
/* Shadow information used for implementing
* Bridge Hardware WAR #484930
*/
atomic_t bss_ext_ates_active;
volatile unsigned *bss_cmd_pointer;
unsigned bss_cmd_shadow;
} bs_slot[8];
pcibr_intr_bits_f *bs_intr_bits;
......
......@@ -433,18 +433,11 @@ pciio_provider_shutdown_f (vertex_hdl_t pciio_provider);
typedef int
pciio_reset_f (vertex_hdl_t conn); /* pci connection point */
typedef int
pciio_write_gather_flush_f (vertex_hdl_t dev); /* Device flushing buffers */
typedef pciio_endian_t /* actual endianness */
pciio_endian_set_f (vertex_hdl_t dev, /* specify endianness for this device */
pciio_endian_t device_end, /* endianness of device */
pciio_endian_t desired_end); /* desired endianness */
typedef pciio_priority_t
pciio_priority_set_f (vertex_hdl_t pcicard,
pciio_priority_t device_prio);
typedef uint64_t
pciio_config_get_f (vertex_hdl_t conn, /* pci connection point */
unsigned reg, /* register byte offset */
......@@ -476,13 +469,14 @@ pciio_driver_unreg_callback_f (vertex_hdl_t conn, /* pci connection point */
typedef int
pciio_device_unregister_f (vertex_hdl_t conn);
typedef pciio_businfo_t
pciio_businfo_get_f (vertex_hdl_t conn);
/*
* Adapters that provide a PCI interface adhere to this software interface.
*/
typedef struct pciio_provider_s {
/* ASIC PROVIDER ID */
pciio_asic_type_t provider_asic;
/* PIO MANAGEMENT */
pciio_piomap_alloc_f *piomap_alloc;
pciio_piomap_free_f *piomap_free;
......@@ -513,9 +507,7 @@ typedef struct pciio_provider_s {
pciio_provider_startup_f *provider_startup;
pciio_provider_shutdown_f *provider_shutdown;
pciio_reset_f *reset;
pciio_write_gather_flush_f *write_gather_flush;
pciio_endian_set_f *endian_set;
pciio_priority_set_f *priority_set;
pciio_config_get_f *config_get;
pciio_config_set_f *config_set;
......@@ -526,9 +518,6 @@ typedef struct pciio_provider_s {
pciio_driver_reg_callback_f *driver_reg_callback;
pciio_driver_unreg_callback_f *driver_unreg_callback;
pciio_device_unregister_f *device_unregister;
/* GENERIC BUS INFO */
pciio_businfo_get_f *businfo_get;
} pciio_provider_t;
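[Editor's note] A bus provider (such as the PIC provider in pic.c) plugs into this table by supplying a function for each remaining member. The fragment below is a hypothetical, abbreviated initializer for illustration only; it uses entry points declared earlier in these headers, assumes PCIIO_ASIC_TYPE_PIC as the ASIC id, and is not the actual pci_pic_provider definition:

        /* Hypothetical provider-table fragment; a real provider fills in
         * every member, not just the ones shown here. */
        static pciio_provider_t example_pci_provider = {
                .provider_asic = PCIIO_ASIC_TYPE_PIC,   /* assumed asic id */
                .reset         = pcibr_reset,
                .config_get    = pcibr_config_get,
                .config_set    = pcibr_config_set,
                /* ... all other members must be filled in as well ... */
        };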
/* PCI devices use these standard PCI provider interfaces */
......@@ -556,12 +545,9 @@ extern pciio_intr_cpu_get_f pciio_intr_cpu_get;
extern pciio_provider_startup_f pciio_provider_startup;
extern pciio_provider_shutdown_f pciio_provider_shutdown;
extern pciio_reset_f pciio_reset;
extern pciio_write_gather_flush_f pciio_write_gather_flush;
extern pciio_endian_set_f pciio_endian_set;
extern pciio_priority_set_f pciio_priority_set;
extern pciio_config_get_f pciio_config_get;
extern pciio_config_set_f pciio_config_set;
extern pciio_error_extract_f pciio_error_extract;
/* Widgetdev in the IOERROR structure is encoded as follows.
* +---------------------------+
......@@ -706,10 +692,8 @@ extern pciio_provider_t *pciio_provider_fns_get(vertex_hdl_t provider);
/* Generic pci slot information access interface */
extern pciio_info_t pciio_info_chk(vertex_hdl_t vhdl);
extern pciio_info_t pciio_info_get(vertex_hdl_t vhdl);
extern pciio_info_t pciio_hostinfo_get(vertex_hdl_t vhdl);
extern void pciio_info_set(vertex_hdl_t vhdl, pciio_info_t widget_info);
extern vertex_hdl_t pciio_info_dev_get(pciio_info_t pciio_info);
extern vertex_hdl_t pciio_info_hostdev_get(pciio_info_t pciio_info);
extern pciio_bus_t pciio_info_bus_get(pciio_info_t pciio_info);
extern pciio_slot_t pciio_info_slot_get(pciio_info_t pciio_info);
extern pciio_function_t pciio_info_function_get(pciio_info_t pciio_info);
......@@ -753,8 +737,7 @@ sn_pci_set_vchan(struct pci_dev *pci_dev,
if (vchan == 1) {
/* Set Bit 57 */
*addr |= (1UL << 57);
}
else {
} else {
/* Clear Bit 57 */
*addr &= ~(1UL << 57);
}
......
......@@ -66,7 +66,7 @@
#include <asm/sn/pci/pciio.h>
/*********************************************************************
/*
* bus provider function table
*
* Normally, this table is only handed off explicitly
......@@ -81,705 +81,178 @@
extern pciio_provider_t pci_pic_provider;
/*********************************************************************
/*
* misc defines
*
*/
#define PIC_WIDGET_PART_NUM_BUS0 0xd102
#define PIC_WIDGET_PART_NUM_BUS1 0xd112
#define PIC_WIDGET_MFGR_NUM 0x24
#define PIC_WIDGET_REV_A 0x1
#define PIC_WIDGET_REV_B 0x2
#define PIC_WIDGET_REV_C 0x3
#define PIC_XTALK_ADDR_MASK 0x0000FFFFFFFFFFFF
#define PIC_INTERNAL_ATES 1024
#define IS_PIC_PART_REV_A(rev) \
((rev == (PIC_WIDGET_PART_NUM_BUS0 << 4 | PIC_WIDGET_REV_A)) || \
(rev == (PIC_WIDGET_PART_NUM_BUS1 << 4 | PIC_WIDGET_REV_A)))
#define IS_PIC_PART_REV_B(rev) \
((rev == (PIC_WIDGET_PART_NUM_BUS0 << 4 | PIC_WIDGET_REV_B)) || \
(rev == (PIC_WIDGET_PART_NUM_BUS1 << 4 | PIC_WIDGET_REV_B)))
#define IS_PIC_PART_REV_C(rev) \
((rev == (PIC_WIDGET_PART_NUM_BUS0 << 4 | PIC_WIDGET_REV_C)) || \
(rev == (PIC_WIDGET_PART_NUM_BUS1 << 4 | PIC_WIDGET_REV_C)))
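[Editor's note] The value tested here is the widget part number shifted left four bits and OR'ed with the silicon revision, so a bus-0 rev-A PIC encodes as 0xd1021 and a bus-1 rev-B PIC as 0xd1122. A tiny illustrative check (the caller and rev source are hypothetical):

        /* Example: decide whether a PIC needs a rev-A-only workaround.
         * 'rev' would come from the widget id register; 0xd1021 is the
         * bus-0, rev-A encoding (0xd102 << 4 | 0x1). */
        static int
        example_needs_rev_a_war(int rev)
        {
                return IS_PIC_PART_REV_A(rev);
        }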
/*********************************************************************
* register offset defines
/*
* misc typedefs
*
*/
/* Identification Register -- read-only */
#define PIC_IDENTIFICATION 0x00000000
/* Status Register -- read-only */
#define PIC_STATUS 0x00000008
/* Upper Address Holding Register Bus Side Errors -- read-only */
#define PIC_UPPER_ADDR_REG_BUS_SIDE_ERRS 0x00000010
/* Lower Address Holding Register Bus Side Errors -- read-only */
#define PIC_LOWER_ADDR_REG_BUS_SIDE_ERRS 0x00000018
/* Control Register -- read/write */
#define PIC_CONTROL 0x00000020
/* PCI Request Time-out Value Register -- read/write */
#define PIC_PCI_REQ_TIME_OUT_VALUE 0x00000028
/* Interrupt Destination Upper Address Register -- read/write */
#define PIC_INTR_DEST_UPPER_ADDR 0x00000030
/* Interrupt Destination Lower Address Register -- read/write */
#define PIC_INTR_DEST_LOWER_ADDR 0x00000038
/* Command Word Holding Register Bus Side -- read-only */
#define PIC_CMD_WORD_REG_BUS_SIDE 0x00000040
/* LLP Configuration Register (Bus 0 Only) -- read/write */
#define PIC_LLP_CFG_REG_(BUS_0_ONLY) 0x00000048
/* PCI Target Flush Register -- read-only */
#define PIC_PCI_TARGET_FLUSH 0x00000050
/* Command Word Holding Register Link Side -- read-only */
#define PIC_CMD_WORD_REG_LINK_SIDE 0x00000058
/* Response Buffer Error Upper Address Holding -- read-only */
#define PIC_RESP_BUF_ERR_UPPER_ADDR_ 0x00000060
/* Response Buffer Error Lower Address Holding -- read-only */
#define PIC_RESP_BUF_ERR_LOWER_ADDR_ 0x00000068
/* Test Pin Control Register -- read/write */
#define PIC_TEST_PIN_CONTROL 0x00000070
/* Address Holding Register Link Side Errors -- read-only */
#define PIC_ADDR_REG_LINK_SIDE_ERRS 0x00000078
/* Direct Map Register -- read/write */
#define PIC_DIRECT_MAP 0x00000080
/* PCI Map Fault Address Register -- read-only */
#define PIC_PCI_MAP_FAULT_ADDR 0x00000090
/* Arbitration Priority Register -- read/write */
#define PIC_ARBITRATION_PRIORITY 0x000000A0
/* Internal Ram Parity Error Register -- read-only */
#define PIC_INTERNAL_RAM_PARITY_ERR 0x000000B0
/* PCI Time-out Register -- read/write */
#define PIC_PCI_TIME_OUT 0x000000C0
/* PCI Type 1 Configuration Register -- read/write */
#define PIC_PCI_TYPE_1_CFG 0x000000C8
/* PCI Bus Error Upper Address Holding Register -- read-only */
#define PIC_PCI_BUS_ERR_UPPER_ADDR_ 0x000000D0
/* PCI Bus Error Lower Address Holding Register -- read-only */
#define PIC_PCI_BUS_ERR_LOWER_ADDR_ 0x000000D8
/* PCIX Error Address Register -- read-only */
#define PIC_PCIX_ERR_ADDR 0x000000E0
/* PCIX Error Attribute Register -- read-only */
#define PIC_PCIX_ERR_ATTRIBUTE 0x000000E8
/* PCIX Error Data Register -- read-only */
#define PIC_PCIX_ERR_DATA 0x000000F0
/* PCIX Read Request Timeout Error Register -- read-only */
#define PIC_PCIX_READ_REQ_TIMEOUT_ERR 0x000000F8
/* Interrupt Status Register -- read-only */
#define PIC_INTR_STATUS 0x00000100
/* Interrupt Enable Register -- read/write */
#define PIC_INTR_ENABLE 0x00000108
/* Reset Interrupt Status Register -- write-only */
#define PIC_RESET_INTR_STATUS 0x00000110
/* Interrupt Mode Register -- read/write */
#define PIC_INTR_MODE 0x00000118
/* Interrupt Device Register -- read/write */
#define PIC_INTR_DEVICE 0x00000120
/* Host Error Field Register -- read/write */
#define PIC_HOST_ERR_FIELD 0x00000128
/* Interrupt Pin 0 Host Address Register -- read/write */
#define PIC_INTR_PIN_0_HOST_ADDR 0x00000130
/* Interrupt Pin 1 Host Address Register -- read/write */
#define PIC_INTR_PIN_1_HOST_ADDR 0x00000138
/* Interrupt Pin 2 Host Address Register -- read/write */
#define PIC_INTR_PIN_2_HOST_ADDR 0x00000140
/* Interrupt Pin 3 Host Address Register -- read/write */
#define PIC_INTR_PIN_3_HOST_ADDR 0x00000148
/* Interrupt Pin 4 Host Address Register -- read/write */
#define PIC_INTR_PIN_4_HOST_ADDR 0x00000150
/* Interrupt Pin 5 Host Address Register -- read/write */
#define PIC_INTR_PIN_5_HOST_ADDR 0x00000158
/* Interrupt Pin 6 Host Address Register -- read/write */
#define PIC_INTR_PIN_6_HOST_ADDR 0x00000160
/* Interrupt Pin 7 Host Address Register -- read/write */
#define PIC_INTR_PIN_7_HOST_ADDR 0x00000168
/* Error Interrupt View Register -- read-only */
#define PIC_ERR_INTR_VIEW 0x00000170
/* Multiple Interrupt Register -- read-only */
#define PIC_MULTIPLE_INTR 0x00000178
/* Force Always Interrupt 0 Register -- write-only */
#define PIC_FORCE_ALWAYS_INTR_0 0x00000180
/* Force Always Interrupt 1 Register -- write-only */
#define PIC_FORCE_ALWAYS_INTR_1 0x00000188
/* Force Always Interrupt 2 Register -- write-only */
#define PIC_FORCE_ALWAYS_INTR_2 0x00000190
/* Force Always Interrupt 3 Register -- write-only */
#define PIC_FORCE_ALWAYS_INTR_3 0x00000198
/* Force Always Interrupt 4 Register -- write-only */
#define PIC_FORCE_ALWAYS_INTR_4 0x000001A0
/* Force Always Interrupt 5 Register -- write-only */
#define PIC_FORCE_ALWAYS_INTR_5 0x000001A8
/* Force Always Interrupt 6 Register -- write-only */
#define PIC_FORCE_ALWAYS_INTR_6 0x000001B0
/* Force Always Interrupt 7 Register -- write-only */
#define PIC_FORCE_ALWAYS_INTR_7 0x000001B8
/* Force w/Pin Interrupt 0 Register -- write-only */
#define PIC_FORCE_PIN_INTR_0 0x000001C0
/* Force w/Pin Interrupt 1 Register -- write-only */
#define PIC_FORCE_PIN_INTR_1 0x000001C8
/* Force w/Pin Interrupt 2 Register -- write-only */
#define PIC_FORCE_PIN_INTR_2 0x000001D0
/* Force w/Pin Interrupt 3 Register -- write-only */
#define PIC_FORCE_PIN_INTR_3 0x000001D8
/* Force w/Pin Interrupt 4 Register -- write-only */
#define PIC_FORCE_PIN_INTR_4 0x000001E0
/* Force w/Pin Interrupt 5 Register -- write-only */
#define PIC_FORCE_PIN_INTR_5 0x000001E8
/* Force w/Pin Interrupt 6 Register -- write-only */
#define PIC_FORCE_PIN_INTR_6 0x000001F0
/* Force w/Pin Interrupt 7 Register -- write-only */
#define PIC_FORCE_PIN_INTR_7 0x000001F8
/* Device 0 Register -- read/write */
#define PIC_DEVICE_0 0x00000200
/* Device 1 Register -- read/write */
#define PIC_DEVICE_1 0x00000208
/* Device 2 Register -- read/write */
#define PIC_DEVICE_2 0x00000210
/* Device 3 Register -- read/write */
#define PIC_DEVICE_3 0x00000218
/* Device 0 Write Request Buffer Register -- read-only */
#define PIC_DEVICE_0_WRITE_REQ_BUF 0x00000240
/* Device 1 Write Request Buffer Register -- read-only */
#define PIC_DEVICE_1_WRITE_REQ_BUF 0x00000248
/* Device 2 Write Request Buffer Register -- read-only */
#define PIC_DEVICE_2_WRITE_REQ_BUF 0x00000250
/* Device 3 Write Request Buffer Register -- read-only */
#define PIC_DEVICE_3_WRITE_REQ_BUF 0x00000258
/* Even Device Response Buffer Register -- read/write */
#define PIC_EVEN_DEVICE_RESP_BUF 0x00000280
/* Odd Device Response Buffer Register -- read/write */
#define PIC_ODD_DEVICE_RESP_BUF 0x00000288
/* Read Response Buffer Status Register -- read-only */
#define PIC_READ_RESP_BUF_STATUS 0x00000290
/* Read Response Buffer Clear Register -- write-only */
#define PIC_READ_RESP_BUF_CLEAR 0x00000298
/* PCI RR 0 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_0_UPPER_ADDR_MATCH 0x00000300
/* PCI RR 0 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_0_LOWER_ADDR_MATCH 0x00000308
/* PCI RR 1 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_1_UPPER_ADDR_MATCH 0x00000310
/* PCI RR 1 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_1_LOWER_ADDR_MATCH 0x00000318
/* PCI RR 2 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_2_UPPER_ADDR_MATCH 0x00000320
/* PCI RR 2 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_2_LOWER_ADDR_MATCH 0x00000328
/* PCI RR 3 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_3_UPPER_ADDR_MATCH 0x00000330
/* PCI RR 3 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_3_LOWER_ADDR_MATCH 0x00000338
/* PCI RR 4 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_4_UPPER_ADDR_MATCH 0x00000340
/* PCI RR 4 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_4_LOWER_ADDR_MATCH 0x00000348
/* PCI RR 5 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_5_UPPER_ADDR_MATCH 0x00000350
/* PCI RR 5 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_5_LOWER_ADDR_MATCH 0x00000358
/* PCI RR 6 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_6_UPPER_ADDR_MATCH 0x00000360
/* PCI RR 6 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_6_LOWER_ADDR_MATCH 0x00000368
/* PCI RR 7 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_7_UPPER_ADDR_MATCH 0x00000370
/* PCI RR 7 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_7_LOWER_ADDR_MATCH 0x00000378
/* PCI RR 8 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_8_UPPER_ADDR_MATCH 0x00000380
/* PCI RR 8 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_8_LOWER_ADDR_MATCH 0x00000388
/* PCI RR 9 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_9_UPPER_ADDR_MATCH 0x00000390
/* PCI RR 9 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_9_LOWER_ADDR_MATCH 0x00000398
/* PCI RR 10 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_10_UPPER_ADDR_MATCH 0x000003A0
/* PCI RR 10 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_10_LOWER_ADDR_MATCH 0x000003A8
/* PCI RR 11 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_11_UPPER_ADDR_MATCH 0x000003B0
/* PCI RR 11 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_11_LOWER_ADDR_MATCH 0x000003B8
/* PCI RR 12 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_12_UPPER_ADDR_MATCH 0x000003C0
/* PCI RR 12 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_12_LOWER_ADDR_MATCH 0x000003C8
/* PCI RR 13 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_13_UPPER_ADDR_MATCH 0x000003D0
/* PCI RR 13 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_13_LOWER_ADDR_MATCH 0x000003D8
/* PCI RR 14 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_14_UPPER_ADDR_MATCH 0x000003E0
/* PCI RR 14 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_14_LOWER_ADDR_MATCH 0x000003E8
/* PCI RR 15 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_15_UPPER_ADDR_MATCH 0x000003F0
/* PCI RR 15 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_15_LOWER_ADDR_MATCH 0x000003F8
/* Buffer 0 Flush Count with Data Touch Register -- read/write */
#define PIC_BUF_0_FLUSH_CNT_WITH_DATA_TOUCH 0x00000400
/* Buffer 0 Flush Count w/o Data Touch Register -- read/write */
#define PIC_BUF_0_FLUSH_CNT_W_O_DATA_TOUCH 0x00000408
/* Buffer 0 Request in Flight Count Register -- read/write */
#define PIC_BUF_0_REQ_IN_FLIGHT_CNT 0x00000410
/* Buffer 0 Prefetch Request Count Register -- read/write */
#define PIC_BUF_0_PREFETCH_REQ_CNT 0x00000418
/* Buffer 0 Total PCI Retry Count Register -- read/write */
#define PIC_BUF_0_TOTAL_PCI_RETRY_CNT 0x00000420
/* Buffer 0 Max PCI Retry Count Register -- read/write */
#define PIC_BUF_0_MAX_PCI_RETRY_CNT 0x00000428
/* Buffer 0 Max Latency Count Register -- read/write */
#define PIC_BUF_0_MAX_LATENCY_CNT 0x00000430
/* Buffer 0 Clear All Register -- read/write */
#define PIC_BUF_0_CLEAR_ALL 0x00000438
/* Buffer 2 Flush Count with Data Touch Register -- read/write */
#define PIC_BUF_2_FLUSH_CNT_WITH_DATA_TOUCH 0x00000440
/* Buffer 2 Flush Count w/o Data Touch Register -- read/write */
#define PIC_BUF_2_FLUSH_CNT_W_O_DATA_TOUCH 0x00000448
/* Buffer 2 Request in Flight Count Register -- read/write */
#define PIC_BUF_2_REQ_IN_FLIGHT_CNT 0x00000450
/* Buffer 2 Prefetch Request Count Register -- read/write */
#define PIC_BUF_2_PREFETCH_REQ_CNT 0x00000458
/* Buffer 2 Total PCI Retry Count Register -- read/write */
#define PIC_BUF_2_TOTAL_PCI_RETRY_CNT 0x00000460
/* Buffer 2 Max PCI Retry Count Register -- read/write */
#define PIC_BUF_2_MAX_PCI_RETRY_CNT 0x00000468
/* Buffer 2 Max Latency Count Register -- read/write */
#define PIC_BUF_2_MAX_LATENCY_CNT 0x00000470
/* Buffer 2 Clear All Register -- read/write */
#define PIC_BUF_2_CLEAR_ALL 0x00000478
/* Buffer 4 Flush Count with Data Touch Register -- read/write */
#define PIC_BUF_4_FLUSH_CNT_WITH_DATA_TOUCH 0x00000480
/* Buffer 4 Flush Count w/o Data Touch Register -- read/write */
#define PIC_BUF_4_FLUSH_CNT_W_O_DATA_TOUCH 0x00000488
/* Buffer 4 Request in Flight Count Register -- read/write */
#define PIC_BUF_4_REQ_IN_FLIGHT_CNT 0x00000490
/* Buffer 4 Prefetch Request Count Register -- read/write */
#define PIC_BUF_4_PREFETCH_REQ_CNT 0x00000498
/* Buffer 4 Total PCI Retry Count Register -- read/write */
#define PIC_BUF_4_TOTAL_PCI_RETRY_CNT 0x000004A0
/* Buffer 4 Max PCI Retry Count Register -- read/write */
#define PIC_BUF_4_MAX_PCI_RETRY_CNT 0x000004A8
/* Buffer 4 Max Latency Count Register -- read/write */
#define PIC_BUF_4_MAX_LATENCY_CNT 0x000004B0
/* Buffer 4 Clear All Register -- read/write */
#define PIC_BUF_4_CLEAR_ALL 0x000004B8
/* Buffer 6 Flush Count with Data Touch Register -- read/write */
#define PIC_BUF_6_FLUSH_CNT_WITH_DATA_TOUCH 0x000004C0
/* Buffer 6 Flush Count w/o Data Touch Register -- read/write */
#define PIC_BUF_6_FLUSH_CNT_W_O_DATA_TOUCH 0x000004C8
/* Buffer 6 Request in Flight Count Register -- read/write */
#define PIC_BUF_6_REQ_IN_FLIGHT_CNT 0x000004D0
/* Buffer 6 Prefetch Request Count Register -- read/write */
#define PIC_BUF_6_PREFETCH_REQ_CNT 0x000004D8
/* Buffer 6 Total PCI Retry Count Register -- read/write */
#define PIC_BUF_6_TOTAL_PCI_RETRY_CNT 0x000004E0
/* Buffer 6 Max PCI Retry Count Register -- read/write */
#define PIC_BUF_6_MAX_PCI_RETRY_CNT 0x000004E8
/* Buffer 6 Max Latency Count Register -- read/write */
#define PIC_BUF_6_MAX_LATENCY_CNT 0x000004F0
/* Buffer 6 Clear All Register -- read/write */
#define PIC_BUF_6_CLEAR_ALL 0x000004F8
/* Buffer 8 Flush Count with Data Touch Register -- read/write */
#define PIC_BUF_8_FLUSH_CNT_WITH_DATA_TOUCH 0x00000500
/* Buffer 8 Flush Count w/o Data Touch Register -- read/write */
#define PIC_BUF_8_FLUSH_CNT_W_O_DATA_TOUCH 0x00000508
/* Buffer 8 Request in Flight Count Register -- read/write */
#define PIC_BUF_8_REQ_IN_FLIGHT_CNT 0x00000510
/* Buffer 8 Prefetch Request Count Register -- read/write */
#define PIC_BUF_8_PREFETCH_REQ_CNT 0x00000518
/* Buffer 8 Total PCI Retry Count Register -- read/write */
#define PIC_BUF_8_TOTAL_PCI_RETRY_CNT 0x00000520
/* Buffer 8 Max PCI Retry Count Register -- read/write */
#define PIC_BUF_8_MAX_PCI_RETRY_CNT 0x00000528
/* Buffer 8 Max Latency Count Register -- read/write */
#define PIC_BUF_8_MAX_LATENCY_CNT 0x00000530
/* Buffer 8 Clear All Register -- read/write */
#define PIC_BUF_8_CLEAR_ALL 0x00000538
/* Buffer 10 Flush Count with Data Touch Register -- read/write */
#define PIC_BUF_10_FLUSH_CNT_WITH_DATA_TOUCH 0x00000540
/* Buffer 10 Flush Count w/o Data Touch Register -- read/write */
#define PIC_BUF_10_FLUSH_CNT_W_O_DATA_TOUCH 0x00000548
/* Buffer 10 Request in Flight Count Register -- read/write */
#define PIC_BUF_10_REQ_IN_FLIGHT_CNT 0x00000550
/* Buffer 10 Prefetch Request Count Register -- read/write */
#define PIC_BUF_10_PREFETCH_REQ_CNT 0x00000558
/* Buffer 10 Total PCI Retry Count Register -- read/write */
#define PIC_BUF_10_TOTAL_PCI_RETRY_CNT 0x00000560
/* Buffer 10 Max PCI Retry Count Register -- read/write */
#define PIC_BUF_10_MAX_PCI_RETRY_CNT 0x00000568
/* Buffer 10 Max Latency Count Register -- read/write */
#define PIC_BUF_10_MAX_LATENCY_CNT 0x00000570
/* Buffer 10 Clear All Register -- read/write */
#define PIC_BUF_10_CLEAR_ALL 0x00000578
/* Buffer 12 Flush Count with Data Touch Register -- read/write */
#define PIC_BUF_12_FLUSH_CNT_WITH_DATA_TOUCH 0x00000580
/* Buffer 12 Flush Count w/o Data Touch Register -- read/write */
#define PIC_BUF_12_FLUSH_CNT_W_O_DATA_TOUCH 0x00000588
/* Buffer 12 Request in Flight Count Register -- read/write */
#define PIC_BUF_12_REQ_IN_FLIGHT_CNT 0x00000590
/* Buffer 12 Prefetch Request Count Register -- read/write */
#define PIC_BUF_12_PREFETCH_REQ_CNT 0x00000598
/* Buffer 12 Total PCI Retry Count Register -- read/write */
#define PIC_BUF_12_TOTAL_PCI_RETRY_CNT 0x000005A0
/* Buffer 12 Max PCI Retry Count Register -- read/write */
#define PIC_BUF_12_MAX_PCI_RETRY_CNT 0x000005A8
/* Buffer 12 Max Latency Count Register -- read/write */
#define PIC_BUF_12_MAX_LATENCY_CNT 0x000005B0
/* Buffer 12 Clear All Register -- read/write */
#define PIC_BUF_12_CLEAR_ALL 0x000005B8
/* Buffer 14 Flush Count with Data Touch Register -- read/write */
#define PIC_BUF_14_FLUSH_CNT_WITH_DATA_TOUCH 0x000005C0
/* Buffer 14 Flush Count w/o Data Touch Register -- read/write */
#define PIC_BUF_14_FLUSH_CNT_W_O_DATA_TOUCH 0x000005C8
/* Buffer 14 Request in Flight Count Register -- read/write */
#define PIC_BUF_14_REQ_IN_FLIGHT_CNT 0x000005D0
/* Buffer 14 Prefetch Request Count Register -- read/write */
#define PIC_BUF_14_PREFETCH_REQ_CNT 0x000005D8
/* Buffer 14 Total PCI Retry Count Register -- read/write */
#define PIC_BUF_14_TOTAL_PCI_RETRY_CNT 0x000005E0
/* Buffer 14 Max PCI Retry Count Register -- read/write */
#define PIC_BUF_14_MAX_PCI_RETRY_CNT 0x000005E8
/* Buffer 14 Max Latency Count Register -- read/write */
#define PIC_BUF_14_MAX_LATENCY_CNT 0x000005F0
/* Buffer 14 Clear All Register -- read/write */
#define PIC_BUF_14_CLEAR_ALL 0x000005F8
/* PCIX Read Buffer 0 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_0_ADDR 0x00000A00
/* PCIX Read Buffer 0 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_0_ATTRIBUTE 0x00000A08
/* PCIX Read Buffer 1 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_1_ADDR 0x00000A10
/* PCIX Read Buffer 1 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_1_ATTRIBUTE 0x00000A18
/* PCIX Read Buffer 2 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_2_ADDR 0x00000A20
/* PCIX Read Buffer 2 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_2_ATTRIBUTE 0x00000A28
/* PCIX Read Buffer 3 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_3_ADDR 0x00000A30
/* PCIX Read Buffer 3 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_3_ATTRIBUTE 0x00000A38
/* PCIX Read Buffer 4 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_4_ADDR 0x00000A40
/* PCIX Read Buffer 4 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_4_ATTRIBUTE 0x00000A48
/* PCIX Read Buffer 5 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_5_ADDR 0x00000A50
/* PCIX Read Buffer 5 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_5_ATTRIBUTE 0x00000A58
/* PCIX Read Buffer 6 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_6_ADDR 0x00000A60
/* PCIX Read Buffer 6 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_6_ATTRIBUTE 0x00000A68
/* PCIX Read Buffer 7 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_7_ADDR 0x00000A70
/* PCIX Read Buffer 7 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_7_ATTRIBUTE 0x00000A78
/* PCIX Read Buffer 8 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_8_ADDR 0x00000A80
/* PCIX Read Buffer 8 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_8_ATTRIBUTE 0x00000A88
/* PCIX Read Buffer 9 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_9_ADDR 0x00000A90
/* PCIX Read Buffer 9 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_9_ATTRIBUTE 0x00000A98
/* PCIX Read Buffer 10 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_10_ADDR 0x00000AA0
/* PCIX Read Buffer 10 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_10_ATTRIBUTE 0x00000AA8
/* PCIX Read Buffer 11 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_11_ADDR 0x00000AB0
/* PCIX Read Buffer 11 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_11_ATTRIBUTE 0x00000AB8
/* PCIX Read Buffer 12 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_12_ADDR 0x00000AC0
/* PCIX Read Buffer 12 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_12_ATTRIBUTE 0x00000AC8
/* PCIX Read Buffer 13 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_13_ADDR 0x00000AD0
/* PCIX Read Buffer 13 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_13_ATTRIBUTE 0x00000AD8
/* PCIX Read Buffer 14 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_14_ADDR 0x00000AE0
/* PCIX Read Buffer 14 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_14_ATTRIBUTE 0x00000AE8
/* PCIX Read Buffer 15 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_15_ADDR 0x00000AF0
/* PCIX Read Buffer 15 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_15_ATTRIBUTE 0x00000AF8
/* PCIX Write Buffer 0 Address Register -- read-only */
#define PIC_PCIX_WRITE_BUF_0_ADDR 0x00000B00
/* PCIX Write Buffer 0 Attribute Register -- read-only */
#define PIC_PCIX_WRITE_BUF_0_ATTRIBUTE 0x00000B08
/* PCIX Write Buffer 0 Valid Register -- read-only */
#define PIC_PCIX_WRITE_BUF_0_VALID 0x00000B10
/* PCIX Write Buffer 1 Address Register -- read-only */
#define PIC_PCIX_WRITE_BUF_1_ADDR 0x00000B20
/* PCIX Write Buffer 1 Attribute Register -- read-only */
#define PIC_PCIX_WRITE_BUF_1_ATTRIBUTE 0x00000B28
/* PCIX Write Buffer 1 Valid Register -- read-only */
#define PIC_PCIX_WRITE_BUF_1_VALID 0x00000B30
/* PCIX Write Buffer 2 Address Register -- read-only */
#define PIC_PCIX_WRITE_BUF_2_ADDR 0x00000B40
/* PCIX Write Buffer 2 Attribute Register -- read-only */
#define PIC_PCIX_WRITE_BUF_2_ATTRIBUTE 0x00000B48
typedef uint64_t picreg_t;
typedef uint64_t picate_t;
/* PCIX Write Buffer 2 Valid Register -- read-only */
#define PIC_PCIX_WRITE_BUF_2_VALID 0x00000B50
/*
* PIC Bridge MMR defines
*/
/* PCIX Write Buffer 3 Address Register -- read-only */
#define PIC_PCIX_WRITE_BUF_3_ADDR 0x00000B60
/*
* PIC STATUS register offset 0x00000008
*/
/* PCIX Write Buffer 3 Attribute Register -- read-only */
#define PIC_PCIX_WRITE_BUF_3_ATTRIBUTE 0x00000B68
#define PIC_STAT_PCIX_ACTIVE_SHFT 33
/* PCIX Write Buffer 3 Valid Register -- read-only */
#define PIC_PCIX_WRITE_BUF_3_VALID 0x00000B70
/*
* PIC CONTROL register offset 0x00000020
*/
/* PCIX Write Buffer 4 Address Register -- read-only */
#define PIC_PCIX_WRITE_BUF_4_ADDR 0x00000B80
#define PIC_CTRL_PCI_SPEED_SHFT 4
#define PIC_CTRL_PCI_SPEED (0x3 << PIC_CTRL_PCI_SPEED_SHFT)
#define PIC_CTRL_PAGE_SIZE_SHFT 21
#define PIC_CTRL_PAGE_SIZE (0x1 << PIC_CTRL_PAGE_SIZE_SHFT)
/* PCIX Write Buffer 4 Attribute Register -- read-only */
#define PIC_PCIX_WRITE_BUF_4_ATTRIBUTE 0x00000B88
/* PCIX Write Buffer 4 Valid Register -- read-only */
#define PIC_PCIX_WRITE_BUF_4_VALID 0x00000B90
/*
* PIC Intr Destination Addr offset 0x00000038
*/
/* PCIX Write Buffer 5 Address Register -- read-only */
#define PIC_PCIX_WRITE_BUF_5_ADDR 0x00000BA0
#define PIC_INTR_DEST_ADDR 0x0000FFFFFFFFFFFF
#define PIC_INTR_DEST_TID_SHFT 48
#define PIC_INTR_DEST_TID (0xFull << PIC_INTR_DEST_TID_SHFT)
/* PCIX Write Buffer 5 Attribute Register -- read-only */
#define PIC_PCIX_WRITE_BUF_5_ATTRIBUTE 0x00000BA8
/*
 * PIC PCI Response Buffer offset 0x00000068
*/
#define PIC_RSP_BUF_ADDR 0x0000FFFFFFFFFFFF
#define PIC_RSP_BUF_NUM_SHFT 48
#define PIC_RSP_BUF_NUM (0xFull << PIC_RSP_BUF_NUM_SHFT)
#define PIC_RSP_BUF_DEV_NUM_SHFT 52
#define PIC_RSP_BUF_DEV_NUM (0x3ull << PIC_RSP_BUF_DEV_NUM_SHFT)
/* PCIX Write Buffer 5 Valid Register -- read-only */
#define PIC_PCIX_WRITE_BUF_5_VALID 0x00000BB0
/*
* PIC PCI DIRECT MAP register offset 0x00000080
*/
#define PIC_DIRMAP_DIROFF_SHFT 0
#define PIC_DIRMAP_DIROFF (0x1FFFF << PIC_DIRMAP_DIROFF_SHFT)
#define PIC_DIRMAP_ADD512_SHFT 17
#define PIC_DIRMAP_ADD512 (0x1 << PIC_DIRMAP_ADD512_SHFT)
#define PIC_DIRMAP_WID_SHFT 20
#define PIC_DIRMAP_WID (0xF << PIC_DIRMAP_WID_SHFT)
/* PCIX Write Buffer 6 Address Register -- read-only */
#define PIC_PCIX_WRITE_BUF_6_ADDR 0x00000BC0
#define PIC_DIRMAP_OFF_ADDRSHFT 31
/* PCIX Write Buffer 6 Attribute Register -- read-only */
#define PIC_PCIX_WRITE_BUF_6_ATTRIBUTE 0x00000BC8
/*
* Interrupt Status register offset 0x00000100
*/
#define PIC_ISR_PCIX_SPLIT_MSG_PE (0x1ull << 45)
#define PIC_ISR_PCIX_SPLIT_EMSG (0x1ull << 44)
#define PIC_ISR_PCIX_SPLIT_TO (0x1ull << 43)
#define PIC_ISR_PCIX_UNEX_COMP (0x1ull << 42)
#define PIC_ISR_INT_RAM_PERR (0x1ull << 41)
#define PIC_ISR_PCIX_ARB_ERR (0x1ull << 40)
#define PIC_ISR_PCIX_REQ_TOUT (0x1ull << 39)
#define PIC_ISR_PCIX_TABORT (0x1ull << 38)
#define PIC_ISR_PCIX_PERR (0x1ull << 37)
#define PIC_ISR_PCIX_SERR (0x1ull << 36)
#define PIC_ISR_PCIX_MRETRY (0x1ull << 35)
#define PIC_ISR_PCIX_MTOUT (0x1ull << 34)
#define PIC_ISR_PCIX_DA_PARITY (0x1ull << 33)
#define PIC_ISR_PCIX_AD_PARITY (0x1ull << 32)
#define PIC_ISR_PMU_PAGE_FAULT (0x1ull << 30)
#define PIC_ISR_UNEXP_RESP (0x1ull << 29)
#define PIC_ISR_BAD_XRESP_PKT (0x1ull << 28)
#define PIC_ISR_BAD_XREQ_PKT (0x1ull << 27)
#define PIC_ISR_RESP_XTLK_ERR (0x1ull << 26)
#define PIC_ISR_REQ_XTLK_ERR (0x1ull << 25)
#define PIC_ISR_INVLD_ADDR (0x1ull << 24)
#define PIC_ISR_UNSUPPORTED_XOP (0x1ull << 23)
#define PIC_ISR_XREQ_FIFO_OFLOW (0x1ull << 22)
#define PIC_ISR_LLP_REC_SNERR (0x1ull << 21)
#define PIC_ISR_LLP_REC_CBERR (0x1ull << 20)
#define PIC_ISR_LLP_RCTY (0x1ull << 19)
#define PIC_ISR_LLP_TX_RETRY (0x1ull << 18)
#define PIC_ISR_LLP_TCTY (0x1ull << 17)
#define PIC_ISR_PCI_ABORT (0x1ull << 15)
#define PIC_ISR_PCI_PARITY (0x1ull << 14)
#define PIC_ISR_PCI_SERR (0x1ull << 13)
#define PIC_ISR_PCI_PERR (0x1ull << 12)
#define PIC_ISR_PCI_MST_TIMEOUT (0x1ull << 11)
#define PIC_ISR_PCI_RETRY_CNT (0x1ull << 10)
#define PIC_ISR_XREAD_REQ_TIMEOUT (0x1ull << 9)
#define PIC_ISR_INT_MSK (0xffull << 0)
#define PIC_ISR_INT(x) (0x1ull << (x))
#define PIC_ISR_LINK_ERROR \
(PIC_ISR_LLP_REC_SNERR|PIC_ISR_LLP_REC_CBERR| \
PIC_ISR_LLP_RCTY|PIC_ISR_LLP_TX_RETRY| \
PIC_ISR_LLP_TCTY)
#define PIC_ISR_PCIBUS_PIOERR \
(PIC_ISR_PCI_MST_TIMEOUT|PIC_ISR_PCI_ABORT| \
PIC_ISR_PCIX_MTOUT|PIC_ISR_PCIX_TABORT)
#define PIC_ISR_PCIBUS_ERROR \
(PIC_ISR_PCIBUS_PIOERR|PIC_ISR_PCI_PERR| \
PIC_ISR_PCI_SERR|PIC_ISR_PCI_RETRY_CNT| \
PIC_ISR_PCI_PARITY|PIC_ISR_PCIX_PERR| \
PIC_ISR_PCIX_SERR|PIC_ISR_PCIX_MRETRY| \
PIC_ISR_PCIX_AD_PARITY|PIC_ISR_PCIX_DA_PARITY| \
PIC_ISR_PCIX_REQ_TOUT|PIC_ISR_PCIX_UNEX_COMP| \
PIC_ISR_PCIX_SPLIT_TO|PIC_ISR_PCIX_SPLIT_EMSG| \
PIC_ISR_PCIX_SPLIT_MSG_PE)
#define PIC_ISR_XTALK_ERROR \
(PIC_ISR_XREAD_REQ_TIMEOUT|PIC_ISR_XREQ_FIFO_OFLOW| \
PIC_ISR_UNSUPPORTED_XOP|PIC_ISR_INVLD_ADDR| \
PIC_ISR_REQ_XTLK_ERR|PIC_ISR_RESP_XTLK_ERR| \
PIC_ISR_BAD_XREQ_PKT|PIC_ISR_BAD_XRESP_PKT| \
PIC_ISR_UNEXP_RESP)
#define PIC_ISR_ERRORS \
(PIC_ISR_LINK_ERROR|PIC_ISR_PCIBUS_ERROR| \
PIC_ISR_XTALK_ERROR| \
PIC_ISR_PMU_PAGE_FAULT|PIC_ISR_INT_RAM_PERR)
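[Editor's note] The composite masks group the individual status bits so an error handler can classify an interrupt with a single AND. A minimal sketch, assuming pcireg_intr_status_get() (declared in pcibr_private.h above) returns the raw 64-bit status; this is not the driver's actual error path:

        /* Illustrative classification of a PIC interrupt. */
        static int
        example_classify_intr(pcibr_soft_t pcibr_soft)
        {
                uint64_t isr = pcireg_intr_status_get(pcibr_soft);

                if (isr & PIC_ISR_LINK_ERROR)
                        return 1;       /* LLP/link error on the xtalk side */
                if (isr & PIC_ISR_PCIBUS_ERROR)
                        return 2;       /* PCI or PCI-X bus error */
                if (isr & PIC_ISR_INT_MSK)
                        return 3;       /* one of the eight device pins, PIC_ISR_INT(x) */
                return 0;
        }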
/* PCIX Write Buffer 6 Valid Register -- read-only */
#define PIC_PCIX_WRITE_BUF_6_VALID 0x00000BD0
/*
* PIC RESET INTR register offset 0x00000110
*/
/* PCIX Write Buffer 7 Address Register -- read-only */
#define PIC_PCIX_WRITE_BUF_7_ADDR 0x00000BE0
#define PIC_IRR_ALL_CLR 0xffffffffffffffff
/* PCIX Write Buffer 7 Attribute Register -- read-only */
#define PIC_PCIX_WRITE_BUF_7_ATTRIBUTE 0x00000BE8
/*
* PIC PCI Host Intr Addr offset 0x00000130 - 0x00000168
*/
#define PIC_HOST_INTR_ADDR 0x0000FFFFFFFFFFFF
#define PIC_HOST_INTR_FLD_SHFT 48
#define PIC_HOST_INTR_FLD (0xFFull << PIC_HOST_INTR_FLD_SHFT)
/* PCIX Write Buffer 7 Valid Register -- read-only */
#define PIC_PCIX_WRITE_BUF_7_VALID 0x00000BF0
/*********************************************************************
* misc typedefs
*
/*
* PIC MMR structure mapping
*/
typedef uint64_t picreg_t;
typedef uint64_t picate_t;
/*****************************************************************************
*********************** PIC MMR structure mapping ***************************
*****************************************************************************/
/* NOTE: PIC WAR. PV#854697. PIC does not allow writes just to [31:0]
* of a 64-bit register. When writing PIC registers, always write the
......
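[Editor's note] The note above (cut off by the end of the hunk) describes the PV#854697 workaround: PIC does not accept stores to only the low 32 bits of a 64-bit register, so every update has to be a full 64-bit access. A minimal sketch of the idea, using a hypothetical helper rather than the driver's real accessors:

        /* Hypothetical read-modify-write that always touches all 64 bits,
         * as the PV#854697 note requires; 'reg' points into PIC MMR space. */
        static inline void
        example_pic_reg_update(volatile picreg_t *reg, picreg_t set, picreg_t clr)
        {
                picreg_t val = *reg;    /* full 64-bit read */

                val &= ~clr;
                val |= set;
                *reg = val;             /* full 64-bit write, never a 32-bit store */
        }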
......@@ -19,8 +19,8 @@
#define SGI_ACPI_SCI_INT (0x34)
#define SGI_XPC_NOTIFY (0xe7)
#define IA64_SN2_FIRST_DEVICE_VECTOR (0x34)
#define IA64_SN2_LAST_DEVICE_VECTOR (0xe7)
#define IA64_SN2_FIRST_DEVICE_VECTOR (0x37)
#define IA64_SN2_LAST_DEVICE_VECTOR (0xe6)
#define SN2_IRQ_RESERVED (0x1)
#define SN2_IRQ_CONNECTED (0x2)
......
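[Editor's note] The device-vector window shrinks by three at the bottom and one at the top; a plausible reading is that the reserved SGI vectors 0x34 (SGI_ACPI_SCI_INT) and 0xe7 (SGI_XPC_NOTIFY) must no longer fall inside the range handed out to devices, though the patch itself does not say so. A tiny range check, purely illustrative and not part of the patch:

        /* Illustrative helper: is 'vector' inside the SN2 device-vector window? */
        static inline int
        example_is_sn2_device_vector(int vector)
        {
                return vector >= IA64_SN2_FIRST_DEVICE_VECTOR &&
                       vector <= IA64_SN2_LAST_DEVICE_VECTOR;
        }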