Commit 364545de authored by Jesse Barnes's avatar Jesse Barnes Committed by David Mosberger

[PATCH] ia64: SN update

And here's the SN specific part of the update.  This should get an SN2
compile all the way to the link stage, where I still have some devfs
stuff to cleanup.
parent 2853991a
...@@ -52,11 +52,12 @@ core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/ arch/ia64/hp/common/ arch/ia64/hp ...@@ -52,11 +52,12 @@ core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/ arch/ia64/hp/common/ arch/ia64/hp
core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/ core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
core-$(CONFIG_IA64_SGI_SN) += arch/ia64/sn/kernel/ \ core-$(CONFIG_IA64_SGI_SN) += arch/ia64/sn/kernel/ \
arch/ia64/sn/io/ \ arch/ia64/sn/io/ \
arch/ia64/sn/io/sn2/ \
arch/ia64/sn/io/sn2/pcibr/ \
arch/ia64/sn/kernel/sn2/ arch/ia64/sn/kernel/sn2/
drivers-$(CONFIG_PCI) += arch/ia64/pci/ drivers-$(CONFIG_PCI) += arch/ia64/pci/
drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/ drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
drivers-$(CONFIG_IA64_SGI_SN) += arch/ia64/sn/fakeprom/
boot := arch/ia64/boot boot := arch/ia64/boot
tools := arch/ia64/tools tools := arch/ia64/tools
......
...@@ -17,6 +17,6 @@ obj-$(CONFIG_IA64_SGI_SN) += stubs.o sgi_if.o xswitch.o klgraph_hack.o \ ...@@ -17,6 +17,6 @@ obj-$(CONFIG_IA64_SGI_SN) += stubs.o sgi_if.o xswitch.o klgraph_hack.o \
hcl.o labelcl.o invent.o sgi_io_sim.o \ hcl.o labelcl.o invent.o sgi_io_sim.o \
klgraph_hack.o hcl_util.o cdl.o hubdev.o hubspc.o \ klgraph_hack.o hcl_util.o cdl.o hubdev.o hubspc.o \
alenlist.o pci.o pci_dma.o ate_utils.o \ alenlist.o pci.o pci_dma.o ate_utils.o \
ifconfig_net.o io.o ifconfig_bus.o ifconfig_net.o io.o ioconfig_bus.o
obj-$(CONFIG_PCIBA) += pciba.o obj-$(CONFIG_PCIBA) += pciba.o
/* $Id$
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* ioconfig_bus - SGI's Persistent PCI Bus Numbering.
*
* Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <asm/sn/sgi.h>
#include <linux/devfs_fs.h>
#include <linux/devfs_fs_kernel.h>
#include <asm/io.h>
#include <asm/sn/iograph.h>
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h>
#include <asm//sn/sn_sal.h>
#include <asm/sn/addrs.h>
#include <asm/sn/ioconfig_bus.h>
#define SGI_IOCONFIG_BUS "SGI-PERSISTENT PCI BUS NUMBERING"
#define SGI_IOCONFIG_BUS_VERSION "1.0"
/*
* Some Global definitions.
*/
devfs_handle_t ioconfig_bus_handle = NULL;
unsigned long ioconfig_bus_debug = 0;
#ifdef IOCONFIG_BUS_DEBUG
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif
u64 ioconfig_file = 0;
u64 ioconfig_file_size = 0;
u64 ioconfig_activated = 0;
char ioconfig_kernopts[128];
/*
* For debugging purpose .. hardcode a table ..
*/
struct ascii_moduleid *ioconfig_bus_table;
u64 ioconfig_bus_table_size = 0;
int free_entry = 0;
int new_entry = 0;
int next_basebus_number = 0;
/*
 * ioconfig_get_busnum - look up (or assign) the base PCI bus number
 * for an IO brick identified by its module id string.
 *
 * Matches on characters 0-2 and 4-5 of the module id; character 3 is
 * skipped (it is overwritten with '#' when a new entry is recorded
 * below).  On a hit, *bus_num = index * 0x10.  On a miss with a valid
 * id, the id is appended to ioconfig_bus_table and a fresh bus number
 * is assigned.  *bus_num is left at -1 for an empty module id.
 *
 * NOTE(review): free_entry is never bounded against the size of
 * ioconfig_bus_table (512 bytes, kmalloc'ed in ioconfig_bus_init), so
 * a sufficiently large config could overrun the table -- TODO confirm.
 */
void
ioconfig_get_busnum(char *io_moduleid, int *bus_num)
{
	struct ascii_moduleid *temp;
	int index;

	DBG("ioconfig_get_busnum io_moduleid %s\n", io_moduleid);

	*bus_num = -1;
	temp = ioconfig_bus_table;
	for (index = 0; index < free_entry; temp++, index++) {
		if ( (io_moduleid[0] == temp->io_moduleid[0]) &&
		     (io_moduleid[1] == temp->io_moduleid[1]) &&
		     (io_moduleid[2] == temp->io_moduleid[2]) &&
		     (io_moduleid[4] == temp->io_moduleid[4]) &&
		     (io_moduleid[5] == temp->io_moduleid[5]) ) {
			*bus_num = index * 0x10;
			return;
		}
	}

	/*
	 * New IO Brick encountered.
	 */
	if (((int)io_moduleid[0]) == 0) {
		DBG("ioconfig_get_busnum: Invalid Module Id given %s\n", io_moduleid);
		return;
	}

	io_moduleid[3] = '#';
	strcpy((char *)&(ioconfig_bus_table[free_entry].io_moduleid), io_moduleid);
	*bus_num = free_entry * 0x10;
	free_entry++;
}
/*
 * dump_ioconfig_table - debug-print every module id currently stored
 * in ioconfig_bus_table (first free_entry entries).
 */
void
dump_ioconfig_table()
{
	int i;
	struct ascii_moduleid *entry = ioconfig_bus_table;

	for (i = 0; i < free_entry; i++, entry++)
		DBG("ASSCI Module ID %s\n", entry->io_moduleid);
}
/*
* nextline
* This routine returns the nextline in the buffer.
*/
/*
 * nextline - copy the next line of 'buffer' into 'line' and point
 * *next at the start of the following line.
 *
 * Returns 0 when the buffer is exhausted (first byte is NUL), else 1.
 * A terminating '\n', when present, IS stored into 'line' (callers
 * rely on it to detect blank lines) but 'line' is NOT NUL-terminated
 * here -- callers zero the buffer before each call.
 */
int nextline(char *buffer, char **next, char *line)
{
	char *src = buffer;

	if (*src == '\0')
		return 0;

	while (*src != '\0') {
		*line = *src;		/* stores the '\n' too, on purpose */
		if (*src == '\n')
			break;
		src++;
		line++;
	}

	*next = (*src == '\0') ? src : src + 1;
	return 1;
}
/*
* build_pcibus_name
* This routine parses the ioconfig contents read into
* memory by ioconfig command in EFI and builds the
* persistent pci bus naming table.
*/
/*
 * build_moduleid_table - parse the ioconfig contents (one module id per
 * line) into 'table', bumping the global free_entry for each id found.
 * Blank lines and '#' comment lines are skipped.  new_entry is set to
 * the final count when parsing completes.
 *
 * NOTE(review): both kmalloc() results are used without NULL checks,
 * and sscanf()'s return value 'rc' is ignored -- an unparsable line
 * would strcpy() the previous contents of 'name'.  TODO confirm.
 */
void
build_moduleid_table(char *file_contents, struct ascii_moduleid *table)
{
	/*
	 * Read the whole file into memory.
	 */
	int rc;
	char *name;
	char *temp;
	char *next;
	char *current;
	char *line;
	struct ascii_moduleid *moduleid;

	line = kmalloc(256, GFP_KERNEL);
	memset(line, 0,256);
	name = kmalloc(125, GFP_KERNEL);
	memset(name, 0, 125);
	moduleid = table;
	current = file_contents;
	while (nextline(current, &next, line)){
		DBG("current 0x%lx next 0x%lx\n", current, next);

		temp = line;
		/*
		 * Skip all leading Blank lines ..
		 */
		while (isspace(*temp))
			if (*temp != '\n')
				temp++;
			else
				break;

		if (*temp == '\n') {
			/* blank line: move on to the next one */
			current = next;
			memset(line, 0, 256);
			continue;
		}

		/*
		 * Skip comment lines
		 */
		if (*temp == '#') {
			current = next;
			memset(line, 0, 256);
			continue;
		}

		/*
		 * Get the next free entry in the table.
		 */
		rc = sscanf(temp, "%s", name);
		strcpy(&moduleid->io_moduleid[0], name);
		DBG("Found %s\n", name);
		moduleid++;
		free_entry++;
		current = next;
		memset(line, 0, 256);
	}

	new_entry = free_entry;
	kfree(line);
	kfree(name);

	return;
}
/*
 * ioconfig_bus_init - locate and load the persistent bus numbering
 * configuration.
 *
 * Probes each node via SAL (SN_SAL_BUS_CONFIG) until a config file
 * address comes back, then builds the module id table either from
 * "ioconfig=" kernel boot options (which take precedence) or from the
 * file handed back by the prom.
 *
 * NOTE(review): the kmalloc() result for ioconfig_bus_table is used
 * without a NULL check.
 */
void
ioconfig_bus_init(void)
{
	struct ia64_sal_retval ret_stuff;
	u64 *temp;
	int cnode;

	DBG("ioconfig_bus_init called.\n");

	for (cnode = 0; cnode < numnodes; cnode++) {
		nasid_t nasid;
		/*
		 * Make SAL call to get the address of the bus configuration table.
		 */
		ret_stuff.status = (uint64_t)0;
		ret_stuff.v0 = (uint64_t)0;
		ret_stuff.v1 = (uint64_t)0;
		ret_stuff.v2 = (uint64_t)0;
		nasid = COMPACT_TO_NASID_NODEID(cnode);
		SAL_CALL(ret_stuff, SN_SAL_BUS_CONFIG, 0, nasid, 0, 0, 0, 0, 0);
		temp = (u64 *)TO_NODE_CAC(nasid, ret_stuff.v0);
		ioconfig_file = *temp;
		DBG("ioconfig_bus_init: Nasid %d ret_stuff.v0 0x%lx\n", nasid,
			ret_stuff.v0);
		if (ioconfig_file) {
			/* found a config file: record size, stop probing nodes */
			ioconfig_file_size = ret_stuff.v1;
			ioconfig_file = (ioconfig_file | CACHEABLE_MEM_SPACE);
			ioconfig_activated = 1;
			break;
		}
	}

	DBG("ioconfig_bus_init: ret_stuff.v0 %p ioconfig_file %p %d\n",
		ret_stuff.v0, (void *)ioconfig_file, (int)ioconfig_file_size);

	ioconfig_bus_table = kmalloc( 512, GFP_KERNEL );
	memset(ioconfig_bus_table, 0, 512);

	/*
	 * If ioconfig options are given on the bootline .. take it.
	 */
	if (*ioconfig_kernopts != '\0') {
		/*
		 * ioconfig="..." kernel options given.
		 */
		DBG("ioconfig_bus_init: Kernel Options given.\n");
		(void) build_moduleid_table((char *)ioconfig_kernopts, ioconfig_bus_table);
		(void) dump_ioconfig_table(ioconfig_bus_table);
		return;
	}

	if (ioconfig_activated) {
		DBG("ioconfig_bus_init: ioconfig file given.\n");
		(void) build_moduleid_table((char *)ioconfig_file, ioconfig_bus_table);
		(void) dump_ioconfig_table(ioconfig_bus_table);
	} else {
		DBG("ioconfig_bus_init: ioconfig command not executed in prom\n");
	}
}
/*
 * ioconfig_bus_new_entries - print any module ids discovered since
 * boot (entries past new_entry) so the administrator can add them to
 * the persistent bus numbering config file.  No-op unless the config
 * was activated and new entries exist.
 */
void
ioconfig_bus_new_entries(void)
{
	int i;
	struct ascii_moduleid *entry;

	if (!ioconfig_activated || free_entry <= new_entry)
		return;

	printk("### Please add the following new IO Bricks Module ID \n");
	printk("### to your Persistent Bus Numbering Config File\n");

	entry = &ioconfig_bus_table[new_entry];
	for (i = new_entry; i < free_entry; i++, entry++)
		printk("%s\n", entry);
	printk("### End\n");
}
/*
 * ioconfig_bus_ioctl - report newly discovered IO bricks to user space.
 *
 * Copies an ioconfig_parm in from user space, fills in the count of
 * entries added since boot (free_entry - new_entry) plus the activation
 * flag, copies the struct back, then copies the new table entries out
 * to the user-supplied parm.buffer.
 *
 * NOTE(review): the copy_from_user/copy_to_user return values are
 * ignored, and 'cmd' is never examined -- every ioctl number performs
 * the same action.  TODO confirm.
 */
static int ioconfig_bus_ioctl(struct inode * inode, struct file * file,
	unsigned int cmd, unsigned long arg)
{
	struct ioconfig_parm parm;

	/*
	 * Copy in the parameters.
	 */
	copy_from_user(&parm, (char *)arg, sizeof(struct ioconfig_parm));
	parm.number = free_entry - new_entry;
	parm.ioconfig_activated = ioconfig_activated;
	copy_to_user((char *)arg, &parm, sizeof(struct ioconfig_parm));
	copy_to_user((char *)parm.buffer, &ioconfig_bus_table[new_entry], sizeof(struct ascii_moduleid) * (free_entry - new_entry));

	return 0;
}
/*
* ioconfig_bus_open - Opens the special device node "/dev/hw/.ioconfig_bus".
*/
/*
 * ioconfig_bus_open - open handler for "/dev/hw/.ioconfig_bus".
 * Keeps no per-open state; only emits an optional debug trace.
 */
static int ioconfig_bus_open(struct inode * inode, struct file * filp)
{
	if (ioconfig_bus_debug)
		DBG("ioconfig_bus_open called.\n");

	return 0;
}
/*
* ioconfig_bus_close - Closes the special device node "/dev/hw/.ioconfig_bus".
*/
/*
 * ioconfig_bus_close - release handler for "/dev/hw/.ioconfig_bus".
 * Nothing to tear down; only emits an optional debug trace.
 */
static int ioconfig_bus_close(struct inode * inode, struct file * filp)
{
	if (ioconfig_bus_debug)
		DBG("ioconfig_bus_close called.\n");

	return 0;
}
/*
 * File operations for the ioconfig_bus device node.  Uses GNU old-style
 * (label:) designated initializers, as was common in 2.4/2.5 kernels;
 * unlisted operations default to NULL.
 */
struct file_operations ioconfig_bus_fops = {
	ioctl:ioconfig_bus_ioctl,
	open:ioconfig_bus_open,		/* open */
	release:ioconfig_bus_close	/* release */
};
/*
* init_ifconfig_bus() - Boot time initialization. Ensure that it is called
* after devfs has been initialized.
*
*/
/*
 * init_ioconfig_bus - boot-time initialization; must run after devfs
 * has been initialized.  Registers the ".ioconfig_bus" char device in
 * the hardware graph and panics if registration fails.
 */
int init_ioconfig_bus(void)
{
	ioconfig_bus_handle = NULL;
	ioconfig_bus_handle = hwgraph_register(hwgraph_root, ".ioconfig_bus",
			0, DEVFS_FL_AUTO_DEVNUM,
			0, 0,
			S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
			&ioconfig_bus_fops, NULL);

	if (ioconfig_bus_handle == NULL) {
		panic("Unable to create SGI PERSISTENT BUS NUMBERING Driver.\n");
	}

	return(0);
}
/*
 * ioconfig_bus_setup - parse the "ioconfig=" kernel boot option.
 *
 * Copies the option string into the global ioconfig_kernopts buffer,
 * translating ',' to '\n' so build_moduleid_table() can reuse its
 * line-oriented parser.  Copying stops at whitespace or end of string.
 *
 * Fix: the copy is now bounded to sizeof(ioconfig_kernopts)-1 bytes;
 * the original copied an arbitrarily long command-line option into the
 * 128-byte buffer with no limit, overflowing it.  The buffer stays
 * NUL-terminated because it is zeroed first.
 */
static int __init ioconfig_bus_setup (char *str)
{
	char *dst = ioconfig_kernopts;
	char *end = ioconfig_kernopts + sizeof(ioconfig_kernopts) - 1;

	DBG("ioconfig_bus_setup: Kernel Options %s\n", str);

	memset(ioconfig_kernopts, 0, sizeof(ioconfig_kernopts));

	while ((*str != '\0') && !isspace (*str) && (dst < end)) {
		*dst++ = (*str == ',') ? '\n' : *str;
		str++;
	}

	return(0);
}
__setup("ioconfig=", ioconfig_bus_setup);
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
#
# Makefile for the sn2 specific io routines.
EXTRA_CFLAGS := -DLITTLE_ENDIAN
obj-y += bte_error.o geo_op.o klconflib.o klgraph.o l1.o \
l1_command.o ml_iograph.o ml_SN_init.o ml_SN_intr.o module.o \
pci_bus_cvlink.o pciio.o pic.o sgi_io_init.o shub.o shuberror.o \
shub_intr.o shubio.o xbow.o xtalk.o
obj-$(CONFIG_KDB) += kdba_io.o
obj-$(CONFIG_SHUB_1_0_SPECIFIC) += efi-rtc.o
/* $Id$
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
/*
* @doc file m:hwcfg
* DESCRIPTION:
*
* This file contains routines for manipulating and generating
* Geographic IDs. They are in a file by themself since they have
* no dependencies on other modules.
*
* ORIGIN:
*
* New for SN2
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/sn/types.h>
#include <asm/sn/sgi.h>
#include <asm/sn/iograph.h>
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_private.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/pci/pciio.h>
#include <asm/sn/pci/pcibr.h>
#include <asm/sn/xtalk/xtalk.h>
#include <asm/sn/pci/pcibr_private.h>
#include <asm/sn/intr.h>
#include <asm/sn/sn2/shub_mmr_t.h>
#include <asm/sn/sn2/shubio.h>
#include <asm/sal.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/module.h>
#include <asm/sn/geo.h>
/********** Global functions and data (visible outside the module) ***********/
/*
* @doc gf:geo_module
*
* moduleid_t geo_module(geoid_t g)
*
* DESCRIPTION:
*
* Return the moduleid component of a geoid.
*
* INTERNALS:
*
* Return INVALID_MODULE for an invalid geoid. Otherwise extract the
* moduleid from the structure, and return it.
*
* ORIGIN:
*
* New for SN2
*/
/*
 * geo_module - extract the moduleid component of a geoid.
 * An invalid geoid yields INVALID_MODULE.
 */
moduleid_t
geo_module(geoid_t g)
{
	return (g.any.type == GEO_TYPE_INVALID) ? INVALID_MODULE : g.any.module;
}
/*
* @doc gf:geo_slab
*
* slabid_t geo_slab(geoid_t g)
*
* DESCRIPTION:
*
* Return the slabid component of a geoid.
*
* INTERNALS:
*
* Return INVALID_SLAB for an invalid geoid. Otherwise extract the
* slabid from the structure, and return it.
*
* ORIGIN:
*
* New for SN2
*/
/*
 * geo_slab - extract the slabid component of a geoid.
 * An invalid geoid yields INVALID_SLAB.
 */
slabid_t
geo_slab(geoid_t g)
{
	return (g.any.type == GEO_TYPE_INVALID) ? INVALID_SLAB : g.any.slab;
}
/*
* @doc gf:geo_type
*
* geo_type_t geo_type(geoid_t g)
*
* DESCRIPTION:
*
* Return the type component of a geoid.
*
* INTERNALS:
*
* Extract the type from the structure, and return it.
*
* ORIGIN:
*
* New for SN2
*/
/* geo_type - return the type component of a geoid; no validity check. */
geo_type_t
geo_type(geoid_t g)
{
	return g.any.type;
}
/*
* @doc gf:geo_valid
*
* int geo_valid(geoid_t g)
*
* DESCRIPTION:
*
* Return nonzero if g has a valid geoid type.
*
* INTERNALS:
*
* Test the type against GEO_TYPE_INVALID, and return the result.
*
* ORIGIN:
*
* New for SN2
*/
/*
 * geo_valid - nonzero iff g carries a valid geoid type.
 */
int
geo_valid(geoid_t g)
{
	if (g.any.type == GEO_TYPE_INVALID)
		return 0;
	return 1;
}
/*
* @doc gf:geo_cmp
*
* int geo_cmp(geoid_t g0, geoid_t g1)
*
* DESCRIPTION:
*
* Compare two geoid_t values, from the coarsest field to the finest.
* The comparison should be consistent with the physical locations of
* of the hardware named by the geoids.
*
* INTERNALS:
*
* First compare the module, then the slab, type, and type-specific fields.
*
* ORIGIN:
*
* New for SN2
*/
/*
 * geo_cmp - compare two geoids from coarsest field to finest.
 * Returns <0, 0 or >0; 0 means the geoids are equal.
 */
int
geo_cmp(geoid_t g0, geoid_t g1)
{
	int rv;

	/* Compare the common fields */
	rv = MODULE_CMP(geo_module(g0), geo_module(g1));
	if (rv != 0)
		return rv;

	rv = geo_slab(g0) - geo_slab(g1);
	if (rv != 0)
		return rv;

	/* Within a slab, sort by type */
	rv = geo_type(g0) - geo_type(g1);
	if (rv != 0)
		return rv;

	/* Same type: compare the type-specific fields */
	switch(geo_type(g0)) {
	case GEO_TYPE_CPU:
		rv = g0.cpu.slice - g1.cpu.slice;
		break;

	case GEO_TYPE_IOCARD:
		rv = g0.pcicard.bus - g1.pcicard.bus;
		if (rv) break;
		rv = SLOTNUM_GETSLOT(g0.pcicard.slot) -
			SLOTNUM_GETSLOT(g1.pcicard.slot);
		break;

	case GEO_TYPE_MEM:
		rv = g0.mem.membus - g1.mem.membus;
		if (rv) break;
		rv = g0.mem.memslot - g1.mem.memslot;
		break;

	default:
		/* remaining types have no finer-grained fields */
		rv = 0;
	}

	return rv;
}
/*
* @doc gf:geo_new
*
* geoid_t geo_new(geo_type_t type, ...)
*
* DESCRIPTION:
*
* Generate a new geoid_t value of the given type from its components.
* Expected calling sequences:
* \@itemize \@bullet
* \@item
* \@code\{geo_new(GEO_TYPE_INVALID)\}
* \@item
* \@code\{geo_new(GEO_TYPE_MODULE, moduleid_t m)\}
* \@item
* \@code\{geo_new(GEO_TYPE_NODE, moduleid_t m, slabid_t s)\}
* \@item
* \@code\{geo_new(GEO_TYPE_RTR, moduleid_t m, slabid_t s)\}
* \@item
* \@code\{geo_new(GEO_TYPE_IOCNTL, moduleid_t m, slabid_t s)\}
* \@item
* \@code\{geo_new(GEO_TYPE_IOCARD, moduleid_t m, slabid_t s, char bus, slotid_t slot)\}
* \@item
* \@code\{geo_new(GEO_TYPE_CPU, moduleid_t m, slabid_t s, char slice)\}
* \@item
* \@code\{geo_new(GEO_TYPE_MEM, moduleid_t m, slabid_t s, char membus, char slot)\}
* \@end itemize
*
* Invalid types return a GEO_TYPE_INVALID geoid_t.
*
* INTERNALS:
*
* Use the type to determine which fields to expect. Write the fields into
* a new geoid_t and return it. Note: scalars smaller than an "int" are
* promoted to "int" by the "..." operator, so we need extra casts on "char",
* "slotid_t", and "slabid_t".
*
* ORIGIN:
*
* New for SN2
*/
/*
 * geo_new - construct a geoid_t of the given type from varargs
 * components (see the calling sequences documented above).  Unknown
 * types are coerced to GEO_TYPE_INVALID.  Scalars narrower than int
 * are promoted by "...", hence the explicit casts on extraction.
 */
geoid_t
geo_new(geo_type_t type, ...)
{
	va_list al;
	geoid_t g;

	memset(&g, 0, sizeof(g));
	va_start(al, type);

	/* Make sure the type is sane */
	if (type >= GEO_TYPE_MAX)
		type = GEO_TYPE_INVALID;

	g.any.type = type;
	if (type == GEO_TYPE_INVALID)
		goto done;	/* invalid geoids have no components at all */

	g.any.module = va_arg(al, moduleid_t);
	if (type == GEO_TYPE_MODULE)
		goto done;

	g.any.slab = (slabid_t)va_arg(al, int);

	/* Some types have additional components */
	switch(type) {
	case GEO_TYPE_CPU:
		g.cpu.slice = (char)va_arg(al, int);
		break;

	case GEO_TYPE_IOCARD:
		g.pcicard.bus = (char)va_arg(al, int);
		g.pcicard.slot = (slotid_t)va_arg(al, int);
		break;

	case GEO_TYPE_MEM:
		g.mem.membus = (char)va_arg(al, int);
		g.mem.memslot = (char)va_arg(al, int);
		break;

	default:
		break;
	}

done:
	va_end(al);
	return g;
}
/* $Id$
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/ctype.h>
#include <asm/sn/sgi.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/iograph.h>
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/module.h>
#include <asm/sn/router.h>
#include <asm/sn/xtalk/xbow.h>
#define printf printk
int hasmetarouter;
#define LDEBUG 0
#define NIC_UNKNOWN ((nic_t) -1)
#undef DEBUG_KLGRAPH
#ifdef DEBUG_KLGRAPH
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif /* DEBUG_KLGRAPH */
static void sort_nic_names(lboard_t *) ;
u64 klgraph_addr[MAX_COMPACT_NODES];
int module_number = 0;
/*
 * find_lboard - walk the board list from 'start' and return the first
 * board whose brd_type matches exactly, or NULL if none does.
 */
lboard_t *
find_lboard(lboard_t *start, unsigned char brd_type)
{
	lboard_t *brd;

	/* Search all boards stored on this node. */
	for (brd = start; brd != NULL; brd = KLCF_NEXT(brd)) {
		if (brd->brd_type == brd_type)
			return brd;
	}

	/* Didn't find it. */
	return (lboard_t *)NULL;
}
/*
 * find_lboard_class - walk the board list from 'start' and return the
 * first board whose board class (KLCLASS) matches brd_type's class,
 * or NULL if none does.
 */
lboard_t *
find_lboard_class(lboard_t *start, unsigned char brd_type)
{
	lboard_t *brd;

	/* Search all boards stored on this node. */
	for (brd = start; brd != NULL; brd = KLCF_NEXT(brd)) {
		if (KLCLASS(brd->brd_type) == KLCLASS(brd_type))
			return brd;
	}

	/* Didn't find it. */
	return (lboard_t *)NULL;
}
/*
 * find_component - return the next component of 'struct_type' on board
 * 'brd', scanning forward from component 'kli' (or from the beginning
 * when kli is NULL).  Returns NULL when kli is not on the board or no
 * further component of that type exists.
 */
klinfo_t *
find_component(lboard_t *brd, klinfo_t *kli, unsigned char struct_type)
{
	int index, j;

	if (kli == (klinfo_t *)NULL) {
		index = 0;
	} else {
		/* locate kli among the board's components */
		for (j = 0; j < KLCF_NUM_COMPS(brd); j++) {
			if (kli == KLCF_COMP(brd, j))
				break;
		}
		index = j;
		if (index == KLCF_NUM_COMPS(brd)) {
			DBG("find_component: Bad pointer: 0x%p\n", kli);
			return (klinfo_t *)NULL;
		}
		index++;	/* next component */
	}

	for (; index < KLCF_NUM_COMPS(brd); index++) {
		kli = KLCF_COMP(brd, index);
		DBG("find_component: brd %p kli %p request type = 0x%x kli type 0x%x\n", brd, kli, kli->struct_type, KLCF_COMP_TYPE(kli));
		if (KLCF_COMP_TYPE(kli) == struct_type)
			return kli;
	}

	/* Didn't find it. */
	return (klinfo_t *)NULL;
}
/*
 * find_first_component - first component of 'struct_type' on 'brd',
 * or NULL.  Thin wrapper over find_component() with no prior component.
 */
klinfo_t *
find_first_component(lboard_t *brd, unsigned char struct_type)
{
	klinfo_t *from_start = (klinfo_t *)NULL;

	return find_component(brd, from_start, struct_type);
}
/*
 * find_lboard_modslot - scan the board list for a board selected by
 * geo_cmp() against 'geoid'; NULL if none.
 * NOTE(review): geo_cmp() returns nonzero on *difference*, so this
 * selects the first board whose geoid differs -- TODO confirm intent;
 * behavior preserved here.
 */
lboard_t *
find_lboard_modslot(lboard_t *start, geoid_t geoid)
{
	lboard_t *brd;

	/* Search all boards stored on this node. */
	for (brd = start; brd != NULL; brd = KLCF_NEXT(brd)) {
		if (geo_cmp(brd->brd_geoid, geoid))
			return brd;
	}

	/* Didn't find it. */
	return (lboard_t *)NULL;
}
/*
 * find_lboard_module - identical in implementation to
 * find_lboard_modslot(): scan the board list for a board selected by
 * geo_cmp() against 'geoid'; NULL if none.
 * NOTE(review): duplicate of find_lboard_modslot -- candidates for
 * consolidation.  geo_cmp() is nonzero on difference; TODO confirm
 * intended match semantics; behavior preserved here.
 */
lboard_t *
find_lboard_module(lboard_t *start, geoid_t geoid)
{
	lboard_t *brd;

	/* Search all boards stored on this node. */
	for (brd = start; brd != NULL; brd = KLCF_NEXT(brd)) {
		if (geo_cmp(brd->brd_geoid, geoid))
			return brd;
	}

	/* Didn't find it. */
	return (lboard_t *)NULL;
}
/*
 * find_lboard_module_class - first board selected by geo_cmp() against
 * 'geoid' whose class matches brd_type's class, or NULL.
 * NOTE(review): geo_cmp() returns nonzero on difference (see its
 * implementation above), matching the pattern of the sibling
 * find_lboard_* routines -- TODO confirm intended semantics.
 */
lboard_t *
find_lboard_module_class(lboard_t *start, geoid_t geoid,
			 unsigned char brd_type)
{
	while (start) {
		DBG("find_lboard_module_class: lboard 0x%p, start->brd_geoid 0x%x, mod 0x%x, start->brd_type 0x%x, brd_type 0x%x\n", start, start->brd_geoid, geoid, start->brd_type, brd_type);

		if (geo_cmp(start->brd_geoid, geoid) &&
		    (KLCLASS(start->brd_type) == KLCLASS(brd_type)))
			return start;
		start = KLCF_NEXT(start);
	}

	/* Didn't find it. */
	return (lboard_t *)NULL;
}
/*
* Convert a NIC name to a name for use in the hardware graph.
*/
/*
 * nic_name_convert - convert a NIC name into a hardware-graph-safe name.
 *
 * Empty or single-character names become EDGE_LBL_XWIDGET.  Otherwise
 * each character is lowercased (letters), kept (digits), or replaced
 * with '_'.  Well-known legacy names (io6/mio/media_io -> "baseio",
 * divo -> "divo") are then canonicalized.
 *
 * Fix: strlen(old_name) was re-evaluated on every loop iteration; it is
 * now hoisted out of the loop.  Behavior is otherwise unchanged.
 */
void
nic_name_convert(char *old_name, char *new_name)
{
        int i;
        int len;
        char c;
        char *compare_ptr;

	if ((old_name[0] == '\0') || (old_name[1] == '\0')) {
		strcpy(new_name, EDGE_LBL_XWIDGET);
	} else {
		len = strlen(old_name);
		for (i = 0; i < len; i++) {
			c = old_name[i];

			if (isalpha(c))
				new_name[i] = tolower(c);
			else if (isdigit(c))
				new_name[i] = c;
			else
				new_name[i] = '_';
		}
		new_name[i] = '\0';
	}

	/* XXX -
	 * Since a bunch of boards made it out with weird names like
	 * IO6-fibbbed and IO6P2, we need to look for IO6 in a name and
	 * replace it with "baseio" to avoid confusion in the field.
	 * We also have to make sure we don't report media_io instead of
	 * baseio.
	 */

	/* Skip underscores at the beginning of the name.
	 * NOTE(review): compare_ptr is computed but the strncmp calls
	 * below still use new_name, so leading underscores are never
	 * actually skipped in the comparisons -- looks like a latent
	 * bug; behavior preserved here pending confirmation.
	 */
	for (compare_ptr = new_name; (*compare_ptr) == '_'; compare_ptr++)
		;

	/*
	 * Check for some names we need to replace. Early boards
	 * had junk following the name so check only the first
	 * characters.
	 */
	if (!strncmp(new_name, "io6", 3) ||
	    !strncmp(new_name, "mio", 3) ||
	    !strncmp(new_name, "media_io", 8))
		strcpy(new_name, "baseio");
	else if (!strncmp(new_name, "divo", 4))
		strcpy(new_name, "divo") ;
}
/*
* Find the lboard structure and get the board name.
* If we can't find the structure or it's too low a revision,
* use default name.
*/
lboard_t *
get_board_name(nasid_t nasid, geoid_t geoid, slotid_t slot, char *name)
{
	lboard_t *brd;

	brd = find_lboard_modslot((lboard_t *)KL_CONFIG_INFO(nasid),
				  geoid);

#ifndef _STANDALONE
	{
		cnodeid_t cnode = NASID_TO_COMPACT_NODEID(nasid);

		/* not found here: try the xbow peer's klconfig */
		if (!brd && (NODEPDA(cnode)->xbow_peer != INVALID_NASID))
			brd = find_lboard_modslot((lboard_t *)
				KL_CONFIG_INFO(NODEPDA(cnode)->xbow_peer),
				geoid);
	}
#endif

	/* no board, or klconfig too old to carry a usable name: default it */
	if (!brd || (brd->brd_sversion < 2)) {
		strcpy(name, EDGE_LBL_XWIDGET);
	} else {
		nic_name_convert(brd->brd_name, name);
	}

	/*
	 * PV # 540860
	 * If the name is not 'baseio'
	 * get the lowest of all the names in the nic string.
	 * This is needed for boards like divo, which can have
	 * a bunch of daughter cards, but would like to be called
	 * divo. We could do this for baseio
	 * but it has some special case names that we would not
	 * like to disturb at this point.
	 */

	/* gfx boards don't need any of this name scrambling */
	if (brd && (KLCLASS(brd->brd_type) == KLCLASS_GFX)) {
		return(brd);
	}

	if (!(!strcmp(name, "baseio") )) {
		if (brd) {
			sort_nic_names(brd) ;
			/* Convert to small case, '-' to '_' etc */
			nic_name_convert(brd->brd_name, name) ;
		}
	}

	return(brd);
}
/*
* get_actual_nasid
*
* Completely disabled brds have their klconfig on
* some other nasid as they have no memory. But their
* actual nasid is hidden in the klconfig. Use this
* routine to get it. Works for normal boards too.
*/
/*
 * get_actual_nasid - return the real nasid of a board.
 * For a completely disabled board (hub KLINFO_ENABLE clear) the true
 * nasid is stashed in the hub component's physid; otherwise use
 * brd_nasid.  Returns INVALID_NASID for a NULL board or missing hub.
 */
nasid_t
get_actual_nasid(lboard_t *brd)
{
	klhub_t *hub ;

	if (!brd)
		return INVALID_NASID ;

	/* find out if we are a completely disabled brd. */
	hub = (klhub_t *)find_first_component(brd, KLSTRUCT_HUB);
	if (!hub)
		return INVALID_NASID ;
	if (!(hub->hub_info.flags & KLINFO_ENABLE))	/* disabled node brd */
		return hub->hub_info.physid ;
	else
		return brd->brd_nasid ;
}
/*
 * xbow_port_io_enabled - nonzero iff xbow 'link' on node 'nasid' is an
 * IO-type port and is enabled, per the klconfig xbow component.
 */
int
xbow_port_io_enabled(nasid_t nasid, int link)
{
	lboard_t *brd;
	klxbow_t *xbow_p;

	/*
	 * look for boards that might contain an xbow or xbridge
	 */
	brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IOBRICK_XBOW);
	if (brd == NULL) return 0;

	if ((xbow_p = (klxbow_t *)find_component(brd, NULL, KLSTRUCT_XBOW))
	    == NULL)
		return 0;

	if (!XBOW_PORT_TYPE_IO(xbow_p, link) || !XBOW_PORT_IS_ENABLED(xbow_p, link))
		return 0;

	return 1;
}
/*
 * board_to_path - build the hwgraph path "module/<id>/slab/<n>/<name>"
 * for a board into 'path', choosing the edge label from the board's
 * class/type.  Side effect: increments the global 'hasmetarouter' when
 * a meta or repeater router is encountered.
 */
void
board_to_path(lboard_t *brd, char *path)
{
	moduleid_t modnum;
	char *board_name;
	char buffer[16];

	ASSERT(brd);

	switch (KLCLASS(brd->brd_type)) {

	case KLCLASS_NODE:
		board_name = EDGE_LBL_NODE;
		break;
	case KLCLASS_ROUTER:
		if (brd->brd_type == KLTYPE_META_ROUTER) {
			board_name = EDGE_LBL_META_ROUTER;
			hasmetarouter++;
		} else if (brd->brd_type == KLTYPE_REPEATER_ROUTER) {
			board_name = EDGE_LBL_REPEATER_ROUTER;
			hasmetarouter++;
		} else
			board_name = EDGE_LBL_ROUTER;
		break;
	case KLCLASS_MIDPLANE:
		board_name = EDGE_LBL_MIDPLANE;
		break;
	case KLCLASS_IO:
		board_name = EDGE_LBL_IO;
		break;
	case KLCLASS_IOBRICK:
		if (brd->brd_type == KLTYPE_PBRICK)
			board_name = EDGE_LBL_PBRICK;
		else if (brd->brd_type == KLTYPE_IBRICK)
			board_name = EDGE_LBL_IBRICK;
		else if (brd->brd_type == KLTYPE_XBRICK)
			board_name = EDGE_LBL_XBRICK;
		else
			board_name = EDGE_LBL_IOBRICK;
		break;
	default:
		board_name = EDGE_LBL_UNKNOWN;
	}

	modnum = geo_module(brd->brd_geoid);
	memset(buffer, 0, 16);
	format_module_id(buffer, modnum, MODULE_FORMAT_BRIEF);
	sprintf(path, EDGE_LBL_MODULE "/%s/" EDGE_LBL_SLAB "/%d/%s", buffer, geo_slab(brd->brd_geoid), board_name);
}
/*
* Get the module number for a NASID.
*/
/*
 * get_module_id - module id for a NASID, taken from the node's SNIA
 * board geoid; INVALID_MODULE when no such board is found.
 */
moduleid_t
get_module_id(nasid_t nasid)
{
	lboard_t *brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_SNIA);

	return brd ? geo_module(brd->brd_geoid) : INVALID_MODULE;
}
#define MHZ 1000000
/* Get the canonical hardware graph name for the given pci component
* on the given io board.
*/
/*
 * device_component_canonical_name_get - build the canonical hwgraph
 * name "/dev/hw/module/<m>/slab/<s>/slot/<board>/pci/<id>" for a pci
 * component on the given io board.
 *
 * NOTE(review): 'slot' is computed but never used, and in the
 * brd_sversion < 2 fallback branch 'name' receives EDGE_LBL_XWIDGET
 * while 'board_name' is then consumed uninitialized by the sprintf
 * below -- TODO confirm intent.
 */
void
device_component_canonical_name_get(lboard_t *brd,
				    klinfo_t *component,
				    char *name)
{
	slotid_t slot;
	char board_name[20];

	ASSERT(brd);

	/* Convert the [ CLASS | TYPE ] kind of slotid
	 * into a string
	 */
	slot = brd->brd_slot;

	/* Get the io board name */
	if (!brd || (brd->brd_sversion < 2)) {
		strcpy(name, EDGE_LBL_XWIDGET);
	} else {
		nic_name_convert(brd->brd_name, board_name);
	}

	/* Give out the canonical name of the pci device*/
	sprintf(name,
		"/dev/hw/"EDGE_LBL_MODULE "/%x/"EDGE_LBL_SLAB"/%d/"
		EDGE_LBL_SLOT"/%s/"EDGE_LBL_PCI"/%d",
		geo_module(brd->brd_geoid), geo_slab(brd->brd_geoid),
		board_name, KLCF_BRIDGE_W_ID(component));
}
/*
* Get the serial number of the main component of a board
* Returns 0 if a valid serial number is found
* 1 otherwise.
* Assumptions: Nic manufacturing string has the following format
* *Serial:<serial_number>;*
*/
static int
component_serial_number_get(lboard_t *board,
			    klconf_off_t mfg_nic_offset,
			    char *serial_number,
			    char *key_pattern)
{
	char *mfg_nic_string;
	char *serial_string,*str;
	int i;
	char *serial_pattern = "Serial:";

	/* We have an error on a null mfg nic offset */
	if (!mfg_nic_offset)
		return(1);

	/* Get the hub's manufacturing nic information
	 * which is in the form of a pre-formatted string
	 */
	mfg_nic_string =
		(char *)NODE_OFFSET_TO_K0(NASID_GET(board),
					  mfg_nic_offset);
	/* There is no manufacturing nic info */
	if (!mfg_nic_string)
		return(1);

	str = mfg_nic_string;
	/* Look for the key pattern first (if it is specified)
	 * and then print the serial number corresponding to that.
	 */
	if (strcmp(key_pattern,"") &&
	    !(str = strstr(mfg_nic_string,key_pattern)))
		return(1);

	/* There is no serial number info in the manufacturing
	 * nic info
	 */
	if (!(serial_string = strstr(str,serial_pattern)))
		return(1);

	serial_string = serial_string + strlen(serial_pattern);
	/* Copy the serial number information from the klconfig.
	 * NOTE(review): this loop is unbounded -- it trusts that a ';'
	 * terminator follows "Serial:" and that 'serial_number' is
	 * large enough; a malformed nic string would overrun both.
	 * TODO confirm.
	 */
	i = 0;
	while (serial_string[i] != ';') {
		serial_number[i] = serial_string[i];
		i++;
	}
	serial_number[i] = 0;

	return(0);
}
/*
* Get the serial number of a board
* Returns 0 if a valid serial number is found
* 1 otherwise.
*/
/*
 * board_serial_number_get - fetch the serial number of a board's main
 * component into 'serial_number'.  Returns 0 on success, 1 otherwise.
 * The component consulted depends on the board class: hub for CPU
 * boards (keyed on "IP37"), tpu/gsn/bridge for IO boards, router, or
 * gfx; unknown classes yield an empty string and success.
 */
int
board_serial_number_get(lboard_t *board,char *serial_number)
{
	ASSERT(board && serial_number);
	if (!board || !serial_number)
		return(1);

	strcpy(serial_number,"");
	switch(KLCLASS(board->brd_type)) {
	case KLCLASS_CPU: {	/* Node board */
		klhub_t	*hub;

		/* Get the hub component information */
		hub = (klhub_t *)find_first_component(board,
						      KLSTRUCT_HUB);
		/* If we don't have a hub component on an IP27
		 * then we have a weird klconfig.
		 */
		if (!hub)
			return(1);
		/* Get the serial number information from
		 * the hub's manufacturing nic info
		 */
		if (component_serial_number_get(board,
						hub->hub_mfg_nic,
						serial_number,
						"IP37"))
			return(1);
		break;
	}
	case KLCLASS_IO: {	/* IO board */
		if (KLTYPE(board->brd_type) == KLTYPE_TPU) {
			/* Special case for TPU boards */
			kltpu_t *tpu;

			/* Get the tpu component information */
			tpu = (kltpu_t *)find_first_component(board,
							      KLSTRUCT_TPU);
			/* If we don't have a tpu component on a tpu board
			 * then we have a weird klconfig.
			 */
			if (!tpu)
				return(1);
			/* Get the serial number information from
			 * the tpu's manufacturing nic info
			 */
			if (component_serial_number_get(board,
							tpu->tpu_mfg_nic,
							serial_number,
							""))
				return(1);
			break;
		} else if ((KLTYPE(board->brd_type) == KLTYPE_GSN_A) ||
			   (KLTYPE(board->brd_type) == KLTYPE_GSN_B)) {
			/* Special case for GSN boards */
			klgsn_t *gsn;

			/* Get the gsn component information */
			gsn = (klgsn_t *)find_first_component(board,
				((KLTYPE(board->brd_type) == KLTYPE_GSN_A) ?
				 KLSTRUCT_GSN_A : KLSTRUCT_GSN_B));
			/* If we don't have a gsn component on a gsn board
			 * then we have a weird klconfig.
			 */
			if (!gsn)
				return(1);
			/* Get the serial number information from
			 * the gsn's manufacturing nic info
			 */
			if (component_serial_number_get(board,
							gsn->gsn_mfg_nic,
							serial_number,
							""))
				return(1);
			break;
		} else {
			klbri_t	*bridge;

			/* Get the bridge component information */
			bridge = (klbri_t *)find_first_component(board,
								 KLSTRUCT_BRI);
			/* If we don't have a bridge component on an IO board
			 * then we have a weird klconfig.
			 */
			if (!bridge)
				return(1);
			/* Get the serial number information from
			 * the bridge's manufacturing nic info
			 */
			if (component_serial_number_get(board,
							bridge->bri_mfg_nic,
							serial_number,
							""))
				return(1);
			break;
		}
	}
	case KLCLASS_ROUTER: {	/* Router board */
		klrou_t *router;

		/* Get the router component information */
		router = (klrou_t *)find_first_component(board,
							 KLSTRUCT_ROU);
		/* If we don't have a router component on a router board
		 * then we have a weird klconfig.
		 */
		if (!router)
			return(1);
		/* Get the serial number information from
		 * the router's manufacturing nic info
		 */
		if (component_serial_number_get(board,
						router->rou_mfg_nic,
						serial_number,
						""))
			return(1);
		break;
	}
	case KLCLASS_GFX: {	/* Gfx board */
		klgfx_t *graphics;

		/* Get the graphics component information */
		graphics = (klgfx_t *)find_first_component(board, KLSTRUCT_GFX);
		/* If we don't have a gfx component on a gfx board
		 * then we have a weird klconfig.
		 */
		if (!graphics)
			return(1);
		/* Get the serial number information from
		 * the graphics's manufacturing nic info
		 */
		if (component_serial_number_get(board,
						graphics->gfx_mfg_nic,
						serial_number,
						""))
			return(1);
		break;
	}
	default:
		strcpy(serial_number,"");
		break;
	}
	return(0);
}
#include "asm/sn/sn_private.h"
/*
 * nodevertex_widgetnum_get - return the xtalk widget id of a node
 * vertex, read from the hubinfo attached under INFO_LBL_NODE_INFO.
 */
xwidgetnum_t
nodevertex_widgetnum_get(devfs_handle_t node_vtx)
{
	hubinfo_t hubinfo_p;

	hwgraph_info_get_LBL(node_vtx, INFO_LBL_NODE_INFO,
			     (arbitrary_info_t *) &hubinfo_p);
	return(hubinfo_p->h_widgetid);
}
/*
 * nodevertex_xbow_peer_get - return the hwgraph vertex of the node on
 * the other side of the xbow, or (devfs_handle_t)-1 when the node has
 * no xbow peer (INVALID_NASID).
 */
devfs_handle_t
nodevertex_xbow_peer_get(devfs_handle_t node_vtx)
{
	hubinfo_t hubinfo_p;
	nasid_t xbow_peer_nasid;
	cnodeid_t xbow_peer;

	hwgraph_info_get_LBL(node_vtx, INFO_LBL_NODE_INFO,
			     (arbitrary_info_t *) &hubinfo_p);
	xbow_peer_nasid = hubinfo_p->h_nodepda->xbow_peer;
	if(xbow_peer_nasid == INVALID_NASID)
		return ( (devfs_handle_t)-1);
	xbow_peer = NASID_TO_COMPACT_NODEID(xbow_peer_nasid);
	return(NODEPDA(xbow_peer)->node_vertex);
}
/* NIC Sorting Support */
#define MAX_NICS_PER_STRING 32
#define MAX_NIC_NAME_LEN 32
/*
 * get_nic_string - return a pointer to the manufacturing NIC string of
 * the first component on board 'lb' that carries one, or NULL if no
 * component has a mfg nic offset.
 */
static char *
get_nic_string(lboard_t *lb)
{
	int i;
	klinfo_t *k = NULL ;
	klconf_off_t mfg_off = 0 ;
	char *mfg_nic = NULL ;

	for (i = 0; i < KLCF_NUM_COMPS(lb); i++) {
		k = KLCF_COMP(lb, i) ;
		/* each component type keeps its mfg nic offset in a
		 * differently named field */
		switch(k->struct_type) {
		case KLSTRUCT_BRI:
			mfg_off = ((klbri_t *)k)->bri_mfg_nic ;
			break ;

		case KLSTRUCT_HUB:
			mfg_off = ((klhub_t *)k)->hub_mfg_nic ;
			break ;

		case KLSTRUCT_ROU:
			mfg_off = ((klrou_t *)k)->rou_mfg_nic ;
			break ;

		case KLSTRUCT_GFX:
			mfg_off = ((klgfx_t *)k)->gfx_mfg_nic ;
			break ;

		case KLSTRUCT_TPU:
			mfg_off = ((kltpu_t *)k)->tpu_mfg_nic ;
			break ;

		case KLSTRUCT_GSN_A:
		case KLSTRUCT_GSN_B:
			mfg_off = ((klgsn_t *)k)->gsn_mfg_nic ;
			break ;

		case KLSTRUCT_XTHD:
			mfg_off = ((klxthd_t *)k)->xthd_mfg_nic ;
			break;

		default:
			mfg_off = 0 ;
			break ;
		}
		if (mfg_off)
			break ;
	}

	if ((mfg_off) && (k))
		/* translate the klconfig offset into a K0 virtual address */
		mfg_nic = (char *)NODE_OFFSET_TO_K0(k->nasid, mfg_off) ;
	return mfg_nic ;
}
/*
 * get_first_string - return the lexicographically smallest string among
 * the first n entries of ptrs, or NULL when ptrs is NULL or n == 0.
 */
char *
get_first_string(char **ptrs, int n)
{
	int i ;
	char *best ;

	if ((ptrs == NULL) || (n == 0))
		return NULL ;

	best = ptrs[0] ;
	for (i = 1 ; i < n ; i++) {
		if (strcmp(best, ptrs[i]) > 0)
			best = ptrs[i] ;
	}
	return best ;
}
/*
 * get_ptrs - collect pointers to the text following each occurrence of
 * 'label' in idata.  At most n-1 pointers are stored in ptrs (the last
 * slot is reserved for the NULL terminator); occurrences immediately
 * followed by ';' (empty fields) are skipped.  Returns the count stored.
 */
int
get_ptrs(char *idata, char **ptrs, int n, char *label)
{
	int count = 0 ;
	char *scan ;

	if ((ptrs == NULL) || (idata == NULL) || (label == NULL) || (n == 0))
		return 0 ;

	scan = idata ;
	while ( (scan = strstr(scan, label)) ){
		scan += strlen(label) ;
		/* check for empty name field, and keep the last slot for NULL */
		if ((count < (n-1)) && (*scan != ';')) {
			ptrs[count++] = scan ;
		}
	}
	ptrs[count] = NULL ;
	return count ;
}
/*
 * sort_nic_names
 *
 *   Does not really do sorting. Find the alphabetically lowest
 *   name among all the nic names found in a nic string.
 *
 * Return:
 *   Nothing
 *
 * Side Effects:
 *
 *   lb->brd_name gets the new name found
 */
static void
sort_nic_names(lboard_t *lb)
{
	char *nic_str;
	char *ptrs[MAX_NICS_PER_STRING];
	char name[MAX_NIC_NAME_LEN];
	char *tmp, *tmp1;
	unsigned long len;

	*name = 0;

	/* Get the nic pointer from the lb */
	if ((nic_str = get_nic_string(lb)) == NULL)
		return;

	tmp = get_first_string(ptrs,
			get_ptrs(nic_str, ptrs, MAX_NICS_PER_STRING, "Name:"));
	if (tmp == NULL)
		return;

	/* name runs up to the next ';' (or end of string) */
	if ((tmp1 = strchr(tmp, ';')))
		len = tmp1 - tmp;
	else
		len = strlen(tmp);

	/* Clamp to the local buffer: previously a name field of
	 * MAX_NIC_NAME_LEN or more characters overran name[]. */
	if (len > sizeof(name) - 1)
		len = sizeof(name) - 1;

	strncpy(name, tmp, len);
	name[len] = 0;

	/* NOTE(review): brd_name's size is declared elsewhere; this
	 * assumes it holds at least MAX_NIC_NAME_LEN bytes - confirm. */
	strcpy(lb->brd_name, name);
}
/* Brick type characters; parse_module_id maps a brick-type letter to
 * its module btype code via the letter's index in this string
 * ('%', '#' and the digits presumably pad unassigned codes - TODO confirm). */
char brick_types[MAX_BRICK_TYPES + 1] = "crikxdpn%#012345";
/*
 * Format a module id for printing.
 * MODULE_FORMAT_BRIEF yields e.g. "002c15" (rack digits, brick type,
 * bay); MODULE_FORMAT_LONG yields the hwgraph form, e.g.
 * "rack/002/<rpos>/15".  The bay position always uses at least two digits.
 */
void
format_module_id(char *buffer, moduleid_t m, int fmt)
{
	int rack = MODULE_GET_RACK(m);
	int position;
	char brickchar;

	ASSERT(MODULE_GET_BTYPE(m) < MAX_BRICK_TYPES);
	brickchar = MODULE_GET_BTCHAR(m);
	position = MODULE_GET_BPOS(m);

	if (fmt == MODULE_FORMAT_BRIEF) {
		/* Brief form: decompressed rack digits + brick type char */
		buffer += sprintf(buffer, "%c%c%c%c",
				  '0' + RACK_GET_CLASS(rack),
				  '0' + RACK_GET_GROUP(rack),
				  '0' + RACK_GET_NUM(rack),
				  brickchar);
	} else if (fmt == MODULE_FORMAT_LONG) {
		/* Fuller hwgraph form: rack edge, digits, bay edge */
		buffer += sprintf(buffer,
				  EDGE_LBL_RACK "/%c%c%c/" EDGE_LBL_RPOS "/",
				  '0' + RACK_GET_CLASS(rack),
				  '0' + RACK_GET_GROUP(rack),
				  '0' + RACK_GET_NUM(rack));
	}

	/* Add the bay position, using at least two digits */
	sprintf(buffer, "%02d", position);
}
/*
 * Parse a module id, in either brief ("002c15") or long
 * ("rack/002/<rpos>/15") form.
 * Returns the moduleid_t value (zero-extended to int) on success, or a
 * negative code identifying the first failed check.
 * The long form does not include a brick type, so it defaults to 0 (CBrick).
 */
int
parse_module_id(char *buffer)
{
	unsigned int v, rack, bay, type, form;
	moduleid_t m;
	char c;
	char *bt;

	if (strstr(buffer, EDGE_LBL_RACK "/") == buffer) {
		form = MODULE_FORMAT_LONG;
		buffer += strlen(EDGE_LBL_RACK "/");

		/* A long module ID must be exactly 5 non-template chars. */
		if (strlen(buffer) != strlen("/" EDGE_LBL_RPOS "/") + 5)
			return -1;
	}
	else {
		form = MODULE_FORMAT_BRIEF;

		/* A brief module id must be exactly 6 characters */
		if (strlen(buffer) != 6)
			return -2;
	}

	/* The rack number must be exactly 3 digits */
	if (!(isdigit((unsigned char)buffer[0]) &&
	      isdigit((unsigned char)buffer[1]) &&
	      isdigit((unsigned char)buffer[2])))
		return -3;

	rack = 0;
	v = *buffer++ - '0';
	if (v > RACK_CLASS_MASK(rack) >> RACK_CLASS_SHFT(rack))
		return -4;
	RACK_ADD_CLASS(rack, v);

	v = *buffer++ - '0';
	if (v > RACK_GROUP_MASK(rack) >> RACK_GROUP_SHFT(rack))
		return -5;
	RACK_ADD_GROUP(rack, v);

	v = *buffer++ - '0';
	/* rack numbers are 1-based */
	if (v-1 > RACK_NUM_MASK(rack) >> RACK_NUM_SHFT(rack))
		return -6;
	RACK_ADD_NUM(rack, v);

	if (form == MODULE_FORMAT_BRIEF) {
		/* Next should be a module type character. Accept ucase or lcase. */
		c = *buffer++;
		if (!isalpha((unsigned char)c))
			return -7;

		/* Reject characters not in brick_types[]: the old code
		 * subtracted NULL from the table base (undefined behavior)
		 * and relied on the garbage being out of range. */
		bt = strchr(brick_types, tolower((unsigned char)c));
		if (bt == NULL)
			return -8;
		type = (unsigned int)(bt - brick_types);
		if (type > MODULE_BTYPE_MASK >> MODULE_BTYPE_SHFT)
			return -8;
	}
	else {
		/* Hardcode the module type, and skip over the boilerplate */
		type = MODULE_CBRICK;
		if (strstr(buffer, "/" EDGE_LBL_RPOS "/") != buffer)
			return -9;
		buffer += strlen("/" EDGE_LBL_RPOS "/");
	}

	/* The bay number is last.  Make sure it's exactly two digits */
	if (!(isdigit((unsigned char)buffer[0]) &&
	      isdigit((unsigned char)buffer[1]) && !buffer[2]))
		return -10;

	bay = 10 * (buffer[0] - '0') + (buffer[1] - '0');

	if (bay > MODULE_BPOS_MASK >> MODULE_BPOS_SHFT)
		return -11;

	m = RBT_TO_MODULE(rack, bay, type);

	/* avoid sign extending the moduleid_t */
	return (int)(unsigned short)m;
}
/* $Id$
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
/*
* klgraph.c-
* This file specifies the interface between the kernel and the PROM's
* configuration data structures.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/sn/sgi.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/io.h>
#include <asm/sn/iograph.h>
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h>
#include <asm/sn/kldir.h>
#include <asm/sn/gda.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/router.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/hcl_util.h>
// #define KLGRAPH_DEBUG 1
#ifdef KLGRAPH_DEBUG
#define GRPRINTF(x) printk x
#define CE_GRPANIC CE_PANIC
#else
#define GRPRINTF(x)
#define CE_GRPANIC CE_PANIC
#endif
#include <asm/sn/sn_private.h>
extern char arg_maxnodes[];
extern u64 klgraph_addr[];
void mark_cpuvertex_as_cpu(devfs_handle_t vhdl, cpuid_t cpuid);
/*
 * Support for verbose inventory via hardware graph.
 * klhwg_invent_alloc allocates the necessary size of inventory information
 * and fills in the generic information.
 */
/* Allocate 'size' bytes for an inventory record on node 'cnode' and fill
 * in the generic header fields (module, slot, class).  Returns NULL on
 * allocation failure; the caller owns (and must free) the record. */
invent_generic_t *
klhwg_invent_alloc(cnodeid_t cnode, int class, int size)
{
	invent_generic_t *invent;

	invent = kern_malloc(size);
	if (!invent) return NULL;

	invent->ig_module = NODE_MODULEID(cnode);
	invent->ig_slot = SLOTNUM_GETSLOT(NODE_SLOTID(cnode));
	invent->ig_invclass = class;

	return invent;
}
/*
 * Add detailed disabled cpu inventory info to the hardware graph:
 * an INFO_LBL_DETAIL_INVENT label describing the cpu and an
 * INFO_LBL_DIAGVAL label recording why it was disabled.  Both are
 * exported so user programs can read them through hwgfs.
 */
void
klhwg_disabled_cpu_invent_info(devfs_handle_t cpuv,
                               cnodeid_t cnode,
                               klcpu_t *cpu, slotid_t slot)
{
	invent_cpuinfo_t *cpu_invent;
	diag_inv_t       *diag_invent;

	cpu_invent = (invent_cpuinfo_t *)
	    klhwg_invent_alloc(cnode, INV_PROCESSOR, sizeof(invent_cpuinfo_t));
	if (!cpu_invent)
		return;

	/* Diag information on this processor */
	diag_invent = (diag_inv_t *)
	    klhwg_invent_alloc(cnode, INV_CPUDIAGVAL, sizeof(diag_inv_t));
	if (!diag_invent) {
		/* don't leak the cpu record when the diag alloc fails */
		kern_free(cpu_invent);
		return;
	}

	/* Disabled CPU */
	cpu_invent->ic_gen.ig_flag = 0x0;
	cpu_invent->ic_gen.ig_slot = slot;
	cpu_invent->ic_cpu_info.cpuflavor = cpu->cpu_prid;
	cpu_invent->ic_cpu_info.cpufq = cpu->cpu_speed;
	cpu_invent->ic_cpu_info.sdfreq = cpu->cpu_scachespeed;

	cpu_invent->ic_cpu_info.sdsize = cpu->cpu_scachesz;
	cpu_invent->ic_cpuid = cpu->cpu_info.virtid;
	cpu_invent->ic_slice = cpu->cpu_info.physid;

	/* Disabled CPU label */
	hwgraph_info_add_LBL(cpuv, INFO_LBL_DETAIL_INVENT,
			     (arbitrary_info_t) cpu_invent);
	hwgraph_info_export_LBL(cpuv, INFO_LBL_DETAIL_INVENT,
				sizeof(invent_cpuinfo_t));

	/* Diagval label - stores reason for disable +{virt,phys}id +diagval*/
	hwgraph_info_add_LBL(cpuv, INFO_LBL_DIAGVAL,
			     (arbitrary_info_t) diag_invent);
	hwgraph_info_export_LBL(cpuv, INFO_LBL_DIAGVAL,
				sizeof(diag_inv_t));
}
/*
 * Attach detailed inventory information for a cpu to its hwgraph vertex
 * (INFO_LBL_DETAIL_INVENT) and export it so it is visible through hwgfs.
 */
void
klhwg_cpu_invent_info(devfs_handle_t cpuv,
                      cnodeid_t cnode,
                      klcpu_t *cpu)
{
	invent_cpuinfo_t *ci;

	ci = (invent_cpuinfo_t *)
	    klhwg_invent_alloc(cnode, INV_PROCESSOR, sizeof(invent_cpuinfo_t));
	if (ci == NULL)
		return;

	/* flag reflects the klconfig enabled bit */
	ci->ic_gen.ig_flag =
	    KLCONFIG_INFO_ENABLED((klinfo_t *)cpu) ? INVENT_ENABLED : 0x0;

	ci->ic_cpu_info.cpuflavor = cpu->cpu_prid;
	ci->ic_cpu_info.cpufq = cpu->cpu_speed;
	ci->ic_cpu_info.sdfreq = cpu->cpu_scachespeed;
	ci->ic_cpu_info.sdsize = cpu->cpu_scachesz;
	ci->ic_cpuid = cpu->cpu_info.virtid;
	ci->ic_slice = cpu_physical_id_to_slice(cpu->cpu_info.virtid);

	hwgraph_info_add_LBL(cpuv, INFO_LBL_DETAIL_INVENT,
			     (arbitrary_info_t) ci);
	hwgraph_info_export_LBL(cpuv, INFO_LBL_DETAIL_INVENT,
				sizeof(invent_cpuinfo_t));
}
/*
 * Add information about the baseio prom version number
 * as a part of detailed inventory info in the hwgraph.
 */
void
klhwg_baseio_inventory_add(devfs_handle_t baseio_vhdl,cnodeid_t cnode)
{
	invent_miscinfo_t *baseio_inventory;
	unsigned char version = 0,revision = 0;

	/* Allocate memory for the "detailed inventory" info
	 * for the baseio
	 */
	baseio_inventory = (invent_miscinfo_t *)
	    klhwg_invent_alloc(cnode, INV_PROM, sizeof(invent_miscinfo_t));
	/* Every sibling klhwg_*_invent_info routine checks the allocation;
	 * previously this one dereferenced NULL on allocation failure. */
	if (!baseio_inventory)
		return;
	baseio_inventory->im_type = INV_IO6PROM;
	/* Store the revision info in the inventory */
	baseio_inventory->im_version = version;
	baseio_inventory->im_rev = revision;

	/* Put the inventory info in the hardware graph */
	hwgraph_info_add_LBL(baseio_vhdl, INFO_LBL_DETAIL_INVENT,
			     (arbitrary_info_t) baseio_inventory);
	/* Make the information available to the user programs
	 * thru hwgfs.
	 */
	hwgraph_info_export_LBL(baseio_vhdl, INFO_LBL_DETAIL_INVENT,
				sizeof(invent_miscinfo_t));
}
/* Printable hub ASIC revision strings - presumably indexed by the
 * klconfig hub_info.revision number; used outside this file - TODO confirm. */
char *hub_rev[] = {
	"0.0",
	"1.0",
	"2.0",
	"2.1",
	"2.2",
	"2.3"
};
/*
 * Add detailed hub inventory info to the hardware graph.
 * (The old header comment said "cpu"; this routine handles the hub.)
 */
void
klhwg_hub_invent_info(devfs_handle_t hubv,
                      cnodeid_t cnode,
                      klhub_t *hub)
{
	invent_miscinfo_t *hub_invent;

	hub_invent = (invent_miscinfo_t *)
	    klhwg_invent_alloc(cnode, INV_MISC, sizeof(invent_miscinfo_t));
	if (!hub_invent)
		return;

	if (KLCONFIG_INFO_ENABLED((klinfo_t *)hub))
		hub_invent->im_gen.ig_flag = INVENT_ENABLED;
	else
		/* mirror klhwg_cpu_invent_info: don't leave the flag
		 * uninitialized for a disabled hub */
		hub_invent->im_gen.ig_flag = 0x0;

	hub_invent->im_type = INV_HUB;
	hub_invent->im_rev = hub->hub_info.revision;
	hub_invent->im_speed = hub->hub_speed;
	hwgraph_info_add_LBL(hubv, INFO_LBL_DETAIL_INVENT,
			     (arbitrary_info_t) hub_invent);
	hwgraph_info_export_LBL(hubv, INFO_LBL_DETAIL_INVENT,
				sizeof(invent_miscinfo_t));
}
/* ARGSUSED */
/*
 * klhwg_add_hub - create the EDGE_LBL_HUB vertex beneath a node vertex,
 * make the node its device master, and register an EDGE_LBL_PERFMON
 * char device (backed by shub_mon_fops) carrying the cnode as its
 * private data.
 * NOTE(review): 'rc' and 'hub_mon' are assigned but never checked.
 */
void
klhwg_add_hub(devfs_handle_t node_vertex, klhub_t *hub, cnodeid_t cnode)
{
	devfs_handle_t myhubv;
	devfs_handle_t hub_mon;
	int rc;
	extern struct file_operations shub_mon_fops;

	GRPRINTF(("klhwg_add_hub: adding %s\n", EDGE_LBL_HUB));

	(void) hwgraph_path_add(node_vertex, EDGE_LBL_HUB, &myhubv);
	rc = device_master_set(myhubv, node_vertex);
	hub_mon = hwgraph_register(myhubv, EDGE_LBL_PERFMON,
			0, DEVFS_FL_AUTO_DEVNUM,
			0, 0,
			S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
			&shub_mon_fops, (void *)(long)cnode);
}
/* ARGSUSED */
/*
 * Add a vertex under "disabled/cpu" for a cpu that is present but
 * disabled, and attach its inventory and diag information.
 */
void
klhwg_add_disabled_cpu(devfs_handle_t node_vertex, cnodeid_t cnode, klcpu_t *cpu, slotid_t slot)
{
	devfs_handle_t cpu_vhdl;
	char path[120];
	cpuid_t cpuid;
	nasid_t nasid;

	nasid = COMPACT_TO_NASID_NODEID(cnode);
	cpuid = nasid_slice_to_cpuid(nasid, cpu->cpu_info.physid);

	/* nothing to add if the slice has no cpuid */
	if (cpuid == -1)
		return;

	sprintf(path, "%s/%s/%c", EDGE_LBL_DISABLED, EDGE_LBL_CPU,
		'a' + cpu->cpu_info.physid);
	(void) hwgraph_path_add(node_vertex, path, &cpu_vhdl);
	mark_cpuvertex_as_cpu(cpu_vhdl, cpuid);
	device_master_set(cpu_vhdl, node_vertex);
	klhwg_disabled_cpu_invent_info(cpu_vhdl, cnode, cpu, slot);
}
/* ARGSUSED */
/*
 * Add a vertex for an enabled cpu under the node's cpubus directory,
 * alias it under the node's "cpu" directory, and attach inventory info.
 */
void
klhwg_add_cpu(devfs_handle_t node_vertex, cnodeid_t cnode, klcpu_t *cpu)
{
	devfs_handle_t cpu_vhdl, cpudir;
	char path[120];
	cpuid_t cpuid;
	nasid_t nasid;

	nasid = COMPACT_TO_NASID_NODEID(cnode);
	cpuid = nasid_slice_to_cpuid(nasid, cpu->cpu_info.physid);

	sprintf(path, "%s/%d/%c",
		EDGE_LBL_CPUBUS,
		0,
		'a' + cpu->cpu_info.physid);

	GRPRINTF(("klhwg_add_cpu: adding %s to vertex 0x%p\n", path, node_vertex));
	(void) hwgraph_path_add(node_vertex, path, &cpu_vhdl);
	mark_cpuvertex_as_cpu(cpu_vhdl, cpuid);
	device_master_set(cpu_vhdl, node_vertex);

	/* Add an alias under the node's CPU directory */
	if (hwgraph_edge_get(node_vertex, EDGE_LBL_CPU, &cpudir) == GRAPH_SUCCESS) {
		sprintf(path, "%c", 'a' + cpu->cpu_info.physid);
		(void) hwgraph_edge_add(cpudir, cpu_vhdl, path);
	}

	klhwg_cpu_invent_info(cpu_vhdl, cnode, cpu);
}
/*
 * klhwg_add_xbow - for each hub hanging off this node's crossbow, add an
 * "xtalk" vertex under that hub, record it in the hub's nodepda, and
 * remember the peer nasid when the hub sits on a different node.
 */
void
klhwg_add_xbow(cnodeid_t cnode, nasid_t nasid)
{
	lboard_t *brd;
	klxbow_t *xbow_p;
	nasid_t hub_nasid;
	cnodeid_t hub_cnode;
	int widgetnum;
	devfs_handle_t xbow_v, hubv;
	/*REFERENCED*/
	graph_error_t err;

	/* nothing to do without a (non-duplicate) xbow board here */
	if ((brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IOBRICK_XBOW)) == NULL)
		return;

	if (KL_CONFIG_DUPLICATE_BOARD(brd))
		return;

	GRPRINTF(("klhwg_add_xbow: adding cnode %d nasid %d xbow edges\n",
		cnode, nasid));

	if ((xbow_p = (klxbow_t *)find_component(brd, NULL, KLSTRUCT_XBOW))
	    == NULL)
		return;

	/* walk every xbow widget port that has a hub behind it */
	for (widgetnum = HUB_WIDGET_ID_MIN; widgetnum <= HUB_WIDGET_ID_MAX; widgetnum++) {
		if (!XBOW_PORT_TYPE_HUB(xbow_p, widgetnum))
			continue;

		hub_nasid = XBOW_PORT_NASID(xbow_p, widgetnum);
		if (hub_nasid == INVALID_NASID) {
			printk(KERN_WARNING "hub widget %d, skipping xbow graph\n", widgetnum);
			continue;
		}

		hub_cnode = NASID_TO_COMPACT_NODEID(hub_nasid);

		/* skip hubs outside the configured maxnodes limit */
		if (is_specified(arg_maxnodes) && hub_cnode == INVALID_CNODEID) {
			continue;
		}

		hubv = cnodeid_to_vertex(hub_cnode);

		err = hwgraph_path_add(hubv, EDGE_LBL_XTALK, &xbow_v);
		if (err != GRAPH_SUCCESS) {
			if (err == GRAPH_DUP)
				printk(KERN_WARNING "klhwg_add_xbow: Check for "
					"working routers and router links!");

			PRINT_PANIC("klhwg_add_xbow: Failed to add "
				"edge: vertex 0x%p to vertex 0x%p,"
				"error %d\n",
				(void *)hubv, (void *)xbow_v, err);
		}
		xswitch_vertex_init(xbow_v);

		NODEPDA(hub_cnode)->xbow_vhdl = xbow_v;

		/*
		 * XXX - This won't work if we ever hook up two hubs
		 * by crosstown through a crossbow.
		 */
		if (hub_nasid != nasid) {
			/* remember the nasid on the far side of the xbow */
			NODEPDA(hub_cnode)->xbow_peer = nasid;
			NODEPDA(NASID_TO_COMPACT_NODEID(nasid))->xbow_peer =
				hub_nasid;
		}

		GRPRINTF(("klhwg_add_xbow: adding port nasid %d %s to vertex 0x%p\n",
			hub_nasid, EDGE_LBL_XTALK, hubv));
	}
}
/* ARGSUSED */
/*
 * klhwg_add_node - create hwgraph vertices for every node board in
 * cnode's klconfig: the node vertex itself, a "cpu" directory, one
 * vertex per cpu (enabled or disabled), and the hub vertex.  For
 * enabled boards the nodepda is filled in (graph name, hubinfo, slot,
 * geoid, module).  'gdap' is unused here.
 */
void
klhwg_add_node(devfs_handle_t hwgraph_root, cnodeid_t cnode, gda_t *gdap)
{
	nasid_t nasid;
	lboard_t *brd;
	klhub_t *hub;
	devfs_handle_t node_vertex = NULL;
	char path_buffer[100];
	int rv;
	char *s;
	int board_disabled = 0;
	klcpu_t *cpu;

	nasid = COMPACT_TO_NASID_NODEID(cnode);
	brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_SNIA);
	GRPRINTF(("klhwg_add_node: Adding cnode %d, nasid %d, brd 0x%p\n",
		cnode, nasid, brd));
	ASSERT(brd);

	/* loop over every SNIA node board recorded on this node */
	do {
		devfs_handle_t cpu_dir;

		/* Generate a hardware graph path for this board. */
		board_to_path(brd, path_buffer);

		GRPRINTF(("klhwg_add_node: adding %s to vertex 0x%p\n",
			path_buffer, hwgraph_root));
		rv = hwgraph_path_add(hwgraph_root, path_buffer, &node_vertex);

		if (rv != GRAPH_SUCCESS)
			PRINT_PANIC("Node vertex creation failed. "
				"Path == %s",
				path_buffer);

		hub = (klhub_t *)find_first_component(brd, KLSTRUCT_HUB);
		ASSERT(hub);
		if(hub->hub_info.flags & KLINFO_ENABLE)
			board_disabled = 0;
		else
			board_disabled = 1;

		/* nodepda bookkeeping only makes sense for enabled boards */
		if(!board_disabled) {
			mark_nodevertex_as_node(node_vertex,
				cnode + board_disabled * numnodes);

			/* remember this node's hwgraph name in the nodepda */
			s = dev_to_name(node_vertex, path_buffer, sizeof(path_buffer));
			NODEPDA(cnode)->hwg_node_name =
				kmalloc(strlen(s) + 1,
					GFP_KERNEL);
			ASSERT_ALWAYS(NODEPDA(cnode)->hwg_node_name != NULL);
			strcpy(NODEPDA(cnode)->hwg_node_name, s);

			hubinfo_set(node_vertex, NODEPDA(cnode)->pdinfo);

			/* Set up node board's slot */
			NODEPDA(cnode)->slotdesc = brd->brd_slot;

			/* Set up the module we're in */
			NODEPDA(cnode)->geoid = brd->brd_geoid;
			NODEPDA(cnode)->module = module_lookup(geo_module(brd->brd_geoid));
		}

		/* Get the first CPU structure */
		cpu = (klcpu_t *)find_first_component(brd, KLSTRUCT_CPU);

		/*
		 * If there's at least 1 CPU, add a "cpu" directory to represent
		 * the collection of all CPUs attached to this node.
		 */
		if (cpu) {
			graph_error_t rv;

			rv = hwgraph_path_add(node_vertex, EDGE_LBL_CPU, &cpu_dir);
			if (rv != GRAPH_SUCCESS)
				panic("klhwg_add_node: Cannot create CPU directory\n");
		}

		/* Add each CPU */
		while (cpu) {
			cpuid_t cpu_id;
			cpu_id = nasid_slice_to_cpuid(nasid,cpu->cpu_info.physid);
			if (cpu_enabled(cpu_id))
				klhwg_add_cpu(node_vertex, cnode, cpu);
			else
				klhwg_add_disabled_cpu(node_vertex, cnode, cpu, brd->brd_slot);

			cpu = (klcpu_t *)
			    find_component(brd, (klinfo_t *)cpu, KLSTRUCT_CPU);
		} /* while */

		if(!board_disabled)
			klhwg_add_hub(node_vertex, hub, cnode);

		/* advance to the next SNIA board on this node, if any */
		brd = KLCF_NEXT(brd);
		if (brd)
			brd = find_lboard(brd, KLTYPE_SNIA);
		else
			break;
	} while(brd);
}
/* ARGSUSED */
/*
 * klhwg_add_all_routers - walk every node's klconfig and create a
 * hwgraph vertex for each non-duplicate router board found there.
 * Router-to-router edges are added later by klhwg_connect_routers.
 */
void
klhwg_add_all_routers(devfs_handle_t hwgraph_root)
{
	nasid_t nasid;
	cnodeid_t cnode;
	lboard_t *brd;
	devfs_handle_t node_vertex;
	char path_buffer[100];
	int rv;

	for (cnode = 0; cnode < numnodes; cnode++) {
		nasid = COMPACT_TO_NASID_NODEID(cnode);

		GRPRINTF(("klhwg_add_all_routers: adding router on cnode %d\n",
			cnode));

		brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
				KLTYPE_ROUTER);

		if (!brd)
			/* No routers stored in this node's memory */
			continue;

		do {
			ASSERT(brd);
			GRPRINTF(("Router board struct is %p\n", brd));

			/* Don't add duplicate boards.  (The 'continue'
			 * re-evaluates the while clause below, which
			 * advances brd to the next router board.) */
			if (brd->brd_flags & DUPLICATE_BOARD)
				continue;
			GRPRINTF(("Router 0x%p module number is %d\n", brd, brd->brd_geoid));
			/* Generate a hardware graph path for this board. */
			board_to_path(brd, path_buffer);

			GRPRINTF(("Router path is %s\n", path_buffer));

			/* Add the router */
			GRPRINTF(("klhwg_add_all_routers: adding %s to vertex 0x%p\n",
				path_buffer, hwgraph_root));
			rv = hwgraph_path_add(hwgraph_root, path_buffer, &node_vertex);

			if (rv != GRAPH_SUCCESS)
				PRINT_PANIC("Router vertex creation "
					"failed. Path == %s",
					path_buffer);

			GRPRINTF(("klhwg_add_all_routers: get next board from 0x%p\n",
				brd));

		/* Find the rest of the routers stored on this node. */
		} while ( (brd = find_lboard_class(KLCF_NEXT(brd),
			 KLTYPE_ROUTER)) );

		GRPRINTF(("klhwg_add_all_routers: Done.\n"));
	}
}
/* ARGSUSED */
/*
 * klhwg_connect_one_router - add numbered edges from a router's hwgraph
 * vertex to the vertex of whatever board sits on the far side of each
 * active router port.
 *
 * NOTE(review): when hwgraph_traverse fails to find the router and
 * arg_maxnodes is NOT specified, only a warning is printed and
 * router_hndl is used uninitialized in the loop below - confirm whether
 * that path can actually occur.
 */
void
klhwg_connect_one_router(devfs_handle_t hwgraph_root, lboard_t *brd,
                         cnodeid_t cnode, nasid_t nasid)
{
	klrou_t *router;
	char path_buffer[50];
	char dest_path[50];
	devfs_handle_t router_hndl;
	devfs_handle_t dest_hndl;
	int rc;
	int port;
	lboard_t *dest_brd;

	GRPRINTF(("klhwg_connect_one_router: Connecting router on cnode %d\n",
		cnode));

	/* Don't add duplicate boards. */
	if (brd->brd_flags & DUPLICATE_BOARD) {
		GRPRINTF(("klhwg_connect_one_router: Duplicate router 0x%p on cnode %d\n",
			brd, cnode));
		return;
	}

	/* Generate a hardware graph path for this board. */
	board_to_path(brd, path_buffer);

	rc = hwgraph_traverse(hwgraph_root, path_buffer, &router_hndl);

	if (rc != GRAPH_SUCCESS && is_specified(arg_maxnodes))
		return;

	if (rc != GRAPH_SUCCESS)
		printk(KERN_WARNING "Can't find router: %s", path_buffer);

	/* We don't know what to do with multiple router components */
	if (brd->brd_numcompts != 1) {
		PRINT_PANIC("klhwg_connect_one_router: %d cmpts on router\n",
			brd->brd_numcompts);
		return;
	}

	/* Convert component 0 to klrou_t ptr */
	router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd),
					      brd->brd_compts[0]);

	for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
		/* See if the port's active */
		if (router->rou_port[port].port_nasid == INVALID_NASID) {
			GRPRINTF(("klhwg_connect_one_router: port %d inactive.\n",
				port));
			continue;
		}

		/* skip ports leading outside the configured node limit */
		if (is_specified(arg_maxnodes) && NASID_TO_COMPACT_NODEID(router->rou_port[port].port_nasid)
		    == INVALID_CNODEID) {
			continue;
		}

		dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
				router->rou_port[port].port_nasid,
				router->rou_port[port].port_offset);

		/* Generate a hardware graph path for this board. */
		board_to_path(dest_brd, dest_path);

		rc = hwgraph_traverse(hwgraph_root, dest_path, &dest_hndl);

		if (rc != GRAPH_SUCCESS) {
			if (is_specified(arg_maxnodes) && KL_CONFIG_DUPLICATE_BOARD(dest_brd))
				continue;
			PRINT_PANIC("Can't find router: %s", dest_path);
		}
		GRPRINTF(("klhwg_connect_one_router: Link from %s/%d to %s\n",
			path_buffer, port, dest_path));

		/* the edge name is simply the port number */
		sprintf(dest_path, "%d", port);

		rc = hwgraph_edge_add(router_hndl, dest_hndl, dest_path);

		if (rc == GRAPH_DUP) {
			GRPRINTF(("Skipping port %d. nasid %d %s/%s\n",
				port, router->rou_port[port].port_nasid,
				path_buffer, dest_path));
			continue;
		}

		if (rc != GRAPH_SUCCESS && !is_specified(arg_maxnodes))
			PRINT_PANIC("Can't create edge: %s/%s to vertex 0x%p error 0x%x\n",
				path_buffer, dest_path, (void *)dest_hndl, rc);
	}
}
/*
 * Walk every node's klconfig and connect each router board found there
 * to its link partners (see klhwg_connect_one_router).
 */
void
klhwg_connect_routers(devfs_handle_t hwgraph_root)
{
	cnodeid_t cnode;

	for (cnode = 0; cnode < numnodes; cnode++) {
		nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
		lboard_t *brd;

		GRPRINTF(("klhwg_connect_routers: Connecting routers on cnode %d\n",
			cnode));

		brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
				KLTYPE_ROUTER);

		while (brd) {
			klhwg_connect_one_router(hwgraph_root, brd,
						 cnode, nasid);
			/* Find the rest of the routers stored on this node. */
			brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER);
		}
	}
}
/*
 * klhwg_connect_hubs - for every node, add an "interconnect" directory
 * under the hub board's vertex and link each active NI port to the
 * vertex of the board at the far end of that port.
 */
void
klhwg_connect_hubs(devfs_handle_t hwgraph_root)
{
	nasid_t nasid;
	cnodeid_t cnode;
	lboard_t *brd;
	klhub_t *hub;
	lboard_t *dest_brd;
	devfs_handle_t hub_hndl;
	devfs_handle_t dest_hndl;
	char path_buffer[50];
	char dest_path[50];
	graph_error_t rc;
	int port;

	for (cnode = 0; cnode < numnodes; cnode++) {
		nasid = COMPACT_TO_NASID_NODEID(cnode);

		GRPRINTF(("klhwg_connect_hubs: Connecting hubs on cnode %d\n",
			cnode));

		brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_SNIA);
		ASSERT(brd);

		hub = (klhub_t *)find_first_component(brd, KLSTRUCT_HUB);
		ASSERT(hub);

		for (port = 1; port <= MAX_NI_PORTS; port++) {
			/* See if the port's active */
			if (hub->hub_port[port].port_nasid == INVALID_NASID) {
				GRPRINTF(("klhwg_connect_hubs: port inactive.\n"));
				continue;
			}

			/* skip ports leading outside the maxnodes limit */
			if (is_specified(arg_maxnodes) && NASID_TO_COMPACT_NODEID(hub->hub_port[port].port_nasid) == INVALID_CNODEID)
				continue;

			/* Generate a hardware graph path for this board. */
			board_to_path(brd, path_buffer);

			GRPRINTF(("klhwg_connect_hubs: Hub path is %s.\n", path_buffer));
			rc = hwgraph_traverse(hwgraph_root, path_buffer, &hub_hndl);

			if (rc != GRAPH_SUCCESS)
				printk(KERN_WARNING "Can't find hub: %s", path_buffer);

			dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
					hub->hub_port[port].port_nasid,
					hub->hub_port[port].port_offset);

			/* Generate a hardware graph path for this board. */
			board_to_path(dest_brd, dest_path);

			rc = hwgraph_traverse(hwgraph_root, dest_path, &dest_hndl);

			if (rc != GRAPH_SUCCESS) {
				if (is_specified(arg_maxnodes) && KL_CONFIG_DUPLICATE_BOARD(dest_brd))
					continue;
				PRINT_PANIC("Can't find board: %s", dest_path);
			} else {
				char buf[1024];

				GRPRINTF(("klhwg_connect_hubs: Link from %s to %s.\n",
					path_buffer, dest_path));

				/* edge is "<hub path>/interconnect/<port>" */
				rc = hwgraph_path_add(hub_hndl, EDGE_LBL_INTERCONNECT, &hub_hndl);

				sprintf(buf,"%s/%s",path_buffer,EDGE_LBL_INTERCONNECT);
				rc = hwgraph_traverse(hwgraph_root, buf, &hub_hndl);
				sprintf(buf,"%d",port);
				rc = hwgraph_edge_add(hub_hndl, dest_hndl, buf);

				if (rc != GRAPH_SUCCESS)
					PRINT_PANIC("Can't create edge: %s/%s to vertex 0x%p, error 0x%x\n",
						path_buffer, dest_path, (void *)dest_hndl, rc);
			}
		}
	}
}
/* Store the pci/vme disabled board information as extended administrative
 * hints which can later be used by the drivers using the device/driver
 * admin interface.
 */
void
klhwg_device_disable_hints_add(void)
{
	cnodeid_t	cnode;		/* node we are looking at */
	nasid_t		nasid;		/* nasid of the node */
	lboard_t	*board;		/* board we are looking at */
	int		comp_index;	/* component index */
	klinfo_t	*component;	/* component in the board we are
					 * looking at
					 */
	char		device_name[MAXDEVNAME];

	for (cnode = 0; cnode < numnodes; cnode++) {
		nasid = COMPACT_TO_NASID_NODEID(cnode);
		board = (lboard_t *)KL_CONFIG_INFO(nasid);
		/* Check out all the board info stored on a node */
		while (board) {
			/* No need to look at duplicate boards or non-io
			 * boards
			 */
			if (KL_CONFIG_DUPLICATE_BOARD(board) ||
			    KLCLASS(board->brd_type) != KLCLASS_IO) {
				board = KLCF_NEXT(board);
				continue;
			}
			/* Check out all the components of a board */
			for (comp_index = 0;
			     comp_index < KLCF_NUM_COMPS(board);
			     comp_index++) {
				component = KLCF_COMP(board, comp_index);
				/* If the component is enabled move on to
				 * the next component
				 */
				if (KLCONFIG_INFO_ENABLED(component))
					continue;
				/* NOTE : Since the prom only supports
				 * the disabling of pci devices the following
				 * piece of code makes sense.
				 * Make sure that this assumption is valid
				 */
				/* This component is disabled. Store this
				 * hint in the extended device admin table
				 */
				/* Get the canonical name of the pci device */
				device_component_canonical_name_get(board,
								    component,
								    device_name);
#ifdef DEBUG
				/* printf() does not exist in the kernel;
				 * use printk so DEBUG builds compile */
				printk("%s DISABLED\n", device_name);
#endif
			}
			/* go to the next board info stored on this
			 * node
			 */
			board = KLCF_NEXT(board);
		}
	}
}
/*
 * Create a "module/<id>" vertex for every known module, stash the
 * module struct as the vertex fastinfo, and add the L1 system
 * controller vertex (tagged with INFO_LBL_ELSC) underneath it.
 */
void
klhwg_add_all_modules(devfs_handle_t hwgraph_root)
{
	cmoduleid_t cm;
	char path[128];
	char mod_name[16];
	devfs_handle_t module_vhdl;
	devfs_handle_t l1_vhdl;
	int rc;

	/* Add devices under each module */
	for (cm = 0; cm < nummodules; cm++) {
		/* Use module as module vertex fastinfo */
		memset(mod_name, 0, sizeof(mod_name));
		format_module_id(mod_name, modules[cm]->id, MODULE_FORMAT_BRIEF);
		sprintf(path, EDGE_LBL_MODULE "/%s", mod_name);

		rc = hwgraph_path_add(hwgraph_root, path, &module_vhdl);
		ASSERT(rc == GRAPH_SUCCESS);
		rc = rc;	/* silence "unused" when ASSERT compiles out */

		hwgraph_fastinfo_set(module_vhdl, (arbitrary_info_t) modules[cm]);

		/* Add system controller */
		sprintf(path, EDGE_LBL_MODULE "/%s/" EDGE_LBL_L1, mod_name);

		rc = hwgraph_path_add(hwgraph_root, path, &l1_vhdl);
		ASSERT_ALWAYS(rc == GRAPH_SUCCESS);
		rc = rc;

		hwgraph_info_add_LBL(l1_vhdl,
				     INFO_LBL_ELSC,
				     (arbitrary_info_t) (__psint_t) 1);
	}
}
/*
 * Populate the hardware graph for the whole system: node vertices
 * first, then crossbows, router vertices, router links, hub links, and
 * finally the disabled-device hints.
 */
void
klhwg_add_all_nodes(devfs_handle_t hwgraph_root)
{
	cnodeid_t node;

	for (node = 0; node < numnodes; node++)
		klhwg_add_node(hwgraph_root, node, NULL);

	for (node = 0; node < numnodes; node++)
		klhwg_add_xbow(node, cnodeid_to_nasid(node));

	/*
	 * As for router hardware inventory information, we set this
	 * up in router.c.
	 */
	klhwg_add_all_routers(hwgraph_root);
	klhwg_connect_routers(hwgraph_root);
	klhwg_connect_hubs(hwgraph_root);

	/* Go through the entire system's klconfig
	 * to figure out which pci components have been disabled
	 */
	klhwg_device_disable_hints_add();
}
/* $Id$
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
/* In general, this file is organized in a hierarchy from lower-level
* to higher-level layers, as follows:
*
* UART routines
* Bedrock/L1 "PPP-like" protocol implementation
* System controller "message" interface (allows multiplexing
* of various kinds of requests and responses with
* console I/O)
* Console interface:
* "l1_cons", the glue that allows the L1 to act
* as the system console for the stdio libraries
*
* Routines making use of the system controller "message"-style interface
* can be found in l1_command.c.
*/
#include <linux/types.h>
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <asm/sn/sgi.h>
#include <asm/sn/io.h>
#include <asm/sn/iograph.h>
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/hcl_util.h>
#include <asm/sn/labelcl.h>
#include <asm/sn/eeprom.h>
#include <asm/sn/router.h>
#include <asm/sn/module.h>
#include <asm/sn/ksys/l1.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/clksupport.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/uart16550.h>
#include <asm/sn/simulator.h>
#define UART_BAUD_RATE 57600
/* Return the (fixed) baud rate used for the L1 system controller uart. */
int
get_L1_baud(void)
{
	return UART_BAUD_RATE;
}
/* Return the current interrupt level.  Always 0 here: interrupt hookup
 * for the L1 console is stubbed out on sn2 (see l1_connect_intr). */
int
l1_get_intr_value( void )
{
	return 0;
}
/* Disconnect the callup functions - throw away interrupts.
 * Intentionally empty: nothing is ever connected (see l1_connect_intr). */
void
l1_unconnect_intr(void)
{
}
/* Set up uart interrupt handling for this node's uart.
 * Currently a stub on sn2 - the console is driven through SAL calls,
 * so no interrupt wiring is performed.  The #if 0 block below sketches
 * what an interrupt-driven implementation would look like. */
void
l1_connect_intr(void *rx_notify, void *tx_notify)
{
#if 0
	// Will need code here for sn2 - something like this
	console_nodepda = NODEPDA(NASID_TO_COMPACT_NODEID(get_master_nasid());
	intr_connect_level(console_nodepda->node_first_cpu,
				SGI_UART_VECTOR, INTPEND0_MAXMASK,
				dummy_intr_func);
	request_irq(SGI_UART_VECTOR | (console_nodepda->node_first_cpu << 8),
			intr_func, SA_INTERRUPT | SA_SHIRQ,
			"l1_protocol_driver", (void *)sc);
#endif
}
/* These are functions to use from serial_in/out when in protocol
 * mode to send and receive uart control regs. These are external
 * interfaces into the protocol driver.
 */
/*
 * Write a uart control register.  Only takes effect under the
 * simulator, where the uart is memory-mapped; on real hardware the
 * write is quietly ignored.
 */
void
l1_control_out(int offset, int value)
{
	extern u64 master_node_bedrock_address;

	/* quietly ignore unless simulator */
	if ( !IS_RUNNING_ON_SIMULATOR() )
		return;

	if ( master_node_bedrock_address != (u64)0 ) {
		writeb(value, (unsigned long)master_node_bedrock_address +
		       (offset << 3));
	}
}
/* Console input exported interface. Return a register value. */
/* Polled variant - identical to l1_control_in; both defer to the local
 * helper below. */
int
l1_control_in_polled(int offset)
{
	static int l1_control_in_local(int);

	return(l1_control_in_local(offset));
}
/* Read a uart control register (external interface; defers to
 * l1_control_in_local). */
int
l1_control_in(int offset)
{
	static int l1_control_in_local(int);

	return(l1_control_in_local(offset));
}
/*
 * Synthesize a 16550 register read for the L1 console.  Under the
 * simulator the memory-mapped uart is read directly.  On hardware only
 * REG_LSR is emulated: the transmitter always reads as empty, and the
 * receive bit (LSR_RCA) is set when SAL reports pending console input.
 * All other registers read as 0.
 */
static int
l1_control_in_local(int offset)
{
	int sal_call_status = 0, input;
	int ret = 0;

	if ( IS_RUNNING_ON_SIMULATOR() ) {
		extern u64 master_node_bedrock_address;
		ret = readb((unsigned long)master_node_bedrock_address +
			(offset<< 3));
		return(ret);
	}
	if ( offset == REG_LSR ) {
		ret = (LSR_XHRE | LSR_XSRE); /* can send anytime */
		sal_call_status = ia64_sn_console_check(&input);
		if ( !sal_call_status && input ) {
			/* input pending */
			ret |= LSR_RCA;
		}
	}
	return(ret);
}
/*
 * Console input exported interface. Return a character (if one is available)
 */
/* Polled read of one console character; defers to l1_serial_in_local. */
int
l1_serial_in_polled(void)
{
	static int l1_serial_in_local(void);

	return(l1_serial_in_local());
}
/* Read one character from the console.  Under the simulator the mapped
 * uart data register is read directly; otherwise defer to
 * l1_serial_in_local (SAL-based). */
int
l1_serial_in(void)
{
	static int l1_serial_in_local(void);

	if ( IS_RUNNING_ON_SIMULATOR() ) {
		extern u64 master_node_bedrock_address;
		return(readb((unsigned long)master_node_bedrock_address + (REG_DAT<< 3)));
	}
	return(l1_serial_in_local());
}
/* Fetch one console character: from the simulator's mapped uart when
 * running simulated (needed for callers entering via
 * l1_serial_in_polled), otherwise via SAL.  Returns 0 when nothing is
 * available. */
static int
l1_serial_in_local(void)
{
	int ch;

	if ( IS_RUNNING_ON_SIMULATOR() ) {
		extern u64 master_node_bedrock_address;
		return(readb((unsigned long)master_node_bedrock_address + (REG_DAT<< 3)));
	}

	if ( !(ia64_sn_console_getc(&ch)) )
		return(ch);
	else
		return(0);
}
/* Console output exported interface. Write 'len' bytes from 'str' to
 * the console.  Returns the number of characters accepted, or 0 if the
 * write could not be performed. */
int
l1_serial_out( char *str, int len )
{
	int counter = len;

	/* Ignore empty messages */
	if ( len == 0 )
		return(len);

#if defined(CONFIG_IA64_EARLY_PRINTK)
	/* Need to setup SAL calls so the PROM calls will work */
	{
		static int inited;
		void early_sn_setup(void);
		if(!inited) {
			inited=1;
			early_sn_setup();
		}
	}
#endif

	if ( IS_RUNNING_ON_SIMULATOR() ) {
		extern u64 master_node_bedrock_address;
		void early_sn_setup(void);
		if (!master_node_bedrock_address)
			early_sn_setup();
		if ( master_node_bedrock_address != (u64)0 ) {
#ifdef FLAG_DIRECT_CONSOLE_WRITES
			/* This is an easy way to pre-pend the output to know whether the output
			 * was done via sal or directly */
			writeb('[', (unsigned long)master_node_bedrock_address + (REG_DAT<< 3));
			writeb('+', (unsigned long)master_node_bedrock_address + (REG_DAT<< 3));
			writeb(']', (unsigned long)master_node_bedrock_address + (REG_DAT<< 3));
			writeb(' ', (unsigned long)master_node_bedrock_address + (REG_DAT<< 3));
#endif /* FLAG_DIRECT_CONSOLE_WRITES */
			while ( counter > 0 ) {
				writeb(*str, (unsigned long)master_node_bedrock_address + (REG_DAT<< 3));
				counter--;
				str++;
			}
		}
		return(len);
	}

	/* Attempt to write things out thru the sal */
	if ( ia64_sn_console_putb(str, len) )
		return(0);

	/* SAL accepted the whole buffer.  The old code returned
	 * (len - counter) here, but counter is never decremented on this
	 * path, so success was always misreported as 0 bytes written. */
	return(len);
}
/* $Id$
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/sn/sgi.h>
#include <asm/sn/io.h>
#include <asm/sn/iograph.h>
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/hcl_util.h>
#include <asm/sn/labelcl.h>
#include <asm/sn/eeprom.h>
#include <asm/sn/router.h>
#include <asm/sn/module.h>
#include <asm/sn/ksys/l1.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/clksupport.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/sn_sal.h>
#include <linux/ctype.h>
#define ELSC_TIMEOUT 1000000 /* ELSC response timeout (usec) */
#define LOCK_TIMEOUT 5000000 /* Hub lock timeout (usec) */
#define hub_cpu_get() 0
#define LBYTE(caddr) (*(char *) caddr)
extern char *bcopy(const char * src, char * dest, int count);
#define LDEBUG 0
/*
* ELSC data is in NVRAM page 7 at the following offsets.
*/
#define NVRAM_MAGIC_AD 0x700 /* magic number used for init */
#define NVRAM_PASS_WD 0x701 /* password (4 bytes in length) */
#define NVRAM_DBG1 0x705 /* virtual XOR debug switches */
#define NVRAM_DBG2 0x706 /* physical XOR debug switches */
#define NVRAM_CFG 0x707 /* ELSC Configuration info */
#define NVRAM_MODULE 0x708 /* system module number */
#define NVRAM_BIST_FLG 0x709 /* BIST flags (2 bits per nodeboard) */
#define NVRAM_PARTITION 0x70a /* module's partition id */
#define NVRAM_DOMAIN 0x70b /* module's domain id */
#define NVRAM_CLUSTER 0x70c /* module's cluster id */
#define NVRAM_CELL 0x70d /* module's cellid */
#define NVRAM_MAGIC_NO 0x37 /* value of magic number */
#define NVRAM_SIZE 16 /* 16 bytes in nvram */
/* elsc_display_line writes up to 12 characters to either the top or bottom
* line of the L1 display. line points to a buffer containing the message
* to be displayed. The zero-based line number is specified by lnum (so
* lnum == 0 specifies the top line and lnum == 1 specifies the bottom).
* Lines longer than 12 characters, or line numbers not less than
* L1_DISPLAY_LINES, cause elsc_display_line to return an error.
*/
/*
 * elsc_display_line - write a line to the L1 front-panel display.
 *
 * Stub in this port: nothing is sent to the display and success (0) is
 * always reported.  The nasid/line/lnum arguments are accepted but
 * ignored (see the contract described in the comment above).
 */
int elsc_display_line(nasid_t nasid, char *line, int lnum)
{
	return 0;
}
/*
* iobrick routines
*/
/* iobrick_rack_bay_type_get fills in the three int * arguments with the
* rack number, bay number and brick type of the L1 being addressed. Note
* that if the L1 operation fails and this function returns an error value,
* garbage may be written to brick_type.
*/
/*
 * iobrick_rack_bay_type_get - query the L1 for this brick's location.
 *
 * Fills in *rack, *bay and *brick_type (upper-cased character code) for
 * the L1 addressed by 'nasid'.  Returns 0 on success or
 * ELSC_ERROR_CMD_SEND if the system controller request fails; on
 * failure the output values may contain garbage (see comment above).
 */
int iobrick_rack_bay_type_get( nasid_t nasid, uint *rack,
			       uint *bay, uint *brick_type )
{
	int mod_info = 0;

	/* Ask the system controller for the packed module address. */
	if ( ia64_sn_sysctl_iobrick_module_get(nasid, &mod_info) )
		return ELSC_ERROR_CMD_SEND;

	/* Unpack the rack, bay and brick-type fields. */
	*rack = (mod_info & L1_ADDR_RACK_MASK) >> L1_ADDR_RACK_SHFT;
	*bay = (mod_info & L1_ADDR_BAY_MASK) >> L1_ADDR_BAY_SHFT;
	*brick_type = toupper((mod_info & L1_ADDR_TYPE_MASK) >> L1_ADDR_TYPE_SHFT);
	return 0;
}
/*
 * iomoduleid_get - return the raw module id for the brick at 'nasid',
 * or ELSC_ERROR_CMD_SEND if the system controller request fails.
 */
int iomoduleid_get(nasid_t nasid)
{
	int mod_id = 0;

	return ia64_sn_sysctl_iobrick_module_get(nasid, &mod_id)
		? ELSC_ERROR_CMD_SEND
		: mod_id;
}
/*
 * iobrick_module_get - build a moduleid_t for the brick at 'nasid'.
 *
 * Queries the L1 for rack/bay/brick-type and folds them into a
 * moduleid_t via RBT_TO_MODULE().  Returns the module id, or a
 * negative/ELSC error code when the location cannot be obtained or a
 * field does not fit its moduleid_t bit-field.
 */
int iobrick_module_get(nasid_t nasid)
{
	uint rnum, rack, bay, brick_type, digit;
	int rv;

	/* construct module ID from rack and slot info */
	rv = iobrick_rack_bay_type_get(nasid, &rnum, &bay, &brick_type);
	if (rv < 0)
		return rv;

	if (bay > MODULE_BPOS_MASK >> MODULE_BPOS_SHFT)
		return ELSC_ERROR_MODULE;

	/*
	 * Fold the decimal rack number into a moduleid_t rack field:
	 * hundreds digit = rack class (CPU/IO), tens digit = group,
	 * units digit = one-based rack number.
	 */
	rack = 0;

	digit = rnum / 100;
	if (digit > RACK_CLASS_MASK(rack) >> RACK_CLASS_SHFT(rack))
		return ELSC_ERROR_MODULE;
	RACK_ADD_CLASS(rack, digit);

	rnum %= 100;
	digit = rnum / 10;
	if (digit > RACK_GROUP_MASK(rack) >> RACK_GROUP_SHFT(rack))
		return ELSC_ERROR_MODULE;
	RACK_ADD_GROUP(rack, digit);

	/* one-based: digit==0 wraps to a huge unsigned value and is rejected */
	digit = rnum % 10;
	if (digit - 1 > RACK_NUM_MASK(rack) >> RACK_NUM_SHFT(rack))
		return ELSC_ERROR_MODULE;
	RACK_ADD_NUM(rack, digit);

	/* Translate the L1's character code into a module.h brick type;
	 * unrecognized codes are passed through unchanged. */
	if (brick_type == 'I')
		brick_type = MODULE_IBRICK;
	else if (brick_type == 'P')
		brick_type = MODULE_PBRICK;
	else if (brick_type == 'X')
		brick_type = MODULE_XBRICK;

	return RBT_TO_MODULE(rack, bay, brick_type);
}
/*
* iobrick_module_get_nasid() returns a module_id which has the brick
* type encoded in bits 15-12, but this is not the true brick type...
* The module_id returned by iobrick_module_get_nasid() is modified
* to make a PEBRICKs & PXBRICKs look like a PBRICK. So this routine
* iobrick_type_get_nasid() returns the true unmodified brick type.
*/
int
iobrick_type_get_nasid(nasid_t nasid)
{
	uint rack, bay, type;
	int idx, rv;
	extern char brick_types[];

	rv = iobrick_rack_bay_type_get(nasid, &rack, &bay, &type);
	if (rv < 0)
		return rv;

	/* brick_types[] holds lower-case codes; fold the L1's answer. */
	if (type >= 'A' && type <= 'Z')
		type += 'a' - 'A';

	/* Translate the character code into a module.h brick index. */
	for (idx = 0; idx < MAX_BRICK_TYPES; idx++) {
		if (brick_types[idx] == type)
			return idx;
	}

	return -1;	/* unknown brick */
}
/*
 * iobrick_module_get_nasid - moduleid_t for the brick at 'nasid'.
 * Currently just forwards to iobrick_module_get(); the PEBRICK special
 * case is compiled out pending PIC support.
 */
int iobrick_module_get_nasid(nasid_t nasid)
{
	int io_moduleid;

#ifdef PIC_LATER
	uint rack, bay;

	/* NOTE(review): this disabled path references 'sc' (undeclared
	 * here) and printf(); it will not compile if PIC_LATER is ever
	 * defined — fix before enabling. */
	if (PEBRICK_NODE(nasid)) {
		if (peer_iobrick_rack_bay_get(nasid, &rack, &bay)) {
			printf("Could not read rack and bay location "
			       "of PEBrick at nasid %d\n", nasid);
		}
		io_moduleid = peer_iobrick_module_get(sc, rack, bay);
	}
#endif	/* PIC_LATER */
	io_moduleid = iobrick_module_get(nasid);
	return io_moduleid;
}
/* $Id$
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <asm/sn/sgi.h>
#include <asm/sn/io.h>
#include <asm/sn/iograph.h>
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h>
#include <asm/sn/sn_private.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/snconfig.h>
extern int numcpus;
extern char arg_maxnodes[];
extern cpuid_t master_procid;
extern int hasmetarouter;
int maxcpus;
cpumask_t boot_cpumask;
hubreg_t region_mask = 0;
extern xwidgetnum_t hub_widget_id(nasid_t);
extern int valid_icache_reasons; /* Reasons to flush the icache */
extern int valid_dcache_reasons; /* Reasons to flush the dcache */
extern u_char miniroot;
extern volatile int need_utlbmiss_patch;
extern void iograph_early_init(void);
nasid_t master_nasid = INVALID_NASID; /* This is the partition master nasid */
nasid_t master_baseio_nasid = INVALID_NASID; /* This is the master base I/O nasid */
/*
* mlreset(void)
* very early machine reset - at this point NO interrupts have been
* enabled; nor is memory, tlb, p0, etc setup.
*
* slave is zero when mlreset is called for the master processor and
* is nonzero thereafter.
*/
void
mlreset(int slave)
{
	/* 'slave' is unused in this implementation. */

	/*
	 * We are the master cpu and node.
	 */
	master_nasid = get_nasid();
	set_master_bridge_base();

	/* We're the master processor */
	master_procid = smp_processor_id();
	master_nasid = cpuid_to_nasid(master_procid);

	/*
	 * master_nasid we get back better be same as one from
	 * get_nasid()
	 */
	ASSERT_ALWAYS(master_nasid == get_nasid());

	/* early initialization of iograph */
	iograph_early_init();

	/* Initialize Hub Pseudodriver Management */
	hubdev_init();
}
/* XXX - Move the meat of this to intr.c ? */
/*
* Set up the platform-dependent fields in the nodepda.
*/
/*
 * init_platform_nodepda - fill in the platform-dependent nodepda fields.
 *
 * Allocates the per-node hubinfo from node-local bootmem, cross-links it
 * with the nodepda, and initializes the dependent-router list and the
 * xbow semaphore (created locked; released by the widget-assignment
 * master in io_init_node()).
 */
void init_platform_nodepda(nodepda_t *npda, cnodeid_t node)
{
	hubinfo_t hinfo;
	extern void router_map_init(nodepda_t *);
	extern void router_queue_init(nodepda_t *,cnodeid_t);
	extern void intr_init_vecblk(nodepda_t *, cnodeid_t, int);

	/* Allocate per-node platform-dependent data */
	hinfo = (hubinfo_t)alloc_bootmem_node(NODE_DATA(node), sizeof(struct hubinfo_s));

	/* Cross-link nodepda and hubinfo, then fill in hub identity. */
	npda->pdinfo = (void *)hinfo;
	hinfo->h_nodepda = npda;
	hinfo->h_cnodeid = node;
	hinfo->h_nasid = COMPACT_TO_NASID_NODEID(node);

	spin_lock_init(&hinfo->h_crblock);

	hinfo->h_widgetid = hub_widget_id(hinfo->h_nasid);
	npda->xbow_peer = INVALID_NASID;

	/*
	 * Empty linked list of dependent-router info; npda_rip_last
	 * always points at the slot where the next element will be
	 * appended.
	 */
	npda->npda_rip_first = NULL;
	npda->npda_rip_last = &npda->npda_rip_first;

	npda->geoid.any.type = GEO_TYPE_INVALID;

	mutex_init_locked(&npda->xbow_sema);	/* init it locked? */
}
/* XXX - Move the interrupt stuff to intr.c ? */
/*
* Set up the platform-dependent fields in the processor pda.
* Must be done _after_ init_platform_nodepda().
* If we need a lock here, something else is wrong!
*/
void init_platform_pda(cpuid_t cpu)
{
	/* Intentionally empty: no platform-specific per-processor state
	 * is set up here. */
}
/*
 * update_node_information - refresh cached per-router data for a node.
 *
 * Walks the node's dependent-router list and copies the frequently
 * accessed fields (port mask, slot number) out of each router's info
 * structure.  Entries with no router info get an empty port mask.
 */
void
update_node_information(cnodeid_t cnodeid)
{
	nodepda_t *npda = NODEPDA(cnodeid);
	nodepda_router_info_t *rip;

	for (rip = npda->npda_rip_first; rip != NULL; rip = rip->router_next) {
		if (rip->router_infop == NULL) {
			/* No router, no ports. */
			rip->router_portmask = 0;
			continue;
		}
		rip->router_portmask = rip->router_infop->ri_portmask;
		rip->router_slot = rip->router_infop->ri_slotnum;
	}
}
/* $Id$
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <asm/sn/sgi.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/iograph.h>
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/hcl_util.h>
#include <asm/sn/labelcl.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/pci/bridge.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/eeprom.h>
#include <asm/sn/sn_private.h>
#include <asm/sn/pci/pcibr.h>
#include <asm/sn/xtalk/xtalk.h>
#include <asm/sn/xtalk/xswitch.h>
#include <asm/sn/xtalk/xwidget.h>
#include <asm/sn/xtalk/xtalk_private.h>
#include <asm/sn/xtalk/xtalkaddrs.h>
/* #define IOGRAPH_DEBUG */
#ifdef IOGRAPH_DEBUG
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif /* IOGRAPH_DEBUG */
/* #define PROBE_TEST */
/* At most 2 hubs can be connected to an xswitch */
#define NUM_XSWITCH_VOLUNTEER 2
extern unsigned char Is_pic_on_this_nasid[512];
/*
* Track which hubs have volunteered to manage devices hanging off of
* a Crosstalk Switch (e.g. xbow). This structure is allocated,
* initialized, and hung off the xswitch vertex early on when the
* xswitch vertex is created.
*/
typedef struct xswitch_vol_s {
	mutex_t		xswitch_volunteer_mutex;	/* guards the two fields below */
	int		xswitch_volunteer_count;	/* number of hubs registered so far */
	/* volunteer hub vertices; [0] is kept as the low-widgetid hub
	 * once both slots are filled (see volunteer_for_widgets) */
	devfs_handle_t	xswitch_volunteer[NUM_XSWITCH_VOLUNTEER];
} *xswitch_vol_t;
/*
 * xswitch_vertex_init - attach volunteer bookkeeping to an xswitch vertex.
 *
 * Allocates a zeroed xswitch_vol_s, initializes its mutex, and hangs it
 * off 'xswitch' under INFO_LBL_XSWITCH_VOL for later use by
 * volunteer_for_widgets()/assign_widgets_to_volunteers().
 */
void
xswitch_vertex_init(devfs_handle_t xswitch)
{
	xswitch_vol_t vol;
	int rc;
	extern void * snia_kmem_zalloc(size_t size, int flag);

	vol = snia_kmem_zalloc(sizeof(struct xswitch_vol_s), GFP_KERNEL);
	mutex_init(&vol->xswitch_volunteer_mutex);

	rc = hwgraph_info_add_LBL(xswitch,
				  INFO_LBL_XSWITCH_VOL,
				  (arbitrary_info_t)vol);
	ASSERT(rc == GRAPH_SUCCESS);
	rc = rc;	/* silence "set but unused" when ASSERT compiles away */
}
/*
 * When assignment of hubs to widgets is complete, we no longer need the
 * xswitch volunteer structure hanging around.  Destroy it.
 *
 * Fix: 'xvolinfo' was previously uninitialized and freed even when
 * hwgraph_info_remove_LBL() failed, handing a garbage pointer to the
 * allocator.  Only free the structure when the label was actually
 * removed and returned a pointer.
 */
static void
xswitch_volunteer_delete(devfs_handle_t xswitch)
{
	xswitch_vol_t xvolinfo = NULL;
	int rc;
	extern void snia_kmem_free(void *ptr, size_t size);

	rc = hwgraph_info_remove_LBL(xswitch,
				INFO_LBL_XSWITCH_VOL,
				(arbitrary_info_t *)&xvolinfo);
	if (rc == GRAPH_SUCCESS && xvolinfo != NULL)
		snia_kmem_free(xvolinfo, sizeof(struct xswitch_vol_s));
}
/*
 * A Crosstalk master volunteers to manage xwidgets on the specified xswitch.
 *
 * Records 'master' in the xswitch's volunteer array (hung off the vertex
 * by xswitch_vertex_init()).  When the second volunteer registers, the
 * pair is ordered so that the hub with the low widget id is always
 * xswitch_volunteer[0].
 *
 * Fix: the KERN_WARNING printk was missing its trailing newline, so the
 * warning could be merged with the next kernel log line.
 */
/* ARGSUSED */
static void
volunteer_for_widgets(devfs_handle_t xswitch, devfs_handle_t master)
{
	xswitch_vol_t xvolinfo = NULL;
	devfs_handle_t hubv;
	hubinfo_t hubinfo;

	(void)hwgraph_info_get_LBL(xswitch,
				INFO_LBL_XSWITCH_VOL,
				(arbitrary_info_t *)&xvolinfo);
	if (xvolinfo == NULL) {
		/* Headless nodes legitimately lack volunteer info. */
		if (!is_headless_node_vertex(master))
			printk(KERN_WARNING
				"volunteer for widgets: vertex 0x%p has no info label\n",
				(void *)xswitch);
		return;
	}

	mutex_lock(&xvolinfo->xswitch_volunteer_mutex);
	ASSERT(xvolinfo->xswitch_volunteer_count < NUM_XSWITCH_VOLUNTEER);
	xvolinfo->xswitch_volunteer[xvolinfo->xswitch_volunteer_count] = master;
	xvolinfo->xswitch_volunteer_count++;

	/*
	 * if dual ported, make the lowest widgetid always be
	 * xswitch_volunteer[0].
	 */
	if (xvolinfo->xswitch_volunteer_count == NUM_XSWITCH_VOLUNTEER) {
		hubv = xvolinfo->xswitch_volunteer[0];
		hubinfo_get(hubv, &hubinfo);
		if (hubinfo->h_widgetid != XBOW_HUBLINK_LOW) {
			xvolinfo->xswitch_volunteer[0] =
						xvolinfo->xswitch_volunteer[1];
			xvolinfo->xswitch_volunteer[1] = hubv;
		}
	}
	mutex_unlock(&xvolinfo->xswitch_volunteer_mutex);
}
extern int xbow_port_io_enabled(nasid_t nasid, int widgetnum);
/*
 * Assign all the xwidgets hanging off the specified xswitch to the
 * Crosstalk masters that have volunteered for xswitch duty.
 *
 * For each enabled widget port: the master base-IO widget is assigned to
 * whichever volunteer owned it in the PROM; all other widgets alternate
 * between the (up to two) volunteers based on the brick's bus mapping.
 * The volunteer list is destroyed once assignment completes.
 */
/* ARGSUSED */
static void
assign_widgets_to_volunteers(devfs_handle_t xswitch, devfs_handle_t hubv)
{
	xswitch_info_t xswitch_info;
	xswitch_vol_t xvolinfo = NULL;
	xwidgetnum_t widgetnum;
	int num_volunteer;
	nasid_t nasid;
	hubinfo_t hubinfo;
	extern int iobrick_type_get_nasid(nasid_t);

	hubinfo_get(hubv, &hubinfo);
	nasid = hubinfo->h_nasid;

	xswitch_info = xswitch_info_get(xswitch);
	ASSERT(xswitch_info != NULL);

	/* Volunteer bookkeeping was hung off the xswitch vertex by
	 * xswitch_vertex_init(); headless nodes may not have it. */
	(void)hwgraph_info_get_LBL(xswitch,
				INFO_LBL_XSWITCH_VOL,
				(arbitrary_info_t *)&xvolinfo);
	if (xvolinfo == NULL) {
		/* NOTE(review): this printk has no trailing '\n'. */
		if (!is_headless_node_vertex(hubv))
			printk(KERN_WARNING
				"assign_widgets_to_volunteers:vertex 0x%p has "
				" no info label",
				(void *)xswitch);
		return;
	}

	num_volunteer = xvolinfo->xswitch_volunteer_count;
	ASSERT(num_volunteer > 0);

	/* Assign master hub for xswitch itself. */
	if (HUB_WIDGET_ID_MIN > 0) {
		hubv = xvolinfo->xswitch_volunteer[0];
		xswitch_info_master_assignment_set(xswitch_info, (xwidgetnum_t)0, hubv);
	}

	/*
	 * TBD: Use administrative information to alter assignment of
	 * widgets to hubs.
	 */
	for (widgetnum=HUB_WIDGET_ID_MIN; widgetnum <= HUB_WIDGET_ID_MAX; widgetnum++) {
		int i;

		/*
		 * Ignore disabled/empty ports.
		 */
		if (!xbow_port_io_enabled(nasid, widgetnum))
			continue;

		/*
		 * If this is the master IO board, assign it to the same
		 * hub that owned it in the prom.
		 */
		if (is_master_baseio_nasid_widget(nasid, widgetnum)) {
			extern nasid_t get_master_baseio_nasid(void);
			for (i=0; i<num_volunteer; i++) {
				hubv = xvolinfo->xswitch_volunteer[i];
				hubinfo_get(hubv, &hubinfo);
				nasid = hubinfo->h_nasid;
				if (nasid == get_master_baseio_nasid())
					goto do_assignment;
			}
			/* The PROM's owner is not among the volunteers. */
			PRINT_PANIC("Nasid == %d, console nasid == %d",
				nasid, get_master_baseio_nasid());
		}

		/*
		 * Assuming that we're dual-hosted and that PCI cards
		 * are naturally placed left-to-right, alternate PCI
		 * buses across both Cbricks.  For Pbricks, and Ibricks,
		 * io_brick_map_widget() returns the PCI bus number
		 * associated with the given brick type and widget number.
		 * For Xbricks, it returns the XIO slot number.
		 */
		i = 0;
		if (num_volunteer > 1) {
			int bt;

			bt = iobrick_type_get_nasid(nasid);
			if (bt >= 0) {
				/*
				 * PXBRICK has two busses per widget so this
				 * algorithm wouldn't work (all busses would
				 * be assigned to one volunteer).  Change the
				 * bricktype to PBRICK whose mapping is setup
				 * suchthat 2 of the PICs will be assigned to
				 * one volunteer and the other one will be
				 * assigned to the other volunteer.
				 */
				if (bt == MODULE_PXBRICK)
					bt = MODULE_PBRICK;
				i = io_brick_map_widget(bt, widgetnum) & 1;
			}
		}
		hubv = xvolinfo->xswitch_volunteer[i];

do_assignment:
		/*
		 * At this point, we want to make hubv the master of widgetnum.
		 */
		xswitch_info_master_assignment_set(xswitch_info, widgetnum, hubv);
	}

	/* All widgets assigned; drop the volunteer bookkeeping. */
	xswitch_volunteer_delete(xswitch);
}
/*
* Early iograph initialization. Called by master CPU in mlreset().
* Useful for including iograph.o in kernel.o.
*/
void
iograph_early_init(void)
{
/*
* Need new way to get this information ..
*/
cnodeid_t cnode;
nasid_t nasid;
lboard_t *board;
/*
* Init. the board-to-hwgraph link early, so FRU analyzer
* doesn't trip on leftover values if we panic early on.
*/
for(cnode = 0; cnode < numnodes; cnode++) {
nasid = COMPACT_TO_NASID_NODEID(cnode);
board = (lboard_t *)KL_CONFIG_INFO(nasid);
DBG("iograph_early_init: Found board 0x%p\n", board);
/* Check out all the board info stored on a node */
while(board) {
board->brd_graph_link = GRAPH_VERTEX_NONE;
board = KLCF_NEXT(board);
DBG("iograph_early_init: Found board 0x%p\n", board);
}
}
hubio_init();
}
/*
* Let boot processor know that we're done initializing our node's IO
* and then exit.
*/
/* ARGSUSED */
static void
io_init_done(cnodeid_t cnodeid,cpu_cookie_t c)
{
	/* Let boot processor know that we're done. */
	/* Intentionally empty in this port; kept so callers retain the
	 * original init-sequence call sites. */
}
/*
* Probe to see if this hub's xtalk link is active. If so,
* return the Crosstalk Identification of the widget that we talk to.
* This is called before any of the Crosstalk infrastructure for
* this hub is set up. It's usually called on the node that we're
* probing, but not always.
*
* TBD: Prom code should actually do this work, and pass through
* hwid for our use.
*/
static void
early_probe_for_widget(devfs_handle_t hubv, xwidget_hwid_t hwid)
{
	hubreg_t llp_csr_reg;
	nasid_t nasid;
	hubinfo_t hubinfo;
	widgetreg_t widget_id;

	hubinfo_get(hubv, &hubinfo);
	nasid = hubinfo->h_nasid;

	llp_csr_reg = REMOTE_HUB_L(nasid, IIO_LLP_CSR);

	/* Link down: report "nothing attached" and bail out early. */
	if (!(llp_csr_reg & IIO_LLP_CSR_IS_UP)) {
		hwid->part_num = XWIDGET_PART_NUM_NONE;
		hwid->rev_num = XWIDGET_REV_NUM_NONE;
		hwid->mfg_num = XWIDGET_MFG_NUM_NONE;
		return;
	}

	/* TBD: Put hub into "indirect" mode */

	/*
	 * Link is up: read the attached widget's ID register.  A direct
	 * connect widget must respond to widgetnum=0, and our hub's
	 * WIDGET_ID was set up earlier, so the read works.
	 */
	widget_id = *(volatile widgetreg_t *)
		(RAW_NODE_SWIN_BASE(nasid, 0x0) + WIDGET_ID);

	DBG("early_probe_for_widget: Hub Vertex 0x%p is UP widget_id = 0x%x Register 0x%p\n", hubv, widget_id,
	    (volatile widgetreg_t *)(RAW_NODE_SWIN_BASE(nasid, 0x0) + WIDGET_ID) );

	hwid->part_num = XWIDGET_PART_NUM(widget_id);
	hwid->rev_num = XWIDGET_REV_NUM(widget_id);
	hwid->mfg_num = XWIDGET_MFG_NUM(widget_id);

	/* TBD: link reset */
}
/* Add inventory information to the widget vertex
* Right now (module,slot,revision) is being
* added as inventory information.
*/
static void
xwidget_inventory_add(devfs_handle_t widgetv,
				lboard_t *board,
				struct xwidget_hwid_s hwid)
{
	if (board == NULL)
		return;

	/*
	 * Record (module, slot, revision) as inventory on the widget
	 * vertex.  The baseio on a speedo with an xbox is not added
	 * here — SN00_vmc already took care of it (that baseio shows
	 * up at slot io1, widget 9).
	 */
	device_inventory_add(widgetv, INV_IOBD, board->brd_type,
			     geo_module(board->brd_geoid),
			     SLOTNUM_GETSLOT(board->brd_slot),
			     hwid.rev_num);
}
/*
* io_xswitch_widget_init
*
*/
/*
 * io_xswitch_widget_init - set up one widget port hanging off an xswitch.
 *
 * If the port is I/O-enabled (on this nasid or its xbow peer), the link
 * is up, and 'hubv' is the assigned master for 'widgetnum', this builds
 * the /hw/module/.../<brick>/xtalk/<n> hwgraph path for the widget,
 * reads its crosstalk ID, records inventory, and registers it with the
 * generic xtalk bus provider so its driver can attach.
 */
void
io_xswitch_widget_init(devfs_handle_t xswitchv,
			devfs_handle_t hubv,
			xwidgetnum_t widgetnum,
			async_attach_t aa)
{
	xswitch_info_t xswitch_info;
	xwidgetnum_t hub_widgetid;
	devfs_handle_t widgetv;
	cnodeid_t cnode;
	widgetreg_t widget_id;
	nasid_t nasid, peer_nasid;
	struct xwidget_hwid_s hwid;
	hubinfo_t hubinfo;
	/*REFERENCED*/
	int rc;
	char pathname[128];
	lboard_t *board = NULL;
	char buffer[16];
	char bt;
	moduleid_t io_module;
	slotid_t get_widget_slotnum(int xbow, int widget);

	DBG("\nio_xswitch_widget_init: hubv 0x%p, xswitchv 0x%p, widgetnum 0x%x\n", hubv, xswitchv, widgetnum);

	/*
	 * Verify that xswitchv is indeed an attached xswitch.
	 */
	xswitch_info = xswitch_info_get(xswitchv);
	ASSERT(xswitch_info != NULL);

	hubinfo_get(hubv, &hubinfo);
	nasid = hubinfo->h_nasid;
	cnode = NASID_TO_COMPACT_NODEID(nasid);
	hub_widgetid = hubinfo->h_widgetid;

	/*
	 * Check that the widget is an io widget and is enabled
	 * on this nasid or the `peer' nasid.  The peer nasid
	 * is the other hub/bedrock connected to the xbow.
	 */
	peer_nasid = NODEPDA(cnode)->xbow_peer;
	if (peer_nasid == INVALID_NASID)
		/* If I don't have a peer, use myself. */
		peer_nasid = nasid;
	if (!xbow_port_io_enabled(nasid, widgetnum) &&
	    !xbow_port_io_enabled(peer_nasid, widgetnum)) {
		return;
	}

	if (xswitch_info_link_ok(xswitch_info, widgetnum)) {
		char name[4];
		lboard_t dummy;

		/*
		 * If the current hub is not supposed to be the master
		 * for this widgetnum, then skip this widget.
		 */
		if (xswitch_info_master_assignment_get(xswitch_info,
						       widgetnum) != hubv) {
			return;
		}

		/* Find the IO brick board on this node, falling back to
		 * the xbow peer's klconfig if we don't have one. */
		board = find_lboard_class(
				(lboard_t *)KL_CONFIG_INFO(nasid),
				KLCLASS_IOBRICK);
		if (!board && NODEPDA(cnode)->xbow_peer != INVALID_NASID) {
			board = find_lboard_class(
				(lboard_t *)KL_CONFIG_INFO( NODEPDA(cnode)->xbow_peer),
				KLCLASS_IOBRICK);
		}

		if (board) {
			DBG("io_xswitch_widget_init: Found KLTYPE_IOBRICK Board 0x%p brd_type 0x%x\n", board, board->brd_type);
		} else {
			/* No board found: use a stack-local dummy so the
			 * path-building code below still has something to
			 * read geoid/type fields from. */
			DBG("io_xswitch_widget_init: FIXME did not find IOBOARD\n");
			board = &dummy;
		}

		/* Copy over the nodes' geoid info */
		{
			lboard_t *brd;

			brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_SNIA);
			if ( brd != (lboard_t *)0 ) {
				board->brd_geoid = brd->brd_geoid;
			}
		}

		/*
		 * Make sure we really want to say xbrick, pbrick,
		 * etc. rather than XIO, graphics, etc.
		 */
		memset(buffer, 0, 16);
		format_module_id(buffer, geo_module(board->brd_geoid), MODULE_FORMAT_BRIEF);
		sprintf(pathname, EDGE_LBL_MODULE "/%s/" EDGE_LBL_SLAB "/%d" "/%cbrick" "/%s/%d",
			buffer,
			geo_slab(board->brd_geoid),
			(board->brd_type == KLTYPE_IBRICK) ? 'I' :
			(board->brd_type == KLTYPE_PBRICK) ? 'P' :
			(board->brd_type == KLTYPE_XBRICK) ? 'X' : '?',
			EDGE_LBL_XTALK, widgetnum);

		DBG("io_xswitch_widget_init: path= %s\n", pathname);
		rc = hwgraph_path_add(hwgraph_root, pathname, &widgetv);

		ASSERT(rc == GRAPH_SUCCESS);

		/* This is needed to let the user programs to map the
		 * module,slot numbers to the corresponding widget numbers
		 * on the crossbow.
		 */
		device_master_set(hwgraph_connectpt_get(widgetv), hubv);
		sprintf(name, "%d", widgetnum);
		DBG("io_xswitch_widget_init: FIXME hwgraph_edge_add %s xswitchv 0x%p, widgetv 0x%p\n", name, xswitchv, widgetv);
		rc = hwgraph_edge_add(xswitchv, widgetv, name);

		/*
		 * crosstalk switch code tracks which
		 * widget is attached to each link.
		 */
		xswitch_info_vhdl_set(xswitch_info, widgetnum, widgetv);

		/*
		 * Peek at the widget to get its crosstalk part and
		 * mfgr numbers, then present it to the generic xtalk
		 * bus provider to have its driver attach routine
		 * called (or not).
		 */
		widget_id = XWIDGET_ID_READ(nasid, widgetnum);
		hwid.part_num = XWIDGET_PART_NUM(widget_id);
		hwid.rev_num = XWIDGET_REV_NUM(widget_id);
		hwid.mfg_num = XWIDGET_MFG_NUM(widget_id);

		/* Store some inventory information about
		 * the xwidget in the hardware graph.
		 */
		xwidget_inventory_add(widgetv,board,hwid);

		(void)xwidget_register(&hwid, widgetv, widgetnum,
				       hubv, hub_widgetid,
				       aa);

		/* NOTE(review): the SAL call's error return is ignored
		 * here and io_module is only range-checked — confirm
		 * io_module is left negative on failure. */
		ia64_sn_sysctl_iobrick_module_get(nasid, &io_module);
		if (io_module >= 0) {
			char buffer[16];
			devfs_handle_t to, from;

			memset(buffer, 0, 16);
			format_module_id(buffer, geo_module(board->brd_geoid), MODULE_FORMAT_BRIEF);
			bt = toupper(MODULE_GET_BTCHAR(io_module));

			/* Add a helper vertex so xbow monitoring
			 * can identify the brick type.  It's simply
			 * an edge from the widget 0 vertex to the
			 * brick vertex.
			 */
			sprintf(pathname, "/dev/hw/" EDGE_LBL_MODULE "/%s/"
				EDGE_LBL_SLAB "/%d/"
				EDGE_LBL_NODE "/" EDGE_LBL_XTALK "/"
				"0",
				buffer, geo_slab(board->brd_geoid));
			from = hwgraph_path_to_vertex(pathname);
			ASSERT_ALWAYS(from);

			sprintf(pathname, "/dev/hw/" EDGE_LBL_MODULE "/%s/"
				EDGE_LBL_SLAB "/%d/"
				"%cbrick",
				buffer, geo_slab(board->brd_geoid), bt);

			to = hwgraph_path_to_vertex(pathname);
			ASSERT_ALWAYS(to);

			rc = hwgraph_edge_add(from, to,
					      EDGE_LBL_INTERCONNECT);
			if (rc == -EEXIST)
				goto link_done;
			if (rc != GRAPH_SUCCESS) {
				printk("%s: Unable to establish link"
				       " for xbmon.", pathname);
			}
link_done:
		}

#ifdef SN0_USE_BTE
		bte_bpush_war(cnode, (void *)board);
#endif
	}
}
/*
 * io_init_xswitch_widgets - initialize every widget port on an xswitch,
 * then wait for any parallel attach threads to finish.
 */
static void
io_init_xswitch_widgets(devfs_handle_t xswitchv, cnodeid_t cnode)
{
	xwidgetnum_t widgetnum;
	async_attach_t aa = async_attach_new();

	DBG("io_init_xswitch_widgets: xswitchv 0x%p for cnode %d\n", xswitchv, cnode);

	for (widgetnum = HUB_WIDGET_ID_MIN; widgetnum <= HUB_WIDGET_ID_MAX;
	     widgetnum++)
		io_xswitch_widget_init(xswitchv, cnodeid_to_vertex(cnode),
				       widgetnum, aa);

	/*
	 * Wait for parallel attach threads, if any, to complete.
	 */
	async_attach_waitall(aa);
	async_attach_free(aa);
}
/*
* For each PCI bridge connected to the xswitch, add a link from the
* board's klconfig info to the bridge's hwgraph vertex. This lets
* the FRU analyzer find the bridge without traversing the hardware
* graph and risking hangs.
*/
/*
 * Walk each widget port under 'xswitchv' and point the corresponding
 * klconfig board's brd_graph_link at the widget's PCI (or PCI-X bus 0/1,
 * on PIC nasids) hwgraph vertex, so the FRU analyzer can find bridges
 * without a full graph traversal.
 *
 * Fix: the KERN_WARNING printk was missing its trailing newline, so the
 * warning could be merged with the next kernel log line.
 */
static void
io_link_xswitch_widgets(devfs_handle_t xswitchv, cnodeid_t cnodeid)
{
	xwidgetnum_t widgetnum;
	char pathname[128];
	devfs_handle_t vhdl;
	nasid_t nasid, peer_nasid;
	lboard_t *board;

	/* And its connected hub's nasids */
	nasid = COMPACT_TO_NASID_NODEID(cnodeid);
	peer_nasid = NODEPDA(cnodeid)->xbow_peer;

	/*
	 * Look for paths matching "<widgetnum>/pci" under xswitchv.
	 * For every widget, init. its lboard's hwgraph link.  If the
	 * board has a PCI bridge, point the link to it.
	 */
	for (widgetnum = HUB_WIDGET_ID_MIN; widgetnum <= HUB_WIDGET_ID_MAX;
	     widgetnum++) {
		/* Skip ports with no vertex in the graph. */
		sprintf(pathname, "%d", widgetnum);
		if (hwgraph_traverse(xswitchv, pathname, &vhdl) !=
		    GRAPH_SUCCESS)
			continue;

		board = find_lboard_module((lboard_t *)KL_CONFIG_INFO(nasid),
					   NODEPDA(cnodeid)->geoid);
		if (board == NULL && peer_nasid != INVALID_NASID) {
			/*
			 * Try to find the board on our peer
			 */
			board = find_lboard_module(
				(lboard_t *)KL_CONFIG_INFO(peer_nasid),
				NODEPDA(cnodeid)->geoid);
		}
		if (board == NULL) {
			printk(KERN_WARNING "Could not find PROM info for vertex 0x%p, "
				"FRU analyzer may fail\n",
				(void *)vhdl);
			return;
		}

		if ( Is_pic_on_this_nasid[nasid] ) {
			/* Check both buses */
			sprintf(pathname, "%d/"EDGE_LBL_PCIX_0, widgetnum);
			if (hwgraph_traverse(xswitchv, pathname, &vhdl) == GRAPH_SUCCESS)
				board->brd_graph_link = vhdl;
			else {
				sprintf(pathname, "%d/"EDGE_LBL_PCIX_1, widgetnum);
				if (hwgraph_traverse(xswitchv, pathname, &vhdl) == GRAPH_SUCCESS)
					board->brd_graph_link = vhdl;
				else
					board->brd_graph_link = GRAPH_VERTEX_NONE;
			}
		}
		else {
			sprintf(pathname, "%d/"EDGE_LBL_PCI, widgetnum);
			if (hwgraph_traverse(xswitchv, pathname, &vhdl) == GRAPH_SUCCESS)
				board->brd_graph_link = vhdl;
			else
				board->brd_graph_link = GRAPH_VERTEX_NONE;
		}
	}
}
/*
* Initialize all I/O on the specified node.
*/
/*
 * io_init_node - bring up all I/O attached to one node's hub.
 *
 * Sequence: run hub device callouts, initialize dependent routers,
 * probe the hub's xtalk link, register the hub as an xtalk provider,
 * build the xtalk/io hwgraph vertices, determine the attached widget's
 * id, then — if the attached widget is an xswitch and we win mastership
 * — run the volunteer/assign protocol before initializing and linking
 * all xswitch widgets.  The peer hub (if any) blocks on xbow_sema until
 * assignment is complete.
 */
static void
io_init_node(cnodeid_t cnodeid)
{
	/*REFERENCED*/
	devfs_handle_t hubv, switchv, widgetv;
	struct xwidget_hwid_s hwid;
	hubinfo_t hubinfo;
	int is_xswitch;
	nodepda_t *npdap;
	struct semaphore *peer_sema = 0;
	uint32_t widget_partnum;
	nodepda_router_info_t *npda_rip;
	cpu_cookie_t c = 0;
	extern int hubdev_docallouts(devfs_handle_t);

	npdap = NODEPDA(cnodeid);

	/*
	 * Get the "top" vertex for this node's hardware
	 * graph; it will carry the per-hub hub-specific
	 * data, and act as the crosstalk provider master.
	 * It's canonical path is probably something of the
	 * form /hw/module/%M/slot/%d/node
	 */
	hubv = cnodeid_to_vertex(cnodeid);
	DBG("io_init_node: Initialize IO for cnode %d hubv(node) 0x%p npdap 0x%p\n", cnodeid, hubv, npdap);
	ASSERT(hubv != GRAPH_VERTEX_NONE);

	hubdev_docallouts(hubv);

	/*
	 * Set up the dependent routers if we have any.
	 */
	npda_rip = npdap->npda_rip_first;
	while(npda_rip) {
		/* If the router info has not been initialized
		 * then we need to do the router initialization
		 */
		if (!npda_rip->router_infop) {
			router_init(cnodeid,0,npda_rip);
		}
		npda_rip = npda_rip->router_next;
	}

	/*
	 * Read mfg info on this hub
	 */

	/*
	 * If nothing connected to this hub's xtalk port, we're done.
	 */
	early_probe_for_widget(hubv, &hwid);
	if (hwid.part_num == XWIDGET_PART_NUM_NONE) {
#ifdef PROBE_TEST
		if ((cnodeid == 1) || (cnodeid == 2)) {
			int index;

			for (index = 0; index < 600; index++)
				DBG("Interfering with device probing!!!\n");
		}
#endif
		/* io_init_done takes cpu cookie as 2nd argument
		 * to do a restorenoderun for the setnoderun done
		 * at the start of this thread
		 */
		DBG("**** io_init_node: Node's 0x%p hub widget has XWIDGET_PART_NUM_NONE ****\n", hubv);
		return;
		/* NOTREACHED */
	}

	/*
	 * attach our hub_provider information to hubv,
	 * so we can use it as a crosstalk provider "master"
	 * vertex.
	 */
	xtalk_provider_register(hubv, &hub_provider);
	xtalk_provider_startup(hubv);

	/*
	 * Create a vertex to represent the crosstalk bus
	 * attached to this hub, and a vertex to be used
	 * as the connect point for whatever is out there
	 * on the other side of our crosstalk connection.
	 *
	 * Crosstalk Switch drivers "climb up" from their
	 * connection point to try and take over the switch
	 * point.
	 *
	 * Of course, the edges and verticies may already
	 * exist, in which case our net effect is just to
	 * associate the "xtalk_" driver with the connection
	 * point for the device.
	 */
	(void)hwgraph_path_add(hubv, EDGE_LBL_XTALK, &switchv);
	DBG("io_init_node: Created 'xtalk' entry to '../node/' xtalk vertex 0x%p\n", switchv);
	ASSERT(switchv != GRAPH_VERTEX_NONE);

	(void)hwgraph_edge_add(hubv, switchv, EDGE_LBL_IO);
	DBG("io_init_node: Created symlink 'io' from ../node/io to ../node/xtalk \n");

	/*
	 * We need to find the widget id and update the basew_id field
	 * accordingly. In particular, SN00 has direct connected bridge,
	 * and hence widget id is Not 0.
	 */
	widget_partnum = (((*(volatile int32_t *)(NODE_SWIN_BASE(COMPACT_TO_NASID_NODEID(cnodeid), 0) + WIDGET_ID))) & WIDGET_PART_NUM) >> WIDGET_PART_NUM_SHFT;

	if (widget_partnum == BRIDGE_WIDGET_PART_NUM ||
	    widget_partnum == XBRIDGE_WIDGET_PART_NUM){
		/* Direct-connect bridge: widget id comes from the
		 * bridge's control register. */
		npdap->basew_id = (((*(volatile int32_t *)(NODE_SWIN_BASE(COMPACT_TO_NASID_NODEID(cnodeid), 0) + BRIDGE_WID_CONTROL))) & WIDGET_WIDGET_ID);
		DBG("io_init_node: Found XBRIDGE widget_partnum= 0x%x\n", widget_partnum);
	} else if ((widget_partnum == XBOW_WIDGET_PART_NUM) ||
		   (widget_partnum == XXBOW_WIDGET_PART_NUM) ||
		   (widget_partnum == PXBOW_WIDGET_PART_NUM) ) {
		/*
		 * Xbow control register does not have the widget ID field.
		 * So, hard code the widget ID to be zero.
		 */
		DBG("io_init_node: Found XBOW widget_partnum= 0x%x\n", widget_partnum);
		npdap->basew_id = 0;
	} else {
		npdap->basew_id = (((*(volatile int32_t *)(NODE_SWIN_BASE(COMPACT_TO_NASID_NODEID(cnodeid), 0) + BRIDGE_WID_CONTROL))) & WIDGET_WIDGET_ID);
		panic(" ****io_init_node: Unknown Widget Part Number 0x%x Widget ID 0x%x attached to Hubv 0x%p ****\n", widget_partnum, npdap->basew_id, (void *)hubv);
		/*NOTREACHED*/
	}
	{
		char widname[10];

		sprintf(widname, "%x", npdap->basew_id);
		(void)hwgraph_path_add(switchv, widname, &widgetv);
		DBG("io_init_node: Created '%s' to '..node/xtalk/' vertex 0x%p\n", widname, widgetv);
		ASSERT(widgetv != GRAPH_VERTEX_NONE);
	}

	nodepda->basew_xc = widgetv;

	is_xswitch = xwidget_hwid_is_xswitch(&hwid);

	/*
	 * Try to become the master of the widget.  If this is an xswitch
	 * with multiple hubs connected, only one will succeed.  Mastership
	 * of an xswitch is used only when touching registers on that xswitch.
	 * The slave xwidgets connected to the xswitch can be owned by various
	 * masters.
	 */
	if (device_master_set(widgetv, hubv) == 0) {
		/* Only one hub (thread) per Crosstalk device or switch makes
		 * it to here.
		 */

		/*
		 * Initialize whatever xwidget is hanging off our hub.
		 * Whatever it is, it's accessible through widgetnum 0.
		 */
		hubinfo_get(hubv, &hubinfo);

		(void)xwidget_register(&hwid, widgetv, npdap->basew_id, hubv, hubinfo->h_widgetid, NULL);

		if (!is_xswitch) {
			/* io_init_done takes cpu cookie as 2nd argument
			 * to do a restorenoderun for the setnoderun done
			 * at the start of this thread
			 */
			io_init_done(cnodeid,c);
			/* NOTREACHED */
		}

		/*
		 * Special handling for Crosstalk Switches (e.g. xbow).
		 * We need to do things in roughly the following order:
		 *	1) Initialize xswitch hardware (done above)
		 *	2) Determine which hubs are available to be widget masters
		 *	3) Discover which links are active from the xswitch
		 *	4) Assign xwidgets hanging off the xswitch to hubs
		 *	5) Initialize all xwidgets on the xswitch
		 */
		volunteer_for_widgets(switchv, hubv);

		/* If there's someone else on this crossbow, recognize him */
		if (npdap->xbow_peer != INVALID_NASID) {
			nodepda_t *peer_npdap = NODEPDA(NASID_TO_COMPACT_NODEID(npdap->xbow_peer));

			peer_sema = &peer_npdap->xbow_sema;
			volunteer_for_widgets(switchv, peer_npdap->node_vertex);
		}

		assign_widgets_to_volunteers(switchv, hubv);

		/* Signal that we're done */
		if (peer_sema) {
			mutex_unlock(peer_sema);
		}
	}
	else {
		/* Wait 'til master is done assigning widgets. */
		mutex_lock(&npdap->xbow_sema);
	}

#ifdef PROBE_TEST
	if ((cnodeid == 1) || (cnodeid == 2)) {
		int index;

		for (index = 0; index < 500; index++)
			DBG("Interfering with device probing!!!\n");
	}
#endif
	/* Now both nodes can safely inititialize widgets */
	io_init_xswitch_widgets(switchv, cnodeid);
	io_link_xswitch_widgets(switchv, cnodeid);

	/* io_init_done takes cpu cookie as 2nd argument
	 * to do a restorenoderun for the setnoderun done
	 * at the start of this thread
	 */
	io_init_done(cnodeid,c);

	DBG("\nio_init_node: DONE INITIALIZED ALL I/O FOR CNODEID %d\n\n", cnodeid);
}
#define IOINIT_STKSZ	(16 * 1024)	/* stack size for I/O init threads */

/* Hwgraph path fragments used when wiring base-I/O device aliases. */
#define __DEVSTR1 	"/../.master/"
#define __DEVSTR2 	"/target/"
#define __DEVSTR3 	"/lun/0/disk/partition/"
#define __DEVSTR4 	"/../ef"

/*
 * ioconfig starts numbering SCSI's at NUM_BASE_IO_SCSI_CTLR.
 */
#define NUM_BASE_IO_SCSI_CTLR	6
/*
 * This tells ioconfig where it can start numbering scsi controllers.
 * Below this base number, platform-specific handles the numbering.
 * XXX Irix legacy..controller numbering should be part of devfsd's job
 */
int num_base_io_scsi_ctlr = 2;	/* used by syssgi */
/* Vertices of the "well-known" base-IO SCSI controllers, indexed by
 * their fixed controller number (filled in by scsi_ctlr_nums_add). */
devfs_handle_t base_io_scsi_ctlr_vhdl[NUM_BASE_IO_SCSI_CTLR];
static devfs_handle_t baseio_enet_vhdl,baseio_console_vhdl;
/*
 * Put the logical controller number information in the
 * scsi controller vertices for each scsi controller that
 * is in a "fixed position".
 *
 * pci_vhdl is the PCI bus vertex of the console IOC3; the hardwired
 * paths below are looked up relative to that base IBrick's Xbridge.
 */
static void
scsi_ctlr_nums_add(devfs_handle_t pci_vhdl)
{
	{
		int i;

		num_base_io_scsi_ctlr = NUM_BASE_IO_SCSI_CTLR;

		/* Initialize base_io_scsi_ctlr_vhdl array */
		for (i=0; i<num_base_io_scsi_ctlr; i++)
			base_io_scsi_ctlr_vhdl[i] = GRAPH_VERTEX_NONE;
	}
	{
	/*
	 * May want to consider changing the SN0 code, above, to work more like
	 * the way this works.
	 */
	devfs_handle_t base_ibrick_xbridge_vhdl;
	devfs_handle_t base_ibrick_xtalk_widget_vhdl;
	devfs_handle_t scsi_ctlr_vhdl;
	int i;
	graph_error_t rv;

	/*
	 * This is a table of "well-known" SCSI controllers and their well-known
	 * controller numbers.  The names in the table start from the base IBrick's
	 * Xbridge vertex, so the first component is the xtalk widget number.
	 */
	static struct {
		char *base_ibrick_scsi_path;
		int controller_number;
	} hardwired_scsi_controllers[] = {
		{"15/" EDGE_LBL_PCI "/1/" EDGE_LBL_SCSI_CTLR "/0", 0},
		{"15/" EDGE_LBL_PCI "/2/" EDGE_LBL_SCSI_CTLR "/0", 1},
		{"15/" EDGE_LBL_PCI "/3/" EDGE_LBL_SCSI_CTLR "/0", 2},
		{"14/" EDGE_LBL_PCI "/1/" EDGE_LBL_SCSI_CTLR "/0", 3},
		{"14/" EDGE_LBL_PCI "/2/" EDGE_LBL_SCSI_CTLR "/0", 4},
		{"15/" EDGE_LBL_PCI "/6/ohci/0/" EDGE_LBL_SCSI_CTLR "/0", 5},
		{NULL, -1} /* must be last */
	};

	/* Walk up two connect points from the pci bus vertex
	 * (presumably pci bus -> xtalk widget -> Xbridge; confirm against
	 * hwgraph_connectpt_get semantics).
	 */
	base_ibrick_xtalk_widget_vhdl = hwgraph_connectpt_get(pci_vhdl);
	ASSERT_ALWAYS(base_ibrick_xtalk_widget_vhdl != GRAPH_VERTEX_NONE);

	base_ibrick_xbridge_vhdl = hwgraph_connectpt_get(base_ibrick_xtalk_widget_vhdl);
	ASSERT_ALWAYS(base_ibrick_xbridge_vhdl != GRAPH_VERTEX_NONE);
	/* Done with the intermediate widget vertex */
	hwgraph_vertex_unref(base_ibrick_xtalk_widget_vhdl);

	/*
	 * Iterate through the list of well-known SCSI controllers.
	 * For each controller found, set its controller number according
	 * to the table.
	 */
	for (i=0; hardwired_scsi_controllers[i].base_ibrick_scsi_path != NULL; i++) {
		rv = hwgraph_path_lookup(base_ibrick_xbridge_vhdl,
			hardwired_scsi_controllers[i].base_ibrick_scsi_path, &scsi_ctlr_vhdl, NULL);

		if (rv != GRAPH_SUCCESS) /* No SCSI at this path */
			continue;

		ASSERT(hardwired_scsi_controllers[i].controller_number < NUM_BASE_IO_SCSI_CTLR);
		base_io_scsi_ctlr_vhdl[hardwired_scsi_controllers[i].controller_number] = scsi_ctlr_vhdl;
		device_controller_num_set(scsi_ctlr_vhdl, hardwired_scsi_controllers[i].controller_number);
		hwgraph_vertex_unref(scsi_ctlr_vhdl); /* (even though we're actually keeping a reference) */
	}

	hwgraph_vertex_unref(base_ibrick_xbridge_vhdl);
	}
}
#include <asm/sn/ioerror_handling.h>
/* Root vertex of the system critical graph (a subgraph of the main
 * hwgraph consumed by the I/O error handling code).
 */
devfs_handle_t sys_critical_graph_root = GRAPH_VERTEX_NONE;

/* Define the system critical vertices and connect them through
 * canonical parent-child relationships for easy traversal
 * during io error handling.
 */
/*
 * sys_critical_graph_init: Build the system critical graph:
 * root -> master node -> (xbow ->) baseio bridge -> populated PCI
 * slots, plus edges from the console/ethernet/SCSI connection points
 * to their devices.  Requires baseio_console_vhdl (and friends) to
 * have been recorded by baseio_ctlr_num_set() first.
 */
static void
sys_critical_graph_init(void)
{
	devfs_handle_t bridge_vhdl,master_node_vhdl;
	devfs_handle_t xbow_vhdl = GRAPH_VERTEX_NONE;
	extern devfs_handle_t hwgraph_root;
	devfs_handle_t pci_slot_conn;
	int slot;
	devfs_handle_t baseio_console_conn;

	DBG("sys_critical_graph_init: FIXME.\n");
	baseio_console_conn = hwgraph_connectpt_get(baseio_console_vhdl);

	/* Nothing to do without a console connection point */
	if (baseio_console_conn == NULL) {
		return;
	}

	/* Get the vertex handle for the baseio bridge */
	bridge_vhdl = device_master_get(baseio_console_conn);

	/* Get the master node of the baseio card */
	master_node_vhdl = cnodeid_to_vertex(
				master_node_get(baseio_console_vhdl));

	/* Add the "root->node" part of the system critical graph */
	sys_critical_graph_vertex_add(hwgraph_root,master_node_vhdl);

	/* Check if we have a crossbow */
	if (hwgraph_traverse(master_node_vhdl,
			     EDGE_LBL_XTALK"/0",
			     &xbow_vhdl) == GRAPH_SUCCESS) {
		/* We have a crossbow. Add the "node->xbow" part of the
		 * system critical graph.
		 */
		sys_critical_graph_vertex_add(master_node_vhdl,xbow_vhdl);

		/* Add "xbow->baseio bridge" of the system critical graph */
		sys_critical_graph_vertex_add(xbow_vhdl,bridge_vhdl);

		hwgraph_vertex_unref(xbow_vhdl);
	} else
		/* We do not have a crossbow. Add the "node->baseio_bridge"
		 * part of the system critical graph.
		 */
		sys_critical_graph_vertex_add(master_node_vhdl,bridge_vhdl);

	/* Add all the populated PCI slot vertices to the system critical
	 * graph with the bridge vertex as the parent.
	 */
	for (slot = 0 ; slot < 8; slot++) {
		char slot_edge[10];

		sprintf(slot_edge,"%d",slot);
		if (hwgraph_traverse(bridge_vhdl,slot_edge, &pci_slot_conn)
		    != GRAPH_SUCCESS)
			continue;
		sys_critical_graph_vertex_add(bridge_vhdl,pci_slot_conn);
		hwgraph_vertex_unref(pci_slot_conn);
	}

	hwgraph_vertex_unref(bridge_vhdl);

	/* Add the "ioc3 pci connection point -> console ioc3" part
	 * of the system critical graph
	 */
	if (hwgraph_traverse(baseio_console_vhdl,"..",&pci_slot_conn) ==
	    GRAPH_SUCCESS) {
		sys_critical_graph_vertex_add(pci_slot_conn,
					      baseio_console_vhdl);
		hwgraph_vertex_unref(pci_slot_conn);
	}

	/* Add the "ethernet pci connection point -> base ethernet" part of
	 * the system critical graph
	 */
	if (hwgraph_traverse(baseio_enet_vhdl,"..",&pci_slot_conn) ==
	    GRAPH_SUCCESS) {
		sys_critical_graph_vertex_add(pci_slot_conn,
					      baseio_enet_vhdl);
		hwgraph_vertex_unref(pci_slot_conn);
	}

	/* Add the "scsi controller pci connection point -> base scsi
	 * controller" part of the system critical graph
	 */
	if (hwgraph_traverse(base_io_scsi_ctlr_vhdl[0],
			     "../..",&pci_slot_conn) == GRAPH_SUCCESS) {
		sys_critical_graph_vertex_add(pci_slot_conn,
					      base_io_scsi_ctlr_vhdl[0]);
		hwgraph_vertex_unref(pci_slot_conn);
	}
	if (hwgraph_traverse(base_io_scsi_ctlr_vhdl[1],
			     "../..",&pci_slot_conn) == GRAPH_SUCCESS) {
		sys_critical_graph_vertex_add(pci_slot_conn,
					      base_io_scsi_ctlr_vhdl[1]);
		hwgraph_vertex_unref(pci_slot_conn);
	}
	hwgraph_vertex_unref(baseio_console_conn);
}
/*
 * baseio_ctlr_num_set: Locate the console IOC3, record the console and
 * base-ethernet vertices (for sys_critical_graph_init), and assign the
 * well-known controller numbers to the base I/O SCSI controllers and
 * base ethernet.
 */
static void
baseio_ctlr_num_set(void)
{
	char name[MAXDEVNAME];
	devfs_handle_t console_vhdl, pci_vhdl, enet_vhdl;
	devfs_handle_t ioc3_console_vhdl_get(void);

	DBG("baseio_ctlr_num_set; FIXME\n");
	console_vhdl = ioc3_console_vhdl_get();
	if (console_vhdl == GRAPH_VERTEX_NONE)
		return;

	/* Useful for setting up the system critical graph */
	baseio_console_vhdl = console_vhdl;

	/* Build the path "<console>/../.master/" to reach the console
	 * IOC3's PCI bus vertex.
	 */
	vertex_to_name(console_vhdl,name,MAXDEVNAME);
	strcat(name,__DEVSTR1);
	pci_vhdl = hwgraph_path_to_vertex(name);
	/* NOTE(review): pci_vhdl is not checked against GRAPH_VERTEX_NONE
	 * before use -- confirm hwgraph_path_to_vertex cannot fail here.
	 */
	scsi_ctlr_nums_add(pci_vhdl);
	/* Unref the pci_vhdl due to the reference by hwgraph_path_to_vertex
	 */
	hwgraph_vertex_unref(pci_vhdl);

	/* Build the path "<console>/../ef" to reach the base ethernet */
	vertex_to_name(console_vhdl, name, MAXDEVNAME);
	strcat(name, __DEVSTR4);
	enet_vhdl = hwgraph_path_to_vertex(name);

	/* Useful for setting up the system critical graph */
	baseio_enet_vhdl = enet_vhdl;

	device_controller_num_set(enet_vhdl, 0);
	/* Unref the enet_vhdl due to the reference by hwgraph_path_to_vertex
	 */
	hwgraph_vertex_unref(enet_vhdl);
}
/* #endif */
/*
 * Initialize all I/O devices.  Starting closest to nodes, probe and
 * initialize outward: per-node I/O init first, then node info update,
 * base I/O controller numbering, and the system critical graph.
 */
void
init_all_devices(void)
{
	/* Governor on init threads..bump up when safe
	 * (beware many devfs races)
	 */
	cnodeid_t cnodeid;

	/* Note: the former write-only local `active' was removed; it was
	 * assigned 0 and never read.
	 */
	for (cnodeid = 0; cnodeid < numnodes; cnodeid++) {
		DBG("init_all_devices: Calling io_init_node() for cnode %d\n", cnodeid);
		io_init_node(cnodeid);

		DBG("init_all_devices: Done io_init_node() for cnode %d\n", cnodeid);
	}

	for (cnodeid = 0; cnodeid < numnodes; cnodeid++)
		/*
		 * Update information generated by IO init.
		 */
		update_node_information(cnodeid);

	baseio_ctlr_num_set();
	/* Setup the system critical graph (which is a subgraph of the
	 * main hwgraph).  This information is useful during io error
	 * handling.
	 */
	sys_critical_graph_init();

#if HWG_PRINT
	hwgraph_print();
#endif
}
/* Convert an ASCII decimal digit character to its integer value */
#define toint(x) ((int)(x) - (int)('0'))

/*
 * devnamefromarcs: Translate, in place, a PROM-style disk name of the
 * form "dksXdXsX" (X = decimal digits) in devnm into the corresponding
 * hardware graph path built from the recorded base I/O SCSI controller
 * vertex, __DEVSTR2/__DEVSTR3 and EDGE_LBL_BLOCK.  Names that do not
 * match the "dks<ctlr>d..." pattern are left untouched.
 */
void
devnamefromarcs(char *devnm)
{
	int val;
	char tmpnm[MAXDEVNAME];
	char *tmp1, *tmp2;

	/* Only "dks..." names are translated */
	val = strncmp(devnm, "dks", 3);
	if (val != 0)
		return;

	tmp1 = devnm + 3;
	if (!isdigit(*tmp1))
		return;

	/* Parse the decimal controller number following "dks" */
	val = 0;
	while (isdigit(*tmp1)) {
		val = 10*val+toint(*tmp1);
		tmp1++;
	}

	/* The controller number must be followed by 'd' (disk) */
	if(*tmp1 != 'd')
		return;
	else
		tmp1++;

	/* Controller number must be one we recorded a vertex for */
	if ((val < 0) || (val >= num_base_io_scsi_ctlr)) {
		int i;
		int viable_found = 0;

		DBG("Only controller numbers 0..%d are supported for\n", NUM_BASE_IO_SCSI_CTLR-1);
		DBG("prom \"root\" variables of the form dksXdXsX.\n");
		DBG("To use another disk you must use the full hardware graph path\n\n");
		DBG("Possible controller numbers for use in 'dksXdXsX' on this system: ");
		for (i=0; i<NUM_BASE_IO_SCSI_CTLR; i++) {
			if (base_io_scsi_ctlr_vhdl[i] != GRAPH_VERTEX_NONE) {
				DBG("%d ", i);
				viable_found=1;
			}
		}
		if (viable_found)
			DBG("\n");
		else
			DBG("none found!\n");

		DELAY(15000000);
		//prom_reboot();
		panic("FIXME: devnamefromarcs: should call prom_reboot here.\n");
		/* NOTREACHED */
	}

	ASSERT(base_io_scsi_ctlr_vhdl[val] != GRAPH_VERTEX_NONE);
	/* Start the new name with the controller's hwgraph path */
	vertex_to_name(base_io_scsi_ctlr_vhdl[val],
		       tmpnm,
		       MAXDEVNAME);
	tmp2 = tmpnm + strlen(tmpnm);
	strcpy(tmp2, __DEVSTR2);
	tmp2 += strlen(__DEVSTR2);

	/* Copy the target number (digits between 'd' and 's') */
	while (*tmp1 != 's') {
		if((*tmp2++ = *tmp1++) == '\0')
			return;
	}
	tmp1++;
	strcpy(tmp2, __DEVSTR3);
	tmp2 += strlen(__DEVSTR3);

	/* Copy the partition number including its terminating NUL */
	while ( (*tmp2++ = *tmp1++) )
		;
	tmp2--;
	*tmp2++ = '/';
	strcpy(tmp2, EDGE_LBL_BLOCK);

	/* Overwrite the caller's buffer with the translated path */
	strcpy(devnm,tmpnm);
}
/*
 * Per-brick-type maps from xtalk widget number (index 0x0 - 0xf) to a
 * logical PCI bus number (or XIO slot number for the Xbrick).  Consumed
 * by io_brick_map_widget().  Entries of 0 appear at widget positions
 * that have no bus/slot assignment for that brick type (presumably
 * "unmapped"; 0 is also io_brick_map_widget()'s not-found value).
 */
static
struct io_brick_map_s io_brick_tab[] = {

/* Ibrick widget number to PCI bus number map */
	{ MODULE_IBRICK,                /* Ibrick type */
	/* PCI Bus #                           Widget # */
	   { 0, 0, 0, 0, 0, 0, 0, 0,           /* 0x0 - 0x7 */
	     0,                                /* 0x8 */
	     0,                                /* 0x9 */
	     0, 0,                             /* 0xa - 0xb */
	     0,                                /* 0xc */
	     0,                                /* 0xd */
	     2,                                /* 0xe */
	     1                                 /* 0xf */
	   }
	},

/* Pbrick widget number to PCI bus number map */
	{ MODULE_PBRICK,                /* Pbrick type */
	/* PCI Bus #                           Widget # */
	   { 0, 0, 0, 0, 0, 0, 0, 0,           /* 0x0 - 0x7 */
	     2,                                /* 0x8 */
	     1,                                /* 0x9 */
	     0, 0,                             /* 0xa - 0xb */
	     4,                                /* 0xc */
	     6,                                /* 0xd */
	     3,                                /* 0xe */
	     5                                 /* 0xf */
	   }
	},

/* PXbrick widget number to PCI bus number map */
	{ MODULE_PXBRICK,               /* PXbrick type */
	/* PCI Bus #                           Widget # */
	   { 0, 0, 0, 0, 0, 0, 0, 0,           /* 0x0 - 0x7 */
	     0,                                /* 0x8 */
	     0,                                /* 0x9 */
	     0, 0,                             /* 0xa - 0xb */
	     1,                                /* 0xc */
	     5,                                /* 0xd */
	     0,                                /* 0xe */
	     3                                 /* 0xf */
	   }
	},

/* Xbrick widget to XIO slot map */
	{ MODULE_XBRICK,                /* Xbrick type */
	/* XIO Slot #                          Widget # */
	   { 0, 0, 0, 0, 0, 0, 0, 0,           /* 0x0 - 0x7 */
	     1,                                /* 0x8 */
	     3,                                /* 0x9 */
	     0, 0,                             /* 0xa - 0xb */
	     2,                                /* 0xc */
	     4,                                /* 0xd */
	     0,                                /* 0xe */
	     0                                 /* 0xf */
	   }
	}
};
/*
 * Use the brick's type to map a widget number to a meaningful int
 * (a logical PCI bus number, or an XIO slot for Xbricks).  Returns 0
 * when the brick type is unknown or the widget number is out of range.
 */
int
io_brick_map_widget(int brick_type, int widget_num)
{
	int num_bricks, i;

	/* Calculate number of bricks in table */
	num_bricks = sizeof(io_brick_tab)/sizeof(io_brick_tab[0]);

	/* Look for brick prefix in table */
	for (i = 0; i < num_bricks; i++) {
		if (brick_type == io_brick_tab[i].ibm_type) {
			/* Reject widget numbers outside the map so a bad
			 * caller cannot read past the end of ibm_map_wid[].
			 */
			if (widget_num < 0 ||
			    widget_num >= (int)(sizeof(io_brick_tab[i].ibm_map_wid) /
						sizeof(io_brick_tab[i].ibm_map_wid[0])))
				return 0;
			return(io_brick_tab[i].ibm_map_wid[widget_num]);
		}
	}

	return 0;
}
/*
 * Use the device's vertex to map the device's widget to a meaningful int.
 * Extracts the widget number and the brick-type letter ('I', 'P', 'X' ...)
 * from the vertex's full hwgraph path and feeds them to
 * io_brick_map_widget().  Returns 0 when the path does not match.
 */
int
io_path_map_widget(devfs_handle_t vertex)
{
	char hw_path_name[MAXDEVNAME];
	char *wp, *bp, *sp = NULL;
	int widget_num;
	long atoi(char *);
	int hwgraph_vertex_name_get(devfs_handle_t vhdl, char *buf, uint buflen);

	/* Get the full path name of the vertex */
	if (GRAPH_SUCCESS != hwgraph_vertex_name_get(vertex, hw_path_name,
						     MAXDEVNAME))
		return 0;

	/* Find the widget number in the path name: the digits right after
	 * the "/xtalk/" component (wp+7 skips that component -- assumes
	 * EDGE_LBL_XTALK is "xtalk").
	 */
	wp = strstr(hw_path_name, "/"EDGE_LBL_XTALK"/");
	if (wp == NULL)
		return 0;
	widget_num = atoi(wp+7);
	if (widget_num < XBOW_PORT_8 || widget_num > XBOW_PORT_F)
		return 0;

	/* Find "brick" in the path name */
	bp = strstr(hw_path_name, "brick");
	if (bp == NULL)
		return 0;

	/* Find preceding slash */
	sp = bp;
	while (sp > hw_path_name) {
		sp--;
		if (*sp == '/')
			break;
	}

	/* Invalid if no slash precedes "brick".  (The old check tested
	 * "if (!sp)", which could never trigger: sp always points into
	 * hw_path_name here, so a path without a slash slipped through.)
	 */
	if (*sp != '/')
		return 0;

	/* Bump slash pointer to "brick" prefix */
	sp++;

	/*
	 * Verify "brick" prefix length; valid examples:
	 *	'I' from "/Ibrick"
	 *	'P' from "/Pbrick"
	 *	'X' from "/Xbrick"
	 */
	if ((bp - sp) != 1)
		return 0;

	return (io_brick_map_widget((int)*sp, widget_num));
}
/* $Id$
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/sn/sgi.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/io.h>
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/pci/bridge.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/sn1/hubdev.h>
#include <asm/sn/module.h>
#include <asm/sn/pci/pcibr.h>
#include <asm/sn/xtalk/xswitch.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/sn_cpuid.h>
/* #define LDEBUG 1 */

/* With LDEBUG, DPRINTF/printf become printk; otherwise DPRINTF is a no-op */
#ifdef LDEBUG
#define DPRINTF printk
#define printf printk
#else
#define DPRINTF(x...)
#endif

/* Table of known modules, kept sorted by module id (see module_add_node) */
module_t *modules[MODULE_MAX];
int nummodules;

/* Obfuscation constants applied to serial numbers by the
 * encode/decode routines below.
 */
#define SN00_SERIAL_FUDGE 0x3b1af409d513c2
#define SN0_SERIAL_FUDGE 0x6e
/*
 * encode_int_serial: Obfuscate a 64-bit serial number.  Adds
 * SN00_SERIAL_FUDGE and then shuffles the bytes of the sum into *dest:
 * output byte i comes from the middle of the value, fanning outward
 * (even i moves forward from the midpoint, odd i moves backward).
 */
void
encode_int_serial(uint64_t src,uint64_t *dest)
{
	uint64_t fudged = src + SN00_SERIAL_FUDGE;
	char *out = (char *)dest;
	char *in = (char *)&fudged;
	int half = sizeof(long long) / 2;
	int i;

	for (i = 0; i < (int)sizeof(long long); i++) {
		int offset = (i % 2) ? -(i / 2) - 1 : i / 2;

		out[i] = in[half + offset];
	}
}
/*
 * decode_int_serial: Invert encode_int_serial().  Un-shuffles the bytes
 * of src back into their natural order and subtracts SN00_SERIAL_FUDGE.
 */
void
decode_int_serial(uint64_t src, uint64_t *dest)
{
	uint64_t scrambled;
	char *out = (char *)&scrambled;
	char *in = (char *)&src;
	int half = sizeof(long long) / 2;
	int i;

	for (i = 0; i < (int)sizeof(long long); i++) {
		int offset = (i % 2) ? -(i / 2) - 1 : i / 2;

		out[half + offset] = in[i];
	}

	*dest = scrambled - SN00_SERIAL_FUDGE;
}
/*
 * encode_str_serial: Obfuscate a string serial number.  Each output
 * byte is taken from src fanning outward from the midpoint (even i
 * forward, odd i backward) with SN0_SERIAL_FUDGE added.
 */
void
encode_str_serial(const char *src, char *dest)
{
	int half = MAX_SERIAL_NUM_SIZE / 2;
	int i;

	for (i = 0; i < MAX_SERIAL_NUM_SIZE; i++) {
		int offset = (i % 2) ? -(i / 2) - 1 : i / 2;

		dest[i] = src[half + offset] + SN0_SERIAL_FUDGE;
	}
}
/*
 * decode_str_serial: Invert encode_str_serial().  Subtracts
 * SN0_SERIAL_FUDGE from each byte of src and scatters the result back
 * to its original position around the midpoint.
 */
void
decode_str_serial(const char *src, char *dest)
{
	int half = MAX_SERIAL_NUM_SIZE / 2;
	int i;

	for (i = 0; i < MAX_SERIAL_NUM_SIZE; i++) {
		int offset = (i % 2) ? -(i / 2) - 1 : i / 2;

		dest[half + offset] = src[i] - SN0_SERIAL_FUDGE;
	}
}
/*
 * module_lookup: Return the module with the given id from the modules[]
 * table, or NULL when no such module has been registered.
 */
module_t *module_lookup(moduleid_t id)
{
	int idx;

	for (idx = 0; idx < nummodules; idx++) {
		module_t *m = modules[idx];

		if (m->id != id)
			continue;
		DPRINTF("module_lookup: found m=0x%p\n", m);
		return m;
	}

	return NULL;
}
/*
 * module_add_node
 *
 * The first time a new module number is seen, a module structure is
 * inserted into the module list in order sorted by module number
 * and the structure is initialized.
 *
 * The node number is added to the list of nodes in the module.
 * Returns the (possibly new) module structure.
 */
module_t *module_add_node(geoid_t geoid, cnodeid_t cnodeid)
{
	module_t *m;
	int i;
	char buffer[16];
	moduleid_t moduleid;

	memset(buffer, 0, 16);
	moduleid = geo_module(geoid);
	format_module_id(buffer, moduleid, MODULE_FORMAT_BRIEF);
	DPRINTF("module_add_node: moduleid=%s node=%d\n", buffer, cnodeid);

	if ((m = module_lookup(moduleid)) == 0) {
		m = kmalloc(sizeof (module_t), GFP_KERNEL);
		/* Verify the allocation BEFORE touching the memory.  The
		 * previous code memset() the buffer first and asserted
		 * afterwards, dereferencing a NULL pointer on allocation
		 * failure before the assert could fire.
		 */
		ASSERT_ALWAYS(m);
		memset(m, 0 , sizeof(module_t));

		m->id = moduleid;
		spin_lock_init(&m->lock);

		mutex_init_locked(&m->thdcnt);

		/* Insert in sorted order by module number */
		for (i = nummodules; i > 0 && modules[i - 1]->id > moduleid; i--)
			modules[i] = modules[i - 1];

		modules[i] = m;
		nummodules++;
	}

	/* NOTE(review): no bounds check of m->nodecnt against the size of
	 * m->nodes[]/m->geoid[] -- presumably bounded by nodes-per-module;
	 * confirm against module_t's definition.
	 */
	m->nodes[m->nodecnt] = cnodeid;
	m->geoid[m->nodecnt] = geoid;
	m->nodecnt++;

	DPRINTF("module_add_node: module %s now has %d nodes\n", buffer, m->nodecnt);

	return m;
}
/*
 * module_probe_snum: Read the brick serial number and the system serial
 * number for module m from the klconfig boards on nasid.  The brick
 * serial is stored obfuscated in m->snum (m->snum_valid set); the
 * system serial is copied into m->sys_snum (m->sys_snum_valid set).
 * Returns 1 when a valid system serial number was found, 0 otherwise.
 */
int module_probe_snum(module_t *m, nasid_t nasid)
{
	lboard_t *board;
	klmod_serial_num_t *comp;
	char * bcopy(const char * src, char * dest, int count);
	char serial_number[16];

	/*
	 * record brick serial number
	 */
	board = find_lboard((lboard_t *) KL_CONFIG_INFO(nasid), KLTYPE_SNIA);

	if (! board || KL_CONFIG_DUPLICATE_BOARD(board))
	{
#if LDEBUG
		printf ("module_probe_snum: no IP35 board found!\n");
#endif
		return 0;
	}

	board_serial_number_get( board, serial_number );
	if( serial_number[0] != '\0' ) {
		/* Store the brick serial in obfuscated form */
		encode_str_serial( serial_number, m->snum.snum_str );
		m->snum_valid = 1;
	}
#if LDEBUG
	else {
		printf("module_probe_snum: brick serial number is null!\n");
	}
	printf("module_probe_snum: brick serial number == %s\n", serial_number);
#endif /* DEBUG */

	/* Now look for the system serial number on the I/O brick board */
	board = find_lboard((lboard_t *) KL_CONFIG_INFO(nasid),
			    KLTYPE_IOBRICK_XBOW);

	if (! board || KL_CONFIG_DUPLICATE_BOARD(board))
		return 0;

	comp = GET_SNUM_COMP(board);

	if (comp) {
#if LDEBUG
		int i;

		printf("********found module with id %x and string", m->id);

		for (i = 0; i < MAX_SERIAL_NUM_SIZE; i++)
			printf(" %x ", comp->snum.snum_str[i]);

		printf("\n"); /* Fudged string is not ASCII */
#endif

		if (comp->snum.snum_str[0] != '\0') {
			bcopy(comp->snum.snum_str,
			      m->sys_snum,
			      MAX_SERIAL_NUM_SIZE);
			m->sys_snum_valid = 1;
		}
	}

	if (m->sys_snum_valid)
		return 1;
	else {
		DPRINTF("Invalid serial number for module %d, "
			"possible missing or invalid NIC.", m->id);
		return 0;
	}
}
void
io_module_init(void)
{
cnodeid_t node;
lboard_t *board;
nasid_t nasid;
int nserial;
module_t *m;
DPRINTF("*******module_init\n");
nserial = 0;
for (node = 0; node < numnodes; node++) {
nasid = COMPACT_TO_NASID_NODEID(node);
board = find_lboard((lboard_t *) KL_CONFIG_INFO(nasid), KLTYPE_SNIA);
ASSERT(board);
m = module_add_node(board->brd_geoid, node);
if (! m->snum_valid && module_probe_snum(m, nasid))
nserial++;
}
DPRINTF("********found total of %d serial numbers in the system\n",
nserial);
if (nserial == 0)
DPRINTF(KERN_WARNING "io_module_init: No serial number found.\n");
}
/*
 * get_kmod_info: Fill in *mod_info for the module at index cmod of the
 * modules[] table.  Returns 0 on success, -EINVAL for an out-of-range
 * index or a NULL destination.
 */
int
get_kmod_info(cmoduleid_t cmod, module_info_t *mod_info)
{
	/* Guard against a bogus destination as well as a bad index */
	if (!mod_info)
		return -EINVAL;
	if (cmod < 0 || cmod >= nummodules)
		return -EINVAL;

	mod_info->mod_num = modules[cmod]->id;

	ia64_sn_sys_serial_get(mod_info->serial_str);
	mod_info->serial_num = ia64_sn_partition_serial_get();

	return 0;
}
/* $Id$
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/config.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <asm/sn/types.h>
#include <asm/sn/hack.h>
#include <asm/sn/sgi.h>
#include <asm/sn/io.h>
#include <asm/sn/driver.h>
#include <asm/sn/iograph.h>
#include <asm/param.h>
#include <asm/sn/pio.h>
#include <asm/sn/xtalk/xwidget.h>
#include <asm/sn/sn_private.h>
#include <asm/sn/addrs.h>
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/hcl_util.h>
#include <asm/sn/intr.h>
#include <asm/sn/xtalk/xtalkaddrs.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/pci/pciio.h>
#include <asm/sn/pci/pcibr.h>
#include <asm/sn/pci/pcibr_private.h>
#include <asm/sn/pci/pci_bus_cvlink.h>
#include <asm/sn/simulator.h>
#include <asm/sn/sn_cpuid.h>
extern int bridge_rev_b_data_check_disable;

/* Logical Linux PCI bus number -> SGI hwgraph pci bus vertex
 * (populated by pci_bus_map_create()).
 */
devfs_handle_t busnum_to_pcibr_vhdl[MAX_PCI_XWIDGET];
/* Logical Linux PCI bus number -> NASID of the bridge's master node */
nasid_t busnum_to_nid[MAX_PCI_XWIDGET];
/* Logical Linux PCI bus number -> preallocated ATE DMA map array */
void * busnum_to_atedmamaps[MAX_PCI_XWIDGET];
/* Count of PCI bridges found by pci_bus_map_create() */
unsigned char num_bridges;
/* Set by sn_pci_fixup() once probing is done; quiets devfn_to_vertex()
 * warnings for the invented bus numbers Linux probes with.
 */
static int done_probing = 0;

static int pci_bus_map_create(devfs_handle_t xtalk, char * io_moduleid);
devfs_handle_t devfn_to_vertex(unsigned char busnum, unsigned int devfn);

/* Indexed by nasid; non-zero apparently means that nasid's bridge is a
 * PIC (checked before toggling io_sh_swapper) -- see sn_pci_fixup().
 */
extern unsigned char Is_pic_on_this_nasid[512];

extern void sn_init_irq_desc(void);
extern void register_pcibr_intr(int irq, pcibr_intr_t intr);
/*
 * For the given device, initialize whether it is a PIC device.
 * Caches IS_PIC_SOFT() of the device's bus soft state in the sysdata.
 */
static void
set_isPIC(struct sn_device_sysdata *device_sysdata)
{
	pciio_info_t pciio_info = pciio_info_get(device_sysdata->vhdl);
	pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);

	/* (stray double semicolon removed) */
	device_sysdata->isPIC = IS_PIC_SOFT(pcibr_soft);
}
/*
 * pci_bus_cvlink_init() - To be called once during initialization before
 *	SGI IO Infrastructure init is called.  Clears the bus-number
 *	translation tables and kicks off ioconfig bus setup.
 */
void
pci_bus_cvlink_init(void)
{
	extern void ioconfig_bus_init(void);

	/* Start with empty translation tables; sizeof(array) covers
	 * exactly MAX_PCI_XWIDGET entries of each element type.
	 */
	memset(busnum_to_pcibr_vhdl, 0, sizeof(busnum_to_pcibr_vhdl));
	memset(busnum_to_nid, 0, sizeof(busnum_to_nid));
	memset(busnum_to_atedmamaps, 0, sizeof(busnum_to_atedmamaps));

	num_bridges = 0;

	ioconfig_bus_init();
}
/*
 * pci_bus_to_vertex() - Given a logical Linux Bus Number returns the
 *	associated pci bus vertex from the SGI IO Infrastructure
 *	(NULL/zero entry when the bus number is unknown).
 */
devfs_handle_t
pci_bus_to_vertex(unsigned char busnum)
{
	/* Table is filled in by pci_bus_map_create() */
	return busnum_to_pcibr_vhdl[busnum];
}
/*
 * devfn_to_vertex() - returns the vertex of the device given the bus, slot,
 * and function numbers.  Returns NULL when the bus or device is unknown.
 */
devfs_handle_t
devfn_to_vertex(unsigned char busnum, unsigned int devfn)
{
	int slot = 0;
	int func = 0;
	char name[16];
	devfs_handle_t  pci_bus = NULL;
	devfs_handle_t  device_vertex = (devfs_handle_t)NULL;

	/*
	 * Go get the pci bus vertex.
	 */
	pci_bus = pci_bus_to_vertex(busnum);
	if (!pci_bus) {
		/*
		 * During probing, the Linux pci code invents nonexistent
		 * bus numbers and pci_dev structures and tries to access
		 * them to determine existence. Don't crib during probing.
		 */
		if (done_probing)
			printk("devfn_to_vertex: Invalid bus number %d given.\n", busnum);
		return(NULL);
	}

	/*
	 * Go get the slot&function vertex.
	 * Should call pciio_slot_func_to_name() when ready.
	 */
	slot = PCI_SLOT(devfn);
	func = PCI_FUNC(devfn);

	/*
	 * For a NON Multi-function card the name of the device looks like:
	 * ../pci/1, ../pci/2 ..
	 */
	if (func == 0) {
		sprintf(name, "%d", slot);
		if (hwgraph_traverse(pci_bus, name, &device_vertex) ==
		    GRAPH_SUCCESS) {
			if (device_vertex) {
				return(device_vertex);
			}
		}
	}

	/*
	 * This maybe a multifunction card.  It's names look like:
	 * ../pci/1a, ../pci/1b, etc.
	 * (Also the fall-through for func == 0 when the plain slot name
	 * was not found above.)
	 */
	sprintf(name, "%d%c", slot, 'a'+func);
	if (hwgraph_traverse(pci_bus, name, &device_vertex) != GRAPH_SUCCESS) {
		/* device_vertex stays NULL-initialized when the traverse
		 * fails without producing a vertex.
		 */
		if (!device_vertex) {
			return(NULL);
		}
	}

	return(device_vertex);
}
/*
 * For the given device, initialize the addresses for both the Device(x) Flush
 * Write Buffer register and the Xbow Flush Register for the port the PCI bus
 * is connected.
 */
static void
set_flush_addresses(struct pci_dev *device_dev,
	struct sn_device_sysdata *device_sysdata)
{
	pciio_info_t pciio_info = pciio_info_get(device_sysdata->vhdl);
	pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
	pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
	bridge_t *bridge = pcibr_soft->bs_base;
	nasid_t nasid;

	/*
	 * Get the nasid from the bridge.
	 *
	 * BUGFIX: this used to be NASID_GET(device_sysdata->dma_buf_sync),
	 * but dma_buf_sync is only assigned below and the caller
	 * (sn_pci_fixup) kmalloc()s the sysdata without zeroing it, so the
	 * old code derived the nasid from uninitialized memory.  Take it
	 * from the bridge base address instead, as the comment intended.
	 */
	nasid = NASID_GET(bridge);

	if (IS_PIC_DEVICE(device_dev)) {
		device_sysdata->dma_buf_sync = (volatile unsigned int *)
			&bridge->b_wr_req_buf[pciio_slot].reg;
		device_sysdata->xbow_buf_sync = (volatile unsigned int *)
			XBOW_PRIO_LINKREGS_PTR(NODE_SWIN_BASE(nasid, 0),
			pcibr_soft->bs_xid);
	} else {
		/*
		 * Accessing Xbridge and Xbow register when SHUB swapoper is on!.
		 */
		device_sysdata->dma_buf_sync = (volatile unsigned int *)
			((uint64_t)&(bridge->b_wr_req_buf[pciio_slot].reg)^4);
		device_sysdata->xbow_buf_sync = (volatile unsigned int *)
			((uint64_t)(XBOW_PRIO_LINKREGS_PTR(
			NODE_SWIN_BASE(nasid, 0), pcibr_soft->bs_xid)) ^ 4);
	}

#ifdef DEBUG
	printk("set_flush_addresses: dma_buf_sync %p xbow_buf_sync %p\n",
		device_sysdata->dma_buf_sync, device_sysdata->xbow_buf_sync);

	printk("set_flush_addresses: dma_buf_sync\n");
	while((volatile unsigned int )*device_sysdata->dma_buf_sync);
	printk("set_flush_addresses: xbow_buf_sync\n");
	while((volatile unsigned int )*device_sysdata->xbow_buf_sync);
#endif
}
/*
 * Most drivers currently do not properly tell the arch specific pci dma
 * interfaces whether they can handle A64.  Here is where we privately
 * keep track of this: mark the known-A64-capable devices.
 */
static void __init
set_sn_pci64(struct pci_dev *dev)
{
	switch (dev->vendor) {
	case PCI_VENDOR_ID_QLOGIC:
		/* QLogic ISP2100/ISP2200 are known A64 capable */
		if (dev->device == PCI_DEVICE_ID_QLOGIC_ISP2100 ||
		    dev->device == PCI_DEVICE_ID_QLOGIC_ISP2200)
			SET_PCIA64(dev);
		break;

	case PCI_VENDOR_ID_SGI:
		/* The SGI IOC3 is known A64 capable */
		if (dev->device == PCI_DEVICE_ID_SGI_IOC3)
			SET_PCIA64(dev);
		break;

	default:
		break;
	}
}
/*
 * sn_pci_fixup() - This routine is called when platform_pci_fixup() is
 *	invoked at the end of pcibios_init() to link the Linux pci
 *	infrastructure to SGI IO Infrastructure - ia64/kernel/pci.c
 *
 *	Other platform specific fixup can also be done here.
 *
 *	arg == 0: pre-probe pass (irq descriptors, BIOS find, vector
 *	blocks).  arg != 0: post-probe pass that wires every pci_bus and
 *	pci_dev to its hwgraph vertex, fixes resources, and allocates
 *	and connects interrupts.
 */
void
sn_pci_fixup(int arg)
{
	struct list_head *ln;
	struct pci_bus *pci_bus = NULL;
	struct pci_dev *device_dev = NULL;
	struct sn_widget_sysdata *widget_sysdata;
	struct sn_device_sysdata *device_sysdata;
	pciio_intr_t intr_handle;
	int cpuid, bit;
	devfs_handle_t device_vertex;
	pciio_intr_line_t lines;
	extern void sn_pci_find_bios(void);
	extern int numnodes;
	int cnode;
	extern void io_sh_swapper(int, int);

	/* Turn swapping off on non-PIC nasids while we poke the hardware
	 * (turned back on before every return below).
	 */
	for (cnode = 0; cnode < numnodes; cnode++) {
		if ( !Is_pic_on_this_nasid[cnodeid_to_nasid(cnode)] )
			io_sh_swapper((cnodeid_to_nasid(cnode)), 0);
	}

	if (arg == 0) {
#ifdef CONFIG_PROC_FS
		extern void register_sn_procfs(void);
#endif

		sn_init_irq_desc();
		sn_pci_find_bios();
		for (cnode = 0; cnode < numnodes; cnode++) {
			extern void intr_init_vecblk(nodepda_t *npda, cnodeid_t, int);
			intr_init_vecblk(NODEPDA(cnode), cnode, 0);
		}
		/*
		 * When we return to generic Linux, Swapper is always on ..
		 */
		for (cnode = 0; cnode < numnodes; cnode++) {
			if ( !Is_pic_on_this_nasid[cnodeid_to_nasid(cnode)] )
				io_sh_swapper((cnodeid_to_nasid(cnode)), 1);
		}
#ifdef CONFIG_PROC_FS
		register_sn_procfs();
#endif
		return;
	}

	/* From here on devfn_to_vertex() complains about unknown buses */
	done_probing = 1;

	/*
	 * Initialize the pci bus vertex in the pci_bus struct.
	 */
	for( ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
		pci_bus = pci_bus_b(ln);
		widget_sysdata = kmalloc(sizeof(struct sn_widget_sysdata),
					 GFP_KERNEL);
		widget_sysdata->vhdl = pci_bus_to_vertex(pci_bus->number);
		pci_bus->sysdata = (void *)widget_sysdata;
	}

	/*
	 * set the root start and end so that drivers calling check_region()
	 * won't see a conflict
	 */
	ioport_resource.start = 0xc000000000000000;
	ioport_resource.end = 0xcfffffffffffffff;

	/*
	 * Set the root start and end for Mem Resource.
	 */
	iomem_resource.start = 0;
	iomem_resource.end = 0xffffffffffffffff;

	/*
	 * Initialize the device vertex in the pci_dev struct.
	 */
	pci_for_each_dev(device_dev) {
		unsigned int irq;
		int idx;
		u16 cmd;
		devfs_handle_t vhdl;
		unsigned long size;
		extern int bit_pos_to_irq(int);

		if (device_dev->vendor == PCI_VENDOR_ID_SGI &&
		    device_dev->device == PCI_DEVICE_ID_SGI_IOC3) {
			extern void pci_fixup_ioc3(struct pci_dev *d);
			pci_fixup_ioc3(device_dev);
		}

		/* Set the device vertex */
		device_sysdata = kmalloc(sizeof(struct sn_device_sysdata),
					 GFP_KERNEL);
		device_sysdata->vhdl = devfn_to_vertex(device_dev->bus->number, device_dev->devfn);
		device_sysdata->isa64 = 0;
		/*
		 * Set the xbridge Device(X) Write Buffer Flush and Xbow Flush
		 * register addresses.
		 */
		(void) set_flush_addresses(device_dev, device_sysdata);

		device_dev->sysdata = (void *) device_sysdata;
		set_sn_pci64(device_dev);
		set_isPIC(device_sysdata);

		pci_read_config_word(device_dev, PCI_COMMAND, &cmd);

		/*
		 * Set the resources address correctly.  The assumption here
		 * is that the addresses in the resource structure has been
		 * read from the card and it was set in the card by our
		 * Infrastructure ..
		 */
		vhdl = device_sysdata->vhdl;
		for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
			size = 0;
			size = device_dev->resource[idx].end -
				device_dev->resource[idx].start;
			if (size) {
				/* Rebase the BAR at the uncached PIO
				 * address for this window.
				 */
				device_dev->resource[idx].start = (unsigned long)pciio_pio_addr(vhdl, 0, PCIIO_SPACE_WIN(idx), 0, size, 0, (IS_PIC_DEVICE(device_dev)) ? 0 : PCIIO_BYTE_STREAM);
				device_dev->resource[idx].start |= __IA64_UNCACHED_OFFSET;
			}
			else
				continue;

			device_dev->resource[idx].end =
				device_dev->resource[idx].start + size;

			if (device_dev->resource[idx].flags & IORESOURCE_IO)
				cmd |= PCI_COMMAND_IO;

			if (device_dev->resource[idx].flags & IORESOURCE_MEM)
				cmd |= PCI_COMMAND_MEMORY;
		}
#if 0
		/*
		 * Software WAR for a Software BUG.
		 * This is only temporary.
		 * See PV 872791
		 */

		/*
		 * Now handle the ROM resource ..
		 */
		size = device_dev->resource[PCI_ROM_RESOURCE].end -
			device_dev->resource[PCI_ROM_RESOURCE].start;

		if (size) {
			device_dev->resource[PCI_ROM_RESOURCE].start =
			(unsigned long) pciio_pio_addr(vhdl, 0, PCIIO_SPACE_ROM, 0,
				size, 0, (IS_PIC_DEVICE(device_dev)) ? 0 : PCIIO_BYTE_STREAM);
			device_dev->resource[PCI_ROM_RESOURCE].start |= __IA64_UNCACHED_OFFSET;
			device_dev->resource[PCI_ROM_RESOURCE].end =
				device_dev->resource[PCI_ROM_RESOURCE].start + size;
		}
#endif

		/*
		 * Update the Command Word on the Card.
		 */
		cmd |= PCI_COMMAND_MASTER; /* If the device doesn't support */
					/* bit gets dropped .. no harm */
		pci_write_config_word(device_dev, PCI_COMMAND, cmd);

		pci_read_config_byte(device_dev, PCI_INTERRUPT_PIN, (unsigned char *)&lines);
		if (device_dev->vendor == PCI_VENDOR_ID_SGI &&
		    device_dev->device == PCI_DEVICE_ID_SGI_IOC3 ) {
			lines = 1;
		}

		device_sysdata = (struct sn_device_sysdata *)device_dev->sysdata;
		device_vertex = device_sysdata->vhdl;

		intr_handle = pciio_intr_alloc(device_vertex, NULL, lines, device_vertex);

		/* Encode cpu in the upper bits of the irq number */
		bit = intr_handle->pi_irq;
		cpuid = intr_handle->pi_cpu;
		irq = bit;
		irq = irq + (cpuid << 8);
		pciio_intr_connect(intr_handle, (intr_func_t)0, (intr_arg_t)0);
		device_dev->irq = irq;
		register_pcibr_intr(irq, (pcibr_intr_t)intr_handle);
#ifdef ajmtestintr
		{
			int slot = PCI_SLOT(device_dev->devfn);
			static int timer_set = 0;
			pcibr_intr_t pcibr_intr = (pcibr_intr_t)intr_handle;
			pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
			extern void intr_test_handle_intr(int, void*, struct pt_regs *);

			if (!timer_set) {
				intr_test_set_timer();
				timer_set = 1;
			}
			intr_test_register_irq(irq, pcibr_soft, slot);
			request_irq(irq, intr_test_handle_intr,0,NULL, NULL);
		}
#endif
	}

	/* Re-enable swapping before returning to generic Linux */
	for (cnode = 0; cnode < numnodes; cnode++) {
		if ( !Is_pic_on_this_nasid[cnodeid_to_nasid(cnode)] )
			io_sh_swapper((cnodeid_to_nasid(cnode)), 1);
	}
}
/*
 * linux_bus_cvlink() Creates a link between the Linux PCI Bus number
 * to the actual hardware component that it represents:
 * /dev/hw/linux/busnum/0 -> ../../../hw/module/001c01/slab/0/Ibrick/xtalk/15/pci
 *
 * The bus vertex, when called to devfs_generate_path() returns:
 * hw/module/001c01/slab/0/Ibrick/xtalk/15/pci
 * hw/module/001c01/slab/1/Pbrick/xtalk/12/pci-x/0
 * hw/module/001c01/slab/1/Pbrick/xtalk/12/pci-x/1
 */
void
linux_bus_cvlink(void)
{
	char name[8];
	int bus;

	for (bus = 0; bus < MAX_PCI_XWIDGET; bus++) {
		/* Skip bus numbers with no bridge vertex behind them */
		if (!busnum_to_pcibr_vhdl[bus])
			continue;

		/* Edge name is the hex bus number */
		sprintf(name, "%x", bus);
		(void) hwgraph_edge_add(linux_busnum, busnum_to_pcibr_vhdl[bus],
					name);
	}
}
/*
 * pci_bus_map_create() - Called by pci_bus_to_hcl_cvlink() to finish the job.
 *
 * Linux PCI Bus numbers are assigned from lowest module_id numbers
 * (rack/slot etc.) starting from HUB_WIDGET_ID_MAX down to
 * HUB_WIDGET_ID_MIN:
 *	widgetnum 15 gets lower Bus Number than widgetnum 14 etc.
 *
 * Given 2 modules 001c01 and 001c02 we get the following mappings:
 *	001c01, widgetnum 15 = Bus number 0
 *	001c01, widgetnum 14 = Bus number 1
 *	001c02, widgetnum 15 = Bus number 3
 *	001c02, widgetnum 14 = Bus number 4
 *	etc.
 *
 * The rationale for starting Bus Number 0 with Widget number 15 is because
 * the system boot disks are always connected via Widget 15 Slot 0 of the
 * I-brick.  Linux creates /dev/sd* devices(naming) starting from Bus Number 0
 * Therefore, /dev/sda1 will be the first disk, on Widget 15 of the lowest
 * module id(Master Cnode) of the system.
 *
 * Returns 0 on success, 1 when the hub info of a bridge's master node
 * cannot be obtained.
 */
static int
pci_bus_map_create(devfs_handle_t xtalk, char * io_moduleid)
{

	devfs_handle_t master_node_vertex = NULL;
	devfs_handle_t xwidget = NULL;
	devfs_handle_t pci_bus = NULL;
	hubinfo_t hubinfo = NULL;
	xwidgetnum_t widgetnum;
	char pathname[128];
	graph_error_t rv;
	int bus;
	int basebus_num;
	int bus_number;

	/*
	 * Loop through this vertex and get the Xwidgets ..
	 */

	/* PCI devices: number busses from the highest widget down */
	for (widgetnum = HUB_WIDGET_ID_MAX; widgetnum >= HUB_WIDGET_ID_MIN; widgetnum--) {
		sprintf(pathname, "%d", widgetnum);
		xwidget = NULL;

		/*
		 * Example - /hw/module/001c16/Pbrick/xtalk/8 is the xwidget
		 *	     /hw/module/001c16/Pbrick/xtalk/8/pci/1 is device
		 */
		rv = hwgraph_traverse(xtalk, pathname, &xwidget);
		if ( (rv != GRAPH_SUCCESS) ) {
			if (!xwidget) {
				continue;
			}
		}

		sprintf(pathname, "%d/"EDGE_LBL_PCI, widgetnum);
		pci_bus = NULL;
		if (hwgraph_traverse(xtalk, pathname, &pci_bus) != GRAPH_SUCCESS)
			if (!pci_bus) {
				continue;
			}

		/*
		 * Assign the correct bus number and also the nasid of this
		 * pci Xwidget.
		 *
		 * Should not be any race here ...
		 */
		num_bridges++;
		busnum_to_pcibr_vhdl[num_bridges - 1] = pci_bus;

		/*
		 * Get the master node and from there get the NASID.
		 */
		master_node_vertex = device_master_get(xwidget);
		if (!master_node_vertex) {
			printk("WARNING: pci_bus_map_create: Unable to get .master for vertex 0x%p\n", (void *)xwidget);
		}

		hubinfo_get(master_node_vertex, &hubinfo);
		if (!hubinfo) {
			printk("WARNING: pci_bus_map_create: Unable to get hubinfo for master node vertex 0x%p\n", (void *)master_node_vertex);
			return(1);
		} else {
			busnum_to_nid[num_bridges - 1] = hubinfo->h_nasid;
		}

		/*
		 * Pre assign DMA maps needed for 32 Bits Page Map DMA.
		 *
		 * NOTE(review): on kmalloc failure only a warning is
		 * printed and the memset below would dereference NULL --
		 * confirm whether allocation failure is considered fatal
		 * this early in boot.
		 */
		busnum_to_atedmamaps[num_bridges - 1] = (void *) kmalloc(
			sizeof(struct sn_dma_maps_s) * MAX_ATE_MAPS, GFP_KERNEL);
		if (!busnum_to_atedmamaps[num_bridges - 1])
			printk("WARNING: pci_bus_map_create: Unable to precreate ATE DMA Maps for busnum %d vertex 0x%p\n", num_bridges - 1, (void *)xwidget);

		memset(busnum_to_atedmamaps[num_bridges - 1], 0x0,
			sizeof(struct sn_dma_maps_s) * MAX_ATE_MAPS);
	}

	/*
	 * PCIX devices
	 * We number busses differently for PCI-X devices.
	 * We start from Lowest Widget on up ..
	 */

	(void) ioconfig_get_busnum((char *)io_moduleid, &basebus_num);

	for (widgetnum = HUB_WIDGET_ID_MIN; widgetnum <= HUB_WIDGET_ID_MAX; widgetnum++) {

		/* Do both buses */
		for ( bus = 0; bus < 2; bus++ ) {
			sprintf(pathname, "%d", widgetnum);
			xwidget = NULL;

			/*
			 * Example - /hw/module/001c16/Pbrick/xtalk/8 is the xwidget
			 *	     /hw/module/001c16/Pbrick/xtalk/8/pci-x/0 is the bus
			 *	     /hw/module/001c16/Pbrick/xtalk/8/pci-x/0/1 is device
			 */
			rv = hwgraph_traverse(xtalk, pathname, &xwidget);
			if ( (rv != GRAPH_SUCCESS) ) {
				if (!xwidget) {
					continue;
				}
			}

			if ( bus == 0 )
				sprintf(pathname, "%d/"EDGE_LBL_PCIX_0, widgetnum);
			else
				sprintf(pathname, "%d/"EDGE_LBL_PCIX_1, widgetnum);
			pci_bus = NULL;
			if (hwgraph_traverse(xtalk, pathname, &pci_bus) != GRAPH_SUCCESS)
				if (!pci_bus) {
					continue;
				}

			/*
			 * Assign the correct bus number and also the nasid of this
			 * pci Xwidget.
			 *
			 * Should not be any race here ...
			 */
			bus_number = basebus_num + bus + io_brick_map_widget(MODULE_PXBRICK, widgetnum);
#ifdef DEBUG
			printk("bus_number %d basebus_num %d bus %d io %d\n",
				bus_number, basebus_num, bus,
				io_brick_map_widget(MODULE_PXBRICK, widgetnum));
#endif
			busnum_to_pcibr_vhdl[bus_number] = pci_bus;

			/*
			 * Pre assign DMA maps needed for 32 Bits Page Map DMA.
			 */
			busnum_to_atedmamaps[bus_number] = (void *) kmalloc(
				sizeof(struct sn_dma_maps_s) * MAX_ATE_MAPS, GFP_KERNEL);
			if (!busnum_to_atedmamaps[bus_number])
				printk("WARNING: pci_bus_map_create: Unable to precreate ATE DMA Maps for busnum %d vertex 0x%p\n", num_bridges - 1, (void *)xwidget);

			memset(busnum_to_atedmamaps[bus_number], 0x0,
				sizeof(struct sn_dma_maps_s) * MAX_ATE_MAPS);
		}
	}

	return(0);
}
/*
 * pci_bus_to_hcl_cvlink() - This routine is called after SGI IO Infrastructure
 *	initialization has completed to set up the mappings between Xbridge
 *	and logical pci bus numbers.  We also set up the NASID for each of these
 *	xbridges.
 *
 *	Must be called before pci_init() is invoked.
 *
 *	Returns 0 unconditionally.
 */
int
pci_bus_to_hcl_cvlink(void)
{
	devfs_handle_t devfs_hdl = NULL;
	devfs_handle_t xtalk = NULL;
	int rv = 0;
	char name[256];
	char tmp_name[256];
	int i, ii;

	/*
	 * Figure out which IO Brick is connected to the Compute Bricks.
	 */
	for (i = 0; i < nummodules; i++) {
		extern int iomoduleid_get(nasid_t);
		moduleid_t iobrick_id;
		nasid_t nasid = -1;
		int nodecnt;
		int n = 0;

		nodecnt = modules[i]->nodecnt;
		for ( n = 0; n < nodecnt; n++ ) {
			nasid = cnodeid_to_nasid(modules[i]->nodes[n]);
			iobrick_id = iomoduleid_get(nasid);
			if ((int)iobrick_id > 0) { /* Valid module id */
				/*
				 * Record the brick's module id directly in the
				 * module table.  (Removed a shadowing local
				 * "name[12]" buffer that was cleared but never
				 * read.)
				 */
				format_module_id((char *)&(modules[i]->io[n].moduleid), iobrick_id, MODULE_FORMAT_BRIEF);
			}
		}
	}

	devfs_hdl = hwgraph_path_to_vertex("/dev/hw/module");
	for (i = 0; i < nummodules ; i++) {
		for ( ii = 0; ii < 2 ; ii++ ) {
			/* Build "<module>/slab/<slab>/Pbrick/xtalk" for this brick. */
			memset(name, 0, 256);
			memset(tmp_name, 0, 256);
			format_module_id(name, modules[i]->id, MODULE_FORMAT_BRIEF);
			sprintf(tmp_name, "/slab/%d/Pbrick/xtalk", geo_slab(modules[i]->geoid[ii]));
			strcat(name, tmp_name);
			xtalk = NULL;
			rv = hwgraph_edge_get(devfs_hdl, name, &xtalk);
			/* NOTE(review): rv is not checked and xtalk may remain
			 * NULL if the edge is absent; pci_bus_map_create()
			 * receives it either way — confirm intended. */
			pci_bus_map_create(xtalk, (char *)&(modules[i]->io[ii].moduleid));
		}
	}

	/*
	 * Create the Linux PCI bus number vertex link.
	 */
	(void)linux_bus_cvlink();
	(void)ioconfig_bus_new_entries();
	return(0);
}
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
#
# Makefile for the sn2 specific pci bridge routines.

EXTRA_CFLAGS := -DLITTLE_ENDIAN

ifdef CONFIG_IA64_SGI_SN2
EXTRA_CFLAGS += -DSHUB_SWAP_WAR
endif

# Each object appears exactly once (pcibr_dvr.o was previously listed twice).
obj-$(CONFIG_IA64_SGI_SN2) += pcibr_dvr.o pcibr_ate.o pcibr_config.o \
			      pcibr_hints.o \
			      pcibr_intr.o pcibr_rrb.o pcibr_slot.o \
			      pcibr_error.o
/* $Id$
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#define USRPCI 0
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <asm/sn/sgi.h>
#include <asm/sn/xtalk/xbow.h> /* Must be before iograph.h to get MAX_PORT_NUM */
#include <asm/sn/iograph.h>
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/hcl_util.h>
#include <asm/sn/labelcl.h>
#include <asm/sn/pci/bridge.h>
#include <asm/sn/ioerror_handling.h>
#include <asm/sn/pci/pciio.h>
#include <asm/sn/pci/pciio_private.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/io.h>
#include <asm/sn/pci/pci_bus_cvlink.h>
#include <asm/sn/ate_utils.h>
#include <asm/sn/simulator.h>
#ifdef __ia64
#define rmallocmap atemapalloc
#define rmfreemap atemapfree
#define rmfree atefree
#define rmalloc atealloc
#endif
#define DEBUG_PCIIO
#undef DEBUG_PCIIO /* turn this on for yet more console output */
#define GET_NEW(ptr) (ptr = kmalloc(sizeof (*(ptr)), GFP_KERNEL))
#define DO_DEL(ptr) (kfree(ptr))
char pciio_info_fingerprint[] = "pciio_info";
cdl_p pciio_registry = NULL;
/*
 * badaddr_val: probe an I/O address through SAL to see whether it is
 * accessible.  Only 4-byte probes are supported; any other length just
 * logs a warning and reports success (0).  Panics on an unexpected
 * negative probe status.
 */
int
badaddr_val(volatile void *addr, int len, volatile void *ptr)
{
	volatile void *probe_addr;
	int status = 0;

	if (len == 4) {
		probe_addr = (void *) addr;
		status = ia64_sn_probe_io_slot((long)probe_addr, len, (void *)ptr);
	} else {
		printk(KERN_WARNING "badaddr_val given len %x but supports len of 4 only\n", len);
	}

	if (status < 0)
		panic("badaddr_val: unexpected status (%d) in probing", status);
	return(status);
}
/*
 * get_console_nasid: return the NASID of the node hosting the console,
 * querying SAL on first use and caching the result in console_nasid.
 */
nasid_t
get_console_nasid(void)
{
	extern nasid_t console_nasid;
	extern nasid_t master_baseio_nasid;

	/* Already discovered on a previous call. */
	if (console_nasid >= 0)
		return console_nasid;

	console_nasid = ia64_sn_get_console_nasid();
	// ZZZ What do we do if we don't get a console nasid on the hardware????
	if (console_nasid < 0 && IS_RUNNING_ON_SIMULATOR())
		console_nasid = master_baseio_nasid;

	return console_nasid;
}
/*
 * get_master_baseio_nasid: return the NASID of the master base-I/O node,
 * querying SAL on first use and caching it in master_baseio_nasid.  On a
 * successful lookup the base-I/O widget id (master_baseio_wid) is derived
 * from the console channel info for that nasid.
 *
 * (Removed an unused local "nasid_t tmp;".)
 */
nasid_t
get_master_baseio_nasid(void)
{
	extern nasid_t master_baseio_nasid;
	extern char master_baseio_wid;

	if (master_baseio_nasid < 0) {
		master_baseio_nasid = ia64_sn_get_master_baseio_nasid();
		if ( master_baseio_nasid >= 0 ) {
			master_baseio_wid = WIDGETID_GET(KL_CONFIG_CH_CONS_INFO(master_baseio_nasid)->memory_base);
		}
	}
	return master_baseio_nasid;
}
/*
 * hub_dma_enabled: stub — always reports "not enabled" on this platform.
 */
int
hub_dma_enabled(devfs_handle_t xconn_vhdl)
{
	return 0;
}
/*
 * hub_error_devenable: stub — always reports success (0); no per-device
 * error enabling is performed on this platform.
 */
int
hub_error_devenable(devfs_handle_t xconn_vhdl, int devnum, int error_code)
{
	return 0;
}
/*
 * ioerror_dump: stub — I/O error dumping is not implemented here; the
 * arguments are accepted and ignored.
 */
void
ioerror_dump(char *name, int error_code, int error_mode, ioerror_t *ioerror)
{
}
/******
****** end hack defines ......
******/
/* =====================================================================
* PCI Generic Bus Provider
* Implement PCI provider operations. The pciio* layer provides a
* platform-independent interface for PCI devices. This layer
* switches among the possible implementations of a PCI adapter.
*/
/* =====================================================================
* Provider Function Location SHORTCUT
*
* On platforms with only one possible PCI provider, macros can be
* set up at the top that cause the table lookups and indirections to
* completely disappear.
*/
/* =====================================================================
* Function Table of Contents
*/
#if !defined(DEV_FUNC)
static pciio_provider_t *pciio_to_provider_fns(devfs_handle_t dev);
#endif
pciio_piomap_t pciio_piomap_alloc(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, size_t, unsigned);
void pciio_piomap_free(pciio_piomap_t);
caddr_t pciio_piomap_addr(pciio_piomap_t, iopaddr_t, size_t);
void pciio_piomap_done(pciio_piomap_t);
caddr_t pciio_piotrans_addr(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, unsigned);
caddr_t pciio_pio_addr(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, pciio_piomap_t *, unsigned);
iopaddr_t pciio_piospace_alloc(devfs_handle_t, device_desc_t, pciio_space_t, size_t, size_t);
void pciio_piospace_free(devfs_handle_t, pciio_space_t, iopaddr_t, size_t);
pciio_dmamap_t pciio_dmamap_alloc(devfs_handle_t, device_desc_t, size_t, unsigned);
void pciio_dmamap_free(pciio_dmamap_t);
iopaddr_t pciio_dmamap_addr(pciio_dmamap_t, paddr_t, size_t);
alenlist_t pciio_dmamap_list(pciio_dmamap_t, alenlist_t, unsigned);
void pciio_dmamap_done(pciio_dmamap_t);
iopaddr_t pciio_dmatrans_addr(devfs_handle_t, device_desc_t, paddr_t, size_t, unsigned);
alenlist_t pciio_dmatrans_list(devfs_handle_t, device_desc_t, alenlist_t, unsigned);
void pciio_dmamap_drain(pciio_dmamap_t);
void pciio_dmaaddr_drain(devfs_handle_t, paddr_t, size_t);
void pciio_dmalist_drain(devfs_handle_t, alenlist_t);
iopaddr_t pciio_dma_addr(devfs_handle_t, device_desc_t, paddr_t, size_t, pciio_dmamap_t *, unsigned);
pciio_intr_t pciio_intr_alloc(devfs_handle_t, device_desc_t, pciio_intr_line_t, devfs_handle_t);
void pciio_intr_free(pciio_intr_t);
int pciio_intr_connect(pciio_intr_t, intr_func_t, intr_arg_t);
void pciio_intr_disconnect(pciio_intr_t);
devfs_handle_t pciio_intr_cpu_get(pciio_intr_t);
void pciio_slot_func_to_name(char *, pciio_slot_t, pciio_function_t);
void pciio_provider_startup(devfs_handle_t);
void pciio_provider_shutdown(devfs_handle_t);
pciio_endian_t pciio_endian_set(devfs_handle_t, pciio_endian_t, pciio_endian_t);
pciio_priority_t pciio_priority_set(devfs_handle_t, pciio_priority_t);
devfs_handle_t pciio_intr_dev_get(pciio_intr_t);
devfs_handle_t pciio_pio_dev_get(pciio_piomap_t);
pciio_slot_t pciio_pio_slot_get(pciio_piomap_t);
pciio_space_t pciio_pio_space_get(pciio_piomap_t);
iopaddr_t pciio_pio_pciaddr_get(pciio_piomap_t);
ulong pciio_pio_mapsz_get(pciio_piomap_t);
caddr_t pciio_pio_kvaddr_get(pciio_piomap_t);
devfs_handle_t pciio_dma_dev_get(pciio_dmamap_t);
pciio_slot_t pciio_dma_slot_get(pciio_dmamap_t);
pciio_info_t pciio_info_chk(devfs_handle_t);
pciio_info_t pciio_info_get(devfs_handle_t);
void pciio_info_set(devfs_handle_t, pciio_info_t);
devfs_handle_t pciio_info_dev_get(pciio_info_t);
pciio_slot_t pciio_info_slot_get(pciio_info_t);
pciio_function_t pciio_info_function_get(pciio_info_t);
pciio_vendor_id_t pciio_info_vendor_id_get(pciio_info_t);
pciio_device_id_t pciio_info_device_id_get(pciio_info_t);
devfs_handle_t pciio_info_master_get(pciio_info_t);
arbitrary_info_t pciio_info_mfast_get(pciio_info_t);
pciio_provider_t *pciio_info_pops_get(pciio_info_t);
error_handler_f *pciio_info_efunc_get(pciio_info_t);
error_handler_arg_t *pciio_info_einfo_get(pciio_info_t);
pciio_space_t pciio_info_bar_space_get(pciio_info_t, int);
iopaddr_t pciio_info_bar_base_get(pciio_info_t, int);
size_t pciio_info_bar_size_get(pciio_info_t, int);
iopaddr_t pciio_info_rom_base_get(pciio_info_t);
size_t pciio_info_rom_size_get(pciio_info_t);
void pciio_init(void);
int pciio_attach(devfs_handle_t);
void pciio_provider_register(devfs_handle_t, pciio_provider_t *pciio_fns);
void pciio_provider_unregister(devfs_handle_t);
pciio_provider_t *pciio_provider_fns_get(devfs_handle_t);
int pciio_driver_register(pciio_vendor_id_t, pciio_device_id_t, char *driver_prefix, unsigned);
void pciio_driver_unregister(char *driver_prefix);
devfs_handle_t pciio_device_register(devfs_handle_t, devfs_handle_t, pciio_slot_t, pciio_function_t, pciio_vendor_id_t, pciio_device_id_t);
void pciio_device_unregister(devfs_handle_t);
pciio_info_t pciio_device_info_new(pciio_info_t, devfs_handle_t, pciio_slot_t, pciio_function_t, pciio_vendor_id_t, pciio_device_id_t);
void pciio_device_info_free(pciio_info_t);
devfs_handle_t pciio_device_info_register(devfs_handle_t, pciio_info_t);
void pciio_device_info_unregister(devfs_handle_t, pciio_info_t);
int pciio_device_attach(devfs_handle_t, int);
int pciio_device_detach(devfs_handle_t, int);
void pciio_error_register(devfs_handle_t, error_handler_f *, error_handler_arg_t);
int pciio_reset(devfs_handle_t);
int pciio_write_gather_flush(devfs_handle_t);
int pciio_slot_inuse(devfs_handle_t);
/* =====================================================================
* Provider Function Location
*
* If there is more than one possible provider for
* this platform, we need to examine the master
* vertex of the current vertex for a provider
* function structure, and indirect through the
* appropriately named member.
*/
#if !defined(DEV_FUNC)
/*
 * pciio_to_provider_fns: resolve the pciio_provider function table for a
 * vertex.  Panics (PRINT_PANIC) if no provider can be found, so callers
 * may assume a non-NULL return.
 */
static pciio_provider_t *
pciio_to_provider_fns(devfs_handle_t dev)
{
	pciio_info_t            card_info;
	pciio_provider_t       *provider_fns;
	/*
	 * We're called with two types of vertices, one is
	 * the bridge vertex (ends with "pci") and the other is the
	 * pci slot vertex (ends with "pci/[0-8]").  For the first type
	 * we need to get the provider from the PFUNCS label.  For
	 * the second we get it from fastinfo/c_pops.
	 */
	provider_fns = pciio_provider_fns_get(dev);
	if (provider_fns == NULL) {
		/* Not a bridge vertex: fall back to the slot's card info. */
		card_info = pciio_info_get(dev);
		if (card_info != NULL) {
			provider_fns = pciio_info_pops_get(card_info);
		}
	}
	if (provider_fns == NULL)
#if defined(SUPPORT_PRINTING_V_FORMAT)
		PRINT_PANIC("%v: provider_fns == NULL", dev);
#else
		PRINT_PANIC("0x%p: provider_fns == NULL", (void *)dev);
#endif
	return provider_fns;
}
/* Dispatch helpers: look up the provider table and call into it. */
#define DEV_FUNC(dev,func)	pciio_to_provider_fns(dev)->func
#define CAST_PIOMAP(x)		((pciio_piomap_t)(x))
#define CAST_DMAMAP(x)		((pciio_dmamap_t)(x))
#define CAST_INTR(x)		((pciio_intr_t)(x))
#endif
/*
* Many functions are not passed their vertex
* information directly; rather, they must
* dive through a resource map. These macros
* are available to coordinate this detail.
*/
#define PIOMAP_FUNC(map,func) DEV_FUNC((map)->pp_dev,func)
#define DMAMAP_FUNC(map,func) DEV_FUNC((map)->pd_dev,func)
#define INTR_FUNC(intr_hdl,func) DEV_FUNC((intr_hdl)->pi_dev,func)
/* =====================================================================
* PIO MANAGEMENT
*
* For mapping system virtual address space to
* pciio space on a specified card
*/
/*
 * pciio_piomap_alloc: allocate PIO mapping resources for a device by
 * dispatching to the provider that implements this device's bus.
 */
pciio_piomap_t
pciio_piomap_alloc(devfs_handle_t dev,	/* set up mapping for this device */
		   device_desc_t dev_desc,	/* device descriptor */
		   pciio_space_t space,	/* CFG, MEM, IO, or a device-decoded window */
		   iopaddr_t addr,	/* lowest address (or offset in window) */
		   size_t byte_count,	/* size of region containing our mappings */
		   size_t byte_count_max,	/* maximum size of a mapping */
		   unsigned flags)
{				/* defined in sys/pio.h */
	pciio_piomap_t map;

	map = (pciio_piomap_t) DEV_FUNC(dev, piomap_alloc)
	    (dev, dev_desc, space, addr, byte_count, byte_count_max, flags);
	return map;
}
/* Release resources obtained from pciio_piomap_alloc(). */
void
pciio_piomap_free(pciio_piomap_t pciio_piomap)
{
	PIOMAP_FUNC(pciio_piomap, piomap_free)
	    (CAST_PIOMAP(pciio_piomap));
}
/* Establish a mapping; caches the kernel virtual address in pp_kvaddr. */
caddr_t
pciio_piomap_addr(pciio_piomap_t pciio_piomap,	/* mapping resources */
		  iopaddr_t pciio_addr,	/* map for this pciio address */
		  size_t byte_count)
{				/* map this many bytes */
	pciio_piomap->pp_kvaddr = PIOMAP_FUNC(pciio_piomap, piomap_addr)
	    (CAST_PIOMAP(pciio_piomap), pciio_addr, byte_count);
	return pciio_piomap->pp_kvaddr;
}
/* Mark a mapping as no longer in active use (resources stay allocated). */
void
pciio_piomap_done(pciio_piomap_t pciio_piomap)
{
	PIOMAP_FUNC(pciio_piomap, piomap_done)
	    (CAST_PIOMAP(pciio_piomap));
}
/* Attempt a direct (resource-free) PIO translation via the provider. */
caddr_t
pciio_piotrans_addr(devfs_handle_t dev,	/* translate for this device */
		    device_desc_t dev_desc,	/* device descriptor */
		    pciio_space_t space,	/* CFG, MEM, IO, or a device-decoded window */
		    iopaddr_t addr,	/* starting address (or offset in window) */
		    size_t byte_count,	/* map this many bytes */
		    unsigned flags)
{				/* (currently unused) */
	return DEV_FUNC(dev, piotrans_addr)
	    (dev, dev_desc, space, addr, byte_count, flags);
}
/*
 * pciio_pio_addr: get a PIO address, preferring a cheap direct translation
 * and falling back to an allocated mapping.  Order matters:
 *   1. try pciio_piotrans_addr (no resources consumed);
 *   2. else use/allocate a map (caller may pass one in via *mapp);
 *   3. on failure, free the map only if we allocated it here.
 * On success *mapp reports the map used (0 if none was needed).
 */
caddr_t
pciio_pio_addr(devfs_handle_t dev,	/* translate for this device */
	       device_desc_t dev_desc,	/* device descriptor */
	       pciio_space_t space,	/* CFG, MEM, IO, or a device-decoded window */
	       iopaddr_t addr,	/* starting address (or offset in window) */
	       size_t byte_count,	/* map this many bytes */
	       pciio_piomap_t *mapp,	/* where to return the map pointer */
	       unsigned flags)
{				/* PIO flags */
	pciio_piomap_t map = 0;
	int errfree = 0;
	caddr_t res;

	if (mapp) {
		map = *mapp;			/* possible pre-allocated map */
		*mapp = 0;			/* record "no map used" */
	}

	res = pciio_piotrans_addr
	    (dev, dev_desc, space, addr, byte_count, flags);
	if (res)
		return res;			/* pciio_piotrans worked */

	if (!map) {
		map = pciio_piomap_alloc
		    (dev, dev_desc, space, addr, byte_count, byte_count, flags);
		if (!map)
			return res;		/* pciio_piomap_alloc failed */
		errfree = 1;			/* we own this map on error */
	}

	res = pciio_piomap_addr
	    (map, addr, byte_count);
	if (!res) {
		if (errfree)
			pciio_piomap_free(map);
		return res;			/* pciio_piomap_addr failed */
	}
	if (mapp)
		*mapp = map;			/* pass back map used */

	return res;				/* pciio_piomap_addr succeeded */
}
/*
 * pciio_piospace_alloc: allocate PCI address space for a device; the
 * alignment is rounded up to at least one page (NBPP).
 */
iopaddr_t
pciio_piospace_alloc(devfs_handle_t dev,	/* Device requiring space */
		     device_desc_t dev_desc,	/* Device descriptor */
		     pciio_space_t space,	/* MEM32/MEM64/IO */
		     size_t byte_count,	/* Size of mapping */
		     size_t align)
{				/* Alignment needed */
	if (align < NBPP)
		align = NBPP;
	return DEV_FUNC(dev, piospace_alloc)
	    (dev, dev_desc, space, byte_count, align);
}
/* Release PCI address space obtained from pciio_piospace_alloc(). */
void
pciio_piospace_free(devfs_handle_t dev,	/* Device freeing space */
		    pciio_space_t space,	/* Type of space */
		    iopaddr_t pciaddr,	/* starting address */
		    size_t byte_count)
{				/* Range of address */
	DEV_FUNC(dev, piospace_free)
	    (dev, space, pciaddr, byte_count);
}
/* =====================================================================
 *          DMA MANAGEMENT
 *
 *      For mapping from pci space to system
 *      physical space.
 */
/* Allocate DMA mapping resources for a device (provider dispatch). */
pciio_dmamap_t
pciio_dmamap_alloc(devfs_handle_t dev,	/* set up mappings for this device */
		   device_desc_t dev_desc,	/* device descriptor */
		   size_t byte_count_max,	/* max size of a mapping */
		   unsigned flags)
{				/* defined in dma.h */
	return (pciio_dmamap_t) DEV_FUNC(dev, dmamap_alloc)
	    (dev, dev_desc, byte_count_max, flags);
}
/* Release resources obtained from pciio_dmamap_alloc(). */
void
pciio_dmamap_free(pciio_dmamap_t pciio_dmamap)
{
	DMAMAP_FUNC(pciio_dmamap, dmamap_free)
	    (CAST_DMAMAP(pciio_dmamap));
}
/* Establish a DMA mapping for a physical address range. */
iopaddr_t
pciio_dmamap_addr(pciio_dmamap_t pciio_dmamap,	/* use these mapping resources */
		  paddr_t paddr,	/* map for this address */
		  size_t byte_count)
{				/* map this many bytes */
	return DMAMAP_FUNC(pciio_dmamap, dmamap_addr)
	    (CAST_DMAMAP(pciio_dmamap), paddr, byte_count);
}
/* Establish a DMA mapping for an address/length list. */
alenlist_t
pciio_dmamap_list(pciio_dmamap_t pciio_dmamap,	/* use these mapping resources */
		  alenlist_t alenlist,	/* map this Address/Length List */
		  unsigned flags)
{
	return DMAMAP_FUNC(pciio_dmamap, dmamap_list)
	    (CAST_DMAMAP(pciio_dmamap), alenlist, flags);
}
/* Mark a DMA mapping as no longer in active use. */
void
pciio_dmamap_done(pciio_dmamap_t pciio_dmamap)
{
	DMAMAP_FUNC(pciio_dmamap, dmamap_done)
	    (CAST_DMAMAP(pciio_dmamap));
}
/* Attempt a direct (resource-free) DMA translation for a single range. */
iopaddr_t
pciio_dmatrans_addr(devfs_handle_t dev,	/* translate for this device */
		    device_desc_t dev_desc,	/* device descriptor */
		    paddr_t paddr,	/* system physical address */
		    size_t byte_count,	/* length */
		    unsigned flags)
{				/* defined in dma.h */
	return DEV_FUNC(dev, dmatrans_addr)
	    (dev, dev_desc, paddr, byte_count, flags);
}
/* Attempt a direct (resource-free) DMA translation for an address list. */
alenlist_t
pciio_dmatrans_list(devfs_handle_t dev,	/* translate for this device */
		    device_desc_t dev_desc,	/* device descriptor */
		    alenlist_t palenlist,	/* system address/length list */
		    unsigned flags)
{				/* defined in dma.h */
	return DEV_FUNC(dev, dmatrans_list)
	    (dev, dev_desc, palenlist, flags);
}
/*
 * pciio_dma_addr: get a DMA address, preferring a cheap direct translation
 * and falling back to an allocated mapping — mirror of pciio_pio_addr():
 *   1. try pciio_dmatrans_addr (no resources consumed);
 *   2. else use/allocate a map (caller may pass one in via *mapp);
 *   3. on failure, free the map only if we allocated it here.
 * On success *mapp reports the map used (0 if none was needed).
 */
iopaddr_t
pciio_dma_addr(devfs_handle_t dev,	/* translate for this device */
	       device_desc_t dev_desc,	/* device descriptor */
	       paddr_t paddr,	/* system physical address */
	       size_t byte_count,	/* length */
	       pciio_dmamap_t *mapp,	/* map to use, then map we used */
	       unsigned flags)
{				/* PIO flags */
	pciio_dmamap_t map = 0;
	int errfree = 0;
	iopaddr_t res;

	if (mapp) {
		map = *mapp;			/* possible pre-allocated map */
		*mapp = 0;			/* record "no map used" */
	}

	res = pciio_dmatrans_addr
	    (dev, dev_desc, paddr, byte_count, flags);
	if (res)
		return res;			/* pciio_dmatrans worked */

	if (!map) {
		map = pciio_dmamap_alloc
		    (dev, dev_desc, byte_count, flags);
		if (!map)
			return res;		/* pciio_dmamap_alloc failed */
		errfree = 1;			/* we own this map on error */
	}

	res = pciio_dmamap_addr
	    (map, paddr, byte_count);
	if (!res) {
		if (errfree)
			pciio_dmamap_free(map);
		return res;			/* pciio_dmamap_addr failed */
	}
	if (mapp)
		*mapp = map;			/* pass back map used */

	return res;				/* pciio_dmamap_addr succeeded */
}
/* Drain pending DMA through a specific mapping. */
void
pciio_dmamap_drain(pciio_dmamap_t map)
{
	DMAMAP_FUNC(map, dmamap_drain)
	    (CAST_DMAMAP(map));
}
/* Drain pending DMA to a physical address range for a device. */
void
pciio_dmaaddr_drain(devfs_handle_t dev, paddr_t addr, size_t size)
{
	DEV_FUNC(dev, dmaaddr_drain)
	    (dev, addr, size);
}
/* Drain pending DMA described by an address/length list for a device. */
void
pciio_dmalist_drain(devfs_handle_t dev, alenlist_t list)
{
	DEV_FUNC(dev, dmalist_drain)
	    (dev, list);
}
/* =====================================================================
 *          INTERRUPT MANAGEMENT
 *
 *      Allow crosstalk devices to establish interrupts
 */
/*
 * Allocate resources required for an interrupt as specified in intr_desc.
 * Return resource handle in intr_hdl.
 */
pciio_intr_t
pciio_intr_alloc(devfs_handle_t dev,	/* which Crosstalk device */
		 device_desc_t dev_desc,	/* device descriptor */
		 pciio_intr_line_t lines,	/* INTR line(s) to attach */
		 devfs_handle_t owner_dev)
{				/* owner of this interrupt */
	return (pciio_intr_t) DEV_FUNC(dev, intr_alloc)
	    (dev, dev_desc, lines, owner_dev);
}
/*
 * Free resources consumed by intr_alloc.
 */
void
pciio_intr_free(pciio_intr_t intr_hdl)
{
	INTR_FUNC(intr_hdl, intr_free)
	    (CAST_INTR(intr_hdl));
}
/*
 * Associate resources allocated with a previous pciio_intr_alloc call with the
 * described handler, arg, name, etc.
 *
 * Returns 0 on success, returns <0 on failure.
 */
int
pciio_intr_connect(pciio_intr_t intr_hdl,
		   intr_func_t intr_func, intr_arg_t intr_arg)	/* pciio intr resource handle */
{
	return INTR_FUNC(intr_hdl, intr_connect)
	    (CAST_INTR(intr_hdl), intr_func, intr_arg);
}
/*
 * Disassociate handler with the specified interrupt.
 */
void
pciio_intr_disconnect(pciio_intr_t intr_hdl)
{
	INTR_FUNC(intr_hdl, intr_disconnect)
	    (CAST_INTR(intr_hdl));
}
/*
 * Return a hwgraph vertex that represents the CPU currently
 * targeted by an interrupt.
 */
devfs_handle_t
pciio_intr_cpu_get(pciio_intr_t intr_hdl)
{
	return INTR_FUNC(intr_hdl, intr_cpu_get)
	    (CAST_INTR(intr_hdl));
}
/*
 * pciio_slot_func_to_name: render a slot/function pair as a hwgraph edge
 * name into the caller-supplied buffer.
 *
 * standard connection points:
 *
 * PCIIO_SLOT_NONE:	.../pci/direct
 * PCIIO_FUNC_NONE:	.../pci/<SLOT>			ie. .../pci/3
 * multifunction:	.../pci/<SLOT><FUNC>		ie. .../pci/3c
 */
void
pciio_slot_func_to_name(char *name,
			pciio_slot_t slot,
			pciio_function_t func)
{
	if (slot != PCIIO_SLOT_NONE) {
		if (func != PCIIO_FUNC_NONE)
			sprintf(name, "%d%c", slot, 'a'+func);
		else
			sprintf(name, "%d", slot);
	} else {
		sprintf(name, EDGE_LBL_DIRECT);
	}
}
/*
 * pciio_cardinfo_get
 *
 * Get the pciio info structure corresponding to the
 * specified PCI "slot" (we like it when the same index
 * number is used for the PCI IDSEL, the REQ/GNT pair,
 * and the interrupt line being used for INTA. We like
 * it so much we call it the slot number).
 *
 * Returns 0 if the slot vertex cannot be traversed or
 * carries no pciio info.
 */
static pciio_info_t
pciio_cardinfo_get(
		      devfs_handle_t pciio_vhdl,
		      pciio_slot_t pci_slot)
{
	char namebuf[16];
	pciio_info_t info = 0;
	devfs_handle_t conn;

	pciio_slot_func_to_name(namebuf, pci_slot, PCIIO_FUNC_NONE);
	if (GRAPH_SUCCESS ==
	    hwgraph_traverse(pciio_vhdl, namebuf, &conn)) {
		/* Drop the reference taken by the traverse. */
		info = pciio_info_chk(conn);
		hwgraph_vertex_unref(conn);
	}
	return info;
}
/*
 * pciio_error_handler:
 * dispatch an error to the appropriate
 * pciio connection point, or process
 * it as a generic pci error.
 * Yes, the first parameter is the
 * provider vertex at the middle of
 * the bus; we get to the pciio connect
 * point using the ioerror widgetdev field.
 *
 * This function is called by the
 * specific PCI provider, after it has figured
 * out where on the PCI bus (including which slot,
 * if it can tell) the error came from.
 *
 * Returns the slot/no-slot handler's verdict when one handles the
 * error; otherwise IOERROR_HANDLED for probe mode, IOERROR_UNHANDLED
 * for everything else.
 */
/*ARGSUSED */
int
pciio_error_handler(
		       devfs_handle_t pciio_vhdl,
		       int error_code,
		       ioerror_mode_t mode,
		       ioerror_t *ioerror)
{
	pciio_info_t            pciio_info;
	devfs_handle_t            pconn_vhdl;
#if USRPCI
	devfs_handle_t            usrpci_v;
#endif
	pciio_slot_t            slot;
	int                     retval;
#ifdef EHE_ENABLE
	error_state_t	    e_state;
#endif /* EHE_ENABLE */

#if DEBUG && ERROR_DEBUG
	printk("%v: pciio_error_handler\n", pciio_vhdl);
#endif

	IOERR_PRINTF(printk(KERN_NOTICE "%v: PCI Bus Error: Error code: %d Error mode: %d\n",
			    pciio_vhdl, error_code, mode));

	/* If there is an error handler sitting on
	 * the "no-slot" connection point, give it
	 * first crack at the error. NOTE: it is
	 * quite possible that this function may
	 * do further refining of the ioerror.
	 */
	pciio_info = pciio_cardinfo_get(pciio_vhdl, PCIIO_SLOT_NONE);
	if (pciio_info && pciio_info->c_efunc) {
		pconn_vhdl = pciio_info_dev_get(pciio_info);
#ifdef EHE_ENABLE
		/* Propagate the error state down to the connect point. */
		e_state = error_state_get(pciio_vhdl);
		if (e_state == ERROR_STATE_ACTION)
			(void)error_state_set(pciio_vhdl, ERROR_STATE_NONE);
		if (error_state_set(pconn_vhdl,e_state) == ERROR_RETURN_CODE_CANNOT_SET_STATE)
			return(IOERROR_UNHANDLED);
#endif
		retval = pciio_info->c_efunc
		    (pciio_info->c_einfo, error_code, mode, ioerror);
		if (retval != IOERROR_UNHANDLED)
			return retval;
	}

	/* Is the error associated with a particular slot?
	 */
	if (IOERROR_FIELDVALID(ioerror, widgetdev)) {
		short widgetdev;
		/*
		 * NOTE :
		 * widgetdev is a 4byte value encoded as slot in the higher order
		 * 2 bytes and function in the lower order 2 bytes.
		 */
		IOERROR_GETVALUE(widgetdev, ioerror, widgetdev);
		slot = pciio_widgetdev_slot_get(widgetdev);

		/* If this slot has an error handler,
		 * deliver the error to it.
		 */
		pciio_info = pciio_cardinfo_get(pciio_vhdl, slot);
		if (pciio_info != NULL) {
			if (pciio_info->c_efunc != NULL) {
				pconn_vhdl = pciio_info_dev_get(pciio_info);
#ifdef EHE_ENABLE
				e_state = error_state_get(pciio_vhdl);
				if (e_state == ERROR_STATE_ACTION)
					(void)error_state_set(pciio_vhdl, ERROR_STATE_NONE);
				if (error_state_set(pconn_vhdl,e_state) ==
				    ERROR_RETURN_CODE_CANNOT_SET_STATE)
					return(IOERROR_UNHANDLED);
#endif /* EHE_ENABLE */
				retval = pciio_info->c_efunc
				    (pciio_info->c_einfo, error_code, mode, ioerror);
				if (retval != IOERROR_UNHANDLED)
					return retval;
			}
#if USRPCI
			/* If the USRPCI driver is available and
			 * knows about this connection point,
			 * deliver the error to it.
			 *
			 * OK to use pconn_vhdl here, even though we
			 * have already UNREF'd it, since we know that
			 * it is not going away.
			 */
			pconn_vhdl = pciio_info_dev_get(pciio_info);
			if (GRAPH_SUCCESS == hwgraph_traverse(pconn_vhdl, EDGE_LBL_USRPCI, &usrpci_v)) {
				iopaddr_t busaddr;
				IOERROR_GETVALUE(busaddr, ioerror, busaddr);
				retval = usrpci_error_handler (usrpci_v, error_code, busaddr);
				hwgraph_vertex_unref(usrpci_v);
				if (retval != IOERROR_UNHANDLED) {
					/*
					 * This unref is not needed.  If this code is called often enough,
					 * the system will crash, due to vertex reference count reaching 0,
					 * causing vertex to be unallocated.  -jeremy
					 * hwgraph_vertex_unref(pconn_vhdl);
					 */
					return retval;
				}
			}
#endif
		}
	}

	return (mode == MODE_DEVPROBE)
	    ? IOERROR_HANDLED	/* probes are OK */
	    : IOERROR_UNHANDLED;	/* otherwise, foo! */
}
/* =====================================================================
 *          CONFIGURATION MANAGEMENT
 */
/*
 * Startup a crosstalk provider
 */
void
pciio_provider_startup(devfs_handle_t pciio_provider)
{
	DEV_FUNC(pciio_provider, provider_startup)
	    (pciio_provider);
}
/*
 * Shutdown a crosstalk provider
 */
void
pciio_provider_shutdown(devfs_handle_t pciio_provider)
{
	DEV_FUNC(pciio_provider, provider_shutdown)
	    (pciio_provider);
}
/*
 * Specify endianness constraints.  The driver tells us what the device
 * does and how it would like to see things in memory.  We reply with
 * how things will actually appear in memory.
 *
 * Deprecated (see the DEBUG-only warning); kept for provider dispatch.
 */
pciio_endian_t
pciio_endian_set(devfs_handle_t dev,
		 pciio_endian_t device_end,
		 pciio_endian_t desired_end)
{
	ASSERT((device_end == PCIDMA_ENDIAN_BIG) || (device_end == PCIDMA_ENDIAN_LITTLE));
	ASSERT((desired_end == PCIDMA_ENDIAN_BIG) || (desired_end == PCIDMA_ENDIAN_LITTLE));

#if DEBUG
#if defined(SUPPORT_PRINTING_V_FORMAT)
	printk(KERN_ALERT "%v: pciio_endian_set is going away.\n"
		"\tplease use PCIIO_BYTE_STREAM or PCIIO_WORD_VALUES in your\n"
		"\tpciio_dmamap_alloc and pciio_dmatrans calls instead.\n",
		dev);
#else
	/* Fixed: a handle was printed with %x; use %p as elsewhere. */
	printk(KERN_ALERT "0x%p: pciio_endian_set is going away.\n"
		"\tplease use PCIIO_BYTE_STREAM or PCIIO_WORD_VALUES in your\n"
		"\tpciio_dmamap_alloc and pciio_dmatrans calls instead.\n",
		(void *)dev);
#endif
#endif

	return DEV_FUNC(dev, endian_set)
	    (dev, device_end, desired_end);
}
/*
 * Specify PCI arbitration priority.  Asserts the priority is one of the
 * two legal values, then dispatches to the provider.
 */
pciio_priority_t
pciio_priority_set(devfs_handle_t dev,
		   pciio_priority_t device_prio)
{
	ASSERT((device_prio == PCI_PRIO_HIGH) || (device_prio == PCI_PRIO_LOW));

	return DEV_FUNC(dev, priority_set)
	    (dev, device_prio);
}
/*
 * Read value of configuration register.  Accesses that cross 32-bit word
 * boundaries are split into per-word provider calls and reassembled,
 * little-endian style, via the accumulating shift.
 */
uint64_t
pciio_config_get(devfs_handle_t dev,
		 unsigned reg,
		 unsigned size)
{
	uint64_t	    value = 0;
	unsigned	    shift = 0;

	/* handle accesses that cross words here,
	 * since that's common code between all
	 * possible providers.
	 */
	while (size > 0) {
		unsigned	biw = 4 - (reg&3);	/* bytes left in this word */
		if (biw > size)
			biw = size;

		value |= DEV_FUNC(dev, config_get)
		    (dev, reg, biw) << shift;

		shift += 8*biw;
		reg += biw;
		size -= biw;
	}
	return value;
}
/*
 * Change value of configuration register.  Mirror of pciio_config_get():
 * the write is split on 32-bit word boundaries, consuming the low bytes
 * of value first.
 */
void
pciio_config_set(devfs_handle_t dev,
		 unsigned reg,
		 unsigned size,
		 uint64_t value)
{
	/* handle accesses that cross words here,
	 * since that's common code between all
	 * possible providers.
	 */
	while (size > 0) {
		unsigned	biw = 4 - (reg&3);	/* bytes left in this word */
		if (biw > size)
			biw = size;

		DEV_FUNC(dev, config_set)
		    (dev, reg, biw, value);
		reg += biw;
		size -= biw;
		value >>= biw * 8;
	}
}
/* =====================================================================
 *          GENERIC PCI SUPPORT FUNCTIONS
 */
/*
 * Issue a hardware reset to a card.
 */
int
pciio_reset(devfs_handle_t dev)
{
	return DEV_FUNC(dev, reset) (dev);
}
/*
 * flush write gather buffers
 */
int
pciio_write_gather_flush(devfs_handle_t dev)
{
	return DEV_FUNC(dev, write_gather_flush) (dev);
}
/* Return the device vertex an interrupt was allocated against. */
devfs_handle_t
pciio_intr_dev_get(pciio_intr_t pciio_intr)
{
	return (pciio_intr->pi_dev);
}
/****** Generic crosstalk pio interfaces ******/
/* Simple field accessors for PIO map state. */
devfs_handle_t
pciio_pio_dev_get(pciio_piomap_t pciio_piomap)
{
	return (pciio_piomap->pp_dev);
}
pciio_slot_t
pciio_pio_slot_get(pciio_piomap_t pciio_piomap)
{
	return (pciio_piomap->pp_slot);
}
pciio_space_t
pciio_pio_space_get(pciio_piomap_t pciio_piomap)
{
	return (pciio_piomap->pp_space);
}
iopaddr_t
pciio_pio_pciaddr_get(pciio_piomap_t pciio_piomap)
{
	return (pciio_piomap->pp_pciaddr);
}
ulong
pciio_pio_mapsz_get(pciio_piomap_t pciio_piomap)
{
	return (pciio_piomap->pp_mapsz);
}
caddr_t
pciio_pio_kvaddr_get(pciio_piomap_t pciio_piomap)
{
	return (pciio_piomap->pp_kvaddr);
}
/****** Generic crosstalk dma interfaces ******/
/* Simple field accessors for DMA map state. */
devfs_handle_t
pciio_dma_dev_get(pciio_dmamap_t pciio_dmamap)
{
	return (pciio_dmamap->pd_dev);
}
pciio_slot_t
pciio_dma_slot_get(pciio_dmamap_t pciio_dmamap)
{
	return (pciio_dmamap->pd_slot);
}
/****** Generic pci slot information interfaces ******/
/* Return the pciio info attached under INFO_LBL_PCIIO, or NULL. */
pciio_info_t
pciio_info_chk(devfs_handle_t pciio)
{
	arbitrary_info_t        ainfo = 0;

	hwgraph_info_get_LBL(pciio, INFO_LBL_PCIIO, &ainfo);
	return (pciio_info_t) ainfo;
}
/*
 * Return the pciio info stored in a vertex's fastinfo, validating the
 * fingerprint; returns (pciio_info_t)-1 on a fingerprint mismatch.
 */
pciio_info_t
pciio_info_get(devfs_handle_t pciio)
{
	pciio_info_t            pciio_info;

	pciio_info = (pciio_info_t) hwgraph_fastinfo_get(pciio);

#ifdef DEBUG_PCIIO
	{
		int pos;
		char dname[256];
		pos = devfs_generate_path(pciio, dname, 256);
		printk("%s : path= %s\n", __FUNCTION__, &dname[pos]);
	}
#endif /* DEBUG_PCIIO */

	if ((pciio_info != NULL) &&
	    (pciio_info->c_fingerprint != pciio_info_fingerprint)
	    && (pciio_info->c_fingerprint != NULL)) {

		return((pciio_info_t)-1); /* Should panic .. */
	}

	return pciio_info;
}
/*
 * Attach pciio info to a vertex: stamps the fingerprint, stores it in
 * fastinfo, and also labels the vertex so pciio_info_chk() can find it.
 */
void
pciio_info_set(devfs_handle_t pciio, pciio_info_t pciio_info)
{
	if (pciio_info != NULL)
		pciio_info->c_fingerprint = pciio_info_fingerprint;
	hwgraph_fastinfo_set(pciio, (arbitrary_info_t) pciio_info);

	/* Also, mark this vertex as a PCI slot
	 * and use the pciio_info, so pciio_info_chk
	 * can work (and be fairly efficient).
	 */
	hwgraph_info_add_LBL(pciio, INFO_LBL_PCIIO,
			     (arbitrary_info_t) pciio_info);
}
/* Simple field accessors for pciio_info state. */
devfs_handle_t
pciio_info_dev_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_vertex);
}
pciio_slot_t
pciio_info_slot_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_slot);
}
pciio_function_t
pciio_info_function_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_func);
}
pciio_vendor_id_t
pciio_info_vendor_id_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_vendor);
}
pciio_device_id_t
pciio_info_device_id_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_device);
}
devfs_handle_t
pciio_info_master_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_master);
}
arbitrary_info_t
pciio_info_mfast_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_mfast);
}
pciio_provider_t       *
pciio_info_pops_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_pops);
}
error_handler_f	       *
pciio_info_efunc_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_efunc);
}
error_handler_arg_t    *
pciio_info_einfo_get(pciio_info_t pciio_info)
{
	return (pciio_info->c_einfo);
}
/* Per-BAR window accessors (win indexes c_window[]). */
pciio_space_t
pciio_info_bar_space_get(pciio_info_t info, int win)
{
	return info->c_window[win].w_space;
}
iopaddr_t
pciio_info_bar_base_get(pciio_info_t info, int win)
{
	return info->c_window[win].w_base;
}
size_t
pciio_info_bar_size_get(pciio_info_t info, int win)
{
	return info->c_window[win].w_size;
}
iopaddr_t
pciio_info_rom_base_get(pciio_info_t info)
{
	return info->c_rbase;
}
size_t
pciio_info_rom_size_get(pciio_info_t info)
{
	return info->c_rsize;
}
/* =====================================================================
 *          GENERIC PCI INITIALIZATION FUNCTIONS
 */
/*
 *    pciioinit: called once during device driver
 *      initializtion if this driver is configured into
 *      the system.
 */
void
pciio_init(void)
{
	cdl_p                   cp;

#if DEBUG && ATTACH_DEBUG
	printf("pciio_init\n");
#endif
	/* Allocate the registry.
	 * We might already have one.
	 * If we don't, go get one.
	 * MPness: someone might have
	 * set one up for us while we
	 * were not looking; use an atomic
	 * compare-and-swap to commit to
	 * using the new registry if and
	 * only if nobody else did first.
	 * If someone did get there first,
	 * toss the one we allocated back
	 * into the pool.
	 */
	if (pciio_registry == NULL) {
		cp = cdl_new(EDGE_LBL_PCI, "vendor", "device");
		if (!compare_and_swap_ptr((void **) &pciio_registry, NULL, (void *) cp)) {
			/* Lost the race: free our registry, use the winner's. */
			cdl_del(cp);
		}
	}
	ASSERT(pciio_registry != NULL);
}
/*
 * pciioattach: called for each vertex in the graph
 * that is a PCI provider.
 *
 * Nothing to do beyond optional debug tracing; the vertex is simply
 * acknowledged.  Always returns 0 (success).
 */
/*ARGSUSED */
int
pciio_attach(devfs_handle_t pciio)
{
#if DEBUG && ATTACH_DEBUG
#if defined(SUPPORT_PRINTING_V_FORMAT)
	printk("%v: pciio_attach\n", pciio);
#else
	printk("0x%x: pciio_attach\n", pciio);
#endif
#endif
	return 0;
}
/*
 * Associate a set of pciio_provider functions with a vertex.
 *
 * The table pointer is stored under the INFO_LBL_PFUNCS label and is
 * later fetched by pciio_provider_fns_get().
 */
void
pciio_provider_register(devfs_handle_t provider, pciio_provider_t *pciio_fns)
{
	hwgraph_info_add_LBL(provider, INFO_LBL_PFUNCS, (arbitrary_info_t) pciio_fns);
}
/*
 * Disassociate a set of pciio_provider functions with a vertex.
 */
void
pciio_provider_unregister(devfs_handle_t provider)
{
	arbitrary_info_t ainfo;

	/* The previously stored table pointer is returned in ainfo
	 * and deliberately discarded. */
	hwgraph_info_remove_LBL(provider, INFO_LBL_PFUNCS, (long *) &ainfo);
}
/*
 * Look up the pciio_provider function table registered on a provider
 * vertex.  Returns NULL if none has been registered (the lookup's
 * return status is ignored and fns stays 0 in that case).
 */
pciio_provider_t *
pciio_provider_fns_get(devfs_handle_t provider)
{
	arbitrary_info_t fns = 0;

	(void) hwgraph_info_get_LBL(provider, INFO_LBL_PFUNCS, &fns);
	return (pciio_provider_t *) fns;
}
/*
 * Register a PCI device driver for the given vendor/device id pair.
 * Returns the result of the registry insertion.
 */
/*ARGSUSED4 */
int
pciio_driver_register(
	pciio_vendor_id_t vendor_id,
	pciio_device_id_t device_id,
	char *driver_prefix,
	unsigned flags)
{
	/*
	 * A driver's init routine may call pciio_driver_register before
	 * the system calls pciio_init, so create the registry lazily.
	 */
	if (pciio_registry == NULL)
		pciio_init();

	return cdl_add_driver(pciio_registry, vendor_id, device_id,
			      driver_prefix, flags, NULL);
}
/*
 * Remove an initialization function.
 *
 * Deletes the driver's entry from the global registry.
 */
void
pciio_driver_unregister(
		char *driver_prefix)
{
	/* before a driver calls unregister,
	 * it must have called register; so
	 * we can assume we have a registry here.
	 */
	ASSERT(pciio_registry != NULL);
	cdl_del_driver(pciio_registry, driver_prefix, NULL);
}
/*
 * Set the slot status for a device supported by the
 * driver being registered.
 *
 * Empty stub on this platform -- no slot-status bookkeeping is done
 * here; the hook only exists to satisfy the registration interface.
 */
void
pciio_driver_reg_callback(
		devfs_handle_t pconn_vhdl,
		int key1,
		int key2,
		int error)
{
}
/*
 * Set the slot status for a device supported by the
 * driver being unregistered.
 *
 * Empty stub on this platform -- no slot-status bookkeeping is done
 * here; the hook only exists to satisfy the registration interface.
 */
void
pciio_driver_unreg_callback(
		devfs_handle_t pconn_vhdl,
		int key1,
		int key2,
		int error)
{
}
/*
 * Invoke "func" on every vertex in the registry that might be one of
 * this driver's attach points.
 */
void
pciio_iterate(char *driver_prefix,
	      pciio_iter_f * func)
{
	/*
	 * A driver's init routine may call pciio_iterate before the
	 * system calls pciio_init, so create the registry lazily.
	 */
	if (pciio_registry == NULL)
		pciio_init();
	ASSERT(pciio_registry != NULL);

	cdl_iterate(pciio_registry, driver_prefix, (cdl_iter_f *) func);
}
/*
 * Convenience wrapper: build a fresh pciio_info describing the given
 * slot/function/ids and register it under "connectpt".  Returns the
 * vertex created for the connection point.
 */
devfs_handle_t
pciio_device_register(
		devfs_handle_t connectpt,	/* vertex for /hw/.../pciio/%d */
		devfs_handle_t master,		/* card's master ASIC (PCI provider) */
		pciio_slot_t slot,		/* card's slot */
		pciio_function_t func,		/* card's func */
		pciio_vendor_id_t vendor_id,
		pciio_device_id_t device_id)
{
	pciio_info_t new_info;

	new_info = pciio_device_info_new(NULL, master, slot, func,
					 vendor_id, device_id);
	return pciio_device_info_register(connectpt, new_info);
}
/*
 * Forward an unregister request to the connection point's PCI
 * provider via its function table (DEV_FUNC dispatch).
 */
void
pciio_device_unregister(devfs_handle_t pconn)
{
	DEV_FUNC(pconn,device_unregister)(pconn);
}
/*
 * Fill in (allocating first, if the caller passed NULL) a pciio_info
 * structure describing one PCI function.  The master's fastinfo and
 * provider function table are cached in the structure; the error
 * handler fields start out clear.
 */
pciio_info_t
pciio_device_info_new(
		pciio_info_t pciio_info,
		devfs_handle_t master,
		pciio_slot_t slot,
		pciio_function_t func,
		pciio_vendor_id_t vendor_id,
		pciio_device_id_t device_id)
{
	if (pciio_info == NULL)
		GET_NEW(pciio_info);
	ASSERT(pciio_info != NULL);

	/* identity of this connection point */
	pciio_info->c_slot = slot;
	pciio_info->c_func = func;
	pciio_info->c_vendor = vendor_id;
	pciio_info->c_device = device_id;

	/* cache provider-side handles */
	pciio_info->c_master = master;
	pciio_info->c_mfast = hwgraph_fastinfo_get(master);
	pciio_info->c_pops = pciio_provider_fns_get(master);

	/* no error handler registered yet */
	pciio_info->c_efunc = 0;
	pciio_info->c_einfo = 0;

	return pciio_info;
}
/*
 * Clear out a pciio_info when its device goes away.
 *
 * NOTE : pciio_info is a structure within the pcibr_info
 * and not a pointer to memory allocated on the heap !!
 */
void
pciio_device_info_free(pciio_info_t pciio_info)
{
	/*
	 * Zero the whole structure.  The previous code used
	 * sizeof(pciio_info) -- the size of the POINTER -- which only
	 * cleared the first few bytes and left stale slot/vendor/device
	 * data behind.
	 */
	BZERO((char *)pciio_info,sizeof(*pciio_info));
}
/*
 * Create and wire up the hwgraph vertex for the PCI connection point
 * described by pciio_info, hanging it off "connectpt" under a name
 * derived from the slot/function pair.  Returns the connection-point
 * vertex; if hwgraph_path_add fails, its output handle is returned
 * as-is.
 */
devfs_handle_t
pciio_device_info_register(
		devfs_handle_t connectpt,	/* vertex at center of bus */
		pciio_info_t pciio_info)	/* details about the connectpt */
{
	char name[32];
	devfs_handle_t pconn;
	int device_master_set(devfs_handle_t, devfs_handle_t);

	/* Vertex name is derived from the slot/function pair. */
	pciio_slot_func_to_name(name,
				pciio_info->c_slot,
				pciio_info->c_func);

	if (GRAPH_SUCCESS !=
	    hwgraph_path_add(connectpt, name, &pconn))
		return pconn;

	/* Attach the info to the new vertex as its fastinfo. */
	pciio_info->c_vertex = pconn;
	pciio_info_set(pconn, pciio_info);
#ifdef DEBUG_PCIIO
	{
		int pos;
		char dname[256];
		pos = devfs_generate_path(pconn, dname, 256);
		printk("%s : pconn path= %s \n", __FUNCTION__, &dname[pos]);
	}
#endif /* DEBUG_PCIIO */

	/*
	 * create link to our pci provider
	 */
	device_master_set(pconn, pciio_info->c_master);

#if USRPCI
	/*
	 * Call into usrpci provider to let it initialize for
	 * the given slot.
	 */
	if (pciio_info->c_slot != PCIIO_SLOT_NONE)
		usrpci_device_register(pconn, pciio_info->c_master, pciio_info->c_slot);
#endif
	return pconn;
}
/*
 * Tear down the hwgraph state created by pciio_device_info_register:
 * remove the slot/func edge from connectpt, clear the vertex's
 * fastinfo, drop the link to the PCI provider, then unref and destroy
 * the vertex.  A NULL pciio_info means nothing was registered.
 */
void
pciio_device_info_unregister(devfs_handle_t connectpt,
			     pciio_info_t pciio_info)
{
	char name[32];
	devfs_handle_t pconn;

	if (!pciio_info)
		return;

	pciio_slot_func_to_name(name,
				pciio_info->c_slot,
				pciio_info->c_func);

	hwgraph_edge_remove(connectpt,name,&pconn);
	pciio_info_set(pconn,0);

	/* Remove the link to our pci provider */
	hwgraph_edge_remove(pconn, EDGE_LBL_MASTER, NULL);

	hwgraph_vertex_unref(pconn);
	hwgraph_vertex_destroy(pconn);
}
/*
 * Add the PCI card's inventory information to the hwgraph.  Slots
 * with no device present (vendor or device id is the NONE sentinel)
 * get no inventory entry.
 */
static void
pciio_device_inventory_add(devfs_handle_t pconn_vhdl)
{
	pciio_info_t info = pciio_info_get(pconn_vhdl);

	ASSERT(info);
	ASSERT(info->c_vertex == pconn_vhdl);

	if (info->c_vendor != PCIIO_VENDOR_ID_NONE &&
	    info->c_device != PCIIO_DEVICE_ID_NONE)
		device_inventory_add(pconn_vhdl, INV_IOBD, INV_PCIADAP,
				     info->c_vendor, info->c_device,
				     info->c_slot);
}
/*
 * Record the device's inventory and hand the connection point to the
 * driver registry so matching drivers can be attached.
 */
/*ARGSUSED */
int
pciio_device_attach(devfs_handle_t pconn,
		    int drv_flags)
{
	pciio_info_t info;

	pciio_device_inventory_add(pconn);
	info = pciio_info_get(pconn);

	/*
	 * Attaches only begin after every driver init routine
	 * (including pciio_init) has run, so the registry exists.
	 */
	ASSERT(pciio_registry != NULL);

	return cdl_add_connpt(pciio_registry, info->c_vendor,
			      info->c_device, pconn, drv_flags);
}
/*
 * Remove the connection point from the driver registry, detaching any
 * drivers bound to it.
 */
int
pciio_device_detach(devfs_handle_t pconn,
		    int drv_flags)
{
	pciio_info_t info = pciio_info_get(pconn);

	/*
	 * Detaches only happen after every driver init routine
	 * (including pciio_init) has run, so the registry exists.
	 */
	ASSERT(pciio_registry != NULL);

	return cdl_del_connpt(pciio_registry, info->c_vendor,
			      info->c_device, pconn, drv_flags);
}
/* SN2 */
/*
* Allocate (if necessary) and initialize a PCI window mapping structure.
*/
/*
 * Allocate (if the caller passed NULL) and initialize a PCI window
 * mapping structure covering "region_size" bytes in units of
 * "page_size".  Both sizes must be page-aligned and page_size must be
 * a power of two.
 */
pciio_win_map_t
pciio_device_win_map_new(pciio_win_map_t win_map,
			 size_t region_size,
			 size_t page_size)
{
	ASSERT((page_size & (page_size - 1)) == 0);
	ASSERT((region_size & (page_size - 1)) == 0);

	if (win_map == NULL)
		NEW(win_map);

	/*
	 * The map array tracks the free ``pages'' in the region. The worst
	 * case scenario is when every other page in the region is free --
	 * e.i. maximum fragmentation. This leads to (max pages + 1) / 2 + 1
	 * map entries. The first "+1" handles the divide by 2 rounding; the
	 * second handles the need for an end marker sentinel.
	 */
	win_map->wm_map = rmallocmap((region_size / page_size + 1) / 2 + 1);
	win_map->wm_page_size = page_size;
	ASSERT(win_map->wm_map != NULL);

	return win_map;
}
/*
 * Free resources associated with a PCI window mapping structure.
 *
 * Releases the resource map and clears the structure; the structure's
 * own storage remains owned by the caller.
 */
extern void
pciio_device_win_map_free(pciio_win_map_t win_map)
{
	rmfreemap(win_map->wm_map);
	bzero(win_map, sizeof *win_map);
}
/*
 * Populate window map with specified free range.
 *
 * Both the base address and the size must be multiples of the map's
 * page size; the range is handed to the resource map in page units.
 */
void
pciio_device_win_populate(pciio_win_map_t win_map,
			  iopaddr_t ioaddr,
			  size_t size)
{
	ASSERT((size & (win_map->wm_page_size - 1)) == 0);
	ASSERT((ioaddr & (win_map->wm_page_size - 1)) == 0);

	rmfree(win_map->wm_map,
	       size / win_map->wm_page_size,
	       (unsigned long)ioaddr / win_map->wm_page_size);
}
/*
 * Allocate space from the specified PCI window mapping resource. On
 * success record information about the allocation in the supplied window
 * allocation cookie (if non-NULL) and return the address of the allocated
 * window. On failure return NULL.
 *
 * The "size" parameter is usually from a PCI device's Base Address Register
 * (BAR) decoder. As such, the allocation must be aligned to be a multiple of
 * that. The "align" parameter acts as a ``minimum alignment'' allocation
 * constraint. The alignment contraint reflects system or device addressing
 * restrictions such as the inability to share higher level ``windows''
 * between devices, etc. The returned PCI address allocation will be a
 * multiple of the alignment constraint both in alignment and size. Thus, the
 * returned PCI address block is aligned to the maximum of the requested size
 * and alignment.
 *
 * NOTE(review): the "start" parameter is accepted but never used in
 * either branch below -- confirm whether a start-offset allocation
 * was intended.
 */
iopaddr_t
pciio_device_win_alloc(pciio_win_map_t win_map,
		       pciio_win_alloc_t win_alloc,
		       size_t start, size_t size, size_t align)
{
	unsigned long base;

#ifdef PIC_LATER
	ASSERT((size & (size - 1)) == 0);
	ASSERT((align & (align - 1)) == 0);

	/*
	 * Convert size and alignment to pages. If size is greated than the
	 * requested alignment, we bump the alignment up to size; otherwise
	 * convert the size into a multiple of the alignment request.
	 */
	size = (size + win_map->wm_page_size - 1) / win_map->wm_page_size;
	align = align / win_map->wm_page_size;
	if (size > align)
		align = size;
	else
		size = (size + align - 1) & ~(align - 1);

	/* XXXX */
	base = rmalloc_align(win_map->wm_map, size, align, VM_NOSLEEP);
	if (base == RMALLOC_FAIL)
		return((iopaddr_t)NULL);
#else
	int index_page, index_page_align;
	int align_pages, size_pages;
	int alloc_pages, free_pages;
	int addr_align;

	/* Convert PCI bus alignment from bytes to pages */
	align_pages = align / win_map->wm_page_size;

	/* Convert PCI request from bytes to pages */
	size_pages = (size / win_map->wm_page_size) +
		((size % win_map->wm_page_size) ? 1 : 0);

	/* Align address with the larger of the size or the requested slot align */
	if (size_pages > align_pages)
		align_pages = size_pages;

	/*
	 * Avoid wasting space by aligning - 1; this will prevent crossing
	 * another alignment boundary.
	 */
	alloc_pages = size_pages + (align_pages - 1);

	/* Allocate PCI bus space in pages */
	index_page = (int) rmalloc(win_map->wm_map,
				   (size_t) alloc_pages);

	/* Error if no PCI bus address space available */
	if (!index_page)
		return 0;

	/* PCI bus address index starts at 0 */
	index_page--;

	/* Align the page offset as requested */
	index_page_align = (index_page + (align_pages - 1)) -
		((index_page + (align_pages - 1)) % align_pages);

	free_pages = (align_pages - 1) - (index_page_align - index_page);

	/* Free unused PCI bus pages adjusting the index to start at 1 */
	rmfree(win_map->wm_map,
	       free_pages,
	       (index_page_align + 1) + size_pages);

	/* Return aligned PCI bus space in bytes */
	addr_align = (index_page_align * win_map->wm_page_size);
	/*
	 * NOTE(review): addr_align is computed but never used, and
	 * "base" is set to the UNALIGNED index_page rather than
	 * index_page_align, so the value returned below does not look
	 * like the aligned address the comments promise -- verify
	 * against the PIC_LATER branch before changing anything.
	 */
	base = index_page;
	size = alloc_pages - free_pages;
#endif /* PIC_LATER */

	/*
	 * If a window allocation cookie has been supplied, use it to keep
	 * track of all the allocated space assigned to this window.
	 */
	if (win_alloc) {
		win_alloc->wa_map = win_map;
		win_alloc->wa_base = base;
		win_alloc->wa_pages = size;
	}

	return base * win_map->wm_page_size;
}
/*
 * Return a window allocation's pages to its PCI window map.  A cookie
 * recording zero pages (nothing allocated) is ignored.  As noted
 * above, page addresses in the map are kept offset by 1.
 */
void
pciio_device_win_free(pciio_win_alloc_t win_alloc)
{
	if (!win_alloc->wa_pages)
		return;

	rmfree(win_alloc->wa_map->wm_map,
	       win_alloc->wa_pages,
	       win_alloc->wa_base);
}
/*
* pciio_error_register:
* arrange for a function to be called with
* a specified first parameter plus other
* information when an error is encountered
* and traced to the pci slot corresponding
* to the connection point pconn.
*
* may also be called with a null function
* pointer to "unregister" the error handler.
*
* NOTE: subsequent calls silently overwrite
* previous data for this vertex. We assume that
* cooperating drivers, well, cooperate ...
*/
/*
 * Attach (or, with a NULL efunc, detach) an error handler to the
 * connection point's pciio_info.  Later registrations silently
 * replace earlier ones.
 */
void
pciio_error_register(devfs_handle_t pconn,
		     error_handler_f *efunc,
		     error_handler_arg_t einfo)
{
	pciio_info_t info = pciio_info_get(pconn);

	ASSERT(info != NULL);
	info->c_efunc = efunc;
	info->c_einfo = einfo;
}
/*
 * Check whether any device has been found in this slot.
 * pconn_vhdl is the vertex for the slot; returns 1 if a board was
 * found (non-zero vendor id), 0 otherwise.
 */
int
pciio_slot_inuse(devfs_handle_t pconn_vhdl)
{
	pciio_info_t info = pciio_info_get(pconn_vhdl);

	ASSERT(info);
	ASSERT(info->c_vertex == pconn_vhdl);

	/* A non-zero vendor id means a board was found in this slot. */
	return info->c_vendor != 0;
}
/*
 * Ask the connection point's PCI provider whether DMA is enabled for
 * this device (DEV_FUNC dispatch).
 */
int
pciio_dma_enabled(devfs_handle_t pconn_vhdl)
{
	return DEV_FUNC(pconn_vhdl, dma_enabled)(pconn_vhdl);
}
/*
 * Whether this device is addressed with PCI type-1 configuration
 * cycles.  Always 0 in this implementation.
 */
int
pciio_info_type1_get(pciio_info_t pci_info)
{
	return(0);
}
/*
* These are complementary Linux interfaces that takes in a pci_dev * as the
* first arguement instead of devfs_handle_t.
*/
iopaddr_t snia_pciio_dmatrans_addr(struct pci_dev *, device_desc_t, paddr_t, size_t, unsigned);
pciio_dmamap_t snia_pciio_dmamap_alloc(struct pci_dev *, device_desc_t, size_t, unsigned);
void snia_pciio_dmamap_free(pciio_dmamap_t);
iopaddr_t snia_pciio_dmamap_addr(pciio_dmamap_t, paddr_t, size_t);
void snia_pciio_dmamap_done(pciio_dmamap_t);
pciio_endian_t snia_pciio_endian_set(struct pci_dev *pci_dev, pciio_endian_t device_end,
pciio_endian_t desired_end);
#include <linux/module.h>
EXPORT_SYMBOL(snia_pciio_dmatrans_addr);
EXPORT_SYMBOL(snia_pciio_dmamap_alloc);
EXPORT_SYMBOL(snia_pciio_dmamap_free);
EXPORT_SYMBOL(snia_pciio_dmamap_addr);
EXPORT_SYMBOL(snia_pciio_dmamap_done);
EXPORT_SYMBOL(snia_pciio_endian_set);
/*
 * Linux-style wrapper: allocate read-response buffers for a device,
 * translating the struct pci_dev to its hwgraph vertex before calling
 * the pcibr implementation.
 */
int
snia_pcibr_rrb_alloc(struct pci_dev *pci_dev,
		     int *count_vchan0,
		     int *count_vchan1)
{
	devfs_handle_t dev = PCIDEV_VERTEX(pci_dev);

	return pcibr_rrb_alloc(dev, count_vchan0, count_vchan1);
}
EXPORT_SYMBOL(snia_pcibr_rrb_alloc);
/*
 * Linux-style wrapper: set the endianness conversion between the
 * device and the desired system view, dispatching through the
 * provider's endian_set entry point.
 */
pciio_endian_t
snia_pciio_endian_set(struct pci_dev *pci_dev,
		      pciio_endian_t device_end,
		      pciio_endian_t desired_end)
{
	devfs_handle_t vhdl = PCIDEV_VERTEX(pci_dev);

	return DEV_FUNC(vhdl, endian_set)(vhdl, device_end, desired_end);
}
/*
 * Linux-style wrapper around the provider's dmatrans_addr: set up a
 * direct (map-free) DMA translation for this device and return the
 * PCI bus address.
 */
iopaddr_t
snia_pciio_dmatrans_addr(struct pci_dev *pci_dev,	/* translate for this device */
			 device_desc_t dev_desc,	/* device descriptor */
			 paddr_t paddr,			/* system physical address */
			 size_t byte_count,		/* length */
			 unsigned flags)		/* defined in dma.h */
{
	devfs_handle_t vhdl = PCIDEV_VERTEX(pci_dev);
	unsigned xlate_flags;

	/*
	 * If the device is not a PIC, we always want PCIIO_BYTE_STREAM
	 * set; for a PIC it must not be set.  Applies to SN1 and SN2.
	 */
	if (IS_PIC_DEVICE(pci_dev))
		xlate_flags = flags & ~PCIIO_BYTE_STREAM;
	else
		xlate_flags = flags | PCIIO_BYTE_STREAM;

	return DEV_FUNC(vhdl, dmatrans_addr)
		(vhdl, dev_desc, paddr, byte_count, xlate_flags);
}
/*
 * Linux-style wrapper around the provider's dmamap_alloc: reserve DMA
 * mapping resources for this device, good for mappings of up to
 * byte_count_max bytes.
 */
pciio_dmamap_t
snia_pciio_dmamap_alloc(struct pci_dev *pci_dev,	/* set up mappings for this device */
			device_desc_t dev_desc,		/* device descriptor */
			size_t byte_count_max,		/* max size of a mapping */
			unsigned flags)			/* defined in dma.h */
{
	devfs_handle_t vhdl = PCIDEV_VERTEX(pci_dev);
	unsigned map_flags;

	/*
	 * If the device is not a PIC, we always want PCIIO_BYTE_STREAM
	 * set; for a PIC it must not be set.  Applies to SN1 and SN2.
	 */
	if (IS_PIC_DEVICE(pci_dev))
		map_flags = flags & ~PCIIO_BYTE_STREAM;
	else
		map_flags = flags | PCIIO_BYTE_STREAM;

	return (pciio_dmamap_t) DEV_FUNC(vhdl, dmamap_alloc)
		(vhdl, dev_desc, byte_count_max, map_flags);
}
/*
 * Release DMA mapping resources obtained from
 * snia_pciio_dmamap_alloc, dispatching through the map's provider
 * function table.
 */
void
snia_pciio_dmamap_free(pciio_dmamap_t pciio_dmamap)
{
	DMAMAP_FUNC(pciio_dmamap, dmamap_free)
	    (CAST_DMAMAP(pciio_dmamap));
}
/*
 * Establish a DMA translation for "byte_count" bytes at system
 * physical address "paddr" using previously allocated mapping
 * resources; returns the resulting PCI bus address.
 */
iopaddr_t
snia_pciio_dmamap_addr(pciio_dmamap_t pciio_dmamap,	/* use these mapping resources */
		       paddr_t paddr,			/* map for this address */
		       size_t byte_count)		/* map this many bytes */
{
	return DMAMAP_FUNC(pciio_dmamap, dmamap_addr)
	    (CAST_DMAMAP(pciio_dmamap), paddr, byte_count);
}
/*
 * Signal that the current DMA transfer using this map is complete,
 * letting the provider release or recycle per-transfer state.
 */
void
snia_pciio_dmamap_done(pciio_dmamap_t pciio_dmamap)
{
	DMAMAP_FUNC(pciio_dmamap, dmamap_done)
	    (CAST_DMAMAP(pciio_dmamap));
}
/*
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/sn/sgi.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/iograph.h>
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h>
#include <asm/sn/xtalk/xwidget.h>
#include <asm/sn/pci/bridge.h>
#include <asm/sn/pci/pciio.h>
#include <asm/sn/pci/pcibr.h>
#include <asm/sn/pci/pcibr_private.h>
#include <asm/sn/pci/pci_defs.h>
#include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
#include <asm/sn/eeprom.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_private.h>
extern char *bcopy(const char * src, char * dest, int count);
#define PCI_BUS_NO_1 1
int pic_devflag = D_MP;
extern int pcibr_attach2(devfs_handle_t, bridge_t *, devfs_handle_t, int, pcibr_soft_t *);
extern void pcibr_driver_reg_callback(devfs_handle_t, int, int, int);
extern void pcibr_driver_unreg_callback(devfs_handle_t, int, int, int);
/*
 * pic_init: register the PIC widget driver with the xtalk layer so
 * that attach processing runs for each PIC bridge ASIC discovered.
 */
void
pic_init(void)
{
	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INIT, NULL, "pic_init()\n"));

	xwidget_driver_register(PIC_WIDGET_PART_NUM_BUS0,
				PIC_WIDGET_MFGR_NUM,
				"pic_",
				0);
}
/*
 * Copy the inventory_t from conn_v to peer_conn_v.
 * Returns 1 on success, 0 on failure (lookup or label-add failed).
 */
int
pic_bus1_inventory_dup(devfs_handle_t conn_v, devfs_handle_t peer_conn_v)
{
	inventory_t *pinv, *peer_pinv;

	if (hwgraph_info_get_LBL(conn_v, INFO_LBL_INVENT,
				 (arbitrary_info_t *)&pinv) != GRAPH_SUCCESS) {
		printk("pic_bus1_inventory_dup: cannot get INFO_LBL_INVENT from 0x%lx\n ",
		       conn_v);
		return 0;
	}

	/* Duplicate the inventory record and attach it to the peer. */
	NEW(peer_pinv);
	bcopy(pinv, peer_pinv, sizeof(inventory_t));
	if (hwgraph_info_add_LBL(peer_conn_v, INFO_LBL_INVENT,
				 (arbitrary_info_t)peer_pinv) != GRAPH_SUCCESS) {
		DEL(peer_pinv);
		return 0;
	}
	return 1;
}
/*
 * copy xwidget_info_t from conn_v to peer_conn_v
 *
 * Builds a new xwidget_info for the peer Cbrick's copy of the widget
 * vertex, pointing at the peer hub as master.  Returns 1 on success,
 * 0 on failure.
 */
int
pic_bus1_widget_info_dup(devfs_handle_t conn_v, devfs_handle_t peer_conn_v,
			 cnodeid_t xbow_peer)
{
	xwidget_info_t widget_info, peer_widget_info;
	char peer_path[256];	/* NOTE(review): never written before use below */
	char *p;		/* NOTE(review): unused */
	devfs_handle_t peer_hubv;
	hubinfo_t peer_hub_info;

	/* get the peer hub's widgetid */
	peer_hubv = NODEPDA(xbow_peer)->node_vertex;
	peer_hub_info = NULL;
	hubinfo_get(peer_hubv, &peer_hub_info);
	if (peer_hub_info == NULL)
		return 0;

	if (hwgraph_info_get_LBL(conn_v, INFO_LBL_XWIDGET,
				 (arbitrary_info_t *)&widget_info) == GRAPH_SUCCESS) {
		NEW(peer_widget_info);
		peer_widget_info->w_vertex = peer_conn_v;
		peer_widget_info->w_id = widget_info->w_id;
		peer_widget_info->w_master = peer_hubv;
		peer_widget_info->w_masterid = peer_hub_info->h_widgetid;

		/* structure copy */
		peer_widget_info->w_hwid = widget_info->w_hwid;
		peer_widget_info->w_efunc = 0;
		peer_widget_info->w_einfo = 0;
		/*
		 * NOTE(review): peer_path is read here (strlen/strcpy)
		 * but is never initialized in this function, so w_name
		 * ends up holding stack garbage.  It presumably should
		 * be built the way pic_bus1_redist() builds its peer
		 * path -- confirm and fix here or at the caller.
		 */
		peer_widget_info->w_name = kmalloc(strlen(peer_path) + 1, GFP_KERNEL);
		strcpy(peer_widget_info->w_name, peer_path);

		if (hwgraph_info_add_LBL(peer_conn_v, INFO_LBL_XWIDGET,
					 (arbitrary_info_t)peer_widget_info) != GRAPH_SUCCESS) {
			DEL(peer_widget_info);
			return 0;
		}
		xwidget_info_set(peer_conn_v, peer_widget_info);
		return 1;
	}
	printk("pic_bus1_widget_info_dup: "
	       "cannot get INFO_LBL_XWIDGET from 0x%lx\n", conn_v);
	return 0;
}
/*
 * If this PIC is attached to two Cbricks ("dual-ported") then
 * attach each bus to opposite Cbricks.
 *
 * If successful, return a new vertex suitable for attaching the PIC bus.
 * If not successful, return zero and both buses will attach to the
 * vertex passed into pic_attach().
 */
devfs_handle_t
pic_bus1_redist(nasid_t nasid, devfs_handle_t conn_v)
{
	cnodeid_t cnode = NASID_TO_COMPACT_NODEID(nasid);
	cnodeid_t xbow_peer = -1;
	char pathname[256], peer_path[256], tmpbuf[256];
	char *p;
	int rc;
	devfs_handle_t peer_conn_v;
	int pos;
	slabid_t slab;

	if (NODEPDA(cnode)->xbow_peer >= 0) {	/* if dual-ported */
		/* create a path for this widget on the peer Cbrick */
		/* pcibr widget hw/module/001c11/slab/0/Pbrick/xtalk/12 */
		/* sprintf(pathname, "%v", conn_v); */
		xbow_peer = NASID_TO_COMPACT_NODEID(NODEPDA(cnode)->xbow_peer);
		pos = devfs_generate_path(conn_v, tmpbuf, 256);
		strcpy(pathname, &tmpbuf[pos]);
		/*
		 * Skip past the "hw/module/<module>/slab/<n>/" prefix.
		 * NOTE(review): this assumes the prefix is always exactly
		 * as wide as the sample "hw/module/001c01/slab/0/" --
		 * verify for every module-id and slab-number width.
		 */
		p = pathname + strlen("hw/module/001c01/slab/0/");
		/*
		 * NOTE(review): only 16 of tmpbuf's 256 bytes are cleared
		 * before format_module_id() writes into it -- presumably
		 * the brief module id fits in 16 bytes; confirm.
		 */
		memset(tmpbuf, 0, 16);
		format_module_id(tmpbuf, geo_module((NODEPDA(xbow_peer))->geoid), MODULE_FORMAT_BRIEF);
		slab = geo_slab((NODEPDA(xbow_peer))->geoid);
		sprintf(peer_path, "module/%s/slab/%d/%s", tmpbuf, (int)slab, p);

		/* Look for vertex for this widget on the peer Cbrick.
		 * Expect GRAPH_NOT_FOUND.
		 */
		rc = hwgraph_traverse(hwgraph_root, peer_path, &peer_conn_v);
		if (GRAPH_SUCCESS == rc)
			printk("pic_attach: found unexpected vertex: 0x%lx\n",
			       peer_conn_v);
		else if (GRAPH_NOT_FOUND != rc) {
			printk("pic_attach: hwgraph_traverse unexpectedly"
			       " returned 0x%x\n", rc);
		} else {
			/* try to add the widget vertex to the peer Cbrick */
			rc = hwgraph_path_add(hwgraph_root, peer_path, &peer_conn_v);
			if (GRAPH_SUCCESS != rc)
				printk("pic_attach: hwgraph_path_add"
				       " failed with 0x%x\n", rc);
			else {
				PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v,
					"pic_bus1_redist: added vertex %v\n", peer_conn_v));

				/* Now hang appropiate stuff off of the new
				 * vertex. We bail out if we cannot add something.
				 * In that case, we don't remove the newly added
				 * vertex but that should be safe and we don't
				 * really expect the additions to fail anyway.
				 */
#if 0
				if (!pic_bus1_inventory_dup(conn_v, peer_conn_v))
					return 0;
				pic_bus1_device_desc_dup(conn_v, peer_conn_v);
#endif
				if (!pic_bus1_widget_info_dup(conn_v, peer_conn_v, xbow_peer))
					return 0;

				return peer_conn_v;
			}
		}
	}
	return 0;
}
/*
 * Attach routine for a PIC bridge ASIC: map both PIC bus register
 * sets, optionally redistribute bus 1 onto a dual-ported peer Cbrick,
 * create the two PCI bus vertices, register the PIC provider on each,
 * and run the generic pcibr attach for each bus.  Always returns 0.
 */
int
pic_attach(devfs_handle_t conn_v)
{
	int rc;
	bridge_t *bridge0, *bridge1 = (bridge_t *)0;
	devfs_handle_t pcibr_vhdl0, pcibr_vhdl1 = (devfs_handle_t)0;
	pcibr_soft_t bus0_soft, bus1_soft = (pcibr_soft_t)0;
	devfs_handle_t conn_v0, conn_v1, peer_conn_v;

	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v, "pic_attach()\n"));

	/* Bus 1's registers sit at a fixed offset from bus 0's. */
	bridge0 = (bridge_t *) xtalk_piotrans_addr(conn_v, NULL,
						   0, sizeof(bridge_t), 0);
	bridge1 = (bridge_t *)((char *)bridge0 + PIC_BUS1_OFFSET);
	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v,
			    "pic_attach: bridge0=0x%x, bridge1=0x%x\n",
			    bridge0, bridge1));

	conn_v0 = conn_v1 = conn_v;

	/* If dual-ported then split the two PIC buses across both Cbricks */
	/* NOTE(review): assignment inside the condition is intentional but
	 * unparenthesized -- consider "if ((peer_conn_v = ...))". */
	if (peer_conn_v = pic_bus1_redist(NASID_GET(bridge0), conn_v))
		conn_v1 = peer_conn_v;

	/*
	 * Create the vertex for the PCI buses, which we
	 * will also use to hold the pcibr_soft and
	 * which will be the "master" vertex for all the
	 * pciio connection points we will hang off it.
	 * This needs to happen before we call nic_bridge_vertex_info
	 * as we are some of the *_vmc functions need access to the edges.
	 *
	 * Opening this vertex will provide access to
	 * the Bridge registers themselves.
	 */
	/* FIXME: what should the hwgraph path look like ? */
	rc = hwgraph_path_add(conn_v0, EDGE_LBL_PCIX_0, &pcibr_vhdl0);
	ASSERT(rc == GRAPH_SUCCESS);
	rc = hwgraph_path_add(conn_v1, EDGE_LBL_PCIX_1, &pcibr_vhdl1);
	ASSERT(rc == GRAPH_SUCCESS);
	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v,
			    "pic_attach: pcibr_vhdl0=%v, pcibr_vhdl1=%v\n",
			    pcibr_vhdl0, pcibr_vhdl1));

	/* register pci provider array */
	pciio_provider_register(pcibr_vhdl0, &pci_pic_provider);
	pciio_provider_register(pcibr_vhdl1, &pci_pic_provider);

	pciio_provider_startup(pcibr_vhdl0);
	pciio_provider_startup(pcibr_vhdl1);

	pcibr_attach2(conn_v0, bridge0, pcibr_vhdl0, 0, &bus0_soft);
	pcibr_attach2(conn_v1, bridge1, pcibr_vhdl1, 1, &bus1_soft);

	/* save a pointer to the PIC's other bus's soft struct */
	bus0_soft->bs_peers_soft = bus1_soft;
	bus1_soft->bs_peers_soft = bus0_soft;
	/*
	 * NOTE(review): the next line clobbers bus 0's peer pointer back
	 * to NULL immediately after it was set, contradicting the
	 * comment above.  Looks like either a leftover bringup hack or a
	 * bug -- confirm before relying on bs_peers_soft from bus 0.
	 */
	bus0_soft->bs_peers_soft = (pcibr_soft_t)0;

	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v,
			    "pic_attach: bus0_soft=0x%x, bus1_soft=0x%x\n",
			    bus0_soft, bus1_soft));

	return 0;
}
/*
* pci provider functions
*
* mostly in pcibr.c but if any are needed here then
* this might be a way to get them here.
*/
/*
 * PIC's pciio provider function table.  This is a positional
 * initializer, so the entry order must match the pciio_provider_t
 * declaration exactly; nearly every operation is serviced by the
 * generic pcibr implementation.  The two error hooks left at 0 are
 * not provided for PIC.
 */
pciio_provider_t pci_pic_provider =
{
	/* PIO mapping */
	(pciio_piomap_alloc_f *) pcibr_piomap_alloc,
	(pciio_piomap_free_f *) pcibr_piomap_free,
	(pciio_piomap_addr_f *) pcibr_piomap_addr,
	(pciio_piomap_done_f *) pcibr_piomap_done,
	(pciio_piotrans_addr_f *) pcibr_piotrans_addr,
	(pciio_piospace_alloc_f *) pcibr_piospace_alloc,
	(pciio_piospace_free_f *) pcibr_piospace_free,
	/* DMA mapping and draining */
	(pciio_dmamap_alloc_f *) pcibr_dmamap_alloc,
	(pciio_dmamap_free_f *) pcibr_dmamap_free,
	(pciio_dmamap_addr_f *) pcibr_dmamap_addr,
	(pciio_dmamap_list_f *) pcibr_dmamap_list,
	(pciio_dmamap_done_f *) pcibr_dmamap_done,
	(pciio_dmatrans_addr_f *) pcibr_dmatrans_addr,
	(pciio_dmatrans_list_f *) pcibr_dmatrans_list,
	(pciio_dmamap_drain_f *) pcibr_dmamap_drain,
	(pciio_dmaaddr_drain_f *) pcibr_dmaaddr_drain,
	(pciio_dmalist_drain_f *) pcibr_dmalist_drain,
	/* interrupts */
	(pciio_intr_alloc_f *) pcibr_intr_alloc,
	(pciio_intr_free_f *) pcibr_intr_free,
	(pciio_intr_connect_f *) pcibr_intr_connect,
	(pciio_intr_disconnect_f *) pcibr_intr_disconnect,
	(pciio_intr_cpu_get_f *) pcibr_intr_cpu_get,
	/* provider lifecycle and configuration */
	(pciio_provider_startup_f *) pcibr_provider_startup,
	(pciio_provider_shutdown_f *) pcibr_provider_shutdown,
	(pciio_reset_f *) pcibr_reset,
	(pciio_write_gather_flush_f *) pcibr_write_gather_flush,
	(pciio_endian_set_f *) pcibr_endian_set,
	(pciio_priority_set_f *) pcibr_priority_set,
	(pciio_config_get_f *) pcibr_config_get,
	(pciio_config_set_f *) pcibr_config_set,
	/* error hooks: not implemented for PIC */
	(pciio_error_devenable_f *) 0,
	(pciio_error_extract_f *) 0,
	/* driver registration callbacks and misc */
	(pciio_driver_reg_callback_f *) pcibr_driver_reg_callback,
	(pciio_driver_unreg_callback_f *) pcibr_driver_unreg_callback,
	(pciio_device_unregister_f *) pcibr_device_unregister,
	(pciio_dma_enabled_f *) pcibr_dma_enabled,
};
/* $Id$
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/config.h>
#include <linux/slab.h>
#include <asm/sn/sgi.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/sn_private.h>
#include <asm/sn/pci/pciba.h>
#include <linux/smp.h>
extern void mlreset(void);
extern int init_hcl(void);
extern void klgraph_hack_init(void);
extern void hubspc_init(void);
extern void pciio_init(void);
extern void pcibr_init(void);
extern void xtalk_init(void);
extern void xbow_init(void);
extern void xbmon_init(void);
extern void pciiox_init(void);
extern void pic_init(void);
extern void usrpci_init(void);
extern void ioc3_init(void);
extern void initialize_io(void);
extern void klhwg_add_all_modules(devfs_handle_t);
extern void klhwg_add_all_nodes(devfs_handle_t);
void sn_mp_setup(void);
extern devfs_handle_t hwgraph_root;
extern void io_module_init(void);
extern void pci_bus_cvlink_init(void);
extern void temp_hack(void);
extern int pci_bus_to_hcl_cvlink(void);
/* #define DEBUG_IO_INIT 1 */
#ifdef DEBUG_IO_INIT
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif /* DEBUG_IO_INIT */
/*
* per_hub_init
*
* This code is executed once for each Hub chip.
*/
static void
per_hub_init(cnodeid_t cnode)
{
	nasid_t nasid;
	nodepda_t *npdap;
	ii_icmr_u_t ii_icmr;
	ii_ibcr_u_t ii_ibcr;

	nasid = COMPACT_TO_NASID_NODEID(cnode);
	ASSERT(nasid != INVALID_NASID);
	ASSERT(NASID_TO_COMPACT_NODEID(nasid) == cnode);

	/* NOTE(review): npdap is fetched but not used in this function. */
	npdap = NODEPDA(cnode);

	/* NOTE(review): magic value -- IIO_IWEIM field meaning is not
	 * derivable here; see the Shub II chip documentation. */
	REMOTE_HUB_S(nasid, IIO_IWEIM, 0x8000);

	/*
	 * Set the total number of CRBs that can be used.
	 */
	ii_icmr.ii_icmr_regval= 0x0;
	ii_icmr.ii_icmr_fld_s.i_c_cnt = 0xf;
	REMOTE_HUB_S(nasid, IIO_ICMR, ii_icmr.ii_icmr_regval);

	/*
	 * Set the number of CRBs that both of the BTEs combined
	 * can use minus 1.
	 */
	ii_ibcr.ii_ibcr_regval= 0x0;
	ii_ibcr.ii_ibcr_fld_s.i_count = 0x8;
	REMOTE_HUB_S(nasid, IIO_IBCR, ii_ibcr.ii_ibcr_regval);

	/*
	 * Set CRB timeout to be 10ms.
	 */
#ifdef BRINGUP2
	REMOTE_HUB_S(nasid, IIO_ICTP, 0xffffff );
	REMOTE_HUB_S(nasid, IIO_ICTO, 0xff);
	//REMOTE_HUB_S(nasid, IIO_IWI, 0x00FF00FF00FFFFFF);
#endif

	/* Initialize error interrupts for this hub. */
	hub_error_init(cnode);
}
/*
* This routine is responsible for the setup of all the IRIX hwgraph style
* stuff that's been pulled into linux. It's called by sn_pci_find_bios which
* is called just before the generic Linux PCI layer does its probing (by
* platform_pci_fixup aka sn_pci_fixup).
*
* It is very IMPORTANT that this call is only made by the Master CPU!
*
*/
void
sgi_master_io_infr_init(void)
{
	int cnode;
	extern void kdba_io_init();

	/*
	 * Do any early init stuff .. einit_tbl[] etc.
	 * The ordering of the calls below matters: later init routines
	 * depend on state set up by earlier ones.
	 */
	init_hcl();	/* Sets up the hwgraph compatibility layer with devfs */

	/*
	 * initialize the Linux PCI to xwidget vertexes ..
	 */
	pci_bus_cvlink_init();

	kdba_io_init();

#ifdef BRINGUP
	/*
	 * Hack to provide statically initialized klgraph entries.
	 */
	DBG("--> sgi_master_io_infr_init: calling klgraph_hack_init()\n");
	klgraph_hack_init();
#endif /* BRINGUP */

	/*
	 * This is the Master CPU.  Emulate mlsetup and main.c in Irix.
	 */
	mlreset();

	/*
	 * allowboot() is called by kern/os/main.c in main()
	 * Emulate allowboot() ...
	 *   per_cpu_init() - only need per_hub_init()
	 *   cpu_io_setup() - Nothing to do.
	 */
	sn_mp_setup();

	/* Per-Hub-chip hardware setup for every node. */
	for (cnode = 0; cnode < numnodes; cnode++) {
		per_hub_init(cnode);
	}

	/* We can do headless hub cnodes here .. */

	/*
	 * io_init[] stuff.
	 *
	 * Get SGI IO Infrastructure drivers to init and register with
	 * each other etc.
	 */
	hubspc_init();
	pciio_init();
	pcibr_init();
	pic_init();
	xtalk_init();
	xbow_init();
	xbmon_init();
	pciiox_init();
	usrpci_init();
	ioc3_init();

	/*
	 * Our IO Infrastructure drivers are in place ..
	 * Initialize the whole IO Infrastructure .. xwidget/device probes.
	 */
	initialize_io();
	pci_bus_to_hcl_cvlink();

#ifdef CONFIG_PCIBA
	DBG("--> sgi_master_io_infr_init: calling pciba_init()\n");
#ifndef BRINGUP2
	pciba_init();
#endif
#endif
}
/*
* One-time setup for MP SN.
* Allocate per-node data, slurp prom klconfig information and
* convert it to hwgraph information.
*/
/*
 * One-time setup for MP SN: create per-cpu platform data for each
 * enabled CPU, then build the platform-dependent hwgraph vertices.
 */
void
sn_mp_setup(void)
{
	cpuid_t cpu;

	/* Holes in the CPU number space are simply skipped. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_enabled(cpu))
			init_platform_pda(cpu);

	/*
	 * Initialize platform-dependent vertices in the hwgraph:
	 * module, node, cpu, memory, slot, hub, router, xbow.
	 */
	io_module_init();	/* used to be called module_init() */
	klhwg_add_all_modules(hwgraph_root);
	klhwg_add_all_nodes(hwgraph_root);
}
/* $Id$
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*/
#ident "$Revision: 1.167 $"
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/sn/sgi.h>
#include <asm/sn/iograph.h>
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_private.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/pci/pciio.h>
#include <asm/sn/pci/pcibr.h>
#include <asm/sn/xtalk/xtalk.h>
#include <asm/sn/pci/pcibr_private.h>
#include <asm/sn/intr.h>
#include <asm/sn/sn2/shub_mmr_t.h>
#include <asm/sal.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/sndrv.h>
/*
 * Shub WAR for the Xbridge little-endian problem: Xbridge has to run
 * BIG ENDIAN even behind Shub.
 *
 * io_sh_swapper: turn Shub byte swapping on/off. All DMA data moving
 * between Shub and XIO is byte-swapped when enabled.
 */
void
io_sh_swapper(nasid_t nasid, int onoff)
{
	ii_iwc_u_t iwc;

	iwc.ii_iwc_regval = REMOTE_HUB_L(nasid, IIO_IWC);
	iwc.ii_iwc_fld_s.i_dma_byte_swap = onoff;
	REMOTE_HUB_S(nasid, IIO_IWC, iwc.ii_iwc_regval);
	/* re-read the register; presumably flushes the write — kept from original */
	iwc.ii_iwc_regval = REMOTE_HUB_L(nasid, IIO_IWC);
}
/*
 * io_get_sh_swapper: report the current DMA byte-swap mode for a nasid.
 * Returns 1 if swapping is on, 0 if off.
 */
int
io_get_sh_swapper(nasid_t nasid)
{
	ii_iwc_u_t iwc;

	iwc.ii_iwc_regval = REMOTE_HUB_L(nasid, IIO_IWC);
	return iwc.ii_iwc_fld_s.i_dma_byte_swap;
}
/* Number of Shub performance-counter (ECF) registers */
#define SHUB_NUM_ECF_REGISTERS 8

/* Last snapshot of the counters, filled by capture_shub_stats() and
 * copied out to user space on SNDRV_SHUB_GETSTATS. */
static uint32_t shub_perf_counts[SHUB_NUM_ECF_REGISTERS];

/* MMR addresses of the eight Shub performance counters, in index order */
static shubreg_t shub_perf_counts_regs[SHUB_NUM_ECF_REGISTERS] = {
	SH_PERFORMANCE_COUNTER0,
	SH_PERFORMANCE_COUNTER1,
	SH_PERFORMANCE_COUNTER2,
	SH_PERFORMANCE_COUNTER3,
	SH_PERFORMANCE_COUNTER4,
	SH_PERFORMANCE_COUNTER5,
	SH_PERFORMANCE_COUNTER6,
	SH_PERFORMANCE_COUNTER7
};
/* Write a 64-bit value to a shub MMR on the given compact node. */
static inline void
shub_mmr_write(cnodeid_t cnode, shubreg_t reg, uint64_t val)
{
	int nasid = cnodeid_to_nasid(cnode);
	volatile uint64_t *mmr = (uint64_t *)(GLOBAL_MMR_ADDR(nasid, reg));

	*mmr = val;
	__ia64_mf_a();		/* order the MMR access */
}
/* Write a 32-bit value to a shub MMR on the given compact node. */
static inline void
shub_mmr_write32(cnodeid_t cnode, shubreg_t reg, uint32_t val)
{
	int nasid = cnodeid_to_nasid(cnode);
	volatile uint32_t *mmr = (uint32_t *)(GLOBAL_MMR_ADDR(nasid, reg));

	*mmr = val;
	__ia64_mf_a();		/* order the MMR access */
}
/* Read a 64-bit shub MMR on the given compact node. */
static inline uint64_t
shub_mmr_read(cnodeid_t cnode, shubreg_t reg)
{
	int nasid = cnodeid_to_nasid(cnode);
	volatile uint64_t regval;

	regval = *(uint64_t *)(GLOBAL_MMR_ADDR(nasid, reg));
	__ia64_mf_a();		/* order the MMR access */
	return regval;
}
/* Read a 32-bit shub MMR on the given compact node. */
static inline uint32_t
shub_mmr_read32(cnodeid_t cnode, shubreg_t reg)
{
	int nasid = cnodeid_to_nasid(cnode);
	volatile uint32_t regval;

	regval = *(uint32_t *)(GLOBAL_MMR_ADDR(nasid, reg));
	__ia64_mf_a();		/* order the MMR access */
	return regval;
}
/*
 * Zero both the software snapshot and the hardware performance
 * counters for the given node.  Always returns 0.
 */
static int
reset_shub_stats(cnodeid_t cnode)
{
	int idx;

	for (idx = 0; idx < SHUB_NUM_ECF_REGISTERS; idx++) {
		shub_perf_counts[idx] = 0;
		shub_mmr_write32(cnode, shub_perf_counts_regs[idx], 0);
	}
	return 0;
}
/*
 * Backend for the SNDRV_SHUB_CONFIGURE ioctl.
 *
 * 'arg' is a user pointer to { uint64_t regcnt; { uint64_t reg, val } [regcnt] }.
 * Each (reg, val) pair is written to the node's shub MMRs.  Register
 * addresses must be 8-byte aligned.
 *
 * Returns 0 on success, -EFAULT on a bad user pointer, -EINVAL on an
 * unaligned register address.
 *
 * Fix: the error printk lacked a log level; tag it KERN_ERR so it is
 * not logged at default priority.
 */
static int
configure_shub_stats(cnodeid_t cnode, unsigned long arg)
{
	uint64_t	*p = (uint64_t *)arg;
	uint64_t	i;
	uint64_t	regcnt;
	uint64_t	regval[2];

	if (copy_from_user((void *)&regcnt, p, sizeof(regcnt)))
		return -EFAULT;

	/* pairs follow the count; advance two u64s per iteration */
	for (p++, i = 0; i < regcnt; i++, p += 2) {
		if (copy_from_user((void *)regval, (void *)p, sizeof(regval)))
			return -EFAULT;
		if (regval[0] & 0x7) {
			printk(KERN_ERR "Error: configure_shub_stats: unaligned address 0x%016lx\n",
			       regval[0]);
			return -EINVAL;
		}
		shub_mmr_write(cnode, (shubreg_t)regval[0], regval[1]);
	}
	return 0;
}
/*
 * Snapshot all shub performance counters for 'cnode' into 'counts'.
 * Always returns 0.
 */
static int
capture_shub_stats(cnodeid_t cnode, uint32_t *counts)
{
	int idx;

	for (idx = 0; idx < SHUB_NUM_ECF_REGISTERS; idx++)
		counts[idx] = shub_mmr_read32(cnode, shub_perf_counts_regs[idx]);
	return 0;
}
/*
 * ioctl entry point for the shub statistics device (shub_mon_fops).
 * The compact node being monitored is stashed as fastinfo on the
 * devfs vertex backing the inode.
 */
static int
shubstats_ioctl(struct inode *inode, struct file *file,
		unsigned int cmd, unsigned long arg)
{
	cnodeid_t	cnode;
	uint64_t	longarg;
	devfs_handle_t	d;
	int		nasid;

	if ((d = devfs_get_handle_from_inode(inode)) == NULL)
		return -ENODEV;
	/* fastinfo was set to the cnode when the vertex was created */
	cnode = (cnodeid_t)hwgraph_fastinfo_get(d);

	switch (cmd) {
	case SNDRV_SHUB_CONFIGURE:
		/* write user-supplied (reg, val) pairs to the shub MMRs */
		return configure_shub_stats(cnode, arg);
		break;		/* not reached */

	case SNDRV_SHUB_RESETSTATS:
		reset_shub_stats(cnode);
		break;

	case SNDRV_SHUB_INFOSIZE:
		/* tell the caller how big a GETSTATS buffer must be */
		longarg = sizeof(shub_perf_counts);
		if (copy_to_user((void *)arg, &longarg, sizeof(longarg))) {
			return -EFAULT;
		}
		break;

	case SNDRV_SHUB_GETSTATS:
		/* snapshot the hardware counters, then copy out */
		capture_shub_stats(cnode, shub_perf_counts);
		if (copy_to_user((void *)arg, shub_perf_counts,
				 sizeof(shub_perf_counts))) {
			return -EFAULT;
		}
		break;

	case SNDRV_SHUB_GETNASID:
		nasid = cnodeid_to_nasid(cnode);
		if (copy_to_user((void *)arg, &nasid,
				 sizeof(nasid))) {
			return -EFAULT;
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/* File operations for the shub statistics monitor device: ioctl only;
 * all other handlers are left NULL by static initialization. */
struct file_operations shub_mon_fops = {
	ioctl: shubstats_ioctl,
};
/* $Id: shubio.c,v 1.1 2002/02/28 17:31:25 marcelo Exp $
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000,2002-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/smp.h>
#include <asm/sn/sgi.h>
#include <asm/sn/io.h>
#include <asm/sn/iograph.h>
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h>
#include <asm/sn/sn_private.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/pci/pciio.h>
#include <asm/sn/pci/pcibr.h>
#include <asm/sn/xtalk/xtalk.h>
#include <asm/sn/pci/pcibr_private.h>
#include <asm/sn/intr.h>
#include <asm/sn/ioerror_handling.h>
#include <asm/sn/ioerror.h>
#include <asm/sn/sn2/shubio.h>
error_state_t error_state_get(devfs_handle_t v);
error_return_code_t error_state_set(devfs_handle_t v,error_state_t new_state);
/*
 * hub_xp_error_handler: hand a hub-level I/O error down to the
 * crosstalk (xtalk) layer, after verifying the hub's I/O link is
 * still usable and propagating the hub's error state to the
 * attached crosstalk device.
 */
/*ARGSUSED*/
int
hub_xp_error_handler(
	devfs_handle_t hub_v,
	nasid_t nasid,
	int error_code,
	ioerror_mode_t mode,
	ioerror_t *ioerror)
{
	/*REFERENCED*/
	hubreg_t iio_imem;
	devfs_handle_t xswitch;
	error_state_t e_state;
	cnodeid_t cnode;

	/*
	 * Before walking down to the next level, check if
	 * the I/O link is up. If it's been disabled by the
	 * hub ii for some reason, we can't even touch the
	 * widget registers.
	 */
	iio_imem = REMOTE_HUB_L(nasid, IIO_IMEM);

	if (!(iio_imem & (IIO_IMEM_B0ESD|IIO_IMEM_W0ESD))){
		/*
		 * IIO_IMEM_B0ESD getting set, indicates II shutdown
		 * on HUB0 parts.. Hopefully that's not true for
		 * Hub1 parts..
		 *
		 * If either one of them is shut down, can't
		 * go any further.
		 */
		return IOERROR_XTALKLEVEL;
	}

	/* Get the error state of the hub */
	e_state = error_state_get(hub_v);

	cnode = NASID_TO_COMPACT_NODEID(nasid);

	/* the crosstalk switch vertex attached to this node */
	xswitch = NODEPDA(cnode)->basew_xc;

	/* Set the error state of the crosstalk device to that of
	 * hub.
	 */
	if (error_state_set(xswitch , e_state) ==
	    ERROR_RETURN_CODE_CANNOT_SET_STATE)
		return(IOERROR_UNHANDLED);

	/* Clean the error state of the hub if we are in the action handling
	 * phase.
	 */
	if (e_state == ERROR_STATE_ACTION)
		(void)error_state_set(hub_v, ERROR_STATE_NONE);

	/* hand the error off to the switch or the directly
	 * connected crosstalk device.
	 */
	return xtalk_error_handler(xswitch,
				   error_code, mode, ioerror);
}
/*
 * Check whether outbound PIO access to the widget recorded in
 * 'ioerror' is enabled on the source hub.  Returns 1 when enabled,
 * 0 when disabled or when the source node cannot be determined.
 */
int
is_widget_pio_enabled(ioerror_t *ioerror)
{
	cnodeid_t node;
	nasid_t node_nasid;
	hubreg_t iowa;
	xwidgetnum_t wid;
	iopaddr_t val;

	/* Node on which the PIO error was seen */
	IOERROR_GETVALUE(val, ioerror, srcnode);
	node = val;
	if (node == CNODEID_NONE)
		return 0;

	/* Translate to a nasid */
	node_nasid = COMPACT_TO_NASID_NODEID(node);
	if (node_nasid == INVALID_NASID)
		return 0;

	/* Outbound widget access register for that hub */
	iowa = REMOTE_HUB_L(node_nasid, IIO_IOWA);

	IOERROR_GETVALUE(val, ioerror, widgetnum);
	wid = val;

	/* Is the faulting widget's PIO window enabled? */
	return (iowa & IIO_IOWA_WIDGET(wid)) ? 1 : 0;
}
/*
 * Hub IO error handling.
 *
 * Gets invoked for different types of errors found at the hub.
 * Typically this includes situations from bus error or due to
 * an error interrupt (mostly generated at the hub).
 *
 * Translates the faulting hub address into (widgetnum, xtalkaddr)
 * and dispatches to hub_xp_error_handler() / the xtalk layer.
 * Returns an IOERROR_* disposition code.
 */
int
hub_ioerror_handler(
	devfs_handle_t hub_v,
	int error_code,
	int mode,
	struct io_error_s *ioerror)
{
	hubinfo_t hinfo; 	/* Hub info pointer */
	nasid_t nasid;
	int retval = 0;
	/*REFERENCED*/
	iopaddr_t p;

	IOERROR_DUMP("hub_ioerror_handler", error_code, mode, ioerror);

	hubinfo_get(hub_v, &hinfo);

	if (!hinfo){
		/* Print an error message and return */
		/* NOTE(review): no message is actually printed here; we fall
		 * through to 'end' with retval == 0. */
		goto end;
	}
	nasid = hinfo->h_nasid;

	switch(error_code) {

	case PIO_READ_ERROR:
		/*
		 * Cpu got a bus error while accessing IO space.
		 * hubaddr field in ioerror structure should have
		 * the IO address that caused access error.
		 */

		/*
		 * Identify if the physical address in hub_error_data
		 * corresponds to small/large window, and accordingly,
		 * get the xtalk address.
		 */

		/*
		 * Evaluate the widget number and the widget address that
		 * caused the error. Use 'vaddr' if it's there.
		 * This is typically true either during probing
		 * or a kernel driver getting into trouble.
		 * Otherwise, use paddr to figure out widget details
		 * This is typically true for user mode bus errors while
		 * accessing I/O space.
		 */
		IOERROR_GETVALUE(p,ioerror,vaddr);
		if (p){
			/*
			 * If neither in small window nor in large window range,
			 * outright reject it.
			 */
			IOERROR_GETVALUE(p,ioerror,vaddr);
			if (NODE_SWIN_ADDR(nasid, (paddr_t)p)){
				iopaddr_t hubaddr;
				xwidgetnum_t widgetnum;
				iopaddr_t xtalkaddr;

				/* small window: widget number and offset are
				 * encoded directly in the hub address */
				IOERROR_GETVALUE(p,ioerror,hubaddr);
				hubaddr = p;
				widgetnum = SWIN_WIDGETNUM(hubaddr);
				xtalkaddr = SWIN_WIDGETADDR(hubaddr);
				/*
				 * differentiate local register vs IO space access
				 */
				IOERROR_SETVALUE(ioerror,widgetnum,widgetnum);
				IOERROR_SETVALUE(ioerror,xtalkaddr,xtalkaddr);

			} else if (NODE_BWIN_ADDR(nasid, (paddr_t)p)){
				/*
				 * Address corresponds to large window space.
				 * Convert it to xtalk address.
				 */
				int bigwin;
				hub_piomap_t bw_piomap;
				xtalk_piomap_t xt_pmap = NULL;
				iopaddr_t hubaddr;
				xwidgetnum_t widgetnum;
				iopaddr_t xtalkaddr;

				IOERROR_GETVALUE(p,ioerror,hubaddr);
				hubaddr = p;

				/*
				 * Have to loop to find the correct xtalk_piomap
				 * because the're not allocated on a one-to-one
				 * basis to the window number.
				 */
				for (bigwin=0; bigwin < HUB_NUM_BIG_WINDOW; bigwin++) {
					bw_piomap = hubinfo_bwin_piomap_get(hinfo,
									    bigwin);

					if (bw_piomap->hpio_bigwin_num ==
					    (BWIN_WINDOWNUM(hubaddr) - 1)) {
						xt_pmap = hub_piomap_xt_piomap(bw_piomap);
						break;
					}
				}

				/* NOTE(review): if no window matches, xt_pmap
				 * stays NULL and is dereferenced below when
				 * ASSERT is compiled out. */
				ASSERT(xt_pmap);

				widgetnum = xtalk_pio_target_get(xt_pmap);
				xtalkaddr = xtalk_pio_xtalk_addr_get(xt_pmap) + BWIN_WIDGETADDR(hubaddr);

				IOERROR_SETVALUE(ioerror,widgetnum,widgetnum);
				IOERROR_SETVALUE(ioerror,xtalkaddr,xtalkaddr);

				/*
				 * Make sure that widgetnum doesnot map to hub
				 * register widget number, as we never use
				 * big window to access hub registers.
				 */
				ASSERT(widgetnum != HUB_REGISTER_WIDGET);
			}
		} else if (IOERROR_FIELDVALID(ioerror,hubaddr)) {
			iopaddr_t hubaddr;
			xwidgetnum_t widgetnum;
			iopaddr_t xtalkaddr;

			IOERROR_GETVALUE(p,ioerror,hubaddr);
			hubaddr = p;
			if (BWIN_WINDOWNUM(hubaddr)){
				/* big window: decode the ITTE mapping register
				 * for this window to recover widget/offset */
				int window = BWIN_WINDOWNUM(hubaddr) - 1;
				hubreg_t itte;

				itte = (hubreg_t)HUB_L(IIO_ITTE_GET(nasid, window));
				widgetnum = (itte >> IIO_ITTE_WIDGET_SHIFT) &
					IIO_ITTE_WIDGET_MASK;
				xtalkaddr = (((itte >> IIO_ITTE_OFFSET_SHIFT) &
					      IIO_ITTE_OFFSET_MASK) <<
					     BWIN_SIZE_BITS) +
					BWIN_WIDGETADDR(hubaddr);
			} else {
				widgetnum = SWIN_WIDGETNUM(hubaddr);
				xtalkaddr = SWIN_WIDGETADDR(hubaddr);
			}
			IOERROR_SETVALUE(ioerror,widgetnum,widgetnum);
			IOERROR_SETVALUE(ioerror,xtalkaddr,xtalkaddr);
		} else {
			IOERROR_DUMP("hub_ioerror_handler", error_code,
				     mode, ioerror);
			IOERR_PRINTF(printk(
				"hub_ioerror_handler: Invalid address passed"));

			return IOERROR_INVALIDADDR;
		}

		IOERROR_GETVALUE(p,ioerror,widgetnum);
		if ((p) == HUB_REGISTER_WIDGET) {
			/*
			 * Error in accessing Hub local register
			 * This should happen mostly in SABLE mode..
			 */
			retval = 0;
		} else {
			/* Make sure that the outbound widget access for this
			 * widget is enabled.
			 */
			if (!is_widget_pio_enabled(ioerror)) {
				if (error_state_get(hub_v) ==
				    ERROR_STATE_ACTION)
					ioerror_dump("No outbound widget"
						     " access - ",
						     error_code, mode, ioerror);
				return(IOERROR_HANDLED);
			}

			retval = hub_xp_error_handler(
				hub_v, nasid, error_code, mode, ioerror);

		}

		IOERR_PRINTF(printk(
			"hub_ioerror_handler:PIO_READ_ERROR return: %d",
			retval));

		break;

	case PIO_WRITE_ERROR:
		/*
		 * This hub received an interrupt indicating a widget
		 * attached to this hub got a timeout.
		 * widgetnum field should be filled to indicate the
		 * widget that caused error.
		 *
		 * NOTE: This hub may have nothing to do with this error.
		 * We are here since the widget attached to the xbow
		 * gets its PIOs through this hub.
		 *
		 * There is nothing that can be done at this level.
		 * Just invoke the xtalk error handling mechanism.
		 */
		IOERROR_GETVALUE(p,ioerror,widgetnum);
		if ((p) == HUB_REGISTER_WIDGET) {
			/* hub-register widget: nothing to do */
		} else {
			/* Make sure that the outbound widget access for this
			 * widget is enabled.
			 */

			if (!is_widget_pio_enabled(ioerror)) {
				if (error_state_get(hub_v) ==
				    ERROR_STATE_ACTION)
					ioerror_dump("No outbound widget"
						     " access - ",
						     error_code, mode, ioerror);
				return(IOERROR_HANDLED);
			}

			retval = hub_xp_error_handler(
				hub_v, nasid, error_code, mode, ioerror);
		}
		break;

	case DMA_READ_ERROR:
		/*
		 * DMA Read error always ends up generating an interrupt
		 * at the widget level, and never at the hub level. So,
		 * we don't expect to come here any time
		 */
		ASSERT(0);
		retval = IOERROR_UNHANDLED;
		break;

	case DMA_WRITE_ERROR:
		/*
		 * DMA Write error is generated when a write by an I/O
		 * device could not be completed. Problem is, device is
		 * totally unaware of this problem, and would continue
		 * writing to system memory. So, hub has a way to send
		 * an error interrupt on the first error, and bitbucket
		 * all further write transactions.
		 * Coming here indicates that hub detected one such error,
		 * and we need to handle it.
		 *
		 * Hub interrupt handler would have extracted physaddr,
		 * widgetnum, and widgetdevice from the CRB
		 *
		 * There is nothing special to do here, since gathering
		 * data from crb's is done elsewhere. Just pass the
		 * error to xtalk layer.
		 */
		retval = hub_xp_error_handler(hub_v, nasid, error_code, mode,
					      ioerror);
		break;

	default:
		ASSERT(0);
		return IOERROR_BADERRORCODE;

	}

	/*
	 * If error was not handled, we may need to take certain action
	 * based on the error code.
	 * For e.g. in case of PIO_READ_ERROR, we may need to release the
	 * PIO Read entry table (they are sticky after errors).
	 * Similarly other cases.
	 *
	 * Further Action TBD
	 */
end:
	if (retval == IOERROR_HWGRAPH_LOOKUP) {
		/*
		 * If we get errors very early, we can't traverse
		 * the path using hardware graph.
		 * To handle this situation, we need a functions
		 * which don't depend on the hardware graph vertex to
		 * handle errors. This break the modularity of the
		 * existing code. Instead we print out the reason for
		 * not handling error, and return. On return, all the
		 * info collected would be dumped. This should provide
		 * sufficient info to analyse the error.
		 */
		printk("Unable to handle IO error: hardware graph not setup\n");
	}

	return retval;
}
/* Decode the major number out of an old-style dev_t */
#define L_BITSMINOR 18
#define L_MAXMAJ 0x1ff
#define emajor(x) (int )(((unsigned )(x)>>L_BITSMINOR) & L_MAXMAJ)

/* A hwgraph vertex handle has major number 0 */
#define dev_is_vertex(dev) (emajor((dev_t)(dev)) == 0)

/* The per-vertex error state is stored as labelled info under this label */
#define INFO_LBL_ERROR_STATE "error_state"

/* Fetch the error-state label of vertex v into s */
#define v_error_state_get(v,s) \
	(hwgraph_info_get_LBL(v,INFO_LBL_ERROR_STATE, (arbitrary_info_t *)&s))

/* Store error state s on vertex v; 'replace' selects replace vs first add */
#define v_error_state_set(v,s,replace) \
	(replace ? \
	 hwgraph_info_replace_LBL(v,INFO_LBL_ERROR_STATE,(arbitrary_info_t)s,0) :\
	 hwgraph_info_add_LBL(v,INFO_LBL_ERROR_STATE, (arbitrary_info_t)s))

/* Remove the error-state label from vertex v */
#define v_error_state_clear(v) \
	(hwgraph_info_remove_LBL(v,INFO_LBL_ERROR_STATE,0))
/*
 * error_state_get
 *	Return the error state stored on a hwgraph vertex, or
 *	ERROR_STATE_NONE when the handle is not a vertex or carries
 *	no state label.
 */
error_state_t
error_state_get(devfs_handle_t v)
{
	error_state_t state;

	/* Only hwgraph vertices can carry error-state labels */
	if (!dev_is_vertex(v))
		return ERROR_STATE_NONE;

	/* The state hangs off the vertex as labelled info */
	if (v_error_state_get(v, state) != GRAPH_SUCCESS)
		return ERROR_STATE_NONE;

	return state;
}
/*
 * error_state_set
 *	Set (or, with ERROR_STATE_NONE, clear) the error state of a
 *	hwgraph vertex.
 *	Returns ERROR_RETURN_CODE_CANNOT_SET_STATE on failure,
 *	ERROR_RETURN_CODE_SUCCESS otherwise.
 */
error_return_code_t
error_state_set(devfs_handle_t v,error_state_t new_state)
{
	error_state_t prev_state;
	boolean_t replace;

	/* Only hwgraph vertices can carry error-state labels */
	if (!dev_is_vertex(v))
		return ERROR_RETURN_CODE_GENERAL_FAILURE;

	/* ERROR_STATE_NONE means: remove any existing state label */
	if (new_state == ERROR_STATE_NONE) {
		if (v_error_state_get(v, prev_state) == GRAPH_SUCCESS)
			v_error_state_clear(v);
		return ERROR_RETURN_CODE_SUCCESS;
	}

	/* Replace the label if it already exists, otherwise add it */
	replace = (v_error_state_get(v, prev_state) == GRAPH_SUCCESS)
		? B_TRUE : B_FALSE;

	if (v_error_state_set(v, new_state, replace) != GRAPH_SUCCESS)
		return ERROR_RETURN_CODE_CANNOT_SET_STATE;

	return ERROR_RETURN_CODE_SUCCESS;
}
/* $Id$
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/sn/sgi.h>
#include <asm/sn/intr.h>
#include <asm/sn/sn2/sn_private.h>
#include <asm/sn/sn2/shubio.h>
#include <asm/sn/iograph.h>
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h>
#include <asm/sn/hack.h>
#include <asm/sn/pci/bridge.h>
#include <asm/sn/xtalk/xtalk_private.h>
#include <asm/sn/simulator.h>
/* #define DEBUG 1 */
/* #define XBOW_DEBUG 1 */
/* #define DEBUG_ERROR 1 */
/*
* Files needed to get the device driver entry points
*/
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/xtalk/xtalk.h>
#include <asm/sn/xtalk/xswitch.h>
#include <asm/sn/xtalk/xwidget.h>
#include <asm/sn/prio.h>
#include <asm/sn/hcl_util.h>
/* Allocation helpers: NEW() kmallocs an object of *ptr's type.
 * NOTE(review): the result is not NULL-checked by callers here. */
#define NEW(ptr) (ptr = kmalloc(sizeof (*(ptr)), GFP_KERNEL))
#define DEL(ptr) (kfree(ptr))

/* IRIX-style driver flag; D_MP presumably marks the driver MP-safe */
int xbow_devflag = D_MP;
/*
 * This file supports the Xbow chip. Main functions: initializtion,
 * error handling, and GBR.
 */

/*
 * each vertex corresponding to an xbow chip
 * has a "fastinfo" pointer pointing at one
 * of these things.
 */
typedef struct xbow_soft_s *xbow_soft_t;

struct xbow_soft_s {
	devfs_handle_t conn;	/* our connection point */
	devfs_handle_t vhdl;	/* xbow's private vertex */
	devfs_handle_t busv;	/* the xswitch vertex */
	xbow_t *base;		/* PIO pointer to crossbow chip */
	char *name;		/* hwgraph name */

	xbow_perf_t xbow_perfcnt[XBOW_PERF_COUNTERS];
	xbow_perf_link_t xbow_perflink[MAX_XBOW_PORTS];
	xbow_link_status_t xbow_link_status[MAX_XBOW_PORTS];
	spinlock_t xbow_perf_lock;	/* guards the perf counter state */
	int link_monitor;		/* LLP monitoring enabled? */
	widget_cfg_t *wpio[MAX_XBOW_PORTS];	/* cached PIO pointer */

	/* Bandwidth allocation state. Bandwidth values are for the
	 * destination port since contention happens there.
	 * Implicit mapping from xbow ports (8..f) -> (0..7) array indices.
	 */
	spinlock_t xbow_bw_alloc_lock;		/* bw allocation lock */
	unsigned long long bw_hiwm[MAX_XBOW_PORTS];	/* hiwater mark values */
	unsigned long long bw_cur_used[MAX_XBOW_PORTS];	/* bw used currently */
};

/* Attach/fetch the soft state from an xbow vertex's fastinfo slot */
#define xbow_soft_set(v,i)	hwgraph_fastinfo_set((v), (arbitrary_info_t)(i))
#define xbow_soft_get(v)	((xbow_soft_t)hwgraph_fastinfo_get((v)))
/*
 * Function Table of Contents
 */
void xbow_mlreset(xbow_t *);
void xbow_init(void);
int xbow_attach(devfs_handle_t);

int xbow_open(devfs_handle_t *, int, int, cred_t *);
int xbow_close(devfs_handle_t, int, int, cred_t *);

int xbow_map(devfs_handle_t, vhandl_t *, off_t, size_t, uint);
int xbow_unmap(devfs_handle_t, vhandl_t *);
int xbow_ioctl(devfs_handle_t, int, void *, int, struct cred *, int *);

int xbow_widget_present(xbow_t *, int);
static int xbow_link_alive(xbow_t *, int);
devfs_handle_t xbow_widget_lookup(devfs_handle_t, int);

void xbow_intr_preset(void *, int, xwidgetnum_t, iopaddr_t, xtalk_intr_vector_t);
void xbow_update_perf_counters(devfs_handle_t);
xbow_perf_link_t *xbow_get_perf_counters(devfs_handle_t);
int xbow_enable_perf_counter(devfs_handle_t, int, int, int);
xbow_link_status_t *xbow_get_llp_status(devfs_handle_t);
void xbow_update_llp_status(devfs_handle_t);
int xbow_disable_llp_monitor(devfs_handle_t);
int xbow_enable_llp_monitor(devfs_handle_t);
int xbow_prio_bw_alloc(devfs_handle_t, xwidgetnum_t, xwidgetnum_t,
		       unsigned long long, unsigned long long);
static void xbow_setwidint(xtalk_intr_t);

void idbg_xbowregs(int64_t);

xswitch_reset_link_f xbow_reset_link;

/* xswitch provider registered on the bus vertex by xbow_attach() */
xswitch_provider_t xbow_provider =
{
	xbow_reset_link,
};
/*
* This is the file operation table for the pcibr driver.
* As each of the functions are implemented, put the
* appropriate function name below.
*/
/* Forward declaration: mmap is the only nontrivial file operation */
static int xbow_mmap(struct file * file, struct vm_area_struct * vma);

/*
 * File operation table for the xbow driver.  Only mmap and open are
 * implemented; all other handlers stay NULL (static initialization
 * zeroes them, same as spelling each one out).
 */
struct file_operations xbow_fops = {
	owner:	THIS_MODULE,
	mmap:	xbow_mmap,
	open:	xbow_open,
};
/*
 * mmap() handler for the xbow char device: map the crossbow register
 * space (saved as the devfs private_data at registration time) into
 * the caller's address space, uncached.
 */
static int
xbow_mmap(struct file * file, struct vm_area_struct * vma)
{
	unsigned long		phys_addr;
	int			error = 0;

	/* Mask out the Uncache bits to recover the physical address */
	phys_addr = (unsigned long)file->private_data & ~0xc000000000000000;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_flags |= VM_RESERVED | VM_IO;
	error = io_remap_page_range(vma, vma->vm_start, phys_addr,
				    vma->vm_end - vma->vm_start,
				    vma->vm_page_prot);
	return(error);
}
/*
* xbow_mlreset: called at mlreset time if the
* platform specific code determines that there is
* a crossbow in a critical path that must be
* functional before the driver would normally get
* the device properly set up.
*
* what do we need to do, that the boot prom can
* not be counted on to have already done, that is
* generic across all platforms using crossbows?
*/
/*ARGSUSED */
void
xbow_mlreset(xbow_t * xbow)
{
	/* intentionally empty — see the comment above this function */
}
/*
 * xbow_init: called with the rest of the device driver XXX_init
 * routines.  This platform *might* have one or more Crossbow chips,
 * or none at all.  Register with the generic crosstalk provider so
 * the right magic happens when a chip is encountered.
 */
void
xbow_init(void)
{
	/* All crossbow flavors attach with high priority, before friends.
	 * Registration order matches the original: PIC, Xbridge, Xbow. */
	static const struct {
		int part;
		int mfgr;
	} xbow_ids[] = {
		{ PXBOW_WIDGET_PART_NUM, 0 },	/* XXBOW_WIDGET_MFGR_NUM */
		{ XXBOW_WIDGET_PART_NUM, 0 },	/* XXBOW_WIDGET_MFGR_NUM */
		{ XBOW_WIDGET_PART_NUM, XBOW_WIDGET_MFGR_NUM },
	};
	int i;

#if DEBUG && ATTACH_DEBUG
	printk("xbow_init\n");
#endif

	for (i = 0; i < 3; i++)
		xwidget_driver_register(xbow_ids[i].part, xbow_ids[i].mfgr,
					"xbow_", CDL_PRI_HI);
}
#ifdef XBRIDGE_REGS_SIM
/* xbow_set_simulated_regs: sets xbow regs as needed
 * for powering through the boot.
 * 'port' is the xbow link to fake up.
 */
void
xbow_set_simulated_regs(xbow_t *xbow, int port)
{
	/*
	 * turn on link
	 */
	xbow->xb_link(port).link_status = (1<<31);
	/*
	 * and give it a live widget too
	 */
	xbow->xb_link(port).link_aux_status = XB_AUX_STAT_PRESENT;
	/*
	 * zero the link control reg
	 */
	xbow->xb_link(port).link_control = 0x0;
}
#endif /* XBRIDGE_REGS_SIM */
/*
 * xbow_attach: the crosstalk provider has
 * determined that there is a crossbow widget
 * present, and has handed us the connection
 * point for that vertex.
 *
 * We not only add our own vertex, but add
 * some "xtalk switch" data to the switch
 * vertex (at the connect point's parent) if
 * it does not have any.
 *
 * Returns 0 on successful attach.
 */

/*ARGSUSED */
int
xbow_attach(devfs_handle_t conn)
{
	/*REFERENCED */
	devfs_handle_t vhdl;
	devfs_handle_t busv;
	xbow_t *xbow;
	xbow_soft_t soft;
	int port;
	xswitch_info_t info;
	xtalk_intr_t intr_hdl;
	char devnm[MAXDEVNAME], *s;
	xbowreg_t id;
	int rev;
	int i;
	int xbow_num;
	static void xbow_errintr_handler(int, void *, struct pt_regs *);

#if DEBUG && ATTACH_DEBUG
#if defined(SUPPORT_PRINTING_V_FORMAT)
	printk("%v: xbow_attach\n", conn);
#else
	printk("0x%x: xbow_attach\n", conn);
#endif
#endif

	/*
	 * Get a PIO pointer to the base of the crossbow
	 * chip.
	 */
#ifdef XBRIDGE_REGS_SIM
	printk("xbow_attach: XBRIDGE_REGS_SIM FIXME: allocating %ld bytes for xbow_s\n", sizeof(xbow_t));
	xbow = (xbow_t *) kmalloc(sizeof(xbow_t), GFP_KERNEL);
	/*
	 * turn on ports e and f like in a real live ibrick
	 */
	xbow_set_simulated_regs(xbow, 0xe);
	xbow_set_simulated_regs(xbow, 0xf);
#else
	xbow = (xbow_t *) xtalk_piotrans_addr(conn, 0, 0, sizeof(xbow_t), 0);
#endif /* XBRIDGE_REGS_SIM */

	/*
	 * Locate the "switch" vertex: it is the parent
	 * of our connection point.
	 */
	busv = hwgraph_connectpt_get(conn);
#if DEBUG && ATTACH_DEBUG
	printk("xbow_attach: Bus Vertex 0x%p, conn 0x%p, xbow register 0x%p wid= 0x%x\n", busv, conn, xbow, *(volatile u32 *)xbow);
#endif

	ASSERT(busv != GRAPH_VERTEX_NONE);

	/*
	 * Create our private vertex, and connect our
	 * driver information to it. This makes it possible
	 * for diagnostic drivers to open the crossbow
	 * vertex for access to registers.
	 */

	/*
	 * Register a xbow driver with devfs.
	 * file ops.  The xbow PIO base is handed over as
	 * private_data for xbow_mmap().
	 */
	vhdl = NULL;
	vhdl = devfs_register(conn, EDGE_LBL_XBOW,
			      DEVFS_FL_AUTO_DEVNUM, 0, 0,
			      S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP,
			      &xbow_fops, (void *)xbow);
	if (!vhdl) {
		printk(KERN_WARNING "xbow_attach: Unable to create char device for xbow conn %p\n",
		       (void *)conn);
	}

	/*
	 * Allocate the soft state structure and attach
	 * it to the xbow's vertex
	 * NOTE(review): NEW() is an unchecked kmalloc; a failure here
	 * would oops on the assignments below.
	 */
	NEW(soft);
	soft->conn = conn;
	soft->vhdl = vhdl;
	soft->busv = busv;
	soft->base = xbow;
	/* does the universe really need another macro? */
	/* xbow_soft_set(vhdl, (arbitrary_info_t) soft); */
	/* hwgraph_fastinfo_set(vhdl, (arbitrary_info_t) soft); */

#define XBOW_NUM_SUFFIX_FORMAT	"[xbow# %d]"

	/* Add xbow number as a suffix to the hwgraph name of the xbow.
	 * This is helpful while looking at the error/warning messages.
	 */
	xbow_num = 0;

	/*
	 * get the name of this xbow vertex and keep the info.
	 * This is needed during errors and interupts, but as
	 * long as we have it, we can use it elsewhere.
	 * NOTE(review): the name kmalloc below is also unchecked.
	 */
	s = dev_to_name(vhdl, devnm, MAXDEVNAME);
	soft->name = kmalloc(strlen(s) + strlen(XBOW_NUM_SUFFIX_FORMAT) + 1,
			     GFP_KERNEL);
	sprintf(soft->name,"%s"XBOW_NUM_SUFFIX_FORMAT, s,xbow_num);

#ifdef XBRIDGE_REGS_SIM
	/* my o200/ibrick has id=0x2d002049, but XXBOW_WIDGET_PART_NUM is defined
	 * as 0xd000, so I'm using that for the partnum bitfield.
	 */
	printk("xbow_attach: XBRIDGE_REGS_SIM FIXME: need xb_wid_id value!!\n");
	id = 0x2d000049;
#else
	id = xbow->xb_wid_id;
#endif /* XBRIDGE_REGS_SIM */
	rev = XWIDGET_PART_REV_NUM(id);

	mutex_spinlock_init(&soft->xbow_perf_lock);
	soft->xbow_perfcnt[0].xp_perf_reg = &xbow->xb_perf_ctr_a;
	soft->xbow_perfcnt[1].xp_perf_reg = &xbow->xb_perf_ctr_b;

	/* Initialization for GBR bw allocation */
	mutex_spinlock_init(&soft->xbow_bw_alloc_lock);

#define	XBOW_8_BIT_PORT_BW_MAX		(400 * 1000 * 1000)	/* 400 MB/s */
#define XBOW_16_BIT_PORT_BW_MAX		(800 * 1000 * 1000)	/* 800 MB/s */

	/* Set bandwidth hiwatermark and current values */
	for (i = 0; i < MAX_XBOW_PORTS; i++) {
		soft->bw_hiwm[i] = XBOW_16_BIT_PORT_BW_MAX;	/* for now */
		soft->bw_cur_used[i] = 0;
	}

	/*
	 * attach the crossbow error interrupt.
	 */
	intr_hdl = xtalk_intr_alloc(conn, (device_desc_t)0, vhdl);
	ASSERT(intr_hdl != NULL);

	xtalk_intr_connect(intr_hdl,
			   (intr_func_t) xbow_errintr_handler,
			   (intr_arg_t) soft,
			   (xtalk_intr_setfunc_t) xbow_setwidint,
			   (void *) xbow);

	request_irq(CPU_VECTOR_TO_IRQ(((hub_intr_t)intr_hdl)->i_cpuid,
				      ((hub_intr_t)intr_hdl)->i_bit),
		    (intr_func_t)xbow_errintr_handler, 0, "XBOW error",
		    (intr_arg_t) soft);

#ifdef BUS_INT_WAR_NOT_YET
	{
		void sn_add_polled_interrupt(int, int);

		sn_add_polled_interrupt(CPU_VECTOR_TO_IRQ(((hub_intr_t)intr_hdl)->i_cpuid,
							  ((hub_intr_t)intr_hdl)->i_bit), 5000);
	}
#endif

	/*
	 * Enable xbow error interrupts
	 */
	xbow->xb_wid_control = (XB_WID_CTRL_REG_ACC_IE | XB_WID_CTRL_XTALK_IE);

	/*
	 * take a census of the widgets present,
	 * leaving notes at the switch vertex.
	 */
	info = xswitch_info_new(busv);

	for (port = MAX_PORT_NUM - MAX_XBOW_PORTS;
	     port < MAX_PORT_NUM; ++port) {
		if (!xbow_link_alive(xbow, port)) {
#if DEBUG && XBOW_DEBUG
			printk(KERN_INFO "0x%p link %d is not alive\n",
			       (void *)busv, port);
#endif
			continue;
		}
		if (!xbow_widget_present(xbow, port)) {
#if DEBUG && XBOW_DEBUG
			printk(KERN_INFO "0x%p link %d is alive but no widget is present\n", (void *)busv, port);
#endif
			continue;
		}
#if DEBUG && XBOW_DEBUG
		printk(KERN_INFO "0x%p link %d has a widget\n",
		       (void *)busv, port);
#endif

		xswitch_info_link_is_ok(info, port);
		/*
		 * Turn some error interrupts on
		 * and turn others off. The PROM has
		 * some things turned on we don't
		 * want to see (bandwidth allocation
		 * errors for instance); so if it
		 * is not listed here, it is not on.
		 */
		xbow->xb_link(port).link_control =
			( (xbow->xb_link(port).link_control
			   /*
			    * Turn off these bits; they are non-fatal,
			    * but we might want to save some statistics
			    * on the frequency of these errors.
			    * XXX FIXME XXX
			    */
			   & ~XB_CTRL_RCV_CNT_OFLOW_IE
			   & ~XB_CTRL_XMT_CNT_OFLOW_IE
			   & ~XB_CTRL_BNDWDTH_ALLOC_IE
			   & ~XB_CTRL_RCV_IE)
			  /*
			   * These are the ones we want to turn on.
			   */
			  | (XB_CTRL_ILLEGAL_DST_IE
			     | XB_CTRL_OALLOC_IBUF_IE
			     | XB_CTRL_XMT_MAX_RTRY_IE
			     | XB_CTRL_MAXREQ_TOUT_IE
			     | XB_CTRL_XMT_RTRY_IE
			     | XB_CTRL_SRC_TOUT_IE) );
	}

	xswitch_provider_register(busv, &xbow_provider);

	return 0;		/* attach successful */
}
/*ARGSUSED */
/* open() for the xbow device: nothing to set up, always succeeds. */
int
xbow_open(devfs_handle_t *devp, int oflag, int otyp, cred_t *credp)
{
	return 0;
}
/*ARGSUSED */
/* close() for the xbow device: nothing to tear down, always succeeds. */
int
xbow_close(devfs_handle_t dev, int oflag, int otyp, cred_t *crp)
{
	return 0;
}
/*ARGSUSED */
/*
 * Map the xbow register base into the caller's address space.
 * Length is rounded up to whole pages.
 */
int
xbow_map(devfs_handle_t dev, vhandl_t *vt, off_t off, size_t len, uint prot)
{
	devfs_handle_t vhdl = dev_to_vhdl(dev);
	xbow_soft_t soft = xbow_soft_get(vhdl);
	int error;

	ASSERT(soft);
	len = ctob(btoc(len));
	/* XXX- this ignores the offset!!! */
	error = v_mapphys(vt, (void *) soft->base, len);
	return error;
}
/*ARGSUSED */
/* Companion to xbow_map(); no per-mapping state to release. */
int
xbow_unmap(devfs_handle_t dev, vhandl_t *vt)
{
	return 0;
}
/* This contains special-case code for grio. There are plans to make
 * this general sometime in the future, but till then this should
 * be good enough.
 *
 * For a "pci/controller" vertex, walk two ".." hwgraph links up to
 * the widget vertex and return its widget number; XWIDGET_NONE for
 * anything else or on any lookup failure.
 */
xwidgetnum_t
xbow_widget_num_get(devfs_handle_t dev)
{
	devfs_handle_t cur;
	char name[MAXDEVNAME];
	xwidget_info_t winfo;
	int hops;

	vertex_to_name(dev, name, MAXDEVNAME);

	/* Only pci controller vertices are handled */
	if (!strstr(name, EDGE_LBL_PCI) || !strstr(name, EDGE_LBL_CONTROLLER))
		return XWIDGET_NONE;

	/* Traverse up two ".." links to reach the widget vertex */
	cur = dev;
	for (hops = 0; hops < 2; hops++) {
		if (hwgraph_edge_get(cur,
				     HWGRAPH_EDGELBL_DOTDOT, &cur) !=
		    GRAPH_SUCCESS)
			return XWIDGET_NONE;
	}

	winfo = xwidget_info_chk(cur);
	if (winfo == NULL)
		return XWIDGET_NONE;

	return xwidget_info_id_get(winfo);
}
/*
 * ioctl handler for the xbow device: enable/disable LLP error
 * monitoring.  Note this returns positive EINVAL (IRIX-style), not
 * -EINVAL — kept as is since callers may depend on it.
 */
int
xbow_ioctl(devfs_handle_t dev,
	   int cmd,
	   void *arg,
	   int flag,
	   struct cred *cr,
	   int *rvalp)
{
	devfs_handle_t vhdl;
	int error = 0;

#if defined (DEBUG)
	int rc;
	devfs_handle_t conn;
	struct xwidget_info_s *xwidget_info;
	xbow_soft_t xbow_soft;
#endif
	*rvalp = 0;

	vhdl = dev_to_vhdl(dev);
#if defined (DEBUG)
	/* sanity check: this vertex really is an xswitch */
	xbow_soft = xbow_soft_get(vhdl);
	conn = xbow_soft->conn;

	xwidget_info = xwidget_info_get(conn);
	ASSERT_ALWAYS(xwidget_info != NULL);

	rc = xwidget_hwid_is_xswitch(&xwidget_info->w_hwid);
	ASSERT_ALWAYS(rc != 0);
#endif
	switch (cmd) {

	case XBOWIOC_LLP_ERROR_ENABLE:
		if ((error = xbow_enable_llp_monitor(vhdl)) != 0)
			error = EINVAL;

		break;

	case XBOWIOC_LLP_ERROR_DISABLE:

		if ((error = xbow_disable_llp_monitor(vhdl)) != 0)
			error = EINVAL;

		break;

	default:
		break;

	}
	return error;
}
/*
 * xbow_widget_present: report whether a device is present on the
 * specified port of this crossbow.
 */
int
xbow_widget_present(xbow_t *xbow, int port)
{
	/* The simulator models widgets only on ports 14 and 15 */
	if (IS_RUNNING_ON_SIMULATOR())
		return (port == 14 || port == 15) ? 1 : 0;

	/* WAR: port 0xf on PIC is missing present bit */
	if (XBOW_WAR_ENABLED(PV854827, xbow->xb_wid_id) &&
	    IS_PIC_XBOW(xbow->xb_wid_id) && port == 0xf)
		return 1;

	return xbow->xb_link(port).link_aux_status & XB_AUX_STAT_PRESENT;
}
/* Report whether the link on 'port' is up (link_alive status bit). */
static int
xbow_link_alive(xbow_t * xbow, int port)
{
	xbwX_stat_t stat;

	stat.linkstatus = xbow->xb_link(port).link_status;
	return stat.link_alive;
}
/*
 * xbow_widget_lookup
 *	Map a widget number on this xbow's bus to the connection-point
 *	vertex recorded in the xswitch info.  Returns 0 if not found.
 */
devfs_handle_t
xbow_widget_lookup(devfs_handle_t vhdl,
		   int widgetnum)
{
	xswitch_info_t swinfo = xswitch_info_get(vhdl);

	return xswitch_info_vhdl_get(swinfo, widgetnum);
}
/*
* xbow_setwidint: called when xtalk
* is establishing or migrating our
* interrupt service.
*/
/* xtalk callback used when interrupt service for this xbow is being
 * established or migrated: extract the routing parameters from the
 * xtalk interrupt object and program them via xbow_intr_preset()
 * (interrupt number 0 — the xbow has a single interrupt source).
 */
static void
xbow_setwidint(xtalk_intr_t intr)
{
	xwidgetnum_t targ = xtalk_intr_target_get(intr);
	iopaddr_t addr = xtalk_intr_addr_get(intr);
	xtalk_intr_vector_t vect = xtalk_intr_vector_get(intr);
	xbow_t *xbow = (xbow_t *) xtalk_intr_sfarg_get(intr);

	xbow_intr_preset((void *) xbow, 0, targ, addr, vect);
}
/*
* xbow_intr_preset: called during mlreset time
* if the platform specific code needs to route
* an xbow interrupt before the xtalk infrastructure
* is available for use.
*
* Also called from xbow_setwidint, so we don't
* replicate the guts of the routine.
*
* XXX- probably should be renamed xbow_wid_intr_set or
* something to reduce confusion.
*/
/*ARGSUSED3 */
void
xbow_intr_preset(void *which_widget,
int which_widget_intr,
xwidgetnum_t targ,
iopaddr_t addr,
xtalk_intr_vector_t vect)
{
xbow_t *xbow = (xbow_t *) which_widget;
xbow->xb_wid_int_upper = ((0xFF000000 & (vect << 24)) |
(0x000F0000 & (targ << 16)) |
XTALK_ADDR_TO_UPPER(addr));
xbow->xb_wid_int_lower = XTALK_ADDR_TO_LOWER(addr);
}
/* Console-dump helpers used by the error reporting paths below. */
#define XEM_ADD_STR(s)		printk("%s", (s))
#define XEM_ADD_NVAR(n,v)	printk("\t%20s: 0x%llx\n", (n), ((unsigned long long)v))
#define XEM_ADD_VAR(v)		XEM_ADD_NVAR(#v,(v))
/* Dump ioerror field 'n' (through scratch lvalue 'p') only when the
 * field has been marked valid; expects an ioerror_t named 'ioe' in
 * the caller's scope.
 */
#define XEM_ADD_IOEF(p,n)	if (IOERROR_FIELDVALID(ioe,n)) { \
				    IOERROR_GETVALUE(p,ioe,n); \
				    XEM_ADD_NVAR("ioe." #n, p); \
				}
#ifdef LATER
/* Dump every valid field of an ioerror_t to the console.
 * Currently compiled out (LATER); XEM_ADD_IOE() below is only
 * referenced from similarly guarded code.
 */
static void
xem_add_ioe(ioerror_t *ioe)
{
	/* Scratch union: one member per field type XEM_ADD_IOEF reads. */
	union tmp {
		ushort stmp;
		unsigned long long lltmp;
		cpuid_t cputmp;
		cnodeid_t cntmp;
		iopaddr_t iotmp;
		caddr_t catmp;
		paddr_t patmp;
	} tmp;

	XEM_ADD_IOEF(tmp.stmp, errortype);
	XEM_ADD_IOEF(tmp.stmp, widgetnum);
	XEM_ADD_IOEF(tmp.stmp, widgetdev);
	XEM_ADD_IOEF(tmp.cputmp, srccpu);
	XEM_ADD_IOEF(tmp.cntmp, srcnode);
	XEM_ADD_IOEF(tmp.cntmp, errnode);
	XEM_ADD_IOEF(tmp.iotmp, sysioaddr);
	XEM_ADD_IOEF(tmp.iotmp, xtalkaddr);
	XEM_ADD_IOEF(tmp.iotmp, busspace);
	XEM_ADD_IOEF(tmp.iotmp, busaddr);
	XEM_ADD_IOEF(tmp.catmp, vaddr);
	XEM_ADD_IOEF(tmp.patmp, memaddr);
	XEM_ADD_IOEF(tmp.catmp, epc);
	XEM_ADD_IOEF(tmp.catmp, ef);
	XEM_ADD_IOEF(tmp.stmp, tnum);
}

#define XEM_ADD_IOE()	(xem_add_ioe(ioe))
#endif	/* LATER */
/* Running count of LLP Transmit Retry interrupts cleaned up by the
 * Bridge WAR below.
 */
int xbow_xmit_retry_errors = 0;

/*
 * xbow_xmit_retry_error: apply the Xbow/Bridge "LLP Transmit Retry"
 * workaround for the widget on 'port'.
 * Returns 1 when the caller should keep the interrupt enabled (the
 * widget is a (X)Bridge and was patched, or we couldn't identify it
 * yet); returns 0 when the widget is known not to need the WAR.
 */
int
xbow_xmit_retry_error(xbow_soft_t soft,
		      int port)
{
	xswitch_info_t info;
	devfs_handle_t vhdl;
	widget_cfg_t *wid;
	widgetreg_t id;
	int part;
	int mfgr;

	wid = soft->wpio[port - BASE_XBOW_PORT];
	if (wid == NULL) {
		/* If we can't track down a PIO
		 * pointer to our widget yet,
		 * leave our caller knowing that
		 * we are interested in this
		 * interrupt if it occurs in
		 * the future.
		 */
		info = xswitch_info_get(soft->busv);
		if (!info)
			return 1;
		vhdl = xswitch_info_vhdl_get(info, port);
		if (vhdl == GRAPH_VERTEX_NONE)
			return 1;
		wid = (widget_cfg_t *) xtalk_piotrans_addr
		    (vhdl, 0, 0, sizeof *wid, 0);
		if (!wid)
			return 1;
		/* Cache the translation for subsequent interrupts. */
		soft->wpio[port - BASE_XBOW_PORT] = wid;
	}
	id = wid->w_id;
	part = XWIDGET_PART_NUM(id);
	mfgr = XWIDGET_MFG_NUM(id);

	/* If this thing is not a Bridge,
	 * do not activate the WAR, and
	 * tell our caller we do not need
	 * to be called again.
	 */
	if ((part != BRIDGE_WIDGET_PART_NUM) ||
	    (mfgr != BRIDGE_WIDGET_MFGR_NUM)) {
		/* FIXME: add Xbridge to the WAR.
		 * Shouldn't hurt anything.  Later need to
		 * check if we can remove this.
		 */
		if ((part != XBRIDGE_WIDGET_PART_NUM) ||
		    (mfgr != XBRIDGE_WIDGET_MFGR_NUM))
			return 0;
	}
	/* count how many times we
	 * have picked up after
	 * LLP Transmit problems.
	 */
	xbow_xmit_retry_errors++;

	/* rewrite the control register
	 * to fix things up.
	 * (The write-back of the register to itself is the WAR;
	 * the bare read that follows stalls until the PIO write
	 * has reached the widget.)
	 */
	wid->w_control = wid->w_control;
	wid->w_control;
	return 1;
}
/*
* xbow_errintr_handler will be called if the xbow
* sends an interrupt request to report an error.
*/
static void
xbow_errintr_handler(int irq, void *arg, struct pt_regs *ep)
{
	ioerror_t ioe[1];
	xbow_soft_t soft = (xbow_soft_t) arg;
	xbow_t *xbow = soft->base;
	xbowreg_t wid_control;
	xbowreg_t wid_stat;
	xbowreg_t wid_err_cmdword;
	xbowreg_t wid_err_upper;
	xbowreg_t wid_err_lower;
	w_err_cmd_word_u wid_err;
	unsigned long long wid_err_addr;

	int fatal = 0;
	int dump_ioe = 0;
	/* Forward declaration; xbow_error_handler is defined below. */
	static int xbow_error_handler(void *, int, ioerror_mode_t, ioerror_t *);

	/* Snapshot widget-0 error state.  NB: reading xb_wid_stat_clr
	 * clears the latched status bits as a side effect, and the
	 * error-command-word register is re-armed by writing 0.
	 */
	wid_control = xbow->xb_wid_control;
	wid_stat = xbow->xb_wid_stat_clr;
	wid_err_cmdword = xbow->xb_wid_err_cmdword;
	wid_err_upper = xbow->xb_wid_err_upper;
	wid_err_lower = xbow->xb_wid_err_lower;
	xbow->xb_wid_err_cmdword = 0;

	/* Reassemble the full crosstalk error address from the
	 * upper/lower register halves.
	 */
	wid_err_addr = wid_err_lower | (((iopaddr_t) wid_err_upper & WIDGET_ERR_UPPER_ADDR_ONLY) << 32);

	if (wid_stat & XB_WID_STAT_LINK_INTR_MASK) {
		int port;

		wid_err.r = wid_err_cmdword;

		/* Walk each xbow port (widget 0x8 .. 0xf) with a
		 * pending link interrupt.
		 */
		for (port = MAX_PORT_NUM - MAX_XBOW_PORTS;
		     port < MAX_PORT_NUM; port++) {
			if (wid_stat & XB_WID_STAT_LINK_INTR(port)) {
				xb_linkregs_t *link = &(xbow->xb_link(port));
				xbowreg_t link_control = link->link_control;
				/* link_status_clr read clears the latched bits */
				xbowreg_t link_status = link->link_status_clr;
				xbowreg_t link_aux_status = link->link_aux_status;
				xbowreg_t link_pend;

				/* Only errors whose interrupt enables are
				 * set in link_control are "pending" here.
				 */
				link_pend = link_status & link_control &
				    (XB_STAT_ILLEGAL_DST_ERR
				     | XB_STAT_OALLOC_IBUF_ERR
				     | XB_STAT_RCV_CNT_OFLOW_ERR
				     | XB_STAT_XMT_CNT_OFLOW_ERR
				     | XB_STAT_XMT_MAX_RTRY_ERR
				     | XB_STAT_RCV_ERR
				     | XB_STAT_XMT_RTRY_ERR
				     | XB_STAT_MAXREQ_TOUT_ERR
				     | XB_STAT_SRC_TOUT_ERR
				    );

				if (link_pend & XB_STAT_ILLEGAL_DST_ERR) {
					/* If the captured command came from
					 * this port, hand it to the widget's
					 * error path as a DMA error; on
					 * success the bit is considered
					 * handled.
					 */
					if (wid_err.f.sidn == port) {
						IOERROR_INIT(ioe);
						IOERROR_SETVALUE(ioe, widgetnum, port);
						IOERROR_SETVALUE(ioe, xtalkaddr, wid_err_addr);
						if (IOERROR_HANDLED ==
						    xbow_error_handler(soft,
								       IOECODE_DMA,
								       MODE_DEVERROR,
								       ioe)) {
							link_pend &= ~XB_STAT_ILLEGAL_DST_ERR;
						} else {
							dump_ioe++;
						}
					}
				}
				/* Xbow/Bridge WAR:
				 * if the bridge signals an LLP Transmitter Retry,
				 * rewrite its control register.
				 * If someone else triggers this interrupt,
				 * ignore (and disable) the interrupt.
				 */
				if (link_pend & XB_STAT_XMT_RTRY_ERR) {
					if (!xbow_xmit_retry_error(soft, port)) {
						link_control &= ~XB_CTRL_XMT_RTRY_IE;
						link->link_control = link_control;
						link->link_control;	/* stall until written */
					}
					link_pend &= ~XB_STAT_XMT_RTRY_ERR;
				}
				if (link_pend) {
					devfs_handle_t xwidget_vhdl;
					char *xwidget_name;

					/* Get the widget name corresponding to the current
					 * xbow link.
					 */
					xwidget_vhdl = xbow_widget_lookup(soft->busv,port);
					xwidget_name = xwidget_name_get(xwidget_vhdl);

					/* Report every remaining error bit. */
					printk("%s port %X[%s] XIO Bus Error",
						soft->name, port, xwidget_name);
					if (link_status & XB_STAT_MULTI_ERR)
						XEM_ADD_STR("\tMultiple Errors\n");
					if (link_status & XB_STAT_ILLEGAL_DST_ERR)
						XEM_ADD_STR("\tInvalid Packet Destination\n");
					if (link_status & XB_STAT_OALLOC_IBUF_ERR)
						XEM_ADD_STR("\tInput Overallocation Error\n");
					if (link_status & XB_STAT_RCV_CNT_OFLOW_ERR)
						XEM_ADD_STR("\tLLP receive error counter overflow\n");
					if (link_status & XB_STAT_XMT_CNT_OFLOW_ERR)
						XEM_ADD_STR("\tLLP transmit retry counter overflow\n");
					if (link_status & XB_STAT_XMT_MAX_RTRY_ERR)
						XEM_ADD_STR("\tLLP Max Transmitter Retry\n");
					if (link_status & XB_STAT_RCV_ERR)
						XEM_ADD_STR("\tLLP Receiver error\n");
					if (link_status & XB_STAT_XMT_RTRY_ERR)
						XEM_ADD_STR("\tLLP Transmitter Retry\n");
					if (link_status & XB_STAT_MAXREQ_TOUT_ERR)
						XEM_ADD_STR("\tMaximum Request Timeout\n");
					if (link_status & XB_STAT_SRC_TOUT_ERR)
						XEM_ADD_STR("\tSource Timeout Error\n");

					{
						int other_port;

						/* aux status bits 8..15 flag the
						 * peer port involved in a timeout.
						 */
						for (other_port = 8; other_port < 16; ++other_port) {
							if (link_aux_status & (1 << other_port)) {
								/* XXX- need to go to "other_port"
								 * and clean up after the timeout?
								 */
								XEM_ADD_VAR(other_port);
							}
						}
					}

					/* NB: the #if !DEBUG trick below opens the
					 * if (kdebug) brace only in non-DEBUG builds;
					 * DEBUG builds always dump the registers.
					 */
#if !DEBUG
					if (kdebug) {
#endif
						XEM_ADD_VAR(link_control);
						XEM_ADD_VAR(link_status);
						XEM_ADD_VAR(link_aux_status);

#ifdef LATER
						if (dump_ioe) {
							XEM_ADD_IOE();
							dump_ioe = 0;
						}
#endif
#if !DEBUG
					}
#endif
					fatal++;
				}
			}
		}
	}
	if (wid_stat & wid_control & XB_WID_STAT_WIDGET0_INTR) {
		/* we have a "widget zero" problem */

		if (wid_stat & (XB_WID_STAT_MULTI_ERR
				| XB_WID_STAT_XTALK_ERR
				| XB_WID_STAT_REG_ACC_ERR)) {

			printk("%s Port 0 XIO Bus Error",
				soft->name);
			if (wid_stat & XB_WID_STAT_MULTI_ERR)
				XEM_ADD_STR("\tMultiple Error\n");
			if (wid_stat & XB_WID_STAT_XTALK_ERR)
				XEM_ADD_STR("\tXIO Error\n");
			if (wid_stat & XB_WID_STAT_REG_ACC_ERR)
				XEM_ADD_STR("\tRegister Access Error\n");

			fatal++;
		}
	}
	if (fatal) {
		/* Any unhandled error above is considered unrecoverable. */
		XEM_ADD_VAR(wid_stat);
		XEM_ADD_VAR(wid_control);
		XEM_ADD_VAR(wid_err_cmdword);
		XEM_ADD_VAR(wid_err_upper);
		XEM_ADD_VAR(wid_err_lower);
		XEM_ADD_VAR(wid_err_addr);
		PRINT_PANIC("XIO Bus Error");
	}
}
/*
* XBOW ERROR Handling routines.
* These get invoked as part of walking down the error handling path
* from hub/heart towards the I/O device that caused the error.
*/
/*
* xbow_error_handler
* XBow error handling dispatch routine.
* This is the primary interface used by external world to invoke
* in case of an error related to a xbow.
* Only functionality in this layer is to identify the widget handle
 * given the widgetnum. Otherwise, the xbow does not gather any error
* data.
*/
static int
xbow_error_handler(
void *einfo,
int error_code,
ioerror_mode_t mode,
ioerror_t *ioerror)
{
int retval = IOERROR_WIDGETLEVEL;
xbow_soft_t soft = (xbow_soft_t) einfo;
int port;
devfs_handle_t conn;
devfs_handle_t busv;
xbow_t *xbow = soft->base;
xbowreg_t wid_stat;
xbowreg_t wid_err_cmdword;
xbowreg_t wid_err_upper;
xbowreg_t wid_err_lower;
unsigned long long wid_err_addr;
xb_linkregs_t *link;
xbowreg_t link_control;
xbowreg_t link_status;
xbowreg_t link_aux_status;
ASSERT(soft != 0);
busv = soft->busv;
#if DEBUG && ERROR_DEBUG
printk("%s: xbow_error_handler\n", soft->name, busv);
#endif
IOERROR_GETVALUE(port, ioerror, widgetnum);
if (port == 0) {
/* error during access to xbow:
* do NOT attempt to access xbow regs.
*/
if (mode == MODE_DEVPROBE)
return IOERROR_HANDLED;
if (error_code & IOECODE_DMA) {
printk(KERN_ALERT
"DMA error blamed on Crossbow at %s\n"
"\tbut Crosbow never initiates DMA!",
soft->name);
}
if (error_code & IOECODE_PIO) {
iopaddr_t tmp;
IOERROR_GETVALUE(tmp, ioerror, xtalkaddr);
printk(KERN_ALERT "PIO Error on XIO Bus %s\n"
"\tattempting to access XIO controller\n"
"\twith offset 0x%lx",
soft->name, tmp);
}
/* caller will dump contents of ioerror
* in DEBUG and kdebug kernels.
*/
return retval;
}
/*
* error not on port zero:
* safe to read xbow registers.
*/
wid_stat = xbow->xb_wid_stat;
wid_err_cmdword = xbow->xb_wid_err_cmdword;
wid_err_upper = xbow->xb_wid_err_upper;
wid_err_lower = xbow->xb_wid_err_lower;
wid_err_addr =
wid_err_lower
| (((iopaddr_t) wid_err_upper
& WIDGET_ERR_UPPER_ADDR_ONLY)
<< 32);
if ((port < BASE_XBOW_PORT) ||
(port >= MAX_PORT_NUM)) {
if (mode == MODE_DEVPROBE)
return IOERROR_HANDLED;
if (error_code & IOECODE_DMA) {
printk(KERN_ALERT
"DMA error blamed on XIO port at %s/%d\n"
"\tbut Crossbow does not support that port",
soft->name, port);
}
if (error_code & IOECODE_PIO) {
iopaddr_t tmp;
IOERROR_GETVALUE(tmp, ioerror, xtalkaddr);
printk(KERN_ALERT
"PIO Error on XIO Bus %s\n"
"\tattempting to access XIO port %d\n"
"\t(which Crossbow does not support)"
"\twith offset 0x%lx",
soft->name, port, tmp);
}
#if !DEBUG
if (kdebug) {
#endif
XEM_ADD_STR("Raw status values for Crossbow:\n");
XEM_ADD_VAR(wid_stat);
XEM_ADD_VAR(wid_err_cmdword);
XEM_ADD_VAR(wid_err_upper);
XEM_ADD_VAR(wid_err_lower);
XEM_ADD_VAR(wid_err_addr);
#if !DEBUG
}
#endif
/* caller will dump contents of ioerror
* in DEBUG and kdebug kernels.
*/
return retval;
}
/* access to valid port:
* ok to check port status.
*/
link = &(xbow->xb_link(port));
link_control = link->link_control;
link_status = link->link_status;
link_aux_status = link->link_aux_status;
/* Check that there is something present
* in that XIO port.
*/
/* WAR: PIC widget 0xf is missing prescense bit */
if (XBOW_WAR_ENABLED(PV854827, xbow->xb_wid_id) &&
IS_PIC_XBOW(xbow->xb_wid_id) && (port==0xf))
;
else
if (!(link_aux_status & XB_AUX_STAT_PRESENT)) {
/* nobody connected. */
if (mode == MODE_DEVPROBE)
return IOERROR_HANDLED;
if (error_code & IOECODE_DMA) {
printk(KERN_ALERT
"DMA error blamed on XIO port at %s/%d\n"
"\tbut there is no device connected there.",
soft->name, port);
}
if (error_code & IOECODE_PIO) {
iopaddr_t tmp;
IOERROR_GETVALUE(tmp, ioerror, xtalkaddr);
printk(KERN_ALERT
"PIO Error on XIO Bus %s\n"
"\tattempting to access XIO port %d\n"
"\t(which has no device connected)"
"\twith offset 0x%lx",
soft->name, port, tmp);
}
#if !DEBUG
if (kdebug) {
#endif
XEM_ADD_STR("Raw status values for Crossbow:\n");
XEM_ADD_VAR(wid_stat);
XEM_ADD_VAR(wid_err_cmdword);
XEM_ADD_VAR(wid_err_upper);
XEM_ADD_VAR(wid_err_lower);
XEM_ADD_VAR(wid_err_addr);
XEM_ADD_VAR(port);
XEM_ADD_VAR(link_control);
XEM_ADD_VAR(link_status);
XEM_ADD_VAR(link_aux_status);
#if !DEBUG
}
#endif
return retval;
}
/* Check that the link is alive.
*/
if (!(link_status & XB_STAT_LINKALIVE)) {
iopaddr_t tmp;
/* nobody connected. */
if (mode == MODE_DEVPROBE)
return IOERROR_HANDLED;
printk(KERN_ALERT
"%s%sError on XIO Bus %s port %d",
(error_code & IOECODE_DMA) ? "DMA " : "",
(error_code & IOECODE_PIO) ? "PIO " : "",
soft->name, port);
IOERROR_GETVALUE(tmp, ioerror, xtalkaddr);
if ((error_code & IOECODE_PIO) &&
(IOERROR_FIELDVALID(ioerror, xtalkaddr))) {
printk("\tAccess attempted to offset 0x%lx\n", tmp);
}
if (link_aux_status & XB_AUX_LINKFAIL_RST_BAD)
XEM_ADD_STR("\tLink never came out of reset\n");
else
XEM_ADD_STR("\tLink failed while transferring data\n");
}
/* get the connection point for the widget
* involved in this error; if it exists and
* is not our connectpoint, cycle back through
* xtalk_error_handler to deliver control to
* the proper handler (or to report a generic
* crosstalk error).
*
* If the downstream handler won't handle
* the problem, we let our upstream caller
* deal with it, after (in DEBUG and kdebug
* kernels) dumping the xbow state for this
* port.
*/
conn = xbow_widget_lookup(busv, port);
if ((conn != GRAPH_VERTEX_NONE) &&
(conn != soft->conn)) {
retval = xtalk_error_handler(conn, error_code, mode, ioerror);
if (retval == IOERROR_HANDLED)
return IOERROR_HANDLED;
}
if (mode == MODE_DEVPROBE)
return IOERROR_HANDLED;
if (retval == IOERROR_UNHANDLED) {
iopaddr_t tmp;
retval = IOERROR_PANIC;
printk(KERN_ALERT
"%s%sError on XIO Bus %s port %d",
(error_code & IOECODE_DMA) ? "DMA " : "",
(error_code & IOECODE_PIO) ? "PIO " : "",
soft->name, port);
IOERROR_GETVALUE(tmp, ioerror, xtalkaddr);
if ((error_code & IOECODE_PIO) &&
(IOERROR_FIELDVALID(ioerror, xtalkaddr))) {
printk("\tAccess attempted to offset 0x%lx\n", tmp);
}
}
#if !DEBUG
if (kdebug) {
#endif
XEM_ADD_STR("Raw status values for Crossbow:\n");
XEM_ADD_VAR(wid_stat);
XEM_ADD_VAR(wid_err_cmdword);
XEM_ADD_VAR(wid_err_upper);
XEM_ADD_VAR(wid_err_lower);
XEM_ADD_VAR(wid_err_addr);
XEM_ADD_VAR(port);
XEM_ADD_VAR(link_control);
XEM_ADD_VAR(link_status);
XEM_ADD_VAR(link_aux_status);
#if !DEBUG
}
#endif
/* caller will dump raw ioerror data
* in DEBUG and kdebug kernels.
*/
return retval;
}
/* Fold the hardware performance counter deltas for every active
 * monitor into the per-link cumulative totals.  The counter read is
 * done under xbow_perf_lock; the hardware count wraps, hence the
 * XBOW_COUNTER_MASK on the difference.
 */
void
xbow_update_perf_counters(devfs_handle_t vhdl)
{
	xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
	xbow_perf_t *xbow_perf = xbow_soft->xbow_perfcnt;
	xbow_perf_link_t *xbow_plink = xbow_soft->xbow_perflink;
	xbow_perfcount_t perf_reg;
	unsigned long s;
	int link, i;

	for (i = 0; i < XBOW_PERF_COUNTERS; i++, xbow_perf++) {
		if (xbow_perf->xp_mode == XBOW_MONITOR_NONE)
			continue;	/* this counter is not enabled */

		s = mutex_spinlock(&xbow_soft->xbow_perf_lock);
		perf_reg.xb_counter_val = *(xbowreg_t *) xbow_perf->xp_perf_reg;
		link = perf_reg.xb_perf.link_select;

		/* Accumulate the (wrapping) delta since the last sample. */
		(xbow_plink + link)->xlp_cumulative[xbow_perf->xp_curmode] +=
			((perf_reg.xb_perf.count - xbow_perf->xp_current) & XBOW_COUNTER_MASK);
		xbow_perf->xp_current = perf_reg.xb_perf.count;

		mutex_spinunlock(&xbow_soft->xbow_perf_lock, s);
	}
}
xbow_perf_link_t *
xbow_get_perf_counters(devfs_handle_t vhdl)
{
	/* Hand back the per-link cumulative performance data array
	 * maintained by xbow_update_perf_counters().
	 */
	return xbow_soft_get(vhdl)->xbow_perflink;
}
/* Program hardware performance counter 'counter' to monitor 'link'
 * (absolute widget number) in 'mode'.  Returns 0 on success, -1 on
 * bad arguments, counter busy, or link already monitored by another
 * counter.  Note: mode == XBOW_MONITOR_NONE (0) acts as a disable
 * request and is allowed through the busy check.
 */
int
xbow_enable_perf_counter(devfs_handle_t vhdl, int link, int mode, int counter)
{
	xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
	xbow_perf_t *xbow_perf = xbow_soft->xbow_perfcnt;
	xbow_linkctrl_t xbow_link_ctrl;
	xbow_t *xbow = xbow_soft->base;
	xbow_perfcount_t perf_reg;
	unsigned long s;
	int i;

	link -= BASE_XBOW_PORT;	/* convert widget number to 0-based index */
	if ((link < 0) || (link >= MAX_XBOW_PORTS))
		return -1;

	if ((mode < XBOW_MONITOR_NONE) || (mode > XBOW_MONITOR_DEST_LINK))
		return -1;

	if ((counter < 0) || (counter >= XBOW_PERF_COUNTERS))
		return -1;

	s = mutex_spinlock(&xbow_soft->xbow_perf_lock);

	/* Refuse to re-purpose a counter that is already active,
	 * unless this is a disable (mode == XBOW_MONITOR_NONE).
	 */
	if ((xbow_perf + counter)->xp_mode && mode) {
		mutex_spinunlock(&xbow_soft->xbow_perf_lock, s);
		return -1;
	}
	/* A link may be monitored by at most one counter. */
	for (i = 0; i < XBOW_PERF_COUNTERS; i++) {
		if (i == counter)
			continue;

		if (((xbow_perf + i)->xp_link == link) &&
		    ((xbow_perf + i)->xp_mode)) {

			mutex_spinunlock(&xbow_soft->xbow_perf_lock, s);
			return -1;
		}
	}
	xbow_perf += counter;

	xbow_perf->xp_curlink = xbow_perf->xp_link = link;
	xbow_perf->xp_curmode = xbow_perf->xp_mode = mode;

	/* Program the mode into the link's control register ... */
	xbow_link_ctrl.xbl_ctrlword = xbow->xb_link_raw[link].link_control;
	xbow_link_ctrl.xb_linkcontrol.perf_mode = mode;
	xbow->xb_link_raw[link].link_control = xbow_link_ctrl.xbl_ctrlword;

	/* ... select the link in the counter register and capture the
	 * starting count for delta accounting.
	 */
	perf_reg.xb_counter_val = *(xbowreg_t *) xbow_perf->xp_perf_reg;
	perf_reg.xb_perf.link_select = link;
	*(xbowreg_t *) xbow_perf->xp_perf_reg = perf_reg.xb_counter_val;
	xbow_perf->xp_current = perf_reg.xb_perf.count;

	mutex_spinunlock(&xbow_soft->xbow_perf_lock, s);
	return 0;
}
xbow_link_status_t *
xbow_get_llp_status(devfs_handle_t vhdl)
{
	/* Hand back the per-port LLP error/retry totals maintained by
	 * xbow_update_llp_status().
	 */
	return xbow_soft_get(vhdl)->xbow_link_status;
}
/* Sample every port's LLP error counters and fold them into the
 * software totals.  NB: reading link_status_clr clears the latched
 * status bits; dead links are skipped.
 */
void
xbow_update_llp_status(devfs_handle_t vhdl)
{
	xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
	xbow_link_status_t *xbow_llp_status = xbow_soft->xbow_link_status;
	xbow_t *xbow;
	xbwX_stat_t lnk_sts;
	xbow_aux_link_status_t aux_sts;
	int link;
	devfs_handle_t xwidget_vhdl;
	char *xwidget_name;

	xbow = (xbow_t *) xbow_soft->base;
	for (link = 0; link < MAX_XBOW_PORTS; link++, xbow_llp_status++) {
		/* Get the widget name corresponding the current link.
		 * Note : 0 <= link < MAX_XBOW_PORTS(8).
		 * BASE_XBOW_PORT(0x8) <= xwidget number < MAX_PORT_NUM (0x10)
		 */
		xwidget_vhdl = xbow_widget_lookup(xbow_soft->busv,link+BASE_XBOW_PORT);
		xwidget_name = xwidget_name_get(xwidget_vhdl);
		aux_sts.aux_linkstatus
			= xbow->xb_link_raw[link].link_aux_status;
		lnk_sts.linkstatus = xbow->xb_link_raw[link].link_status_clr;

		if (lnk_sts.link_alive == 0)
			continue;	/* nothing to account on a dead link */

		xbow_llp_status->rx_err_count +=
			aux_sts.xb_aux_linkstatus.rx_err_cnt;

		xbow_llp_status->tx_retry_count +=
			aux_sts.xb_aux_linkstatus.tx_retry_cnt;

		/* Any status bit beyond rcv-err/xmt-retry/alive is abnormal. */
		if (lnk_sts.linkstatus & ~(XB_STAT_RCV_ERR | XB_STAT_XMT_RTRY_ERR | XB_STAT_LINKALIVE)) {
#ifdef LATER
			printk(KERN_WARNING "link %d[%s]: bad status 0x%x\n",
				link, xwidget_name, lnk_sts.linkstatus);
#endif
		}
	}
}
int
xbow_disable_llp_monitor(devfs_handle_t vhdl)
{
	xbow_soft_t soft = xbow_soft_get(vhdl);
	int i;

	/* Reset the accumulated per-port LLP error/retry totals and
	 * turn monitoring off.  Always succeeds.
	 */
	for (i = 0; i < MAX_XBOW_PORTS; i++) {
		soft->xbow_link_status[i].rx_err_count = 0;
		soft->xbow_link_status[i].tx_retry_count = 0;
	}

	soft->link_monitor = 0;
	return 0;
}
int
xbow_enable_llp_monitor(devfs_handle_t vhdl)
{
	/* Switch LLP link monitoring on; counters accumulate via
	 * xbow_update_llp_status().  Always succeeds.
	 */
	xbow_soft_get(vhdl)->link_monitor = 1;
	return 0;
}
/* Reset the xbow link leading to the widget at xconn_vhdl and wait
 * (up to ~1ms, calibrated on first call) for it to come back alive.
 * Returns 0 on success, -1 if the link never leaves reset.
 */
int
xbow_reset_link(devfs_handle_t xconn_vhdl)
{
	xwidget_info_t widget_info;
	xwidgetnum_t port;
	xbow_t *xbow;
	xbowreg_t ctrl;
	xbwX_stat_t stat;
	unsigned itick;
	unsigned dtick;
	static int ticks_per_ms = 0;	/* calibrated once, reused after */

	if (!ticks_per_ms) {
		itick = get_timestamp();
		us_delay(1000);
		ticks_per_ms = get_timestamp() - itick;
	}
	widget_info = xwidget_info_get(xconn_vhdl);
	port = xwidget_info_id_get(widget_info);

#ifdef XBOW_K1PTR			/* defined if we only have one xbow ... */
	xbow = XBOW_K1PTR;
#else
	{
		devfs_handle_t xbow_vhdl;
		xbow_soft_t xbow_soft;

		/* Find the xbow vertex upstream of this connection. */
		hwgraph_traverse(xconn_vhdl, ".master/xtalk/0/xbow", &xbow_vhdl);
		xbow_soft = xbow_soft_get(xbow_vhdl);
		xbow = xbow_soft->base;
	}
#endif

	/*
	 * This requires three PIOs (reset the link, check for the
	 * reset, restore the control register for the link) plus
	 * 10us to wait for the reset. We allow up to 1ms for the
	 * widget to come out of reset before giving up and
	 * returning a failure.
	 */
	ctrl = xbow->xb_link(port).link_control;	/* save to restore below */
	xbow->xb_link(port).link_reset = 0;		/* any write triggers reset */
	itick = get_timestamp();
	while (1) {
		stat.linkstatus = xbow->xb_link(port).link_status;
		if (stat.link_alive)
			break;
		dtick = get_timestamp() - itick;
		if (dtick > ticks_per_ms) {
			return -1;	/* never came out of reset */
		}
		DELAY(2);		/* don't beat on link_status */
	}
	xbow->xb_link(port).link_control = ctrl;	/* restore saved settings */
	return 0;
}
/*
* Dump xbow registers.
* input parameter is either a pointer to
* the xbow chip or the vertex handle for
* an xbow vertex.
*/
/* Kernel-debugger helper: dump the widget-0 and per-link registers of
 * the xbow whose (chip) address is passed in 'regs'.  All output is
 * currently compiled out (LATER), leaving only the register walk.
 */
void
idbg_xbowregs(int64_t regs)
{
	xbow_t *xbow;
	int i;
	xb_linkregs_t *link;

	xbow = (xbow_t *) regs;

#ifdef LATER
	qprintf("Printing xbow registers starting at 0x%x\n", xbow);
	qprintf("wid %x status %x erruppr %x errlower %x control %x timeout %x\n",
		xbow->xb_wid_id, xbow->xb_wid_stat, xbow->xb_wid_err_upper,
		xbow->xb_wid_err_lower, xbow->xb_wid_control,
		xbow->xb_wid_req_timeout);
	qprintf("intr uppr %x lower %x errcmd %x llp ctrl %x arb_reload %x\n",
		xbow->xb_wid_int_upper, xbow->xb_wid_int_lower,
		xbow->xb_wid_err_cmdword, xbow->xb_wid_llp,
		xbow->xb_wid_arb_reload);
#endif

	/* Per-link registers live at widget numbers 0x8 .. 0xf. */
	for (i = 8; i <= 0xf; i++) {
		link = &xbow->xb_link(i);
#ifdef LATER
		qprintf("Link %d registers\n", i);
		qprintf("\tctrl %x stat %x arbuppr %x arblowr %x auxstat %x\n",
			link->link_control, link->link_status,
			link->link_arb_upper, link->link_arb_lower,
			link->link_aux_status);
#endif
	}
}
/* GBR (guaranteed bandwidth ring) arbitration accounting: the arbiter
 * reloads every XBOW_ARB_RELOAD_TICKS, so one GBR count corresponds to
 * GRANULARITY bytes per second.
 */
#define XBOW_ARB_RELOAD_TICKS		25
	/* granularity: 4 MB/s, max: 124 MB/s */
#define GRANULARITY			((100 * 1000000) / XBOW_ARB_RELOAD_TICKS)

#define XBOW_BYTES_TO_GBR(BYTES_per_s)	(int) (BYTES_per_s / GRANULARITY)

#define XBOW_GBR_TO_BYTES(cnt)		(bandwidth_t) ((cnt) * GRANULARITY)

/* Round a truncated GBR count up when it under-represents the rate. */
#define CEILING_BYTES_TO_GBR(gbr, bytes_per_sec)	\
			((XBOW_GBR_TO_BYTES(gbr) < bytes_per_sec) ? gbr+1 : gbr)

#define XBOW_ARB_GBR_MAX 31	/* max GBR count per source in the arb register */
/* Absolute value.  Fully parenthesized so expression arguments are
 * safe: the previous form expanded ABS(a - b) to (-1 * a - b), which
 * is wrong due to operator precedence.
 */
#define ABS(x)		(((x) > 0) ? (x) : (-(x)))
/* Compute the change in GBR arbitration count needed to go from an
 * existing allocation of old_bytes_per_sec to an additional
 * bytes_per_sec.  Both totals are rounded up to whole GBR counts, so
 * the returned delta may be negative when bytes_per_sec is negative
 * (a release).
 */
int
xbow_bytes_to_gbr(bandwidth_t old_bytes_per_sec, bandwidth_t bytes_per_sec)
{
	int gbr_granted;
	int new_total_gbr;
	int change_gbr;
	bandwidth_t new_total_bw;

#ifdef GRIO_DEBUG
	printk("xbow_bytes_to_gbr: old_bytes_per_sec %lld bytes_per_sec %lld\n",
		old_bytes_per_sec, bytes_per_sec);
#endif	/* GRIO_DEBUG */

	/* GBR counts currently granted for the old rate (rounded up). */
	gbr_granted = CEILING_BYTES_TO_GBR((XBOW_BYTES_TO_GBR(old_bytes_per_sec)),
			old_bytes_per_sec);
	new_total_bw = old_bytes_per_sec + bytes_per_sec;
	/* GBR counts required for the combined rate (rounded up). */
	new_total_gbr = CEILING_BYTES_TO_GBR((XBOW_BYTES_TO_GBR(new_total_bw)),
			new_total_bw);
	change_gbr = new_total_gbr - gbr_granted;

#ifdef GRIO_DEBUG
	printk("xbow_bytes_to_gbr: gbr_granted %d new_total_gbr %d change_gbr %d\n",
		gbr_granted, new_total_gbr, change_gbr);
#endif	/* GRIO_DEBUG */

	return (change_gbr);
}
/* Conversion from a GBR arbitration count back to bytes per second. */
bandwidth_t
xbow_gbr_to_bytes(int gbr)
{
	return XBOW_GBR_TO_BYTES(gbr);
}
/* Given the vhdl for the desired xbow, the src and dest. widget ids
* and the req_bw value, this xbow driver entry point accesses the
* xbow registers and allocates the desired bandwidth if available.
*
* If bandwidth allocation is successful, return success else return failure.
*/
int
xbow_prio_bw_alloc(devfs_handle_t vhdl,
		   xwidgetnum_t src_wid,
		   xwidgetnum_t dest_wid,
		   unsigned long long old_alloc_bw,	/* caller's current allocation */
		   unsigned long long req_bw)		/* additional bandwidth wanted */
{
	xbow_soft_t soft = xbow_soft_get(vhdl);
	volatile xbowreg_t *xreg;
	xbowreg_t mask;
	unsigned long s;
	int error = 0;
	bandwidth_t old_bw_BYTES, req_bw_BYTES;
	xbowreg_t old_xreg;
	int old_bw_GBR, req_bw_GBR, new_bw_GBR;

#ifdef GRIO_DEBUG
	printk("xbow_prio_bw_alloc: vhdl %d src_wid %d dest_wid %d req_bw %lld\n",
		(int) vhdl, (int) src_wid, (int) dest_wid, req_bw);
#endif

	ASSERT(XBOW_WIDGET_IS_VALID(src_wid));
	ASSERT(XBOW_WIDGET_IS_VALID(dest_wid));

	/* All register/bookkeeping updates happen under the bw lock. */
	s = mutex_spinlock(&soft->xbow_bw_alloc_lock);

	/* Get pointer to the correct register */
	xreg = XBOW_PRIO_ARBREG_PTR(soft->base, dest_wid, src_wid);

	/* Get mask for GBR count value */
	mask = XB_ARB_GBR_MSK << XB_ARB_GBR_SHFT(src_wid);

	/* Convert the request to a GBR count delta (may be negative
	 * for a release) and back to the byte rate actually granted.
	 */
	req_bw_GBR = xbow_bytes_to_gbr(old_alloc_bw, req_bw);
	req_bw_BYTES = (req_bw_GBR < 0) ? (-1 * xbow_gbr_to_bytes(ABS(req_bw_GBR)))
		: xbow_gbr_to_bytes(req_bw_GBR);

#ifdef GRIO_DEBUG
	printk("req_bw %lld req_bw_BYTES %lld req_bw_GBR %d\n",
		req_bw, req_bw_BYTES, req_bw_GBR);
#endif	/* GRIO_DEBUG */

	/* Current allocation toward this destination port. */
	old_bw_BYTES = soft->bw_cur_used[(int) dest_wid - MAX_XBOW_PORTS];
	old_xreg = *xreg;
	old_bw_GBR = (((*xreg) & mask) >> XB_ARB_GBR_SHFT(src_wid));

#ifdef GRIO_DEBUG
	ASSERT(XBOW_BYTES_TO_GBR(old_bw_BYTES) == old_bw_GBR);
	printk("old_bw_BYTES %lld old_bw_GBR %d\n", old_bw_BYTES, old_bw_GBR);
	printk("req_bw_BYTES %lld old_bw_BYTES %lld soft->bw_hiwm %lld\n",
		req_bw_BYTES, old_bw_BYTES,
		soft->bw_hiwm[(int) dest_wid - MAX_XBOW_PORTS]);

#endif	/* GRIO_DEBUG */

	/* Accept the request only if we don't exceed the destination
	 * port HIWATER_MARK *AND* the max. link GBR arbitration count
	 */
	if (((old_bw_BYTES + req_bw_BYTES) <=
		soft->bw_hiwm[(int) dest_wid - MAX_XBOW_PORTS]) &&
		(req_bw_GBR + old_bw_GBR <= XBOW_ARB_GBR_MAX)) {

		new_bw_GBR = (old_bw_GBR + req_bw_GBR);

		/* Set this in the xbow link register */
		*xreg = (old_xreg & ~mask) | \
			(new_bw_GBR << XB_ARB_GBR_SHFT(src_wid) & mask);

		soft->bw_cur_used[(int) dest_wid - MAX_XBOW_PORTS] =
			xbow_gbr_to_bytes(new_bw_GBR);
	} else {
		error = 1;	/* request denied; registers untouched */
	}

	mutex_spinunlock(&soft->xbow_bw_alloc_lock, s);

	return (error);
}
/* $Id$
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/sn/sgi.h>
#include <asm/sn/driver.h>
#include <asm/sn/io.h>
#include <asm/sn/iograph.h>
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h>
#include <asm/sn/hcl_util.h>
#include <asm/sn/xtalk/xtalk.h>
#include <asm/sn/xtalk/xswitch.h>
#include <asm/sn/xtalk/xwidget.h>
#include <asm/sn/xtalk/xtalk_private.h>
/*
* Implement crosstalk provider operations. The xtalk* layer provides a
* platform-independent interface for crosstalk devices. This layer
* switches among the possible implementations of a crosstalk adapter.
*
* On platforms with only one possible xtalk provider, macros can be
* set up at the top that cause the table lookups and indirections to
* completely disappear.
*/
/* Simple allocation wrappers for per-object bookkeeping structures. */
#define	NEW(ptr)	(ptr = kmalloc(sizeof (*(ptr)), GFP_KERNEL))
#define	DEL(ptr)	(kfree(ptr))

char widget_info_fingerprint[] = "widget_info";

cdl_p xtalk_registry = NULL;

/* Single-provider fast path: dispatch every provider operation
 * directly to the hub_* implementation rather than indirecting
 * through a per-widget provider-function table.  Because DEV_FUNC is
 * defined here, the generic multi-provider code further down (guarded
 * by #if !defined(DEV_FUNC)) is compiled out.
 */
#define	DEV_FUNC(dev,func)	hub_##func
#define	CAST_PIOMAP(x)		((hub_piomap_t)(x))
#define	CAST_DMAMAP(x)		((hub_dmamap_t)(x))
#define	CAST_INTR(x)		((hub_intr_t)(x))
/* =====================================================================
* Function Table of Contents
*/
xtalk_piomap_t xtalk_piomap_alloc(devfs_handle_t, device_desc_t, iopaddr_t, size_t, size_t, unsigned);
void xtalk_piomap_free(xtalk_piomap_t);
caddr_t xtalk_piomap_addr(xtalk_piomap_t, iopaddr_t, size_t);
void xtalk_piomap_done(xtalk_piomap_t);
caddr_t xtalk_piotrans_addr(devfs_handle_t, device_desc_t, iopaddr_t, size_t, unsigned);
caddr_t xtalk_pio_addr(devfs_handle_t, device_desc_t, iopaddr_t, size_t, xtalk_piomap_t *, unsigned);
void xtalk_set_early_piotrans_addr(xtalk_early_piotrans_addr_f *);
caddr_t xtalk_early_piotrans_addr(xwidget_part_num_t, xwidget_mfg_num_t, int, iopaddr_t, size_t, unsigned);
static caddr_t null_xtalk_early_piotrans_addr(xwidget_part_num_t, xwidget_mfg_num_t, int, iopaddr_t, size_t, unsigned);
xtalk_dmamap_t xtalk_dmamap_alloc(devfs_handle_t, device_desc_t, size_t, unsigned);
void xtalk_dmamap_free(xtalk_dmamap_t);
iopaddr_t xtalk_dmamap_addr(xtalk_dmamap_t, paddr_t, size_t);
alenlist_t xtalk_dmamap_list(xtalk_dmamap_t, alenlist_t, unsigned);
void xtalk_dmamap_done(xtalk_dmamap_t);
iopaddr_t xtalk_dmatrans_addr(devfs_handle_t, device_desc_t, paddr_t, size_t, unsigned);
alenlist_t xtalk_dmatrans_list(devfs_handle_t, device_desc_t, alenlist_t, unsigned);
void xtalk_dmamap_drain(xtalk_dmamap_t);
void xtalk_dmaaddr_drain(devfs_handle_t, iopaddr_t, size_t);
void xtalk_dmalist_drain(devfs_handle_t, alenlist_t);
xtalk_intr_t xtalk_intr_alloc(devfs_handle_t, device_desc_t, devfs_handle_t);
xtalk_intr_t xtalk_intr_alloc_nothd(devfs_handle_t, device_desc_t, devfs_handle_t);
void xtalk_intr_free(xtalk_intr_t);
int xtalk_intr_connect(xtalk_intr_t, intr_func_t, intr_arg_t, xtalk_intr_setfunc_t, void *);
void xtalk_intr_disconnect(xtalk_intr_t);
devfs_handle_t xtalk_intr_cpu_get(xtalk_intr_t);
int xtalk_error_handler(devfs_handle_t, int, ioerror_mode_t, ioerror_t *);
int xtalk_error_devenable(devfs_handle_t, int, int);
void xtalk_provider_startup(devfs_handle_t);
void xtalk_provider_shutdown(devfs_handle_t);
devfs_handle_t xtalk_intr_dev_get(xtalk_intr_t);
xwidgetnum_t xtalk_intr_target_get(xtalk_intr_t);
xtalk_intr_vector_t xtalk_intr_vector_get(xtalk_intr_t);
iopaddr_t xtalk_intr_addr_get(struct xtalk_intr_s *);
void *xtalk_intr_sfarg_get(xtalk_intr_t);
devfs_handle_t xtalk_pio_dev_get(xtalk_piomap_t);
xwidgetnum_t xtalk_pio_target_get(xtalk_piomap_t);
iopaddr_t xtalk_pio_xtalk_addr_get(xtalk_piomap_t);
ulong xtalk_pio_mapsz_get(xtalk_piomap_t);
caddr_t xtalk_pio_kvaddr_get(xtalk_piomap_t);
devfs_handle_t xtalk_dma_dev_get(xtalk_dmamap_t);
xwidgetnum_t xtalk_dma_target_get(xtalk_dmamap_t);
xwidget_info_t xwidget_info_chk(devfs_handle_t);
xwidget_info_t xwidget_info_get(devfs_handle_t);
void xwidget_info_set(devfs_handle_t, xwidget_info_t);
devfs_handle_t xwidget_info_dev_get(xwidget_info_t);
xwidgetnum_t xwidget_info_id_get(xwidget_info_t);
devfs_handle_t xwidget_info_master_get(xwidget_info_t);
xwidgetnum_t xwidget_info_masterid_get(xwidget_info_t);
xwidget_part_num_t xwidget_info_part_num_get(xwidget_info_t);
xwidget_mfg_num_t xwidget_info_mfg_num_get(xwidget_info_t);
char *xwidget_info_name_get(xwidget_info_t);
void xtalk_init(void);
void xtalk_provider_register(devfs_handle_t, xtalk_provider_t *);
void xtalk_provider_unregister(devfs_handle_t);
xtalk_provider_t *xtalk_provider_fns_get(devfs_handle_t);
int xwidget_driver_register(xwidget_part_num_t,
xwidget_mfg_num_t,
char *, unsigned);
void xwidget_driver_unregister(char *);
int xwidget_register(xwidget_hwid_t, devfs_handle_t,
xwidgetnum_t, devfs_handle_t,
xwidgetnum_t, async_attach_t);
int xwidget_unregister(devfs_handle_t);
void xwidget_reset(devfs_handle_t);
char *xwidget_name_get(devfs_handle_t);
#if !defined(DEV_FUNC)
/*
 * There is more than one possible provider
 * for this platform. We need to examine the
 * master vertex of the current vertex for
 * a provider function structure, and indirect
 * through the appropriately named member.
 * NOTE: this whole section is currently compiled out, because
 * DEV_FUNC is defined unconditionally near the top of this file.
 */
#define	DEV_FUNC(dev,func)	xwidget_to_provider_fns(dev)->func
#define	CAST_PIOMAP(x)		((xtalk_piomap_t)(x))
#define	CAST_DMAMAP(x)		((xtalk_dmamap_t)(x))
#define	CAST_INTR(x)		((xtalk_intr_t)(x))

/* Fetch the provider-operations table registered for the widget at
 * connection point 'xconn'; asserts that one exists.
 */
static xtalk_provider_t *
xwidget_to_provider_fns(devfs_handle_t xconn)
{
	xwidget_info_t widget_info;
	xtalk_provider_t *provider_fns;

	widget_info = xwidget_info_get(xconn);
	ASSERT(widget_info != NULL);

	provider_fns = xwidget_info_pops_get(widget_info);
	ASSERT(provider_fns != NULL);

	return (provider_fns);
}
#endif
/*
* Many functions are not passed their vertex
* information directly; rather, they must
* dive through a resource map. These macros
* are available to coordinate this detail.
*/
/* Indirect through the provider table via the device recorded in the
 * map/interrupt object.  Arguments are parenthesized, and INTR_FUNC
 * now uses its own parameter: it previously expanded to
 * 'intr_hdl->xi_dev' regardless of the argument passed, which only
 * compiled when the caller happened to name its variable 'intr_hdl'
 * (harmless today only because the active DEV_FUNC ignores its first
 * argument).
 */
#define	PIOMAP_FUNC(map,func)		DEV_FUNC((map)->xp_dev,func)
#define	DMAMAP_FUNC(map,func)		DEV_FUNC((map)->xd_dev,func)
#define	INTR_FUNC(intr,func)		DEV_FUNC((intr)->xi_dev,func)
/* =====================================================================
* PIO MANAGEMENT
*
* For mapping system virtual address space to
* xtalk space on a specified widget
*/
/* Allocate PIO mapping resources on 'dev' covering the given
 * crosstalk address range; dispatches to the provider's
 * piomap_alloc.  Returns NULL on failure.
 */
xtalk_piomap_t
xtalk_piomap_alloc(devfs_handle_t dev,	/* set up mapping for this device */
		   device_desc_t dev_desc,	/* device descriptor */
		   iopaddr_t xtalk_addr,	/* map for this xtalk_addr range */
		   size_t byte_count,
		   size_t byte_count_max,	/* maximum size of a mapping */
		   unsigned flags)
{				/* defined in sys/pio.h */
	return (xtalk_piomap_t) DEV_FUNC(dev, piomap_alloc)
	    (dev, dev_desc, xtalk_addr, byte_count, byte_count_max, flags);
}
/* Release PIO mapping resources obtained via xtalk_piomap_alloc(). */
void
xtalk_piomap_free(xtalk_piomap_t xtalk_piomap)
{
	PIOMAP_FUNC(xtalk_piomap, piomap_free)
	    (CAST_PIOMAP(xtalk_piomap));
}
/* Establish a kernel virtual address for the given crosstalk address
 * range through previously allocated mapping resources.  Returns
 * NULL on failure.
 */
caddr_t
xtalk_piomap_addr(xtalk_piomap_t xtalk_piomap,	/* mapping resources */
		  iopaddr_t xtalk_addr,		/* map for this xtalk address */
		  size_t byte_count)
{				/* map this many bytes */
	return PIOMAP_FUNC(xtalk_piomap, piomap_addr)
	    (CAST_PIOMAP(xtalk_piomap), xtalk_addr, byte_count);
}
/* Mark the current use of the PIO map finished, making the mapping
 * resources available for a subsequent xtalk_piomap_addr().
 */
void
xtalk_piomap_done(xtalk_piomap_t xtalk_piomap)
{
	PIOMAP_FUNC(xtalk_piomap, piomap_done)
	    (CAST_PIOMAP(xtalk_piomap));
}
/* Attempt a permanent ("translation") PIO mapping that consumes no
 * mapping resources.  Returns NULL when the provider cannot offer
 * one, in which case the caller falls back to xtalk_piomap_alloc().
 */
caddr_t
xtalk_piotrans_addr(devfs_handle_t dev,	/* translate for this device */
		    device_desc_t dev_desc,	/* device descriptor */
		    iopaddr_t xtalk_addr,	/* Crosstalk address */
		    size_t byte_count,	/* map this many bytes */
		    unsigned flags)
{				/* (currently unused) */
	return DEV_FUNC(dev, piotrans_addr)
	    (dev, dev_desc, xtalk_addr, byte_count, flags);
}
/* Convenience wrapper: try a direct PIO translation first; if the
 * provider can't do one, fall back to allocating a piomap and mapping
 * through it.  On success via the fallback path, *mapp (if non-NULL)
 * receives the map, which the caller must eventually free; otherwise
 * *mapp is set to 0.  Returns NULL when no address can be produced.
 */
caddr_t
xtalk_pio_addr(devfs_handle_t dev,	/* translate for this device */
	       device_desc_t dev_desc,	/* device descriptor */
	       iopaddr_t addr,		/* starting address (or offset in window) */
	       size_t byte_count,	/* map this many bytes */
	       xtalk_piomap_t *mapp,	/* where to return the map pointer */
	       unsigned flags)
{				/* PIO flags */
	xtalk_piomap_t piomap;
	caddr_t kvaddr;

	if (mapp)
		*mapp = 0;	/* until proven otherwise: no map used */

	/* Fast path: direct translation needs no mapping resources. */
	kvaddr = xtalk_piotrans_addr(dev, dev_desc, addr, byte_count, flags);
	if (kvaddr)
		return kvaddr;

	/* Fallback: allocate mapping resources covering the range. */
	piomap = xtalk_piomap_alloc(dev, dev_desc, addr,
				    byte_count, byte_count, flags);
	if (!piomap)
		return 0;	/* xtalk_piomap_alloc failed */

	kvaddr = xtalk_piomap_addr(piomap, addr, byte_count);
	if (!kvaddr) {
		xtalk_piomap_free(piomap);	/* don't leak the map */
		return 0;	/* xtalk_piomap_addr failed */
	}

	if (mapp)
		*mapp = piomap;	/* caller now owns the map */
	return kvaddr;
}
/* =====================================================================
 *                    EARLY PIOTRANS SUPPORT
 *
 *      There are places where drivers (mgras, for instance)
 *      need to get PIO translations before the infrastructure
 *      is extended to them (setting up textports, for
 *      instance). These drivers should call
 *      xtalk_early_piotrans_addr with their xtalk ID
 *      information, a sequence number (so we can use the second
 *      mgras for instance), and the usual piotrans parameters.
 *
 *      Machine specific code should provide an implementation
 *      of early_piotrans_addr, and present a pointer to this
 *      function to xtalk_set_early_piotrans_addr so it can be
 *      used by clients without the clients having to know what
 *      platform or what xtalk provider is in use.
 */

/* Stub used until the platform registers a real implementation. */
static xtalk_early_piotrans_addr_f null_xtalk_early_piotrans_addr;

/* Currently installed implementation; replaced by
 * xtalk_set_early_piotrans_addr().
 */
xtalk_early_piotrans_addr_f *impl_early_piotrans_addr = null_xtalk_early_piotrans_addr;

/* xtalk_set_early_piotrans_addr:
 * specify the early_piotrans_addr implementation function.
 */
void
xtalk_set_early_piotrans_addr(xtalk_early_piotrans_addr_f *impl)
{
	impl_early_piotrans_addr = impl;
}
/* xtalk_early_piotrans_addr:
 * figure out a PIO address for the "nth" crosstalk widget that
 * matches the specified part and mfgr number. Returns NULL if
 * there is no such widget, or if the requested mapping can not
 * be constructed.
 * Limitations on which crosstalk slots (and busses) are
 * checked, and definitions of the ordering of the search across
 * the crosstalk slots, are defined by the platform.
 */
caddr_t
xtalk_early_piotrans_addr(xwidget_part_num_t part_num,
			  xwidget_mfg_num_t mfg_num,
			  int which,
			  iopaddr_t xtalk_addr,
			  size_t byte_count,
			  unsigned flags)
{
	/* Forward to whatever implementation the platform installed
	 * (defaults to null_xtalk_early_piotrans_addr below).
	 */
	return impl_early_piotrans_addr
		(part_num, mfg_num, which, xtalk_addr, byte_count, flags);
}
/* null_xtalk_early_piotrans_addr:
 * used as the early_piotrans_addr implementation until and
 * unless a real implementation is provided. In DEBUG kernels,
 * we want to know who is calling before the implementation is
 * registered; in non-DEBUG kernels, return NULL representing
 * lack of mapping support.
 */
/*ARGSUSED */
static caddr_t
null_xtalk_early_piotrans_addr(xwidget_part_num_t part_num,
			       xwidget_mfg_num_t mfg_num,
			       int which,
			       iopaddr_t xtalk_addr,
			       size_t byte_count,
			       unsigned flags)
{
#if DEBUG
	PRINT_PANIC("null_xtalk_early_piotrans_addr");
#endif
	return NULL;
}
/* =====================================================================
 *                    DMA MANAGEMENT
 *
 *      For mapping from crosstalk space to system
 *      physical space.
 */

/* Allocate DMA mapping resources on 'dev'; dispatched to the widget's
 * provider via DEV_FUNC.
 */
xtalk_dmamap_t
xtalk_dmamap_alloc(devfs_handle_t dev,	/* set up mappings for this device */
		   device_desc_t dev_desc,	/* device descriptor */
		   size_t byte_count_max,	/* max size of a mapping */
		   unsigned flags)
{				/* defined in dma.h */
	return (xtalk_dmamap_t) DEV_FUNC(dev, dmamap_alloc)
		(dev, dev_desc, byte_count_max, flags);
}

/* Release resources obtained from xtalk_dmamap_alloc(). */
void
xtalk_dmamap_free(xtalk_dmamap_t xtalk_dmamap)
{
	DMAMAP_FUNC(xtalk_dmamap, dmamap_free)
		(CAST_DMAMAP(xtalk_dmamap));
}

/* Map a physical address range through previously allocated DMA
 * resources; returns the corresponding xtalk I/O address.
 */
iopaddr_t
xtalk_dmamap_addr(xtalk_dmamap_t xtalk_dmamap,	/* use these mapping resources */
		  paddr_t paddr,	/* map for this address */
		  size_t byte_count)
{				/* map this many bytes */
	return DMAMAP_FUNC(xtalk_dmamap, dmamap_addr)
		(CAST_DMAMAP(xtalk_dmamap), paddr, byte_count);
}

/* Map an entire address/length list through the DMA resources. */
alenlist_t
xtalk_dmamap_list(xtalk_dmamap_t xtalk_dmamap,	/* use these mapping resources */
		  alenlist_t alenlist,	/* map this Address/Length List */
		  unsigned flags)
{
	return DMAMAP_FUNC(xtalk_dmamap, dmamap_list)
		(CAST_DMAMAP(xtalk_dmamap), alenlist, flags);
}

/* Signal the provider that the current dmamap use is finished
 * (exact semantics are provider-defined).
 */
void
xtalk_dmamap_done(xtalk_dmamap_t xtalk_dmamap)
{
	DMAMAP_FUNC(xtalk_dmamap, dmamap_done)
		(CAST_DMAMAP(xtalk_dmamap));
}

/* One-shot DMA translation for a physical range (no map object). */
iopaddr_t
xtalk_dmatrans_addr(devfs_handle_t dev,	/* translate for this device */
		    device_desc_t dev_desc,	/* device descriptor */
		    paddr_t paddr,	/* system physical address */
		    size_t byte_count,	/* length */
		    unsigned flags)
{				/* defined in dma.h */
	return DEV_FUNC(dev, dmatrans_addr)
		(dev, dev_desc, paddr, byte_count, flags);
}

/* One-shot DMA translation for an address/length list. */
alenlist_t
xtalk_dmatrans_list(devfs_handle_t dev,	/* translate for this device */
		    device_desc_t dev_desc,	/* device descriptor */
		    alenlist_t palenlist,	/* system address/length list */
		    unsigned flags)
{				/* defined in dma.h */
	return DEV_FUNC(dev, dmatrans_list)
		(dev, dev_desc, palenlist, flags);
}

/* Drain operations: forwarded to the provider; presumably flush
 * in-flight DMA for the map/address/list -- semantics are
 * provider-defined (not visible here).
 */
void
xtalk_dmamap_drain(xtalk_dmamap_t map)
{
	DMAMAP_FUNC(map, dmamap_drain)
		(CAST_DMAMAP(map));
}

void
xtalk_dmaaddr_drain(devfs_handle_t dev, paddr_t addr, size_t size)
{
	DEV_FUNC(dev, dmaaddr_drain)
		(dev, addr, size);
}

void
xtalk_dmalist_drain(devfs_handle_t dev, alenlist_t list)
{
	DEV_FUNC(dev, dmalist_drain)
		(dev, list);
}
/* =====================================================================
 *                    INTERRUPT MANAGEMENT
 *
 *      Allow crosstalk devices to establish interrupts
 */

/*
 * Allocate resources required for an interrupt as specified in intr_desc.
 * Return resource handle in intr_hdl.
 */
xtalk_intr_t
xtalk_intr_alloc(devfs_handle_t dev,	/* which Crosstalk device */
		 device_desc_t dev_desc,	/* device descriptor */
		 devfs_handle_t owner_dev)
{				/* owner of this interrupt */
	return (xtalk_intr_t) DEV_FUNC(dev, intr_alloc)
		(dev, dev_desc, owner_dev);
}

/*
 * Allocate resources required for an interrupt as specified in dev_desc.
 * Unconditionally setup resources to be non-threaded.
 * Return resource handle in intr_hdl.
 */
xtalk_intr_t
xtalk_intr_alloc_nothd(devfs_handle_t dev,	/* which Crosstalk device */
		       device_desc_t dev_desc,	/* device descriptor */
		       devfs_handle_t owner_dev)	/* owner of this interrupt */
{
	return (xtalk_intr_t) DEV_FUNC(dev, intr_alloc_nothd)
		(dev, dev_desc, owner_dev);
}

/*
 * Free resources consumed by intr_alloc.
 */
void
xtalk_intr_free(xtalk_intr_t intr_hdl)
{
	INTR_FUNC(intr_hdl, intr_free)
		(CAST_INTR(intr_hdl));
}

/*
 * Associate resources allocated with a previous xtalk_intr_alloc call with the
 * described handler, arg, name, etc.
 *
 * Returns 0 on success, returns <0 on failure.
 */
int
xtalk_intr_connect(xtalk_intr_t intr_hdl,	/* xtalk intr resource handle */
		   intr_func_t intr_func,	/* xtalk intr handler */
		   intr_arg_t intr_arg,	/* arg to intr handler */
		   xtalk_intr_setfunc_t setfunc,	/* func to set intr hw */
		   void *setfunc_arg)	/* arg to setfunc */
{
	return INTR_FUNC(intr_hdl, intr_connect)
		(CAST_INTR(intr_hdl), intr_func, intr_arg, setfunc, setfunc_arg);
}

/*
 * Disassociate handler with the specified interrupt.
 */
void
xtalk_intr_disconnect(xtalk_intr_t intr_hdl)
{
	INTR_FUNC(intr_hdl, intr_disconnect)
		(CAST_INTR(intr_hdl));
}

/*
 * Return a hwgraph vertex that represents the CPU currently
 * targeted by an interrupt.
 */
devfs_handle_t
xtalk_intr_cpu_get(xtalk_intr_t intr_hdl)
{
	return INTR_FUNC(intr_hdl, intr_cpu_get)
		(CAST_INTR(intr_hdl));
}
/*
* =====================================================================
* ERROR MANAGEMENT
*/
/*
* xtalk_error_handler:
* pass this error on to the handler registered
 * at the specified xtalk connection point,
* or complain about it here if there is no handler.
*
* This routine plays two roles during error delivery
* to most widgets: first, the external agent (heart,
* hub, or whatever) calls in with the error and the
* connect point representing the crosstalk switch,
* or whatever crosstalk device is directly connected
* to the agent.
*
* If there is a switch, it will generally look at the
* widget number stashed in the ioerror structure; and,
* if the error came from some widget other than the
* switch, it will call back into xtalk_error_handler
* with the connection point of the offending port.
*/
int
xtalk_error_handler(
		    devfs_handle_t xconn,	/* connect point of (suspected) offender */
		    int error_code,
		    ioerror_mode_t mode,
		    ioerror_t *ioerror)
{
	xwidget_info_t xwidget_info;

	xwidget_info = xwidget_info_get(xconn);
	/* Make sure that xwidget_info is a valid pointer before dereferencing it.
	 * We could come in here during very early initialization.
	 */
	if (xwidget_info && xwidget_info->w_efunc)
		return xwidget_info->w_efunc
			(xwidget_info->w_einfo,
			 error_code, mode, ioerror);
	/*
	 * no error handler registered for
	 * the offending port. it's not clear
	 * what needs to be done, but reporting
	 * it would be a good thing, unless it
	 * is a mode that requires nothing.
	 */
	if ((mode == MODE_DEVPROBE) || (mode == MODE_DEVUSERERROR) ||
	    (mode == MODE_DEVREENABLE))
		return IOERROR_HANDLED;
#if defined(SUPPORT_PRINTING_V_FORMAT)
	printk(KERN_WARNING "Xbow at %v encountered Fatal error", xconn);
#else
	printk(KERN_WARNING "Xbow at 0x%p encountered Fatal error", xconn);
#endif
	ioerror_dump("xtalk", error_code, mode, ioerror);
	return IOERROR_UNHANDLED;
}
/* Re-enable a device on the widget after an error; forwarded to the
 * widget's provider.
 */
int
xtalk_error_devenable(devfs_handle_t xconn_vhdl, int devnum, int error_code)
{
	return DEV_FUNC(xconn_vhdl, error_devenable) (xconn_vhdl, devnum, error_code);
}
/* =====================================================================
 *                    CONFIGURATION MANAGEMENT
 */

/*
 * Startup a crosstalk provider
 */
void
xtalk_provider_startup(devfs_handle_t xtalk_provider)
{
	DEV_FUNC(xtalk_provider, provider_startup)
		(xtalk_provider);
}

/*
 * Shutdown a crosstalk provider
 */
void
xtalk_provider_shutdown(devfs_handle_t xtalk_provider)
{
	DEV_FUNC(xtalk_provider, provider_shutdown)
		(xtalk_provider);
}

/*
 * Enable a device on a xtalk widget
 */
void
xtalk_widgetdev_enable(devfs_handle_t xconn_vhdl, int devnum)
{
	DEV_FUNC(xconn_vhdl, widgetdev_enable) (xconn_vhdl, devnum);
}

/*
 * Shutdown a device on a xtalk widget
 */
void
xtalk_widgetdev_shutdown(devfs_handle_t xconn_vhdl, int devnum)
{
	DEV_FUNC(xconn_vhdl, widgetdev_shutdown) (xconn_vhdl, devnum);
}

/* Ask the provider whether DMA is enabled for this connect point. */
int
xtalk_dma_enabled(devfs_handle_t xconn_vhdl)
{
	return DEV_FUNC(xconn_vhdl, dma_enabled) (xconn_vhdl);
}
/*
 * Generic crosstalk functions, for use with all crosstalk providers
 * and all crosstalk devices.
 */

/****** Generic crosstalk interrupt interfaces ******/

/* Plain field accessors for xtalk_intr_s. */
devfs_handle_t
xtalk_intr_dev_get(xtalk_intr_t xtalk_intr)
{
	return (xtalk_intr->xi_dev);
}

xwidgetnum_t
xtalk_intr_target_get(xtalk_intr_t xtalk_intr)
{
	return (xtalk_intr->xi_target);
}

xtalk_intr_vector_t
xtalk_intr_vector_get(xtalk_intr_t xtalk_intr)
{
	return (xtalk_intr->xi_vector);
}

iopaddr_t
xtalk_intr_addr_get(struct xtalk_intr_s *xtalk_intr)
{
	return (xtalk_intr->xi_addr);
}

void *
xtalk_intr_sfarg_get(xtalk_intr_t xtalk_intr)
{
	return (xtalk_intr->xi_sfarg);
}

/****** Generic crosstalk pio interfaces ******/

/* Plain field accessors for the piomap. */
devfs_handle_t
xtalk_pio_dev_get(xtalk_piomap_t xtalk_piomap)
{
	return (xtalk_piomap->xp_dev);
}

xwidgetnum_t
xtalk_pio_target_get(xtalk_piomap_t xtalk_piomap)
{
	return (xtalk_piomap->xp_target);
}

iopaddr_t
xtalk_pio_xtalk_addr_get(xtalk_piomap_t xtalk_piomap)
{
	return (xtalk_piomap->xp_xtalk_addr);
}

ulong
xtalk_pio_mapsz_get(xtalk_piomap_t xtalk_piomap)
{
	return (xtalk_piomap->xp_mapsz);
}

caddr_t
xtalk_pio_kvaddr_get(xtalk_piomap_t xtalk_piomap)
{
	return (xtalk_piomap->xp_kvaddr);
}

/****** Generic crosstalk dma interfaces ******/

/* Plain field accessors for the dmamap. */
devfs_handle_t
xtalk_dma_dev_get(xtalk_dmamap_t xtalk_dmamap)
{
	return (xtalk_dmamap->xd_dev);
}

xwidgetnum_t
xtalk_dma_target_get(xtalk_dmamap_t xtalk_dmamap)
{
	return (xtalk_dmamap->xd_target);
}
/****** Generic crosstalk widget information interfaces ******/

/* xwidget_info_chk:
 * check to see if this vertex is a widget;
 * if so, return its widget_info (if any).
 * if not, return NULL.
 */
xwidget_info_t
xwidget_info_chk(devfs_handle_t xwidget)
{
	arbitrary_info_t ainfo = 0;

	/* ainfo stays 0 (-> NULL) if the vertex carries no XWIDGET label. */
	hwgraph_info_get_LBL(xwidget, INFO_LBL_XWIDGET, &ainfo);
	return (xwidget_info_t) ainfo;
}

/* Fetch the widget_info stored as the vertex's fastinfo; unlike
 * xwidget_info_chk() this does not verify the vertex is a widget.
 */
xwidget_info_t
xwidget_info_get(devfs_handle_t xwidget)
{
	xwidget_info_t widget_info;

	widget_info = (xwidget_info_t)
		hwgraph_fastinfo_get(xwidget);

#ifdef LATER
	if ((widget_info != NULL) &&
	    (widget_info->w_fingerprint != widget_info_fingerprint))
#ifdef SUPPORT_PRINTING_V_FORMAT
		PRINT_PANIC("%v bad xwidget_info", xwidget);
#else
		PRINT_PANIC("%x bad xwidget_info", xwidget);
#endif
#endif	/* LATER */

	return (widget_info);
}
/* Attach widget_info to a vertex as both fastinfo and the XWIDGET
 * label, so both xwidget_info_get() and xwidget_info_chk() find it.
 */
void
xwidget_info_set(devfs_handle_t xwidget, xwidget_info_t widget_info)
{
	if (widget_info != NULL)
		widget_info->w_fingerprint = widget_info_fingerprint;

	hwgraph_fastinfo_set(xwidget, (arbitrary_info_t) widget_info);

	/* Also, mark this vertex as an xwidget,
	 * and use the widget_info, so xwidget_info_chk
	 * can work (and be fairly efficient).
	 */
	hwgraph_info_add_LBL(xwidget, INFO_LBL_XWIDGET,
			     (arbitrary_info_t) widget_info);
}
/* Field accessors for xwidget_info; each panics on a NULL argument
 * rather than oopsing on the dereference.
 */
devfs_handle_t
xwidget_info_dev_get(xwidget_info_t xwidget_info)
{
	if (xwidget_info == NULL)
		panic("null xwidget_info");
	return (xwidget_info->w_vertex);
}

xwidgetnum_t
xwidget_info_id_get(xwidget_info_t xwidget_info)
{
	if (xwidget_info == NULL)
		panic("null xwidget_info");
	return (xwidget_info->w_id);
}

devfs_handle_t
xwidget_info_master_get(xwidget_info_t xwidget_info)
{
	if (xwidget_info == NULL)
		panic("null xwidget_info");
	return (xwidget_info->w_master);
}

xwidgetnum_t
xwidget_info_masterid_get(xwidget_info_t xwidget_info)
{
	if (xwidget_info == NULL)
		panic("null xwidget_info");
	return (xwidget_info->w_masterid);
}

xwidget_part_num_t
xwidget_info_part_num_get(xwidget_info_t xwidget_info)
{
	if (xwidget_info == NULL)
		panic("null xwidget_info");
	return (xwidget_info->w_hwid.part_num);
}

xwidget_mfg_num_t
xwidget_info_mfg_num_get(xwidget_info_t xwidget_info)
{
	if (xwidget_info == NULL)
		panic("null xwidget_info");
	return (xwidget_info->w_hwid.mfg_num);
}

/* Extract the widget name from the widget information
 * for the xtalk widget.
 */
char *
xwidget_info_name_get(xwidget_info_t xwidget_info)
{
	if (xwidget_info == NULL)
		panic("null xwidget info");
	return (xwidget_info->w_name);
}
/****** Generic crosstalk initialization interfaces ******/

/*
 * One-time initialization needed for systems that support crosstalk.
 */
void
xtalk_init(void)
{
	cdl_p cp;

#if DEBUG && ATTACH_DEBUG
	printf("xtalk_init\n");
#endif
	/* Allocate the registry.
	 * We might already have one.
	 * If we don't, go get one.
	 * MPness: someone might have
	 * set one up for us while we
	 * were not looking; use an atomic
	 * compare-and-swap to commit to
	 * using the new registry if and
	 * only if nobody else did first.
	 * If someone did get there first,
	 * toss the one we allocated back
	 * into the pool.
	 */
	if (xtalk_registry == NULL) {
		cp = cdl_new(EDGE_LBL_XIO, "part", "mfgr");
		if (!compare_and_swap_ptr((void **) &xtalk_registry, NULL, (void *) cp)) {
			/* Lost the race; free our registry, use the winner's. */
			cdl_del(cp);
		}
	}
	ASSERT(xtalk_registry != NULL);
}
/*
 * Associate a set of xtalk_provider functions with a vertex.
 */
void
xtalk_provider_register(devfs_handle_t provider, xtalk_provider_t *xtalk_fns)
{
	hwgraph_fastinfo_set(provider, (arbitrary_info_t) xtalk_fns);
}

/*
 * Disassociate a set of xtalk_provider functions with a vertex.
 */
void
xtalk_provider_unregister(devfs_handle_t provider)
{
	hwgraph_fastinfo_set(provider, (arbitrary_info_t)NULL);
}

/*
 * Obtain a pointer to the xtalk_provider functions for a specified Crosstalk
 * provider.
 */
xtalk_provider_t *
xtalk_provider_fns_get(devfs_handle_t provider)
{
	return ((xtalk_provider_t *) hwgraph_fastinfo_get(provider));
}
/*
 * Announce a driver for a particular crosstalk part.
 * Returns 0 on success or -1 on failure.  Failure occurs if the
 * specified hardware already has a driver.
 */
/*ARGSUSED4 */
int
xwidget_driver_register(xwidget_part_num_t part_num,
			xwidget_mfg_num_t mfg_num,
			char *driver_prefix,
			unsigned flags)
{
	/* a driver's init routine could call
	 * xwidget_driver_register before the
	 * system calls xtalk_init; so, we
	 * make the call here.
	 */
	if (xtalk_registry == NULL)
		xtalk_init();

	return cdl_add_driver(xtalk_registry,
			      part_num, mfg_num,
			      driver_prefix, flags, NULL);
}

/*
 * Inform xtalk infrastructure that a driver is no longer available for
 * handling any widgets.
 */
void
xwidget_driver_unregister(char *driver_prefix)
{
	/* before a driver calls unregister,
	 * it must have called register; so we
	 * can assume we have a registry here.
	 */
	ASSERT(xtalk_registry != NULL);

	cdl_del_driver(xtalk_registry, driver_prefix, NULL);
}

/*
 * Call some function with each vertex that
 * might be one of this driver's attach points.
 */
void
xtalk_iterate(char *driver_prefix,
	      xtalk_iter_f *func)
{
	ASSERT(xtalk_registry != NULL);

	cdl_iterate(xtalk_registry, driver_prefix, (cdl_iter_f *)func);
}
/*
 * xwidget_register:
 *	Register a xtalk device (xwidget) by doing the following.
 *	-allocate and initialize xwidget_info data
 *	-allocate a hwgraph vertex with name based on widget number (id)
 *	-look up the widget's initialization function and call it,
 *	or remember the vertex for later initialization.
 *
 *	Returns the result of cdl_add_connpt(), or -1 if the memory
 *	needed for the widget info could not be allocated.
 */
int
xwidget_register(xwidget_hwid_t hwid,	/* widget's hardware ID */
		 devfs_handle_t widget,	/* widget to initialize */
		 xwidgetnum_t id,	/* widget's target id (0..f) */
		 devfs_handle_t master,	/* widget's master vertex */
		 xwidgetnum_t targetid,	/* master's target id (9/a) */
		 async_attach_t aa)
{
	xwidget_info_t widget_info;
	char *s, devnm[MAXDEVNAME];

	/* Allocate widget_info and associate it with widget vertex */
	NEW(widget_info);
	if (widget_info == NULL)
		return -1;	/* out of memory */

	/* Initialize widget_info */
	widget_info->w_vertex = widget;
	widget_info->w_id = id;
	widget_info->w_master = master;
	widget_info->w_masterid = targetid;
	widget_info->w_hwid = *hwid;	/* structure copy */
	widget_info->w_efunc = 0;
	widget_info->w_einfo = 0;

	/*
	 * get the name of this xwidget vertex and keep the info.
	 * This is needed during errors and interrupts, but as
	 * long as we have it, we can use it elsewhere.
	 */
	s = dev_to_name(widget, devnm, MAXDEVNAME);
	widget_info->w_name = kmalloc(strlen(s) + 1, GFP_KERNEL);
	if (widget_info->w_name == NULL) {
		/* Previously the strcpy below would oops on a failed
		 * allocation; fail the registration instead.
		 */
		DEL(widget_info);
		return -1;
	}
	strcpy(widget_info->w_name, s);

	xwidget_info_set(widget, widget_info);
	device_master_set(widget, master);

	/* All the driver init routines (including
	 * xtalk_init) are called before we get into
	 * attaching devices, so we can assume we
	 * have a registry here.
	 */
	ASSERT(xtalk_registry != NULL);

	/*
	 * Add pointer to async attach info -- tear down will be done when
	 * the particular descendant is done with the info.
	 */
	if (aa)
		async_attach_add_info(widget, aa);

	return cdl_add_connpt(xtalk_registry, hwid->part_num, hwid->mfg_num,
			      widget, 0);
}
/*
 * xwidget_unregister :
 *	Unregister the xtalk device and detach all its hwgraph namespace.
 *	Returns 0 on success, 1 if no widget info was registered for
 *	the vertex.
 */
int
xwidget_unregister(devfs_handle_t widget)
{
	xwidget_info_t widget_info;
	xwidget_hwid_t hwid;

	/* Make sure that we have valid widget information initialized */
	if (!(widget_info = xwidget_info_get(widget)))
		return (1);

	/* Remove the inventory information associated
	 * with the widget.
	 */
	hwgraph_inventory_remove(widget, -1, -1, -1, -1, -1);

	hwid = &(widget_info->w_hwid);

	cdl_del_connpt(xtalk_registry, hwid->part_num, hwid->mfg_num,
		       widget, 0);

	/* Clean out the xwidget information.
	 * BUG FIX: the old code used sizeof(widget_info), which is the
	 * size of the *pointer* and only cleared the first few bytes of
	 * the structure; clear the whole thing before freeing it.
	 */
	kfree(widget_info->w_name);
	BZERO((void *) widget_info, sizeof(*widget_info));
	DEL(widget_info);

	return (0);
}
/* Register an error-handling callback (and its private arg) for the
 * widget; invoked later from xtalk_error_handler().
 */
void
xwidget_error_register(devfs_handle_t xwidget,
		       error_handler_f *efunc,
		       error_handler_arg_t einfo)
{
	xwidget_info_t xwidget_info;

	xwidget_info = xwidget_info_get(xwidget);
	ASSERT(xwidget_info != NULL);
	xwidget_info->w_efunc = efunc;
	xwidget_info->w_einfo = einfo;
}
/*
 * Issue a link reset to a widget.
 */
void
xwidget_reset(devfs_handle_t xwidget)
{
	xswitch_reset_link(xwidget);
}

/* Reset a graphics widget: reset its link, then ask the provider to
 * reset the widget itself.
 * NOTE(review): 'xtalk_provider' in the DEV_FUNC() call below is not a
 * local variable or parameter of this function -- confirm the intended
 * vertex (probably the widget's master) and that this compiles on the
 * configurations that build it.
 */
void
xwidget_gfx_reset(devfs_handle_t xwidget)
{
	xwidget_info_t info;

	xswitch_reset_link(xwidget);
	info = xwidget_info_get(xwidget);
#ifdef LATER
	ASSERT_ALWAYS(info != NULL);
#endif
	/*
	 * Enable this for other architectures once we add widget_reset to the
	 * xtalk provider interface.
	 */
	DEV_FUNC(xtalk_provider, widget_reset)
		(xwidget_info_master_get(info), xwidget_info_id_get(info));
}
#define ANON_XWIDGET_NAME	"No Name"	/* Default Widget Name */

/* Get the canonical hwgraph name of xtalk widget */
char *
xwidget_name_get(devfs_handle_t xwidget_vhdl)
{
	xwidget_info_t info;

	/* If we have a bogus widget handle then return
	 * a default anonymous widget name.
	 */
	if (xwidget_vhdl == GRAPH_VERTEX_NONE)
		return (ANON_XWIDGET_NAME);

	/* Read the widget name stored in the widget info
	 * for the widget setup during widget initialization.
	 */
	info = xwidget_info_get(xwidget_vhdl);
	ASSERT(info != NULL);
	return (xwidget_info_name_get(info));
}
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <asm/io.h>
#include <linux/module.h>
extern void * sn_io_addr(unsigned long port); /* defined in sn[12]/iomv.c */
/**
 * sn_inb - read a byte from a port
 * @port: port to read from
 *
 * Reads a byte from @port and returns it to the caller.
 */
unsigned int
sn_inb (unsigned long port)
{
	volatile unsigned char *addr = sn_io_addr(port);
	unsigned char ret;

	ret = *addr;
	__ia64_mf_a();	/* memory-acceptance fence (ia64 mf.a) after the read */
	return ret;
}

/**
 * sn_inw - read a 16-bit word from a port
 * @port: port to read from
 *
 * Reads a 16-bit word from @port and returns it to the caller.
 */
unsigned int
sn_inw (unsigned long port)
{
	volatile unsigned short *addr = sn_io_addr(port);
	unsigned short ret;

	ret = *addr;
	__ia64_mf_a();	/* memory-acceptance fence (ia64 mf.a) after the read */
	return ret;
}

/**
 * sn_inl - read a 32-bit word from a port
 * @port: port to read from
 *
 * Reads a 32-bit word from @port and returns it to the caller.
 */
unsigned int
sn_inl (unsigned long port)
{
	volatile unsigned int *addr = sn_io_addr(port);
	unsigned int ret;

	ret = *addr;
	__ia64_mf_a();	/* memory-acceptance fence (ia64 mf.a) after the read */
	return ret;
}

/**
 * sn_outb - write a byte to a port
 * @port: port to write to
 * @val: value to write
 *
 * Writes @val to @port.  Note: unlike the reads above, no fence is
 * issued here -- presumably posted-write semantics are acceptable;
 * confirm against the machvec I/O contract.
 */
void
sn_outb (unsigned char val, unsigned long port)
{
	volatile unsigned char *addr = sn_io_addr(port);

	*addr = val;
}

/**
 * sn_outw - write a 16-bit word to a port
 * @port: port to write to
 * @val: value to write
 *
 * Writes @val to @port.
 */
void
sn_outw (unsigned short val, unsigned long port)
{
	volatile unsigned short *addr = sn_io_addr(port);

	*addr = val;
}

/**
 * sn_outl - write a 32-bit word to a port
 * @port: port to write to
 * @val: value to write
 *
 * Writes @val to @port.
 */
void
sn_outl (unsigned int val, unsigned long port)
{
	volatile unsigned int *addr = sn_io_addr(port);

	*addr = val;
}

EXPORT_SYMBOL(sn_inb);
EXPORT_SYMBOL(sn_inw);
EXPORT_SYMBOL(sn_inl);
EXPORT_SYMBOL(sn_outb);
EXPORT_SYMBOL(sn_outw);
EXPORT_SYMBOL(sn_outl);
...@@ -40,7 +40,10 @@ ...@@ -40,7 +40,10 @@
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/irq.h> #include <linux/irq.h>
#include <linux/smp_lock.h> #include <linux/smp_lock.h>
#include <linux/acpi.h>
#ifdef CONFIG_KDB
#include <linux/kdb.h> #include <linux/kdb.h>
#endif
#include <asm/machvec.h> #include <asm/machvec.h>
#include <asm/page.h> #include <asm/page.h>
...@@ -53,7 +56,6 @@ ...@@ -53,7 +56,6 @@
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/hw_irq.h> #include <asm/hw_irq.h>
#include <asm/acpi-ext.h>
#include <asm/smp.h> #include <asm/smp.h>
#include <asm/sn/sn_cpuid.h> #include <asm/sn/sn_cpuid.h>
......
...@@ -267,7 +267,7 @@ sn_setup(char **cmdline_p) ...@@ -267,7 +267,7 @@ sn_setup(char **cmdline_p)
/* PROM has wrong value on SN1 */ /* PROM has wrong value on SN1 */
sn_rtc_cycles_per_second = 990177; sn_rtc_cycles_per_second = 990177;
#endif #endif
sn_rtc_usec_per_cyc = ((1000000UL<<IA64_USEC_PER_CYC_SHIFT) sn_rtc_usec_per_cyc = ((1000000000UL<<IA64_NSEC_PER_CYC_SHIFT)
+ sn_rtc_cycles_per_second/2) / sn_rtc_cycles_per_second; + sn_rtc_cycles_per_second/2) / sn_rtc_cycles_per_second;
for (i=0;i<NR_CPUS;i++) for (i=0;i<NR_CPUS;i++)
......
...@@ -32,5 +32,7 @@ ...@@ -32,5 +32,7 @@
# http://oss.sgi.com/projects/GenInfo/NoticeExplan # http://oss.sgi.com/projects/GenInfo/NoticeExplan
# #
EXTRA_CFLAGS := -DLITTLE_ENDIAN EXTRA_CFLAGS := -DLITTLE_ENDIAN
obj-y += cache.o iomv.o ptc_deadlock.o sn2_smp.o \
sn_proc_fs.o
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <asm/sn/sn2/shub_mmr.h>

#define ZEROVAL		0x3f	// "zero" value for outstanding PIO requests
#define DEADLOCKBIT	SH_PIO_WRITE_STATUS_0_WRITE_DEADLOCK_SHFT
#define WRITECOUNT	SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_SHFT
#define	ALIAS_OFFSET	(SH_PIO_WRITE_STATUS_0_ALIAS-SH_PIO_WRITE_STATUS_0)

// sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piowc)
//	Write data0 to [ptc0] and (if no deadlock was seen) data1 to
//	[ptc1], spinning on the PIO write-status register [piowc] after
//	each write until the pending-write count reaches ZEROVAL.  The
//	PTC stores are done in physical mode with interrupts and
//	translation disabled; the whole sequence repeats from the top
//	if the DEADLOCK bit is still set afterwards.
	.global sn2_ptc_deadlock_recovery_core
	.proc  	sn2_ptc_deadlock_recovery_core

sn2_ptc_deadlock_recovery_core:
	.regstk 5,0,0,0

	ptc0  	 = in0
	data0 	 = in1
	ptc1  	 = in2
	data1 	 = in3
	piowc 	 = in4
	piowcphy = r30
	psrsave  = r2
	zeroval  = r3
	scr1	 = r16
	scr2	 = r17

	extr.u	piowcphy=piowc,0,61;;	// Convert piowc to uncached physical address
	dep	piowcphy=-1,piowcphy,63,1

	mov	zeroval=ZEROVAL		// "zero" value for PIO write count

1:
	add	scr2=ALIAS_OFFSET,piowc	// Address of WRITE_STATUS alias register
	mov	scr1=7;;		// Clear DEADLOCK, WRITE_ERROR, MULTI_WRITE_ERROR
	st8.rel	[scr2]=scr1;;

5:	ld8.acq	scr1=[piowc];;		// Wait for PIOs to complete.
	extr.u	scr2=scr1,WRITECOUNT,7;;// PIO count
	cmp.ne	p6,p0=zeroval,scr2
(p6)	br.cond.sptk 5b

	////////////// BEGIN PHYSICAL MODE ////////////////////
	mov psrsave=psr			// Disable IC (no PMIs)
	rsm psr.i | psr.dt | psr.ic;;
	srlz.i;;

	st8.rel [ptc0]=data0		// Write PTC0 & wait for completion.

5:	ld8.acq	scr1=[piowcphy];;	// Wait for PIOs to complete.
	extr.u	scr2=scr1,WRITECOUNT,7;;// PIO count
	cmp.ne	p6,p0=zeroval,scr2
(p6)	br.cond.sptk 5b;;

	tbit.nz	p8,p7=scr1,DEADLOCKBIT;;// Test for DEADLOCK
(p7)	st8.rel [ptc1]=data1;;		// Now write PTC1.

5:	ld8.acq	scr1=[piowcphy];;	// Wait for PIOs to complete.
	extr.u	scr2=scr1,WRITECOUNT,7;;// PIO count
	cmp.ne	p6,p0=zeroval,scr2
(p6)	br.cond.sptk 5b

	tbit.nz	p8,p0=scr1,DEADLOCKBIT;;// Test for DEADLOCK

	mov psr.l=psrsave;;		// Reenable IC
	srlz.i;;
	////////////// END   PHYSICAL MODE ////////////////////

(p8)	br.cond.spnt 1b;;		// Repeat if DEADLOCK occurred.

	br.ret.sptk	rp

	.endp sn2_ptc_deadlock_recovery_core
/*
*
* Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
* Mountain View, CA 94043, or:
*
* http://www.sgi.com
*
* For further information regarding this notice, see:
*
* http://oss.sgi.com/projects/GenInfo/NoticeExplan
*/
#include <linux/config.h>

#ifdef CONFIG_PROC_FS
#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <asm/uaccess.h>
#include <asm/sn/sn_sal.h>
/* /proc read handler: report this node's partition id. */
static int partition_id_read_proc(char *page, char **start, off_t off,
		int count, int *eof, void *data) {
	return sprintf(page, "%d\n", sn_local_partid());
}

/* Parent directory (/proc/sgi_sn) shared by all entries below. */
struct proc_dir_entry * sgi_proc_dir = NULL;

/* Create /proc/sgi_sn/partition_id (and the sgi_sn dir if needed). */
void
register_sn_partition_id(void) {
	struct proc_dir_entry *entry;

	if (!sgi_proc_dir) {
		sgi_proc_dir = proc_mkdir("sgi_sn", 0);
	}
	entry = create_proc_entry("partition_id", 0444, sgi_proc_dir);
	if (entry) {
		entry->nlink = 1;
		entry->data = 0;
		entry->read_proc = partition_id_read_proc;
		entry->write_proc = NULL;
	}
}
/* /proc read handler: report the system serial number string. */
static int
system_serial_number_read_proc(char *page, char **start, off_t off,
		int count, int *eof, void *data) {
	return sprintf(page, "%s\n", sn_system_serial_number());
}

/* /proc read handler: report the partition serial number (license id). */
static int
licenseID_read_proc(char *page, char **start, off_t off,
		int count, int *eof, void *data) {
	return sprintf(page, "0x%lx\n",sn_partition_serial_number_val());
}

/* Create /proc/sgi_sn/{system_serial_number,licenseID}. */
void
register_sn_serial_numbers(void) {
	struct proc_dir_entry *entry;

	if (!sgi_proc_dir) {
		sgi_proc_dir = proc_mkdir("sgi_sn", 0);
	}
	entry = create_proc_entry("system_serial_number", 0444, sgi_proc_dir);
	if (entry) {
		entry->nlink = 1;
		entry->data = 0;
		entry->read_proc = system_serial_number_read_proc;
		entry->write_proc = NULL;
	}
	entry = create_proc_entry("licenseID", 0444, sgi_proc_dir);
	if (entry) {
		entry->nlink = 1;
		entry->data = 0;
		entry->read_proc = licenseID_read_proc;
		entry->write_proc = NULL;
	}
}
// Disable forced interrupts, but leave the code in, just in case.
int sn_force_interrupt_flag = 0;

/* /proc read handler: report whether forced interrupts are enabled. */
static int
sn_force_interrupt_read_proc(char *page, char **start, off_t off,
		int count, int *eof, void *data) {
	if (sn_force_interrupt_flag) {
		return sprintf(page, "Force interrupt is enabled\n");
	}
	return sprintf(page, "Force interrupt is disabled\n");
}
static int
sn_force_interrupt_write_proc(struct file *file, const char *buffer,
unsigned long count, void *data)
{
if (*buffer == '0') {
sn_force_interrupt_flag = 0;
} else {
sn_force_interrupt_flag = 1;
}
return 1;
}
/* Create /proc/sgi_sn/sn_force_interrupt (read/write).
 * NOTE(review): mode is 0444 although a write handler is installed --
 * confirm whether 0644 was intended.
 */
void
register_sn_force_interrupt(void) {
	struct proc_dir_entry *entry;

	if (!sgi_proc_dir) {
		sgi_proc_dir = proc_mkdir("sgi_sn", 0);
	}
	entry = create_proc_entry("sn_force_interrupt",0444, sgi_proc_dir);
	if (entry) {
		entry->nlink = 1;
		entry->data = 0;
		entry->read_proc = sn_force_interrupt_read_proc;
		entry->write_proc = sn_force_interrupt_write_proc;
	}
}

/* Top-level hook: create every /proc/sgi_sn entry. */
void
register_sn_procfs(void) {
	register_sn_partition_id();
	register_sn_serial_numbers();
	register_sn_force_interrupt();
}
/* $Id$
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef __SYS_GEO_H__
#define __SYS_GEO_H__

/* Include a platform-specific geo.h. It must define at least:
 *   geoid_t:		Geographic identifier data type
 *   geo_type_t:	Data type for the kind of geoid this is
 *   GEO_TYPE_xxx:	Values for geo_type_t vars, eg. GEO_TYPE_NODE
 *   GEO_MAX_LEN:	The maximum length of a geoid, formatted for printing
 */
#include <linux/config.h>

#ifdef CONFIG_IA64_SGI_SN2
#include <asm/sn/sn2/geo.h>
#else
#error <<BOMB! need geo.h for this platform >>
#endif /* !SN2 && ... */

/* Declarations applicable to all platforms */

/* parameter for hwcfg_format_geoid() */
#define GEO_FORMAT_HWGRAPH	1
#define GEO_FORMAT_BRIEF	2

/* (the parameter for hwcfg_format_geoid_compt() is defined in the
 * platform-specific geo.h file) */

/* Routines for manipulating geoid_t values */

extern moduleid_t geo_module(geoid_t g);	/* module component of g */
extern slabid_t geo_slab(geoid_t g);		/* slab component of g */
extern geo_type_t geo_type(geoid_t g);		/* kind of geoid g is */
extern int geo_valid(geoid_t g);		/* nonzero if g is well-formed */
extern int geo_cmp(geoid_t g0, geoid_t g1);	/* compare two geoids */
extern geoid_t geo_new(geo_type_t type, ...);	/* construct a geoid */

extern geoid_t hwcfg_parse_geoid(char *buffer);
extern void hwcfg_format_geoid(char *buffer, geoid_t m, int fmt);
extern void hwcfg_format_geoid_compt(char *buffer, geoid_t m, int compt);
extern geoid_t hwcfg_geo_get_self(geo_type_t type);
extern geoid_t hwcfg_geo_get_by_nasid(geo_type_t type, nasid_t nasid);

#endif /* __SYS_GEO_H__ */
/*
* Copyright (c) 2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
* Mountain View, CA 94043, or:
*
* http://www.sgi.com
*
* For further information regarding this notice, see:
*
* http://oss.sgi.com/projects/GenInfo/NoticeExplan
*/
/* Persistent PCI bus-numbering file kept on the EFI boot partition
 * (presumably read/written by the ioconfig_bus driver -- confirm
 * against the rest of ioconfig_bus.c). */
#define IOCONFIG_PCIBUS "/boot/efi/ioconfig_pcibus"
/* Lines starting with this character in the file are comments. */
#define POUND_CHAR '#'
#define MAX_LINE_LEN 128	/* max length of one line in the file */
#define MAXPATHLEN 128		/* max hwgraph/module path length */
/*
 * Parameter block exchanged with the ioconfig_bus driver.
 * NOTE(review): field meanings below are inferred from the names --
 * confirm against the driver's ioctl/read handlers.
 */
struct ioconfig_parm {
	unsigned long ioconfig_activated;	/* nonzero once ioconfig is in effect */
	unsigned long number;			/* number of entries in 'buffer' */
	void *buffer;				/* caller-supplied data buffer */
};
/* Fixed-width (8 byte) ASCII module identifier. */
struct ascii_moduleid{
	unsigned char io_moduleid[8]; /* pci path name */
};
...@@ -273,6 +273,9 @@ struct module_s { ...@@ -273,6 +273,9 @@ struct module_s {
cnodeid_t nodes[MODULE_MAX_NODES]; cnodeid_t nodes[MODULE_MAX_NODES];
#ifdef CONFIG_IA64_SGI_SN2 #ifdef CONFIG_IA64_SGI_SN2
geoid_t geoid[MODULE_MAX_NODES]; geoid_t geoid[MODULE_MAX_NODES];
struct {
char moduleid[8];
} io[MODULE_MAX_NODES];
#endif #endif
int nodecnt; /* Number of nodes in array */ int nodecnt; /* Number of nodes in array */
......
/* $Id$
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_SN_PCI_PIC_H
#define _ASM_SN_PCI_PIC_H
/*
* The PIC ASIC is a follow-on to the Bridge and Xbridge ASICs.
* It shares many of the same registers as those chips and therefore
* the primary structure for the PIC will be bridge_s as defined
* in irix/kern/sys/PCI/bridge.h. This file is intended as a complement
* to bridge.h, which includes this file.
*/
/*
* PIC AS DEVICE ZERO
* ------------------
*
* PIC handles PCI/X busses. PCI/X requires that the 'bridge' (i.e. PIC)
* be designated as 'device 0'. That is a departure from earlier SGI
* PCI bridges. Because of that we use config space 1 to access the
* config space of the first actual PCI device on the bus.
* Here's what the PIC manual says:
*
* The current PCI-X bus specification now defines that the parent
* hosts bus bridge (PIC for example) must be device 0 on bus 0. PIC
* reduced the total number of devices from 8 to 4 and removed the
* device registers and windows, now only supporting devices 0,1,2, and
* 3. PIC did leave all 8 configuration space windows. The reason was
* there was nothing to gain by removing them. Here in lies the problem.
* The device numbering we do using 0 through 3 is unrelated to the device
* numbering which PCI-X requires in configuration space. In the past we
* correlated Configs pace and our device space 0 <-> 0, 1 <-> 1, etc.
* PCI-X requires we start a 1, not 0 and currently the PX brick
* does associate our:
*
* device 0 with configuration space window 1,
* device 1 with configuration space window 2,
* device 2 with configuration space window 3,
* device 3 with configuration space window 4.
*
* The net effect is that all config space access are off-by-one with
* relation to other per-slot accesses on the PIC.
* Here is a table that shows some of that:
*
* Internal Slot#
* |
* | 0 1 2 3
* ----------|---------------------------------------
* config | 0x21000 0x22000 0x23000 0x24000
* |
* even rrb | 0[0] n/a 1[0] n/a [] == implied even/odd
* |
* odd rrb | n/a 0[1] n/a 1[1]
* |
* int dev | 00 01 10 11
* |
* ext slot# | 1 2 3 4
* ----------|---------------------------------------
*/
#ifndef __ASSEMBLY__
#ifdef __cplusplus
extern "C" {
#endif
// #include <sys/types.h>
#include <asm/sn/pci/pciio.h>
/*********************************************************************
* bus provider function table
*
* Normally, this table is only handed off explicitly
* during provider initialization, and the PCI generic
* layer will stash a pointer to it in the vertex; however,
* exporting it explicitly enables a performance hack in
* the generic PCI provider where if we know at compile
* time that the only possible PCI provider is a
* pcibr, we can go directly to this ops table.
*/
extern pciio_provider_t pci_pic_provider;
/*********************************************************************
* misc defines
*
*/
#define PIC_WIDGET_PART_NUM_BUS0	0xd102
#define PIC_WIDGET_PART_NUM_BUS1	0xd112
#define PIC_WIDGET_MFGR_NUM		0x24
#define PIC_WIDGET_REV_A		0x1
/*
 * True iff 'rev' -- (part number << 4) | revision -- identifies a
 * rev A PIC on either bus.
 *
 * The macro argument is parenthesized: the previous definition
 * expanded the bare argument into 'rev == (...)', so a caller passing
 * an expression such as 'part | rev_bits' was mis-parsed, because
 * '==' binds tighter than '|' and '&'.
 */
#define IS_PIC_PART_REV_A(rev) \
	(((rev) == (PIC_WIDGET_PART_NUM_BUS0 << 4 | PIC_WIDGET_REV_A)) || \
	 ((rev) == (PIC_WIDGET_PART_NUM_BUS1 << 4 | PIC_WIDGET_REV_A)))
/*********************************************************************
* register offset defines
*
*/
/* Identification Register -- read-only */
#define PIC_IDENTIFICATION 0x00000000
/* Status Register -- read-only */
#define PIC_STATUS 0x00000008
/* Upper Address Holding Register Bus Side Errors -- read-only */
#define PIC_UPPER_ADDR_REG_BUS_SIDE_ERRS 0x00000010
/* Lower Address Holding Register Bus Side Errors -- read-only */
#define PIC_LOWER_ADDR_REG_BUS_SIDE_ERRS 0x00000018
/* Control Register -- read/write */
#define PIC_CONTROL 0x00000020
/* PCI Request Time-out Value Register -- read/write */
#define PIC_PCI_REQ_TIME_OUT_VALUE 0x00000028
/* Interrupt Destination Upper Address Register -- read/write */
#define PIC_INTR_DEST_UPPER_ADDR 0x00000030
/* Interrupt Destination Lower Address Register -- read/write */
#define PIC_INTR_DEST_LOWER_ADDR 0x00000038
/* Command Word Holding Register Bus Side -- read-only */
#define PIC_CMD_WORD_REG_BUS_SIDE 0x00000040
/* LLP Configuration Register (Bus 0 Only) -- read/write */
/*
 * NOTE(review): as written this is a *function-like* macro named
 * "PIC_LLP_CFG_REG_" taking a parameter "BUS_0_ONLY" that it ignores;
 * it can only be referenced as PIC_LLP_CFG_REG_(anything).  The name
 * was almost certainly meant to be PIC_LLP_CFG_REG_BUS_0_ONLY --
 * verify all call sites before renaming.
 */
#define PIC_LLP_CFG_REG_(BUS_0_ONLY) 0x00000048
/* PCI Target Flush Register -- read-only */
#define PIC_PCI_TARGET_FLUSH 0x00000050
/* Command Word Holding Register Link Side -- read-only */
#define PIC_CMD_WORD_REG_LINK_SIDE 0x00000058
/* Response Buffer Error Upper Address Holding -- read-only */
#define PIC_RESP_BUF_ERR_UPPER_ADDR_ 0x00000060
/* Response Buffer Error Lower Address Holding -- read-only */
#define PIC_RESP_BUF_ERR_LOWER_ADDR_ 0x00000068
/* Test Pin Control Register -- read/write */
#define PIC_TEST_PIN_CONTROL 0x00000070
/* Address Holding Register Link Side Errors -- read-only */
#define PIC_ADDR_REG_LINK_SIDE_ERRS 0x00000078
/* Direct Map Register -- read/write */
#define PIC_DIRECT_MAP 0x00000080
/* PCI Map Fault Address Register -- read-only */
#define PIC_PCI_MAP_FAULT_ADDR 0x00000090
/* Arbitration Priority Register -- read/write */
#define PIC_ARBITRATION_PRIORITY 0x000000A0
/* Internal Ram Parity Error Register -- read-only */
#define PIC_INTERNAL_RAM_PARITY_ERR 0x000000B0
/* PCI Time-out Register -- read/write */
#define PIC_PCI_TIME_OUT 0x000000C0
/* PCI Type 1 Configuration Register -- read/write */
#define PIC_PCI_TYPE_1_CFG 0x000000C8
/* PCI Bus Error Upper Address Holding Register -- read-only */
#define PIC_PCI_BUS_ERR_UPPER_ADDR_ 0x000000D0
/* PCI Bus Error Lower Address Holding Register -- read-only */
#define PIC_PCI_BUS_ERR_LOWER_ADDR_ 0x000000D8
/* PCIX Error Address Register -- read-only */
#define PIC_PCIX_ERR_ADDR 0x000000E0
/* PCIX Error Attribute Register -- read-only */
#define PIC_PCIX_ERR_ATTRIBUTE 0x000000E8
/* PCIX Error Data Register -- read-only */
#define PIC_PCIX_ERR_DATA 0x000000F0
/* PCIX Read Request Timeout Error Register -- read-only */
#define PIC_PCIX_READ_REQ_TIMEOUT_ERR 0x000000F8
/* Interrupt Status Register -- read-only */
#define PIC_INTR_STATUS 0x00000100
/* Interrupt Enable Register -- read/write */
#define PIC_INTR_ENABLE 0x00000108
/* Reset Interrupt Status Register -- write-only */
#define PIC_RESET_INTR_STATUS 0x00000110
/* Interrupt Mode Register -- read/write */
#define PIC_INTR_MODE 0x00000118
/* Interrupt Device Register -- read/write */
#define PIC_INTR_DEVICE 0x00000120
/* Host Error Field Register -- read/write */
#define PIC_HOST_ERR_FIELD 0x00000128
/* Interrupt Pin 0 Host Address Register -- read/write */
#define PIC_INTR_PIN_0_HOST_ADDR 0x00000130
/* Interrupt Pin 1 Host Address Register -- read/write */
#define PIC_INTR_PIN_1_HOST_ADDR 0x00000138
/* Interrupt Pin 2 Host Address Register -- read/write */
#define PIC_INTR_PIN_2_HOST_ADDR 0x00000140
/* Interrupt Pin 3 Host Address Register -- read/write */
#define PIC_INTR_PIN_3_HOST_ADDR 0x00000148
/* Interrupt Pin 4 Host Address Register -- read/write */
#define PIC_INTR_PIN_4_HOST_ADDR 0x00000150
/* Interrupt Pin 5 Host Address Register -- read/write */
#define PIC_INTR_PIN_5_HOST_ADDR 0x00000158
/* Interrupt Pin 6 Host Address Register -- read/write */
#define PIC_INTR_PIN_6_HOST_ADDR 0x00000160
/* Interrupt Pin 7 Host Address Register -- read/write */
#define PIC_INTR_PIN_7_HOST_ADDR 0x00000168
/* Error Interrupt View Register -- read-only */
#define PIC_ERR_INTR_VIEW 0x00000170
/* Multiple Interrupt Register -- read-only */
#define PIC_MULTIPLE_INTR 0x00000178
/* Force Always Interrupt 0 Register -- write-only */
#define PIC_FORCE_ALWAYS_INTR_0 0x00000180
/* Force Always Interrupt 1 Register -- write-only */
#define PIC_FORCE_ALWAYS_INTR_1 0x00000188
/* Force Always Interrupt 2 Register -- write-only */
#define PIC_FORCE_ALWAYS_INTR_2 0x00000190
/* Force Always Interrupt 3 Register -- write-only */
#define PIC_FORCE_ALWAYS_INTR_3 0x00000198
/* Force Always Interrupt 4 Register -- write-only */
#define PIC_FORCE_ALWAYS_INTR_4 0x000001A0
/* Force Always Interrupt 5 Register -- write-only */
#define PIC_FORCE_ALWAYS_INTR_5 0x000001A8
/* Force Always Interrupt 6 Register -- write-only */
#define PIC_FORCE_ALWAYS_INTR_6 0x000001B0
/* Force Always Interrupt 7 Register -- write-only */
#define PIC_FORCE_ALWAYS_INTR_7 0x000001B8
/* Force w/Pin Interrupt 0 Register -- write-only */
#define PIC_FORCE_PIN_INTR_0 0x000001C0
/* Force w/Pin Interrupt 1 Register -- write-only */
#define PIC_FORCE_PIN_INTR_1 0x000001C8
/* Force w/Pin Interrupt 2 Register -- write-only */
#define PIC_FORCE_PIN_INTR_2 0x000001D0
/* Force w/Pin Interrupt 3 Register -- write-only */
#define PIC_FORCE_PIN_INTR_3 0x000001D8
/* Force w/Pin Interrupt 4 Register -- write-only */
#define PIC_FORCE_PIN_INTR_4 0x000001E0
/* Force w/Pin Interrupt 5 Register -- write-only */
#define PIC_FORCE_PIN_INTR_5 0x000001E8
/* Force w/Pin Interrupt 6 Register -- write-only */
#define PIC_FORCE_PIN_INTR_6 0x000001F0
/* Force w/Pin Interrupt 7 Register -- write-only */
#define PIC_FORCE_PIN_INTR_7 0x000001F8
/* Device 0 Register -- read/write */
#define PIC_DEVICE_0 0x00000200
/* Device 1 Register -- read/write */
#define PIC_DEVICE_1 0x00000208
/* Device 2 Register -- read/write */
#define PIC_DEVICE_2 0x00000210
/* Device 3 Register -- read/write */
#define PIC_DEVICE_3 0x00000218
/* Device 0 Write Request Buffer Register -- read-only */
#define PIC_DEVICE_0_WRITE_REQ_BUF 0x00000240
/* Device 1 Write Request Buffer Register -- read-only */
#define PIC_DEVICE_1_WRITE_REQ_BUF 0x00000248
/* Device 2 Write Request Buffer Register -- read-only */
#define PIC_DEVICE_2_WRITE_REQ_BUF 0x00000250
/* Device 3 Write Request Buffer Register -- read-only */
#define PIC_DEVICE_3_WRITE_REQ_BUF 0x00000258
/* Even Device Response Buffer Register -- read/write */
#define PIC_EVEN_DEVICE_RESP_BUF 0x00000280
/* Odd Device Response Buffer Register -- read/write */
#define PIC_ODD_DEVICE_RESP_BUF 0x00000288
/* Read Response Buffer Status Register -- read-only */
#define PIC_READ_RESP_BUF_STATUS 0x00000290
/* Read Response Buffer Clear Register -- write-only */
#define PIC_READ_RESP_BUF_CLEAR 0x00000298
/* PCI RR 0 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_0_UPPER_ADDR_MATCH 0x00000300
/* PCI RR 0 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_0_LOWER_ADDR_MATCH 0x00000308
/* PCI RR 1 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_1_UPPER_ADDR_MATCH 0x00000310
/* PCI RR 1 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_1_LOWER_ADDR_MATCH 0x00000318
/* PCI RR 2 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_2_UPPER_ADDR_MATCH 0x00000320
/* PCI RR 2 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_2_LOWER_ADDR_MATCH 0x00000328
/* PCI RR 3 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_3_UPPER_ADDR_MATCH 0x00000330
/* PCI RR 3 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_3_LOWER_ADDR_MATCH 0x00000338
/* PCI RR 4 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_4_UPPER_ADDR_MATCH 0x00000340
/* PCI RR 4 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_4_LOWER_ADDR_MATCH 0x00000348
/* PCI RR 5 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_5_UPPER_ADDR_MATCH 0x00000350
/* PCI RR 5 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_5_LOWER_ADDR_MATCH 0x00000358
/* PCI RR 6 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_6_UPPER_ADDR_MATCH 0x00000360
/* PCI RR 6 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_6_LOWER_ADDR_MATCH 0x00000368
/* PCI RR 7 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_7_UPPER_ADDR_MATCH 0x00000370
/* PCI RR 7 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_7_LOWER_ADDR_MATCH 0x00000378
/* PCI RR 8 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_8_UPPER_ADDR_MATCH 0x00000380
/* PCI RR 8 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_8_LOWER_ADDR_MATCH 0x00000388
/* PCI RR 9 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_9_UPPER_ADDR_MATCH 0x00000390
/* PCI RR 9 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_9_LOWER_ADDR_MATCH 0x00000398
/* PCI RR 10 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_10_UPPER_ADDR_MATCH 0x000003A0
/* PCI RR 10 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_10_LOWER_ADDR_MATCH 0x000003A8
/* PCI RR 11 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_11_UPPER_ADDR_MATCH 0x000003B0
/* PCI RR 11 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_11_LOWER_ADDR_MATCH 0x000003B8
/* PCI RR 12 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_12_UPPER_ADDR_MATCH 0x000003C0
/* PCI RR 12 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_12_LOWER_ADDR_MATCH 0x000003C8
/* PCI RR 13 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_13_UPPER_ADDR_MATCH 0x000003D0
/* PCI RR 13 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_13_LOWER_ADDR_MATCH 0x000003D8
/* PCI RR 14 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_14_UPPER_ADDR_MATCH 0x000003E0
/* PCI RR 14 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_14_LOWER_ADDR_MATCH 0x000003E8
/* PCI RR 15 Upper Address Match Register -- read-only */
#define PIC_PCI_RR_15_UPPER_ADDR_MATCH 0x000003F0
/* PCI RR 15 Lower Address Match Register -- read-only */
#define PIC_PCI_RR_15_LOWER_ADDR_MATCH 0x000003F8
/* Buffer 0 Flush Count with Data Touch Register -- read/write */
#define PIC_BUF_0_FLUSH_CNT_WITH_DATA_TOUCH 0x00000400
/* Buffer 0 Flush Count w/o Data Touch Register -- read/write */
#define PIC_BUF_0_FLUSH_CNT_W_O_DATA_TOUCH 0x00000408
/* Buffer 0 Request in Flight Count Register -- read/write */
#define PIC_BUF_0_REQ_IN_FLIGHT_CNT 0x00000410
/* Buffer 0 Prefetch Request Count Register -- read/write */
#define PIC_BUF_0_PREFETCH_REQ_CNT 0x00000418
/* Buffer 0 Total PCI Retry Count Register -- read/write */
#define PIC_BUF_0_TOTAL_PCI_RETRY_CNT 0x00000420
/* Buffer 0 Max PCI Retry Count Register -- read/write */
#define PIC_BUF_0_MAX_PCI_RETRY_CNT 0x00000428
/* Buffer 0 Max Latency Count Register -- read/write */
#define PIC_BUF_0_MAX_LATENCY_CNT 0x00000430
/* Buffer 0 Clear All Register -- read/write */
#define PIC_BUF_0_CLEAR_ALL 0x00000438
/* Buffer 2 Flush Count with Data Touch Register -- read/write */
#define PIC_BUF_2_FLUSH_CNT_WITH_DATA_TOUCH 0x00000440
/* Buffer 2 Flush Count w/o Data Touch Register -- read/write */
#define PIC_BUF_2_FLUSH_CNT_W_O_DATA_TOUCH 0x00000448
/* Buffer 2 Request in Flight Count Register -- read/write */
#define PIC_BUF_2_REQ_IN_FLIGHT_CNT 0x00000450
/* Buffer 2 Prefetch Request Count Register -- read/write */
#define PIC_BUF_2_PREFETCH_REQ_CNT 0x00000458
/* Buffer 2 Total PCI Retry Count Register -- read/write */
#define PIC_BUF_2_TOTAL_PCI_RETRY_CNT 0x00000460
/* Buffer 2 Max PCI Retry Count Register -- read/write */
#define PIC_BUF_2_MAX_PCI_RETRY_CNT 0x00000468
/* Buffer 2 Max Latency Count Register -- read/write */
#define PIC_BUF_2_MAX_LATENCY_CNT 0x00000470
/* Buffer 2 Clear All Register -- read/write */
#define PIC_BUF_2_CLEAR_ALL 0x00000478
/* Buffer 4 Flush Count with Data Touch Register -- read/write */
#define PIC_BUF_4_FLUSH_CNT_WITH_DATA_TOUCH 0x00000480
/* Buffer 4 Flush Count w/o Data Touch Register -- read/write */
#define PIC_BUF_4_FLUSH_CNT_W_O_DATA_TOUCH 0x00000488
/* Buffer 4 Request in Flight Count Register -- read/write */
#define PIC_BUF_4_REQ_IN_FLIGHT_CNT 0x00000490
/* Buffer 4 Prefetch Request Count Register -- read/write */
#define PIC_BUF_4_PREFETCH_REQ_CNT 0x00000498
/* Buffer 4 Total PCI Retry Count Register -- read/write */
#define PIC_BUF_4_TOTAL_PCI_RETRY_CNT 0x000004A0
/* Buffer 4 Max PCI Retry Count Register -- read/write */
#define PIC_BUF_4_MAX_PCI_RETRY_CNT 0x000004A8
/* Buffer 4 Max Latency Count Register -- read/write */
#define PIC_BUF_4_MAX_LATENCY_CNT 0x000004B0
/* Buffer 4 Clear All Register -- read/write */
#define PIC_BUF_4_CLEAR_ALL 0x000004B8
/* Buffer 6 Flush Count with Data Touch Register -- read/write */
#define PIC_BUF_6_FLUSH_CNT_WITH_DATA_TOUCH 0x000004C0
/* Buffer 6 Flush Count w/o Data Touch Register -- read/write */
#define PIC_BUF_6_FLUSH_CNT_W_O_DATA_TOUCH 0x000004C8
/* Buffer 6 Request in Flight Count Register -- read/write */
#define PIC_BUF_6_REQ_IN_FLIGHT_CNT 0x000004D0
/* Buffer 6 Prefetch Request Count Register -- read/write */
#define PIC_BUF_6_PREFETCH_REQ_CNT 0x000004D8
/* Buffer 6 Total PCI Retry Count Register -- read/write */
#define PIC_BUF_6_TOTAL_PCI_RETRY_CNT 0x000004E0
/* Buffer 6 Max PCI Retry Count Register -- read/write */
#define PIC_BUF_6_MAX_PCI_RETRY_CNT 0x000004E8
/* Buffer 6 Max Latency Count Register -- read/write */
#define PIC_BUF_6_MAX_LATENCY_CNT 0x000004F0
/* Buffer 6 Clear All Register -- read/write */
#define PIC_BUF_6_CLEAR_ALL 0x000004F8
/* Buffer 8 Flush Count with Data Touch Register -- read/write */
#define PIC_BUF_8_FLUSH_CNT_WITH_DATA_TOUCH 0x00000500
/* Buffer 8 Flush Count w/o Data Touch Register -- read/write */
#define PIC_BUF_8_FLUSH_CNT_W_O_DATA_TOUCH 0x00000508
/* Buffer 8 Request in Flight Count Register -- read/write */
#define PIC_BUF_8_REQ_IN_FLIGHT_CNT 0x00000510
/* Buffer 8 Prefetch Request Count Register -- read/write */
#define PIC_BUF_8_PREFETCH_REQ_CNT 0x00000518
/* Buffer 8 Total PCI Retry Count Register -- read/write */
#define PIC_BUF_8_TOTAL_PCI_RETRY_CNT 0x00000520
/* Buffer 8 Max PCI Retry Count Register -- read/write */
#define PIC_BUF_8_MAX_PCI_RETRY_CNT 0x00000528
/* Buffer 8 Max Latency Count Register -- read/write */
#define PIC_BUF_8_MAX_LATENCY_CNT 0x00000530
/* Buffer 8 Clear All Register -- read/write */
#define PIC_BUF_8_CLEAR_ALL 0x00000538
/* Buffer 10 Flush Count with Data Touch Register -- read/write */
#define PIC_BUF_10_FLUSH_CNT_WITH_DATA_TOUCH 0x00000540
/* Buffer 10 Flush Count w/o Data Touch Register -- read/write */
#define PIC_BUF_10_FLUSH_CNT_W_O_DATA_TOUCH 0x00000548
/* Buffer 10 Request in Flight Count Register -- read/write */
#define PIC_BUF_10_REQ_IN_FLIGHT_CNT 0x00000550
/* Buffer 10 Prefetch Request Count Register -- read/write */
#define PIC_BUF_10_PREFETCH_REQ_CNT 0x00000558
/* Buffer 10 Total PCI Retry Count Register -- read/write */
#define PIC_BUF_10_TOTAL_PCI_RETRY_CNT 0x00000560
/* Buffer 10 Max PCI Retry Count Register -- read/write */
#define PIC_BUF_10_MAX_PCI_RETRY_CNT 0x00000568
/* Buffer 10 Max Latency Count Register -- read/write */
#define PIC_BUF_10_MAX_LATENCY_CNT 0x00000570
/* Buffer 10 Clear All Register -- read/write */
#define PIC_BUF_10_CLEAR_ALL 0x00000578
/* Buffer 12 Flush Count with Data Touch Register -- read/write */
#define PIC_BUF_12_FLUSH_CNT_WITH_DATA_TOUCH 0x00000580
/* Buffer 12 Flush Count w/o Data Touch Register -- read/write */
#define PIC_BUF_12_FLUSH_CNT_W_O_DATA_TOUCH 0x00000588
/* Buffer 12 Request in Flight Count Register -- read/write */
#define PIC_BUF_12_REQ_IN_FLIGHT_CNT 0x00000590
/* Buffer 12 Prefetch Request Count Register -- read/write */
#define PIC_BUF_12_PREFETCH_REQ_CNT 0x00000598
/* Buffer 12 Total PCI Retry Count Register -- read/write */
#define PIC_BUF_12_TOTAL_PCI_RETRY_CNT 0x000005A0
/* Buffer 12 Max PCI Retry Count Register -- read/write */
#define PIC_BUF_12_MAX_PCI_RETRY_CNT 0x000005A8
/* Buffer 12 Max Latency Count Register -- read/write */
#define PIC_BUF_12_MAX_LATENCY_CNT 0x000005B0
/* Buffer 12 Clear All Register -- read/write */
#define PIC_BUF_12_CLEAR_ALL 0x000005B8
/* Buffer 14 Flush Count with Data Touch Register -- read/write */
#define PIC_BUF_14_FLUSH_CNT_WITH_DATA_TOUCH 0x000005C0
/* Buffer 14 Flush Count w/o Data Touch Register -- read/write */
#define PIC_BUF_14_FLUSH_CNT_W_O_DATA_TOUCH 0x000005C8
/* Buffer 14 Request in Flight Count Register -- read/write */
#define PIC_BUF_14_REQ_IN_FLIGHT_CNT 0x000005D0
/* Buffer 14 Prefetch Request Count Register -- read/write */
#define PIC_BUF_14_PREFETCH_REQ_CNT 0x000005D8
/* Buffer 14 Total PCI Retry Count Register -- read/write */
#define PIC_BUF_14_TOTAL_PCI_RETRY_CNT 0x000005E0
/* Buffer 14 Max PCI Retry Count Register -- read/write */
#define PIC_BUF_14_MAX_PCI_RETRY_CNT 0x000005E8
/* Buffer 14 Max Latency Count Register -- read/write */
#define PIC_BUF_14_MAX_LATENCY_CNT 0x000005F0
/* Buffer 14 Clear All Register -- read/write */
#define PIC_BUF_14_CLEAR_ALL 0x000005F8
/* PCIX Read Buffer 0 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_0_ADDR 0x00000A00
/* PCIX Read Buffer 0 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_0_ATTRIBUTE 0x00000A08
/* PCIX Read Buffer 1 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_1_ADDR 0x00000A10
/* PCIX Read Buffer 1 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_1_ATTRIBUTE 0x00000A18
/* PCIX Read Buffer 2 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_2_ADDR 0x00000A20
/* PCIX Read Buffer 2 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_2_ATTRIBUTE 0x00000A28
/* PCIX Read Buffer 3 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_3_ADDR 0x00000A30
/* PCIX Read Buffer 3 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_3_ATTRIBUTE 0x00000A38
/* PCIX Read Buffer 4 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_4_ADDR 0x00000A40
/* PCIX Read Buffer 4 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_4_ATTRIBUTE 0x00000A48
/* PCIX Read Buffer 5 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_5_ADDR 0x00000A50
/* PCIX Read Buffer 5 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_5_ATTRIBUTE 0x00000A58
/* PCIX Read Buffer 6 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_6_ADDR 0x00000A60
/* PCIX Read Buffer 6 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_6_ATTRIBUTE 0x00000A68
/* PCIX Read Buffer 7 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_7_ADDR 0x00000A70
/* PCIX Read Buffer 7 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_7_ATTRIBUTE 0x00000A78
/* PCIX Read Buffer 8 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_8_ADDR 0x00000A80
/* PCIX Read Buffer 8 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_8_ATTRIBUTE 0x00000A88
/* PCIX Read Buffer 9 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_9_ADDR 0x00000A90
/* PCIX Read Buffer 9 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_9_ATTRIBUTE 0x00000A98
/* PCIX Read Buffer 10 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_10_ADDR 0x00000AA0
/* PCIX Read Buffer 10 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_10_ATTRIBUTE 0x00000AA8
/* PCIX Read Buffer 11 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_11_ADDR 0x00000AB0
/* PCIX Read Buffer 11 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_11_ATTRIBUTE 0x00000AB8
/* PCIX Read Buffer 12 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_12_ADDR 0x00000AC0
/* PCIX Read Buffer 12 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_12_ATTRIBUTE 0x00000AC8
/* PCIX Read Buffer 13 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_13_ADDR 0x00000AD0
/* PCIX Read Buffer 13 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_13_ATTRIBUTE 0x00000AD8
/* PCIX Read Buffer 14 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_14_ADDR 0x00000AE0
/* PCIX Read Buffer 14 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_14_ATTRIBUTE 0x00000AE8
/* PCIX Read Buffer 15 Address Register -- read-only */
#define PIC_PCIX_READ_BUF_15_ADDR 0x00000AF0
/* PCIX Read Buffer 15 Attribute Register -- read-only */
#define PIC_PCIX_READ_BUF_15_ATTRIBUTE 0x00000AF8
/* PCIX Write Buffer 0 Address Register -- read-only */
#define PIC_PCIX_WRITE_BUF_0_ADDR 0x00000B00
/* PCIX Write Buffer 0 Attribute Register -- read-only */
#define PIC_PCIX_WRITE_BUF_0_ATTRIBUTE 0x00000B08
/* PCIX Write Buffer 0 Valid Register -- read-only */
#define PIC_PCIX_WRITE_BUF_0_VALID 0x00000B10
/* PCIX Write Buffer 1 Address Register -- read-only */
#define PIC_PCIX_WRITE_BUF_1_ADDR 0x00000B20
/* PCIX Write Buffer 1 Attribute Register -- read-only */
#define PIC_PCIX_WRITE_BUF_1_ATTRIBUTE 0x00000B28
/* PCIX Write Buffer 1 Valid Register -- read-only */
#define PIC_PCIX_WRITE_BUF_1_VALID 0x00000B30
/* PCIX Write Buffer 2 Address Register -- read-only */
#define PIC_PCIX_WRITE_BUF_2_ADDR 0x00000B40
/* PCIX Write Buffer 2 Attribute Register -- read-only */
#define PIC_PCIX_WRITE_BUF_2_ATTRIBUTE 0x00000B48
/* PCIX Write Buffer 2 Valid Register -- read-only */
#define PIC_PCIX_WRITE_BUF_2_VALID 0x00000B50
/* PCIX Write Buffer 3 Address Register -- read-only */
#define PIC_PCIX_WRITE_BUF_3_ADDR 0x00000B60
/* PCIX Write Buffer 3 Attribute Register -- read-only */
#define PIC_PCIX_WRITE_BUF_3_ATTRIBUTE 0x00000B68
/* PCIX Write Buffer 3 Valid Register -- read-only */
#define PIC_PCIX_WRITE_BUF_3_VALID 0x00000B70
/* PCIX Write Buffer 4 Address Register -- read-only */
#define PIC_PCIX_WRITE_BUF_4_ADDR 0x00000B80
/* PCIX Write Buffer 4 Attribute Register -- read-only */
#define PIC_PCIX_WRITE_BUF_4_ATTRIBUTE 0x00000B88
/* PCIX Write Buffer 4 Valid Register -- read-only */
#define PIC_PCIX_WRITE_BUF_4_VALID 0x00000B90
/* PCIX Write Buffer 5 Address Register -- read-only */
#define PIC_PCIX_WRITE_BUF_5_ADDR 0x00000BA0
/* PCIX Write Buffer 5 Attribute Register -- read-only */
#define PIC_PCIX_WRITE_BUF_5_ATTRIBUTE 0x00000BA8
/* PCIX Write Buffer 5 Valid Register -- read-only */
#define PIC_PCIX_WRITE_BUF_5_VALID 0x00000BB0
/* PCIX Write Buffer 6 Address Register -- read-only */
#define PIC_PCIX_WRITE_BUF_6_ADDR 0x00000BC0
/* PCIX Write Buffer 6 Attribute Register -- read-only */
#define PIC_PCIX_WRITE_BUF_6_ATTRIBUTE 0x00000BC8
/* PCIX Write Buffer 6 Valid Register -- read-only */
#define PIC_PCIX_WRITE_BUF_6_VALID 0x00000BD0
/* PCIX Write Buffer 7 Address Register -- read-only */
#define PIC_PCIX_WRITE_BUF_7_ADDR 0x00000BE0
/* PCIX Write Buffer 7 Attribute Register -- read-only */
#define PIC_PCIX_WRITE_BUF_7_ATTRIBUTE 0x00000BE8
/* PCIX Write Buffer 7 Valid Register -- read-only */
#define PIC_PCIX_WRITE_BUF_7_VALID 0x00000BF0
/*********************************************************************
 * misc typedefs
 *
 */
/* Every PIC register is read and written as a full 64-bit quantity. */
typedef uint64_t picreg_t;
/*********************************************************************
* PIC register structures
*
*/
/*
 * Identification Register
 *
 * The Identification register is a read only register used by the host CPU
 * during configuration to determine the type of the widget. The format is
 * the same as defined in IEEE 1149.1 JTAG Device Identification Register.
 */
typedef union pic_id_reg_u {
	picreg_t	pic_id_reg_regval;	/* whole-register view */
	struct {
		picreg_t	:		32; /* 63:32 reserved */
		picreg_t	rev_num :	 4; /* 31:28 silicon revision */
		picreg_t	part_num :	16; /* 27:12 part number (cf. PIC_WIDGET_PART_NUM_*) */
		picreg_t	mfg_num :	11; /* 11:1  manufacturer number */
		picreg_t	:		 1; /* 0:0   reserved */
	} pic_id_reg_fld_s;
} pic_id_reg_u_t;
/*
 * Status Register
 *
 * The status register is a read register which holds status information of the
 * Bus Subsection.
 */
typedef union pic_stat_reg_u {
	picreg_t	pic_stat_reg_regval;	/* whole-register view */
	struct {
		picreg_t	:		28; /* 63:36 reserved */
		picreg_t	pci_x_speed :	 2; /* 35:34 PCI-X bus speed */
		picreg_t	pci_x_active :	 1; /* 33:33 bus running in PCI-X mode */
		picreg_t	:		 1; /* 32:32 reserved */
		picreg_t	llp_rec_cnt :	 8; /* 31:24 LLP receive (error) count */
		picreg_t	llp_tx_cnt :	 8; /* 23:16 LLP transmit (retry) count */
		picreg_t	rx_credit_cnt :	 4; /* 15:12 link receive credits */
		picreg_t	tx_credit_cnt :	 4; /* 11:8  link transmit credits */
		picreg_t	pci_misc_input : 8; /* 7:0   misc PCI input pins */
	} pic_stat_reg_fld_s;
} pic_stat_reg_u_t;
/*
 * Upper Address Holding Register Bus Side Errors
 *
 * The upper address holding register is a read only register which contains
 * the upper 16-bits of the address when certain error occurs (see error cases
 * chapter). Subsequent errors are not logged until the error is cleared. The
 * last logged value is held until the group is cleared and enabled.
 */
typedef union pic_upper_bus_err_u {
	picreg_t	pic_upper_bus_err_regval;	/* whole-register view */
	struct {
		picreg_t	:		32; /* 63:32 reserved */
		picreg_t	:		16; /* 31:16 reserved */
		picreg_t	upp_addr :	16; /* 15:0  address bits 47:32 of faulting access */
	} pic_upper_bus_err_fld_s;
} pic_upper_bus_err_u_t;
/*
 * Lower Address Holding Register Bus Side Errors
 *
 * The lower address holding register is a read only register which contains
 * the address which either can be accessed as a word or double word. Sub-
 * sequent errors are not logged until the error is cleared. The last logged
 * value is held until the group is cleared and enabled.
 */
typedef union pic_lower_bus_err_u {
	picreg_t	pic_lower_bus_err_regval;	/* whole-register view */
	struct {
		picreg_t	:		16; /* 63:48 reserved */
		picreg_t	upp_addr :	16; /* 47:32 upper address bits (mirrors upper reg) */
		picreg_t	low_addr :	32; /* 31:0  lower 32 address bits */
	} pic_lower_bus_err_fld_s;
} pic_lower_bus_err_u_t;
/*
 * Control Register
 *
 * The control register is a read/write register which holds control informa-
 * tion for the bus subsection.
 */
typedef union pic_control_reg_u {
	picreg_t	pic_control_reg_regval;	/* whole-register view */
	struct {
		picreg_t	:		32; /* 63:32 reserved */
		picreg_t	:		 4; /* 31:28 reserved */
		picreg_t	rst_pin_n :	 4; /* 27:24 per-device reset pins (active low) */
		picreg_t	:		 1; /* 23:23 reserved */
		picreg_t	mem_swap :	 1; /* 22:22 byte-swap memory space accesses */
		picreg_t	page_size :	 1; /* 21:21 map page size select */
		picreg_t	:		 4; /* 20:17 reserved */
		picreg_t	f_bad_pkt :	 1; /* 16:16 force bad packet (diagnostic) */
		picreg_t	llp_xbar_crd :	 4; /* 15:12 LLP crossbar credits */
		picreg_t	clr_rllp_cnt :	 1; /* 11:11 clear LLP receive counter */
		picreg_t	clr_tllp_cnt :	 1; /* 10:10 clear LLP transmit counter */
		picreg_t	sys_end :	 1; /* 9:9   system endianness select */
		picreg_t	:		 3; /* 8:6   reserved */
		picreg_t	pci_speed :	 2; /* 5:4   PCI bus speed select */
		picreg_t	widget_id :	 4; /* 3:0   crosstalk widget id */
	} pic_control_reg_fld_s;
} pic_control_reg_u_t;
/*
 * PCI/PCI-X Request Time-out Value Register
 *
 * This register contains the reload value for the response timer. The request
 * timer counts every 960 nS (32 PCI clocks)
 */
typedef union pic_pci_req_to_u {
	picreg_t	pic_pci_req_to_regval;	/* whole-register view */
	struct {
		picreg_t	:		32; /* 63:32 reserved */
		picreg_t	:		12; /* 31:20 reserved */
		picreg_t	time_out :	20; /* 19:0  timer reload value (960ns ticks) */
	} pic_pci_req_to_fld_s;
} pic_pci_req_to_u_t;
/*
* Interrupt Destination Upper Address Register
*
* The interrupt destination upper address register is a read/write register
* containing the upper 16-bits of address of the host to which the interrupt
* is targeted. In addition the target ID is also contained in this register for
* use in Crosstalk mode.
*/
typedef union pic_int_desc_upper_u {	/* interrupt destination: upper 16 address bits + target ID (read/write) */
picreg_t pic_int_desc_upper_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t : 12; /* 31:20 (reserved) */
picreg_t target_id : 4; /* 19:16 (Crosstalk mode) */
picreg_t upp_addr : 16; /* 15:0 */
} pic_int_desc_upper_fld_s;
} pic_int_desc_upper_u_t;
/*
* Interrupt Destination Lower Address Register
*
* The interrupt destination lower address register is a read/write register
* which contains the entire address of the host to which the interrupt is tar-
* geted. In addition the target ID is also contained in this register for use in
* Crosstalk mode.
*/
typedef union pic_int_desc_lower_u {	/* interrupt destination: full host address + target ID (read/write) */
picreg_t pic_int_desc_lower_regval;	/* whole register */
struct {
picreg_t : 12; /* 63:52 (reserved) */
picreg_t target_id : 4; /* 51:48 (Crosstalk mode) */
picreg_t upp_addr : 16; /* 47:32 */
picreg_t low_addr : 32; /* 31:0 */
} pic_int_desc_lower_fld_s;
} pic_int_desc_lower_u_t;
/*
* Command Word Holding Register Bus Side Errors
*
* The command word holding is a read register that holds the command
* word of a Crosstalk packet when errors occur on the link side (see error
* chapter). Errors are indicated with error bits in the interrupt status regis-
 * ter. Subsequent errors are not logged until the interrupt is cleared.
*/
typedef union pic_cmd_word_bus_err_u {	/* Crosstalk command word latched on link-side error (read only) */
picreg_t pic_cmd_word_bus_err_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t didn : 4; /* 31:28 */
picreg_t sidn : 4; /* 27:24 */
picreg_t pactyp : 4; /* 23:20 */
picreg_t tnum : 5; /* 19:15 */
picreg_t coherent : 1; /* 14:14 */
picreg_t ds : 2; /* 13:12 */
picreg_t gbr : 1; /* 11:11 */
picreg_t vbpm : 1; /* 10:10 */
picreg_t error : 1; /* 9:9 */
picreg_t barrier : 1; /* 8:8 */
picreg_t : 8; /* 7:0 (reserved) */
} pic_cmd_word_bus_err_fld_s;
} pic_cmd_word_bus_err_u_t;
/*
* LLP Configuration Register
*
* This register contains the configuration information for the LLP modules
* and is only valid on bus 0 side.
*/
typedef union pic_llp_cfg_u {	/* LLP module configuration (valid on bus 0 side only) */
picreg_t pic_llp_cfg_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t : 6; /* 31:26 (reserved) */
picreg_t llp_maxretry : 10; /* 25:16 */
picreg_t llp_nulltimeout : 6; /* 15:10 */
picreg_t llp_maxburst : 10; /* 9:0 */
} pic_llp_cfg_fld_s;
} pic_llp_cfg_u_t;
/*
* PCI/PCI-X Target Flush Register
*
* When read, this register will return a 0x00 after all previous transfers to
* the PCI bus subsection have completed.
*/
/*
* Command Word Holding Register Link Side Errors
*
* The command word holding is a read-only register that holds the com-
* mand word of a Crosstalk packet when request fifo overflow or unexpect-
* ed response errors occur. Errors are indicated with error bits in the
* interrupt status register. Subsequent errors are not logged until this inter-
* rupt is cleared.
*/
typedef union pic_cmd_word_link_err_u {	/* Crosstalk command word latched on request fifo overflow / unexpected response (read only) */
picreg_t pic_cmd_word_link_err_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t didn : 4; /* 31:28 */
picreg_t sidn : 4; /* 27:24 */
picreg_t pactyp : 4; /* 23:20 */
picreg_t tnum : 5; /* 19:15 */
picreg_t coherent : 1; /* 14:14 */
picreg_t ds : 2; /* 13:12 */
picreg_t gbr : 1; /* 11:11 */
picreg_t vbpm : 1; /* 10:10 */
picreg_t error : 1; /* 9:9 */
picreg_t barrier : 1; /* 8:8 */
picreg_t : 8; /* 7:0 (reserved) */
} pic_cmd_word_link_err_fld_s;
} pic_cmd_word_link_err_u_t;
/*
* PCI Response Buffer Error Upper Address Holding Reg
*
* The response buffer error upper address holding register is a read only
* register which contains the upper 16-bits of the address when error asso-
* ciated with response buffer entries occur. Subsequent errors are not
* logged until the interrupt is cleared.
*/
typedef union pic_pci_rbuf_err_upper_u {	/* response buffer error: upper address + buffer/device id (read only) */
picreg_t pic_pci_rbuf_err_upper_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t : 9; /* 31:23 (reserved) */
picreg_t dev_num : 3; /* 22:20 */
picreg_t buff_num : 4; /* 19:16 */
picreg_t upp_addr : 16; /* 15:0 */
} pic_pci_rbuf_err_upper_fld_s;
} pic_pci_rbuf_err_upper_u_t;
/*
* PCI Response Buffer Error Lower Address Holding Reg
*
* The response buffer error lower address holding register is a read only
* register which contains the address of the error associated with response
* buffer entries. Subsequent errors are not logged until the interrupt is
* cleared.
*/
typedef union pic_pci_rbuf_err_lower_u {	/* response buffer error: full address + buffer/device id (read only) */
picreg_t pic_pci_rbuf_err_lower_regval;	/* whole register */
struct {
picreg_t : 9; /* 63:55 (reserved) */
picreg_t dev_num : 3; /* 54:52 */
picreg_t buff_num : 4; /* 51:48 */
picreg_t upp_addr : 16; /* 47:32 */
picreg_t low_addr : 32; /* 31:0 */
} pic_pci_rbuf_err_lower_fld_s;
} pic_pci_rbuf_err_lower_u_t;
/*
* Test Pin Control Register
*
* This register selects the output function and value to the four test pins on
 * the PIC.
*/
typedef union pic_test_pin_cntl_u {	/* test pin output function/value select */
picreg_t pic_test_pin_cntl_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t : 8; /* 31:24 (reserved) */
picreg_t tdata_out : 8; /* 23:16 */
picreg_t sel_tpin_7 : 2; /* 15:14 */
picreg_t sel_tpin_6 : 2; /* 13:12 */
picreg_t sel_tpin_5 : 2; /* 11:10 */
picreg_t sel_tpin_4 : 2; /* 9:8 */
picreg_t sel_tpin_3 : 2; /* 7:6 */
picreg_t sel_tpin_2 : 2; /* 5:4 */
picreg_t sel_tpin_1 : 2; /* 3:2 */
picreg_t sel_tpin_0 : 2; /* 1:0 */
} pic_test_pin_cntl_fld_s;
} pic_test_pin_cntl_u_t;
/*
* Address Holding Register Link Side Errors
*
* The address holding register is a read only register which contains the ad-
 * dress, which can be accessed as either a word or a double word. Subsequent
* errors are not logged until the error is cleared. The last logged value is
* held until the group is cleared and enabled.
*/
typedef union pic_p_addr_lkerr_u {	/* link-side error: full failing address (read only) */
picreg_t pic_p_addr_lkerr_regval;	/* whole register */
struct {
picreg_t : 16; /* 63:48 (reserved) */
picreg_t upp_addr : 16; /* 47:32 */
picreg_t low_addr : 32; /* 31:0 */
} pic_p_addr_lkerr_fld_s;
} pic_p_addr_lkerr_u_t;
/*
* PCI Direct Mapping Register
*
* This register is used to relocate a 2 GByte region for PCI to Crosstalk
* transfers.
*/
typedef union pic_p_dir_map_u {	/* direct map: relocates a 2 GByte region for PCI-to-Crosstalk */
picreg_t pic_p_dir_map_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t : 8; /* 31:24 (reserved) */
picreg_t dir_w_id : 4; /* 23:20 */
picreg_t : 2; /* 19:18 (reserved) */
picreg_t dir_add512 : 1; /* 17:17 */
picreg_t dir_off : 17; /* 16:0 */
} pic_p_dir_map_fld_s;
} pic_p_dir_map_u_t;
/*
* PCI Page Map Fault Address Register
*
* This register contains the address and device number when a page map
* fault occurred.
*/
typedef union pic_p_map_fault_u {	/* page-map fault: faulting PCI address + device number */
picreg_t pic_p_map_fault_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t : 10; /* 31:22 (reserved) */
picreg_t pci_addr : 18; /* 21:4 */
picreg_t : 1; /* 3:3 (reserved) */
picreg_t pci_dev_num : 3; /* 2:0 */
} pic_p_map_fault_fld_s;
} pic_p_map_fault_u_t;
/*
* Arbitration Register
*
* This register defines the priority and bus time out timing in PCI bus arbi-
* tration.
*/
typedef union pic_p_arb_u {	/* PCI bus arbitration: priority and bus time-out timing */
picreg_t pic_p_arb_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t : 8; /* 31:24 (reserved) */
picreg_t dev_broke : 4; /* 23:20 */
picreg_t : 2; /* 19:18 (reserved) */
picreg_t req_wait_tick : 2; /* 17:16 */
picreg_t : 4; /* 15:12 (reserved) */
picreg_t req_wait_en : 4; /* 11:8 */
picreg_t disarb : 1; /* 7:7 */
picreg_t freeze_gnt : 1; /* 6:6 */
picreg_t : 1; /* 5:5 (reserved) */
picreg_t en_bridge_hi : 2; /* 4:3 */
picreg_t : 1; /* 2:2 (reserved) */
picreg_t en_bridge_lo : 2; /* 1:0 */
} pic_p_arb_fld_s;
} pic_p_arb_u_t;
/*
* Internal Ram Parity Error Register
*
* This register logs information about parity errors on internal ram access.
*/
typedef union pic_p_ram_perr_u {	/* internal RAM parity error log: per-RAM error flags + addresses */
picreg_t pic_p_ram_perr_regval;	/* whole register */
struct {
picreg_t : 6; /* 63:58 (reserved) */
picreg_t ate_err_addr : 10; /* 57:48 */
picreg_t : 7; /* 47:41 (reserved) */
picreg_t rd_resp_err_addr : 9; /* 40:32 */
picreg_t wrt_resp_err_addr : 8; /* 31:24 */
picreg_t : 2; /* 23:22 (reserved) */
picreg_t ate_err : 1; /* 21:21 */
picreg_t rd_resp_err : 1; /* 20:20 */
picreg_t wrt_resp_err : 1; /* 19:19 */
picreg_t dbe_ate : 3; /* 18:16 */
picreg_t dbe_rd : 8; /* 15:8 */
picreg_t dbe_wrt : 8; /* 7:0 */
} pic_p_ram_perr_fld_s;
} pic_p_ram_perr_u_t;
/*
* Time-out Register
*
* This register determines retry hold off and max retries allowed for PIO
* accesses to PCI/PCI-X.
*/
typedef union pic_p_bus_timeout_u {	/* PIO retry hold-off and max retry count for PCI/PCI-X accesses */
picreg_t pic_p_bus_timeout_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t : 11; /* 31:21 (reserved) */
picreg_t pci_retry_hld : 5; /* 20:16 */
picreg_t : 6; /* 15:10 (reserved) */
picreg_t pci_retry_cnt : 10; /* 9:0 */
} pic_p_bus_timeout_fld_s;
} pic_p_bus_timeout_u_t;
/*
* PCI/PCI-X Type 1 Configuration Register
*
 * This register is used during accesses to the PCI/PCI-X type 1 configuration
* space. The bits in this register are used to supplement the address during
* the configuration cycle to select the correct secondary bus and device.
*/
typedef union pic_type1_cfg_u {	/* type 1 config cycle: secondary bus and device select */
picreg_t pic_type1_cfg_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t : 8; /* 31:24 (reserved) */
picreg_t bus_num : 8; /* 23:16 */
picreg_t dev_num : 5; /* 15:11 */
picreg_t : 11; /* 10:0 (reserved) */
} pic_type1_cfg_fld_s;
} pic_type1_cfg_u_t;
/*
* PCI Bus Error Upper Address Holding Register
*
* This register holds the value of the upper address on the PCI Bus when an
* error occurs.
*/
typedef union pic_p_pci_err_upper_u {	/* PCI bus error: upper address bits + transaction attributes */
picreg_t pic_p_pci_err_upper_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t : 4; /* 31:28 (reserved) */
picreg_t pci_xtalk_did : 4; /* 27:24 */
picreg_t : 2; /* 23:22 (reserved) */
picreg_t pci_dac : 1; /* 21:21 */
picreg_t pci_dev_master : 1; /* 20:20 */
picreg_t pci_vdev : 1; /* 19:19 */
picreg_t pci_dev_num : 3; /* 18:16 */
picreg_t pci_uaddr_err : 16; /* 15:0 */
} pic_p_pci_err_upper_fld_s;
} pic_p_pci_err_upper_u_t;
/*
* PCI Bus Error Lower Address Holding Register
*
* This register holds the value of the lower address on the PCI Bus when an
* error occurs.
*/
typedef union pic_p_pci_err_lower_u {	/* PCI bus error: full address + transaction attributes */
picreg_t pic_p_pci_err_lower_regval;	/* whole register */
struct {
picreg_t : 4; /* 63:60 (reserved) */
picreg_t pci_xtalk_did : 4; /* 59:56 */
picreg_t : 2; /* 55:54 (reserved) */
picreg_t pci_dac : 1; /* 53:53 */
picreg_t pci_dev_master : 1; /* 52:52 */
picreg_t pci_vdev : 1; /* 51:51 */
picreg_t pci_dev_num : 3; /* 50:48 */
picreg_t pci_uaddr_err : 16; /* 47:32 */
picreg_t pci_laddr_err : 32; /* 31:0 */
} pic_p_pci_err_lower_fld_s;
} pic_p_pci_err_lower_u_t;
/*
* PCI-X Error Address Register
*
* This register contains the address on the PCI-X bus when an error oc-
* curred.
*/
typedef union pic_p_pcix_err_addr_u {	/* PCI-X bus address at time of error (full 64 bits) */
picreg_t pic_p_pcix_err_addr_regval;	/* whole register */
struct {
picreg_t pcix_err_addr : 64; /* 63:0 */
} pic_p_pcix_err_addr_fld_s;
} pic_p_pcix_err_addr_u_t;
/*
* PCI-X Error Attribute Register
*
* This register contains the attribute data on the PCI-X bus when an error
* occurred.
*/
typedef union pic_p_pcix_err_attr_u {	/* PCI-X attribute phase data at time of error */
picreg_t pic_p_pcix_err_attr_regval;	/* whole register */
struct {
picreg_t : 16; /* 63:48 (reserved) */
picreg_t bus_cmd : 4; /* 47:44 */
picreg_t byte_cnt : 12; /* 43:32 */
picreg_t : 1; /* 31:31 (reserved) */
picreg_t ns : 1; /* 30:30 */
picreg_t ro : 1; /* 29:29 */
picreg_t tag : 5; /* 28:24 */
picreg_t bus_num : 8; /* 23:16 */
picreg_t dev_num : 5; /* 15:11 */
picreg_t fun_num : 3; /* 10:8 */
picreg_t l_byte_cnt : 8; /* 7:0 */
} pic_p_pcix_err_attr_fld_s;
} pic_p_pcix_err_attr_u_t;
/*
* PCI-X Error Data Register
*
* This register contains the Data on the PCI-X bus when an error occurred.
*/
typedef union pic_p_pcix_err_data_u {	/* PCI-X bus data at time of error (full 64 bits) */
picreg_t pic_p_pcix_err_data_regval;	/* whole register */
struct {
picreg_t pcix_err_data : 64; /* 63:0 */
} pic_p_pcix_err_data_fld_s;
} pic_p_pcix_err_data_u_t;
/*
* PCI-X Read Request Timeout Error Register
*
* This register contains a pointer into the PCI-X read data structure.
*/
typedef union pic_p_pcix_read_req_to_u {	/* read-request timeout: pointer into the PCI-X read data structure */
picreg_t pic_p_pcix_read_req_to_regval;	/* whole register */
struct {
picreg_t : 55; /* 63:9 (reserved) */
picreg_t rd_buff_loc : 5; /* 8:4 */
picreg_t rd_buff_struct : 4; /* 3:0 */
} pic_p_pcix_read_req_to_fld_s;
} pic_p_pcix_read_req_to_u_t;
/*
* INT_STATUS Register
*
* This is the current interrupt status register which maintains the current
 * status of all the interrupting devices which generated an interrupt. This
* register is read only and all the bits are active high. A high bit at
* INT_STATE means the corresponding INT_N pin has been asserted
* (low).
*/
typedef union pic_p_int_status_u {	/* current interrupt status (read only, bits active high) */
picreg_t pic_p_int_status_regval;	/* whole register */
struct {
picreg_t : 22; /* 63:42 (reserved) */
picreg_t int_ram_perr : 1; /* 41:41 */
picreg_t bus_arb_broke : 1; /* 40:40 */
picreg_t pci_x_req_tout : 1; /* 39:39 */
picreg_t pci_x_tabort : 1; /* 38:38 */
picreg_t pci_x_perr : 1; /* 37:37 */
picreg_t pci_x_serr : 1; /* 36:36 */
picreg_t pci_x_mretry : 1; /* 35:35 */
picreg_t pci_x_mtout : 1; /* 34:34 */
picreg_t pci_x_da_parity : 1; /* 33:33 */
picreg_t pci_x_ad_parity : 1; /* 32:32 */
picreg_t : 1; /* 31:31 (reserved) */
picreg_t pmu_page_fault : 1; /* 30:30 */
picreg_t unexpected_resp : 1; /* 29:29 */
picreg_t bad_xresp_packet : 1; /* 28:28 */
picreg_t bad_xreq_packet : 1; /* 27:27 */
picreg_t resp_xtalk_error : 1; /* 26:26 */
picreg_t req_xtalk_error : 1; /* 25:25 */
picreg_t invalid_access : 1; /* 24:24 */
picreg_t unsupported_xop : 1; /* 23:23 */
picreg_t xreq_fifo_oflow : 1; /* 22:22 */
picreg_t llp_rec_snerror : 1; /* 21:21 */
picreg_t llp_rec_cberror : 1; /* 20:20 */
picreg_t llp_rcty : 1; /* 19:19 */
picreg_t llp_tx_retry : 1; /* 18:18 */
picreg_t llp_tcty : 1; /* 17:17 */
picreg_t : 1; /* 16:16 (reserved) */
picreg_t pci_abort : 1; /* 15:15 */
picreg_t pci_parity : 1; /* 14:14 */
picreg_t pci_serr : 1; /* 13:13 */
picreg_t pci_perr : 1; /* 12:12 */
picreg_t pci_master_tout : 1; /* 11:11 */
picreg_t pci_retry_cnt : 1; /* 10:10 */
picreg_t xread_req_tout : 1; /* 9:9 */
picreg_t : 1; /* 8:8 (reserved) */
picreg_t int_state : 8; /* 7:0 (one bit per INT_N pin) */
} pic_p_int_status_fld_s;
} pic_p_int_status_u_t;
/*
* Interrupt Enable Register
*
* This register enables the reporting of interrupt to the host. Each bit in this
* register corresponds to the same bit in Interrupt Status register. All bits
* are zero after reset.
*/
typedef union pic_p_int_enable_u {	/* interrupt enables; bits correspond 1:1 with the status register */
picreg_t pic_p_int_enable_regval;	/* whole register */
struct {
picreg_t : 22; /* 63:42 (reserved) */
picreg_t en_int_ram_perr : 1; /* 41:41 */
picreg_t en_bus_arb_broke : 1; /* 40:40 */
picreg_t en_pci_x_req_tout : 1; /* 39:39 */
picreg_t en_pci_x_tabort : 1; /* 38:38 */
picreg_t en_pci_x_perr : 1; /* 37:37 */
picreg_t en_pci_x_serr : 1; /* 36:36 */
picreg_t en_pci_x_mretry : 1; /* 35:35 */
picreg_t en_pci_x_mtout : 1; /* 34:34 */
picreg_t en_pci_x_da_parity : 1; /* 33:33 */
picreg_t en_pci_x_ad_parity : 1; /* 32:32 */
picreg_t : 1; /* 31:31 (reserved) */
picreg_t en_pmu_page_fault : 1; /* 30:30 */
picreg_t en_unexpected_resp : 1; /* 29:29 */
picreg_t en_bad_xresp_packet : 1; /* 28:28 */
picreg_t en_bad_xreq_packet : 1; /* 27:27 */
picreg_t en_resp_xtalk_error : 1; /* 26:26 */
picreg_t en_req_xtalk_error : 1; /* 25:25 */
picreg_t en_invalid_access : 1; /* 24:24 */
picreg_t en_unsupported_xop : 1; /* 23:23 */
picreg_t en_xreq_fifo_oflow : 1; /* 22:22 */
picreg_t en_llp_rec_snerror : 1; /* 21:21 */
picreg_t en_llp_rec_cberror : 1; /* 20:20 */
picreg_t en_llp_rcty : 1; /* 19:19 */
picreg_t en_llp_tx_retry : 1; /* 18:18 */
picreg_t en_llp_tcty : 1; /* 17:17 */
picreg_t : 1; /* 16:16 (reserved) */
picreg_t en_pci_abort : 1; /* 15:15 */
picreg_t en_pci_parity : 1; /* 14:14 */
picreg_t en_pci_serr : 1; /* 13:13 */
picreg_t en_pci_perr : 1; /* 12:12 */
picreg_t en_pci_master_tout : 1; /* 11:11 */
picreg_t en_pci_retry_cnt : 1; /* 10:10 */
picreg_t en_xread_req_tout : 1; /* 9:9 */
picreg_t : 1; /* 8:8 (reserved) */
picreg_t en_int_state : 8; /* 7:0 */
} pic_p_int_enable_fld_s;
} pic_p_int_enable_u_t;
/*
* Reset Interrupt Register
*
* A write of a "1" clears the bit and rearms the error registers. Writes also
* clear the error view register.
*/
typedef union pic_p_int_rst_u {	/* write-1-to-clear interrupt reset; also rearms error registers */
picreg_t pic_p_int_rst_regval;	/* whole register */
struct {
picreg_t : 22; /* 63:42 (reserved) */
picreg_t logv_int_ram_perr : 1; /* 41:41 */
picreg_t logv_bus_arb_broke : 1; /* 40:40 */
picreg_t logv_pci_x_req_tout : 1; /* 39:39 */
picreg_t logv_pci_x_tabort : 1; /* 38:38 */
picreg_t logv_pci_x_perr : 1; /* 37:37 */
picreg_t logv_pci_x_serr : 1; /* 36:36 */
picreg_t logv_pci_x_mretry : 1; /* 35:35 */
picreg_t logv_pci_x_mtout : 1; /* 34:34 */
picreg_t logv_pci_x_da_parity : 1; /* 33:33 */
picreg_t logv_pci_x_ad_parity : 1; /* 32:32 */
picreg_t : 1; /* 31:31 (reserved) */
picreg_t logv_pmu_page_fault : 1; /* 30:30 */
picreg_t logv_unexpected_resp : 1; /* 29:29 */
picreg_t logv_bad_xresp_packet : 1; /* 28:28 */
picreg_t logv_bad_xreq_packet : 1; /* 27:27 */
picreg_t logv_resp_xtalk_error : 1; /* 26:26 */
picreg_t logv_req_xtalk_error : 1; /* 25:25 */
picreg_t logv_invalid_access : 1; /* 24:24 */
picreg_t logv_unsupported_xop : 1; /* 23:23 */
picreg_t logv_xreq_fifo_oflow : 1; /* 22:22 */
picreg_t logv_llp_rec_snerror : 1; /* 21:21 */
picreg_t logv_llp_rec_cberror : 1; /* 20:20 */
picreg_t logv_llp_rcty : 1; /* 19:19 */
picreg_t logv_llp_tx_retry : 1; /* 18:18 */
picreg_t logv_llp_tcty : 1; /* 17:17 */
picreg_t : 1; /* 16:16 (reserved) */
picreg_t logv_pci_abort : 1; /* 15:15 */
picreg_t logv_pci_parity : 1; /* 14:14 */
picreg_t logv_pci_serr : 1; /* 13:13 */
picreg_t logv_pci_perr : 1; /* 12:12 */
picreg_t logv_pci_master_tout : 1; /* 11:11 */
picreg_t logv_pci_retry_cnt : 1; /* 10:10 */
picreg_t logv_xread_req_tout : 1; /* 9:9 */
picreg_t : 2; /* 8:7 (reserved) */
picreg_t multi_clr : 1; /* 6:6 */
picreg_t : 6; /* 5:0 (reserved) */
} pic_p_int_rst_fld_s;
} pic_p_int_rst_u_t;
/*
* Interrupt Mode Register
*
* This register defines the interrupting mode of the INT_N pins.
*/
typedef union pic_p_int_mode_u {	/* interrupting mode of the INT_N pins */
picreg_t pic_p_int_mode_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t : 24; /* 31:8 (reserved) */
picreg_t en_clr_pkt : 8; /* 7:0 (one bit per INT_N pin) */
} pic_p_int_mode_fld_s;
} pic_p_int_mode_u_t;
/*
* Interrupt Device Select Register
*
* This register associates interrupt pins with devices thus allowing buffer
* management (flushing) when a device interrupt occurs.
*/
typedef union pic_p_int_device_u {	/* maps each interrupt pin to a device for buffer flushing */
picreg_t pic_p_int_device_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t : 8; /* 31:24 (reserved) */
picreg_t int7_dev : 3; /* 23:21 */
picreg_t int6_dev : 3; /* 20:18 */
picreg_t int5_dev : 3; /* 17:15 */
picreg_t int4_dev : 3; /* 14:12 */
picreg_t int3_dev : 3; /* 11:9 */
picreg_t int2_dev : 3; /* 8:6 */
picreg_t int1_dev : 3; /* 5:3 */
picreg_t int0_dev : 3; /* 2:0 */
} pic_p_int_device_fld_s;
} pic_p_int_device_u_t;
/*
* Host Error Interrupt Field Register
*
* This register tells which bit location in the host's Interrupt Status register
* to set or reset when any error condition happens.
*/
typedef union pic_p_int_host_err_u {	/* bit position in host's interrupt status register for error reports */
picreg_t pic_p_int_host_err_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t : 24; /* 31:8 (reserved) */
picreg_t bridge_err_fld : 8; /* 7:0 */
} pic_p_int_host_err_fld_s;
} pic_p_int_host_err_u_t;
/*
* Interrupt (x) Host Address Register
*
 * This register allows a different host address to be assigned to each interrupt
* pin and the bit in the host.
*/
typedef union pic_p_int_addr_u {	/* per-interrupt-pin host address and host bit field */
picreg_t pic_p_int_addr_regval;	/* whole register */
struct {
picreg_t : 8; /* 63:56 (reserved) */
picreg_t int_fld : 8; /* 55:48 */
picreg_t int_addr : 48; /* 47:0 */
} pic_p_int_addr_fld_s;
} pic_p_int_addr_u_t;
/*
* Error Interrupt View Register
*
 * This register contains the view of which interrupts occurred even if they are
* not currently enabled. The group clear is used to clear these bits just like
* the interrupt status register bits.
*/
typedef union pic_p_err_int_view_u {	/* error interrupt view: records occurrences even when not enabled */
picreg_t pic_p_err_int_view_regval;	/* whole register */
struct {
picreg_t : 22; /* 63:42 (reserved) */
picreg_t int_ram_perr : 1; /* 41:41 */
picreg_t bus_arb_broke : 1; /* 40:40 */
picreg_t pci_x_req_tout : 1; /* 39:39 */
picreg_t pci_x_tabort : 1; /* 38:38 */
picreg_t pci_x_perr : 1; /* 37:37 */
picreg_t pci_x_serr : 1; /* 36:36 */
picreg_t pci_x_mretry : 1; /* 35:35 */
picreg_t pci_x_mtout : 1; /* 34:34 */
picreg_t pci_x_da_parity : 1; /* 33:33 */
picreg_t pci_x_ad_parity : 1; /* 32:32 */
picreg_t : 1; /* 31:31 (reserved) */
picreg_t pmu_page_fault : 1; /* 30:30 */
picreg_t unexpected_resp : 1; /* 29:29 */
picreg_t bad_xresp_packet : 1; /* 28:28 */
picreg_t bad_xreq_packet : 1; /* 27:27 */
picreg_t resp_xtalk_error : 1; /* 26:26 */
picreg_t req_xtalk_error : 1; /* 25:25 */
picreg_t invalid_access : 1; /* 24:24 */
picreg_t unsupported_xop : 1; /* 23:23 */
picreg_t xreq_fifo_oflow : 1; /* 22:22 */
picreg_t llp_rec_snerror : 1; /* 21:21 */
picreg_t llp_rec_cberror : 1; /* 20:20 */
picreg_t llp_rcty : 1; /* 19:19 */
picreg_t llp_tx_retry : 1; /* 18:18 */
picreg_t llp_tcty : 1; /* 17:17 */
picreg_t : 1; /* 16:16 (reserved) */
picreg_t pci_abort : 1; /* 15:15 */
picreg_t pci_parity : 1; /* 14:14 */
picreg_t pci_serr : 1; /* 13:13 */
picreg_t pci_perr : 1; /* 12:12 */
picreg_t pci_master_tout : 1; /* 11:11 */
picreg_t pci_retry_cnt : 1; /* 10:10 */
picreg_t xread_req_tout : 1; /* 9:9 */
picreg_t : 9; /* 8:0 (reserved) */
} pic_p_err_int_view_fld_s;
} pic_p_err_int_view_u_t;
/*
* Multiple Interrupt Register
*
* This register indicates if any interrupt occurs more than once without be-
* ing cleared.
*/
typedef union pic_p_mult_int_u {	/* flags interrupts that occurred more than once before being cleared */
picreg_t pic_p_mult_int_regval;	/* whole register */
struct {
picreg_t : 22; /* 63:42 (reserved) */
picreg_t int_ram_perr : 1; /* 41:41 */
picreg_t bus_arb_broke : 1; /* 40:40 */
picreg_t pci_x_req_tout : 1; /* 39:39 */
picreg_t pci_x_tabort : 1; /* 38:38 */
picreg_t pci_x_perr : 1; /* 37:37 */
picreg_t pci_x_serr : 1; /* 36:36 */
picreg_t pci_x_mretry : 1; /* 35:35 */
picreg_t pci_x_mtout : 1; /* 34:34 */
picreg_t pci_x_da_parity : 1; /* 33:33 */
picreg_t pci_x_ad_parity : 1; /* 32:32 */
picreg_t : 1; /* 31:31 (reserved) */
picreg_t pmu_page_fault : 1; /* 30:30 */
picreg_t unexpected_resp : 1; /* 29:29 */
picreg_t bad_xresp_packet : 1; /* 28:28 */
picreg_t bad_xreq_packet : 1; /* 27:27 */
picreg_t resp_xtalk_error : 1; /* 26:26 */
picreg_t req_xtalk_error : 1; /* 25:25 */
picreg_t invalid_access : 1; /* 24:24 */
picreg_t unsupported_xop : 1; /* 23:23 */
picreg_t xreq_fifo_oflow : 1; /* 22:22 */
picreg_t llp_rec_snerror : 1; /* 21:21 */
picreg_t llp_rec_cberror : 1; /* 20:20 */
picreg_t llp_rcty : 1; /* 19:19 */
picreg_t llp_tx_retry : 1; /* 18:18 */
picreg_t llp_tcty : 1; /* 17:17 */
picreg_t : 1; /* 16:16 (reserved) */
picreg_t pci_abort : 1; /* 15:15 */
picreg_t pci_parity : 1; /* 14:14 */
picreg_t pci_serr : 1; /* 13:13 */
picreg_t pci_perr : 1; /* 12:12 */
picreg_t pci_master_tout : 1; /* 11:11 */
picreg_t pci_retry_cnt : 1; /* 10:10 */
picreg_t xread_req_tout : 1; /* 9:9 */
picreg_t : 1; /* 8:8 (reserved) */
picreg_t int_state : 8; /* 7:0 */
} pic_p_mult_int_fld_s;
} pic_p_mult_int_u_t;
/*
* Force Always Interrupt (x) Register
*
* A write to this data independent write only register will force a set inter-
* rupt to occur as if the interrupt line had transitioned. If the interrupt line
 * is already active an additional set interrupt packet is set. All buffer flush op-
* erations also occur on this operation.
*/
/*
* Force Interrupt (x) Register
*
* A write to this data independent write only register in conjunction with
* the assertion of the corresponding interrupt line will force a set interrupt
* to occur as if the interrupt line had transitioned. The interrupt line must
* be active for this operation to generate a set packet, otherwise the write
* PIO is ignored. All buffer flush operations also occur when the set packet
* is sent on this operation.
*/
/*
* Device Registers
*
* The Device registers contain device specific and mapping information.
*/
typedef union pic_device_reg_u {	/* per-device configuration and mapping information */
picreg_t pic_device_reg_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t : 2; /* 31:30 (reserved) */
picreg_t en_virtual1 : 1; /* 29:29 */
picreg_t en_error_lock : 1; /* 28:28 */
picreg_t en_page_chk : 1; /* 27:27 */
picreg_t force_pci_par : 1; /* 26:26 */
picreg_t en_virtual0 : 1; /* 25:25 */
picreg_t : 1; /* 24:24 (reserved) */
picreg_t dir_wrt_gen : 1; /* 23:23 */
picreg_t dev_size : 1; /* 22:22 */
picreg_t real_time : 1; /* 21:21 */
picreg_t : 1; /* 20:20 (reserved) */
picreg_t swap_direct : 1; /* 19:19 */
picreg_t prefetch : 1; /* 18:18 */
picreg_t precise : 1; /* 17:17 */
picreg_t coherent : 1; /* 16:16 */
picreg_t barrier : 1; /* 15:15 */
picreg_t gbr : 1; /* 14:14 */
picreg_t dev_swap : 1; /* 13:13 */
picreg_t dev_io_mem : 1; /* 12:12 */
picreg_t dev_off : 12; /* 11:0 */
} pic_device_reg_fld_s;
} pic_device_reg_u_t;
/*
* Device (x) Write Request Buffer Flush
*
* When read, this register will return a 0x00 after the write buffer associat-
* ed with the device has been flushed. (PCI Only)
*/
/*
* Even Device Read Response Buffer Register (PCI Only)
*
 * This register is used to allocate the read response buffers for the even num-
* bered devices. (0,2)
*/
typedef union pic_p_even_resp_u {	/* read response buffer allocation for even devices 0,2 (PCI only) */
picreg_t pic_p_even_resp_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t buff_14_en : 1; /* 31:31 */
picreg_t buff_14_vdev : 2; /* 30:29 */
picreg_t buff_14_pdev : 1; /* 28:28 */
picreg_t buff_12_en : 1; /* 27:27 */
picreg_t buff_12_vdev : 2; /* 26:25 */
picreg_t buff_12_pdev : 1; /* 24:24 */
picreg_t buff_10_en : 1; /* 23:23 */
picreg_t buff_10_vdev : 2; /* 22:21 */
picreg_t buff_10_pdev : 1; /* 20:20 */
picreg_t buff_8_en : 1; /* 19:19 */
picreg_t buff_8_vdev : 2; /* 18:17 */
picreg_t buff_8_pdev : 1; /* 16:16 */
picreg_t buff_6_en : 1; /* 15:15 */
picreg_t buff_6_vdev : 2; /* 14:13 */
picreg_t buff_6_pdev : 1; /* 12:12 */
picreg_t buff_4_en : 1; /* 11:11 */
picreg_t buff_4_vdev : 2; /* 10:9 */
picreg_t buff_4_pdev : 1; /* 8:8 */
picreg_t buff_2_en : 1; /* 7:7 */
picreg_t buff_2_vdev : 2; /* 6:5 */
picreg_t buff_2_pdev : 1; /* 4:4 */
picreg_t buff_0_en : 1; /* 3:3 */
picreg_t buff_0_vdev : 2; /* 2:1 */
picreg_t buff_0_pdev : 1; /* 0:0 */
} pic_p_even_resp_fld_s;
} pic_p_even_resp_u_t;
/*
* Odd Device Read Response Buffer Register (PCI Only)
*
 * This register is used to allocate the read response buffers for the odd num-
 * bered devices. (1,3)
*/
typedef union pic_p_odd_resp_u {	/* read response buffer allocation for odd devices 1,3 (PCI only) */
picreg_t pic_p_odd_resp_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t buff_15_en : 1; /* 31:31 */
picreg_t buff_15_vdev : 2; /* 30:29 */
picreg_t buff_15_pdev : 1; /* 28:28 */
picreg_t buff_13_en : 1; /* 27:27 */
picreg_t buff_13_vdev : 2; /* 26:25 */
picreg_t buff_13_pdev : 1; /* 24:24 */
picreg_t buff_11_en : 1; /* 23:23 */
picreg_t buff_11_vdev : 2; /* 22:21 */
picreg_t buff_11_pdev : 1; /* 20:20 */
picreg_t buff_9_en : 1; /* 19:19 */
picreg_t buff_9_vdev : 2; /* 18:17 */
picreg_t buff_9_pdev : 1; /* 16:16 */
picreg_t buff_7_en : 1; /* 15:15 */
picreg_t buff_7_vdev : 2; /* 14:13 */
picreg_t buff_7_pdev : 1; /* 12:12 */
picreg_t buff_5_en : 1; /* 11:11 */
picreg_t buff_5_vdev : 2; /* 10:9 */
picreg_t buff_5_pdev : 1; /* 8:8 */
picreg_t buff_3_en : 1; /* 7:7 */
picreg_t buff_3_vdev : 2; /* 6:5 */
picreg_t buff_3_pdev : 1; /* 4:4 */
picreg_t buff_1_en : 1; /* 3:3 */
picreg_t buff_1_vdev : 2; /* 2:1 */
picreg_t buff_1_pdev : 1; /* 0:0 */
} pic_p_odd_resp_fld_s;
} pic_p_odd_resp_u_t;
/*
* Read Response Buffer Status Register (PCI Only)
*
* This read only register contains the current response buffer status.
*/
typedef union pic_p_resp_status_u {	/* read response buffer status (read only, PCI only) */
picreg_t pic_p_resp_status_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t rrb_valid : 16; /* 31:16 */
picreg_t rrb_inuse : 16; /* 15:0 */
} pic_p_resp_status_fld_s;
} pic_p_resp_status_u_t;
/*
* Read Response Buffer Clear Register (PCI Only)
*
* A write to this register clears the current contents of the buffer.
*/
typedef union pic_p_resp_clear_u {	/* write clears the selected read response buffers (PCI only) */
picreg_t pic_p_resp_clear_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t : 16; /* 31:16 (reserved) */
picreg_t rrb_clear : 16; /* 15:0 */
} pic_p_resp_clear_fld_s;
} pic_p_resp_clear_u_t;
/*
* PCI Read Response Buffer (x) Upper Address Match
*
* The PCI Bridge read response buffer upper address register is a read only
* register which contains the upper 16-bits of the address and status used to
* select the buffer for a PCI transaction.
*/
typedef union pic_p_buf_upper_addr_match_u {	/* response buffer match: upper address + status (read only) */
picreg_t pic_p_buf_upper_addr_match_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t filled : 1; /* 31:31 */
picreg_t armed : 1; /* 30:30 */
picreg_t flush : 1; /* 29:29 */
picreg_t xerr : 1; /* 28:28 */
picreg_t pkterr : 1; /* 27:27 */
picreg_t timeout : 1; /* 26:26 */
picreg_t prefetch : 1; /* 25:25 */
picreg_t precise : 1; /* 24:24 */
picreg_t dw_be : 8; /* 23:16 */
picreg_t upp_addr : 16; /* 15:0 */
} pic_p_buf_upper_addr_match_fld_s;
} pic_p_buf_upper_addr_match_u_t;
/*
* PCI Read Response Buffer (x) Lower Address Match
*
* The PCI Bridge read response buffer lower address Match register is a
* read only register which contains the address and status used to select the
* buffer for a PCI transaction.
*/
typedef union pic_p_buf_lower_addr_match_u {	/* response buffer match: full address + status (read only) */
picreg_t pic_p_buf_lower_addr_match_regval;	/* whole register */
struct {
picreg_t filled : 1; /* 63:63 */
picreg_t armed : 1; /* 62:62 */
picreg_t flush : 1; /* 61:61 */
picreg_t xerr : 1; /* 60:60 */
picreg_t pkterr : 1; /* 59:59 */
picreg_t timeout : 1; /* 58:58 */
picreg_t prefetch : 1; /* 57:57 */
picreg_t precise : 1; /* 56:56 */
picreg_t dw_be : 8; /* 55:48 */
picreg_t upp_addr : 16; /* 47:32 */
picreg_t low_addr : 32; /* 31:0 */
} pic_p_buf_lower_addr_match_fld_s;
} pic_p_buf_lower_addr_match_u_t;
/*
* PCI Buffer (x) Flush Count with Data Touch Register
*
* This counter is incremented each time the corresponding response buffer
* is flushed after at least a single data element in the buffer is used. A word
* write to this address clears the count.
*/
typedef union pic_flush_w_touch_u {	/* count of buffer flushes after data was used; word write clears */
picreg_t pic_flush_w_touch_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t : 16; /* 31:16 (reserved) */
picreg_t touch_cnt : 16; /* 15:0 */
} pic_flush_w_touch_fld_s;
} pic_flush_w_touch_u_t;
/*
* PCI Buffer (x) Flush Count w/o Data Touch Register
*
* This counter is incremented each time the corresponding response buffer
* is flushed without any data element in the buffer being used. A word
* write to this address clears the count.
*/
typedef union pic_flush_wo_touch_u {	/* count of buffer flushes with no data used; word write clears */
picreg_t pic_flush_wo_touch_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t : 16; /* 31:16 (reserved) */
picreg_t notouch_cnt : 16; /* 15:0 */
} pic_flush_wo_touch_fld_s;
} pic_flush_wo_touch_u_t;
/*
* PCI Buffer (x) Request in Flight Count Register
*
* This counter is incremented on each bus clock while the request is in-
 * flight. A word write to this address clears the count.
*/
typedef union pic_inflight_u {	/* bus clocks with a request in flight; word write clears */
picreg_t pic_inflight_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t : 16; /* 31:16 (reserved) */
picreg_t inflight_cnt : 16; /* 15:0 */
} pic_inflight_fld_s;
} pic_inflight_u_t;
/*
* PCI Buffer (x) Prefetch Request Count Register
*
* This counter is incremented each time the request using this buffer was
* generated from the prefetcher. A word write to this address clears the
* count.
*/
typedef union pic_prefetch_u {	/* count of prefetcher-generated requests for this buffer; word write clears */
picreg_t pic_prefetch_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t : 16; /* 31:16 (reserved) */
picreg_t prefetch_cnt : 16; /* 15:0 */
} pic_prefetch_fld_s;
} pic_prefetch_u_t;
/*
* PCI Buffer (x) Total PCI Retry Count Register
*
* This counter is incremented each time a PCI bus retry occurs and the ad-
 * dress matches the tag for the selected buffer. The buffer must also have this
* request in-flight. A word write to this address clears the count.
*/
typedef union pic_total_pci_retry_u {	/* total PCI retries matching this buffer's tag; word write clears */
picreg_t pic_total_pci_retry_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t : 16; /* 31:16 (reserved) */
picreg_t retry_cnt : 16; /* 15:0 */
} pic_total_pci_retry_fld_s;
} pic_total_pci_retry_u_t;
/*
* PCI Buffer (x) Max PCI Retry Count Register
*
 * This counter contains the maximum retry count for a single request
* which was in-flight for this buffer. A word write to this address clears the
* count.
*/
typedef union pic_max_pci_retry_u {	/* max retries for a single in-flight request; word write clears */
picreg_t pic_max_pci_retry_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t : 16; /* 31:16 (reserved) */
picreg_t max_retry_cnt : 16; /* 15:0 */
} pic_max_pci_retry_fld_s;
} pic_max_pci_retry_u_t;
/*
* PCI Buffer (x) Max Latency Count Register
*
 * This counter contains the maximum count (in bus clocks) for a single
* request which was in-flight for this buffer. A word write to this address
* clears the count.
*/
typedef union pic_max_latency_u {	/* max bus-clock latency for a single in-flight request; word write clears */
picreg_t pic_max_latency_regval;	/* whole register */
struct {
picreg_t : 32; /* 63:32 (reserved) */
picreg_t : 16; /* 31:16 (reserved) */
picreg_t max_latency_cnt : 16; /* 15:0 */
} pic_max_latency_fld_s;
} pic_max_latency_u_t;
/*
* PCI Buffer (x) Clear All Register
*
* Any access to this register clears all the count values for the (x) registers.
*/
/*
* PCI-X Registers
*
* This register contains the address in the read buffer structure. There are
* 16 read buffer structures.
*/
/*
 * Full 64-bit address held in one of the 16 read buffer structures.
 * NOTE(review): the field is named pcix_err_addr but, per the comment
 * above, it holds the read-buffer address — name looks reused from the
 * error-address register; confirm against the PIC chip spec.
 */
typedef union pic_rd_buf_addr_u {
	picreg_t	pic_rd_buf_addr_regval;	/* raw 64-bit register value */
	struct {
		picreg_t	pcix_err_addr	:	64;	/* 63:0 */
	} pic_rd_buf_addr_fld_s;
} pic_rd_buf_addr_u_t;
/*
* PCI-X Read Buffer (x) Attribute Register
*
* This register contains the attribute data in the read buffer structure. There
* are 16 read buffer structures.
*/
/* Attribute data for one of the 16 PCI-X read buffer structures. */
typedef union pic_px_read_buf_attr_u {
	picreg_t	pic_px_read_buf_attr_regval;	/* raw 64-bit register value */
	struct {
		picreg_t	:	16;	/* 63:48 */	/* reserved */
		picreg_t	bus_cmd	:	4;	/* 47:44 */	/* PCI-X bus command */
		picreg_t	byte_cnt	:	12;	/* 43:32 */	/* transfer byte count */
		picreg_t	entry_valid	:	1;	/* 31:31 */	/* buffer entry in use */
		picreg_t	ns	:	1;	/* 30:30 */	/* presumably PCI-X No Snoop attribute — verify */
		picreg_t	ro	:	1;	/* 29:29 */	/* presumably PCI-X Relaxed Ordering attribute — verify */
		picreg_t	tag	:	5;	/* 28:24 */	/* transaction tag */
		picreg_t	bus_num	:	8;	/* 23:16 */	/* requester bus number */
		picreg_t	dev_num	:	5;	/* 15:11 */	/* requester device number */
		picreg_t	fun_num	:	3;	/* 10:8 */	/* requester function number */
		picreg_t	:	2;	/* 7:6 */	/* reserved */
		picreg_t	f_buffer_index	:	6;	/* 5:0 */
	} pic_px_read_buf_attr_fld_s;
} pic_px_read_buf_attr_u_t;
/*
* PCI-X Write Buffer (x) Address Register
*
* This register contains the address in the write buffer structure. There are
* 8 write buffer structures.
*/
/*
 * Full 64-bit address held in one of the 8 write buffer structures.
 * NOTE(review): field name pcix_err_addr matches the read-buffer
 * register above; it holds the write-buffer address per the comment.
 */
typedef union pic_px_write_buf_addr_u {
	picreg_t	pic_px_write_buf_addr_regval;	/* raw 64-bit register value */
	struct {
		picreg_t	pcix_err_addr	:	64;	/* 63:0 */
	} pic_px_write_buf_addr_fld_s;
} pic_px_write_buf_addr_u_t;
/*
* PCI-X Write Buffer (x) Attribute Register
*
* This register contains the attribute data in the write buffer structure.
* There are 8 write buffer structures.
*/
/*
 * Attribute data for one of the 8 PCI-X write buffer structures.
 * Field layout is identical to pic_px_read_buf_attr_u above.
 */
typedef union pic_px_write_buf_attr_u {
	picreg_t	pic_px_write_buf_attr_regval;	/* raw 64-bit register value */
	struct {
		picreg_t	:	16;	/* 63:48 */	/* reserved */
		picreg_t	bus_cmd	:	4;	/* 47:44 */	/* PCI-X bus command */
		picreg_t	byte_cnt	:	12;	/* 43:32 */	/* transfer byte count */
		picreg_t	entry_valid	:	1;	/* 31:31 */	/* buffer entry in use */
		picreg_t	ns	:	1;	/* 30:30 */	/* presumably PCI-X No Snoop attribute — verify */
		picreg_t	ro	:	1;	/* 29:29 */	/* presumably PCI-X Relaxed Ordering attribute — verify */
		picreg_t	tag	:	5;	/* 28:24 */	/* transaction tag */
		picreg_t	bus_num	:	8;	/* 23:16 */	/* requester bus number */
		picreg_t	dev_num	:	5;	/* 15:11 */	/* requester device number */
		picreg_t	fun_num	:	3;	/* 10:8 */	/* requester function number */
		picreg_t	:	2;	/* 7:6 */	/* reserved */
		picreg_t	f_buffer_index	:	6;	/* 5:0 */
	} pic_px_write_buf_attr_fld_s;
} pic_px_write_buf_attr_u_t;
/*
* PCI-X Write Buffer (x) Valid Register
*
* This register contains the valid or inuse cache lines for this buffer struc-
* ture.
*/
/* Bitmap of valid/in-use cache lines for this write buffer structure. */
typedef union pic_px_write_buf_valid_u {
	picreg_t	pic_px_write_buf_valid_regval;	/* raw 64-bit register value */
	struct {
		picreg_t	:	32;	/* 63:32 */	/* reserved */
		picreg_t	wrt_valid_buff	:	32;	/* 31:0 */	/* one bit per cache line */
	} pic_px_write_buf_valid_fld_s;
} pic_px_write_buf_valid_u_t;
#endif /* __ASSEMBLY__ */
#endif /* _ASM_SN_PCI_PIC_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
*/
#ifndef _ASM_IA64_SN_RW_MMR_H
#define _ASM_IA64_SN_RW_MMR_H
/*
* This file contains macros used to access MMR registers via
* uncached physical addresses.
* pio_phys_read_mmr - read an MMR
* pio_phys_write_mmr - write an MMR
* pio_atomic_phys_write_mmrs - atomically write 2 MMRs with psr.ic=0
* (interrupt collection)
*
* Addresses passed to these routines should be uncached physical addresses
* ie., 0x80000....
*/
/*
 * pio_phys_read_mmr - read a 64-bit MMR through an uncached physical
 * address (see the file-header comment: addresses look like 0x80000....).
 *
 * Interrupts (psr.i) and data translation (psr.dt) are masked around the
 * load so the raw physical address can be dereferenced directly; the
 * previous psr is restored afterwards.  Returns the value read.
 */
extern inline long
pio_phys_read_mmr(volatile long *mmr)
{
	long val;
	asm volatile
	    ("mov r2=psr;;"		/* save current psr in r2 */
	     "rsm psr.i | psr.dt;;"	/* mask interrupts + data translation */
	     "srlz.i;;"			/* serialize so the rsm takes effect */
	     "ld8.acq %0=[%1];;"	/* 8-byte acquire load from the MMR */
	     "mov psr.l=r2;;"		/* restore saved psr */
	     "srlz.i;;"			/* serialize the psr restore */
	     : "=r"(val)
	     : "r"(mmr)
	     : "r2");			/* r2 holds saved psr, hence clobbered */
	return val;
}
/*
 * pio_phys_write_mmr - write a 64-bit MMR through an uncached physical
 * address.
 *
 * Interrupts (psr.i) and data translation (psr.dt) are masked around the
 * store so the raw physical address can be used; the previous psr is
 * restored afterwards.
 */
extern inline void
pio_phys_write_mmr(volatile long *mmr, long val)
{
	asm volatile
	    ("mov r2=psr;;"		/* save current psr in r2 */
	     "rsm psr.i | psr.dt;;"	/* mask interrupts + data translation */
	     "srlz.i;;"			/* serialize so the rsm takes effect */
	     "st8.rel [%0]=%1;;"	/* 8-byte release store to the MMR */
	     "mov psr.l=r2;;"		/* restore saved psr */
	     "srlz.i;;"			/* serialize the psr restore */
	     :: "r"(mmr), "r"(val)
	     : "r2", "memory");		/* "memory": MMR write must not be cached/reordered by the compiler */
}
/*
 * pio_atomic_phys_write_mmrs - write two MMRs back-to-back with
 * psr.ic=0 (interrupt collection disabled, per the file-header comment),
 * so the pair of stores completes without an intervening interruption.
 *
 * As above, psr.i and psr.dt are also masked so the raw uncached
 * physical addresses can be used; the previous psr is restored at the end.
 */
extern inline void
pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2)
{
	asm volatile
	    ("mov r2=psr;;"			/* save current psr in r2 */
	     "rsm psr.i | psr.dt | psr.ic;;"	/* mask interrupts, data translation, interrupt collection */
	     "srlz.i;;"				/* serialize so the rsm takes effect */
	     "st8.rel [%0]=%1;"			/* first MMR store */
	     "st8.rel [%2]=%3;;"		/* second MMR store, same uninterruptible window */
	     "mov psr.l=r2;;"			/* restore saved psr */
	     "srlz.i;;"				/* serialize the psr restore */
	     :: "r"(mmr1), "r"(val1), "r"(mmr2), "r"(val2)
	     : "r2", "memory");			/* r2 saved-psr scratch; "memory" orders the stores */
}
#endif /* _ASM_IA64_SN_RW_MMR_H */
/* $Id$
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef __SYS_SN_SN2_GEO_H__
#define __SYS_SN_SN2_GEO_H__
/* Headers required by declarations in this file */
#include <asm/sn/slotnum.h>
/* The geoid_t implementation below is based loosely on the pcfg_t
implementation in sys/SN/promcfg.h. */
/* Type declarations */
/* Size of a geoid_t structure (must be before decl. of geoid_u) */
#define GEOID_SIZE 8 /* Would 16 be better? The size can
be different on different platforms. */

#define MAX_SLABS 0xe /* slabs per module */

/* Discriminator for geoid_t; holds one of the GEO_TYPE_* values below. */
typedef unsigned char geo_type_t;

/* Fields common to all substructures */
typedef struct geo_any_s {
	moduleid_t	module;	/* The module (box) this h/w lives in */
	geo_type_t	type;	/* What type of h/w is named by this geoid_t */
	slabid_t	slab;	/* The logical assembly within the module */
} geo_any_t;
/* Additional fields for particular types of hardware */

/* Compute node: the common fields identify it fully. */
typedef struct geo_node_s {
	geo_any_t	any;	/* No additional fields needed */
} geo_node_t;

/* Router: the common fields identify it fully. */
typedef struct geo_rtr_s {
	geo_any_t	any;	/* No additional fields needed */
} geo_rtr_t;

/* I/O controller: the common fields identify it fully. */
typedef struct geo_iocntl_s {
	geo_any_t	any;	/* No additional fields needed */
} geo_iocntl_t;

/* PCI card: located by its I/O controller plus bus and slot. */
typedef struct geo_pcicard_s {
	geo_iocntl_t	any;	/* The I/O controller the card sits behind */
	char		bus;	/* Bus/widget number */
	slotid_t	slot;	/* PCI slot number */
} geo_pcicard_t;
/* Subcomponents of a node */

/* CPU: located by its node plus the CPU slice on that node. */
typedef struct geo_cpu_s {
	geo_node_t	node;
	char		slice;		/* Which CPU on the node */
} geo_cpu_t;

/* Memory: located by its node plus bus and slot on that bus. */
typedef struct geo_mem_s {
	geo_node_t	node;
	char		membus;		/* The memory bus on the node */
	char		memslot;	/* The memory slot on the bus */
} geo_mem_t;
/*
 * geoid_t - geographic hardware identifier.  A union of the per-type
 * structures above; geo_any_t is the common prefix of every member, and
 * its 'type' field (a GEO_TYPE_* value) says which member is valid.
 * padsize pins the union to exactly GEOID_SIZE bytes.
 */
typedef union geoid_u {
	geo_any_t	any;
	geo_node_t	node;
	geo_iocntl_t	iocntl;
	geo_pcicard_t	pcicard;
	geo_rtr_t	rtr;
	geo_cpu_t	cpu;
	geo_mem_t	mem;
	char		padsize[GEOID_SIZE];	/* forces the union's size */
} geoid_t;
/* Preprocessor macros */

#define GEO_MAX_LEN 48 /* max. formatted length, plus some pad:
module/001c07/slab/5/node/memory/2/slot/4 */

/* Values for geo_type_t (stored in geo_any_t.type) */
#define GEO_TYPE_INVALID 0
#define GEO_TYPE_MODULE 1
#define GEO_TYPE_NODE 2
#define GEO_TYPE_RTR 3
#define GEO_TYPE_IOCNTL 4
#define GEO_TYPE_IOCARD 5
#define GEO_TYPE_CPU 6
#define GEO_TYPE_MEM 7
#define GEO_TYPE_MAX (GEO_TYPE_MEM+1)	/* one past the last valid type */

/* Parameter for hwcfg_format_geoid_compt() */
#define GEO_COMPT_MODULE 1
#define GEO_COMPT_SLAB 2
#define GEO_COMPT_IOBUS 3
#define GEO_COMPT_IOSLOT 4
#define GEO_COMPT_CPU 5
#define GEO_COMPT_MEMBUS 6
#define GEO_COMPT_MEMSLOT 7

/* Formatted-output string used for a geoid that fails validation. */
#define GEO_INVALID_STR "<invalid>"
#endif /* __SYS_SN_SN2_GEO_H__ */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment