Commit c95f5b8d authored by Pat Gefre, committed by David Mosberger

[PATCH] ia64: cleanup SN2 pci_bus_cvlink.c

I incorporated (at least in spirit, I hope) hch's suggestions on the fixup code,
put in some kfrees that I was missing, and made sn_alloc_pci_sysdata static
(thanks Bartlomiej Zolnierkiewicz).  Whitespace cleanup.
parent df39ffc5
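The functionally interesting part of the patch is the error-path cleanup in sn_pci_fixup_bus() and sn_pci_fixup_slot(): when a later allocation or the interrupt setup fails, the sysdata buffers allocated earlier are now kfree'd before returning -ENOMEM instead of being leaked. A minimal, self-contained sketch of that pattern follows; the struct names and the setup_sysdata() helper are made up for illustration and are not part of pci_bus_cvlink.c.

#include <linux/slab.h>
#include <linux/errno.h>

/* Hypothetical types, for illustration only. */
struct first_sysdata { int placeholder; };
struct second_sysdata { int placeholder; };
struct example_dev {
	struct first_sysdata *first;
	struct second_sysdata *second;
};

/*
 * Error-path cleanup: anything allocated before a failure point is
 * freed before returning, so an -ENOMEM exit does not leak memory.
 */
static int setup_sysdata(struct example_dev *dev)
{
	struct first_sysdata *first;
	struct second_sysdata *second;

	first = kmalloc(sizeof(*first), GFP_KERNEL);
	if (!first)
		return -ENOMEM;

	second = kmalloc(sizeof(*second), GFP_KERNEL);
	if (!second) {
		kfree(first);	/* release the earlier allocation */
		return -ENOMEM;
	}

	dev->first = first;
	dev->second = second;
	return 0;
}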
@@ -53,7 +53,7 @@ set_pci_provider(struct sn_device_sysdata *device_sysdata)
}
/*
- * pci_bus_cvlink_init() - To be called once during initialization before
+ * pci_bus_cvlink_init() - To be called once during initialization before
* SGI IO Infrastructure init is called.
*/
int
@@ -73,7 +73,7 @@ pci_bus_cvlink_init(void)
}
/*
- * pci_bus_to_vertex() - Given a logical Linux Bus Number returns the associated
+ * pci_bus_to_vertex() - Given a logical Linux Bus Number returns the associated
* pci bus vertex from the SGI IO Infrastructure.
*/
static inline vertex_hdl_t
@@ -91,7 +91,7 @@ pci_bus_to_vertex(unsigned char busnum)
}
/*
- * devfn_to_vertex() - returns the vertex of the device given the bus, slot,
+ * devfn_to_vertex() - returns the vertex of the device given the bus, slot,
* and function numbers.
*/
vertex_hdl_t
@@ -132,8 +132,8 @@ devfn_to_vertex(unsigned char busnum, unsigned int devfn)
* ../pci/1, ../pci/2 ..
*/
if (func == 0) {
- sprintf(name, "%d", slot);
- if (hwgraph_traverse(pci_bus, name, &device_vertex) ==
+ sprintf(name, "%d", slot);
+ if (hwgraph_traverse(pci_bus, name, &device_vertex) ==
GRAPH_SUCCESS) {
if (device_vertex) {
return(device_vertex);
@@ -160,7 +160,7 @@ devfn_to_vertex(unsigned char busnum, unsigned int devfn)
* which is expected as the pci_dev and pci_bus sysdata by the Linux
* PCI infrastructure.
*/
- struct pci_controller *
+ static struct pci_controller *
sn_alloc_pci_sysdata(void)
{
struct pci_controller *pci_sysdata;
@@ -194,6 +194,7 @@ sn_pci_fixup_bus(struct pci_bus *bus)
if (!widget_sysdata) {
printk(KERN_WARNING "sn_pci_fixup_bus(): Unable to "
"allocate memory for widget_sysdata\n");
+ kfree(pci_sysdata);
return -ENOMEM;
}
@@ -239,6 +240,7 @@ sn_pci_fixup_slot(struct pci_dev *dev)
if (!device_sysdata) {
printk(KERN_WARNING "sn_pci_fixup_slot: Unable to "
"allocate memory for device_sysdata\n");
+ kfree(pci_sysdata);
return -ENOMEM;
}
@@ -261,69 +263,69 @@ sn_pci_fixup_slot(struct pci_dev *dev)
vhdl = device_sysdata->vhdl;
/* Allocate the IORESOURCE_IO space first */
- for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
- unsigned long start, end, addr;
+ for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
+ unsigned long start, end, addr;
device_sysdata->pio_map[idx] = NULL;
- if (!(dev->resource[idx].flags & IORESOURCE_IO))
- continue;
+ if (!(dev->resource[idx].flags & IORESOURCE_IO))
+ continue;
- start = dev->resource[idx].start;
- end = dev->resource[idx].end;
- size = end - start;
- if (!size)
- continue;
+ start = dev->resource[idx].start;
+ end = dev->resource[idx].end;
+ size = end - start;
+ if (!size)
+ continue;
- addr = (unsigned long)pciio_pio_addr(vhdl, 0,
- PCIIO_SPACE_WIN(idx), 0, size,
+ addr = (unsigned long)pciio_pio_addr(vhdl, 0,
+ PCIIO_SPACE_WIN(idx), 0, size,
&device_sysdata->pio_map[idx], 0);
- if (!addr) {
- dev->resource[idx].start = 0;
- dev->resource[idx].end = 0;
- printk("sn_pci_fixup(): pio map failure for "
- "%s bar%d\n", dev->slot_name, idx);
- } else {
- addr |= __IA64_UNCACHED_OFFSET;
- dev->resource[idx].start = addr;
- dev->resource[idx].end = addr + size;
- }
- if (dev->resource[idx].flags & IORESOURCE_IO)
- cmd |= PCI_COMMAND_IO;
- }
- /* Allocate the IORESOURCE_MEM space next */
- for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
- unsigned long start, end, addr;
- if ((dev->resource[idx].flags & IORESOURCE_IO))
- continue;
- start = dev->resource[idx].start;
- end = dev->resource[idx].end;
- size = end - start;
- if (!size)
- continue;
- addr = (unsigned long)pciio_pio_addr(vhdl, 0,
- PCIIO_SPACE_WIN(idx), 0, size,
+ if (!addr) {
+ dev->resource[idx].start = 0;
+ dev->resource[idx].end = 0;
+ printk("sn_pci_fixup(): pio map failure for "
+ "%s bar%d\n", dev->slot_name, idx);
+ } else {
+ addr |= __IA64_UNCACHED_OFFSET;
+ dev->resource[idx].start = addr;
+ dev->resource[idx].end = addr + size;
+ }
+ if (dev->resource[idx].flags & IORESOURCE_IO)
+ cmd |= PCI_COMMAND_IO;
+ }
+ /* Allocate the IORESOURCE_MEM space next */
+ for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
+ unsigned long start, end, addr;
+ if ((dev->resource[idx].flags & IORESOURCE_IO))
+ continue;
+ start = dev->resource[idx].start;
+ end = dev->resource[idx].end;
+ size = end - start;
+ if (!size)
+ continue;
+ addr = (unsigned long)pciio_pio_addr(vhdl, 0,
+ PCIIO_SPACE_WIN(idx), 0, size,
&device_sysdata->pio_map[idx], 0);
- if (!addr) {
- dev->resource[idx].start = 0;
- dev->resource[idx].end = 0;
- printk("sn_pci_fixup(): pio map failure for "
- "%s bar%d\n", dev->slot_name, idx);
- } else {
- addr |= __IA64_UNCACHED_OFFSET;
- dev->resource[idx].start = addr;
- dev->resource[idx].end = addr + size;
- }
- if (dev->resource[idx].flags & IORESOURCE_MEM)
- cmd |= PCI_COMMAND_MEMORY;
+ if (!addr) {
+ dev->resource[idx].start = 0;
+ dev->resource[idx].end = 0;
+ printk("sn_pci_fixup(): pio map failure for "
+ "%s bar%d\n", dev->slot_name, idx);
+ } else {
+ addr |= __IA64_UNCACHED_OFFSET;
+ dev->resource[idx].start = addr;
+ dev->resource[idx].end = addr + size;
+ }
+ if (dev->resource[idx].flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
}
/*
@@ -346,6 +348,8 @@ sn_pci_fixup_slot(struct pci_dev *dev)
intr_handle = (pci_provider->intr_alloc)(device_vertex, NULL, lines, device_vertex);
if (intr_handle == NULL) {
printk(KERN_WARNING "sn_pci_fixup: pcibr_intr_alloc() failed\n");
+ kfree(pci_sysdata);
+ kfree(device_sysdata);
return -ENOMEM;
}
@@ -383,16 +387,16 @@ sn_pci_fixup_slot(struct pci_dev *dev)
struct sn_flush_nasid_entry flush_nasid_list[MAX_NASIDS];
/* Initialize the data structures for flushing write buffers after a PIO read.
- * The theory is:
+ * The theory is:
* Take an unused int. pin and associate it with a pin that is in use.
* After a PIO read, force an interrupt on the unused pin, forcing a write buffer flush
- * on the in use pin. This will prevent the race condition between PIO read responses and
+ * on the in use pin. This will prevent the race condition between PIO read responses and
* DMA writes.
*/
static struct sn_flush_device_list *
sn_dma_flush_init(unsigned long start, unsigned long end, int idx, int pin, int slot)
{
- nasid_t nasid;
+ nasid_t nasid;
unsigned long dnasid;
int wid_num;
int bus;
@@ -421,8 +425,8 @@ sn_dma_flush_init(unsigned long start, unsigned long end, int idx, int pin, int
itte = HUB_L(IIO_ITTE_GET(nasid, itte_index));
flush_nasid_list[nasid].iio_itte[bwin] = itte;
- wid_num = (itte >> IIO_ITTE_WIDGET_SHIFT) &
- IIO_ITTE_WIDGET_MASK;
+ wid_num = (itte >> IIO_ITTE_WIDGET_SHIFT)
+ & IIO_ITTE_WIDGET_MASK;
bus = itte & IIO_ITTE_OFFSET_MASK;
if (bus == 0x4 || bus == 0x8) {
bus = 0;
@@ -444,7 +448,7 @@ sn_dma_flush_init(unsigned long start, unsigned long end, int idx, int pin, int
printk(KERN_WARNING "sn_dma_flush_init: Cannot allocate memory for nasid sub-list\n");
return NULL;
}
- memset(flush_nasid_list[nasid].widget_p[wid_num], 0,
+ memset(flush_nasid_list[nasid].widget_p[wid_num], 0,
DEV_PER_WIDGET * sizeof (struct sn_flush_device_list));
p = &flush_nasid_list[nasid].widget_p[wid_num][0];
for (i=0; i<DEV_PER_WIDGET;i++) {
@@ -483,7 +487,7 @@ sn_dma_flush_init(unsigned long start, unsigned long end, int idx, int pin, int
* about the case when there is a card in slot 2. A multifunction card will appear
* to be in slot 6 (from an interrupt point of view) also. That's the most we'll
* have to worry about. A four function card will overload the interrupt lines in
- * slot 2 and 6.
+ * slot 2 and 6.
* We also need to special case the 12160 device in slot 3. Fortunately, we have
* a spare intr. line for pin 4, so we'll use that for the 12160.
* All other buses have slot 3 and 4 and slots 7 and 8 unused. Since we can only
@@ -503,21 +507,21 @@ sn_dma_flush_init(unsigned long start, unsigned long end, int idx, int pin, int
pcireg_bridge_intr_device_bit_set(b, (1<<18));
dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
pcireg_bridge_intr_addr_set(b, 6, ((virt_to_phys(&p->flush_addr) & 0xfffffffff) |
- (dnasid << 36) | (0xfUL << 48)));
+ (dnasid << 36) | (0xfUL << 48)));
} else if (pin == 2) { /* 12160 SCSI device in IO9 */
p->force_int_addr = (unsigned long)pcireg_bridge_force_always_addr_get(b, 4);
pcireg_bridge_intr_device_bit_set(b, (2<<12));
dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
pcireg_bridge_intr_addr_set(b, 4,
((virt_to_phys(&p->flush_addr) & 0xfffffffff) |
- (dnasid << 36) | (0xfUL << 48)));
+ (dnasid << 36) | (0xfUL << 48)));
} else { /* slot == 6 */
p->force_int_addr = (unsigned long)pcireg_bridge_force_always_addr_get(b, 7);
pcireg_bridge_intr_device_bit_set(b, (5<<21));
dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
pcireg_bridge_intr_addr_set(b, 7,
((virt_to_phys(&p->flush_addr) & 0xfffffffff) |
- (dnasid << 36) | (0xfUL << 48)));
+ (dnasid << 36) | (0xfUL << 48)));
}
} else {
p->force_int_addr = (unsigned long)pcireg_bridge_force_always_addr_get(b, (pin +2));
@@ -525,99 +529,13 @@ sn_dma_flush_init(unsigned long start, unsigned long end, int idx, int pin, int
dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
pcireg_bridge_intr_addr_set(b, (pin + 2),
((virt_to_phys(&p->flush_addr) & 0xfffffffff) |
- (dnasid << 36) | (0xfUL << 48)));
+ (dnasid << 36) | (0xfUL << 48)));
}
return p;
}
- /*
- * sn_pci_fixup() - This routine is called when platform_pci_fixup() is
- * invoked at the end of pcibios_init() to link the Linux pci
- * infrastructure to SGI IO Infrasturcture - ia64/kernel/pci.c
- *
- * Other platform specific fixup can also be done here.
- */
- static void __init
- sn_pci_fixup(int arg)
- {
- struct list_head *ln;
- struct pci_bus *pci_bus = NULL;
- struct pci_dev *pci_dev = NULL;
- extern int numnodes;
- int cnode, ret;
- if (arg == 0) {
- #ifdef CONFIG_PROC_FS
- extern void register_sn_procfs(void);
- #endif
- extern void sgi_master_io_infr_init(void);
- extern void sn_init_cpei_timer(void);
- sgi_master_io_infr_init();
- for (cnode = 0; cnode < numnodes; cnode++) {
- extern void intr_init_vecblk(cnodeid_t);
- intr_init_vecblk(cnode);
- }
- sn_init_cpei_timer();
- #ifdef CONFIG_PROC_FS
- register_sn_procfs();
- #endif
- return;
- }
- done_probing = 1;
- /*
- * Initialize the pci bus vertex in the pci_bus struct.
- */
- for( ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
- pci_bus = pci_bus_b(ln);
- ret = sn_pci_fixup_bus(pci_bus);
- if ( ret ) {
- printk(KERN_WARNING
- "sn_pci_fixup: sn_pci_fixup_bus fails : error %d\n",
- ret);
- return;
- }
- }
- /*
- * set the root start and end so that drivers calling check_region()
- * won't see a conflict
- */
- #ifdef CONFIG_IA64_SGI_SN_SIM
- if (! IS_RUNNING_ON_SIMULATOR()) {
- ioport_resource.start = 0xc000000000000000;
- ioport_resource.end = 0xcfffffffffffffff;
- }
- #endif
- /*
- * Set the root start and end for Mem Resource.
- */
- iomem_resource.start = 0;
- iomem_resource.end = 0xffffffffffffffff;
- /*
- * Initialize the device vertex in the pci_dev struct.
- */
- while ((pci_dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL) {
- ret = sn_pci_fixup_slot(pci_dev);
- if ( ret ) {
- printk(KERN_WARNING
- "sn_pci_fixup: sn_pci_fixup_slot fails : error %d\n",
- ret);
- return;
- }
- }
- }
/*
- * linux_bus_cvlink() Creates a link between the Linux PCI Bus number
+ * linux_bus_cvlink() Creates a link between the Linux PCI Bus number
* to the actual hardware component that it represents:
* /dev/hw/linux/busnum/0 -> ../../../hw/module/001c01/slab/0/Ibrick/xtalk/15/pci
*
@@ -637,7 +555,7 @@ linux_bus_cvlink(void)
continue;
sprintf(name, "%x", index);
- (void) hwgraph_edge_add(linux_busnum, busnum_to_pcibr_vhdl[index],
+ (void) hwgraph_edge_add(linux_busnum, busnum_to_pcibr_vhdl[index],
name);
}
}
@@ -648,7 +566,7 @@ linux_bus_cvlink(void)
* Linux PCI Bus numbers are assigned from lowest module_id numbers
* (rack/slot etc.)
*/
- static int
+ static int
pci_bus_map_create(struct pcibr_list_s *softlistp, moduleid_t moduleid)
{
@@ -658,10 +576,10 @@ pci_bus_map_create(struct pcibr_list_s *softlistp, moduleid_t moduleid)
memset(moduleid_str, 0, 16);
format_module_id(moduleid_str, moduleid, MODULE_FORMAT_BRIEF);
- (void) ioconfig_get_busnum((char *)moduleid_str, &basebus_num);
+ (void) ioconfig_get_busnum((char *)moduleid_str, &basebus_num);
/*
- * Assign the correct bus number and also the nasid of this
+ * Assign the correct bus number and also the nasid of this
* pci Xwidget.
*/
bus_number = basebus_num + pcibr_widget_to_bus(pci_bus);
@@ -689,20 +607,20 @@ pci_bus_map_create(struct pcibr_list_s *softlistp, moduleid_t moduleid)
printk("pci_bus_map_create: Cannot allocate memory for ate maps\n");
return -1;
}
- memset(busnum_to_atedmamaps[bus_number], 0x0,
+ memset(busnum_to_atedmamaps[bus_number], 0x0,
sizeof(struct pcibr_dmamap_s) * MAX_ATE_MAPS);
return(0);
}
/*
- * pci_bus_to_hcl_cvlink() - This routine is called after SGI IO Infrastructure
+ * pci_bus_to_hcl_cvlink() - This routine is called after SGI IO Infrastructure
* initialization has completed to set up the mappings between PCI BRIDGE
- * ASIC and logical pci bus numbers.
+ * ASIC and logical pci bus numbers.
*
* Must be called before pci_init() is invoked.
*/
int
- pci_bus_to_hcl_cvlink(void)
+ pci_bus_to_hcl_cvlink(void)
{
int i;
extern pcibr_list_p pcibr_list;
@@ -719,7 +637,7 @@ pci_bus_to_hcl_cvlink(void)
/* Is this PCI bus associated with this moduleid? */
moduleid = NODE_MODULEID(
- NASID_TO_COMPACT_NODEID(pcibr_soft->bs_nasid));
+ NASID_TO_COMPACT_NODEID(pcibr_soft->bs_nasid));
if (modules[i]->id == moduleid) {
struct pcibr_list_s *new_element;
@@ -740,9 +658,9 @@ pci_bus_to_hcl_cvlink(void)
continue;
}
- /*
- * BASEIO IObricks attached to a module have
- * a higher priority than non BASEIO IOBricks
+ /*
+ * BASEIO IObricks attached to a module have
+ * a higher priority than non BASEIO IOBricks
* when it comes to persistant pci bus
* numbering, so put them on the front of the
* list.
@@ -758,7 +676,7 @@ pci_bus_to_hcl_cvlink(void)
softlistp = softlistp->bl_next;
}
- /*
+ /*
* We now have a list of all the pci bridges associated with
* the module_id, modules[i]. Call pci_bus_map_create() for
* each pci bridge
@@ -786,13 +704,26 @@ pci_bus_to_hcl_cvlink(void)
/*
* Ugly hack to get PCI setup until we have a proper ACPI namespace.
*/
+ #define PCI_BUSES_TO_SCAN 256
extern struct pci_ops sn_pci_ops;
int __init
sn_pci_init (void)
{
- # define PCI_BUSES_TO_SCAN 256
int i = 0;
struct pci_controller *controller;
+ struct list_head *ln;
+ struct pci_bus *pci_bus = NULL;
+ struct pci_dev *pci_dev = NULL;
+ extern int numnodes;
+ int cnode, ret;
+ #ifdef CONFIG_PROC_FS
+ extern void register_sn_procfs(void);
+ #endif
+ extern void sgi_master_io_infr_init(void);
+ extern void sn_init_cpei_timer(void);
if (!ia64_platform_is("sn2") || IS_RUNNING_ON_SIMULATOR())
return 0;
@@ -805,7 +736,19 @@ sn_pci_init (void)
/*
* set pci_raw_ops, etc.
*/
- sn_pci_fixup(0);
+ sgi_master_io_infr_init();
+ for (cnode = 0; cnode < numnodes; cnode++) {
+ extern void intr_init_vecblk(cnodeid_t);
+ intr_init_vecblk(cnode);
+ }
+ sn_init_cpei_timer();
+ #ifdef CONFIG_PROC_FS
+ register_sn_procfs();
+ #endif
controller = kmalloc(sizeof(struct pci_controller), GFP_KERNEL);
if (controller) {
@@ -818,7 +761,53 @@ sn_pci_init (void)
/*
* actually find devices and fill in hwgraph structs
*/
- sn_pci_fixup(1);
+ done_probing = 1;
+ /*
+ * Initialize the pci bus vertex in the pci_bus struct.
+ */
+ for( ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
+ pci_bus = pci_bus_b(ln);
+ ret = sn_pci_fixup_bus(pci_bus);
+ if ( ret ) {
+ printk(KERN_WARNING
+ "sn_pci_fixup: sn_pci_fixup_bus fails : error %d\n",
+ ret);
+ return;
+ }
+ }
+ /*
+ * set the root start and end so that drivers calling check_region()
+ * won't see a conflict
+ */
+ #ifdef CONFIG_IA64_SGI_SN_SIM
+ if (! IS_RUNNING_ON_SIMULATOR()) {
+ ioport_resource.start = 0xc000000000000000;
+ ioport_resource.end = 0xcfffffffffffffff;
+ }
+ #endif
+ /*
+ * Set the root start and end for Mem Resource.
+ */
+ iomem_resource.start = 0;
+ iomem_resource.end = 0xffffffffffffffff;
+ /*
+ * Initialize the device vertex in the pci_dev struct.
+ */
+ while ((pci_dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL) {
+ ret = sn_pci_fixup_slot(pci_dev);
+ if ( ret ) {
+ printk(KERN_WARNING
+ "sn_pci_fixup: sn_pci_fixup_slot fails : error %d\n",
+ ret);
+ return;
+ }
+ }
return 0;
}
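Taken together, sn_pci_init() now performs the bus and slot fixups inline instead of calling sn_pci_fixup(0) and sn_pci_fixup(1). The walk it performs can be summarized with the sketch below, using the same 2.6-era helpers that appear in the diff (pci_bus_b(), pci_find_device()); the sn_fixup_all() wrapper name is made up, and the error handling is simplified to propagating the error code rather than the bare return seen in the patch.

#include <linux/pci.h>
#include <linux/init.h>

/* Sketch only: walk every root bus, then every device, as the inlined
 * code in sn_pci_init() does, stopping at the first fixup failure.
 * sn_pci_fixup_bus() and sn_pci_fixup_slot() are the routines defined
 * earlier in pci_bus_cvlink.c. */
static int __init sn_fixup_all(void)
{
	struct list_head *ln;
	struct pci_bus *bus;
	struct pci_dev *dev = NULL;
	int ret;

	for (ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
		bus = pci_bus_b(ln);
		ret = sn_pci_fixup_bus(bus);
		if (ret)
			return ret;
	}

	while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		ret = sn_pci_fixup_slot(dev);
		if (ret)
			return ret;
	}

	return 0;
}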