Commit 6f6a50c8 authored by Ben Collins

[PATCH] IEEE-1394/Firewire update

This covers a lot of ground in the Linux1394 SVN tree.  I haven't had
time to keep in sync with you in a more granular way, so here's a
behemoth patch. However, consider it well tested.
parent 8b4ad80b
@@ -5,7 +5,7 @@ menu "IEEE 1394 (FireWire) support (EXPERIMENTAL)"
config IEEE1394
tristate "IEEE 1394 (FireWire) support (EXPERIMENTAL)"
---help---
help
IEEE 1394 describes a high performance serial bus, which is also
known as FireWire(tm) or i.Link(tm) and is used for connecting all
sorts of devices (most notably digital video cameras) to your
@@ -20,6 +20,36 @@ config IEEE1394
say M here and read <file:Documentation/modules.txt>. The module
will be called ieee1394.o.
comment "Subsystem Options"
depends on IEEE1394
config IEEE1394_VERBOSEDEBUG
bool "Excessive debugging output"
depends on IEEE1394
help
If you say Y here, you will get very verbose debugging logs from
the subsystem which includes a dump of the header of every sent
and received packet. This can amount to a large amount of data
collected in a very short time, which is usually also saved to
disk by the system logging daemons.
Say Y if you really want or need the debugging output; everyone
else should say N.
config IEEE1394_OUI_DB
bool "OUI Database built-in"
depends on IEEE1394
help
If you say Y here, then an OUI list (vendor unique IDs) will be
compiled into the ieee1394 module. This doesn't really do much
except allow the vendor of a hardware node to be displayed. The
downside is that it adds about 300k to the size of the module,
or kernel (depending on whether you compile ieee1394 as a
module, or static in the kernel).
This option is not needed for userspace programs like gscanbus
to show this information.
comment "Device Drivers"
depends on IEEE1394
@@ -47,7 +77,7 @@ config IEEE1394_PCILYNX
config IEEE1394_OHCI1394
tristate "OHCI-1394 support"
depends on IEEE1394
---help---
help
Enable this driver if you have an IEEE 1394 controller based on the
OHCI-1394 specification. The current driver is only tested with OHCI
chipsets made by Texas Instruments and NEC. Most third-party vendors
@@ -64,7 +94,7 @@ comment "Protocol Drivers"
config IEEE1394_VIDEO1394
tristate "OHCI-1394 Video support"
depends on IEEE1394_OHCI1394
depends on IEEE1394 && IEEE1394_OHCI1394
help
This option enables video device usage for OHCI-1394 cards. Enable
this option only if you have an IEEE 1394 video device connected to
@@ -72,14 +102,14 @@ config IEEE1394_VIDEO1394
config IEEE1394_SBP2
tristate "SBP-2 support (Harddisks etc.)"
depends on SCSI && IEEE1394
depends on IEEE1394 && SCSI
help
This option enables you to use SBP-2 devices connected to your IEEE
1394 bus. SBP-2 devices include harddrives and DVD devices.
config IEEE1394_SBP2_PHYS_DMA
bool "Enable Phys DMA support for SBP2 (Debug)"
depends on IEEE1394_SBP2
depends on IEEE1394 && IEEE1394_SBP2
config IEEE1394_ETH1394
tristate "Ethernet over 1394"
@@ -90,8 +120,8 @@ config IEEE1394_ETH1394
config IEEE1394_DV1394
tristate "OHCI-DV I/O support"
depends on IEEE1394_OHCI1394
---help---
depends on IEEE1394 && IEEE1394_OHCI1394
help
This driver allows you to transmit and receive DV (digital video)
streams on an OHCI-1394 card using a simple frame-oriented
interface.
@@ -131,8 +161,8 @@ config IEEE1394_CMP
config IEEE1394_AMDTP
tristate "IEC61883-6 (Audio transmission) support"
depends on IEEE1394_OHCI1394 && IEEE1394_CMP
---help---
depends on IEEE1394 && IEEE1394_OHCI1394 && IEEE1394_CMP
help
This option enables the Audio & Music Data Transmission Protocol
(IEC61883-6) driver, which implements audio transmission over
IEEE1394.
@@ -144,18 +174,4 @@ config IEEE1394_AMDTP
say M here and read <file:Documentation/modules.txt>. The module
will be called amdtp.o.
config IEEE1394_VERBOSEDEBUG
bool "Excessive debugging output"
depends on IEEE1394
help
If you say Y here, you will get very verbose debugging logs from the
subsystem which includes a dump of the header of every sent and
received packet. This can amount to a large amount of data collected
in a very short time, which is usually also saved to disk by the
system logging daemons.
Say Y if you really want or need the debugging output; everyone else
should say N.
endmenu
@@ -5,7 +5,7 @@
export-objs := ieee1394_core.o ohci1394.o cmp.o
ieee1394-objs := ieee1394_core.o ieee1394_transactions.o hosts.o \
highlevel.o csr.o nodemgr.o
highlevel.o csr.o nodemgr.o oui.o dma.o iso.o
obj-$(CONFIG_IEEE1394) += ieee1394.o
obj-$(CONFIG_IEEE1394_PCILYNX) += pcilynx.o
@@ -18,4 +18,14 @@ obj-$(CONFIG_IEEE1394_ETH1394) += eth1394.o
obj-$(CONFIG_IEEE1394_AMDTP) += amdtp.o
obj-$(CONFIG_IEEE1394_CMP) += cmp.o
clean-files := oui.c
include $(TOPDIR)/Rules.make
ifeq ($(obj),)
obj = .
endif
$(obj)/oui.o: $(obj)/oui.c
$(obj)/oui.c: $(obj)/oui.db $(obj)/oui2c.sh
$(CONFIG_SHELL) $(obj)/oui2c.sh < $(obj)/oui.db > $(obj)/oui.c
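(Reviewer note: oui2c.sh turns the IEEE OUI registry in oui.db into a compilable C table at build time. The generated file presumably has roughly the following shape; the struct and the entry shown are illustrative, not taken from the patch.)

	/* hypothetical shape of the generated oui.c */
	struct oui_entry {
		unsigned int oui;	/* 24-bit organizationally unique identifier */
		const char *name;	/* registered vendor name */
	};

	static struct oui_entry oui_list[] = {
		{ 0x00000e, "FUJITSU LIMITED" },	/* one real registry entry, for illustration */
		/* ...thousands more entries, hence the ~300k size quoted in Kconfig... */
	};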
@@ -688,7 +688,7 @@ static u32 get_header_bits(struct stream *s, int sub_frame, u32 sample)
return get_iec958_header_bits(s, sub_frame, sample);
case AMDTP_FORMAT_RAW:
return 0x40000000;
return 0x40;
default:
return 0;
@@ -833,8 +833,9 @@ static int stream_alloc_packet_lists(struct stream *s)
max_nevents = fraction_ceil(&s->samples_per_cycle);
max_packet_size = max_nevents * s->dimension * 4 + 8;
s->packet_pool = pci_pool_create("packet pool", s->host->ohci->dev,
max_packet_size, 0, 0);
s->packet_pool = hpsb_pci_pool_create("packet pool", s->host->ohci->dev,
max_packet_size, 0, 0, SLAB_KERNEL);
if (s->packet_pool == NULL)
return -1;
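(Reviewer note: hpsb_pci_pool_create() looks like a compatibility wrapper over the pci_pool_create() signature change between 2.4, which takes SLAB_* allocation flags, and 2.5, which dropped them. A minimal sketch of such a wrapper, under that assumption; the macro body is illustrative, not copied from the tree:)

	#include <linux/version.h>
	#include <linux/pci.h>

	#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
	/* 2.4: pci_pool_create() still takes a gfp flags argument */
	#define hpsb_pci_pool_create(name, dev, size, align, alloc, flags) \
		pci_pool_create(name, dev, size, align, alloc, flags)
	#else
	/* 2.5: the flags argument is gone, so drop it */
	#define hpsb_pci_pool_create(name, dev, size, align, alloc, flags) \
		pci_pool_create(name, dev, size, align, alloc)
	#endif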
@@ -1018,9 +1019,10 @@ struct stream *stream_alloc(struct amdtp_host *host)
return NULL;
}
s->descriptor_pool = pci_pool_create("descriptor pool", host->ohci->dev,
s->descriptor_pool = hpsb_pci_pool_create("descriptor pool", host->ohci->dev,
sizeof(struct descriptor_block),
16, 0);
16, 0, SLAB_KERNEL);
if (s->descriptor_pool == NULL) {
kfree(s->input);
kfree(s);
@@ -1107,7 +1109,7 @@ static ssize_t amdtp_write(struct file *file, const char *buffer, size_t count,
*/
for (i = 0; i < count; i += length) {
p = buffer_put_bytes(s->input, count, &length);
p = buffer_put_bytes(s->input, count - i, &length);
copy_from_user(p, buffer + i, length);
if (s->input->length < s->input->size)
continue;
@@ -1210,7 +1212,7 @@ static void amdtp_add_host(struct hpsb_host *host)
if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME) != 0)
return;
ah = kmalloc(sizeof *ah, SLAB_KERNEL);
ah = kmalloc(sizeof *ah, in_interrupt() ? SLAB_ATOMIC : SLAB_KERNEL);
ah->host = host;
ah->ohci = host->hostdata;
INIT_LIST_HEAD(&ah->stream_list);
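(Reviewer note: the in_interrupt() ? SLAB_ATOMIC : SLAB_KERNEL idiom above, repeated in cmp.c and eth1394.c below, guards against the add_host callback running in atomic context during a bus reset. An illustrative helper capturing the pattern, not part of the patch:)

	#include <linux/slab.h>
	#include <linux/interrupt.h>

	/* pick an allocation type that is safe in the current context */
	static inline void *context_safe_kmalloc(size_t size)
	{
		return kmalloc(size, in_interrupt() ? SLAB_ATOMIC : SLAB_KERNEL);
	}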
@@ -34,6 +34,7 @@
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/interrupt.h>
#include "hosts.h"
#include "highlevel.h"
@@ -158,7 +159,7 @@ static void cmp_add_host(struct hpsb_host *host)
{
struct cmp_host *ch;
ch = kmalloc(sizeof *ch, SLAB_KERNEL);
ch = kmalloc(sizeof *ch, in_interrupt() ? SLAB_ATOMIC : SLAB_KERNEL);
if (ch == NULL) {
HPSB_ERR("Failed to allocate cmp_host");
return;
@@ -28,7 +28,7 @@
/* Module Parameters */
/* this module parameter can be used to disable mapping of the FCP registers */
MODULE_PARM(fcp,"i");
MODULE_PARM_DESC(fcp, "FCP-registers");
MODULE_PARM_DESC(fcp, "Map FCP registers (default = 1, disable = 0).");
static int fcp = 1;
static u16 csr_crc16(unsigned *data, int length)
@@ -54,8 +54,15 @@ static void host_reset(struct hpsb_host *host)
host->csr.bus_manager_id = 0x3f;
host->csr.bandwidth_available = 4915;
host->csr.channels_available_hi = ~0;
host->csr.channels_available_hi = 0xfffffffe; /* pre-alloc ch 31 per 1394a-2000 */
host->csr.channels_available_lo = ~0;
host->csr.broadcast_channel = 0x80000000 | 31;
if (host->is_irm) {
if (host->driver->hw_csr_reg) {
host->driver->hw_csr_reg(host, 2, 0xfffffffe, ~0);
}
}
host->csr.node_ids = host->node_id << 16;
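(Reviewer note: in CHANNELS_AVAILABLE_HI the most significant bit is channel 0 and the least significant bit is channel 31, so 0xfffffffe marks only channel 31, the broadcast channel, as allocated. An illustrative helper assuming that bit ordering; it is not part of the patch:)

	/* mark isochronous channel n (0..31) allocated in CHANNELS_AVAILABLE_HI;
	 * n == 31 yields the 0xfffffffe used above */
	static inline quadlet_t channels_hi_allocate(quadlet_t hi, int n)
	{
		return hi & ~(1U << (31 - n));
	}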
@@ -95,8 +102,15 @@ static void add_host(struct hpsb_host *host)
host->csr.bus_time = 0;
host->csr.bus_manager_id = 0x3f;
host->csr.bandwidth_available = 4915;
host->csr.channels_available_hi = ~0;
host->csr.channels_available_hi = 0xfffffffe; /* pre-alloc ch 31 per 1394a-2000 */
host->csr.channels_available_lo = ~0;
host->csr.broadcast_channel = 0x80000000 | 31;
if (host->is_irm) {
if (host->driver->hw_csr_reg) {
host->driver->hw_csr_reg(host, 2, 0xfffffffe, ~0);
}
}
}
int hpsb_update_config_rom(struct hpsb_host *host, const quadlet_t *new_rom,
@@ -268,6 +282,10 @@ static int read_regs(struct hpsb_host *host, int nodeid, quadlet_t *buf,
*(buf++) = cpu_to_be32(ret);
out;
case CSR_BROADCAST_CHANNEL:
*(buf++) = cpu_to_be32(host->csr.broadcast_channel);
out;
/* address gap to end - fall through to default */
default:
return RCODE_ADDRESS_ERROR;
@@ -345,6 +363,12 @@ static int write_regs(struct hpsb_host *host, int nodeid, int destid,
/* these are not writable, only lockable */
return RCODE_TYPE_ERROR;
case CSR_BROADCAST_CHANNEL:
/* only the valid bit can be written */
host->csr.broadcast_channel = (host->csr.broadcast_channel & ~0x40000000)
| (be32_to_cpu(*data) & 0x40000000);
out;
/* address gap to end - fall through */
default:
return RCODE_ADDRESS_ERROR;
@@ -373,6 +397,18 @@ static int lock_regs(struct hpsb_host *host, int nodeid, quadlet_t *store,
data = be32_to_cpu(data);
arg = be32_to_cpu(arg);
/* Is somebody releasing the broadcast_channel on us? */
if (csraddr == CSR_CHANNELS_AVAILABLE_HI && (data & 0x1)) {
/* Note: this may not be the right way to handle
* the problem, so we should look into the proper way
* eventually. */
HPSB_WARN("Node [" NODE_BUS_FMT "] wants to release "
"broadcast channel 31. Ignoring.",
NODE_BUS_ARGS(nodeid));
data &= ~0x1; /* keep broadcast channel allocated */
}
if (host->driver->hw_csr_reg) {
quadlet_t old;
@@ -389,23 +425,84 @@ static int lock_regs(struct hpsb_host *host, int nodeid, quadlet_t *store,
switch (csraddr) {
case CSR_BUS_MANAGER_ID:
regptr = &host->csr.bus_manager_id;
*store = cpu_to_be32(*regptr);
if (*regptr == arg)
*regptr = data;
break;
case CSR_BANDWIDTH_AVAILABLE:
{
quadlet_t bandwidth;
quadlet_t old;
quadlet_t new;
regptr = &host->csr.bandwidth_available;
old = *regptr;
/* bandwidth available algorithm adapted from IEEE 1394a-2000 spec */
if (arg > 0x1fff) {
*store = cpu_to_be32(old); /* change nothing */
break;
}
data &= 0x1fff;
if (arg >= data) {
/* allocate bandwidth */
bandwidth = arg - data;
if (old >= bandwidth) {
new = old - bandwidth;
*store = cpu_to_be32(arg);
*regptr = new;
} else {
*store = cpu_to_be32(old);
}
} else {
/* deallocate bandwidth */
bandwidth = data - arg;
if (old + bandwidth < 0x2000) {
new = old + bandwidth;
*store = cpu_to_be32(arg);
*regptr = new;
} else {
*store = cpu_to_be32(old);
}
}
break;
}
case CSR_CHANNELS_AVAILABLE_HI:
{
/* Lock algorithm for CHANNELS_AVAILABLE as recommended by 1394a-2000 */
quadlet_t affected_channels = arg ^ data;
regptr = &host->csr.channels_available_hi;
if ((arg & affected_channels) == (*regptr & affected_channels)) {
*regptr ^= affected_channels;
*store = cpu_to_be32(arg);
} else {
*store = cpu_to_be32(*regptr);
}
break;
}
case CSR_CHANNELS_AVAILABLE_LO:
{
/* Lock algorithm for CHANNELS_AVAILABLE as recommended by 1394a-2000 */
quadlet_t affected_channels = arg ^ data;
regptr = &host->csr.channels_available_lo;
if ((arg & affected_channels) == (*regptr & affected_channels)) {
*regptr ^= affected_channels;
*store = cpu_to_be32(arg);
} else {
*store = cpu_to_be32(*regptr);
}
break;
}
}
*store = cpu_to_be32(*regptr);
if (*regptr == arg) *regptr = data;
spin_unlock_irqrestore(&host->csr.lock, flags);
return RCODE_COMPLETE;
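(Reviewer note: a worked example of the BANDWIDTH_AVAILABLE lock above. With the register at its initial 4915 allocation units, a node that last read 4915 and wants 1000 units issues a compare-swap with arg = 4915 and data = 3915; since arg >= data, 1000 units are deducted and arg comes back in *store, which signals success. A standalone model of that case, mirroring the code above with illustrative names:)

	/* model of the 1394a-2000 BANDWIDTH_AVAILABLE lock; returns the value
	 * stored back to the requester (arg on success, current value on failure) */
	static unsigned int bw_compare_swap(unsigned int *reg,
					    unsigned int arg, unsigned int data)
	{
		unsigned int old = *reg, bandwidth;

		if (arg > 0x1fff)
			return old;		/* out of range: change nothing */
		data &= 0x1fff;

		if (arg >= data) {		/* allocate arg - data units */
			bandwidth = arg - data;
			if (old >= bandwidth) {
				*reg = old - bandwidth;
				return arg;	/* success */
			}
		} else {			/* deallocate data - arg units */
			bandwidth = data - arg;
			if (old + bandwidth < 0x2000) {
				*reg = old + bandwidth;
				return arg;
			}
		}
		return old;			/* failure: requester retries */
	}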
@@ -420,10 +517,7 @@ static int lock_regs(struct hpsb_host *host, int nodeid, quadlet_t *store,
case CSR_SPLIT_TIMEOUT_LO:
case CSR_CYCLE_TIME:
case CSR_BUS_TIME:
case CSR_BUS_MANAGER_ID:
case CSR_BANDWIDTH_AVAILABLE:
case CSR_CHANNELS_AVAILABLE_HI:
case CSR_CHANNELS_AVAILABLE_LO:
case CSR_BROADCAST_CHANNEL:
return RCODE_TYPE_ERROR;
case CSR_BUSY_TIMEOUT:
@@ -433,6 +527,97 @@ static int lock_regs(struct hpsb_host *host, int nodeid, quadlet_t *store,
}
}
static int lock64_regs(struct hpsb_host *host, int nodeid, octlet_t * store,
u64 addr, octlet_t data, octlet_t arg, int extcode, u16 fl)
{
int csraddr = addr - CSR_REGISTER_BASE;
unsigned long flags;
data = be64_to_cpu(data);
arg = be64_to_cpu(arg);
if (csraddr & 0x3)
return RCODE_TYPE_ERROR;
if (csraddr != CSR_CHANNELS_AVAILABLE
|| extcode != EXTCODE_COMPARE_SWAP)
goto unsupported_lock64req;
/* Is somebody releasing the broadcast_channel on us? */
if (csraddr == CSR_CHANNELS_AVAILABLE_HI && (data & 0x100000000ULL)) {
/* Note: this may not be the right way to handle
* the problem, so we should look into the proper way
* eventually. */
HPSB_WARN("Node [" NODE_BUS_FMT "] wants to release "
"broadcast channel 31. Ignoring.",
NODE_BUS_ARGS(nodeid));
data &= ~0x100000000ULL; /* keep broadcast channel allocated */
}
if (host->driver->hw_csr_reg) {
quadlet_t data_hi, data_lo;
quadlet_t arg_hi, arg_lo;
quadlet_t old_hi, old_lo;
data_hi = data >> 32;
data_lo = data & 0xFFFFFFFF;
arg_hi = arg >> 32;
arg_lo = arg & 0xFFFFFFFF;
old_hi = host->driver->hw_csr_reg(host, (csraddr - CSR_BUS_MANAGER_ID) >> 2,
data_hi, arg_hi);
old_lo = host->driver->hw_csr_reg(host, ((csraddr + 4) - CSR_BUS_MANAGER_ID) >> 2,
data_lo, arg_lo);
*store = cpu_to_be64(((octlet_t)old_hi << 32) | old_lo);
} else {
octlet_t old;
octlet_t affected_channels = arg ^ data;
spin_lock_irqsave(&host->csr.lock, flags);
old = ((octlet_t)host->csr.channels_available_hi << 32) | host->csr.channels_available_lo;
if ((arg & affected_channels) == (old & affected_channels)) {
host->csr.channels_available_hi ^= (affected_channels >> 32);
host->csr.channels_available_lo ^= (affected_channels & 0xffffffff);
*store = cpu_to_be64(arg);
} else {
*store = cpu_to_be64(old);
}
spin_unlock_irqrestore(&host->csr.lock, flags);
}
/* Is somebody erroneously releasing the broadcast_channel on us? */
if (host->csr.channels_available_hi & 0x1)
host->csr.channels_available_hi &= ~0x1;
return RCODE_COMPLETE;
unsupported_lock64req:
switch (csraddr) {
case CSR_STATE_CLEAR:
case CSR_STATE_SET:
case CSR_RESET_START:
case CSR_NODE_IDS:
case CSR_SPLIT_TIMEOUT_HI:
case CSR_SPLIT_TIMEOUT_LO:
case CSR_CYCLE_TIME:
case CSR_BUS_TIME:
case CSR_BUS_MANAGER_ID:
case CSR_BROADCAST_CHANNEL:
case CSR_BUSY_TIMEOUT:
case CSR_BANDWIDTH_AVAILABLE:
return RCODE_TYPE_ERROR;
default:
return RCODE_ADDRESS_ERROR;
}
}
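(Reviewer note: both CHANNELS_AVAILABLE lock paths above use the XOR trick recommended by 1394a-2000: affected_channels = arg ^ data picks out exactly the bits the requester wants to flip, and the lock succeeds if the current register agrees with arg on those bits, so unrelated channels can be claimed concurrently. A standalone model of the 64-bit case, with illustrative names:)

	/* returns the value stored back to the requester: arg on success,
	 * the current register value on failure */
	static unsigned long long channels_compare_swap(unsigned long long *reg,
							unsigned long long arg,
							unsigned long long data)
	{
		unsigned long long affected = arg ^ data;

		if ((arg & affected) == (*reg & affected)) {
			*reg ^= affected;	/* flip only the contested bits */
			return arg;
		}
		return *reg;
	}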
static int write_fcp(struct hpsb_host *host, int nodeid, int dest,
quadlet_t *data, u64 addr, unsigned int length, u16 flags)
{
@@ -474,6 +659,7 @@ static struct hpsb_address_ops reg_ops = {
.read = read_regs,
.write = write_regs,
.lock = lock_regs,
.lock64 = lock64_regs,
};
static struct hpsb_highlevel *hl;
@@ -16,8 +16,10 @@
#define CSR_BUSY_TIMEOUT 0x210
#define CSR_BUS_MANAGER_ID 0x21c
#define CSR_BANDWIDTH_AVAILABLE 0x220
#define CSR_CHANNELS_AVAILABLE 0x224
#define CSR_CHANNELS_AVAILABLE_HI 0x224
#define CSR_CHANNELS_AVAILABLE_LO 0x228
#define CSR_BROADCAST_CHANNEL 0x234
#define CSR_CONFIG_ROM 0x400
#define CSR_CONFIG_ROM_END 0x800
#define CSR_FCP_COMMAND 0xB00
@@ -40,6 +42,7 @@ struct csr_control {
quadlet_t bus_manager_id;
quadlet_t bandwidth_available;
quadlet_t channels_available_hi, channels_available_lo;
quadlet_t broadcast_channel;
quadlet_t *rom;
size_t rom_size;
/*
* DMA region bookkeeping routines
*
* Copyright (C) 2002 Maas Digital LLC
*
* This code is licensed under the GPL. See the file COPYING in the root
* directory of the kernel sources for details.
*/
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include "dma.h"
/* dma_prog_region */
void dma_prog_region_init(struct dma_prog_region *prog)
{
prog->kvirt = NULL;
prog->dev = NULL;
prog->n_pages = 0;
prog->bus_addr = 0;
}
int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes, struct pci_dev *dev)
{
/* round up to page size */
if(n_bytes % PAGE_SIZE)
n_bytes += PAGE_SIZE - (n_bytes % PAGE_SIZE);
prog->n_pages = n_bytes / PAGE_SIZE;
prog->kvirt = pci_alloc_consistent(dev, prog->n_pages * PAGE_SIZE, &prog->bus_addr);
if(!prog->kvirt) {
printk(KERN_ERR "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
dma_prog_region_free(prog);
return -ENOMEM;
}
prog->dev = dev;
return 0;
}
void dma_prog_region_free(struct dma_prog_region *prog)
{
if(prog->kvirt) {
pci_free_consistent(prog->dev, prog->n_pages * PAGE_SIZE, prog->kvirt, prog->bus_addr);
}
prog->kvirt = NULL;
prog->dev = NULL;
prog->n_pages = 0;
prog->bus_addr = 0;
}
/* dma_region */
void dma_region_init(struct dma_region *dma)
{
dma->kvirt = NULL;
dma->dev = NULL;
dma->n_pages = 0;
dma->n_dma_pages = 0;
dma->sglist = NULL;
}
int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_dev *dev, int direction)
{
unsigned int i, n_pages;
/* round up to page size */
if(n_bytes % PAGE_SIZE)
n_bytes += PAGE_SIZE - (n_bytes % PAGE_SIZE);
n_pages = n_bytes / PAGE_SIZE;
dma->kvirt = vmalloc_32(n_pages * PAGE_SIZE);
if(!dma->kvirt) {
printk(KERN_ERR "dma_region_alloc: vmalloc_32() failed\n");
goto err;
}
dma->n_pages = n_pages;
/* Clear the ram out, no junk to the user */
memset(dma->kvirt, 0, n_pages * PAGE_SIZE);
/* allocate scatter/gather list */
dma->sglist = kmalloc(dma->n_pages * sizeof(struct scatterlist), GFP_KERNEL);
if(!dma->sglist) {
printk(KERN_ERR "dma_region_alloc: kmalloc(sglist) failed\n");
goto err;
}
/* just to be safe - this will become unnecessary once sglist->address goes away */
memset(dma->sglist, 0, dma->n_pages * sizeof(struct scatterlist));
/* fill scatter/gather list with pages */
for(i = 0; i < dma->n_pages; i++) {
unsigned long va = (unsigned long) dma->kvirt + i * PAGE_SIZE;
dma->sglist[i].page = vmalloc_to_page((void *)va);
dma->sglist[i].length = PAGE_SIZE;
}
/* map sglist to the IOMMU */
dma->n_dma_pages = pci_map_sg(dev, &dma->sglist[0], dma->n_pages, direction);
if(dma->n_dma_pages == 0) {
printk(KERN_ERR "dma_region_alloc: pci_map_sg() failed\n");
goto err;
}
dma->dev = dev;
dma->direction = direction;
return 0;
err:
dma_region_free(dma);
return -ENOMEM;
}
void dma_region_free(struct dma_region *dma)
{
if(dma->n_dma_pages) {
pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages, dma->direction);
dma->n_dma_pages = 0;
dma->dev = NULL;
}
if(dma->sglist) {
kfree(dma->sglist);
dma->sglist = NULL;
}
if(dma->kvirt) {
vfree(dma->kvirt);
dma->kvirt = NULL;
dma->n_pages = 0;
}
}
/* find the scatterlist index and remaining offset corresponding to a
given offset from the beginning of the buffer */
static inline int dma_region_find(struct dma_region *dma, unsigned long offset, unsigned long *rem)
{
int i;
unsigned long off = offset;
for(i = 0; i < dma->n_dma_pages; i++) {
if(off < sg_dma_len(&dma->sglist[i])) {
*rem = off;
return i;
}
off -= sg_dma_len(&dma->sglist[i]);
}
panic("dma_region_find: offset %lu beyond end of DMA mapping\n", offset);
}
dma_addr_t dma_region_offset_to_bus(struct dma_region *dma, unsigned long offset)
{
unsigned long rem;
struct scatterlist *sg = &dma->sglist[dma_region_find(dma, offset, &rem)];
return sg_dma_address(sg) + rem;
}
void dma_region_sync(struct dma_region *dma, unsigned long offset, unsigned long len)
{
int first, last;
unsigned long rem;
if(!len)
len = 1;
first = dma_region_find(dma, offset, &rem);
last = dma_region_find(dma, offset + len - 1, &rem);
pci_dma_sync_sg(dma->dev, &dma->sglist[first], last - first + 1, dma->direction);
}
/* nopage() handler for mmap access */
static struct page*
dma_region_pagefault(struct vm_area_struct *area, unsigned long address, int write_access)
{
unsigned long offset;
unsigned long kernel_virt_addr;
struct page *ret = NOPAGE_SIGBUS;
struct dma_region *dma = (struct dma_region*) area->vm_private_data;
if(!dma->kvirt)
goto out;
if( (address < (unsigned long) area->vm_start) ||
(address > (unsigned long) area->vm_start + (PAGE_SIZE * dma->n_pages)) )
goto out;
offset = address - area->vm_start;
kernel_virt_addr = (unsigned long) dma->kvirt + offset;
ret = vmalloc_to_page((void*) kernel_virt_addr);
get_page(ret);
out:
return ret;
}
static struct vm_operations_struct dma_region_vm_ops = {
nopage: dma_region_pagefault,
};
int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma)
{
unsigned long size;
if(!dma->kvirt)
return -EINVAL;
/* must be page-aligned */
if(vma->vm_pgoff != 0)
return -EINVAL;
/* check the length */
size = vma->vm_end - vma->vm_start;
if(size > (PAGE_SIZE * dma->n_pages))
return -EINVAL;
vma->vm_ops = &dma_region_vm_ops;
vma->vm_private_data = dma;
vma->vm_file = file;
vma->vm_flags |= VM_RESERVED;
return 0;
}
/*
* DMA region bookkeeping routines
*
* Copyright (C) 2002 Maas Digital LLC
*
* This code is licensed under the GPL. See the file COPYING in the root
* directory of the kernel sources for details.
*/
#ifndef IEEE1394_DMA_H
#define IEEE1394_DMA_H
#include <linux/pci.h>
#include <asm/scatterlist.h>
/* struct dma_prog_region
a small, physically-contiguous DMA buffer with random-access,
synchronous usage characteristics
*/
struct dma_prog_region {
unsigned char *kvirt; /* kernel virtual address */
struct pci_dev *dev; /* PCI device */
unsigned int n_pages; /* # of kernel pages */
dma_addr_t bus_addr; /* base bus address */
};
/* clear out all fields but do not allocate any memory */
void dma_prog_region_init(struct dma_prog_region *prog);
int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes, struct pci_dev *dev);
void dma_prog_region_free(struct dma_prog_region *prog);
static inline dma_addr_t dma_prog_region_offset_to_bus(struct dma_prog_region *prog, unsigned long offset)
{
return prog->bus_addr + offset;
}
/* struct dma_region
a large, non-physically-contiguous DMA buffer with streaming,
asynchronous usage characteristics
*/
struct dma_region {
unsigned char *kvirt; /* kernel virtual address */
struct pci_dev *dev; /* PCI device */
unsigned int n_pages; /* # of kernel pages */
unsigned int n_dma_pages; /* # of IOMMU pages */
struct scatterlist *sglist; /* IOMMU mapping */
int direction; /* PCI_DMA_TODEVICE, etc */
};
/* clear out all fields but do not allocate anything */
void dma_region_init(struct dma_region *dma);
/* allocate the buffer and map it to the IOMMU */
int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_dev *dev, int direction);
/* unmap and free the buffer */
void dma_region_free(struct dma_region *dma);
/* sync the IO bus' view of the buffer with the CPU's view */
void dma_region_sync(struct dma_region *dma, unsigned long offset, unsigned long len);
/* map the buffer into a user space process */
int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma);
/* macro to index into a DMA region (or dma_prog_region) */
#define dma_region_i(_dma, _type, _index) ( ((_type*) ((_dma)->kvirt)) + (_index) )
/* return the DMA bus address of the byte with the given offset
relative to the beginning of the dma_region */
dma_addr_t dma_region_offset_to_bus(struct dma_region *dma, unsigned long offset);
#endif /* IEEE1394_DMA_H */
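(Reviewer note: to make the new dma_region API concrete, a hedged usage sketch for a receive ring buffer. The function name, the pdev argument, and the sizes are illustrative; the calls are the ones declared above.)

	#include "dma.h"

	static int example_ring_setup(struct pci_dev *pdev)
	{
		struct dma_region ring;
		dma_addr_t bus;

		dma_region_init(&ring);
		if (dma_region_alloc(&ring, 64 * 1024, pdev, PCI_DMA_FROMDEVICE))
			return -ENOMEM;

		/* bus address of byte 4096, as a DMA program would need it */
		bus = dma_region_offset_to_bus(&ring, 4096);
		(void) bus;	/* would be written into a descriptor */

		/* after the device writes, pull 512 bytes at offset 4096
		 * back into the CPU's view */
		dma_region_sync(&ring, 4096, 512);

		dma_region_free(&ring);
		return 0;
	}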
@@ -28,8 +28,7 @@
#include "ieee1394.h"
#include "ohci1394.h"
#include <linux/pci.h>
#include <asm/scatterlist.h>
#include "dma.h"
/* data structures private to the dv1394 driver */
/* none of this is exposed to user-space */
@@ -167,11 +166,13 @@ static inline void fill_input_more(struct input_more *im,
}
static inline void fill_input_last(struct input_last *il,
int want_interrupt,
unsigned int data_size,
unsigned long data_phys_addr)
{
u32 temp = 3 << 28; /* INPUT_LAST */
temp |= 8 << 24; /* s = 1, update xferStatus and resCount */
if (want_interrupt)
temp |= 3 << 20; /* enable interrupts */
temp |= 0xC << 16; /* enable branch to address */
/* disable wait on sync field, not used in DV :-( */
@@ -301,8 +302,7 @@ struct frame {
unsigned long data;
/* Max # of packets per frame */
/* 320 is enough for NTSC, need to check what PAL is */
#define MAX_PACKETS 500
#define MAX_PACKETS 500
/* a PAGE_SIZE memory pool for allocating CIP headers
@@ -383,35 +383,6 @@ static void frame_delete(struct frame *f);
/* reset f so that it can be used again */
static void frame_reset(struct frame *f);
/* structure for bookkeeping of a large non-physically-contiguous DMA buffer */
struct dma_region {
unsigned int n_pages;
unsigned int n_dma_pages;
struct scatterlist *sglist;
};
/* return the DMA bus address of the byte with the given offset
relative to the beginning of the dma_region */
static inline dma_addr_t dma_offset_to_bus(struct dma_region *dma, unsigned long offset)
{
int i;
struct scatterlist *sg;
for(i = 0, sg = &dma->sglist[0]; i < dma->n_dma_pages; i++, sg++) {
if(offset < sg_dma_len(sg)) {
return sg_dma_address(sg) + offset;
}
offset -= sg_dma_len(sg);
}
printk(KERN_ERR "dv1394: dma_offset_to_bus failed for offset %lu!\n", offset);
return 0;
}
/* struct video_card contains all data associated with one instance
of the dv1394 driver
*/
@@ -508,9 +479,8 @@ struct video_card {
/* the large, non-contiguous (rvmalloc()) ringbuffer for DV
data, exposed to user-space via mmap() */
unsigned char *user_buf;
unsigned long user_buf_size;
struct dma_region user_dma;
unsigned long dv_buf_size;
struct dma_region dv_buf;
/* next byte in the ringbuffer that a write() call will fill */
size_t write_off;
@@ -579,10 +549,8 @@ struct video_card {
/* physically contiguous packet ringbuffer for receive */
#define MAX_PACKET_BUFFER 30
struct packet *packet_buffer;
dma_addr_t packet_buffer_dma;
unsigned long packet_buffer_size;
struct dma_region packet_buf;
unsigned long packet_buf_size;
unsigned int current_packet;
int first_frame; /* received first start frame marker? */
@@ -53,6 +53,12 @@
via pci_alloc_consistent()
DONE:
- during reception, better handling of dropped frames and continuity errors
- during reception, prevent DMA from bypassing the irq tasklets
- reduce irq rate during reception (1/250 packets).
- add many more internal buffers during reception with scatter/gather dma.
- add dbc (continuity) checking on receive, increment status.dropped_frames
if not continuous.
- restart IT DMA after a bus reset
- safely obtain and release ISO Tx channels in cooperation with OHCI driver
- map received DIF blocks to their proper location in DV frame (ensure
@@ -91,9 +97,9 @@
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/smp_lock.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <linux/proc_fs.h>
@@ -132,7 +138,7 @@
(will cause underflows if your machine is too slow!)
*/
#define DV1394_DEBUG_LEVEL 0
#define DV1394_DEBUG_LEVEL 1
/* for debugging use ONLY: allow more than one open() of the device */
/* #define DV1394_ALLOW_MORE_THAN_ONE_OPEN 1 */
@@ -169,6 +175,15 @@ static spinlock_t dv1394_cards_lock = SPIN_LOCK_UNLOCKED;
static struct hpsb_highlevel *hl_handle; /* = NULL; */
static LIST_HEAD(dv1394_devfs);
struct dv1394_devfs_entry {
struct list_head list;
devfs_handle_t devfs;
char name[32];
struct dv1394_devfs_entry *parent;
};
static spinlock_t dv1394_devfs_lock = SPIN_LOCK_UNLOCKED;
/* translate from a struct file* to the corresponding struct video_card* */
static inline struct video_card* file_to_video_card(struct file *file)
@@ -176,39 +191,6 @@ static inline struct video_card* file_to_video_card(struct file *file)
return (struct video_card*) file->private_data;
}
/*******************************/
/* Memory management functions */
/*******************************/
/* note: we no longer use mem_map_reserve, because it causes a memory
leak, and setting vma->vm_flags to VM_RESERVED should be sufficient
to pin the pages in memory anyway. */
static void * rvmalloc(unsigned long size)
{
void * mem;
mem = vmalloc_32(size);
if(mem)
memset(mem, 0, size); /* Clear the ram out,
no junk to the user */
return mem;
}
static void rvfree(void * mem, unsigned long size)
{
if (mem) {
vfree(mem);
}
}
/***********************************/
/* END Memory management functions */
/***********************************/
/*** FRAME METHODS *********************************************************/
static void frame_reset(struct frame *f)
@@ -437,11 +419,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
/******************************/
/* first descriptor - OUTPUT_MORE_IMMEDIATE, for the controller's IT header */
fill_output_more_immediate( &(block->u.out.omi),
/* tag - what is this??? */ 1,
video->channel,
/* sync tag - what is this??? */ 0,
payload_size);
fill_output_more_immediate( &(block->u.out.omi), 1, video->channel, 0, payload_size);
if(empty_packet) {
/* second descriptor - OUTPUT_LAST for CIP header */
@@ -492,8 +470,8 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
PAGE_SIZE - (data_p % PAGE_SIZE),
/* DMA address of data_p */
dma_offset_to_bus(&f->video->user_dma,
data_p - (unsigned long) f->video->user_buf));
dma_region_offset_to_bus(&video->dv_buf,
data_p - (unsigned long) video->dv_buf.kvirt));
fill_output_last( &(block->u.out.u.full.u.cross.ol),
@@ -507,8 +485,8 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
480 - (PAGE_SIZE - (data_p % PAGE_SIZE)),
/* DMA address of data_p + PAGE_SIZE - (data_p % PAGE_SIZE) */
dma_offset_to_bus(&f->video->user_dma,
data_p + PAGE_SIZE - (data_p % PAGE_SIZE) - (unsigned long) f->video->user_buf));
dma_region_offset_to_bus(&video->dv_buf,
data_p + PAGE_SIZE - (data_p % PAGE_SIZE) - (unsigned long) video->dv_buf.kvirt));
if(first_packet)
f->frame_begin_timestamp = &(block->u.out.u.full.u.cross.ol.q[3]);
@@ -542,8 +520,8 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
/* DMA address of data_p */
dma_offset_to_bus(&f->video->user_dma,
data_p - (unsigned long) f->video->user_buf));
dma_region_offset_to_bus(&video->dv_buf,
data_p - (unsigned long) video->dv_buf.kvirt));
if(first_packet)
f->frame_begin_timestamp = &(block->u.out.u.full.u.nocross.ol.q[3]);
@@ -584,13 +562,8 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
to loop back up to the top */
*(f->frame_end_branch) = cpu_to_le32(f->descriptor_pool_dma | f->first_n_descriptors);
/* make the latest version of the frame buffer visible to the PCI card */
/* could optimize this by only syncing the pages associated with this frame */
pci_dma_sync_sg(video->ohci->dev,
&video->user_dma.sglist[0],
video->user_dma.n_dma_pages,
PCI_DMA_TODEVICE);
/* make the latest version of this frame visible to the PCI card */
dma_region_sync(&video->dv_buf, f->data - (unsigned long) video->dv_buf.kvirt, video->frame_size);
/* lock against DMA interrupt */
spin_lock_irqsave(&video->spinlock, irq_flags);
@@ -789,6 +762,9 @@ frame_put_packet (struct frame *f, struct packet *p)
int dif_sequence = p->data[1] >> 4; /* dif sequence number is in bits 4 - 7 */
int dif_block = p->data[2];
/* sanity check */
if (dif_sequence > 11 || dif_block > 149) return;
switch (section_type) {
case 0: /* 1 Header block */
memcpy( (void *) f->data + dif_sequence * 150 * 80, p->data, 480);
@@ -816,8 +792,17 @@ frame_put_packet (struct frame *f, struct packet *p)
}
static void start_dma_receive(struct video_card *video, struct frame *frame)
static void start_dma_receive(struct video_card *video)
{
if (video->first_run == 1) {
video->first_run = 0;
/* start DMA once all of the frames are READY */
video->n_clear_frames = 0;
video->first_clear_frame = -1;
video->current_packet = 0;
video->active_frame = 0;
/* reset iso recv control register */
reg_write(video->ohci, video->ohci_IsoRcvContextControlClear, 0xFFFFFFFF);
wmb();
@@ -830,7 +815,7 @@ static void start_dma_receive(struct video_card *video, struct frame *frame)
/* address and first descriptor block + Z=1 */
reg_write(video->ohci, video->ohci_IsoRcvCommandPtr,
frame->descriptor_pool_dma | 1); /* Z=1 */
video->frames[0]->descriptor_pool_dma | 1); /* Z=1 */
wmb();
/* run */
@@ -857,6 +842,14 @@ static void start_dma_receive(struct video_card *video, struct frame *frame)
printk("RUNNING!\n");
}
#endif
}
else if( reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & (1 << 11) ) {
debug_printk("DEAD, event = %x\n",
reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & 0x1F);
/* wake */
reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, (1 << 12));
}
}
@@ -864,7 +857,7 @@ static void start_dma_receive(struct video_card *video, struct frame *frame)
receive_packets() - build the DMA program for receiving
*/
static void receive_packets(struct video_card *video, struct frame *f)
static void receive_packets(struct video_card *video)
{
struct DMA_descriptor_block *block = NULL;
dma_addr_t block_dma = 0;
@@ -872,52 +865,46 @@ static void receive_packets(struct video_card *video, struct frame *f)
dma_addr_t data_dma = 0;
u32 *last_branch_address = NULL;
unsigned long irq_flags;
int want_interrupt = 0;
struct frame *f = NULL;
int i, j;
spin_lock_irqsave(&video->spinlock, irq_flags);
video->n_clear_frames = 0;
video->first_clear_frame = -1;
for (j = 0; j < video->n_frames; j++) {
for (video->current_packet = 0; video->current_packet < MAX_PACKET_BUFFER; ++video->current_packet) {
/* connect frames */
if (j > 0 && f != NULL && f->frame_end_branch != NULL)
*(f->frame_end_branch) = cpu_to_le32(video->frames[j]->descriptor_pool_dma | 1); /* set Z=1 */
f = video->frames[j];
for (i = 0; i < MAX_PACKETS; i++) {
/* locate a descriptor block and packet from the buffer */
block = &(f->descriptor_pool[video->current_packet]);
block = &(f->descriptor_pool[i]);
block_dma = ((unsigned long) block - (unsigned long) f->descriptor_pool) + f->descriptor_pool_dma;
data = &(video->packet_buffer[video->current_packet]);
data_dma = ((unsigned long) data - (unsigned long) video->packet_buffer) + video->packet_buffer_dma;
data = ((struct packet*)video->packet_buf.kvirt) + f->frame_num * MAX_PACKETS + i;
data_dma = dma_region_offset_to_bus( &video->packet_buf,
((unsigned long) data - (unsigned long) video->packet_buf.kvirt) );
/* setup DMA descriptor block */
fill_input_last( &(block->u.in.il), 512, data_dma);
want_interrupt = ((i % (MAX_PACKETS/2)) == 0 || i == (MAX_PACKETS-1));
fill_input_last( &(block->u.in.il), want_interrupt, 512, data_dma);
/* link descriptors */
last_branch_address = f->frame_end_branch;
if (last_branch_address)
if (last_branch_address != NULL)
*(last_branch_address) = cpu_to_le32(block_dma | 1); /* set Z=1 */
f->frame_end_branch = &(block->u.in.il.q[2]);
}
/* loop tail to head */
if (f->frame_end_branch)
*(f->frame_end_branch) = cpu_to_le32(f->descriptor_pool_dma | 1); /* set Z=1 */
} /* next j */
spin_unlock_irqrestore(&video->spinlock, irq_flags);
if (video->first_run) {
/* start DMA once all of the frames are READY */
video->first_run = 0;
video->current_packet = 0;
video->active_frame = f->frame_num;
start_dma_receive(video, f);
}
else if( reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & (1 << 11) ) {
debug_printk("DEAD, event = %x\n",
reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & 0x1F);
/* wake */
reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, (1 << 12));
}
}
@@ -960,10 +947,8 @@ static int do_dv1394_init(struct video_card *video, struct dv1394_init *init)
if(new_buf_size % PAGE_SIZE) new_buf_size += PAGE_SIZE - (new_buf_size % PAGE_SIZE);
/* don't allow the user to allocate the DMA buffer more than once */
if( (video->user_buf) &&
(video->user_buf_size != new_buf_size) ) {
if(video->dv_buf.kvirt && video->dv_buf_size != new_buf_size)
goto err;
}
/* shutdown the card if it's currently active */
/* (the card should not be reset if the parameters are screwy) */
@@ -1067,93 +1052,39 @@ static int do_dv1394_init(struct video_card *video, struct dv1394_init *init)
}
}
if(video->user_buf == NULL) {
unsigned int i;
if(!video->dv_buf.kvirt) {
/* allocate the ringbuffer */
video->user_buf = rvmalloc(new_buf_size);
if(!video->user_buf) {
printk(KERN_ERR "dv1394: Cannot allocate frame buffers\n");
retval = dma_region_alloc(&video->dv_buf, new_buf_size, video->ohci->dev, PCI_DMA_TODEVICE);
if(retval)
goto err_frames;
}
video->user_buf_size = new_buf_size;
/* allocate the sglist to hold the DMA addresses */
video->user_dma.n_pages = video->user_buf_size / PAGE_SIZE;
video->user_dma.sglist = kmalloc(video->user_dma.n_pages * sizeof(struct scatterlist), GFP_KERNEL);
if(!video->user_dma.sglist) {
printk(KERN_ERR "dv1394: Cannot allocate sglist for user buffer\n");
goto err_user_buf;
}
/* initialize all fields of all sglist entries to zero
(new requirement due to PCI changes in 2.4.13) */
memset(video->user_dma.sglist, 0, video->user_dma.n_pages * sizeof(struct scatterlist));
/* fill the sglist with the kernel addresses of pages in the non-contiguous buffer */
for(i = 0; i < video->user_dma.n_pages; i++) {
unsigned long va = (unsigned long) video->user_buf + i * PAGE_SIZE;
video->user_dma.sglist[i].page = vmalloc_to_page((void *)va);
video->user_dma.sglist[i].length = PAGE_SIZE;
}
/* map the buffer in the IOMMU */
/* the user_data buffer only allows DMA *to* the card for transmission;
incoming DV data comes through the packet_buffer first, and then is copied to user_data */
video->user_dma.n_dma_pages = pci_map_sg(video->ohci->dev,
&video->user_dma.sglist[0],
video->user_dma.n_pages,
PCI_DMA_TODEVICE);
if(video->user_dma.n_dma_pages == 0) {
printk(KERN_ERR "dv1394: Error mapping user buffer to the IOMMU\n");
goto err_user_buf;
}
video->dv_buf_size = new_buf_size;
debug_printk("dv1394: Allocated %d frame buffers, total %u pages (%u DMA pages), %lu bytes\n",
video->n_frames, video->user_dma.n_pages,
video->user_dma.n_dma_pages, video->user_buf_size);
video->n_frames, video->dv_buf.n_pages,
video->dv_buf.n_dma_pages, video->dv_buf_size);
}
/* set up the frame->data pointers */
for(i = 0; i < video->n_frames; i++)
video->frames[i]->data = (unsigned long) video->user_buf + i * video->frame_size;
/* allocate packet buffers */
video->packet_buffer_size = sizeof(struct packet) * MAX_PACKET_BUFFER;
if (video->packet_buffer_size % PAGE_SIZE)
video->packet_buffer_size += PAGE_SIZE - (video->packet_buffer_size % PAGE_SIZE);
video->frames[i]->data = (unsigned long) video->dv_buf.kvirt + i * video->frame_size;
if(!video->packet_buf.kvirt) {
/* allocate packet buffer */
video->packet_buf_size = sizeof(struct packet) * video->n_frames * MAX_PACKETS;
if (video->packet_buf_size % PAGE_SIZE)
video->packet_buf_size += PAGE_SIZE - (video->packet_buf_size % PAGE_SIZE);
video->packet_buffer = kmalloc(video->packet_buffer_size, GFP_KERNEL);
retval = dma_region_alloc(&video->packet_buf, video->packet_buf_size,
video->ohci->dev, PCI_DMA_FROMDEVICE);
if(retval)
goto err_dv_buf;
if(!video->packet_buffer) {
printk(KERN_ERR "dv1394: Cannot allocate packet buffers");
retval = -ENOMEM;
goto err_user_buf;
}
/* map the packet buffer into the IOMMU */
video->packet_buffer_dma = pci_map_single(video->ohci->dev,
video->packet_buffer,
video->packet_buffer_size,
PCI_DMA_FROMDEVICE);
if(!video->packet_buffer_dma) {
printk(KERN_ERR "dv1394: Cannot map packet buffer to IOMMU");
kfree(video->packet_buffer);
video->packet_buffer = NULL;
retval = -ENOMEM;
goto err_user_buf;
debug_printk("dv1394: Allocated %d packets in buffer, total %u pages (%u DMA pages), %lu bytes\n",
video->n_frames*MAX_PACKETS, video->packet_buf.n_pages,
video->packet_buf.n_dma_pages, video->packet_buf_size);
}
debug_printk("dv1394: Allocated %d packet buffers for receive, total %lu bytes\n",
MAX_PACKET_BUFFER, video->packet_buffer_size);
/* set up register offsets for IT context */
/* IT DMA context registers are spaced 16 bytes apart */
video->ohci_IsoXmitContextControlSet = OHCI1394_IsoXmitContextControlSet+16*video->ohci_it_ctx;
@@ -1177,25 +1108,8 @@ static int do_dv1394_init(struct video_card *video, struct dv1394_init *init)
return 0;
err_user_buf:
if(video->user_buf) {
if(video->user_dma.sglist) {
if(video->user_dma.n_dma_pages > 0) {
/* unmap it from the IOMMU */
pci_unmap_sg(video->ohci->dev,
video->user_dma.sglist,
video->user_dma.n_pages,
PCI_DMA_TODEVICE);
video->user_dma.n_dma_pages = 0;
}
kfree(video->user_dma.sglist);
video->user_dma.sglist = NULL;
video->user_dma.n_pages = 0;
}
rvfree(video->user_buf, video->user_buf_size);
video->user_buf = NULL;
video->user_buf_size = 0;
}
err_dv_buf:
dma_region_free(&video->dv_buf);
err_frames:
for(i = 0; i < DV1394_MAX_FRAMES; i++) {
@@ -1229,7 +1143,7 @@ static int do_dv1394_init_default(struct video_card *video)
struct dv1394_init init;
init.api_version = DV1394_API_VERSION;
init.n_frames = 2;
init.n_frames = DV1394_MAX_FRAMES / 4;
/* the following are now set via proc_fs or devfs */
init.channel = video->channel;
init.format = video->pal_or_ntsc;
@@ -1262,7 +1176,6 @@ static void stop_dma(struct video_card *video)
video->active_frame = -1;
video->first_run = 1;
/* wait until DMA really stops */
i = 0;
while(i < 1000) {
@@ -1292,7 +1205,7 @@ static void stop_dma(struct video_card *video)
static int do_dv1394_shutdown(struct video_card *video, int free_user_buf)
static int do_dv1394_shutdown(struct video_card *video, int free_dv_buf)
{
int i;
unsigned long flags;
@@ -1334,6 +1247,16 @@ static int do_dv1394_shutdown(struct video_card *video, int free_user_buf)
spin_unlock_irqrestore(&video->spinlock, flags);
/* remove tasklets */
if(video->ohci_it_ctx != -1) {
ohci1394_unregister_iso_tasklet(video->ohci, &video->it_tasklet);
video->ohci_it_ctx = -1;
}
if(video->ohci_ir_ctx != -1) {
ohci1394_unregister_iso_tasklet(video->ohci, &video->ir_tasklet);
video->ohci_ir_ctx = -1;
}
/* release the ISO channel */
if(video->channel != -1) {
u64 chan_mask;
@@ -1360,42 +1283,20 @@ static int do_dv1394_shutdown(struct video_card *video, int free_user_buf)
/* we can't free the DMA buffer unless it is guaranteed that
no more user-space mappings exist */
if(free_user_buf && video->user_buf) {
if(video->user_dma.sglist) {
if(video->user_dma.n_dma_pages > 0) {
/* unmap it from the IOMMU */
pci_unmap_sg(video->ohci->dev,
video->user_dma.sglist,
video->user_dma.n_pages,
PCI_DMA_TODEVICE);
video->user_dma.n_dma_pages = 0;
}
kfree(video->user_dma.sglist);
video->user_dma.sglist = NULL;
video->user_dma.n_pages = 0;
}
rvfree(video->user_buf, video->user_buf_size);
video->user_buf = NULL;
video->user_buf_size = 0;
if(free_dv_buf) {
dma_region_free(&video->dv_buf);
video->dv_buf_size = 0;
}
if (video->packet_buffer) {
pci_unmap_single(video->ohci->dev,
video->packet_buffer_dma,
video->packet_buffer_size,
PCI_DMA_FROMDEVICE);
kfree(video->packet_buffer);
video->packet_buffer = NULL;
video->packet_buffer_size = 0;
}
/* free packet buffer */
dma_region_free(&video->packet_buf);
video->packet_buf_size = 0;
debug_printk("dv1394: shutdown complete\n");
return 0;
}
/*
**********************************
*** MMAP() THEORY OF OPERATION ***
@@ -1418,96 +1319,28 @@ static int do_dv1394_shutdown(struct video_card *video, int free_user_buf)
force the user to choose one buffer size and stick with
it. This small sacrifice is worth the huge reduction in
error-prone code in dv1394.
Note: dv1394_mmap does no page table manipulation. The page
table entries are created by the dv1394_nopage() handler as
page faults are taken by the user.
*/
static struct page * dv1394_nopage(struct vm_area_struct * area, unsigned long address, int write_access)
{
unsigned long offset;
unsigned long kernel_virt_addr;
struct page *ret = NOPAGE_SIGBUS;
struct video_card *video = (struct video_card*) area->vm_private_data;
/* guard against process-context operations and the interrupt */
/* (by definition page faults are taken in interrupt context) */
spin_lock(&video->spinlock);
if(!video->user_buf)
goto out;
if( (address < (unsigned long) area->vm_start) ||
(address > (unsigned long) area->vm_start + video->user_buf_size) )
goto out;
offset = address - area->vm_start;
kernel_virt_addr = (unsigned long) video->user_buf + offset;
ret = vmalloc_to_page((void *)kernel_virt_addr);
get_page(ret);
out:
spin_unlock(&video->spinlock);
return ret;
}
static struct vm_operations_struct dv1394_vm_ops = {
.nopage = dv1394_nopage
};
/*
dv1394_mmap does no page table manipulation. The page table entries
are created by the dv1394_nopage() handler as page faults are taken
by the user.
*/
int dv1394_mmap(struct file *file, struct vm_area_struct *vma)
{
struct video_card *video = file_to_video_card(file);
unsigned long size;
int res = -EINVAL;
int retval = -EINVAL;
/* serialize mmap */
down(&video->sem);
if( ! video_card_initialized(video) ) {
res = do_dv1394_init_default(video);
if(res)
goto err;
retval = do_dv1394_init_default(video);
if(retval)
goto out;
}
/* region must be page-aligned */
if(vma->vm_pgoff != 0)
goto err;
/* check the size the user is trying to map */
size = vma->vm_end - vma->vm_start;
if(size > video->user_buf_size)
goto err;
/*
we don't actually mess with the page tables here.
(nopage() takes care of that from the page fault handler)
Just set up the vma->vm_ops.
*/
vma->vm_ops = &dv1394_vm_ops;
vma->vm_private_data = video;
vma->vm_file = file;
/* don't try to swap this out =) */
vma->vm_flags |= VM_RESERVED;
retval = dma_region_mmap(&video->dv_buf, file, vma);
out:
up(&video->sem);
return 0;
err:
up(&video->sem);
return res;
return retval;
}
/*** DEVICE FILE INTERFACE *************************************************/
/* no need to serialize, multiple threads OK */
@@ -1623,7 +1456,7 @@ static ssize_t dv1394_write(struct file *file, const char *buffer, size_t count,
continue; /* start over from 'while(count > 0)...' */
}
if(copy_from_user(video->user_buf + video->write_off, buffer, cnt)) {
if(copy_from_user(video->dv_buf.kvirt + video->write_off, buffer, cnt)) {
if(!ret)
ret = -EFAULT;
break;
@@ -1670,7 +1503,11 @@ static ssize_t dv1394_read(struct file *file, char *buffer, size_t count, loff_
up(&video->sem);
return ret;
}
receive_packets(video, video->frames[video->first_clear_frame]);
video->continuity_counter = -1;
receive_packets(video);
start_dma_receive(video);
}
ret = 0;
@@ -1723,7 +1560,7 @@ static ssize_t dv1394_read(struct file *file, char *buffer, size_t count, loff_
continue; /* start over from 'while(count > 0)...' */
}
if(copy_to_user(buffer, video->user_buf + video->write_off, cnt)) {
if(copy_to_user(buffer, video->dv_buf.kvirt + video->write_off, cnt)) {
if(!ret)
ret = -EFAULT;
break;
@@ -1912,14 +1749,17 @@ static int dv1394_ioctl(struct inode *inode, struct file *file,
}
case DV1394_START_RECEIVE: {
if( !video_card_initialized(video) ) {
ret = do_dv1394_init_default(video);
if(ret)
goto out;
}
receive_packets(video, video->frames[video->first_clear_frame]);
video->continuity_counter = -1;
receive_packets(video);
start_dma_receive(video);
ret = 0;
break;
@@ -2017,7 +1857,7 @@ static int dv1394_open(struct inode *inode, struct file *file)
struct video_card *p;
list_for_each(lh, &dv1394_cards) {
p = list_entry(lh, struct video_card, list);
if((p->id >> 2) == ieee1394_file_to_instance(file)) {
if((p->id) == ieee1394_file_to_instance(file)) {
video = p;
break;
}
@@ -2310,8 +2150,6 @@ static void it_tasklet_func(unsigned long data)
spin_lock(&video->spinlock);
irq_printk("INTERRUPT! Video = %08lx Iso event Recv: %08x Xmit: %08x\n",
(unsigned long) video, isoRecvIntEvent, isoXmitIntEvent);
irq_printk("ContextControl = %08x, CommandPtr = %08x\n",
reg_read(video->ohci, video->ohci_IsoXmitContextControlSet),
reg_read(video->ohci, video->ohci_IsoXmitCommandPtr)
@@ -2453,28 +2291,67 @@ static void ir_tasklet_func(unsigned long data)
int wake = 0;
struct video_card *video = (struct video_card*) data;
if( (video->ohci_ir_ctx != -1) &&
(reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & (1 << 10)) ) {
spin_lock(&video->spinlock);
if( (video->ohci_ir_ctx != -1)
&& (reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & (1 << 10)) )
{
int sof=0; /* start-of-frame flag */
struct frame *f;
u16 packet_length, packet_time;
int i, dbc=0;
struct DMA_descriptor_block *block = NULL;
u16 xferstatus;
int next_i, prev_i;
struct DMA_descriptor_block *next = NULL;
dma_addr_t next_dma = 0;
struct DMA_descriptor_block *prev = NULL;
packet_length = le16_to_cpu(video->packet_buffer[video->current_packet].data_length);
packet_time = le16_to_cpu(video->packet_buffer[video->current_packet].timestamp);
/* loop over all descriptors in all frames */
for (i = 0; i < video->n_frames*MAX_PACKETS; i++) {
struct packet *p = dma_region_i(&video->packet_buf, struct packet, video->current_packet);
/* make sure we are seeing the latest changes to p */
dma_region_sync(&video->packet_buf,
(unsigned long) p - (unsigned long) video->packet_buf.kvirt,
sizeof(struct packet));
packet_length = le16_to_cpu(p->data_length);
packet_time = le16_to_cpu(p->timestamp);
irq_printk("received packet %02d, timestamp=%04x, length=%04x, sof=%02x%02x\n", video->current_packet,
packet_time, packet_length,
video->packet_buffer[video->current_packet].data[0], video->packet_buffer[video->current_packet].data[1]);
p->data[0], p->data[1]);
/* get the descriptor based on packet_buffer cursor */
f = video->frames[video->current_packet / MAX_PACKETS];
block = &(f->descriptor_pool[video->current_packet % MAX_PACKETS]);
xferstatus = le16_to_cpu(block->u.in.il.q[3] >> 16);
xferstatus &= 0x1F;
/* get the current frame */
f = video->frames[video->active_frame];
/* exclude empty packet */
if (packet_length > 8) {
if (packet_length > 8 && xferstatus == 0x11) {
irq_printk("ir_tasklet_func: xferStatus/resCount [%d] = 0x%08x\n", i, le32_to_cpu(block->u.in.il.q[3]) );
/* check for start of frame */
sof = (video->packet_buffer[video->current_packet].data[0] == 0x1f &&
video->packet_buffer[video->current_packet].data[1] == 0x07);
/* DRD> Changed to check section type ([0]>>5==0)
and dif sequence ([1]>>4==0) */
sof = ( (p->data[0] >> 5) == 0 && (p->data[1] >> 4) == 0);
dbc = (int) (p->cip_h1 >> 24);
if ( video->continuity_counter != -1 && dbc > ((video->continuity_counter + 1) % 256) )
{
video->dropped_frames += video->n_clear_frames + 1;
video->first_frame = 0;
video->n_clear_frames = 0;
video->first_clear_frame = -1;
}
video->continuity_counter = dbc;
if (!video->first_frame) {
if (sof) {
@@ -2486,8 +2363,11 @@ static void ir_tasklet_func(unsigned long data)
frame_reset(f); /* f->state = STATE_CLEAR */
video->n_clear_frames++;
if (video->n_clear_frames > video->n_frames) {
video->n_clear_frames = video->n_frames;
video->dropped_frames++;
video->n_clear_frames--;
if (video->n_clear_frames < 0)
video->n_clear_frames = 0;
video->first_clear_frame = (video->first_clear_frame + 1) % video->n_frames;
}
if (video->first_clear_frame == -1)
video->first_clear_frame = video->active_frame;
@@ -2510,20 +2390,46 @@ static void ir_tasklet_func(unsigned long data)
printk(KERN_ERR "frame buffer overflow during receive\n");
}
/* make sure we are seeing the latest changes to packet_buffer */
pci_dma_sync_single(video->ohci->dev,
video->packet_buffer_dma,
video->packet_buffer_size,
PCI_DMA_FROMDEVICE);
frame_put_packet( f, &video->packet_buffer[video->current_packet]);
frame_put_packet(f, p);
} /* first_frame */
}
} /* not empty packet */
/* stop, end of ready packets */
else if (xferstatus == 0) {
break;
}
/* reset xferStatus & resCount */
block->u.in.il.q[3] = cpu_to_le32(512);
/* terminate dma chain at this (next) packet */
next_i = video->current_packet;
f = video->frames[next_i / MAX_PACKETS];
next = &(f->descriptor_pool[next_i % MAX_PACKETS]);
next_dma = ((unsigned long) block - (unsigned long) f->descriptor_pool) + f->descriptor_pool_dma;
next->u.in.il.q[0] |= 3 << 20; /* enable interrupt */
next->u.in.il.q[2] = 0; /* disable branch */
/* link previous to next */
prev_i = (next_i == 0) ? (MAX_PACKETS * video->n_frames - 1) : (next_i - 1);
f = video->frames[prev_i / MAX_PACKETS];
prev = &(f->descriptor_pool[prev_i % MAX_PACKETS]);
if(prev_i % (MAX_PACKETS/2)) {
prev->u.in.il.q[0] &= ~(3 << 20); /* no interrupt */
} else {
prev->u.in.il.q[0] |= 3 << 20; /* enable interrupt */
}
prev->u.in.il.q[2] = (cpu_to_le32(next_dma) | 1); /* set Z=1 */
wmb();
/* wake up DMA in case it fell asleep */
reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, (1 << 12));
/* advance packet_buffer cursor */
video->current_packet = (video->current_packet + 1) % MAX_PACKET_BUFFER;
video->current_packet = (video->current_packet + 1) % (MAX_PACKETS * video->n_frames);
} /* for all packets */
wake = 1; /* why the hell not? */
@@ -2555,40 +2461,135 @@ static struct file_operations dv1394_fops=
/*** DEVFS HELPERS *********************************************************/
struct dv1394_devfs_entry *
dv1394_devfs_find( char *name)
{
struct list_head *lh;
struct dv1394_devfs_entry *p;
spin_lock( &dv1394_devfs_lock);
if(!list_empty(&dv1394_devfs)) {
list_for_each(lh, &dv1394_devfs) {
p = list_entry(lh, struct dv1394_devfs_entry, list);
if(!strncmp(p->name, name, sizeof(p->name))) {
goto found;
}
}
}
p = NULL;
found:
spin_unlock( &dv1394_devfs_lock);
return p;
}
#ifdef CONFIG_DEVFS_FS
static int dv1394_devfs_add_entry(struct video_card *video)
{
char buf[64];
snprintf(buf, sizeof(buf), "ieee1394/dv/host%d/%s/%s",
(video->id>>2),
(video->pal_or_ntsc == DV1394_NTSC ? "NTSC" : "PAL"),
(video->mode == MODE_RECEIVE ? "in" : "out"));
char buf[32];
struct dv1394_devfs_entry *p;
struct dv1394_devfs_entry *parent;
video->devfs_handle = devfs_register(NULL, buf, DEVFS_FL_NONE,
p = kmalloc(sizeof(struct dv1394_devfs_entry), GFP_KERNEL);
if(!p) {
printk(KERN_ERR "dv1394: cannot allocate dv1394_devfs_entry\n");
goto err;
}
memset(p, 0, sizeof(struct dv1394_devfs_entry));
snprintf(buf, sizeof(buf), "dv/host%d/%s", (video->id>>2),
(video->pal_or_ntsc == DV1394_NTSC ? "NTSC" : "PAL"));
parent = dv1394_devfs_find(buf);
if (parent == NULL) {
printk(KERN_ERR "dv1394: unable to locate parent devfs of %s\n", buf);
goto err_free;
}
video->devfs_handle = devfs_register(
parent->devfs,
(video->mode == MODE_RECEIVE ? "in" : "out"),
DEVFS_FL_NONE,
IEEE1394_MAJOR,
IEEE1394_MINOR_BLOCK_DV1394*16 + video->id,
S_IFCHR | S_IRUGO | S_IWUGO,
&dv1394_fops,
(void*) video);
if (video->devfs_handle == NULL) {
printk(KERN_ERR "dv1394: unable to create /dev/%s\n", buf);
return -ENOMEM;
p->devfs = video->devfs_handle;
if (p->devfs == NULL) {
printk(KERN_ERR "dv1394: unable to create /dev/ieee1394/%s/%s\n",
parent->name,
(video->mode == MODE_RECEIVE ? "in" : "out"));
goto err_free;
}
spin_lock( &dv1394_devfs_lock);
INIT_LIST_HEAD(&p->list);
list_add_tail(&p->list, &dv1394_devfs);
spin_unlock( &dv1394_devfs_lock);
return 0;
err_free:
kfree(p);
err:
return -ENOMEM;
}
static int dv1394_devfs_add_dir(char *name)
static int
dv1394_devfs_add_dir( char *name,
struct dv1394_devfs_entry *parent,
struct dv1394_devfs_entry **out)
{
if (!devfs_mk_dir(NULL, name, NULL)) {
printk(KERN_ERR "dv1394: unable to create /dev/%s\n", name);
return -ENOMEM;
struct dv1394_devfs_entry *p;
p = kmalloc(sizeof(struct dv1394_devfs_entry), GFP_KERNEL);
if(!p) {
printk(KERN_ERR "dv1394: cannot allocate dv1394_devfs_entry\n");
goto err;
}
memset(p, 0, sizeof(struct dv1394_devfs_entry));
if (parent == NULL) {
snprintf(p->name, sizeof(p->name), "%s", name);
p->devfs = devfs_mk_dir(ieee1394_devfs_handle, name, NULL);
} else {
snprintf(p->name, sizeof(p->name), "%s/%s", parent->name, name);
p->devfs = devfs_mk_dir(parent->devfs, name, NULL);
}
if (p->devfs == NULL) {
printk(KERN_ERR "dv1394: unable to create /dev/ieee1394/%s\n", p->name);
goto err_free;
}
p->parent = parent;
if (out != NULL) *out = p;
spin_lock( &dv1394_devfs_lock);
INIT_LIST_HEAD(&p->list);
list_add_tail(&p->list, &dv1394_devfs);
spin_unlock( &dv1394_devfs_lock);
return 0;
err_free:
kfree(p);
err:
return -ENOMEM;
}
void dv1394_devfs_del(char *name)
void dv1394_devfs_del( char *name)
{
devfs_remove("ieee1394/%s", name);
struct dv1394_devfs_entry *p = dv1394_devfs_find(name);
if (p != NULL) {
devfs_unregister(p->devfs);
spin_lock( &dv1394_devfs_lock);
list_del(&p->list);
spin_unlock( &dv1394_devfs_lock);
kfree(p);
}
}
#endif /* CONFIG_DEVFS_FS */
@@ -2612,6 +2613,10 @@ static int dv1394_init(struct ti_ohci *ohci, enum pal_or_ntsc format, enum modes
/* lower 2 bits of id indicate which of four "plugs"
per host */
video->id = ohci->id << 2;
if (format == DV1394_NTSC)
video->id |= mode;
else
video->id |= 2 + mode;
video->ohci_it_ctx = -1;
video->ohci_ir_ctx = -1;
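(Reviewer note: video->id now packs the OHCI card number and one of the four per-host "plugs" into the chardev minor. An illustrative decode, assuming MODE_RECEIVE == 0 and MODE_TRANSMIT == 1; this helper is not part of the patch:)

	static inline void decode_dv1394_id(int id, int *card, int *pal, int *transmit)
	{
		*card     = id >> 2;		/* OHCI card number */
		*pal      = (id >> 1) & 1;	/* 0 = NTSC, 1 = PAL */
		*transmit = id & 1;		/* 0 = receive, 1 = transmit */
	}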
@@ -2644,8 +2649,10 @@ static int dv1394_init(struct ti_ohci *ohci, enum pal_or_ntsc format, enum modes
for(i = 0; i < DV1394_MAX_FRAMES; i++)
video->frames[i] = NULL;
video->user_buf = NULL;
video->user_buf_size = 0;
dma_region_init(&video->dv_buf);
video->dv_buf_size = 0;
dma_region_init(&video->packet_buf);
video->packet_buf_size = 0;
clear_bit(0, &video->open);
spin_lock_init(&video->spinlock);
......@@ -2658,10 +2665,6 @@ static int dv1394_init(struct ti_ohci *ohci, enum pal_or_ntsc format, enum modes
list_add_tail(&video->list, &dv1394_cards);
spin_unlock_irqrestore(&dv1394_cards_lock, flags);
if (format == DV1394_NTSC)
video->id |= mode;
else video->id |= 2 + mode;
#ifdef CONFIG_DEVFS_FS
if (dv1394_devfs_add_entry(video) < 0)
goto err_free;
......@@ -2770,12 +2773,15 @@ static void dv1394_add_host (struct hpsb_host *host)
#endif
#ifdef CONFIG_DEVFS_FS
snprintf(buf, sizeof(buf), "ieee1394/dv/host%d", ohci->id);
dv1394_devfs_add_dir(buf);
snprintf(buf, sizeof(buf), "ieee1394/dv/host%d/NTSC", ohci->id);
dv1394_devfs_add_dir(buf);
snprintf(buf, sizeof(buf), "ieee1394/dv/host%d/PAL", ohci->id);
dv1394_devfs_add_dir(buf);
{
struct dv1394_devfs_entry *devfs_entry = dv1394_devfs_find("dv");
if (devfs_entry != NULL) {
snprintf(buf, sizeof(buf), "host%d", ohci->id);
dv1394_devfs_add_dir(buf, devfs_entry, &devfs_entry);
dv1394_devfs_add_dir("NTSC", devfs_entry, NULL);
dv1394_devfs_add_dir("PAL", devfs_entry, NULL);
}
}
#endif
dv1394_init(ohci, DV1394_NTSC, MODE_RECEIVE);
......@@ -2935,7 +2941,7 @@ static int __init dv1394_init_module(void)
}
#ifdef CONFIG_DEVFS_FS
if (dv1394_devfs_add_dir("ieee1394/dv") < 0) {
if (dv1394_devfs_add_dir("dv", NULL, NULL) < 0) {
printk(KERN_ERR "dv1394: unable to create /dev/ieee1394/dv\n");
ieee1394_unregister_chardev(IEEE1394_MINOR_BLOCK_DV1394);
return -ENOMEM;
......
......@@ -55,9 +55,9 @@
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
#include <asm/delay.h>
#include <asm/semaphore.h>
#include <asm/bitops.h>
#include <net/arp.h>
#include "ieee1394_types.h"
......@@ -77,7 +77,7 @@
printk(KERN_ERR fmt, ## args)
static char version[] __devinitdata =
"$Rev: 601 $ Ben Collins <bcollins@debian.org>";
"$Rev: 641 $ Ben Collins <bcollins@debian.org>";
/* Our ieee1394 highlevel driver */
#define ETHER1394_DRIVER_NAME "ether1394"
......@@ -360,7 +360,7 @@ static void ether1394_add_host (struct hpsb_host *host)
priv->host = host;
hi = (struct host_info *)kmalloc (sizeof (struct host_info),
GFP_KERNEL);
in_interrupt() ? SLAB_ATOMIC : SLAB_KERNEL);
if (hi == NULL)
goto out;
......@@ -682,6 +682,8 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
ptask->skb = skb;
ptask->addr = addr;
ptask->dest_node = dest_node;
/* TODO: When 2.4 is out of the way, give each of our ethernet
* devs a workqueue to handle these. */
HPSB_INIT_WORK(&ptask->tq, hpsb_write_sched, ptask);
hpsb_schedule_work(&ptask->tq);
......
......@@ -28,7 +28,7 @@
LIST_HEAD(hl_drivers);
rwlock_t hl_drivers_lock = RW_LOCK_UNLOCKED;
static DECLARE_MUTEX(hl_drivers_lock);
LIST_HEAD(addr_space);
rwlock_t addr_space_lock = RW_LOCK_UNLOCKED;
......@@ -53,9 +53,9 @@ struct hpsb_highlevel *hpsb_register_highlevel(const char *name,
hl->name = name;
hl->op = ops;
write_lock_irq(&hl_drivers_lock);
down(&hl_drivers_lock);
list_add_tail(&hl->hl_list, &hl_drivers);
write_unlock_irq(&hl_drivers_lock);
up(&hl_drivers_lock);
hl_all_hosts(hl->op->add_host);
......@@ -82,9 +82,9 @@ void hpsb_unregister_highlevel(struct hpsb_highlevel *hl)
}
write_unlock_irq(&addr_space_lock);
write_lock_irq(&hl_drivers_lock);
down(&hl_drivers_lock);
list_del(&hl->hl_list);
write_unlock_irq(&hl_drivers_lock);
up(&hl_drivers_lock);
if (hl->op->remove_host)
hl_all_hosts(hl->op->remove_host);
......@@ -119,10 +119,8 @@ int hpsb_register_addrspace(struct hpsb_highlevel *hl,
write_lock_irq(&addr_space_lock);
entry = addr_space.next;
while (list_entry(entry, struct hpsb_address_serve, as_list)->end
<= start) {
if (list_entry(entry->next, struct hpsb_address_serve, as_list)
->start >= end) {
while (list_entry(entry, struct hpsb_address_serve, as_list)->end <= start) {
if (list_entry(entry->next, struct hpsb_address_serve, as_list)->start >= end) {
list_add(&as->as_list, entry);
list_add_tail(&as->addr_list, &hl->addr_list);
retval = 1;
......@@ -198,13 +196,13 @@ void highlevel_add_host(struct hpsb_host *host)
struct list_head *entry;
struct hpsb_highlevel *hl;
read_lock(&hl_drivers_lock);
down(&hl_drivers_lock);
list_for_each(entry, &hl_drivers) {
hl = list_entry(entry, struct hpsb_highlevel, hl_list);
hl->op->add_host(host);
}
read_unlock(&hl_drivers_lock);
up(&hl_drivers_lock);
}
void highlevel_remove_host(struct hpsb_host *host)
......@@ -212,14 +210,14 @@ void highlevel_remove_host(struct hpsb_host *host)
struct list_head *entry;
struct hpsb_highlevel *hl;
write_lock_irq(&hl_drivers_lock);
down(&hl_drivers_lock);
list_for_each(entry, &hl_drivers) {
hl = list_entry(entry, struct hpsb_highlevel, hl_list);
if (hl->op->remove_host)
hl->op->remove_host(host);
}
write_unlock_irq(&hl_drivers_lock);
up(&hl_drivers_lock);
}
void highlevel_host_reset(struct hpsb_host *host)
......@@ -227,14 +225,14 @@ void highlevel_host_reset(struct hpsb_host *host)
struct list_head *entry;
struct hpsb_highlevel *hl;
read_lock(&hl_drivers_lock);
down(&hl_drivers_lock);
list_for_each(entry, &hl_drivers) {
hl = list_entry(entry, struct hpsb_highlevel, hl_list);
if (hl->op->host_reset)
hl->op->host_reset(host);
}
read_unlock(&hl_drivers_lock);
up(&hl_drivers_lock);
}
void highlevel_iso_receive(struct hpsb_host *host, quadlet_t *data,
......@@ -244,7 +242,7 @@ void highlevel_iso_receive(struct hpsb_host *host, quadlet_t *data,
struct hpsb_highlevel *hl;
int channel = (data[0] >> 8) & 0x3f;
read_lock(&hl_drivers_lock);
down(&hl_drivers_lock);
entry = hl_drivers.next;
while (entry != &hl_drivers) {
......@@ -254,7 +252,7 @@ void highlevel_iso_receive(struct hpsb_host *host, quadlet_t *data,
}
entry = entry->next;
}
read_unlock(&hl_drivers_lock);
up(&hl_drivers_lock);
}
void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction,
......@@ -264,7 +262,7 @@ void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction,
struct hpsb_highlevel *hl;
int cts = data[0] >> 4;
read_lock(&hl_drivers_lock);
down(&hl_drivers_lock);
entry = hl_drivers.next;
while (entry != &hl_drivers) {
......@@ -275,7 +273,7 @@ void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction,
}
entry = entry->next;
}
read_unlock(&hl_drivers_lock);
up(&hl_drivers_lock);
}
int highlevel_read(struct hpsb_host *host, int nodeid, quadlet_t *buffer,
......
......@@ -121,6 +121,7 @@ void hpsb_unref_host(struct hpsb_host *host)
struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra)
{
struct hpsb_host *h;
int i;
h = kmalloc(sizeof(struct hpsb_host) + extra, SLAB_KERNEL);
if (!h) return NULL;
......@@ -133,8 +134,8 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra)
INIT_LIST_HEAD(&h->pending_packets);
spin_lock_init(&h->pending_pkt_lock);
sema_init(&h->tlabel_count, 64);
spin_lock_init(&h->tlabel_lock);
for (i = 0; i < ARRAY_SIZE(h->tpool); i++)
HPSB_TPOOL_INIT(&h->tpool[i]);
atomic_set(&h->generation, 0);
......
......@@ -18,6 +18,7 @@
#define CSR_CONFIG_ROM_SIZE 0x100
struct hpsb_packet;
struct hpsb_iso;
struct hpsb_host {
struct list_head host_list;
......@@ -32,13 +33,6 @@ struct hpsb_host {
spinlock_t pending_pkt_lock;
struct hpsb_queue_struct timeout_tq;
/* A bitmask where a set bit means that this tlabel is in use.
* FIXME - should be handled per node instead of per bus. */
u32 tlabel_pool[2];
struct semaphore tlabel_count;
spinlock_t tlabel_lock;
u32 tlabel_current;
unsigned char iso_listen_count[64];
int node_count; /* number of identified nodes on this bus */
......@@ -64,6 +58,9 @@ struct hpsb_host {
u8 *speed_map;
struct csr_control csr;
/* Per node tlabel pool allocation */
struct hpsb_tlabel_pool tpool[64];
struct hpsb_host_driver *driver;
struct pci_dev *pdev;
......@@ -108,6 +105,28 @@ enum devctl_cmd {
ISO_UNLISTEN_CHANNEL
};
enum isoctl_cmd {
/* rawiso API - see iso.h for the meanings of these commands
* INIT = allocate resources
* START = begin transmission/reception (arg: cycle to start on)
* STOP = halt transmission/reception
* QUEUE/RELEASE = produce/consume packets (arg: # of packets)
* SHUTDOWN = deallocate resources
*/
XMIT_INIT,
XMIT_START,
XMIT_STOP,
XMIT_QUEUE,
XMIT_SHUTDOWN,
RECV_INIT,
RECV_START,
RECV_STOP,
RECV_RELEASE,
RECV_SHUTDOWN,
};
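/* Editorial sketch of how a low-level driver might implement the isoctl
 * hook declared below in struct hpsb_host_driver (the mydrv_* names are
 * hypothetical):
 *
 *	static int mydrv_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd,
 *				int arg)
 *	{
 *		switch (cmd) {
 *		case XMIT_INIT:     return mydrv_it_alloc(iso);
 *		case XMIT_START:    return mydrv_it_start(iso, arg);
 *		case XMIT_STOP:     mydrv_it_stop(iso); return 0;
 *		case XMIT_QUEUE:    return mydrv_it_queue(iso, arg);
 *		case XMIT_SHUTDOWN: mydrv_it_free(iso); return 0;
 *		case RECV_INIT:     return mydrv_ir_alloc(iso);
 *		case RECV_START:    return mydrv_ir_start(iso, arg);
 *		case RECV_STOP:     mydrv_ir_stop(iso); return 0;
 *		case RECV_RELEASE:  return mydrv_ir_release(iso, arg);
 *		case RECV_SHUTDOWN: mydrv_ir_free(iso); return 0;
 *		}
 *		return -1;
 *	}
 */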
enum reset_types {
/* 166 microsecond reset -- only type of reset available on
non-1394a capable IEEE 1394 controllers */
......@@ -115,7 +134,13 @@ enum reset_types {
/* Short (arbitrated) reset -- only available on 1394a capable
IEEE 1394 capable controllers */
SHORT_RESET
SHORT_RESET,
/* Variants that set force_root before issuing the bus reset */
LONG_RESET_FORCE_ROOT, SHORT_RESET_FORCE_ROOT,
/* Variants that clear force_root before issuing the bus reset */
LONG_RESET_NO_FORCE_ROOT, SHORT_RESET_NO_FORCE_ROOT
};
struct hpsb_host_driver {
......@@ -145,6 +170,11 @@ struct hpsb_host_driver {
*/
int (*devctl) (struct hpsb_host *host, enum devctl_cmd command, int arg);
/* ISO transmission/reception functions. Return 0 on success, -1 on failure.
* If the low-level driver does not support the new ISO API, set isoctl to NULL.
*/
int (*isoctl) (struct hpsb_iso *iso, enum isoctl_cmd command, int arg);
/* This function is mainly to redirect local CSR reads/locks to the iso
* management registers (bus manager id, bandwidth available, channels
* available) to the hardware registers in OHCI. reg is 0,1,2,3 for bus
......@@ -156,9 +186,6 @@ struct hpsb_host_driver {
quadlet_t data, quadlet_t compare);
};
/* core internal use */
void register_builtin_lowlevels(void);
/* high level internal use */
struct hpsb_highlevel;
void hl_all_hosts(void (*function)(struct hpsb_host*));
......
......@@ -15,6 +15,7 @@
#define TCODE_CYCLE_START 0x8
#define TCODE_LOCK_REQUEST 0x9
#define TCODE_ISO_DATA 0xa
#define TCODE_STREAM_DATA 0xa
#define TCODE_LOCK_RESPONSE 0xb
#define RCODE_COMPLETE 0x0
......
......@@ -29,7 +29,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <asm/bitops.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>
#include <asm/semaphore.h>
......@@ -42,6 +42,8 @@
#include "csr.h"
#include "nodemgr.h"
#include "ieee1394_hotplug.h"
#include "dma.h"
#include "iso.h"
/*
* Disable the nodemgr detection and config rom reading functionality.
......@@ -76,28 +78,31 @@ static void dump_packet(const char *text, quadlet_t *data, int size)
printk("\n");
}
static void process_complete_tasks(struct hpsb_packet *packet)
static void run_packet_complete(struct hpsb_packet *packet)
{
struct list_head *lh, *next;
list_for_each_safe(lh, next, &packet->complete_tq) {
struct hpsb_queue_struct *tq =
list_entry(lh, struct hpsb_queue_struct, hpsb_queue_list);
list_del(&tq->hpsb_queue_list);
hpsb_schedule_work(tq);
if (packet->complete_routine != NULL) {
packet->complete_routine(packet->complete_data);
packet->complete_routine = NULL;
packet->complete_data = NULL;
}
return;
}
/**
* hpsb_add_packet_complete_task - add a new task for when a packet completes
* hpsb_set_packet_complete_task - set the task that runs when a packet
* completes. You cannot call this more than once on a single packet
* before it is sent.
*
* @packet: the packet whose completion we want the task added to
* @tq: the hpsb_queue_struct describing the task to add
* @routine: function to call
* @data: data (if any) to pass to the above function
*/
void hpsb_add_packet_complete_task(struct hpsb_packet *packet, struct hpsb_queue_struct *tq)
void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
void (*routine)(void *), void *data)
{
list_add_tail(&tq->hpsb_queue_list, &packet->complete_tq);
BUG_ON(packet->complete_routine != NULL);
packet->complete_routine = routine;
packet->complete_data = data;
return;
}
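/* Editorial usage sketch: a caller that wants completion-time cleanup
 * instead of sleeping on packet->state_change might do the following
 * (my_complete is a hypothetical callback; error handling omitted):
 *
 *	static void my_complete(void *data)
 *	{
 *		struct hpsb_packet *packet = data;
 *		hpsb_free_tlabel(packet);
 *		free_hpsb_packet(packet);
 *	}
 *
 *	hpsb_set_packet_complete_task(packet, my_complete, packet);
 *	hpsb_send_packet(packet);
 */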
......@@ -145,9 +150,10 @@ struct hpsb_packet *alloc_hpsb_packet(size_t data_size)
packet->data_size = data_size;
}
INIT_LIST_HEAD(&packet->complete_tq);
INIT_LIST_HEAD(&packet->list);
sema_init(&packet->state_change, 0);
packet->complete_routine = NULL;
packet->complete_data = NULL;
packet->state = hpsb_unused;
packet->generation = -1;
packet->data_be = 1;
......@@ -372,6 +378,7 @@ void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
/* selfid stage did not complete without error */
HPSB_NOTICE("Error in SelfID stage, resetting");
host->in_bus_reset = 0;
/* this should work from ohci1394 now... */
hpsb_reset_bus(host, LONG_RESET);
return;
} else {
......@@ -397,7 +404,6 @@ void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
host->is_irm = 0;
}
host->reset_retries = 0;
if (isroot) {
host->driver->devctl(host, ACT_CYCLE_MASTER, 1);
host->is_cycmst = 1;
......@@ -405,6 +411,29 @@ void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
atomic_inc(&host->generation);
host->in_bus_reset = 0;
highlevel_host_reset(host);
/* check for common cycle master error */
hpsb_check_cycle_master(host);
}
void hpsb_check_cycle_master(struct hpsb_host *host)
{
/* check if host is IRM and not ROOT */
if (host->is_irm && !host->is_root) {
HPSB_NOTICE("Host is IRM but not root, resetting");
if (host->reset_retries++ < 4) {
/* selfid stage did not yield valid cycle master */
hpsb_reset_bus(host, LONG_RESET_FORCE_ROOT);
} else {
host->reset_retries = 0;
HPSB_NOTICE("Stopping out-of-control reset loop");
HPSB_NOTICE("Warning - Cycle Master not set correctly");
}
return;
}
host->reset_retries = 0;
}
......@@ -425,7 +454,7 @@ void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
packet->state = hpsb_complete;
up(&packet->state_change);
up(&packet->state_change);
process_complete_tasks(packet);
run_packet_complete(packet);
return;
}
......@@ -614,7 +643,7 @@ void handle_packet_response(struct hpsb_host *host, int tcode, quadlet_t *data,
packet->state = hpsb_complete;
up(&packet->state_change);
process_complete_tasks(packet);
run_packet_complete(packet);
}
......@@ -647,6 +676,54 @@ static struct hpsb_packet *create_reply_packet(struct hpsb_host *host,
return p;
}
#define PREP_ASYNC_HEAD_RCODE(tc) \
packet->tcode = tc; \
packet->header[0] = (packet->node_id << 16) | (packet->tlabel << 10) \
| (1 << 8) | (tc << 4); \
packet->header[1] = (packet->host->node_id << 16) | (rcode << 12); \
packet->header[2] = 0
static void fill_async_readquad_resp(struct hpsb_packet *packet, int rcode,
quadlet_t data)
{
PREP_ASYNC_HEAD_RCODE(TCODE_READQ_RESPONSE);
packet->header[3] = data;
packet->header_size = 16;
packet->data_size = 0;
}
static void fill_async_readblock_resp(struct hpsb_packet *packet, int rcode,
int length)
{
if (rcode != RCODE_COMPLETE)
length = 0;
PREP_ASYNC_HEAD_RCODE(TCODE_READB_RESPONSE);
packet->header[3] = length << 16;
packet->header_size = 16;
packet->data_size = length + (length % 4 ? 4 - (length % 4) : 0);
}
static void fill_async_write_resp(struct hpsb_packet *packet, int rcode)
{
PREP_ASYNC_HEAD_RCODE(TCODE_WRITE_RESPONSE);
packet->header[2] = 0;
packet->header_size = 12;
packet->data_size = 0;
}
static void fill_async_lock_resp(struct hpsb_packet *packet, int rcode, int extcode,
int length)
{
if (rcode != RCODE_COMPLETE)
length = 0;
PREP_ASYNC_HEAD_RCODE(TCODE_LOCK_RESPONSE);
packet->header[3] = (length << 16) | extcode;
packet->header_size = 16;
packet->data_size = length;
}
#define PREP_REPLY_PACKET(length) \
packet = create_reply_packet(host, data, length); \
if (packet == NULL) break
......@@ -848,7 +925,7 @@ void abort_requests(struct hpsb_host *host)
packet->state = hpsb_complete;
packet->ack_code = ACKX_ABORTED;
up(&packet->state_change);
process_complete_tasks(packet);
run_packet_complete(packet);
}
}
......@@ -890,7 +967,7 @@ void abort_timedouts(struct hpsb_host *host)
packet->state = hpsb_complete;
packet->ack_code = ACKX_TIMEOUT;
up(&packet->state_change);
process_complete_tasks(packet);
run_packet_complete(packet);
}
}
......@@ -1143,14 +1220,17 @@ module_init(ieee1394_init);
module_exit(ieee1394_cleanup);
/* Exported symbols */
/** hosts.c **/
EXPORT_SYMBOL(hpsb_alloc_host);
EXPORT_SYMBOL(hpsb_add_host);
EXPORT_SYMBOL(hpsb_remove_host);
EXPORT_SYMBOL(hpsb_ref_host);
EXPORT_SYMBOL(hpsb_unref_host);
EXPORT_SYMBOL(hpsb_speedto_str);
EXPORT_SYMBOL(hpsb_add_packet_complete_task);
/** ieee1394_core.c **/
EXPORT_SYMBOL(hpsb_speedto_str);
EXPORT_SYMBOL(hpsb_set_packet_complete_task);
EXPORT_SYMBOL(alloc_hpsb_packet);
EXPORT_SYMBOL(free_hpsb_packet);
EXPORT_SYMBOL(hpsb_send_packet);
......@@ -1158,35 +1238,30 @@ EXPORT_SYMBOL(hpsb_reset_bus);
EXPORT_SYMBOL(hpsb_bus_reset);
EXPORT_SYMBOL(hpsb_selfid_received);
EXPORT_SYMBOL(hpsb_selfid_complete);
EXPORT_SYMBOL(hpsb_check_cycle_master);
EXPORT_SYMBOL(hpsb_packet_sent);
EXPORT_SYMBOL(hpsb_packet_received);
EXPORT_SYMBOL(ieee1394_register_chardev);
EXPORT_SYMBOL(ieee1394_unregister_chardev);
EXPORT_SYMBOL(ieee1394_devfs_handle);
EXPORT_SYMBOL(ieee1394_procfs_entry);
EXPORT_SYMBOL(get_tlabel);
EXPORT_SYMBOL(free_tlabel);
EXPORT_SYMBOL(fill_async_readquad);
EXPORT_SYMBOL(fill_async_readquad_resp);
EXPORT_SYMBOL(fill_async_readblock);
EXPORT_SYMBOL(fill_async_readblock_resp);
EXPORT_SYMBOL(fill_async_writequad);
EXPORT_SYMBOL(fill_async_writeblock);
EXPORT_SYMBOL(fill_async_write_resp);
EXPORT_SYMBOL(fill_async_lock);
EXPORT_SYMBOL(fill_async_lock_resp);
EXPORT_SYMBOL(fill_iso_packet);
EXPORT_SYMBOL(fill_phy_packet);
EXPORT_SYMBOL(hpsb_make_readqpacket);
EXPORT_SYMBOL(hpsb_make_readbpacket);
EXPORT_SYMBOL(hpsb_make_writeqpacket);
EXPORT_SYMBOL(hpsb_make_writebpacket);
/** ieee1394_transactions.c **/
EXPORT_SYMBOL(hpsb_get_tlabel);
EXPORT_SYMBOL(hpsb_free_tlabel);
EXPORT_SYMBOL(hpsb_make_readpacket);
EXPORT_SYMBOL(hpsb_make_writepacket);
EXPORT_SYMBOL(hpsb_make_lockpacket);
EXPORT_SYMBOL(hpsb_make_lock64packet);
EXPORT_SYMBOL(hpsb_make_phypacket);
EXPORT_SYMBOL(hpsb_packet_success);
EXPORT_SYMBOL(hpsb_make_packet);
EXPORT_SYMBOL(hpsb_make_isopacket);
EXPORT_SYMBOL(hpsb_read);
EXPORT_SYMBOL(hpsb_write);
EXPORT_SYMBOL(hpsb_lock);
EXPORT_SYMBOL(hpsb_lock64);
EXPORT_SYMBOL(hpsb_packet_success);
/** highlevel.c **/
EXPORT_SYMBOL(hpsb_register_highlevel);
EXPORT_SYMBOL(hpsb_unregister_highlevel);
EXPORT_SYMBOL(hpsb_register_addrspace);
......@@ -1201,20 +1276,42 @@ EXPORT_SYMBOL(highlevel_add_host);
EXPORT_SYMBOL(highlevel_remove_host);
EXPORT_SYMBOL(highlevel_host_reset);
/** nodemgr.c **/
EXPORT_SYMBOL(hpsb_guid_get_entry);
EXPORT_SYMBOL(hpsb_nodeid_get_entry);
EXPORT_SYMBOL(hpsb_check_nodeid);
EXPORT_SYMBOL(hpsb_node_fill_packet);
EXPORT_SYMBOL(hpsb_node_read);
EXPORT_SYMBOL(hpsb_node_write);
EXPORT_SYMBOL(hpsb_node_lock);
EXPORT_SYMBOL(hpsb_update_config_rom);
EXPORT_SYMBOL(hpsb_get_config_rom);
EXPORT_SYMBOL(hpsb_register_protocol);
EXPORT_SYMBOL(hpsb_unregister_protocol);
EXPORT_SYMBOL(hpsb_release_unit_directory);
EXPORT_SYMBOL(ieee1394_register_chardev);
EXPORT_SYMBOL(ieee1394_unregister_chardev);
EXPORT_SYMBOL(ieee1394_devfs_handle);
/** csr.c **/
EXPORT_SYMBOL(hpsb_update_config_rom);
EXPORT_SYMBOL(hpsb_get_config_rom);
EXPORT_SYMBOL(ieee1394_procfs_entry);
/** dma.c **/
EXPORT_SYMBOL(dma_prog_region_init);
EXPORT_SYMBOL(dma_prog_region_alloc);
EXPORT_SYMBOL(dma_prog_region_free);
EXPORT_SYMBOL(dma_region_init);
EXPORT_SYMBOL(dma_region_alloc);
EXPORT_SYMBOL(dma_region_free);
EXPORT_SYMBOL(dma_region_sync);
EXPORT_SYMBOL(dma_region_mmap);
EXPORT_SYMBOL(dma_region_offset_to_bus);
/** iso.c **/
EXPORT_SYMBOL(hpsb_iso_xmit_init);
EXPORT_SYMBOL(hpsb_iso_recv_init);
EXPORT_SYMBOL(hpsb_iso_xmit_start);
EXPORT_SYMBOL(hpsb_iso_recv_start);
EXPORT_SYMBOL(hpsb_iso_stop);
EXPORT_SYMBOL(hpsb_iso_shutdown);
EXPORT_SYMBOL(hpsb_iso_xmit_queue_packets);
EXPORT_SYMBOL(hpsb_iso_recv_release_packets);
EXPORT_SYMBOL(hpsb_iso_n_ready);
EXPORT_SYMBOL(hpsb_iso_packet_data);
EXPORT_SYMBOL(hpsb_iso_packet_info);
......@@ -68,7 +68,10 @@ struct hpsb_packet {
/* Very core internal, don't care. */
struct semaphore state_change;
struct list_head complete_tq;
/* Function (and optional data to pass to it) to call when this
* packet is completed. */
void (*complete_routine)(void *);
void *complete_data;
/* Store jiffies for implementing bus timeouts. */
unsigned long sendtime;
......@@ -76,8 +79,9 @@ struct hpsb_packet {
quadlet_t embedded_header[5];
};
/* add a new task for when a packet completes */
void hpsb_add_packet_complete_task(struct hpsb_packet *packet, struct hpsb_queue_struct *tq);
/* Set a task for when a packet completes */
void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
void (*routine)(void *), void *data);
static inline struct hpsb_packet *driver_packet(struct list_head *l)
{
......@@ -136,6 +140,12 @@ void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid);
*/
void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot);
/*
* Check bus reset results to find cycle master
*/
void hpsb_check_cycle_master(struct hpsb_host *host);
/*
* Notify core of sending a packet. Ackcode is the ack code returned for async
* transmits or ACKX_SEND_ERROR if the transmission failed completely; ACKX_NONE
......
......@@ -10,14 +10,16 @@
*/
#include <linux/sched.h>
#include <linux/bitops.h>
#include <asm/errno.h>
#include <asm/bitops.h>
#include <linux/interrupt.h>
#include "ieee1394.h"
#include "ieee1394_types.h"
#include "hosts.h"
#include "ieee1394_core.h"
#include "highlevel.h"
#include "nodemgr.h"
#define PREP_ASYNC_HEAD_ADDRESS(tc) \
......@@ -27,15 +29,8 @@
packet->header[1] = (packet->host->node_id << 16) | (addr >> 32); \
packet->header[2] = addr & 0xffffffff
#define PREP_ASYNC_HEAD_RCODE(tc) \
packet->tcode = tc; \
packet->header[0] = (packet->node_id << 16) | (packet->tlabel << 10) \
| (1 << 8) | (tc << 4); \
packet->header[1] = (packet->host->node_id << 16) | (rcode << 12); \
packet->header[2] = 0
void fill_async_readquad(struct hpsb_packet *packet, u64 addr)
static void fill_async_readquad(struct hpsb_packet *packet, u64 addr)
{
PREP_ASYNC_HEAD_ADDRESS(TCODE_READQ);
packet->header_size = 12;
......@@ -43,16 +38,7 @@ void fill_async_readquad(struct hpsb_packet *packet, u64 addr)
packet->expect_response = 1;
}
void fill_async_readquad_resp(struct hpsb_packet *packet, int rcode,
quadlet_t data)
{
PREP_ASYNC_HEAD_RCODE(TCODE_READQ_RESPONSE);
packet->header[3] = data;
packet->header_size = 16;
packet->data_size = 0;
}
void fill_async_readblock(struct hpsb_packet *packet, u64 addr, int length)
static void fill_async_readblock(struct hpsb_packet *packet, u64 addr, int length)
{
PREP_ASYNC_HEAD_ADDRESS(TCODE_READB);
packet->header[3] = length << 16;
......@@ -61,20 +47,7 @@ void fill_async_readblock(struct hpsb_packet *packet, u64 addr, int length)
packet->expect_response = 1;
}
void fill_async_readblock_resp(struct hpsb_packet *packet, int rcode,
int length)
{
if (rcode != RCODE_COMPLETE) {
length = 0;
}
PREP_ASYNC_HEAD_RCODE(TCODE_READB_RESPONSE);
packet->header[3] = length << 16;
packet->header_size = 16;
packet->data_size = length + (length % 4 ? 4 - (length % 4) : 0);
}
void fill_async_writequad(struct hpsb_packet *packet, u64 addr, quadlet_t data)
static void fill_async_writequad(struct hpsb_packet *packet, u64 addr, quadlet_t data)
{
PREP_ASYNC_HEAD_ADDRESS(TCODE_WRITEQ);
packet->header[3] = data;
......@@ -83,7 +56,7 @@ void fill_async_writequad(struct hpsb_packet *packet, u64 addr, quadlet_t data)
packet->expect_response = 1;
}
void fill_async_writeblock(struct hpsb_packet *packet, u64 addr, int length)
static void fill_async_writeblock(struct hpsb_packet *packet, u64 addr, int length)
{
PREP_ASYNC_HEAD_ADDRESS(TCODE_WRITEB);
packet->header[3] = length << 16;
......@@ -92,15 +65,7 @@ void fill_async_writeblock(struct hpsb_packet *packet, u64 addr, int length)
packet->data_size = length + (length % 4 ? 4 - (length % 4) : 0);
}
void fill_async_write_resp(struct hpsb_packet *packet, int rcode)
{
PREP_ASYNC_HEAD_RCODE(TCODE_WRITE_RESPONSE);
packet->header[2] = 0;
packet->header_size = 12;
packet->data_size = 0;
}
void fill_async_lock(struct hpsb_packet *packet, u64 addr, int extcode,
static void fill_async_lock(struct hpsb_packet *packet, u64 addr, int extcode,
int length)
{
PREP_ASYNC_HEAD_ADDRESS(TCODE_LOCK_REQUEST);
......@@ -110,20 +75,7 @@ void fill_async_lock(struct hpsb_packet *packet, u64 addr, int extcode,
packet->expect_response = 1;
}
void fill_async_lock_resp(struct hpsb_packet *packet, int rcode, int extcode,
int length)
{
if (rcode != RCODE_COMPLETE) {
length = 0;
}
PREP_ASYNC_HEAD_RCODE(TCODE_LOCK_RESPONSE);
packet->header[3] = (length << 16) | extcode;
packet->header_size = 16;
packet->data_size = length;
}
void fill_iso_packet(struct hpsb_packet *packet, int length, int channel,
static void fill_iso_packet(struct hpsb_packet *packet, int length, int channel,
int tag, int sync)
{
packet->header[0] = (length << 16) | (tag << 14) | (channel << 8)
......@@ -135,7 +87,7 @@ void fill_iso_packet(struct hpsb_packet *packet, int length, int channel,
packet->tcode = TCODE_ISO_DATA;
}
void fill_phy_packet(struct hpsb_packet *packet, quadlet_t data)
static void fill_phy_packet(struct hpsb_packet *packet, quadlet_t data)
{
packet->header[0] = data;
packet->header[1] = ~data;
......@@ -148,9 +100,8 @@ void fill_phy_packet(struct hpsb_packet *packet, quadlet_t data)
/**
* get_tlabel - allocate a transaction label
* @host: host to be used for transmission
* @nodeid: the node ID of the transmission target
* hpsb_get_tlabel - allocate a transaction label
* @packet: the packet whose tlabel/tpool we set
* @wait: whether to sleep if no tlabel is available
*
* Every asynchronous transaction on the 1394 bus needs a transaction label to
......@@ -159,72 +110,66 @@ void fill_phy_packet(struct hpsb_packet *packet, quadlet_t data)
* matching possible without ambiguity.
*
* There are 64 different tlabels, so an allocated tlabel has to be freed with
* free_tlabel() after the transaction is complete (unless it's reused again for
* hpsb_free_tlabel() after the transaction is complete (unless it's reused again for
* the same target node).
*
* @wait must not be set to true if you are calling from interrupt context.
* @wait cannot be set if in_interrupt()
*
* Return value: The allocated transaction label or -1 if there was no free
* tlabel and @wait is false.
* Return value: Zero on success, otherwise non-zero. A non-zero return
* generally means there are no available tlabels.
*/
int get_tlabel(struct hpsb_host *host, nodeid_t nodeid, int wait)
int hpsb_get_tlabel(struct hpsb_packet *packet, int wait)
{
int tlabel = 0;
unsigned long flags;
int found_tlabel = 0;
struct hpsb_tlabel_pool *tp;
tp = &packet->host->tpool[packet->node_id & NODE_MASK];
if (wait) {
down(&host->tlabel_count);
BUG_ON(in_interrupt());
down(&tp->count);
} else {
if (down_trylock(&host->tlabel_count)) return -1;
if (down_trylock(&tp->count))
return 1;
}
spin_lock_irqsave(&host->tlabel_lock, flags);
while (!found_tlabel) {
tlabel = host->tlabel_current;
if (tlabel < 32 && !(host->tlabel_pool[0] & 1 << tlabel)) {
host->tlabel_pool[0] |= 1 << tlabel;
found_tlabel = 1;
} else if (!(host->tlabel_pool[1] & 1 << (tlabel - 32))) {
host->tlabel_pool[1] |= 1 << (tlabel - 32);
found_tlabel = 1;
}
host->tlabel_current = (host->tlabel_current + 1) % 64;
}
spin_lock_irqsave(&tp->lock, flags);
spin_unlock_irqrestore(&host->tlabel_lock, flags);
packet->tlabel = find_next_zero_bit(&tp->pool, 64, tp->next);
tp->next = (packet->tlabel + 1) % 64;
/* Should _never_ happen */
BUG_ON(test_and_set_bit(packet->tlabel, &tp->pool));
tp->allocations++;
spin_unlock_irqrestore(&tp->lock, flags);
return tlabel;
return 0;
}
/**
* free_tlabel - free an allocated transaction label
* @host: host to be used for transmission
* @nodeid: the node ID of the transmission target
* @tlabel: the transaction label to free
* hpsb_free_tlabel - free an allocated transaction label
* @packet: packet whose tlabel/tpool needs to be cleared
*
* Frees the transaction label allocated with get_tlabel(). The tlabel has to
* be freed after the transaction is complete (i.e. response was received for a
* split transaction or packet was sent for a unified transaction).
* Frees the transaction label allocated with hpsb_get_tlabel(). The
* tlabel has to be freed after the transaction is complete (i.e. response
* was received for a split transaction or packet was sent for a unified
* transaction).
*
* A tlabel must not be freed twice.
*/
void free_tlabel(struct hpsb_host *host, nodeid_t nodeid, int tlabel)
void hpsb_free_tlabel(struct hpsb_packet *packet)
{
unsigned long flags;
struct hpsb_tlabel_pool *tp;
spin_lock_irqsave(&host->tlabel_lock, flags);
tp = &packet->host->tpool[packet->node_id & NODE_MASK];
if (tlabel < 32) {
host->tlabel_pool[0] &= ~(1 << tlabel);
} else {
host->tlabel_pool[1] &= ~(1 << (tlabel-32));
}
BUG_ON(packet->tlabel > 63 || packet->tlabel < 0);
spin_unlock_irqrestore(&host->tlabel_lock, flags);
spin_lock_irqsave(&tp->lock, flags);
BUG_ON(!test_and_clear_bit(packet->tlabel, &tp->pool));
spin_unlock_irqrestore(&tp->lock, flags);
up(&host->tlabel_count);
up(&tp->count);
}
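/* Editorial sketch of the allocate/send/free pairing, using the same
 * in_interrupt() idiom the packet constructors above use; assumes
 * packet->host and packet->node_id are already filled in:
 *
 *	if (hpsb_get_tlabel(packet, in_interrupt() ? 0 : 1)) {
 *		free_hpsb_packet(packet);  // no free tlabel for this node
 *		return -ENOMEM;
 *	}
 *	...
 *	hpsb_free_tlabel(packet);  // once the transaction has completed
 */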
......@@ -297,122 +242,142 @@ int hpsb_packet_success(struct hpsb_packet *packet)
HPSB_PANIC("reached unreachable code 2 in %s", __FUNCTION__);
}
struct hpsb_packet *hpsb_make_readqpacket(struct hpsb_host *host, nodeid_t node,
u64 addr)
struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node,
u64 addr, size_t length)
{
struct hpsb_packet *p;
p = alloc_hpsb_packet(0);
if (!p) return NULL;
struct hpsb_packet *packet;
p->host = host;
p->tlabel = get_tlabel(host, node, 1);
p->node_id = node;
fill_async_readquad(p, addr);
if (length == 0)
return NULL;
return p;
}
packet = alloc_hpsb_packet(length + (length % 4 ? 4 - (length % 4) : 0));
if (!packet)
return NULL;
struct hpsb_packet *hpsb_make_readbpacket(struct hpsb_host *host, nodeid_t node,
u64 addr, size_t length)
{
struct hpsb_packet *p;
packet->host = host;
packet->node_id = node;
p = alloc_hpsb_packet(length + (length % 4 ? 4 - (length % 4) : 0));
if (!p) return NULL;
if (hpsb_get_tlabel(packet, in_interrupt() ? 0 : 1)) {
free_hpsb_packet(packet);
return NULL;
}
p->host = host;
p->tlabel = get_tlabel(host, node, 1);
p->node_id = node;
fill_async_readblock(p, addr, length);
if (length == 4)
fill_async_readquad(packet, addr);
else
fill_async_readblock(packet, addr, length);
return p;
return packet;
}
struct hpsb_packet *hpsb_make_writeqpacket(struct hpsb_host *host,
nodeid_t node, u64 addr,
quadlet_t data)
struct hpsb_packet *hpsb_make_writepacket (struct hpsb_host *host, nodeid_t node,
u64 addr, quadlet_t *buffer, size_t length)
{
struct hpsb_packet *p;
p = alloc_hpsb_packet(0);
if (!p) return NULL;
p->host = host;
p->tlabel = get_tlabel(host, node, 1);
p->node_id = node;
fill_async_writequad(p, addr, data);
struct hpsb_packet *packet;
return p;
}
if (length == 0)
return NULL;
struct hpsb_packet *hpsb_make_writebpacket(struct hpsb_host *host,
nodeid_t node, u64 addr,
size_t length)
{
struct hpsb_packet *p;
packet = alloc_hpsb_packet(length + (length % 4 ? 4 - (length % 4) : 0));
if (!packet)
return NULL;
p = alloc_hpsb_packet(length + (length % 4 ? 4 - (length % 4) : 0));
if (!p) return NULL;
if (length % 4) { /* zero padding bytes */
packet->data[length >> 2] = 0;
}
packet->host = host;
packet->node_id = node;
if (length % 4) {
p->data[length / 4] = 0;
if (hpsb_get_tlabel(packet, in_interrupt() ? 0 : 1)) {
free_hpsb_packet(packet);
return NULL;
}
p->host = host;
p->tlabel = get_tlabel(host, node, 1);
p->node_id = node;
fill_async_writeblock(p, addr, length);
if (length == 4) {
fill_async_writequad(packet, addr, buffer ? *buffer : 0);
} else {
fill_async_writeblock(packet, addr, length);
if (buffer)
memcpy(packet->data, buffer, length);
}
return p;
return packet;
}
struct hpsb_packet *hpsb_make_lockpacket(struct hpsb_host *host, nodeid_t node,
u64 addr, int extcode)
u64 addr, int extcode, quadlet_t *data,
quadlet_t arg)
{
struct hpsb_packet *p;
u32 length;
p = alloc_hpsb_packet(8);
if (!p) return NULL;
p->host = host;
p->tlabel = get_tlabel(host, node, 1);
p->node_id = node;
if (hpsb_get_tlabel(p, in_interrupt() ? 0 : 1)) {
free_hpsb_packet(p);
return NULL;
}
switch (extcode) {
case EXTCODE_FETCH_ADD:
case EXTCODE_LITTLE_ADD:
fill_async_lock(p, addr, extcode, 4);
length = 4;
if (data)
p->data[0] = *data;
break;
default:
fill_async_lock(p, addr, extcode, 8);
length = 8;
if (data) {
p->data[0] = arg;
p->data[1] = *data;
}
break;
}
fill_async_lock(p, addr, extcode, length);
return p;
}
struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host, nodeid_t node,
u64 addr, int extcode)
u64 addr, int extcode, octlet_t *data,
octlet_t arg)
{
struct hpsb_packet *p;
u32 length;
p = alloc_hpsb_packet(16);
if (!p) return NULL;
p->host = host;
p->tlabel = get_tlabel(host, node, 1);
p->node_id = node;
if (hpsb_get_tlabel(p, in_interrupt() ? 0 : 1)) {
free_hpsb_packet(p);
return NULL;
}
switch (extcode) {
case EXTCODE_FETCH_ADD:
case EXTCODE_LITTLE_ADD:
fill_async_lock(p, addr, extcode, 8);
length = 8;
if (data) {
p->data[0] = *data >> 32;
p->data[1] = *data & 0xffffffff;
}
break;
default:
fill_async_lock(p, addr, extcode, 16);
length = 16;
if (data) {
p->data[0] = arg >> 32;
p->data[1] = arg & 0xffffffff;
p->data[2] = *data >> 32;
p->data[3] = *data & 0xffffffff;
}
break;
}
fill_async_lock(p, addr, extcode, length);
return p;
}
......@@ -431,6 +396,23 @@ struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host,
return p;
}
struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host,
int length, int channel,
int tag, int sync)
{
struct hpsb_packet *p;
p = alloc_hpsb_packet(length);
if (!p) return NULL;
p->host = host;
fill_iso_packet(p, length, channel, tag, sync);
p->generation = get_hpsb_generation(host);
return p;
}
/*
* FIXME - these functions should probably read from / write to user space to
* avoid in kernel buffers for user space callers
......@@ -442,15 +424,12 @@ int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
struct hpsb_packet *packet;
int retval = 0;
if (length == 0) {
if (length == 0)
return -EINVAL;
}
if (length == 4) {
packet = hpsb_make_readqpacket(host, node, addr);
} else {
packet = hpsb_make_readbpacket(host, node, addr, length);
}
BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet
packet = hpsb_make_readpacket(host, node, addr, length);
if (!packet) {
return -ENOMEM;
......@@ -475,35 +454,12 @@ int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
}
hpsb_read_fail:
free_tlabel(host, node, packet->tlabel);
hpsb_free_tlabel(packet);
free_hpsb_packet(packet);
return retval;
}
struct hpsb_packet *hpsb_make_packet (struct hpsb_host *host, nodeid_t node,
u64 addr, quadlet_t *buffer, size_t length)
{
struct hpsb_packet *packet;
if (length == 0)
return NULL;
if (length == 4)
packet = hpsb_make_writeqpacket(host, node, addr, *buffer);
else
packet = hpsb_make_writebpacket(host, node, addr, length);
if (!packet)
return NULL;
/* Sometimes this may be called without data, just to allocate the
* packet. */
if (length != 4 && buffer)
memcpy(packet->data, buffer, length);
return packet;
}
int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
u64 addr, quadlet_t *buffer, size_t length)
......@@ -514,7 +470,9 @@ int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
if (length == 0)
return -EINVAL;
packet = hpsb_make_packet (host, node, addr, buffer, length);
BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet
packet = hpsb_make_writepacket (host, node, addr, buffer, length);
if (!packet)
return -ENOMEM;
......@@ -530,7 +488,7 @@ int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
retval = hpsb_packet_success(packet);
hpsb_write_fail:
free_tlabel(host, node, packet->tlabel);
hpsb_free_tlabel(packet);
free_hpsb_packet(packet);
return retval;
......@@ -542,35 +500,13 @@ int hpsb_lock(struct hpsb_host *host, nodeid_t node, unsigned int generation,
u64 addr, int extcode, quadlet_t *data, quadlet_t arg)
{
struct hpsb_packet *packet;
int retval = 0, length;
packet = alloc_hpsb_packet(8);
if (!packet) {
return -ENOMEM;
}
int retval = 0;
packet->host = host;
packet->tlabel = get_tlabel(host, node, 1);
packet->node_id = node;
BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet
switch (extcode) {
case EXTCODE_MASK_SWAP:
case EXTCODE_COMPARE_SWAP:
case EXTCODE_BOUNDED_ADD:
case EXTCODE_WRAP_ADD:
length = 8;
packet->data[0] = arg;
packet->data[1] = *data;
break;
case EXTCODE_FETCH_ADD:
case EXTCODE_LITTLE_ADD:
length = 4;
packet->data[0] = *data;
break;
default:
return -EINVAL;
}
fill_async_lock(packet, addr, extcode, length);
packet = hpsb_make_lockpacket(host, node, addr, extcode, data, arg);
if (!packet)
return -ENOMEM;
packet->generation = generation;
if (!hpsb_send_packet(packet)) {
......@@ -586,7 +522,38 @@ int hpsb_lock(struct hpsb_host *host, nodeid_t node, unsigned int generation,
}
hpsb_lock_fail:
free_tlabel(host, node, packet->tlabel);
hpsb_free_tlabel(packet);
free_hpsb_packet(packet);
return retval;
}
int hpsb_lock64(struct hpsb_host *host, nodeid_t node, unsigned int generation,
u64 addr, int extcode, octlet_t *data, octlet_t arg)
{
struct hpsb_packet *packet;
int retval = 0;
BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet
packet = hpsb_make_lock64packet(host, node, addr, extcode, data, arg);
if (!packet)
return -ENOMEM;
packet->generation = generation;
if (!hpsb_send_packet(packet)) {
retval = -EINVAL;
goto hpsb_lock64_fail;
}
down(&packet->state_change);
down(&packet->state_change);
retval = hpsb_packet_success(packet);
if (retval == 0)
*data = (u64)packet->data[1] << 32 | packet->data[0];
hpsb_lock64_fail:
hpsb_free_tlabel(packet);
free_hpsb_packet(packet);
return retval;
......
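/* Editorial usage sketch of the blocking helpers above (process context
 * only -- they sleep on packet->state_change); 'host', 'node' and 'addr'
 * are assumed to be supplied by the caller:
 *
 *	quadlet_t quad;
 *	unsigned int gen = get_hpsb_generation(host);
 *
 *	if (hpsb_read(host, node, gen, addr, &quad, 4) == 0)
 *		... use quad ...
 *
 *	quadlet_t new = 1, old = 0;
 *	if (hpsb_lock(host, node, gen, addr, EXTCODE_COMPARE_SWAP,
 *		      &new, old) == 0)
 *		... 'new' now holds the value read back from the node ...
 */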
......@@ -4,49 +4,27 @@
#include "ieee1394_core.h"
/*
* Utility functions to fill out packet headers.
*/
void fill_async_readquad(struct hpsb_packet *packet, u64 addr);
void fill_async_readquad_resp(struct hpsb_packet *packet, int rcode,
quadlet_t data);
void fill_async_readblock(struct hpsb_packet *packet, u64 addr, int length);
void fill_async_readblock_resp(struct hpsb_packet *packet, int rcode,
int length);
void fill_async_writequad(struct hpsb_packet *packet, u64 addr, quadlet_t data);
void fill_async_writeblock(struct hpsb_packet *packet, u64 addr, int length);
void fill_async_write_resp(struct hpsb_packet *packet, int rcode);
void fill_async_lock(struct hpsb_packet *packet, u64 addr, int extcode,
int length);
void fill_async_lock_resp(struct hpsb_packet *packet, int rcode, int extcode,
int length);
void fill_iso_packet(struct hpsb_packet *packet, int length, int channel,
int tag, int sync);
void fill_phy_packet(struct hpsb_packet *packet, quadlet_t data);
/*
* Get and free transaction labels.
*/
int get_tlabel(struct hpsb_host *host, nodeid_t nodeid, int wait);
void free_tlabel(struct hpsb_host *host, nodeid_t nodeid, int tlabel);
int hpsb_get_tlabel(struct hpsb_packet *packet, int wait);
void hpsb_free_tlabel(struct hpsb_packet *packet);
struct hpsb_packet *hpsb_make_readqpacket(struct hpsb_host *host, nodeid_t node,
u64 addr);
struct hpsb_packet *hpsb_make_readbpacket(struct hpsb_host *host, nodeid_t node,
struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node,
u64 addr, size_t length);
struct hpsb_packet *hpsb_make_writeqpacket(struct hpsb_host *host,
nodeid_t node, u64 addr,
quadlet_t data);
struct hpsb_packet *hpsb_make_writebpacket(struct hpsb_host *host,
nodeid_t node, u64 addr,
size_t length);
struct hpsb_packet *hpsb_make_lockpacket(struct hpsb_host *host, nodeid_t node,
u64 addr, int extcode);
u64 addr, int extcode, quadlet_t *data,
quadlet_t arg);
struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host, nodeid_t node,
u64 addr, int extcode);
u64 addr, int extcode, octlet_t *data,
octlet_t arg);
struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host,
quadlet_t data) ;
struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host,
int length, int channel,
int tag, int sync);
struct hpsb_packet *hpsb_make_writepacket (struct hpsb_host *host, nodeid_t node,
u64 addr, quadlet_t *buffer, size_t length);
/*
* hpsb_packet_success - Make sense of the ack and reply codes and
......@@ -75,10 +53,7 @@ int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
u64 addr, quadlet_t *buffer, size_t length);
int hpsb_lock(struct hpsb_host *host, nodeid_t node, unsigned int generation,
u64 addr, int extcode, quadlet_t *data, quadlet_t arg);
/* Generic packet creation. Used by hpsb_write. Also useful for protocol
* drivers that want to implement their own hpsb_write replacement. */
struct hpsb_packet *hpsb_make_packet (struct hpsb_host *host, nodeid_t node,
u64 addr, quadlet_t *buffer, size_t length);
int hpsb_lock64(struct hpsb_host *host, nodeid_t node, unsigned int generation,
u64 addr, int extcode, octlet_t *data, octlet_t arg);
#endif /* _IEEE1394_TRANSACTIONS_H */
......@@ -8,6 +8,7 @@
#include <linux/list.h>
#include <linux/init.h>
#include <linux/string.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>
......@@ -62,6 +63,30 @@
#define HPSB_PREPARE_WORK(x,y,z) PREPARE_WORK(x,y,z)
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,44)
/* pci_pool_create changed. does not take the flags arg any longer */
#define hpsb_pci_pool_create(a,b,c,d,e,f) pci_pool_create(a,b,c,d,e,f)
#else
#define hpsb_pci_pool_create(a,b,c,d,e,f) pci_pool_create(a,b,c,d,e)
#endif
/* Transaction Label handling */
struct hpsb_tlabel_pool {
u64 pool; /* bitmask: a set bit means the tlabel is in use */
spinlock_t lock; /* protects pool and next */
u8 next; /* next tlabel to try allocating */
u32 allocations; /* running allocation count, shown in /proc */
struct semaphore count; /* bounds outstanding tlabels per node */
};
#define HPSB_TPOOL_INIT(_tp) \
do { \
sema_init(&(_tp)->count, 63); \
spin_lock_init(&(_tp)->lock); \
(_tp)->next = 0; \
(_tp)->pool = 0; \
} while(0)
typedef u32 quadlet_t;
typedef u64 octlet_t;
......
/*
* IEEE 1394 for Linux
*
* kernel ISO transmission/reception
*
* Copyright (C) 2002 Maas Digital LLC
*
* This code is licensed under the GPL. See the file COPYING in the root
* directory of the kernel sources for details.
*/
#include <linux/slab.h>
#include "iso.h"
void hpsb_iso_stop(struct hpsb_iso *iso)
{
if(!iso->flags & HPSB_ISO_DRIVER_STARTED)
return;
iso->host->driver->isoctl(iso, iso->type == HPSB_ISO_XMIT ? XMIT_STOP : RECV_STOP, 0);
iso->flags &= ~HPSB_ISO_DRIVER_STARTED;
}
void hpsb_iso_shutdown(struct hpsb_iso *iso)
{
if(iso->flags & HPSB_ISO_DRIVER_INIT) {
hpsb_iso_stop(iso);
iso->host->driver->isoctl(iso, iso->type == HPSB_ISO_XMIT ? XMIT_SHUTDOWN : RECV_SHUTDOWN, 0);
iso->flags &= ~HPSB_ISO_DRIVER_INIT;
}
dma_region_free(&iso->buf);
kfree(iso);
}
static struct hpsb_iso* hpsb_iso_common_init(struct hpsb_host *host, enum hpsb_iso_type type,
unsigned int buf_packets,
unsigned int max_packet_size,
int channel,
int irq_interval,
void (*callback)(struct hpsb_iso*))
{
struct hpsb_iso *iso;
unsigned int packet_plus_info;
int dma_direction;
int iso_header_bytes;
const int info_bytes = sizeof(struct hpsb_iso_packet_info);
/* make sure driver supports the ISO API */
if(!host->driver->isoctl)
return NULL;
if(type == HPSB_ISO_RECV) {
/* when receiving, leave 8 extra bytes in front
of the data payload for the iso header */
iso_header_bytes = 8;
} else {
iso_header_bytes = 0;
}
/* sanitize parameters */
if(buf_packets < 2)
buf_packets = 2;
if(irq_interval < 1 || irq_interval > buf_packets / 2)
irq_interval = buf_packets / 2;
if(max_packet_size + info_bytes + iso_header_bytes > PAGE_SIZE)
return NULL;
/* size of packet payload plus the per-packet info must be a power of 2
and at most equal to the page size */
for(packet_plus_info = 256; packet_plus_info < PAGE_SIZE; packet_plus_info *= 2) {
if(packet_plus_info >= (max_packet_size + info_bytes + iso_header_bytes)) {
break;
}
}
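/* Editorial worked example (assuming PAGE_SIZE == 4096): for
 * max_packet_size = 1000 with info_bytes = 8 and iso_header_bytes = 8,
 * the loop stops at packet_plus_info = 1024, so four packets fit per
 * page and no packet straddles a page boundary. */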
/* allocate and write the struct hpsb_iso */
iso = kmalloc(sizeof(*iso), SLAB_KERNEL);
if(!iso)
return NULL;
iso->type = type;
iso->host = host;
iso->hostdata = NULL;
iso->callback = callback;
iso->channel = channel;
iso->irq_interval = irq_interval;
dma_region_init(&iso->buf);
iso->buf_packets = buf_packets;
iso->buf_stride = packet_plus_info;
iso->max_packet_size = max_packet_size;
iso->packet_data_offset = iso_header_bytes;
iso->packet_info_offset = iso_header_bytes + max_packet_size;
iso->first_packet = 0;
if(iso->type == HPSB_ISO_XMIT) {
atomic_set(&iso->n_dma_packets, 0);
dma_direction = PCI_DMA_TODEVICE;
} else {
atomic_set(&iso->n_dma_packets, iso->buf_packets);
dma_direction = PCI_DMA_FROMDEVICE;
}
atomic_set(&iso->overflows, 0);
iso->flags = 0;
iso->prebuffer = 0;
/* allocate the packet buffer */
if(dma_region_alloc(&iso->buf, iso->buf_packets * iso->buf_stride,
host->pdev, dma_direction))
goto err;
return iso;
err:
hpsb_iso_shutdown(iso);
return NULL;
}
int hpsb_iso_n_ready(struct hpsb_iso* iso)
{
return iso->buf_packets - atomic_read(&iso->n_dma_packets);
}
struct hpsb_iso* hpsb_iso_xmit_init(struct hpsb_host *host,
unsigned int buf_packets,
unsigned int max_packet_size,
int channel,
int speed,
int irq_interval,
void (*callback)(struct hpsb_iso*))
{
struct hpsb_iso *iso = hpsb_iso_common_init(host, HPSB_ISO_XMIT,
buf_packets, max_packet_size,
channel, irq_interval, callback);
if(!iso)
return NULL;
iso->speed = speed;
/* tell the driver to start working */
if(host->driver->isoctl(iso, XMIT_INIT, 0))
goto err;
iso->flags |= HPSB_ISO_DRIVER_INIT;
return iso;
err:
hpsb_iso_shutdown(iso);
return NULL;
}
struct hpsb_iso* hpsb_iso_recv_init(struct hpsb_host *host,
unsigned int buf_packets,
unsigned int max_packet_size,
int channel,
int irq_interval,
void (*callback)(struct hpsb_iso*))
{
struct hpsb_iso *iso = hpsb_iso_common_init(host, HPSB_ISO_RECV,
buf_packets, max_packet_size,
channel, irq_interval, callback);
if(!iso)
return NULL;
/* tell the driver to start working */
if(host->driver->isoctl(iso, RECV_INIT, 0))
goto err;
iso->flags |= HPSB_ISO_DRIVER_INIT;
return iso;
err:
hpsb_iso_shutdown(iso);
return NULL;
}
static int do_iso_xmit_start(struct hpsb_iso *iso, int cycle)
{
int retval = iso->host->driver->isoctl(iso, XMIT_START, cycle);
if(retval)
return retval;
iso->flags |= HPSB_ISO_DRIVER_STARTED;
return retval;
}
int hpsb_iso_xmit_start(struct hpsb_iso *iso, int cycle, int prebuffer)
{
if(iso->type != HPSB_ISO_XMIT)
return -1;
if(iso->flags & HPSB_ISO_DRIVER_STARTED)
return 0;
if(prebuffer < 1)
prebuffer = 1;
if(prebuffer > iso->buf_packets)
prebuffer = iso->buf_packets;
iso->prebuffer = prebuffer;
if(cycle != -1) {
/* pre-fill info->cycle */
int pkt = iso->first_packet;
int c, i;
cycle %= 8000;
c = cycle;
for(i = 0; i < iso->buf_packets; i++) {
struct hpsb_iso_packet_info *info = hpsb_iso_packet_info(iso, pkt);
info->cycle = c;
c = (c+1) % 8000;
pkt = (pkt+1) % iso->buf_packets;
}
}
/* remember the starting cycle; DMA will commence from xmit_queue_packets() */
iso->start_cycle = cycle;
return 0;
}
int hpsb_iso_recv_start(struct hpsb_iso *iso, int cycle)
{
int retval = 0;
if(iso->type != HPSB_ISO_RECV)
return -1;
if(iso->flags & HPSB_ISO_DRIVER_STARTED)
return 0;
retval = iso->host->driver->isoctl(iso, RECV_START, cycle);
if(retval)
return retval;
iso->flags |= HPSB_ISO_DRIVER_STARTED;
return retval;
}
int hpsb_iso_xmit_queue_packets(struct hpsb_iso *iso, unsigned int n_packets)
{
int i, retval;
int pkt = iso->first_packet;
if(iso->type != HPSB_ISO_XMIT)
return -1;
/* check packet sizes for sanity */
for(i = 0; i < n_packets; i++) {
struct hpsb_iso_packet_info *info = hpsb_iso_packet_info(iso, pkt);
if(info->len > iso->max_packet_size) {
printk(KERN_ERR "hpsb_iso_xmit_queue_packets: packet too long (%u, max is %u)\n",
info->len, iso->max_packet_size);
return -EINVAL;
}
pkt = (pkt+1) % iso->buf_packets;
}
retval = iso->host->driver->isoctl(iso, XMIT_QUEUE, n_packets);
if(retval)
return retval;
if(iso->prebuffer != 0) {
iso->prebuffer -= n_packets;
if(iso->prebuffer <= 0) {
iso->prebuffer = 0;
return do_iso_xmit_start(iso,
iso->start_cycle);
}
}
return 0;
}
int hpsb_iso_recv_release_packets(struct hpsb_iso *iso, unsigned int n_packets)
{
if(iso->type != HPSB_ISO_RECV)
return -1;
return iso->host->driver->isoctl(iso, RECV_RELEASE, n_packets);
}
unsigned char* hpsb_iso_packet_data(struct hpsb_iso *iso, unsigned int pkt)
{
return (iso->buf.kvirt + pkt * iso->buf_stride)
+ iso->packet_data_offset;
}
struct hpsb_iso_packet_info* hpsb_iso_packet_info(struct hpsb_iso *iso, unsigned int pkt)
{
return (struct hpsb_iso_packet_info*) ((iso->buf.kvirt + pkt * iso->buf_stride)
+ iso->packet_info_offset);
}
/*
* IEEE 1394 for Linux
*
* kernel ISO transmission/reception
*
* Copyright (C) 2002 Maas Digital LLC
*
* This code is licensed under the GPL. See the file COPYING in the root
* directory of the kernel sources for details.
*/
#ifndef IEEE1394_ISO_H
#define IEEE1394_ISO_H
#include "hosts.h"
#include "dma.h"
/* high-level ISO interface */
/* per-packet data embedded in the ringbuffer */
struct hpsb_iso_packet_info {
unsigned short len;
unsigned short cycle;
unsigned char channel; /* recv only */
unsigned char tag;
unsigned char sy;
};
/*
* each packet in the ringbuffer consists of three things:
* 1. the packet's data payload (no isochronous header)
* 2. a struct hpsb_iso_packet_info
* 3. some empty space before the next packet
*
* packets are separated by hpsb_iso.buf_stride bytes
* an even number of packets fit on one page
* no packet can be larger than one page
*/
enum hpsb_iso_type { HPSB_ISO_RECV = 0, HPSB_ISO_XMIT = 1 };
struct hpsb_iso {
enum hpsb_iso_type type;
/* pointer to low-level driver and its private data */
struct hpsb_host *host;
void *hostdata;
/* function to be called (from interrupt context) when the iso status changes */
void (*callback)(struct hpsb_iso*);
int speed; /* SPEED_100, 200, or 400 */
int channel;
/* greatest # of packets between interrupts - controls
the maximum latency of the buffer */
int irq_interval;
/* the packet ringbuffer */
struct dma_region buf;
/* # of packets in the ringbuffer */
unsigned int buf_packets;
/* offset between successive packets, in bytes -
you can assume that this is a power of 2,
and less than or equal to the page size */
int buf_stride;
/* largest possible packet size, in bytes */
unsigned int max_packet_size;
/* offset relative to (buf.kvirt + N*buf_stride) at which
the data payload begins for packet N */
int packet_data_offset;
/* offset relative to (buf.kvirt + N*buf_stride) at which the
struct hpsb_iso_packet_info is stored for packet N */
int packet_info_offset;
/* the index of the next packet that will be produced
or consumed by the user */
int first_packet;
/* number of packets owned by the low-level driver and
queued for transmission or reception.
this is related to the number of packets available
to the user process: n_ready = buf_packets - n_dma_packets */
atomic_t n_dma_packets;
/* how many times the buffer has overflowed or underflowed */
atomic_t overflows;
/* private flags to track initialization progress */
#define HPSB_ISO_DRIVER_INIT (1<<0)
#define HPSB_ISO_DRIVER_STARTED (1<<1)
unsigned int flags;
/* # of packets left to prebuffer (xmit only) */
int prebuffer;
/* starting cycle (xmit only) */
int start_cycle;
};
/* functions available to high-level drivers (e.g. raw1394) */
/* allocate the buffer and DMA context */
struct hpsb_iso* hpsb_iso_xmit_init(struct hpsb_host *host,
unsigned int buf_packets,
unsigned int max_packet_size,
int channel,
int speed,
int irq_interval,
void (*callback)(struct hpsb_iso*));
struct hpsb_iso* hpsb_iso_recv_init(struct hpsb_host *host,
unsigned int buf_packets,
unsigned int max_packet_size,
int channel,
int irq_interval,
void (*callback)(struct hpsb_iso*));
/* start/stop DMA */
int hpsb_iso_xmit_start(struct hpsb_iso *iso, int start_on_cycle, int prebuffer);
int hpsb_iso_recv_start(struct hpsb_iso *iso, int start_on_cycle);
void hpsb_iso_stop(struct hpsb_iso *iso);
/* deallocate buffer and DMA context */
void hpsb_iso_shutdown(struct hpsb_iso *iso);
/* N packets have been written to the buffer; queue them for transmission */
int hpsb_iso_xmit_queue_packets(struct hpsb_iso *xmit, unsigned int n_packets);
/* N packets have been read out of the buffer, re-use the buffer space */
int hpsb_iso_recv_release_packets(struct hpsb_iso *recv, unsigned int n_packets);
/* returns # of packets ready to send or receive */
int hpsb_iso_n_ready(struct hpsb_iso *iso);
/* returns a pointer to the payload of packet 'pkt' */
unsigned char* hpsb_iso_packet_data(struct hpsb_iso *iso, unsigned int pkt);
/* returns a pointer to the info struct of packet 'pkt' */
struct hpsb_iso_packet_info* hpsb_iso_packet_info(struct hpsb_iso *iso, unsigned int pkt);
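/* Editorial usage sketch, transmit side (xmit_cb is a hypothetical
 * callback; error handling omitted):
 *
 *	iso = hpsb_iso_xmit_init(host, 64, 1024, channel, SPEED_400,
 *				 16, xmit_cb);
 *	hpsb_iso_xmit_start(iso, -1, 8);    // any cycle, prebuffer 8
 *	while (streaming) {
 *		// fill hpsb_iso_packet_data(iso, pkt) and set 'len' in
 *		// hpsb_iso_packet_info(iso, pkt) for n ready packets
 *		hpsb_iso_xmit_queue_packets(iso, n);
 *	}
 *	hpsb_iso_stop(iso);
 *	hpsb_iso_shutdown(iso);
 */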
#endif /* IEEE1394_ISO_H */
......@@ -9,9 +9,9 @@
*/
#include <linux/kernel.h>
#include <linux/config.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
......@@ -20,6 +20,8 @@
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#endif
#include <asm/atomic.h>
#include <asm/byteorder.h>
#include "ieee1394_types.h"
#include "ieee1394.h"
......@@ -30,6 +32,24 @@
#include "csr.h"
#include "nodemgr.h"
#ifdef CONFIG_IEEE1394_OUI_DB
struct oui_list_struct {
int oui;
char *name;
};
extern struct oui_list_struct oui_list[];
static char *nodemgr_find_oui_name(int oui) {
int i;
for (i = 0; oui_list[i].name; i++)
if (oui_list[i].oui == oui)
return oui_list[i].name;
return NULL;
}
#endif
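/* Editorial note: the compiled-in table is expected to look like the
 * hypothetical sketch below -- a NULL name terminates it, as the lookup
 * loop above requires:
 *
 *	struct oui_list_struct oui_list[] = {
 *		{ 0x000011, "Example Vendor A" },
 *		{ 0x000022, "Example Vendor B" },
 *		{ 0, NULL }
 *	};
 */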
/*
* Basically what we do here is start off retrieving the bus_info block.
......@@ -86,6 +106,7 @@ static int raw1394_read_proc(char *page, char **start, off_t off,
struct node_entry *ne;
int len;
char *out = page;
unsigned long flags;
if (down_interruptible(&nodemgr_serialize))
return -EINTR;
......@@ -102,10 +123,17 @@ static int raw1394_read_proc(char *page, char **start, off_t off,
NODE_BUS_ARGS(ne->nodeid), (unsigned long long)ne->guid);
/* Generic Node information */
PUTF(" Vendor ID: `%s' [0x%06x]\n",
ne->vendor_name ?: "Unknown", ne->vendor_id);
PUTF(" Vendor ID : `%s' [0x%06x]\n", ne->oui_name, ne->vendor_id);
if (ne->vendor_name)
PUTF(" Vendor text : `%s'\n", ne->vendor_name);
PUTF(" Capabilities: 0x%06x\n", ne->capabilities);
PUTF(" Bus Options:\n");
PUTF(" Tlabel stats:\n");
spin_lock_irqsave(&ne->tpool->lock, flags);
PUTF(" Free : %d\n", atomic_read(&ne->tpool->count.count) + 1);
PUTF(" Total : %u\n", ne->tpool->allocations);
PUTF(" Mask : %016Lx\n", (unsigned long long)ne->tpool->pool);
spin_unlock_irqrestore(&ne->tpool->lock, flags);
PUTF(" Bus Options :\n");
PUTF(" IRMC(%d) CMC(%d) ISC(%d) BMC(%d) PMC(%d) GEN(%d)\n"
" LSPD(%d) MAX_REC(%d) CYC_CLK_ACC(%d)\n",
ne->busopt.irmc, ne->busopt.cmc, ne->busopt.isc, ne->busopt.bmc,
......@@ -136,15 +164,21 @@ static int raw1394_read_proc(char *page, char **start, off_t off,
int printed = 0; // small hack
PUTF(" Unit Directory %d:\n", ud_count++);
if (ud->flags & UNIT_DIRECTORY_VENDOR_ID ||
ud->flags & UNIT_DIRECTORY_MODEL_ID) {
PUTF(" Vendor/Model ID : ");
}
if (ud->flags & UNIT_DIRECTORY_VENDOR_ID) {
PUTF(" Vendor/Model ID: %s [%06x]",
ud->vendor_name ?: "Unknown", ud->vendor_id);
PUTF("%s [%06x]", ud->vendor_name ?: "Unknown",
ud->vendor_id);
printed = 1;
}
if (ud->flags & UNIT_DIRECTORY_MODEL_ID) {
if (!printed)
PUTF(" Vendor/Model ID: %s [%06x]",
ne->vendor_name ?: "Unknown", ne->vendor_id);
if (!printed) {
PUTF("%s [%06x]", ne->vendor_name ?: "Unknown",
ne->vendor_id);
}
PUTF(" / %s [%06x]", ud->model_name ?: "Unknown", ud->model_id);
printed = 1;
}
......@@ -152,11 +186,11 @@ static int raw1394_read_proc(char *page, char **start, off_t off,
PUTF("\n");
if (ud->flags & UNIT_DIRECTORY_SPECIFIER_ID)
PUTF(" Software Specifier ID: %06x\n", ud->specifier_id);
PUTF(" Software Spec ID : %06x\n", ud->specifier_id);
if (ud->flags & UNIT_DIRECTORY_VERSION)
PUTF(" Software Version: %06x\n", ud->version);
PUTF(" Software Version : %06x\n", ud->version);
if (ud->driver)
PUTF(" Driver: %s\n", ud->driver->name);
PUTF(" Driver : %s\n", ud->driver->name);
PUTF(" Length (in quads): %d\n", ud->count);
}
......@@ -297,6 +331,7 @@ static struct node_entry *nodemgr_scan_root_directory
code = CONFIG_ROM_KEY(quad);
if (code == CONFIG_ROM_VENDOR_ID && length > 0) {
/* Check if there is a text descriptor leaf
immediately after this. */
size = nodemgr_size_text_leaf(host, nodeid, generation,
......@@ -305,22 +340,23 @@ static struct node_entry *nodemgr_scan_root_directory
address += 4;
length--;
total_size += (size + 1) * sizeof (quadlet_t);
}
else if (size < 0)
} else if (size < 0)
return NULL;
}
}
ne = kmalloc(total_size, SLAB_ATOMIC);
if (ne != NULL) {
ne = kmalloc(total_size, GFP_KERNEL);
if (!ne)
return NULL;
if (size != 0) {
ne->vendor_name
= (const char *) &(ne->quadlets[2]);
ne->quadlets[size] = 0;
}
else {
} else {
ne->vendor_name = NULL;
}
}
return ne;
}
......@@ -335,6 +371,9 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, quadlet_t busoption
INIT_LIST_HEAD(&ne->list);
INIT_LIST_HEAD(&ne->unit_directories);
ne->tpool = &host->tpool[nodeid & NODE_MASK];
ne->host = host;
ne->nodeid = nodeid;
ne->guid = guid;
......@@ -344,9 +383,10 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, quadlet_t busoption
nodemgr_process_config_rom (ne, busoptions);
HPSB_DEBUG("%s added: Node[" NODE_BUS_FMT "] GUID[%016Lx] [%s]",
(host->node_id == nodeid) ? "Host" : "Device",
HPSB_DEBUG("%s added: ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx] [%s] (%s)",
(host->node_id == nodeid) ? "Host" : "Node",
NODE_BUS_ARGS(nodeid), (unsigned long long)guid,
ne->oui_name,
ne->vendor_name ?: "Unknown");
return ne;
......@@ -648,6 +688,11 @@ static void nodemgr_process_root_directory(struct node_entry *ne)
switch (code) {
case CONFIG_ROM_VENDOR_ID:
ne->vendor_id = value;
#ifdef CONFIG_IEEE1394_OUI_DB
ne->oui_name = nodemgr_find_oui_name(value);
#else
ne->oui_name = "Unknown";
#endif
/* Now check if there is a vendor name text
string. */
if (ne->vendor_name != NULL) {
......@@ -1211,6 +1256,18 @@ struct node_entry *hpsb_nodeid_get_entry(nodeid_t nodeid)
return ne;
}
struct node_entry *hpsb_check_nodeid(nodeid_t nodeid)
{
struct node_entry *ne;
if (down_trylock(&nodemgr_serialize))
return NULL;
ne = find_entry_by_nodeid(nodeid);
up(&nodemgr_serialize);
return ne;
}
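A hedged usage sketch (the caller below is hypothetical): because the function returns NULL both when the node is unknown and when the semaphore is contended, an atomic-context caller simply treats NULL as "try again later".

/* Hypothetical caller that must not sleep (e.g. a bottom half). */
static void example_peek_node(nodeid_t nodeid)
{
	struct node_entry *ne = hpsb_check_nodeid(nodeid);

	if (!ne)
		return;	/* node unknown, or nodemgr_serialize contended */

	/* ne may be inspected here, e.g. ne->guid or ne->vendor_id */
}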
/* The following four convenience functions use a struct node_entry
* for addressing a node on the bus. They are intended for use by any
* process context, not just the nodemgr thread, so we need to be a
......@@ -1266,9 +1323,11 @@ int hpsb_node_lock(struct node_entry *ne, u64 addr,
static void nodemgr_add_host(struct hpsb_host *host)
{
struct host_info *hi = kmalloc (sizeof (struct host_info), GFP_KERNEL);
struct host_info *hi;
unsigned long flags;
hi = kmalloc(sizeof (struct host_info), in_interrupt() ? SLAB_ATOMIC : SLAB_KERNEL);
if (!hi) {
HPSB_ERR ("NodeMgr: out of memory in add host");
return;
......
......@@ -132,7 +132,11 @@ struct node_entry {
u32 capabilities;
struct list_head unit_directories;
struct hpsb_tlabel_pool *tpool;
const char *vendor_name;
char *oui_name;
quadlet_t quadlets[0];
};
......@@ -152,6 +156,10 @@ struct node_entry *hpsb_guid_get_entry(u64 guid);
* fool-proof by itself, since the nodeid can change. */
struct node_entry *hpsb_nodeid_get_entry(nodeid_t nodeid);
/* Same as above except that it will not block waiting for the nodemgr
* serialize semaphore. */
struct node_entry *hpsb_check_nodeid(nodeid_t nodeid);
/*
* If the entry refers to a local host, this function will return the pointer
* to the hpsb_host structure. It will return NULL otherwise. Once you have
......
......@@ -38,6 +38,7 @@
*
* Known bugs:
* . devctl BUS_RESET arg confusion (reset type or root holdoff?)
* added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
*/
/*
......@@ -75,6 +76,10 @@
* . Updated to 2.4.x module scheme (PCI as well)
* . Removed procfs support since it trashes random mem
* . Config ROM generation
*
* Manfred Weihs <weihs@ict.tuwien.ac.at>
* . Reworked code for initiating bus resets
* (long, short, with or without hold-off)
*/
#include <linux/config.h>
......@@ -112,6 +117,8 @@
#include "ieee1394.h"
#include "ieee1394_types.h"
#include "hosts.h"
#include "dma.h"
#include "iso.h"
#include "ieee1394_core.h"
#include "highlevel.h"
#include "ohci1394.h"
......@@ -153,13 +160,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
printk(level "%s_%d: " fmt "\n" , OHCI1394_DRIVER_NAME, card , ## args)
static char version[] __devinitdata =
"$Rev: 601 $ Ben Collins <bcollins@debian.org>";
"$Rev: 675 $ Ben Collins <bcollins@debian.org>";
/* Module Parameters */
MODULE_PARM(attempt_root,"i");
MODULE_PARM_DESC(attempt_root, "Attempt to make the host root (default = 0).");
static int attempt_root = 0;
MODULE_PARM(phys_dma,"i");
MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
static int phys_dma = 1;
......@@ -170,12 +173,6 @@ static void dma_trm_reset(struct dma_trm_ctx *d);
static void ohci1394_pci_remove(struct pci_dev *pdev);
#ifndef __LITTLE_ENDIAN
/* Swap a series of quads in place. */
static __inline__ void block_swab32(quadlet_t *data, size_t size) {
while (size--)
data[size] = swab32(data[size]);
}
static unsigned hdr_sizes[] =
{
3, /* TCODE_WRITEQ */
......@@ -193,16 +190,19 @@ static unsigned hdr_sizes[] =
};
/* Swap headers */
static inline void packet_swab(quadlet_t *data, int tcode, int len)
static inline void packet_swab(quadlet_t *data, int tcode)
{
size_t size;
if (tcode > TCODE_LOCK_RESPONSE || hdr_sizes[tcode] == 0)
return;
block_swab32(data, hdr_sizes[tcode]);
/* read hdr_sizes[] only after tcode has been validated */
size = hdr_sizes[tcode];
while (size--)
data[size] = swab32(data[size]);
}
#else
/* Don't waste cycles on same sex byte swaps */
#define packet_swab(w,x,y)
#define block_swab32(x,y)
#define packet_swab(w,x)
#endif /* !LITTLE_ENDIAN */
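A minimal illustration of the narrowed interface, assuming TCODE_WRITEQ indexes a three-quadlet entry in hdr_sizes as listed above; the wrapper itself is hypothetical:

static inline void example_swab_writeq_header(quadlet_t *hdr)
{
	/* swaps hdr[0..2] in place on big-endian hosts; expands to a
	 * no-op on little-endian builds via the macro above */
	packet_swab(hdr, TCODE_WRITEQ);
}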
/***********************************
......@@ -339,8 +339,6 @@ static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
DBGMSG(ohci->id, "SelfID complete");
hpsb_selfid_complete(host, phyid, isroot);
return;
}
......@@ -574,6 +572,7 @@ static void ohci_initialize(struct ti_ohci *ohci)
/* Enable interrupts */
reg_write(ohci, OHCI1394_IntMaskSet,
OHCI1394_unrecoverableError |
OHCI1394_masterIntEnable |
OHCI1394_busReset |
OHCI1394_selfIDComplete |
......@@ -646,8 +645,7 @@ static void insert_packet(struct ti_ohci *ohci,
(packet->header[0] & 0xFFFF0000);
d->prg_cpu[idx]->data[2] = packet->header[2];
d->prg_cpu[idx]->data[3] = packet->header[3];
packet_swab(d->prg_cpu[idx]->data, packet->tcode,
packet->header_size>>2);
packet_swab(d->prg_cpu[idx]->data, packet->tcode);
}
if (packet->data_size) { /* block transmit */
......@@ -712,7 +710,7 @@ static void insert_packet(struct ti_ohci *ohci,
d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
(packet->header[0] & 0xFFFF);
d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
packet_swab(d->prg_cpu[idx]->data, packet->tcode, packet->header_size>>2);
packet_swab(d->prg_cpu[idx]->data, packet->tcode);
d->prg_cpu[idx]->begin.control =
cpu_to_le32(DMA_CTL_OUTPUT_MORE |
......@@ -844,12 +842,57 @@ static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
struct ti_ohci *ohci = host->hostdata;
int retval = 0;
unsigned long flags;
int phy_reg;
switch (cmd) {
case RESET_BUS:
DBGMSG(ohci->id, "devctl: Bus reset requested%s",
attempt_root ? " and attempting to become root" : "");
set_phy_reg_mask (ohci, 1, 0x40 | (attempt_root ? 0x80 : 0));
switch (arg) {
case SHORT_RESET:
phy_reg = get_phy_reg(ohci, 5);
phy_reg |= 0x40;
set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
break;
case LONG_RESET:
phy_reg = get_phy_reg(ohci, 1);
phy_reg |= 0x40;
set_phy_reg(ohci, 1, phy_reg); /* set IBR */
break;
case SHORT_RESET_NO_FORCE_ROOT:
phy_reg = get_phy_reg(ohci, 1);
if (phy_reg & 0x80) {
phy_reg &= ~0x80;
set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
}
phy_reg = get_phy_reg(ohci, 5);
phy_reg |= 0x40;
set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
break;
case LONG_RESET_NO_FORCE_ROOT:
phy_reg = get_phy_reg(ohci, 1);
phy_reg &= ~0x80;
phy_reg |= 0x40;
set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
break;
case SHORT_RESET_FORCE_ROOT:
phy_reg = get_phy_reg(ohci, 1);
if (!(phy_reg & 0x80)) {
phy_reg |= 0x80;
set_phy_reg(ohci, 1, phy_reg); /* set RHB */
}
phy_reg = get_phy_reg(ohci, 5);
phy_reg |= 0x40;
set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
break;
case LONG_RESET_FORCE_ROOT:
phy_reg = get_phy_reg(ohci, 1);
phy_reg |= 0xc0;
set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
break;
default:
retval = -1;
}
break;
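/* Hedged summary of the PHY bits toggled above, per the IEEE 1394a
 * register map the comments reference (names are descriptive only):
 *   register 1, bit 7 (0x80)  RHB  - root hold-off
 *   register 1, bit 6 (0x40)  IBR  - initiate long bus reset
 *   register 5, bit 6 (0x40)  ISBR - initiate short (arbitrated) reset
 */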
case GET_CYCLE_COUNTER:
......@@ -977,6 +1020,705 @@ static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
return retval;
}
/***********************************
* rawiso ISO reception *
***********************************/
struct ohci_iso_recv {
struct ti_ohci *ohci;
/* memory and PCI mapping for the DMA descriptors */
struct dma_prog_region prog;
struct ohci1394_iso_tasklet task;
int task_active;
/* index of next packet to arrive */
int pkt_dma;
u32 ContextControlSet;
u32 ContextControlClear;
u32 CommandPtr;
u32 ContextMatch;
};
static void ohci_iso_recv_task(unsigned long data);
static void ohci_iso_recv_stop(struct hpsb_iso *iso);
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
static void ohci_iso_recv_program(struct hpsb_iso *iso);
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle);
static int ohci_iso_recv_init(struct hpsb_iso *iso)
{
struct ohci_iso_recv *recv;
unsigned int prog_size;
int ctx;
int ret = -ENOMEM;
recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
if(!recv)
return -ENOMEM;
iso->hostdata = recv;
recv->ohci = iso->host->hostdata;
recv->task_active = 0;
recv->pkt_dma = iso->first_packet;
dma_prog_region_init(&recv->prog);
/* size of DMA program = one INPUT_LAST per packet in the buffer */
prog_size = sizeof(struct dma_cmd) * iso->buf_packets;
if(dma_prog_region_alloc(&recv->prog, prog_size, recv->ohci->dev))
goto err;
ohci1394_init_iso_tasklet(&recv->task, OHCI_ISO_RECEIVE,
ohci_iso_recv_task, (unsigned long) iso);
if(ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0)
goto err;
recv->task_active = 1;
/* recv context registers are spaced 32 bytes apart */
ctx = recv->task.context;
recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;
/* enable interrupts */
reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << ctx);
/* write the DMA program */
ohci_iso_recv_program(iso);
return 0;
err:
ohci_iso_recv_shutdown(iso);
return ret;
}
static void ohci_iso_recv_stop(struct hpsb_iso *iso)
{
struct ohci_iso_recv *recv = iso->hostdata;
/* halt DMA */
ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
}
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
{
struct ohci_iso_recv *recv = iso->hostdata;
if(recv->task_active) {
/* halt DMA */
ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
/* disable interrupts */
reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);
ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
recv->task_active = 0;
}
dma_prog_region_free(&recv->prog);
kfree(recv);
iso->hostdata = NULL;
}
static void ohci_iso_recv_program(struct hpsb_iso *iso)
{
struct ohci_iso_recv *recv = iso->hostdata;
/* address of 'branch' field in previous DMA descriptor */
u32 *prev_branch = NULL;
/* start at pkt_dma and go around the whole buffer */
int pkt = recv->pkt_dma;
int i;
for(i = 0; i < iso->buf_packets; i++) {
int want_interrupt;
unsigned int data_size;
/* pointer to the DMA descriptor */
struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + pkt;
/* offset of the DMA descriptor relative to the DMA prog buffer */
unsigned long prog_offset = pkt * sizeof(struct dma_cmd);
/* offset of this bus_cycle within the DMA buffer */
unsigned long buf_offset = hpsb_iso_packet_data(iso, pkt) - iso->buf.kvirt;
/* back up 8 bytes for the iso header */
buf_offset -= 8;
data_size = iso->max_packet_size + 8;
/* ask for an interrupt every now and then, and
always interrupt on the final descriptor */
if( ((i % iso->irq_interval) == 0) ||
(i == (iso->buf_packets - 1)) ) {
want_interrupt = 1;
} else {
want_interrupt = 0;
}
/* write the DMA descriptor */
il->control = 3 << 28; /* INPUT_LAST */
il->control |= 8 << 24; /* s = 1, update xferStatus and resCount */
if(want_interrupt)
il->control |= 3 << 20;
il->control |= 0xC << 16; /* enable branch to address */
il->control |= data_size;
il->address = dma_region_offset_to_bus(&iso->buf, buf_offset);
il->branchAddress = 0; /* filled in on next loop */
il->status = data_size;
/* link the previous descriptor to this one */
if(prev_branch) {
*prev_branch = dma_prog_region_offset_to_bus(&recv->prog, prog_offset);
*prev_branch |= 1; /* set Z=1 */
}
prev_branch = &il->branchAddress;
pkt = (pkt + 1) % iso->buf_packets;
}
/* the final descriptor's branch address and Z should be left at 0 */
}
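For reference, a hedged decode of the control word assembled in the loop above, taking max_packet_size = 4096 and an interrupting descriptor as the example:

/*   (3 << 28)    INPUT_LAST command
 *   (8 << 24)    s = 1: update xferStatus and resCount
 *   (3 << 20)    interrupt on completion
 *   (0xC << 16)  branch always
 *    4096 + 8    reqCount: payload plus the 8-byte iso header
 * giving il->control == 0x383C1008.
 */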
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle)
{
struct ohci_iso_recv *recv = iso->hostdata;
u32 command, contextMatch;
reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
wmb();
/* use packet-per-buffer mode, and keep ISO headers */
reg_write(recv->ohci, recv->ContextControlSet, (1 << 30));
/* match on all tags, listen on channel */
contextMatch = 0xF0000000 | iso->channel;
if(cycle != -1) {
u32 seconds;
/* enable cycleMatch */
reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));
/* set starting cycle */
cycle &= 0x1FFF;
/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
just snarf them from the current time */
seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
/* advance one second to give some extra time for DMA to start */
seconds += 1;
cycle |= (seconds & 3) << 13;
contextMatch |= cycle << 12;
}
reg_write(recv->ohci, recv->ContextMatch, contextMatch);
/* address of first descriptor block */
command = dma_prog_region_offset_to_bus(&recv->prog, recv->pkt_dma * sizeof(struct dma_cmd));
command |= 1; /* Z=1 */
reg_write(recv->ohci, recv->CommandPtr, command);
wmb();
/* run */
reg_write(recv->ohci, recv->ContextControlSet, 0x8000);
/* issue a dummy read of the cycle timer register to force
all PCI writes to be posted immediately */
mb();
reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
/* check RUN */
if(!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
PRINT(KERN_ERR, recv->ohci->id, "Error starting IR DMA (ContextControl 0x%08x)\n",
reg_read(recv->ohci, recv->ContextControlSet));
return -1;
}
return 0;
}
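A worked example of the cycle-match arithmetic: suppose the caller requests cycle 7900 while the cycle timer currently reads second 5.

/*   cycle   = 7900 & 0x1FFF        = 0x1EDC
 *   seconds = (5 + 1) & 3          = 2
 *   cycle  |= 2 << 13              -> 0x5EDC
 *   contextMatch |= 0x5EDC << 12
 * i.e. the context arms one second ahead of the timer, leaving the
 * DMA engine time to start.
 */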
static void ohci_iso_recv_release_one(struct hpsb_iso *iso)
{
struct ohci_iso_recv *recv = iso->hostdata;
/* re-use the DMA descriptor for first_packet */
/* by linking the previous descriptor to it */
int next_i = iso->first_packet;
int prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);
struct dma_cmd *next = dma_region_i(&recv->prog, struct dma_cmd, next_i);
struct dma_cmd *prev = dma_region_i(&recv->prog, struct dma_cmd, prev_i);
/* 'next' becomes the new end of the DMA chain */
next->control |= 3 << 20; /* enable interrupt */
next->branchAddress = 0; /* disable branch */
/* link prev to next */
if(prev_i % iso->irq_interval) {
prev->control &= ~(3 << 20); /* no interrupt */
} else {
prev->control |= 3 << 20; /* enable interrupt */
}
prev->branchAddress = dma_prog_region_offset_to_bus(&recv->prog,
sizeof(struct dma_cmd) * next_i) | 1;
wmb();
/* wake up DMA in case it fell asleep */
reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
/* advance packet cursors */
iso->first_packet = (iso->first_packet+1) % iso->buf_packets;
atomic_inc(&iso->n_dma_packets);
}
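Pictorially, a sketch of the relink for a four-packet ring with first_packet == 1 on entry:

/*   before:  [1] -> [2] -> [3] -> [0] -> end
 *   after:   [2] -> [3] -> [0] -> [1] -> end
 * The slot just released is re-appended as the new tail of the chain.
 */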
static void ohci_iso_recv_release(struct hpsb_iso *iso, int n_packets)
{
int i;
for(i = 0; i < n_packets; i++)
ohci_iso_recv_release_one(iso);
}
static void ohci_iso_recv_task(unsigned long data)
{
struct hpsb_iso *iso = (struct hpsb_iso*) data;
struct ohci_iso_recv *recv = iso->hostdata;
int count;
int wake = 0;
/* loop over the entire buffer */
for(count = 0; count < iso->buf_packets; count++) {
u32 packet_len = 0;
/* pointer to the DMA descriptor */
struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + recv->pkt_dma;
/* check the DMA descriptor for new writes to xferStatus */
u16 xferstatus = il->status >> 16;
u16 rescount = il->status & 0xFFFF;
unsigned char event = xferstatus & 0x1F;
if(!event) {
/* this packet hasn't come in yet; we are done for now */
goto out;
}
if(event == 0x11) {
/* packet received successfully! */
/* rescount is the number of bytes *remaining* in the packet buffer,
after the packet was written */
packet_len = iso->max_packet_size - rescount;
} else if(event == 0x02) {
PRINT(KERN_ERR, recv->ohci->id, "IR DMA error - packet too long for buffer\n");
} else if(event) {
PRINT(KERN_ERR, recv->ohci->id, "IR DMA error - OHCI error code 0x%02x\n", event);
}
/* sync our view of the buffer */
dma_region_sync(&iso->buf, recv->pkt_dma * iso->buf_stride, iso->buf_stride);
/* record the per-packet info */
{
/* iso header is 8 bytes ahead of the data payload */
unsigned char *hdr = hpsb_iso_packet_data(iso, recv->pkt_dma) - 8;
struct hpsb_iso_packet_info *info = hpsb_iso_packet_info(iso, recv->pkt_dma);
info->len = packet_len;
info->cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
info->channel = hdr[5] & 0x3F;
info->tag = hdr[5] >> 6;
info->sy = hdr[4] & 0xF;
}
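/* Hedged layout of the 8-byte header strip decoded above:
 *   hdr[0..1]  low 13 bits: cycle timestamp
 *   hdr[4]     low nibble: sy
 *   hdr[5]     bits 0-5: channel, bits 6-7: tag
 * (as seen by the CPU after the dma_region_sync above) */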
/* at least one packet came in, so wake up the reader */
wake = 1;
/* reset the DMA descriptor */
il->status = iso->max_packet_size;
/* advance DMA packet cursor */
recv->pkt_dma = (recv->pkt_dma + 1) % iso->buf_packets;
/* one more packet for the user, one less for us */
if(atomic_dec_and_test(&iso->n_dma_packets)) {
/* if n_dma_packets reaches zero, we have an overflow */
atomic_inc(&iso->overflows);
}
}
out:
if(wake && iso->callback) {
iso->callback(iso);
}
}
/***********************************
* rawiso ISO transmission *
***********************************/
struct ohci_iso_xmit {
struct ti_ohci *ohci;
struct dma_prog_region prog;
struct ohci1394_iso_tasklet task;
int task_active;
int pkt_dma;
u32 ContextControlSet;
u32 ContextControlClear;
u32 CommandPtr;
};
/* transmission DMA program:
one OUTPUT_MORE_IMMEDIATE for the IT header
one OUTPUT_LAST for the buffer data */
struct iso_xmit_cmd {
struct dma_cmd output_more_immediate;
u8 iso_hdr[8];
u32 unused[2];
struct dma_cmd output_last;
};
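Note the arithmetic this struct encodes: 16 bytes of OUTPUT_MORE_IMMEDIATE, 16 bytes of immediate data (the 8-byte IT header plus padding), and a 16-byte OUTPUT_LAST, i.e. three 16-byte OHCI descriptors. That is why the CommandPtr and branchAddress values written below OR in a Z value of 3.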
static int ohci_iso_xmit_init(struct hpsb_iso *iso);
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
static void ohci_iso_xmit_task(unsigned long data);
static int ohci_iso_xmit_init(struct hpsb_iso *iso)
{
struct ohci_iso_xmit *xmit;
unsigned int prog_size;
int ctx;
int ret = -ENOMEM;
xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
if(!xmit)
return -ENOMEM;
iso->hostdata = xmit;
xmit->ohci = iso->host->hostdata;
xmit->task_active = 0;
xmit->pkt_dma = iso->first_packet;
dma_prog_region_init(&xmit->prog);
prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;
if(dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
goto err;
ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
ohci_iso_xmit_task, (unsigned long) iso);
if(ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0)
goto err;
xmit->task_active = 1;
/* xmit context registers are spaced 16 bytes apart */
ctx = xmit->task.context;
xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;
reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
/* enable interrupts */
reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << ctx);
return 0;
err:
ohci_iso_xmit_shutdown(iso);
return ret;
}
static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
{
struct ohci_iso_xmit *xmit = iso->hostdata;
ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL);
}
static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
{
struct ohci_iso_xmit *xmit = iso->hostdata;
if(xmit->task_active) {
/* halt DMA */
ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL);
/* disable interrupts */
reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);
ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
xmit->task_active = 0;
}
dma_prog_region_free(&xmit->prog);
kfree(xmit);
iso->hostdata = NULL;
}
static void ohci_iso_xmit_task(unsigned long data)
{
struct hpsb_iso *iso = (struct hpsb_iso*) data;
struct ohci_iso_xmit *xmit = iso->hostdata;
int wake = 0;
int count;
/* check the whole buffer if necessary, starting at pkt_dma */
for(count = 0; count < iso->buf_packets; count++) {
/* DMA descriptor */
struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, xmit->pkt_dma);
/* check for new writes to xferStatus */
u16 xferstatus = cmd->output_last.status >> 16;
u8 event = xferstatus & 0x1F;
if(!event) {
/* packet hasn't been sent yet; we are done for now */
goto out;
}
if(event != 0x11) {
PRINT(KERN_ERR, xmit->ohci->id, "IT DMA error - OHCI error code 0x%02x", event);
}
/* at least one packet went out, so wake up the writer */
wake = 1;
/* predict the timestamp pkt_dma will have next time around the buffer */
{
struct hpsb_iso_packet_info* info = hpsb_iso_packet_info(iso, xmit->pkt_dma);
unsigned int cycle = cmd->output_last.status & 0x1FFF;
cycle += iso->buf_packets;
while(cycle >= 8000)
cycle -= 8000;
info->cycle = cycle;
}
/* reset the DMA descriptor for next time */
cmd->output_last.status = 0;
/* advance packet cursor */
xmit->pkt_dma = (xmit->pkt_dma + 1) % iso->buf_packets;
/* one less packet for us */
if(atomic_dec_and_test(&iso->n_dma_packets)) {
/* underflow */
atomic_inc(&iso->overflows);
}
}
out:
if(wake && iso->callback) {
iso->callback(iso);
}
}
static void ohci_iso_xmit_queue_one(struct hpsb_iso *iso)
{
struct ohci_iso_xmit *xmit = iso->hostdata;
struct hpsb_iso_packet_info *info;
int next_i, prev_i;
struct iso_xmit_cmd *next, *prev;
/* sync up the card's view of the buffer */
dma_region_sync(&iso->buf, iso->first_packet * iso->buf_stride, iso->buf_stride);
/* append first_packet to the DMA chain */
/* by linking the previous descriptor to it */
/* (next will become the new end of the DMA chain) */
next_i = iso->first_packet;
prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);
next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);
/* retrieve the packet info stashed in the buffer */
info = hpsb_iso_packet_info(iso, iso->first_packet);
/* set up the OUTPUT_MORE_IMMEDIATE descriptor */
memset(next, 0, sizeof(struct iso_xmit_cmd));
next->output_more_immediate.control = 0x02000008;
/* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */
/* tcode = 0xA, and sy */
next->iso_hdr[0] = 0xA0 | (info->sy & 0xF);
/* tag and channel number */
next->iso_hdr[1] = (info->tag << 6) | (iso->channel & 0x3F);
/* transmission speed */
next->iso_hdr[2] = iso->speed & 0x7;
/* payload size */
next->iso_hdr[6] = info->len & 0xFF;
next->iso_hdr[7] = info->len >> 8;
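/* Hedged view of the 1394 iso header bytes just assembled:
 *   iso_hdr[0]    (0xA << 4) | sy   - tcode A, iso data block
 *   iso_hdr[1]    (tag << 6) | channel
 *   iso_hdr[2]    speed code, consumed by the link layer
 *   iso_hdr[6..7] data_length, low byte first
 */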
/* set up the OUTPUT_LAST */
next->output_last.control = 1 << 28;
next->output_last.control |= 1 << 27; /* update timeStamp */
next->output_last.control |= 3 << 20; /* want interrupt */
next->output_last.control |= 3 << 18; /* enable branch */
next->output_last.control |= info->len;
/* payload bus address */
next->output_last.address = dma_region_offset_to_bus(&iso->buf,
hpsb_iso_packet_data(iso, iso->first_packet) - iso->buf.kvirt);
/* leave branchAddress at zero for now */
/* re-write the previous DMA descriptor to chain to this one */
/* set prev branch address to point to next (Z=3) */
prev->output_last.branchAddress =
dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3;
/* disable interrupt, unless required by the IRQ interval */
if(prev_i % iso->irq_interval) {
prev->output_last.control &= ~(3 << 20); /* no interrupt */
} else {
prev->output_last.control |= 3 << 20; /* enable interrupt */
}
wmb();
/* wake DMA in case it is sleeping */
reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);
/* issue a dummy read of the cycle timer to force all PCI
writes to be posted immediately */
mb();
reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);
/* increment cursors */
iso->first_packet = (iso->first_packet+1) % iso->buf_packets;
atomic_inc(&iso->n_dma_packets);
}
static void ohci_iso_xmit_queue(struct hpsb_iso *iso, int n_packets)
{
int i;
for(i = 0; i < n_packets; i++)
ohci_iso_xmit_queue_one(iso);
}
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
{
struct ohci_iso_xmit *xmit = iso->hostdata;
/* clear out the control register */
reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
wmb();
/* address and length of first descriptor block (Z=3) */
reg_write(xmit->ohci, xmit->CommandPtr,
dma_prog_region_offset_to_bus(&xmit->prog, xmit->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);
/* cycle match */
if(cycle != -1) {
u32 start = cycle & 0x1FFF;
/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
just snarf them from the current time */
u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
/* advance one second to give some extra time for DMA to start */
seconds += 1;
start |= (seconds & 3) << 13;
reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
}
/* run */
reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
mb();
/* wait 100 usec to give the card time to go active */
udelay(100);
/* check the RUN bit */
if(!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
PRINT(KERN_ERR, xmit->ohci->id, "Error starting IT DMA (ContextControl 0x%08x)\n",
reg_read(xmit->ohci, xmit->ContextControlSet));
return -1;
}
return 0;
}
static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, int arg)
{
switch(cmd) {
case XMIT_INIT:
return ohci_iso_xmit_init(iso);
case XMIT_START:
return ohci_iso_xmit_start(iso, arg);
case XMIT_STOP:
ohci_iso_xmit_stop(iso);
return 0;
case XMIT_QUEUE:
ohci_iso_xmit_queue(iso, arg);
return 0;
case XMIT_SHUTDOWN:
ohci_iso_xmit_shutdown(iso);
return 0;
case RECV_INIT:
return ohci_iso_recv_init(iso);
case RECV_START:
return ohci_iso_recv_start(iso, arg);
case RECV_STOP:
ohci_iso_recv_stop(iso);
return 0;
case RECV_RELEASE:
ohci_iso_recv_release(iso, arg);
return 0;
case RECV_SHUTDOWN:
ohci_iso_recv_shutdown(iso);
return 0;
default:
PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
cmd);
break;
}
return -EINVAL;
}
/***************************************
* IEEE-1394 functionality section END *
***************************************/
......@@ -1056,7 +1798,7 @@ static void ohci_irq_handler(int irq, void *dev_id,
unsigned long flags;
/* Read and clear the interrupt event register. Don't clear
* the busReset event, though, this is done when we get the
* the busReset event, though. This is done when we get the
* selfIDComplete interrupt. */
spin_lock_irqsave(&ohci->event_lock, flags);
event = reg_read(ohci, OHCI1394_IntEventClear);
......@@ -1067,10 +1809,52 @@ static void ohci_irq_handler(int irq, void *dev_id,
DBGMSG(ohci->id, "IntEvent: %08x", event);
/* Die right here and now */
if (event & OHCI1394_unrecoverableError) {
PRINT(KERN_ERR, ohci->id, "Unrecoverable error, shutting down card!");
return;
int ctx;
PRINT(KERN_ERR, ohci->id, "Unrecoverable error!");
if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
PRINT(KERN_ERR, ohci->id, "Async Req Tx Context died: "
"ctrl[%08x] cmdptr[%08x]",
reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
reg_read(ohci, OHCI1394_AsReqTrCommandPtr));
if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
PRINT(KERN_ERR, ohci->id, "Async Rsp Tx Context died: "
"ctrl[%08x] cmdptr[%08x]",
reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
reg_read(ohci, OHCI1394_AsRspTrCommandPtr));
if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
PRINT(KERN_ERR, ohci->id, "Async Req Rcv Context died: "
"ctrl[%08x] cmdptr[%08x]",
reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));
if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
PRINT(KERN_ERR, ohci->id, "Async Rsp Rcv Context died: "
"ctrl[%08x] cmdptr[%08x]",
reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));
for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
PRINT(KERN_ERR, ohci->id, "Async Iso Xmit %d Context died: "
"ctrl[%08x] cmdptr[%08x]", ctx,
reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
}
for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
PRINT(KERN_ERR, ohci->id, "Async Iso Recv %d Context died: "
"ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
}
event &= ~OHCI1394_unrecoverableError;
}
if (event & OHCI1394_cycleInconsistent) {
......@@ -1099,9 +1883,7 @@ static void ohci_irq_handler(int irq, void *dev_id,
}
spin_unlock_irqrestore(&ohci->event_lock, flags);
if (!host->in_bus_reset) {
DBGMSG(ohci->id, "irq_handler: Bus reset requested%s",
(attempt_root) ? " and attempting to become root"
: "");
DBGMSG(ohci->id, "irq_handler: Bus reset requested");
/* Subsystem call */
hpsb_bus_reset(ohci->host);
......@@ -1175,14 +1957,14 @@ static void ohci_irq_handler(int irq, void *dev_id,
if (host->in_bus_reset) {
node_id = reg_read(ohci, OHCI1394_NodeID);
/* If our nodeid is not valid, give a msec delay
* to let it settle in and try again. */
if (!(node_id & 0x80000000)) {
mdelay(1);
node_id = reg_read(ohci, OHCI1394_NodeID);
PRINT(KERN_ERR, ohci->id,
"SelfID received, but NodeID invalid "
"(probably new bus reset occured): %08X",
node_id);
goto selfid_not_valid;
}
if (node_id & 0x80000000) { /* NodeID valid */
phyid = node_id & 0x0000003f;
isroot = (node_id & 0x40000000) != 0;
......@@ -1191,56 +1973,43 @@ static void ohci_irq_handler(int irq, void *dev_id,
"(phyid %d, %s)", phyid,
(isroot ? "root" : "not root"));
handle_selfid(ohci, host,
phyid, isroot);
} else {
PRINT(KERN_ERR, ohci->id,
"SelfID interrupt received, but "
"NodeID is not valid: %08X",
node_id);
}
/* Accept Physical requests from all nodes. */
reg_write(ohci,OHCI1394_AsReqFilterHiSet,
0xffffffff);
reg_write(ohci,OHCI1394_AsReqFilterLoSet,
0xffffffff);
} else
PRINT(KERN_ERR, ohci->id,
"SelfID received outside of bus reset sequence");
handle_selfid(ohci, host, phyid, isroot);
/* Finally, we clear the busReset event and reenable
* the busReset interrupt. */
/* Clear the bus reset event and re-enable the
* busReset interrupt. */
spin_lock_irqsave(&ohci->event_lock, flags);
reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
spin_unlock_irqrestore(&ohci->event_lock, flags);
event &= ~OHCI1394_selfIDComplete;
/* Turn on phys dma reception. We should
* probably manage the filtering somehow,
* instead of blindly turning it on. */
/* Accept Physical requests from all nodes. */
reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0xffffffff);
reg_write(ohci,OHCI1394_AsReqFilterLoSet, 0xffffffff);
/*
* CAUTION!
* Some chips (TI TSB43AB22) won't take a value in
* the PhyReqFilter register until after the IntEvent
* is cleared for bus reset, and even then a short
* delay is required.
/* Turn on phys dma reception.
*
* TODO: Enable some sort of filtering management.
*/
if (phys_dma) {
mdelay(1);
reg_write(ohci,OHCI1394_PhyReqFilterHiSet,
0xffffffff);
reg_write(ohci,OHCI1394_PhyReqFilterLoSet,
0xffffffff);
reg_write(ohci,OHCI1394_PhyUpperBound,
0xffff0000);
reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0xffffffff);
reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0xffffffff);
reg_write(ohci,OHCI1394_PhyUpperBound, 0xffff0000);
} else {
reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0x00000000);
reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0x00000000);
}
DBGMSG(ohci->id, "PhyReqFilter=%08x%08x\n",
reg_read(ohci,OHCI1394_PhyReqFilterHiSet),
reg_read(ohci,OHCI1394_PhyReqFilterLoSet));
hpsb_selfid_complete(host, phyid, isroot);
} else
PRINT(KERN_ERR, ohci->id,
"SelfID received outside of bus reset sequence");
event &= ~OHCI1394_selfIDComplete;
selfid_not_valid:
}
/* Make sure we handle everything, just in case we accidentally
......@@ -1412,7 +2181,7 @@ static void dma_rcv_tasklet (unsigned long data)
* bus reset. We always ignore it. */
if (tcode != OHCI1394_TCODE_PHY) {
if (!ohci->no_swap_incoming)
packet_swab(d->spb, tcode, (length - 4) >> 2);
packet_swab(d->spb, tcode);
DBGMSG(ohci->id, "Packet received from node"
" %d ack=0x%02X spd=%d tcode=0x%X"
" length=%d ctx=%d tlabel=%d",
......@@ -1560,11 +2329,11 @@ static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
if (d->prg_cpu) {
for (i=0; i<d->num_desc; i++)
if (d->prg_cpu[i] && d->prg_bus[i]) {
pci_free_consistent(
d->ohci->dev, sizeof(struct dma_cmd),
d->prg_cpu[i], d->prg_bus[i]);
pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
}
pci_pool_destroy(d->prg_pool);
OHCI_DMA_FREE("dma_rcv prg pool");
kfree(d->prg_cpu);
kfree(d->prg_bus);
}
......@@ -1624,6 +2393,10 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
return -ENOMEM;
}
d->prg_pool = hpsb_pci_pool_create("ohci1394 rcv prg", ohci->dev,
sizeof(struct dma_cmd), 4, 0, SLAB_KERNEL);
OHCI_DMA_ALLOC("dma_rcv prg pool");
for (i=0; i<d->num_desc; i++) {
d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
d->buf_size,
......@@ -1639,11 +2412,8 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
return -ENOMEM;
}
d->prg_cpu[i] = pci_alloc_consistent(ohci->dev,
sizeof(struct dma_cmd),
d->prg_bus+i);
OHCI_DMA_ALLOC("consistent dma_rcv prg[%d]", i);
d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
if (d->prg_cpu[i] != NULL) {
memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
......@@ -1692,11 +2462,11 @@ static void free_dma_trm_ctx(struct dma_trm_ctx *d)
if (d->prg_cpu) {
for (i=0; i<d->num_desc; i++)
if (d->prg_cpu[i] && d->prg_bus[i]) {
pci_free_consistent(
d->ohci->dev, sizeof(struct at_dma_prg),
d->prg_cpu[i], d->prg_bus[i]);
OHCI_DMA_FREE("consistent dma_trm prg[%d]", i);
pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
}
pci_pool_destroy(d->prg_pool);
OHCI_DMA_FREE("dma_trm prg pool");
kfree(d->prg_cpu);
kfree(d->prg_bus);
}
......@@ -1732,11 +2502,13 @@ alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
memset(d->prg_cpu, 0, d->num_desc * sizeof(struct at_dma_prg*));
memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
d->prg_pool = hpsb_pci_pool_create("ohci1394 trm prg", ohci->dev,
sizeof(struct at_dma_prg), 4, 0, SLAB_KERNEL);
OHCI_DMA_ALLOC("dma_rcv prg pool");
for (i = 0; i < d->num_desc; i++) {
d->prg_cpu[i] = pci_alloc_consistent(ohci->dev,
sizeof(struct at_dma_prg),
d->prg_bus+i);
OHCI_DMA_ALLOC("consistent dma_trm prg[%d]", i);
d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
if (d->prg_cpu[i] != NULL) {
memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
......@@ -1941,6 +2713,7 @@ static struct hpsb_host_driver ohci1394_driver = {
.get_rom = ohci_get_rom,
.transmit_packet = ohci_transmit,
.devctl = ohci_devctl,
.isoctl = ohci_isoctl,
.hw_csr_reg = ohci_hw_csr_reg,
};
......
......@@ -95,6 +95,7 @@ struct dma_rcv_ctx {
/* dma block descriptors */
struct dma_cmd **prg_cpu;
dma_addr_t *prg_bus;
struct pci_pool *prg_pool;
/* dma buffers */
quadlet_t **buf_cpu;
......@@ -120,6 +121,7 @@ struct dma_trm_ctx {
/* dma block descriptors */
struct at_dma_prg **prg_cpu;
dma_addr_t *prg_bus;
struct pci_pool *prg_pool;
unsigned int prg_ind;
unsigned int sent_ind;
......@@ -292,6 +294,9 @@ static inline u32 reg_read(const struct ti_ohci *ohci, int offset)
#define OHCI1394_IsoRecvIntEventClear 0x0A4
#define OHCI1394_IsoRecvIntMaskSet 0x0A8
#define OHCI1394_IsoRecvIntMaskClear 0x0AC
#define OHCI1394_InitialBandwidthAvailable 0x0B0
#define OHCI1394_InitialChannelsAvailableHi 0x0B4
#define OHCI1394_InitialChannelsAvailableLo 0x0B8
#define OHCI1394_FairnessControl 0x0DC
#define OHCI1394_LinkControlSet 0x0E0
#define OHCI1394_LinkControlClear 0x0E4
......
#!/bin/sh
cat <<EOF
/* Generated file for OUI database */
#include <linux/config.h>
#ifdef CONFIG_IEEE1394_OUI_DB
struct oui_list_struct {
int oui;
char *name;
} oui_list[] = {
EOF
while read oui name; do
echo " { 0x$oui, \"$name\" },"
done
cat <<EOF
};
#endif /* CONFIG_IEEE1394_OUI_DB */
EOF
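Presumably this script is run over the flat OUI list at build time, along the lines of: sh mkoui.sh < oui.db > oui.c (the file names here are assumptions), and the generated oui_list[] table is what nodemgr_find_oui_name() searches when CONFIG_IEEE1394_OUI_DB is enabled.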
......@@ -19,6 +19,16 @@
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/*
* Contributions:
*
* Manfred Weihs <weihs@ict.tuwien.ac.at>
* reading bus info block (containing GUID) from serial
* eeprom via i2c and storing it in config ROM
* Reworked code for initiating bus resets
* (long, short, with or without hold-off)
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/slab.h>
......@@ -450,7 +460,7 @@ static void handle_selfid(struct ti_lynx *lynx, struct hpsb_host *host)
if (host->in_bus_reset) return; /* in bus reset again */
if (isroot) reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_CYCMASTER);
if (isroot) reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_CYCMASTER); //FIXME: I do not think we need this here
reg_set_bits(lynx, LINK_CONTROL,
LINK_CONTROL_RCV_CMP_VALID | LINK_CONTROL_TX_ASYNC_EN
| LINK_CONTROL_RX_ASYNC_EN | LINK_CONTROL_CYCTIMEREN);
......@@ -563,6 +573,7 @@ static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
struct hpsb_packet *packet;
LIST_HEAD(packet_list);
unsigned long flags;
int phy_reg;
switch (cmd) {
case RESET_BUS:
......@@ -571,21 +582,140 @@ static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
break;
}
if (arg) {
arg = 3 << 6;
switch (arg) {
case SHORT_RESET:
if (lynx->phyic.reg_1394a) {
phy_reg = get_phy_reg(lynx, 5);
if (phy_reg == -1) {
PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
retval = -1;
break;
}
phy_reg |= 0x40;
PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset) on request");
lynx->selfid_size = -1;
lynx->phy_reg0 = -1;
set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
break;
} else {
arg = 1 << 6;
PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
/* fall through to long bus reset */
}
case LONG_RESET:
phy_reg = get_phy_reg(lynx, 1);
if (phy_reg == -1) {
PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
retval = -1;
break;
}
phy_reg |= 0x40;
retval = get_phy_reg(lynx, 1);
arg |= (retval == -1 ? 63 : retval);
retval = 0;
PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset) on request");
lynx->selfid_size = -1;
lynx->phy_reg0 = -1;
set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
break;
case SHORT_RESET_NO_FORCE_ROOT:
if (lynx->phyic.reg_1394a) {
phy_reg = get_phy_reg(lynx, 1);
if (phy_reg == -1) {
PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
retval = -1;
break;
}
if (phy_reg & 0x80) {
phy_reg &= ~0x80;
set_phy_reg(lynx, 1, phy_reg); /* clear RHB */
}
phy_reg = get_phy_reg(lynx, 5);
if (phy_reg == -1) {
PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
retval = -1;
break;
}
phy_reg |= 0x40;
PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, no force_root) on request");
lynx->selfid_size = -1;
lynx->phy_reg0 = -1;
set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
break;
} else {
PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
/* fall through to long bus reset */
}
case LONG_RESET_NO_FORCE_ROOT:
phy_reg = get_phy_reg(lynx, 1);
if (phy_reg == -1) {
PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
retval = -1;
break;
}
phy_reg &= ~0x80;
phy_reg |= 0x40;
PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, no force_root) on request");
lynx->selfid_size = -1;
lynx->phy_reg0 = -1;
set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
break;
case SHORT_RESET_FORCE_ROOT:
if (lynx->phyic.reg_1394a) {
phy_reg = get_phy_reg(lynx, 1);
if (phy_reg == -1) {
PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
retval = -1;
break;
}
if (!(phy_reg & 0x80)) {
phy_reg |= 0x80;
set_phy_reg(lynx, 1, phy_reg); /* set RHB */
}
phy_reg = get_phy_reg(lynx, 5);
if (phy_reg == -1) {
PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
retval = -1;
break;
}
phy_reg |= 0x40;
PRINT(KERN_INFO, lynx->id, "resetting bus on request");
PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, force_root set) on request");
lynx->selfid_size = -1;
lynx->phy_reg0 = -1;
set_phy_reg(lynx, 1, arg);
set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
break;
} else {
PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
/* fall through to long bus reset */
}
case LONG_RESET_FORCE_ROOT:
phy_reg = get_phy_reg(lynx, 1);
if (phy_reg == -1) {
PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
retval = -1;
break;
}
phy_reg |= 0xc0;
PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, force_root set) on request");
lynx->selfid_size = -1;
lynx->phy_reg0 = -1;
set_phy_reg(lynx, 1, phy_reg); /* set IBR and RHB */
break;
default:
PRINT(KERN_ERR, lynx->id, "unknown argument for reset_bus command %d", arg);
retval = -1;
}
break;
case GET_CYCLE_COUNTER:
......@@ -1706,6 +1836,7 @@ static struct hpsb_host_driver lynx_driver = {
.get_rom = get_lynx_rom,
.transmit_packet = lynx_transmit,
.devctl = lynx_devctl,
.isoctl = NULL,
};
MODULE_AUTHOR("Andreas E. Bombe <andreas.bombe@munich.netsurf.de>");
......
......@@ -37,6 +37,7 @@
#include <linux/init.h>
#include <linux/version.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <linux/devfs_fs_kernel.h>
......@@ -46,6 +47,7 @@
#include "ieee1394_core.h"
#include "hosts.h"
#include "highlevel.h"
#include "iso.h"
#include "ieee1394_transactions.h"
#include "raw1394.h"
......@@ -107,7 +109,6 @@ static struct pending_request *__alloc_pending_request(int flags)
if (req != NULL) {
memset(req, 0, sizeof(struct pending_request));
INIT_LIST_HEAD(&req->list);
HPSB_INIT_WORK(&req->tq, (void(*)(void*))queue_complete_cb, NULL);
}
return req;
......@@ -132,20 +133,27 @@ static void free_pending_request(struct pending_request *req)
kfree(req);
}
static void queue_complete_req(struct pending_request *req)
/* fi->reqlists_lock must be taken */
static void __queue_complete_req(struct pending_request *req)
{
unsigned long flags;
struct file_info *fi = req->file_info;
spin_lock_irqsave(&fi->reqlists_lock, flags);
list_del(&req->list);
list_add_tail(&req->list, &fi->req_complete);
spin_unlock_irqrestore(&fi->reqlists_lock, flags);
up(&fi->complete_sem);
wake_up_interruptible(&fi->poll_wait_complete);
}
static void queue_complete_req(struct pending_request *req)
{
unsigned long flags;
struct file_info *fi = req->file_info;
spin_lock_irqsave(&fi->reqlists_lock, flags);
__queue_complete_req(req);
spin_unlock_irqrestore(&fi->reqlists_lock, flags);
}
static void queue_complete_cb(struct pending_request *req)
{
struct hpsb_packet *packet = req->packet;
......@@ -171,8 +179,11 @@ static void queue_complete_cb(struct pending_request *req)
req->req.length = 0;
}
if (req->req.type != RAW1394_REQ_PHYPACKET)
free_tlabel(packet->host, packet->node_id, packet->tlabel);
if ((req->req.type == RAW1394_REQ_ASYNC_READ) ||
(req->req.type == RAW1394_REQ_ASYNC_WRITE) ||
(req->req.type == RAW1394_REQ_LOCK) ||
(req->req.type == RAW1394_REQ_LOCK64))
hpsb_free_tlabel(packet);
queue_complete_req(req);
}
......@@ -183,7 +194,8 @@ static void add_host(struct hpsb_host *host)
struct host_info *hi;
unsigned long flags;
hi = (struct host_info *)kmalloc(sizeof(struct host_info), SLAB_KERNEL);
hi = (struct host_info *)kmalloc(sizeof(struct host_info),
in_interrupt() ? SLAB_ATOMIC : SLAB_KERNEL);
if (hi != NULL) {
INIT_LIST_HEAD(&hi->list);
hi->host = host;
......@@ -642,45 +654,37 @@ static int handle_async_request(struct file_info *fi,
switch (req->req.type) {
case RAW1394_REQ_ASYNC_READ:
if (req->req.length == 4) {
DBGMSG("quadlet_read_request called");
packet = hpsb_make_readqpacket(fi->host, node, addr);
if (!packet) return -ENOMEM;
DBGMSG("read_request called");
packet = hpsb_make_readpacket(fi->host, node, addr, req->req.length);
req->data = &packet->header[3];
} else {
DBGMSG("block_read_request called");
packet = hpsb_make_readbpacket(fi->host, node, addr,
req->req.length);
if (!packet) return -ENOMEM;
if (!packet)
return -ENOMEM;
if (req->req.length == 4)
req->data = &packet->header[3];
else
req->data = packet->data;
}
break;
case RAW1394_REQ_ASYNC_WRITE:
if (req->req.length == 4) {
quadlet_t x;
DBGMSG("write_request called");
DBGMSG("quadlet_write_request called");
if (copy_from_user(&x, int2ptr(req->req.sendb), 4)) {
req->req.error = RAW1394_ERROR_MEMFAULT;
}
packet = hpsb_make_writeqpacket(fi->host, node, addr,
x);
if (!packet) return -ENOMEM;
} else {
DBGMSG("block_write_request called");
packet = hpsb_make_writebpacket(fi->host, node, addr,
packet = hpsb_make_writepacket(fi->host, node, addr, NULL,
req->req.length);
if (!packet) return -ENOMEM;
if (!packet)
return -ENOMEM;
if (req->req.length == 4) {
if (copy_from_user(&packet->header[3], int2ptr(req->req.sendb),
req->req.length))
req->req.error = RAW1394_ERROR_MEMFAULT;
} else {
if (copy_from_user(packet->data, int2ptr(req->req.sendb),
req->req.length)) {
req->req.length))
req->req.error = RAW1394_ERROR_MEMFAULT;
}
}
req->req.length = 0;
break;
......@@ -700,7 +704,7 @@ static int handle_async_request(struct file_info *fi,
}
packet = hpsb_make_lockpacket(fi->host, node, addr,
req->req.misc);
req->req.misc, NULL, 0);
if (!packet) return -ENOMEM;
if (copy_from_user(packet->data, int2ptr(req->req.sendb),
......@@ -728,7 +732,7 @@ static int handle_async_request(struct file_info *fi,
}
}
packet = hpsb_make_lock64packet(fi->host, node, addr,
req->req.misc);
req->req.misc, NULL, 0);
if (!packet) return -ENOMEM;
if (copy_from_user(packet->data, int2ptr(req->req.sendb),
......@@ -753,8 +757,7 @@ static int handle_async_request(struct file_info *fi,
return sizeof(struct raw1394_request);
}
req->tq.data = req;
hpsb_add_packet_complete_task(packet, &req->tq);
hpsb_set_packet_complete_task(packet, (void(*)(void*))queue_complete_cb, req);
spin_lock_irq(&fi->reqlists_lock);
list_add_tail(&req->list, &fi->req_pending);
......@@ -765,7 +768,7 @@ static int handle_async_request(struct file_info *fi,
if (!hpsb_send_packet(packet)) {
req->req.error = RAW1394_ERROR_SEND_ERROR;
req->req.length = 0;
free_tlabel(packet->host, packet->node_id, packet->tlabel);
hpsb_free_tlabel(packet);
queue_complete_req(req);
}
return sizeof(struct raw1394_request);
......@@ -776,15 +779,14 @@ static int handle_iso_send(struct file_info *fi, struct pending_request *req,
{
struct hpsb_packet *packet;
packet = alloc_hpsb_packet(req->req.length);
if (!packet) return -ENOMEM;
req->packet = packet;
fill_iso_packet(packet, req->req.length, channel & 0x3f,
packet = hpsb_make_isopacket(fi->host, req->req.length, channel & 0x3f,
(req->req.misc >> 16) & 0x3, req->req.misc & 0xf);
packet->type = hpsb_iso;
if (!packet)
return -ENOMEM;
packet->speed_code = req->req.address & 0x3;
packet->host = fi->host;
req->packet = packet;
if (copy_from_user(packet->data, int2ptr(req->req.sendb),
req->req.length)) {
......@@ -794,16 +796,15 @@ static int handle_iso_send(struct file_info *fi, struct pending_request *req,
return sizeof(struct raw1394_request);
}
HPSB_PREPARE_WORK(&req->tq, (void (*)(void*))queue_complete_req, req);
req->req.length = 0;
hpsb_add_packet_complete_task(packet, &req->tq);
hpsb_set_packet_complete_task(packet, (void (*)(void*))queue_complete_req, req);
spin_lock_irq(&fi->reqlists_lock);
list_add_tail(&req->list, &fi->req_pending);
spin_unlock_irq(&fi->reqlists_lock);
/* Update the generation of the packet just before sending. */
packet->generation = get_hpsb_generation(fi->host);
packet->generation = req->req.generation;
if (!hpsb_send_packet(packet)) {
req->req.error = RAW1394_ERROR_SEND_ERROR;
......@@ -857,16 +858,15 @@ static int handle_async_send(struct file_info *fi, struct pending_request *req)
packet->header_size=header_length;
packet->data_size=req->req.length-header_length;
HPSB_PREPARE_WORK(&req->tq, (void (*)(void*))queue_complete_req, req);
req->req.length = 0;
hpsb_add_packet_complete_task(packet, &req->tq);
hpsb_set_packet_complete_task(packet, (void(*)(void*))queue_complete_cb, req);
spin_lock_irq(&fi->reqlists_lock);
list_add_tail(&req->list, &fi->req_pending);
spin_unlock_irq(&fi->reqlists_lock);
/* Update the generation of the packet just before sending. */
packet->generation = get_hpsb_generation(fi->host);
packet->generation = req->req.generation;
if (!hpsb_send_packet(packet)) {
req->req.error = RAW1394_ERROR_SEND_ERROR;
......@@ -1806,8 +1806,7 @@ static int write_phypacket(struct file_info *fi, struct pending_request *req)
if (!packet) return -ENOMEM;
req->req.length=0;
req->packet=packet;
req->tq.data=req;
hpsb_add_packet_complete_task(packet, &req->tq);
hpsb_set_packet_complete_task(packet, (void(*)(void*))queue_complete_cb, req);
spin_lock_irq(&fi->reqlists_lock);
list_add_tail(&req->list, &fi->req_pending);
spin_unlock_irq(&fi->reqlists_lock);
......@@ -1997,6 +1996,234 @@ static ssize_t raw1394_write(struct file *file, const char *buffer, size_t count
return retval;
}
/* rawiso operations */
/* check if any RAW1394_REQ_RAWISO_ACTIVITY event is already in the
* completion queue (reqlists_lock must be taken) */
static inline int __rawiso_event_in_queue(struct file_info *fi)
{
struct list_head *lh;
struct pending_request *req;
list_for_each(lh, &fi->req_complete) {
req = list_entry(lh, struct pending_request, list);
if(req->req.type == RAW1394_REQ_RAWISO_ACTIVITY) {
return 1;
}
}
return 0;
}
static void rawiso_activity_cb(struct hpsb_iso *iso)
{
unsigned long host_flags;
struct list_head *lh;
struct host_info *hi;
spin_lock_irqsave(&host_info_lock, host_flags);
hi = find_host_info(iso->host);
if (hi != NULL) {
list_for_each(lh, &hi->file_info_list) {
unsigned long reqlist_flags;
struct file_info *fi = list_entry(lh, struct file_info, list);
spin_lock_irqsave(&fi->reqlists_lock, reqlist_flags);
/* only one ISO activity event may be in the queue */
if(!__rawiso_event_in_queue(fi)) {
struct pending_request *req = __alloc_pending_request(SLAB_ATOMIC);
if(req) {
req->file_info = fi;
req->req.type = RAW1394_REQ_RAWISO_ACTIVITY;
req->req.generation = get_hpsb_generation(iso->host);
__queue_complete_req(req);
} else {
/* on allocation failure, signal an overflow */
if(fi->iso_handle) {
atomic_inc(&fi->iso_handle->overflows);
}
}
}
spin_unlock_irqrestore(&fi->reqlists_lock, reqlist_flags);
}
}
spin_unlock_irqrestore(&host_info_lock, host_flags);
}
/* helper function - gather all the kernel iso status bits for returning to user-space */
static void raw1394_iso_fill_status(struct hpsb_iso *iso, struct raw1394_iso_status *stat)
{
stat->config.buf_packets = iso->buf_packets;
stat->config.max_packet_size = iso->max_packet_size;
stat->config.channel = iso->channel;
stat->config.speed = iso->speed;
stat->config.irq_interval = iso->irq_interval;
stat->buf_stride = iso->buf_stride;
stat->packet_data_offset = iso->packet_data_offset;
stat->packet_info_offset = iso->packet_info_offset;
stat->first_packet = iso->first_packet;
stat->n_packets = hpsb_iso_n_ready(iso);
stat->overflows = atomic_read(&iso->overflows);
}
static int raw1394_iso_xmit_init(struct file_info *fi, void *uaddr)
{
struct raw1394_iso_status stat;
if(copy_from_user(&stat, uaddr, sizeof(stat)))
return -EFAULT;
fi->iso_handle = hpsb_iso_xmit_init(fi->host,
stat.config.buf_packets,
stat.config.max_packet_size,
stat.config.channel,
stat.config.speed,
stat.config.irq_interval,
rawiso_activity_cb);
if(!fi->iso_handle)
return -ENOMEM;
fi->iso_state = RAW1394_ISO_XMIT;
raw1394_iso_fill_status(fi->iso_handle, &stat);
if(copy_to_user(uaddr, &stat, sizeof(stat)))
return -EFAULT;
/* queue an event to get things started */
rawiso_activity_cb(fi->iso_handle);
return 0;
}
static int raw1394_iso_recv_init(struct file_info *fi, void *uaddr)
{
struct raw1394_iso_status stat;
if(copy_from_user(&stat, uaddr, sizeof(stat)))
return -EFAULT;
fi->iso_handle = hpsb_iso_recv_init(fi->host,
stat.config.buf_packets,
stat.config.max_packet_size,
stat.config.channel,
stat.config.irq_interval,
rawiso_activity_cb);
if(!fi->iso_handle)
return -ENOMEM;
fi->iso_state = RAW1394_ISO_RECV;
raw1394_iso_fill_status(fi->iso_handle, &stat);
if(copy_to_user(uaddr, &stat, sizeof(stat)))
return -EFAULT;
return 0;
}
static int raw1394_iso_get_status(struct file_info *fi, void *uaddr)
{
struct raw1394_iso_status stat;
struct hpsb_iso *iso = fi->iso_handle;
raw1394_iso_fill_status(fi->iso_handle, &stat);
if(copy_to_user(uaddr, &stat, sizeof(stat)))
return -EFAULT;
/* reset overflow counter */
atomic_set(&iso->overflows, 0);
return 0;
}
static void raw1394_iso_shutdown(struct file_info *fi)
{
if(fi->iso_handle)
hpsb_iso_shutdown(fi->iso_handle);
fi->iso_handle = NULL;
fi->iso_state = RAW1394_ISO_INACTIVE;
}
/* mmap the rawiso xmit/recv buffer */
static int raw1394_mmap(struct file *file, struct vm_area_struct *vma)
{
struct file_info *fi = file->private_data;
if(fi->iso_state == RAW1394_ISO_INACTIVE)
return -EINVAL;
return dma_region_mmap(&fi->iso_handle->buf, file, vma);
}
/* ioctl is only used for rawiso operations */
static int raw1394_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
struct file_info *fi = file->private_data;
switch(fi->iso_state) {
case RAW1394_ISO_INACTIVE:
switch(cmd) {
case RAW1394_ISO_XMIT_INIT:
return raw1394_iso_xmit_init(fi, (void*) arg);
case RAW1394_ISO_RECV_INIT:
return raw1394_iso_recv_init(fi, (void*) arg);
default:
break;
}
break;
case RAW1394_ISO_RECV:
switch(cmd) {
case RAW1394_ISO_RECV_START:
return hpsb_iso_recv_start(fi->iso_handle, arg);
case RAW1394_ISO_STOP:
hpsb_iso_stop(fi->iso_handle);
return 0;
case RAW1394_ISO_GET_STATUS:
return raw1394_iso_get_status(fi, (void*) arg);
case RAW1394_ISO_PRODUCE_CONSUME:
return hpsb_iso_recv_release_packets(fi->iso_handle, arg);
case RAW1394_ISO_SHUTDOWN:
raw1394_iso_shutdown(fi);
return 0;
}
break;
case RAW1394_ISO_XMIT:
switch(cmd) {
case RAW1394_ISO_XMIT_START: {
/* copy two ints from user-space */
int args[2];
if(copy_from_user(&args[0], (void*) arg, sizeof(args)))
return -EFAULT;
return hpsb_iso_xmit_start(fi->iso_handle, args[0], args[1]);
}
case RAW1394_ISO_STOP:
hpsb_iso_stop(fi->iso_handle);
return 0;
case RAW1394_ISO_GET_STATUS:
return raw1394_iso_get_status(fi, (void*) arg);
case RAW1394_ISO_PRODUCE_CONSUME:
return hpsb_iso_xmit_queue_packets(fi->iso_handle, arg);
case RAW1394_ISO_SHUTDOWN:
raw1394_iso_shutdown(fi);
return 0;
}
break;
default:
break;
}
return -EINVAL;
}
static unsigned int raw1394_poll(struct file *file, poll_table *pt)
{
struct file_info *fi = file->private_data;
......@@ -2057,6 +2284,9 @@ static int raw1394_release(struct inode *inode, struct file *file)
struct arm_addr *arm_addr = NULL;
int another_host;
if(fi->iso_state != RAW1394_ISO_INACTIVE)
raw1394_iso_shutdown(fi);
for (i = 0; i < 64; i++) {
if (fi->listen_channels & (1ULL << i)) {
hpsb_unlisten_channel(hl_handle, fi->host, i);
......@@ -2165,6 +2395,8 @@ static struct file_operations file_ops = {
.owner = THIS_MODULE,
.read = raw1394_read,
.write = raw1394_write,
.mmap = raw1394_mmap,
.ioctl = raw1394_ioctl,
.poll = raw1394_poll,
.open = raw1394_open,
.release = raw1394_release,
......
......@@ -40,6 +40,7 @@
#define RAW1394_REQ_ISO_RECEIVE 10001
#define RAW1394_REQ_FCP_REQUEST 10002
#define RAW1394_REQ_ARM 10003
#define RAW1394_REQ_RAWISO_ACTIVITY 10004
/* error codes */
#define RAW1394_ERROR_NONE 0
......@@ -115,6 +116,64 @@ typedef struct arm_request_response {
struct arm_response *response;
} *arm_request_response_t;
/* rawiso API */
/* ioctls */
#define RAW1394_ISO_XMIT_INIT 1 /* arg: raw1394_iso_status* */
#define RAW1394_ISO_RECV_INIT 2 /* arg: raw1394_iso_status* */
#define RAW1394_ISO_RECV_START 3 /* arg: int, starting cycle */
#define RAW1394_ISO_XMIT_START 8 /* arg: int[2], { starting cycle, prebuffer } */
#define RAW1394_ISO_STOP 4
#define RAW1394_ISO_GET_STATUS 5 /* arg: raw1394_iso_status* */
#define RAW1394_ISO_PRODUCE_CONSUME 6 /* arg: int, # of packets */
#define RAW1394_ISO_SHUTDOWN 7
/* per-packet metadata embedded in the ringbuffer */
/* must be identical to hpsb_iso_packet_info in iso.h! */
struct raw1394_iso_packet_info {
unsigned short len;
unsigned short cycle;
unsigned char channel; /* recv only */
unsigned char tag;
unsigned char sy;
};
struct raw1394_iso_config {
unsigned int buf_packets;
unsigned int max_packet_size;
int channel;
int speed; /* xmit only */
int irq_interval;
};
/* argument to RAW1394_ISO_XMIT/RECV_INIT and RAW1394_ISO_GET_STATUS */
struct raw1394_iso_status {
/* current settings */
struct raw1394_iso_config config;
/* byte offset between successive packets in the buffer */
int buf_stride;
/* byte offset of data payload within each packet */
int packet_data_offset;
/* byte offset of struct iso_packet_info within each packet */
int packet_info_offset;
/* index of next packet to fill with data (ISO transmission)
or next packet containing data received (ISO reception) */
unsigned int first_packet;
/* number of packets waiting to be filled with data (ISO transmission)
or containing data received (ISO reception) */
unsigned int n_packets;
/* approximate number of packets dropped due to overflow or
underflow of the packet buffer (a value of zero guarantees
that no packets have been dropped) */
unsigned int overflows;
};
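/* Sketch (example only, not part of the patch): locating packet i in the
 * mmap()ed ring buffer from the offsets reported by RAW1394_ISO_GET_STATUS.
 * "buf" is assumed to be the address returned by mmap() on the raw1394 fd. */
#if 0
static void example_locate_packet(unsigned char *buf,
				  struct raw1394_iso_status *stat,
				  unsigned int i)
{
	unsigned char *pkt  = buf + i * stat->buf_stride;
	unsigned char *data = pkt + stat->packet_data_offset;
	struct raw1394_iso_packet_info *info =
		(struct raw1394_iso_packet_info *)(pkt + stat->packet_info_offset);

	/* info->len bytes of payload start at "data"; for reception,
	 * info->channel/cycle/tag/sy describe the received packet */
}
#endif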
#ifdef __KERNEL__
struct iso_block_store {
......@@ -123,6 +182,10 @@ struct iso_block_store {
quadlet_t data[0];
};
enum raw1394_iso_state { RAW1394_ISO_INACTIVE = 0,
RAW1394_ISO_RECV = 1,
RAW1394_ISO_XMIT = 2 };
struct file_info {
struct list_head list;
......@@ -141,11 +204,16 @@ struct file_info {
u8 *fcp_buffer;
/* old ISO API */
u64 listen_channels;
quadlet_t *iso_buffer;
size_t iso_buffer_length;
u8 notification; /* (busreset-notification) RAW1394_NOTIFY_OFF/ON */
/* new rawiso API */
enum raw1394_iso_state iso_state;
struct hpsb_iso *iso_handle;
};
struct arm_addr {
......@@ -164,7 +232,6 @@ struct pending_request {
struct list_head list;
struct file_info *file_info;
struct hpsb_packet *packet;
struct hpsb_queue_struct tq;
struct iso_block_store *ibs;
quadlet_t *data;
int free_data;
......
......@@ -84,8 +84,6 @@
* sbp2_serialize_io - Serialize all I/O coming down from the scsi drivers
* (0 = deserialized, 1 = serialized, default = 0)
* sbp2_max_sectors - Change max sectors per I/O supported (default = 255)
* sbp2_max_outstanding_cmds - Change max outstanding concurrent commands (default = 8)
* sbp2_max_cmds_per_lun - Change max concurrent commands per sbp2 device (default = 1)
* sbp2_exclusive_login - Set to zero if you'd like to allow multiple hosts the ability
* to log in at the same time. Sbp2 device must support this,
* and you must know what you're doing (default = 1)
......@@ -310,6 +308,7 @@
#include <linux/list.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/module.h>
......@@ -320,6 +319,7 @@
#include <linux/blk.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/version.h>
#include <asm/current.h>
#include <asm/uaccess.h>
#include <asm/io.h>
......@@ -329,12 +329,10 @@
#include <asm/io.h>
#include <asm/scatterlist.h>
#ifdef CONFIG_KBUILD_2_5
#include <scsi.h>
#include <hosts.h>
#else
#include "../scsi/scsi.h"
#include "../scsi/hosts.h"
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,47)
#include "../scsi/sd.h"
#endif
#include "ieee1394.h"
......@@ -348,7 +346,7 @@
#include "sbp2.h"
static char version[] __devinitdata =
"$Rev: 601 $ James Goodwin <jamesg@filanet.com>";
"$Rev: 697 $ James Goodwin <jamesg@filanet.com>";
/*
* Module load parameter definitions
......@@ -392,26 +390,6 @@ MODULE_PARM(sbp2_max_sectors,"i");
MODULE_PARM_DESC(sbp2_max_sectors, "Change max sectors per I/O supported (default = 255)");
static int sbp2_max_sectors = SBP2_MAX_SECTORS;
/*
* Adjust sbp2_max_outstanding_cmds to tune performance if you have many
* sbp2 devices attached (or if you need to do some debugging).
*/
MODULE_PARM(sbp2_max_outstanding_cmds,"i");
MODULE_PARM_DESC(sbp2_max_outstanding_cmds, "Change max outstanding concurrent commands (default = 8)");
static int sbp2_max_outstanding_cmds = SBP2SCSI_MAX_OUTSTANDING_CMDS;
/*
* Adjust sbp2_max_cmds_per_lun to tune performance. Enabling more than
* one concurrent/linked command per sbp2 device may allow some
* performance gains, but some older sbp2 devices have firmware bugs
* resulting in problems when linking commands... so, enable this with
* care. I can note that the Oxsemi OXFW911 sbp2 chipset works very well
* with large numbers of concurrent/linked commands. =)
*/
MODULE_PARM(sbp2_max_cmds_per_lun,"i");
MODULE_PARM_DESC(sbp2_max_cmds_per_lun, "Change max concurrent commands per sbp2 device (default = 1)");
static int sbp2_max_cmds_per_lun = SBP2SCSI_MAX_CMDS_PER_LUN;
/*
* Exclusive login to sbp2 device? In most cases, the sbp2 driver should
* do an exclusive login, as it's generally unsafe to have two hosts
......@@ -658,90 +636,11 @@ static int sbp2util_down_timeout(atomic_t *done, int timeout)
return ((i > 0) ? 0:1);
}
/*
* This function is called to initially create a packet pool for use in
* sbp2 I/O requests. This packet pool is used when sending out sbp2
* command and agent reset requests, and allows us to remove all
* kmallocs/kfrees from the critical I/O paths.
*/
static int sbp2util_create_request_packet_pool(struct sbp2scsi_host_info *hi)
/* Frees an allocated packet */
static void sbp2_free_packet(struct hpsb_packet *packet)
{
struct hpsb_packet *packet;
int i;
hi->request_packet = kmalloc(sizeof(struct sbp2_request_packet) * SBP2_MAX_REQUEST_PACKETS,
GFP_KERNEL);
if (!hi->request_packet) {
SBP2_ERR("sbp2util_create_request_packet_pool - packet allocation failed!");
return(-ENOMEM);
}
memset(hi->request_packet, 0, sizeof(struct sbp2_request_packet) * SBP2_MAX_REQUEST_PACKETS);
/*
* Create a pool of request packets. Just take the max supported
* concurrent commands and multiply by two to be safe...
*/
for (i=0; i<SBP2_MAX_REQUEST_PACKETS; i++) {
/*
* Max payload of 8 bytes since the sbp2 command request
* uses a payload of 8 bytes, and agent reset is a quadlet
* write request. Bump this up if we plan on using this
* pool for other stuff.
*/
packet = alloc_hpsb_packet(8);
if (!packet) {
SBP2_ERR("sbp2util_create_request_packet_pool - packet allocation failed!");
return(-ENOMEM);
}
/*
* Put these request packets into a free list
*/
INIT_LIST_HEAD(&hi->request_packet[i].list);
hi->request_packet[i].packet = packet;
list_add_tail(&hi->request_packet[i].list, &hi->sbp2_req_free);
}
return(0);
}
/*
* This function is called to remove the packet pool. It is called when
* the sbp2 driver is unloaded.
*/
static void sbp2util_remove_request_packet_pool(struct sbp2scsi_host_info *hi)
{
struct list_head *lh;
struct sbp2_request_packet *request_packet;
unsigned long flags;
/*
* Go through free list releasing packets
*/
sbp2_spin_lock(&hi->sbp2_request_packet_lock, flags);
while (!list_empty(&hi->sbp2_req_free)) {
lh = hi->sbp2_req_free.next;
list_del(lh);
request_packet = list_entry(lh, struct sbp2_request_packet, list);
/*
* Free the hpsb packets that we allocated for the pool
*/
if (request_packet) {
free_hpsb_packet(request_packet->packet);
}
}
kfree(hi->request_packet);
sbp2_spin_unlock(&hi->sbp2_request_packet_lock, flags);
return;
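/* Return the transaction label before freeing the packet itself */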
hpsb_free_tlabel(packet);
free_hpsb_packet(packet);
}
/*
......@@ -751,93 +650,28 @@ static void sbp2util_remove_request_packet_pool(struct sbp2scsi_host_info *hi)
* out a free request packet and re-initialize values in it. I'm sure this
* can still stand some more optimization.
*/
static struct sbp2_request_packet *
sbp2util_allocate_write_request_packet(struct sbp2scsi_host_info *hi,
static struct hpsb_packet *
sbp2util_allocate_write_packet(struct sbp2scsi_host_info *hi,
struct node_entry *ne, u64 addr,
size_t data_size,
quadlet_t data) {
struct list_head *lh;
struct sbp2_request_packet *request_packet = NULL;
quadlet_t *data)
{
struct hpsb_packet *packet;
unsigned long flags;
sbp2_spin_lock(&hi->sbp2_request_packet_lock, flags);
if (!list_empty(&hi->sbp2_req_free)) {
/*
* Pull out a free request packet
*/
lh = hi->sbp2_req_free.next;
list_del(lh);
packet = hpsb_make_writepacket(hi->host, ne->nodeid,
addr, data, data_size);
request_packet = list_entry(lh, struct sbp2_request_packet, list);
packet = request_packet->packet;
if (!packet)
return NULL;
/*
* Initialize the packet (this is really initialization
* the core 1394 stack should do, but I'm doing it myself
* to avoid the overhead).
*/
packet->data_size = data_size;
INIT_LIST_HEAD(&packet->list);
sema_init(&packet->state_change, 0);
packet->state = hpsb_unused;
packet->data_be = 1;
hpsb_set_packet_complete_task(packet, (void (*)(void*))sbp2_free_packet,
packet);
hpsb_node_fill_packet(ne, packet);
packet->tlabel = get_tlabel(hi->host, packet->node_id, 0);
if (!data_size) {
fill_async_writequad(packet, addr, data);
} else {
fill_async_writeblock(packet, addr, data_size);
}
/*
* Set up a task queue completion routine, which returns
* the packet to the free list and releases the tlabel.
*/
HPSB_PREPARE_WORK(&request_packet->tq,
(void (*)(void*))sbp2util_free_request_packet,
request_packet);
request_packet->hi_context = hi;
hpsb_add_packet_complete_task(packet, &request_packet->tq);
/*
* Now, put the packet on the in-use list.
*/
list_add_tail(&request_packet->list, &hi->sbp2_req_inuse);
} else {
SBP2_ERR("sbp2util_allocate_request_packet - no packets available!");
}
sbp2_spin_unlock(&hi->sbp2_request_packet_lock, flags);
return(request_packet);
return packet;
}
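/* Example only (not part of the patch): the allocate/send/free pattern
 * used by the callers below, e.g. the agent reset.  The completion task
 * installed above calls sbp2_free_packet() once the packet completes, so
 * the caller only frees by hand when hpsb_send_packet() fails. */
#if 0
static int example_quadlet_write(struct sbp2scsi_host_info *hi,
				 struct node_entry *ne, u64 addr,
				 quadlet_t value)
{
	struct hpsb_packet *packet;
	quadlet_t data = value;

	packet = sbp2util_allocate_write_packet(hi, ne, addr, 4, &data);
	if (!packet)
		return -ENOMEM;

	if (!hpsb_send_packet(packet)) {
		sbp2_free_packet(packet);
		return -EIO;
	}
	return 0;
}
#endif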
/*
* This function is called to return a packet to our packet pool. It is
* also called as a completion routine when a request packet is completed.
*/
static void sbp2util_free_request_packet(struct sbp2_request_packet *request_packet)
{
unsigned long flags;
struct sbp2scsi_host_info *hi = request_packet->hi_context;
/*
* Free the tlabel, and return the packet to the free pool.
*/
sbp2_spin_lock(&hi->sbp2_request_packet_lock, flags);
free_tlabel(hi->host, LOCAL_BUS | request_packet->packet->node_id,
request_packet->packet->tlabel);
list_del(&request_packet->list);
list_add_tail(&request_packet->list, &hi->sbp2_req_free);
sbp2_spin_unlock(&hi->sbp2_request_packet_lock, flags);
return;
}
/*
* This function is called to create a pool of command orbs used for
......@@ -847,11 +681,13 @@ static int sbp2util_create_command_orb_pool(struct scsi_id_instance_data *scsi_i
struct sbp2scsi_host_info *hi)
{
int i;
unsigned long flags;
unsigned long flags, orbs;
struct sbp2_command_info *command;
orbs = sbp2_serialize_io ? 2 : SBP2_MAX_COMMAND_ORBS;
sbp2_spin_lock(&scsi_id->sbp2_command_orb_lock, flags);
for (i = 0; i < scsi_id->sbp2_total_command_orbs; i++) {
for (i = 0; i < orbs; i++) {
command = (struct sbp2_command_info *)
kmalloc(sizeof(struct sbp2_command_info), GFP_KERNEL);
if (!command) {
......@@ -1148,7 +984,7 @@ static void sbp2_update(struct unit_directory *ud)
sbp2_set_busy_timeout(hi, scsi_id);
/* Do a SBP-2 fetch agent reset. */
sbp2_agent_reset(hi, scsi_id, 0);
sbp2_agent_reset(hi, scsi_id, 1);
/* Get the max speed and packet size that we can use. */
sbp2_max_speed_and_size(hi, scsi_id);
......@@ -1175,7 +1011,7 @@ static void sbp2_add_host(struct hpsb_host *host)
/* Allocate some memory for our host info structure */
hi = (struct sbp2scsi_host_info *)kmalloc(sizeof(struct sbp2scsi_host_info),
GFP_KERNEL);
in_interrupt() ? SLAB_ATOMIC : SLAB_KERNEL);
if (hi == NULL) {
SBP2_ERR("out of memory in sbp2_add_host");
......@@ -1185,17 +1021,8 @@ static void sbp2_add_host(struct hpsb_host *host)
/* Initialize some host stuff */
memset(hi, 0, sizeof(struct sbp2scsi_host_info));
INIT_LIST_HEAD(&hi->list);
INIT_LIST_HEAD(&hi->sbp2_req_inuse);
INIT_LIST_HEAD(&hi->sbp2_req_free);
hi->host = host;
hi->sbp2_command_lock = SPIN_LOCK_UNLOCKED;
hi->sbp2_request_packet_lock = SPIN_LOCK_UNLOCKED;
/* Create our request packet pool (pool of packets for use in I/O) */
if (sbp2util_create_request_packet_pool(hi)) {
SBP2_ERR("sbp2util_create_request_packet_pool failed!");
return;
}
sbp2_spin_lock(&sbp2_host_info_lock, flags);
list_add_tail(&hi->list, &sbp2_host_info_list);
......@@ -1262,7 +1089,6 @@ static void sbp2_remove_host(struct hpsb_host *host)
hi = sbp2_find_host_info(host);
if (hi != NULL) {
sbp2util_remove_request_packet_pool(hi);
list_del(&hi->list);
kfree(hi);
}
......@@ -1373,7 +1199,6 @@ static int sbp2_start_device(struct sbp2scsi_host_info *hi, struct unit_director
INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_inuse);
INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_completed);
scsi_id->sbp2_command_orb_lock = SPIN_LOCK_UNLOCKED;
scsi_id->sbp2_total_command_orbs = 0;
/*
* Make sure that we've gotten ahold of the sbp2 management agent
......@@ -1382,15 +1207,6 @@ static int sbp2_start_device(struct sbp2scsi_host_info *hi, struct unit_director
*/
sbp2_parse_unit_directory(scsi_id);
scsi_id->sbp2_total_command_orbs = SBP2_MAX_COMMAND_ORBS;
/*
* Knock the total command orbs down if we are serializing I/O
*/
if (sbp2_serialize_io) {
scsi_id->sbp2_total_command_orbs = 2; /* one extra for good measure */
}
/*
* Find an empty spot to stick our scsi id instance data.
*/
......@@ -1440,7 +1256,7 @@ static int sbp2_start_device(struct sbp2scsi_host_info *hi, struct unit_director
/*
* Do a SBP-2 fetch agent reset
*/
sbp2_agent_reset(hi, scsi_id, 0);
sbp2_agent_reset(hi, scsi_id, 1);
/*
* Get the max speed and packet size that we can use
......@@ -1461,10 +1277,7 @@ static void sbp2_remove_device(struct sbp2scsi_host_info *hi,
/* Complete any pending commands with selection timeout */
sbp2scsi_complete_all_commands(hi, scsi_id, DID_NO_CONNECT);
/* Clean up any other structures */
if (scsi_id->sbp2_total_command_orbs) {
sbp2util_remove_command_orb_pool(scsi_id, hi);
}
if (scsi_id->login_response) {
pci_free_consistent(hi->host->pdev,
......@@ -1951,7 +1764,7 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id)
* possible. */
if ((scsi_id->sbp2_firmware_revision & 0xffff00) ==
SBP2_128KB_BROKEN_FIRMWARE &&
(sbp2_max_sectors * 512) > (128 * 1024)) {
(sbp2_max_sectors * 512) > (128*1024)) {
SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB max transfer size.",
NODE_BUS_ARGS(scsi_id->ne->nodeid));
SBP2_WARN("WARNING: Current sbp2_max_sectors setting is larger than 128KB (%d sectors)!",
......@@ -2012,35 +1825,36 @@ static int sbp2_max_speed_and_size(struct sbp2scsi_host_info *hi, struct scsi_id
/*
* This function is called in order to perform a SBP-2 agent reset.
*/
static int sbp2_agent_reset(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id, u32 flags)
static int sbp2_agent_reset(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id, int wait)
{
struct sbp2_request_packet *agent_reset_request_packet;
struct hpsb_packet *packet;
quadlet_t data;
SBP2_DEBUG("sbp2_agent_reset");
/*
* Ok, let's write to the target's management agent register
*/
agent_reset_request_packet =
sbp2util_allocate_write_request_packet(hi, scsi_id->ne,
data = ntohl(SBP2_AGENT_RESET_DATA);
packet = sbp2util_allocate_write_packet(hi, scsi_id->ne,
scsi_id->sbp2_command_block_agent_addr +
SBP2_AGENT_RESET_OFFSET,
0, ntohl(SBP2_AGENT_RESET_DATA));
4, &data);
if (!agent_reset_request_packet) {
SBP2_ERR("sbp2util_allocate_write_request_packet failed");
return(-EIO);
if (!packet) {
SBP2_ERR("sbp2util_allocate_write_packet failed");
return(-ENOMEM);
}
if (!hpsb_send_packet(agent_reset_request_packet->packet)) {
if (!hpsb_send_packet(packet)) {
SBP2_ERR("hpsb_send_packet failed");
sbp2util_free_request_packet(agent_reset_request_packet);
sbp2_free_packet(packet);
return(-EIO);
}
if (!(flags & SBP2_SEND_NO_WAIT)) {
down(&agent_reset_request_packet->packet->state_change);
down(&agent_reset_request_packet->packet->state_change);
if (wait) {
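/* Presumably one up() per state change: the first down() returns
 * once the packet has been sent, the second once the write
 * transaction has completed */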
down(&packet->state_change);
down(&packet->state_change);
}
/*
......@@ -2049,7 +1863,6 @@ static int sbp2_agent_reset(struct sbp2scsi_host_info *hi, struct scsi_id_instan
scsi_id->last_orb = NULL;
return(0);
}
/*
......@@ -2310,7 +2123,7 @@ static int sbp2_create_command_orb(struct sbp2scsi_host_info *hi,
static int sbp2_link_orb_command(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id,
struct sbp2_command_info *command)
{
struct sbp2_request_packet *command_request_packet;
struct hpsb_packet *packet;
struct sbp2_command_orb *command_orb = &command->command_orb;
outstanding_orb_incr;
......@@ -2333,25 +2146,24 @@ static int sbp2_link_orb_command(struct sbp2scsi_host_info *hi, struct scsi_id_i
*/
if (hpsb_node_entry_valid(scsi_id->ne)) {
command_request_packet =
sbp2util_allocate_write_request_packet(hi, scsi_id->ne,
packet = sbp2util_allocate_write_packet(hi, scsi_id->ne,
scsi_id->sbp2_command_block_agent_addr +
SBP2_ORB_POINTER_OFFSET, 8, 0);
SBP2_ORB_POINTER_OFFSET, 8, NULL);
if (!command_request_packet) {
SBP2_ERR("sbp2util_allocate_write_request_packet failed");
return(-EIO);
if (!packet) {
SBP2_ERR("sbp2util_allocate_write_packet failed");
return(-ENOMEM);
}
command_request_packet->packet->data[0] = ORB_SET_NODE_ID(hi->host->node_id);
command_request_packet->packet->data[1] = command->command_orb_dma;
sbp2util_cpu_to_be32_buffer(command_request_packet->packet->data, 8);
packet->data[0] = ORB_SET_NODE_ID(hi->host->node_id);
packet->data[1] = command->command_orb_dma;
sbp2util_cpu_to_be32_buffer(packet->data, 8);
SBP2_ORB_DEBUG("write command agent, command orb %p", command_orb);
if (!hpsb_send_packet(command_request_packet->packet)) {
if (!hpsb_send_packet(packet)) {
SBP2_ERR("hpsb_send_packet failed");
sbp2util_free_request_packet(command_request_packet);
sbp2_free_packet(packet);
return(-EIO);
}
......@@ -2382,22 +2194,22 @@ static int sbp2_link_orb_command(struct sbp2scsi_host_info *hi, struct scsi_id_i
* Ring the doorbell
*/
if (hpsb_node_entry_valid(scsi_id->ne)) {
quadlet_t data = cpu_to_be32(command->command_orb_dma);
command_request_packet = sbp2util_allocate_write_request_packet(hi,
scsi_id->ne,
scsi_id->sbp2_command_block_agent_addr + SBP2_DOORBELL_OFFSET,
0, cpu_to_be32(command->command_orb_dma));
packet = sbp2util_allocate_write_packet(hi, scsi_id->ne,
scsi_id->sbp2_command_block_agent_addr +
SBP2_DOORBELL_OFFSET, 4, &data);
if (!command_request_packet) {
SBP2_ERR("sbp2util_allocate_write_request_packet failed");
return(-EIO);
if (!packet) {
SBP2_ERR("sbp2util_allocate_write_packet failed");
return(-ENOMEM);
}
SBP2_ORB_DEBUG("ring doorbell, command orb %p", command_orb);
if (!hpsb_send_packet(command_request_packet->packet)) {
if (!hpsb_send_packet(packet)) {
SBP2_ERR("hpsb_send_packet failed");
sbp2util_free_request_packet(command_request_packet);
sbp2_free_packet(packet);
return(-EIO);
}
}
......@@ -2789,7 +2601,7 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
* Initiate a fetch agent reset.
*/
SBP2_DEBUG("Dead bit set - initiating fetch agent reset");
sbp2_agent_reset(hi, scsi_id, SBP2_SEND_NO_WAIT);
sbp2_agent_reset(hi, scsi_id, 0);
}
SBP2_ORB_DEBUG("completing command orb %p", &command->command_orb);
......@@ -3107,7 +2919,7 @@ static int sbp2scsi_abort (Scsi_Cmnd *SCpnt)
/*
* Initiate a fetch agent reset.
*/
sbp2_agent_reset(hi, scsi_id, SBP2_SEND_NO_WAIT);
sbp2_agent_reset(hi, scsi_id, 0);
sbp2scsi_complete_all_commands(hi, scsi_id, DID_BUS_BUSY);
sbp2_spin_unlock(&hi->sbp2_command_lock, flags);
}
......@@ -3127,7 +2939,7 @@ static int sbp2scsi_reset (Scsi_Cmnd *SCpnt)
if (scsi_id) {
SBP2_ERR("Generating sbp2 fetch agent reset");
sbp2_agent_reset(hi, scsi_id, SBP2_SEND_NO_WAIT);
sbp2_agent_reset(hi, scsi_id, 0);
}
return(SUCCESS);
......@@ -3143,7 +2955,7 @@ static int sbp2scsi_biosparam (struct scsi_device *sdev,
#else
static int sbp2scsi_biosparam (Scsi_Disk *disk, kdev_t dev, int geom[])
{
sector_t capacity = disk->capacity;
unsigned capacity = disk->capacity;
#endif
int heads, sectors, cylinders;
......@@ -3208,16 +3020,12 @@ static const char *sbp2scsi_info (struct Scsi_Host *host)
"SBP-2 module load options:\n"
"- Max speed supported: %s\n"
"- Max sectors per I/O supported: %d\n"
"- Max outstanding commands supported: %d\n"
"- Max outstanding commands per lun supported: %d\n"
"- Serialized I/O (debug): %s\n"
"- Exclusive login: %s",
hi->host->driver->name,
version,
hpsb_speedto_str[sbp2_max_speed],
sbp2_max_sectors,
sbp2_max_outstanding_cmds,
sbp2_max_cmds_per_lun,
sbp2_serialize_io ? "yes" : "no",
sbp2_exclusive_login ? "yes" : "no");
......@@ -3242,8 +3050,10 @@ static Scsi_Host_Template scsi_driver_template = {
.eh_host_reset_handler =sbp2scsi_reset,
.bios_param = sbp2scsi_biosparam,
.this_id = -1,
.sg_tablesize = SBP2_MAX_SG_ELEMENTS,
.use_clustering = SBP2_CLUSTERING,
.sg_tablesize = SG_ALL,
.use_clustering = ENABLE_CLUSTERING,
.cmd_per_lun = SBP2_MAX_CMDS_PER_LUN,
.can_queue = SBP2_MAX_SCSI_QUEUE,
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
.use_new_eh_code = TRUE,
#endif
......@@ -3255,16 +3065,12 @@ static int sbp2_module_init(void)
{
SBP2_DEBUG("sbp2_module_init");
/*
* Module load debug option to force one command at a time (serializing I/O)
*/
/* Module load debug option to force one command at a time
* (serializing I/O) */
if (sbp2_serialize_io) {
SBP2_ERR("Driver forced to serialize I/O (serialize_io = 1)");
scsi_driver_template.can_queue = 1;
scsi_driver_template.cmd_per_lun = 1;
} else {
scsi_driver_template.can_queue = sbp2_max_outstanding_cmds;
scsi_driver_template.cmd_per_lun = sbp2_max_cmds_per_lun;
}
/*
......
......@@ -32,33 +32,19 @@
#endif
#define SBP2_DEVICE_NAME "sbp2"
#define SBP2_DEVICE_NAME_SIZE 4
/*
* SBP2 specific structures and defines
*/
#define ORB_FMT_CMD 0x0
#define ORB_FMT_DUMMY 0x3
#define ORB_DIRECTION_WRITE_TO_MEDIA 0x0
#define ORB_DIRECTION_READ_FROM_MEDIA 0x1
#define ORB_DIRECTION_NO_DATA_TRANSFER 0x2
#define ORB_SET_NULL_PTR(value) ((value & 0x1) << 31)
#define ORB_SET_NOTIFY(value) ((value & 0x1) << 31)
#define ORB_SET_RQ_FMT(value) ((value & 0x3) << 29)
#define ORB_SET_RQ_FMT(value) ((value & 0x3) << 29) /* unused ? */
#define ORB_SET_NODE_ID(value) ((value & 0xffff) << 16)
struct sbp2_dummy_orb {
volatile u32 next_ORB_hi;
volatile u32 next_ORB_lo;
u32 reserved1;
u32 reserved2;
u32 notify_rq_fmt;
u8 command_block[12];
};
#define ORB_SET_DATA_SIZE(value) (value & 0xffff)
#define ORB_SET_PAGE_SIZE(value) ((value & 0x7) << 16)
#define ORB_SET_PAGE_TABLE_PRESENT(value) ((value & 0x1) << 19)
......@@ -253,25 +239,15 @@ struct sbp2_status_block {
* Other misc defines
*/
#define SBP2_128KB_BROKEN_FIRMWARE 0xa0b800
#define SBP2_BROKEN_FIRMWARE_MAX_TRANSFER 0x20000
#define SBP2_DEVICE_TYPE_LUN_UNINITIALIZED 0xffffffff
/*
* Flags for SBP-2 functions
*/
#define SBP2_SEND_NO_WAIT 0x00000001
/*
* SCSI specific stuff
*/
#define SBP2_MAX_SG_ELEMENTS SG_ALL
#define SBP2_CLUSTERING ENABLE_CLUSTERING
#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000
#define SBP2SCSI_MAX_SCSI_IDS 16 /* Max sbp2 device instances supported */
#define SBP2SCSI_MAX_OUTSTANDING_CMDS 8 /* Max total outstanding sbp2 commands allowed at a time! */
#define SBP2SCSI_MAX_CMDS_PER_LUN 1 /* Max outstanding sbp2 commands per device - tune as needed */
#define SBP2_MAX_SECTORS 255 /* Max sectors supported */
#ifndef TYPE_SDAD
......@@ -314,26 +290,18 @@ static unchar sbp2scsi_direction_table[0x100] = {
DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN
};
#define SBP2_MAX_REQUEST_PACKETS (sbp2_max_outstanding_cmds * 2)
#define SBP2_MAX_COMMAND_ORBS (sbp2_max_cmds_per_lun * 2)
/*
* Request packets structure (used for sending command and agent reset packets)
*/
struct sbp2_request_packet {
struct list_head list;
struct hpsb_packet *packet;
struct hpsb_queue_struct tq;
void *hi_context;
};
/* This should be safe. If there's more than one LUN per node, we could
* saturate the tlabels, though. */
#define SBP2_MAX_CMDS_PER_LUN 8
#define SBP2_MAX_SCSI_QUEUE (SBP2_MAX_CMDS_PER_LUN * SBP2SCSI_MAX_SCSI_IDS)
#define SBP2_MAX_COMMAND_ORBS SBP2_MAX_SCSI_QUEUE
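/* With SBP2SCSI_MAX_SCSI_IDS at 16, this works out to 8 * 16 = 128
 * commands queued host-wide, and the same number of command ORBs */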
/* This is the two dma types we use for cmd_dma below */
#define CMD_DMA_NONE 0x0
#define CMD_DMA_PAGE 0x1
#define CMD_DMA_SINGLE 0x2
enum cmd_dma_types {
CMD_DMA_NONE,
CMD_DMA_PAGE,
CMD_DMA_SINGLE
};
/*
* Encapsulates all the info necessary for an outstanding command.
......@@ -347,11 +315,11 @@ struct sbp2_command_info {
void (*Current_done)(Scsi_Cmnd *);
/* Also need s/g structure for each sbp2 command */
struct sbp2_unrestricted_page_table scatter_gather_element[SBP2_MAX_SG_ELEMENTS] ____cacheline_aligned;
struct sbp2_unrestricted_page_table scatter_gather_element[SG_ALL] ____cacheline_aligned;
dma_addr_t sge_dma ____cacheline_aligned;
void *sge_buffer;
dma_addr_t cmd_dma;
int dma_type;
enum cmd_dma_types dma_type;
unsigned long dma_size;
int dma_dir;
......@@ -412,7 +380,6 @@ struct scsi_id_instance_data {
spinlock_t sbp2_command_orb_lock;
struct list_head sbp2_command_orb_inuse;
struct list_head sbp2_command_orb_completed;
u32 sbp2_total_command_orbs;
/* Node entry, as retrieved from NodeMgr entries */
struct node_entry *ne;
......@@ -433,10 +400,9 @@ struct sbp2scsi_host_info {
struct hpsb_host *host;
/*
* Spin locks for command processing and packet pool management
* Spin locks for command processing
*/
spinlock_t sbp2_command_lock;
spinlock_t sbp2_request_packet_lock;
/*
* This is the scsi host we register with the scsi mid level.
......@@ -445,21 +411,6 @@ struct sbp2scsi_host_info {
*/
struct Scsi_Host *scsi_host;
/*
* Lists keeping track of inuse/free sbp2_request_packets. These structures are
* used for sending out sbp2 command and agent reset packets. We initially create
* a pool of request packets so that we don't have to do any kmallocs while in critical
* I/O paths.
*/
struct list_head sbp2_req_inuse;
struct list_head sbp2_req_free;
/*
* Here is the pool of request packets. All the hpsb packets (for 1394 bus transactions)
* are allocated at init and simply re-initialized when needed.
*/
struct sbp2_request_packet *request_packet;
/*
* SCSI ID instance data (one for each sbp2 device instance possible)
*/
......@@ -474,13 +425,6 @@ struct sbp2scsi_host_info {
/*
* Various utility prototypes
*/
static int sbp2util_create_request_packet_pool(struct sbp2scsi_host_info *hi);
static void sbp2util_remove_request_packet_pool(struct sbp2scsi_host_info *hi);
static struct sbp2_request_packet *sbp2util_allocate_write_request_packet(struct sbp2scsi_host_info *hi,
struct node_entry *ne, u64 addr,
size_t data_size,
quadlet_t data);
static void sbp2util_free_request_packet(struct sbp2_request_packet *request_packet);
static int sbp2util_create_command_orb_pool(struct scsi_id_instance_data *scsi_id, struct sbp2scsi_host_info *hi);
static void sbp2util_remove_command_orb_pool(struct scsi_id_instance_data *scsi_id, struct sbp2scsi_host_info *hi);
static struct sbp2_command_info *sbp2util_find_command_for_orb(struct scsi_id_instance_data *scsi_id, dma_addr_t orb);
......@@ -523,7 +467,7 @@ static int sbp2_reconnect_device(struct sbp2scsi_host_info *hi, struct scsi_id_i
static int sbp2_logout_device(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id);
static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int destid,
quadlet_t *data, u64 addr, unsigned int length, u16 flags);
static int sbp2_agent_reset(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id, u32 flags);
static int sbp2_agent_reset(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id, int wait);
static int sbp2_create_command_orb(struct sbp2scsi_host_info *hi,
struct scsi_id_instance_data *scsi_id,
struct sbp2_command_info *command,
......@@ -552,8 +496,7 @@ void sbp2scsi_setup(char *str, int *ints);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,44)
static int sbp2scsi_biosparam (struct scsi_device *sdev, struct block_device *dev, sector_t capacity, int geom[]);
#else
static int sbp2scsi_biosparam (struct scsi_device *sdev,
struct block_device *dev, sector_t capacy, int geom[]);
static int sbp2scsi_biosparam (Scsi_Disk *disk, kdev_t dev, int geom[]);
#endif
static int sbp2scsi_abort (Scsi_Cmnd *SCpnt);
static int sbp2scsi_reset (Scsi_Cmnd *SCpnt);
......
......@@ -37,8 +37,7 @@
#include <linux/proc_fs.h>
#include <linux/delay.h>
#include <linux/devfs_fs_kernel.h>
#include <asm/bitops.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/wrapper.h>
#include <linux/vmalloc.h>
......@@ -1249,18 +1248,15 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
int video1394_mmap(struct file *file, struct vm_area_struct *vma)
{
struct file_ctx *ctx = (struct file_ctx *)file->private_data;
struct video_card *video = ctx->video;
struct ti_ohci *ohci = video->ohci;
int res = -EINVAL;
lock_kernel();
ohci = video->ohci;
if (ctx->current_ctx == NULL) {
PRINT(KERN_ERR, ohci->id, "Current iso context not set");
PRINT(KERN_ERR, ctx->video->ohci->id, "Current iso context not set");
} else
res = do_iso_mmap(ohci, ctx->current_ctx, vma);
res = do_iso_mmap(ctx->video->ohci, ctx->current_ctx, vma);
unlock_kernel();
return res;
}
......