Commit b0e15db0 authored by Ben Collins

Merge http://linux.bkbits.net/linux-2.5

into debian.org:/usr/src/kernel/ieee1394-2.6
parents 162ed082 bd6b55b0
......@@ -124,7 +124,7 @@ config IEEE1394_SBP2_PHYS_DMA
config IEEE1394_ETH1394
tristate "Ethernet over 1394"
depends on IEEE1394 && EXPERIMENTAL
depends on IEEE1394 && EXPERIMENTAL && INET
select IEEE1394_CONFIG_ROM_IP1394
select IEEE1394_EXTRA_CONFIG_ROMS
help
......
......@@ -319,7 +319,7 @@ void ohci1394_stop_it_ctx(struct ti_ohci *ohci, int ctx, int synchronous)
control = reg_read(ohci, OHCI1394_IsoXmitContextControlSet + ctx * 16);
if ((control & OHCI1394_CONTEXT_ACTIVE) == 0)
break;
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(1);
}
......@@ -408,7 +408,7 @@ static void stream_shift_packet_lists(unsigned long l)
/* Now that we know the list is non-empty, we can get the head
* of the list without locking, because the process context
* only adds to the tail.
*/
pl = list_entry(s->dma_packet_lists.next, struct packet_list, link);
last = &pl->packets[PACKET_LIST_SIZE - 1];
......@@ -424,7 +424,7 @@ static void stream_shift_packet_lists(unsigned long l)
if (last->db->payload_desc.status == 0) {
HPSB_INFO("weird interrupt...");
return;
}
/* If the last descriptor block does not specify a branch
* address, we have a sample underflow.
......@@ -469,7 +469,7 @@ static struct packet *stream_current_packet(struct stream *s)
return &s->current_packet_list->packets[s->current_packet];
}
static void stream_queue_packet(struct stream *s)
{
s->current_packet++;
......@@ -543,13 +543,13 @@ void packet_initialize(struct packet *p, struct packet *next)
DMA_CTL_OUTPUT_MORE | DMA_CTL_IMMEDIATE | 8;
if (next) {
p->db->payload_desc.control =
DMA_CTL_OUTPUT_LAST | DMA_CTL_BRANCH;
p->db->payload_desc.branch = next->db_bus | 3;
p->db->header_desc.skip = next->db_bus | 3;
}
else {
p->db->payload_desc.control =
DMA_CTL_OUTPUT_LAST | DMA_CTL_BRANCH |
DMA_CTL_UPDATE | DMA_CTL_IRQ;
p->db->payload_desc.branch = 0;
......@@ -580,7 +580,7 @@ struct packet_list *packet_list_alloc(struct stream *s)
for (i = 0; i < PACKET_LIST_SIZE; i++) {
if (i < PACKET_LIST_SIZE - 1)
next = &pl->packets[i + 1];
else
next = NULL;
packet_initialize(&pl->packets[i], next);
}
......@@ -695,7 +695,7 @@ static u32 get_header_bits(struct stream *s, int sub_frame, u32 sample)
case AMDTP_FORMAT_IEC958_PCM:
case AMDTP_FORMAT_IEC958_AC3:
return get_iec958_header_bits(s, sub_frame, sample);
case AMDTP_FORMAT_RAW:
return 0x40;
......@@ -739,18 +739,18 @@ static void fill_packet(struct stream *s, struct packet *packet, int nevents)
/* Fill IEEE1394 headers */
packet->db->header_desc.header[0] =
(IEEE1394_SPEED_100 << 16) | (0x01 << 14) |
(s->iso_channel << 8) | (TCODE_ISO_DATA << 4);
packet->db->header_desc.header[1] = size << 16;
/* Calculate synchronization timestamp (syt). First we
* determine syt_index, that is, the index in the packet of
* the sample for which the timestamp is valid. */
syt_index = (s->syt_interval - s->dbc) & (s->syt_interval - 1);
if (syt_index < nevents) {
syt = ((atomic_read(&s->cycle_count) << 12) |
s->cycle_offset.integer) & 0xffff;
fraction_add(&s->cycle_offset,
&s->cycle_offset, &s->ticks_per_syt_offset);
/* This next addition should be modulo 8000 (0x1f40),
......@@ -763,7 +763,7 @@ static void fill_packet(struct stream *s, struct packet *packet, int nevents)
syt = 0xffff;
atomic_inc(&s->cycle_count2);
/* Fill cip header */
packet->payload->eoh0 = 0;
packet->payload->sid = s->host->host->node_id & 0x3f;
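/* Illustrative note (not part of this patch), added for clarity: with the
 * syt computation above, if s->syt_interval is 8 and s->dbc is 3, then
 * syt_index = (8 - 3) & 7 = 5, i.e. the timestamp applies to the event at
 * index 5 of this packet; the 16-bit syt then packs the low 4 bits of the
 * cycle count (<< 12) together with the 12-bit cycle offset. */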
......@@ -1072,7 +1072,7 @@ void stream_free(struct stream *s)
* that sometimes generates an it transmit interrupt if we
* later re-enable the context.
*/
wait_event_interruptible(s->packet_list_wait,
list_empty(&s->dma_packet_lists));
ohci1394_stop_it_ctx(s->host->ohci, s->iso_tasklet.context, 1);
......@@ -1102,7 +1102,7 @@ static ssize_t amdtp_write(struct file *file, const char *buffer, size_t count,
unsigned char *p;
int i;
size_t length;
if (s->packet_pool == NULL)
return -EBADFD;
......@@ -1123,16 +1123,16 @@ static ssize_t amdtp_write(struct file *file, const char *buffer, size_t count,
return -EFAULT;
if (s->input->length < s->input->size)
continue;
stream_flush(s);
if (s->current_packet_list != NULL)
continue;
if (file->f_flags & O_NONBLOCK)
return i + length > 0 ? i + length : -EAGAIN;
if (wait_event_interruptible(s->packet_list_wait,
!list_empty(&s->free_packet_lists)))
return -EINTR;
}
......@@ -1152,7 +1152,7 @@ static int amdtp_ioctl(struct inode *inode, struct file *file,
case AMDTP_IOC_CHANNEL:
if (copy_from_user(&cfg, (struct amdtp_ioctl *) arg, sizeof cfg))
return -EFAULT;
else
return stream_configure(s, cmd, &cfg);
default:
......@@ -1266,6 +1266,7 @@ static int __init amdtp_init_module (void)
{
cdev_init(&amdtp_cdev, &amdtp_fops);
amdtp_cdev.owner = THIS_MODULE;
kobject_set_name(&amdtp_cdev.kobj, "amdtp");
if (cdev_add(&amdtp_cdev, IEEE1394_AMDTP_DEV, 16)) {
HPSB_ERR("amdtp: unable to add char device");
return -EIO;
......
......@@ -24,7 +24,7 @@
*
* The dimension field specifies the dimension of the signal, that is,
* the number of audio channels. Only AMDTP_FORMAT_RAW supports
* settings greater than 2.
*
* The mode field specifies which transmission mode to use. The AMDTP
* specifies two different transmission modes: blocking and
......
......@@ -187,14 +187,14 @@ static int pcr_read(struct hpsb_host *host, int nodeid, quadlet_t *buf,
int csraddr = addr - CSR_REGISTER_BASE;
int plug;
struct cmp_host *ch;
if (length != 4)
return RCODE_TYPE_ERROR;
ch = hpsb_get_hostinfo(&cmp_highlevel, host);
if (csraddr == 0x900) {
*buf = cpu_to_be32(ch->u.ompr_quadlet);
return RCODE_COMPLETE;
}
else if (csraddr < 0x904 + ch->u.ompr.nplugs * 4) {
plug = (csraddr - 0x904) / 4;
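/* Illustrative note (not part of this patch): assuming at least three
 * output plugs are present, a quadlet read at CSR_REGISTER_BASE + 0x90c
 * falls past the oMPR at 0x900 and decodes to plug (0x90c - 0x904) / 4 == 2. */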
......@@ -206,7 +206,7 @@ static int pcr_read(struct hpsb_host *host, int nodeid, quadlet_t *buf,
}
else if (csraddr == 0x980) {
*buf = cpu_to_be32(ch->v.impr_quadlet);
return RCODE_COMPLETE;
}
else if (csraddr < 0x984 + ch->v.impr.nplugs * 4) {
plug = (csraddr - 0x984) / 4;
......@@ -225,10 +225,10 @@ static int pcr_lock(struct hpsb_host *host, int nodeid, quadlet_t *store,
struct cmp_host *ch;
ch = hpsb_get_hostinfo(&cmp_highlevel, host);
if (extcode != EXTCODE_COMPARE_SWAP)
return RCODE_TYPE_ERROR;
if (csraddr == 0x900) {
/* FIXME: Ignore writes to bits 30-31 and 0-7 */
*store = cpu_to_be32(ch->u.ompr_quadlet);
......
......@@ -130,23 +130,23 @@ static void host_reset(struct hpsb_host *host)
host->csr.state &= ~0x100;
}
host->csr.topology_map[1] =
cpu_to_be32(be32_to_cpu(host->csr.topology_map[1]) + 1);
host->csr.topology_map[2] = cpu_to_be32(host->node_count << 16
| host->selfid_count);
host->csr.topology_map[0] =
cpu_to_be32((host->selfid_count + 2) << 16
| csr_crc16(host->csr.topology_map + 1,
host->selfid_count + 2));
host->csr.speed_map[1] =
cpu_to_be32(be32_to_cpu(host->csr.speed_map[1]) + 1);
host->csr.speed_map[0] = cpu_to_be32(0x3f1 << 16
| csr_crc16(host->csr.speed_map+1,
0x3f1));
}
/*
* HI == seconds (bits 0:2)
* LO == fraction units of 1/8000 of a second, as per 1394 (bits 19:31)
*
......@@ -161,7 +161,7 @@ static void host_reset(struct hpsb_host *host)
static inline void calculate_expire(struct csr_control *csr)
{
unsigned long units;
/* Take the seconds, and convert to units */
units = (unsigned long)(csr->split_timeout_hi & 0x07) << 13;
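/* Illustrative note (not part of this patch): the shift by 13 treats one
 * second as 1 << 13 = 8192 units, a power-of-two approximation of the
 * 8000 units (1/8000 s each) per second described in the comment above;
 * e.g. split_timeout_hi == 1 contributes 8192 units. */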
......@@ -288,7 +288,7 @@ static void remove_host(struct hpsb_host *host)
}
int hpsb_update_config_rom(struct hpsb_host *host, const quadlet_t *new_rom,
size_t buffersize, unsigned char rom_version)
{
unsigned long flags;
......@@ -296,7 +296,7 @@ int hpsb_update_config_rom(struct hpsb_host *host, const quadlet_t *new_rom,
HPSB_NOTICE("hpsb_update_config_rom() is deprecated");
spin_lock_irqsave(&host->csr.lock, flags);
if (rom_version != host->csr.generation)
ret = -1;
else if (buffersize > host->csr.rom->cache_head->size)
......@@ -329,10 +329,10 @@ static int read_maps(struct hpsb_host *host, int nodeid, quadlet_t *buffer,
int csraddr = addr - CSR_REGISTER_BASE;
const char *src;
spin_lock_irqsave(&host->csr.lock, flags);
if (csraddr < CSR_SPEED_MAP) {
src = ((char *)host->csr.topology_map) + csraddr
- CSR_TOPOLOGY_MAP;
} else {
src = ((char *)host->csr.speed_map) + csraddr - CSR_SPEED_MAP;
......@@ -352,7 +352,7 @@ static int read_regs(struct hpsb_host *host, int nodeid, quadlet_t *buf,
int csraddr = addr - CSR_REGISTER_BASE;
int oldcycle;
quadlet_t ret;
if ((csraddr | length) & 0x3)
return RCODE_TYPE_ERROR;
......@@ -404,7 +404,7 @@ static int read_regs(struct hpsb_host *host, int nodeid, quadlet_t *buf,
/* cycle time wrapped around */
host->csr.bus_time += (1 << 7);
}
*(buf++) = cpu_to_be32(host->csr.bus_time
| (host->csr.cycle_time >> 25));
out;
......@@ -464,7 +464,7 @@ static int write_regs(struct hpsb_host *host, int nodeid, int destid,
quadlet_t *data, u64 addr, size_t length, u16 flags)
{
int csraddr = addr - CSR_REGISTER_BASE;
if ((csraddr | length) & 0x3)
return RCODE_TYPE_ERROR;
......@@ -494,12 +494,12 @@ static int write_regs(struct hpsb_host *host, int nodeid, int destid,
return RCODE_ADDRESS_ERROR;
case CSR_SPLIT_TIMEOUT_HI:
host->csr.split_timeout_hi =
be32_to_cpu(*(data++)) & 0x00000007;
calculate_expire(&host->csr);
out;
case CSR_SPLIT_TIMEOUT_LO:
host->csr.split_timeout_lo =
be32_to_cpu(*(data++)) & 0xfff80000;
calculate_expire(&host->csr);
out;
......
/*
* csr1212.c -- IEEE 1212 Control and Status Register support for Linux
*
* Copyright (C) 2003 Francois Retief <fgretief@sun.ac.za>
* Steve Kinneberg <kinnebergsteve@acmsystems.com>
*
......@@ -173,7 +173,7 @@ struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
if (!csr)
return NULL;
csr->cache_head =
csr1212_rom_cache_malloc(CSR1212_CONFIG_ROM_SPACE_OFFSET,
CSR1212_CONFIG_ROM_SPACE_SIZE);
if (!csr->cache_head) {
......@@ -238,7 +238,7 @@ static struct csr1212_keyval *csr1212_new_keyval(u_int8_t type, u_int8_t key)
struct csr1212_keyval *csr1212_new_immediate(u_int8_t key, u_int32_t value)
{
struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_IMMEDIATE, key);
if (!kv)
return NULL;
......@@ -253,11 +253,10 @@ struct csr1212_keyval *csr1212_new_leaf(u_int8_t key, const void *data, size_t d
if (!kv)
return NULL;
if (data_len > 0) {
kv->value.leaf.data = CSR1212_MALLOC(data_len);
if (!kv->value.leaf.data)
{
if (!kv->value.leaf.data) {
CSR1212_FREE(kv);
return NULL;
}
......@@ -572,7 +571,7 @@ struct csr1212_keyval *csr1212_new_modifiable_descriptor_leaf(u_int16_t max_size
CSR1212_MODIFIABLE_DESCRIPTOR_SET_MAX_SIZE(kv, max_size);
CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_HI(kv, address);
CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_LO(kv, address);
return kv;
}
......@@ -621,7 +620,7 @@ struct csr1212_keyval *csr1212_new_keyword_leaf(int strc, const char *strv[])
/* make sure last quadlet is zeroed out */
*((u_int32_t*)&(buffer[(data_len - 1) & ~0x3])) = 0;
/* Copy keyword(s) into leaf data buffer */
for (i = 0; i < strc; i++) {
int len = strlen(strv[i]) + 1;
......@@ -643,7 +642,7 @@ void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
return;
dentry = csr1212_find_keyval(dir, kv);
if (!dentry)
return;
......@@ -788,8 +787,7 @@ static int csr1212_append_new_cache(struct csr1212_csr *csr, size_t romsize)
return CSR1212_ENOMEM;
}
if (csr1212_attach_keyval_to_directory(csr->root_kv, cache->ext_rom) != CSR1212_SUCCESS)
{
if (csr1212_attach_keyval_to_directory(csr->root_kv, cache->ext_rom) != CSR1212_SUCCESS) {
csr1212_release_keyval(cache->ext_rom);
csr->ops->release_addr(csr_addr, csr->private);
CSR1212_FREE(cache);
......@@ -1119,12 +1117,11 @@ int csr1212_generate_csr_image(struct csr1212_csr *csr)
/* Remove unused, excess cache regions */
while (cache) {
struct csr1212_csr_rom_cache *oc = cache;
cache = cache->next;
csr1212_remove_cache(csr, oc);
}
/* Go through the list backward so that when done, the correct CRC
* will be calculated for the Extended ROM areas. */
for(cache = csr->cache_tail; cache; cache = cache->prev) {
......@@ -1263,7 +1260,7 @@ static inline int csr1212_parse_dir_entry(struct csr1212_keyval *dir,
ret = CSR1212_ENOMEM;
goto fail;
}
k->refcnt = 0; /* Don't keep local reference when parsing. */
break;
......@@ -1450,7 +1447,7 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
newcr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
if (!newcr)
return CSR1212_ENOMEM;
newcr->offset_start = cache_index & ~(csr->max_rom - 1);
newcr->offset_end = newcr->offset_start;
newcr->next = cr;
......@@ -1474,7 +1471,7 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
newcr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
if (!newcr)
return CSR1212_ENOMEM;
newcr->offset_start = cache_index & ~(csr->max_rom - 1);
newcr->offset_end = newcr->offset_start;
newcr->prev = cr;
......
/*
* csr1212.h -- IEEE 1212 Control and Status Register support for Linux
*
* Copyright (C) 2003 Francois Retief <fgretief@sun.ac.za>
* Steve Kinneberg <kinnebergsteve@acmsystems.com>
*
......@@ -37,6 +37,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#define CSR1212_MALLOC(size) kmalloc((size), in_interrupt() ? GFP_ATOMIC : GFP_KERNEL)
#define CSR1212_FREE(ptr) kfree(ptr)
......@@ -440,7 +441,7 @@ static inline u_int32_t *CSR1212_ICON_DESCRIPTOR_LEAF_PIXELS(struct csr1212_keyv
static const int pd[4] = { 0, 4, 16, 256 };
static const int cs[16] = { 4, 2 };
int ps = pd[CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE_DEPTH(kv)];
return &kv->value.leaf.data[5 +
(ps * cs[CSR1212_ICON_DESCRIPTOR_LEAF_COLOR_SPACE(kv)]) /
sizeof(u_int32_t)];
......@@ -705,7 +706,7 @@ static inline void csr1212_release_keyval(struct csr1212_keyval *kv)
* _kv is a struct csr1212_keyval * that'll point to the current keyval (loop index).
* _dir is a struct csr1212_keyval * that points to the directory to be looped.
* _pos is a struct csr1212_dentry * that is used internally for indexing.
*
* kv will be NULL upon exit of the loop.
*/
#define csr1212_for_each_dir_entry(_csr, _kv, _dir, _pos) \
......
......@@ -96,7 +96,7 @@ int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_d
/* fill scatter/gather list with pages */
for (i = 0; i < dma->n_pages; i++) {
unsigned long va = (unsigned long) dma->kvirt + (i << PAGE_SHIFT);
dma->sglist[i].page = vmalloc_to_page((void *)va);
dma->sglist[i].length = PAGE_SIZE;
}
......@@ -196,6 +196,8 @@ void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset, un
pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first], last - first + 1, dma->direction);
}
#ifdef CONFIG_MMU
/* nopage() handler for mmap access */
static struct page*
......@@ -251,3 +253,12 @@ int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_st
return 0;
}
#else /* CONFIG_MMU */
int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma)
{
return -EINVAL;
}
#endif /* CONFIG_MMU */
......@@ -14,7 +14,7 @@
#include <asm/scatterlist.h>
/* struct dma_prog_region
a small, physically-contiguous DMA buffer with random-access,
synchronous usage characteristics
*/
......@@ -37,7 +37,7 @@ static inline dma_addr_t dma_prog_region_offset_to_bus(struct dma_prog_region *p
}
/* struct dma_region
a large, non-physically-contiguous DMA buffer with streaming,
asynchronous usage characteristics
*/
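/* Illustrative usage sketch (not part of this patch), based only on the
 * dma_region calls that appear elsewhere in this patch; the function and
 * variable names below are made up and error handling is trimmed. */
static int dma_region_example(struct pci_dev *pdev)
{
	struct dma_region buf;
	dma_addr_t bus;

	memset(&buf, 0, sizeof(buf));                 /* start from a clean region */
	if (dma_region_alloc(&buf, 64 * 1024, pdev, PCI_DMA_TODEVICE))
		return -ENOMEM;

	memset(buf.kvirt, 0, 64 * 1024);              /* CPU fills the buffer via kvirt */
	dma_region_sync_for_device(&buf, 0, 64 * 1024);
	bus = dma_region_offset_to_bus(&buf, 0);      /* bus address to hand to the device */
	(void) bus;

	dma_region_free(&buf);
	return 0;
}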
......
......@@ -34,11 +34,11 @@
/* none of this is exposed to user-space */
/*
the 8-byte CIP (Common Isochronous Packet) header that precedes
each packet of DV data.
See the IEC 61883 standard.
*/
struct CIP_header { unsigned char b[8]; };
......@@ -71,10 +71,10 @@ static inline void fill_cip_header(struct CIP_header *cip,
/*
DMA commands used to program the OHCI's DMA engine
See the Texas Instruments OHCI 1394 chipset documentation.
*/
struct output_more_immediate { u32 q[8]; };
......@@ -95,17 +95,17 @@ static inline void fill_output_more_immediate(struct output_more_immediate *omi,
omi->q[1] = 0;
omi->q[2] = 0;
omi->q[3] = 0;
/* IT packet header */
omi->q[4] = cpu_to_le32( (0x0 << 16) /* IEEE1394_SPEED_100 */
| (tag << 14)
| (channel << 8)
| (TCODE_ISO_DATA << 4)
| (sync_tag) );
/* reserved field; mimic behavior of my Sony DSR-40 */
omi->q[5] = cpu_to_le32((payload_size << 16) | (0x7F << 8) | 0xA0);
omi->q[6] = 0;
omi->q[7] = 0;
}
......@@ -186,11 +186,11 @@ static inline void fill_input_last(struct input_last *il,
/*
A "DMA descriptor block" consists of several contiguous DMA commands.
struct DMA_descriptor_block encapsulates all of the commands necessary
to send one packet of DV data.
There are three different types of these blocks:
1) command to send an empty packet (CIP header only, no DV data):
......@@ -225,44 +225,44 @@ struct DMA_descriptor_block {
union {
struct {
/* iso header, common to all output block types */
struct output_more_immediate omi;
union {
/* empty packet */
struct {
struct output_last ol; /* CIP header */
} empty;
/* full packet */
struct {
struct output_more om; /* CIP header */
union {
/* payload does not cross page boundary */
struct {
struct output_last ol; /* data payload */
} nocross;
/* payload crosses page boundary */
struct {
struct output_more om; /* data payload */
struct output_last ol; /* data payload */
} cross;
} u;
} full;
} u;
} out;
struct {
struct input_last il;
} in;
} u;
/* ensure that PAGE_SIZE % sizeof(struct DMA_descriptor_block) == 0
by padding out to 128 bytes */
u32 __pad__[12];
};
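/* Illustrative sketch (not part of this patch): one way to check the padding
 * invariant stated above at compile time, using the classic negative-array
 * trick; the identifier names are made up. */
static inline void dma_descriptor_block_layout_check(void)
{
	/* fails to compile if a page does not hold a whole number of blocks */
	char page_holds_whole_blocks
		[(PAGE_SIZE % sizeof(struct DMA_descriptor_block)) == 0 ? 1 : -1];
	(void) page_holds_whole_blocks;
}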
......@@ -281,7 +281,7 @@ struct frame {
/* index of this frame in video_card->frames[] */
unsigned int frame_num;
/* FRAME_CLEAR - DMA program not set up, waiting for data
FRAME_READY - DMA program written, ready to transmit
Changes to these should be locked against the interrupt
......@@ -290,7 +290,7 @@ struct frame {
FRAME_CLEAR = 0,
FRAME_READY
} state;
/* whether this frame has been DMA'ed already; used only from
the IRQ handler to determine whether the frame can be reset */
int done;
......@@ -299,7 +299,7 @@ struct frame {
/* kernel virtual pointer to the start of this frame's data in
the user ringbuffer. Use only for CPU access; to get the DMA
bus address you must go through the video->user_dma mapping */
unsigned long data;
/* Max # of packets per frame */
#define MAX_PACKETS 500
......@@ -310,7 +310,7 @@ struct frame {
struct CIP_header *header_pool;
dma_addr_t header_pool_dma;
/* a physically contiguous memory pool for allocating DMA
descriptor blocks; usually around 64KB in size
!descriptor_pool must be aligned to PAGE_SIZE! */
......@@ -338,7 +338,7 @@ struct frame {
/* pointer to the first packet's CIP header (where the timestamp goes) */
struct CIP_header *cip_syt1;
/* pointer to the second packet's CIP header
(only set if the first packet was empty) */
struct CIP_header *cip_syt2;
......@@ -384,7 +384,7 @@ static void frame_delete(struct frame *f);
static void frame_reset(struct frame *f);
/* struct video_card contains all data associated with one instance
of the dv1394 driver
*/
enum modes {
MODE_RECEIVE,
......@@ -411,7 +411,7 @@ struct video_card {
u32 ohci_IsoXmitContextControlSet;
u32 ohci_IsoXmitContextControlClear;
u32 ohci_IsoXmitCommandPtr;
/* OHCI card IR DMA context number, -1 if not in use */
struct ohci1394_iso_tasklet ir_tasklet;
int ohci_ir_ctx;
......@@ -421,10 +421,10 @@ struct video_card {
u32 ohci_IsoRcvContextControlClear;
u32 ohci_IsoRcvCommandPtr;
u32 ohci_IsoRcvContextMatch;
/* CONCURRENCY CONTROL */
/* there are THREE levels of locking associated with video_card. */
/*
......@@ -435,7 +435,7 @@ struct video_card {
*/
unsigned long open;
/*
/*
2) the spinlock - this provides mutual exclusion between the interrupt
handler and process-context operations. Generally you must take the
spinlock under the following conditions:
......@@ -458,7 +458,7 @@ struct video_card {
/* flag to prevent spurious interrupts (which OHCI seems to
generate a lot :) from accessing the struct */
int dma_running;
/*
3) the sleeping semaphore 'sem' - this is used from process context only,
to serialize various operations on the video_card. Even though only one
......@@ -477,24 +477,24 @@ struct video_card {
/* support asynchronous I/O signals (SIGIO) */
struct fasync_struct *fasync;
/* the large, non-contiguous (rvmalloc()) ringbuffer for DV
data, exposed to user-space via mmap() */
unsigned long dv_buf_size;
struct dma_region dv_buf;
/* next byte in the ringbuffer that a write() call will fill */
size_t write_off;
struct frame *frames[DV1394_MAX_FRAMES];
/* n_frames also serves as an indicator that this struct video_card is
initialized and ready to run DMA buffers */
int n_frames;
/* this is the frame that is currently "owned" by the OHCI DMA controller
(set to -1 iff DMA is not running)
! must lock against the interrupt handler when accessing it !
......@@ -511,7 +511,6 @@ struct video_card {
The interrupt handler will NEVER advance active_frame to a
frame that is not READY.
*/
int active_frame;
int first_run;
......@@ -521,10 +520,10 @@ struct video_card {
/* altered ONLY from process context. Must check first_clear_frame->state;
if it's READY, that means the ringbuffer is full with READY frames;
if it's CLEAR, that means one or more ringbuffer frames are CLEAR */
unsigned int first_clear_frame;
/* altered both by process and interrupt */
unsigned int n_clear_frames;
/* only altered by the interrupt */
unsigned int dropped_frames;
......@@ -548,17 +547,17 @@ struct video_card {
/* the isochronous channel to use, -1 if video card is inactive */
int channel;
/* physically contiguous packet ringbuffer for receive */
struct dma_region packet_buf;
unsigned long packet_buf_size;
unsigned int current_packet;
int first_frame; /* received first start frame marker? */
enum modes mode;
};
/*
if the video_card is not initialized, then the ONLY fields that are valid are:
ohci
open
......@@ -575,7 +574,7 @@ static int do_dv1394_init_default(struct video_card *video);
static void do_dv1394_shutdown(struct video_card *video, int free_user_buf);
/* NTSC empty packet rate accurate to within 0.01%,
calibrated against a Sony DSR-40 DVCAM deck */
#define CIP_N_NTSC 68000000
......
......@@ -47,11 +47,11 @@
TODO:
- tunable frame-drop behavior: either loop last frame, or halt transmission
- use a scatter/gather buffer for DMA programs (f->descriptor_pool)
so that we don't rely on allocating 64KB of contiguous kernel memory
via pci_alloc_consistent()
DONE:
- during reception, better handling of dropped frames and continuity errors
- during reception, prevent DMA from bypassing the irq tasklets
......@@ -82,7 +82,7 @@
- expose NTSC and PAL as separate devices (can be overridden)
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/list.h>
......@@ -117,7 +117,7 @@
#include "nodemgr.h"
#include "hosts.h"
#include "ieee1394_core.h"
#include "highlevel.h"
#include "highlevel.h"
#include "dv1394.h"
#include "dv1394-private.h"
......@@ -215,7 +215,7 @@ static struct frame* frame_new(unsigned int frame_num, struct video_card *video)
debug_printk("dv1394: frame_new: allocated CIP header pool at virt 0x%08lx (contig) dma 0x%08lx size %ld\n",
(unsigned long) f->header_pool, (unsigned long) f->header_pool_dma, PAGE_SIZE);
f->descriptor_pool_size = MAX_PACKETS * sizeof(struct DMA_descriptor_block);
/* make it an even # of pages */
f->descriptor_pool_size += PAGE_SIZE - (f->descriptor_pool_size%PAGE_SIZE);
......@@ -228,10 +228,10 @@ static struct frame* frame_new(unsigned int frame_num, struct video_card *video)
kfree(f);
return NULL;
}
debug_printk("dv1394: frame_new: allocated DMA program memory at virt 0x%08lx (contig) dma 0x%08lx size %ld\n",
(unsigned long) f->descriptor_pool, (unsigned long) f->descriptor_pool_dma, f->descriptor_pool_size);
f->data = 0;
frame_reset(f);
......@@ -248,9 +248,9 @@ static void frame_delete(struct frame *f)
/*
frame_prepare() - build the DMA program for transmitting
Frame_prepare() must be called OUTSIDE the video->spinlock.
However, frame_prepare() must still be serialized, so
it should be called WITH the video->sem taken.
......@@ -265,7 +265,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
dma_addr_t block_dma;
struct CIP_header *cip;
dma_addr_t cip_dma;
unsigned int n_descriptors, full_packets, packets_per_frame, payload_size;
/* these flags denote packets that need special attention */
......@@ -278,7 +278,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
unsigned long irq_flags;
irq_printk("frame_prepare( %d ) ---------------------\n", this_frame);
full_packets = 0;
......@@ -304,7 +304,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
return;
}
/* the block surely won't cross a page boundary,
since an even number of descriptor_blocks fit on a page */
block = &(f->descriptor_pool[f->n_packets]);
......@@ -312,22 +312,22 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
to the kernel base address of the descriptor pool
+ DMA base address of the descriptor pool */
block_dma = ((unsigned long) block - (unsigned long) f->descriptor_pool) + f->descriptor_pool_dma;
/* the whole CIP pool fits on one page, so no worries about boundaries */
if ( ((unsigned long) &(f->header_pool[f->n_packets]) - (unsigned long) f->header_pool)
> PAGE_SIZE) {
printk(KERN_ERR "dv1394: FATAL ERROR: no room to allocate CIP header\n");
return;
}
cip = &(f->header_pool[f->n_packets]);
/* DMA address of the CIP header = offset of cip
relative to kernel base address of the header pool
+ DMA base address of the header pool */
cip_dma = (unsigned long) cip % PAGE_SIZE + f->header_pool_dma;
/* is this an empty packet? */
if (video->cip_accum > (video->cip_d - video->cip_n)) {
......@@ -362,7 +362,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
for this purpose, because that would leave very little time to set
the timestamp before DMA starts on the next frame.
*/
if (f->n_packets == 0) {
first_packet = 1;
} else if ( full_packets == (packets_per_frame-1) ) {
......@@ -370,7 +370,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
} else if (f->n_packets == packets_per_frame) {
mid_packet = 1;
}
/********************/
/* setup CIP header */
......@@ -396,10 +396,10 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
fill_cip_header(cip,
/* the node ID number of the OHCI card */
reg_read(video->ohci, OHCI1394_NodeID) & 0x3F,
video->continuity_counter,
video->pal_or_ntsc,
0xFFFF /* the timestamp is filled in later */);
/* advance counter, only for full packets */
if ( ! empty_packet )
video->continuity_counter++;
......@@ -423,7 +423,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
sizeof(struct CIP_header), /* data size */
cip_dma);
if (first_packet)
f->frame_begin_timestamp = &(block->u.out.u.empty.ol.q[3]);
else if (mid_packet)
......@@ -445,7 +445,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
sizeof(struct CIP_header), /* data size */
cip_dma);
/* third (and possibly fourth) descriptor - for DV data */
/* the 480-byte payload can cross a page boundary; if so,
we need to split it into two DMA descriptors */
......@@ -464,9 +464,9 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
data_p - (unsigned long) video->dv_buf.kvirt));
fill_output_last( &(block->u.out.u.full.u.cross.ol),
/* want completion status on all interesting packets */
(first_packet || mid_packet || last_packet) ? 1 : 0,
/* want interrupt on all interesting packets */
(first_packet || mid_packet || last_packet) ? 1 : 0,
......@@ -492,14 +492,14 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
n_descriptors = 5;
if (first_packet)
f->first_n_descriptors = n_descriptors;
full_packets++;
} else {
/* fits on one page */
fill_output_last( &(block->u.out.u.full.u.nocross.ol),
/* want completion status on all interesting packets */
(first_packet || mid_packet || last_packet) ? 1 : 0,
......@@ -508,11 +508,11 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
480, /* data size (480 bytes of DV data) */
/* DMA address of data_p */
dma_region_offset_to_bus(&video->dv_buf,
data_p - (unsigned long) video->dv_buf.kvirt));
if (first_packet)
f->frame_begin_timestamp = &(block->u.out.u.full.u.nocross.ol.q[3]);
else if (mid_packet)
......@@ -531,8 +531,8 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
full_packets++;
}
}
/* link this descriptor block into the DMA program by filling in
the branch address of the previous block */
/* note: we are not linked into the active DMA chain yet */
......@@ -545,10 +545,10 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
f->n_packets++;
}
/* when we first assemble a new frame, set the final branch
to loop back up to the top */
*(f->frame_end_branch) = cpu_to_le32(f->descriptor_pool_dma | f->first_n_descriptors);
......@@ -572,11 +572,11 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
this_frame, video->active_frame, video->n_clear_frames, video->first_clear_frame, last_frame);
irq_printk(" begin_ts %08lx mid_ts %08lx end_ts %08lx end_br %08lx\n",
(unsigned long) f->frame_begin_timestamp,
(unsigned long) f->mid_frame_timestamp,
(unsigned long) f->frame_end_timestamp,
(unsigned long) f->frame_end_branch);
if (video->active_frame != -1) {
/* if DMA is already active, we are almost done */
......@@ -589,7 +589,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
/* this write MUST precede the next one, or we could silently drop frames */
wmb();
/* disable the want_status semaphore on the last packet */
temp = le32_to_cpu(*(video->frames[last_frame]->frame_end_branch - 2));
temp &= 0xF7CFFFFF;
......@@ -605,7 +605,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
dropped frame. Hopefully this window is too
small to really matter, and the consequence
is rather harmless. */
irq_printk(" new frame %d linked onto DMA chain\n", this_frame);
......@@ -614,13 +614,13 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
}
} else {
u32 transmit_sec, transmit_cyc;
u32 ts_cyc, ts_off;
/* DMA is stopped, so this is the very first frame */
video->active_frame = this_frame;
/* set CommandPtr to address and size of first descriptor block */
reg_write(video->ohci, video->ohci_IsoXmitCommandPtr,
video->frames[video->active_frame]->descriptor_pool_dma |
......@@ -641,7 +641,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
transmit_sec += transmit_cyc/8000;
transmit_cyc %= 8000;
ts_off = ct_off;
ts_cyc = transmit_cyc + 3;
ts_cyc %= 8000;
......@@ -657,7 +657,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
f->cip_syt2->b[6] = f->assigned_timestamp >> 8;
f->cip_syt2->b[7] = f->assigned_timestamp & 0xFF;
}
/* --- start DMA --- */
/* clear all bits in ContextControl register */
......@@ -668,8 +668,8 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
/* the OHCI card has the ability to start ISO transmission on a
particular cycle (start-on-cycle). This way we can ensure that
the first DV frame will have an accurate timestamp.
However, start-on-cycle only appears to work if the OHCI card
is cycle master! Since the consequences of messing up the first
timestamp are minimal*, just disable start-on-cycle for now.
......@@ -690,7 +690,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
/* set the 'run' bit */
reg_write(video->ohci, video->ohci_IsoXmitContextControlSet, 0x8000);
flush_pci_write(video->ohci);
/* --- DMA should be running now --- */
debug_printk(" Cycle = %4u ContextControl = %08x CmdPtr = %08x\n",
......@@ -715,19 +715,19 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
i++;
}
printk("set = %08x, cmdPtr = %08x\n",
printk("set = %08x, cmdPtr = %08x\n",
reg_read(video->ohci, video->ohci_IsoXmitContextControlSet),
reg_read(video->ohci, video->ohci_IsoXmitCommandPtr)
);
if ( ! (reg_read(video->ohci, video->ohci_IsoXmitContextControlSet) & (1 << 10)) ) {
printk("DMA did NOT go active after 20ms, event = %x\n",
printk("DMA did NOT go active after 20ms, event = %x\n",
reg_read(video->ohci, video->ohci_IsoXmitContextControlSet) & 0x1F);
} else
printk("DMA is RUNNING!\n");
}
#endif
}
......@@ -738,11 +738,11 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
/*** RECEIVE FUNCTIONS *****************************************************/
/*
frame method put_packet
map and copy the packet data to its location in the frame
based upon DIF section and sequence
*/
static void inline
......@@ -754,28 +754,28 @@ frame_put_packet (struct frame *f, struct packet *p)
/* sanity check */
if (dif_sequence > 11 || dif_block > 149) return;
switch (section_type) {
case 0: /* 1 Header block */
memcpy( (void *) f->data + dif_sequence * 150 * 80, p->data, 480);
break;
case 1: /* 2 Subcode blocks */
memcpy( (void *) f->data + dif_sequence * 150 * 80 + (1 + dif_block) * 80, p->data, 480);
break;
case 2: /* 3 VAUX blocks */
memcpy( (void *) f->data + dif_sequence * 150 * 80 + (3 + dif_block) * 80, p->data, 480);
break;
case 3: /* 9 Audio blocks interleaved with video */
memcpy( (void *) f->data + dif_sequence * 150 * 80 + (6 + dif_block * 16) * 80, p->data, 480);
break;
case 4: /* 135 Video blocks interleaved with audio */
memcpy( (void *) f->data + dif_sequence * 150 * 80 + (7 + (dif_block / 15) + dif_block) * 80, p->data, 480);
break;
default: /* we can not handle any other data */
break;
}
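/* Illustrative worked example (not part of this patch): an audio block
 * (section type 3) with dif_sequence == 2 and dif_block == 4 is copied to
 * f->data + 2*150*80 + (6 + 4*16)*80 = 24000 + 5600 = 29600 bytes. */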
......@@ -786,25 +786,25 @@ static void start_dma_receive(struct video_card *video)
{
if (video->first_run == 1) {
video->first_run = 0;
/* start DMA once all of the frames are READY */
video->n_clear_frames = 0;
video->first_clear_frame = -1;
video->current_packet = 0;
video->active_frame = 0;
/* reset iso recv control register */
reg_write(video->ohci, video->ohci_IsoRcvContextControlClear, 0xFFFFFFFF);
wmb();
/* clear bufferFill, set isochHeader and speed (0=100) */
reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, 0x40000000);
/* match on all tags, listen on channel */
reg_write(video->ohci, video->ohci_IsoRcvContextMatch, 0xf0000000 | video->channel);
/* address and first descriptor block + Z=1 */
reg_write(video->ohci, video->ohci_IsoRcvCommandPtr,
video->frames[0]->descriptor_pool_dma | 1); /* Z=1 */
wmb();
......@@ -813,13 +813,13 @@ static void start_dma_receive(struct video_card *video)
/* run */
reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, 0x8000);
flush_pci_write(video->ohci);
debug_printk("dv1394: DMA started\n");
#if DV1394_DEBUG_LEVEL >= 2
{
int i;
for (i = 0; i < 1000; ++i) {
mdelay(1);
if (reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & (1 << 10)) {
......@@ -828,15 +828,14 @@ static void start_dma_receive(struct video_card *video)
}
}
if ( reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & (1 << 11) ) {
printk("DEAD, event = %x\n",
printk("DEAD, event = %x\n",
reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & 0x1F);
} else
printk("RUNNING!\n");
}
#endif
}
else if ( reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & (1 << 11) ) {
debug_printk("DEAD, event = %x\n",
} else if ( reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & (1 << 11) ) {
debug_printk("DEAD, event = %x\n",
reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & 0x1F);
/* wake */
......@@ -845,7 +844,7 @@ static void start_dma_receive(struct video_card *video)
}
/*
receive_packets() - build the DMA program for receiving
*/
......@@ -875,24 +874,24 @@ static void receive_packets(struct video_card *video)
/* locate a descriptor block and packet from the buffer */
block = &(f->descriptor_pool[i]);
block_dma = ((unsigned long) block - (unsigned long) f->descriptor_pool) + f->descriptor_pool_dma;
data = ((struct packet*)video->packet_buf.kvirt) + f->frame_num * MAX_PACKETS + i;
data_dma = dma_region_offset_to_bus( &video->packet_buf,
((unsigned long) data - (unsigned long) video->packet_buf.kvirt) );
/* setup DMA descriptor block */
want_interrupt = ((i % (MAX_PACKETS/2)) == 0 || i == (MAX_PACKETS-1));
fill_input_last( &(block->u.in.il), want_interrupt, 512, data_dma);
/* link descriptors */
last_branch_address = f->frame_end_branch;
if (last_branch_address != NULL)
*(last_branch_address) = cpu_to_le32(block_dma | 1); /* set Z=1 */
f->frame_end_branch = &(block->u.in.il.q[2]);
}
} /* next j */
spin_unlock_irqrestore(&video->spinlock, irq_flags);
......@@ -913,7 +912,7 @@ static int do_dv1394_init(struct video_card *video, struct dv1394_init *init)
debug_printk("dv1394: initialising %d\n", video->id);
if (init->api_version != DV1394_API_VERSION)
return -EINVAL;
/* first sanitize all the parameters */
if ( (init->n_frames < 2) || (init->n_frames > DV1394_MAX_FRAMES) )
return -EINVAL;
......@@ -949,7 +948,7 @@ static int do_dv1394_init(struct video_card *video, struct dv1394_init *init)
/* (the card should not be reset if the parameters are screwy) */
do_dv1394_shutdown(video, 0);
/* try to claim the ISO channel */
spin_lock_irqsave(&video->ohci->IR_channel_lock, flags);
if (video->ohci->ISO_channel_usage & chan_mask) {
......@@ -991,19 +990,19 @@ static int do_dv1394_init(struct video_card *video, struct dv1394_init *init)
}
video->syt_offset = init->syt_offset;
/* find and claim DMA contexts on the OHCI card */
if (video->ohci_it_ctx == -1) {
ohci1394_init_iso_tasklet(&video->it_tasklet, OHCI_ISO_TRANSMIT,
it_tasklet_func, (unsigned long) video);
if (ohci1394_register_iso_tasklet(video->ohci, &video->it_tasklet) < 0) {
printk(KERN_ERR "dv1394: could not find an available IT DMA context\n");
retval = -EBUSY;
goto err;
}
video->ohci_it_ctx = video->it_tasklet.context;
debug_printk("dv1394: claimed IT DMA context %d\n", video->ohci_it_ctx);
}
......@@ -1020,7 +1019,7 @@ static int do_dv1394_init(struct video_card *video, struct dv1394_init *init)
video->ohci_ir_ctx = video->ir_tasklet.context;
debug_printk("dv1394: claimed IR DMA context %d\n", video->ohci_ir_ctx);
}
/* allocate struct frames */
for (i = 0; i < init->n_frames; i++) {
video->frames[i] = frame_new(i, video);
......@@ -1037,14 +1036,14 @@ static int do_dv1394_init(struct video_card *video, struct dv1394_init *init)
retval = dma_region_alloc(&video->dv_buf, new_buf_size, video->ohci->dev, PCI_DMA_TODEVICE);
if (retval)
goto err;
video->dv_buf_size = new_buf_size;
debug_printk("dv1394: Allocated %d frame buffers, total %u pages (%u DMA pages), %lu bytes\n",
video->n_frames, video->dv_buf.n_pages,
video->dv_buf.n_dma_pages, video->dv_buf_size);
}
/* set up the frame->data pointers */
for (i = 0; i < video->n_frames; i++)
video->frames[i]->data = (unsigned long) video->dv_buf.kvirt + i * video->frame_size;
......@@ -1054,17 +1053,17 @@ static int do_dv1394_init(struct video_card *video, struct dv1394_init *init)
video->packet_buf_size = sizeof(struct packet) * video->n_frames * MAX_PACKETS;
if (video->packet_buf_size % PAGE_SIZE)
video->packet_buf_size += PAGE_SIZE - (video->packet_buf_size % PAGE_SIZE);
retval = dma_region_alloc(&video->packet_buf, video->packet_buf_size,
video->ohci->dev, PCI_DMA_FROMDEVICE);
if (retval)
goto err;
debug_printk("dv1394: Allocated %d packets in buffer, total %u pages (%u DMA pages), %lu bytes\n",
debug_printk("dv1394: Allocated %d packets in buffer, total %u pages (%u DMA pages), %lu bytes\n",
video->n_frames*MAX_PACKETS, video->packet_buf.n_pages,
video->packet_buf.n_dma_pages, video->packet_buf_size);
}
/* set up register offsets for IT context */
/* IT DMA context registers are spaced 16 bytes apart */
video->ohci_IsoXmitContextControlSet = OHCI1394_IsoXmitContextControlSet+16*video->ohci_it_ctx;
......@@ -1085,7 +1084,7 @@ static int do_dv1394_init(struct video_card *video, struct dv1394_init *init)
/* enable interrupts for IR context */
reg_write(video->ohci, OHCI1394_IsoRecvIntMaskSet, (1 << video->ohci_ir_ctx) );
debug_printk("dv1394: interrupts enabled for IR context %d\n", video->ohci_ir_ctx);
return 0;
err:
......@@ -1105,7 +1104,7 @@ static int do_dv1394_init_default(struct video_card *video)
/* the following are now set via devfs */
init.channel = video->channel;
init.format = video->pal_or_ntsc;
init.cip_n = video->cip_n;
init.cip_d = video->cip_d;
init.syt_offset = video->syt_offset;
......@@ -1135,17 +1134,17 @@ static void stop_dma(struct video_card *video)
reg_write(video->ohci, video->ohci_IsoXmitContextControlClear, (1 << 15));
reg_write(video->ohci, video->ohci_IsoRcvContextControlClear, (1 << 15));
flush_pci_write(video->ohci);
video->active_frame = -1;
video->first_run = 1;
/* wait until DMA really stops */
i = 0;
while (i < 1000) {
/* wait 0.1 millisecond */
udelay(100);
if ( (reg_read(video->ohci, video->ohci_IsoXmitContextControlClear) & (1 << 10)) ||
(reg_read(video->ohci, video->ohci_IsoRcvContextControlClear) & (1 << 10)) ) {
/* still active */
......@@ -1155,10 +1154,10 @@ static void stop_dma(struct video_card *video)
debug_printk("dv1394: stop_dma: DMA stopped safely after %d ms\n", i/10);
break;
}
i++;
}
if (i == 1000) {
printk(KERN_ERR "dv1394: stop_dma: DMA still going after %d ms!\n", i/10);
}
......@@ -1175,12 +1174,12 @@ static void stop_dma(struct video_card *video)
static void do_dv1394_shutdown(struct video_card *video, int free_dv_buf)
{
int i;
debug_printk("dv1394: shutdown...\n");
/* stop DMA if in progress */
stop_dma(video);
/* release the DMA contexts */
if (video->ohci_it_ctx != -1) {
video->ohci_IsoXmitContextControlSet = 0;
......@@ -1189,7 +1188,7 @@ static void do_dv1394_shutdown(struct video_card *video, int free_dv_buf)
/* disable interrupts for IT context */
reg_write(video->ohci, OHCI1394_IsoXmitIntMaskClear, (1 << video->ohci_it_ctx));
/* remove tasklet */
ohci1394_unregister_iso_tasklet(video->ohci, &video->it_tasklet);
debug_printk("dv1394: IT context %d released\n", video->ohci_it_ctx);
......@@ -1215,16 +1214,16 @@ static void do_dv1394_shutdown(struct video_card *video, int free_dv_buf)
if (video->channel != -1) {
u64 chan_mask;
unsigned long flags;
chan_mask = (u64)1 << video->channel;
spin_lock_irqsave(&video->ohci->IR_channel_lock, flags);
video->ohci->ISO_channel_usage &= ~(chan_mask);
spin_unlock_irqrestore(&video->ohci->IR_channel_lock, flags);
video->channel = -1;
}
/* free the frame structs */
for (i = 0; i < DV1394_MAX_FRAMES; i++) {
if (video->frames[i])
......@@ -1233,10 +1232,10 @@ static void do_dv1394_shutdown(struct video_card *video, int free_dv_buf)
}
video->n_frames = 0;
/* we can't free the DMA buffer unless it is guaranteed that
no more user-space mappings exist */
if (free_dv_buf) {
dma_region_free(&video->dv_buf);
video->dv_buf_size = 0;
......@@ -1324,11 +1323,11 @@ static int dv1394_fasync(int fd, struct file *file, int on)
{
/* I just copied this code verbatim from Alan Cox's mouse driver example
(linux/Documentation/DocBook/) */
struct video_card *video = file_to_video_card(file);
int retval = fasync_helper(fd, file, on, &video->fasync);
if (retval < 0)
return retval;
return 0;
......@@ -1362,19 +1361,19 @@ static ssize_t dv1394_write(struct file *file, const char *buffer, size_t count,
ret = 0;
add_wait_queue(&video->waitq, &wait);
while (count > 0) {
/* must set TASK_INTERRUPTIBLE *before* checking for free
buffers; otherwise we could miss a wakeup if the interrupt
fires between the check and the schedule() */
set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irqsave(&video->spinlock, flags);
target_frame = video->first_clear_frame;
spin_unlock_irqrestore(&video->spinlock, flags);
if (video->frames[target_frame]->state == FRAME_CLEAR) {
......@@ -1390,7 +1389,7 @@ static ssize_t dv1394_write(struct file *file, const char *buffer, size_t count,
if (cnt > count)
cnt = count;
if (cnt <= 0) {
/* no room left, gotta wait */
if (file->f_flags & O_NONBLOCK) {
if (!ret)
......@@ -1404,7 +1403,7 @@ static ssize_t dv1394_write(struct file *file, const char *buffer, size_t count,
}
schedule();
continue; /* start over from 'while(count > 0)...' */
}
......@@ -1423,7 +1422,7 @@ static ssize_t dv1394_write(struct file *file, const char *buffer, size_t count,
if (video->write_off == video->frame_size * ((target_frame + 1) % video->n_frames))
frame_prepare(video, target_frame);
}
remove_wait_queue(&video->waitq, &wait);
set_current_state(TASK_RUNNING);
up(&video->sem);
......@@ -1456,9 +1455,9 @@ static ssize_t dv1394_read(struct file *file, char *buffer, size_t count, loff_
return ret;
}
video->continuity_counter = -1;
receive_packets(video);
start_dma_receive(video);
}
......@@ -1470,7 +1469,7 @@ static ssize_t dv1394_read(struct file *file, char *buffer, size_t count, loff_
/* must set TASK_INTERRUPTIBLE *before* checking for free
buffers; otherwise we could miss a wakeup if the interrupt
fires between the check and the schedule() */
set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irqsave(&video->spinlock, flags);
......@@ -1494,7 +1493,7 @@ static ssize_t dv1394_read(struct file *file, char *buffer, size_t count, loff_
if (cnt > count)
cnt = count;
if (cnt <= 0) {
/* no room left, gotta wait */
if (file->f_flags & O_NONBLOCK) {
if (!ret)
......@@ -1508,7 +1507,7 @@ static ssize_t dv1394_read(struct file *file, char *buffer, size_t count, loff_
}
schedule();
continue; /* start over from 'while(count > 0)...' */
}
......@@ -1531,7 +1530,7 @@ static ssize_t dv1394_read(struct file *file, char *buffer, size_t count, loff_
spin_unlock_irqrestore(&video->spinlock, flags);
}
}
remove_wait_queue(&video->waitq, &wait);
set_current_state(TASK_RUNNING);
up(&video->sem);
......@@ -1579,19 +1578,19 @@ static int dv1394_ioctl(struct inode *inode, struct file *file,
ret = -EINVAL;
goto out;
}
while (n_submit > 0) {
add_wait_queue(&video->waitq, &wait);
set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irqsave(&video->spinlock, flags);
/* wait until video->first_clear_frame is really CLEAR */
while (video->frames[video->first_clear_frame]->state != FRAME_CLEAR) {
spin_unlock_irqrestore(&video->spinlock, flags);
if (signal_pending(current)) {
remove_wait_queue(&video->waitq, &wait);
set_current_state(TASK_RUNNING);
......@@ -1601,14 +1600,14 @@ static int dv1394_ioctl(struct inode *inode, struct file *file,
schedule();
set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irqsave(&video->spinlock, flags);
}
spin_unlock_irqrestore(&video->spinlock, flags);
remove_wait_queue(&video->waitq, &wait);
set_current_state(TASK_RUNNING);
frame_prepare(video, video->first_clear_frame);
n_submit--;
......@@ -1625,7 +1624,7 @@ static int dv1394_ioctl(struct inode *inode, struct file *file,
ret = -EINVAL;
goto out;
}
n_wait = (unsigned int) arg;
/* since we re-run the last frame on underflow, we will
......@@ -1636,16 +1635,16 @@ static int dv1394_ioctl(struct inode *inode, struct file *file,
ret = -EINVAL;
goto out;
}
add_wait_queue(&video->waitq, &wait);
set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irqsave(&video->spinlock, flags);
while (video->n_clear_frames < n_wait) {
spin_unlock_irqrestore(&video->spinlock, flags);
if (signal_pending(current)) {
remove_wait_queue(&video->waitq, &wait);
set_current_state(TASK_RUNNING);
......@@ -1655,7 +1654,7 @@ static int dv1394_ioctl(struct inode *inode, struct file *file,
schedule();
set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irqsave(&video->spinlock, flags);
}
......@@ -1674,7 +1673,7 @@ static int dv1394_ioctl(struct inode *inode, struct file *file,
ret = -EINVAL;
goto out;
}
n_recv = (unsigned int) arg;
/* at least one frame must be active */
......@@ -1682,7 +1681,7 @@ static int dv1394_ioctl(struct inode *inode, struct file *file,
ret = -EINVAL;
goto out;
}
spin_lock_irqsave(&video->spinlock, flags);
/* release the clear frames */
......@@ -1693,7 +1692,7 @@ static int dv1394_ioctl(struct inode *inode, struct file *file,
/* reset dropped_frames */
video->dropped_frames = 0;
spin_unlock_irqrestore(&video->spinlock, flags);
ret = 0;
......@@ -1706,11 +1705,11 @@ static int dv1394_ioctl(struct inode *inode, struct file *file,
if (ret)
goto out;
}
video->continuity_counter = -1;
receive_packets(video);
start_dma_receive(video);
ret = 0;
......@@ -1765,7 +1764,7 @@ static int dv1394_ioctl(struct inode *inode, struct file *file,
/* reset dropped_frames */
video->dropped_frames = 0;
spin_unlock_irqrestore(&video->spinlock, flags);
if (copy_to_user((void*)arg, &status, sizeof(status))) {
......@@ -1798,11 +1797,11 @@ static int dv1394_open(struct inode *inode, struct file *file)
has already been set to video by devfs */
if (file->private_data) {
video = (struct video_card*) file->private_data;
} else {
/* look up the card by ID */
unsigned long flags;
spin_lock_irqsave(&dv1394_cards_lock, flags);
if (!list_empty(&dv1394_cards)) {
struct video_card *p;
......@@ -1819,10 +1818,10 @@ static int dv1394_open(struct inode *inode, struct file *file)
debug_printk("dv1394: OHCI card %d not found", ieee1394_file_to_instance(file));
return -ENODEV;
}
file->private_data = (void*) video;
}
#ifndef DV1394_ALLOW_MORE_THAN_ONE_OPEN
if ( test_and_set_bit(0, &video->open) ) {
......@@ -1845,7 +1844,7 @@ static int dv1394_release(struct inode *inode, struct file *file)
/* clean up async I/O users */
dv1394_fasync(-1, file, 0);
/* give someone else a turn */
clear_bit(0, &video->open);
......@@ -1865,19 +1864,19 @@ static void it_tasklet_func(unsigned long data)
if (!video->dma_running)
goto out;
irq_printk("ContextControl = %08x, CommandPtr = %08x\n",
irq_printk("ContextControl = %08x, CommandPtr = %08x\n",
reg_read(video->ohci, video->ohci_IsoXmitContextControlSet),
reg_read(video->ohci, video->ohci_IsoXmitCommandPtr)
);
if ( (video->ohci_it_ctx != -1) &&
(reg_read(video->ohci, video->ohci_IsoXmitContextControlSet) & (1 << 10)) ) {
struct frame *f;
unsigned int frame, i;
if (video->active_frame == -1)
frame = 0;
else
......@@ -1901,7 +1900,7 @@ static void it_tasklet_func(unsigned long data)
int prev_frame;
struct frame *prev_f;
/* don't reset, need this later *(f->frame_begin_timestamp) = 0; */
irq_printk(" BEGIN\n");
......@@ -1910,11 +1909,11 @@ static void it_tasklet_func(unsigned long data)
if (prev_frame == -1)
prev_frame += video->n_frames;
prev_f = video->frames[prev_frame];
/* make sure we can actually garbage collect
this frame */
if ( (prev_f->state == FRAME_READY) &&
prev_f->done && (!f->done) )
{
frame_reset(prev_f);
video->n_clear_frames++;
......@@ -1929,7 +1928,7 @@ static void it_tasklet_func(unsigned long data)
f->done = 1;
}
/* see if we need to set the timestamp for the next frame */
if ( *(f->mid_frame_timestamp) ) {
struct frame *next_frame;
......@@ -1957,9 +1956,9 @@ static void it_tasklet_func(unsigned long data)
plus the length of the last frame sent, plus the syt latency */
ts_cyc = begin_ts & 0xF;
/* advance one frame, plus syt latency (typically 2-3) */
ts_cyc += f->n_packets + video->syt_offset ;
ts_off = 0;
ts_cyc += ts_off/3072;
ts_off %= 3072;
......@@ -1986,14 +1985,12 @@ static void it_tasklet_func(unsigned long data)
video->dropped_frames++;
}
} /* for (each frame) */
}
if (wake) {
kill_fasync(&video->fasync, SIGIO, POLL_OUT);
/* wake readers/writers/ioctl'ers */
wake_up_interruptible(&video->waitq);
}
......@@ -2011,10 +2008,9 @@ static void ir_tasklet_func(unsigned long data)
if (!video->dma_running)
goto out;
if ( (video->ohci_ir_ctx != -1) &&
(reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & (1 << 10)) )
{
(reg_read(video->ohci, video->ohci_IsoRcvContextControlSet) & (1 << 10)) ) {
int sof=0; /* start-of-frame flag */
struct frame *f;
......@@ -2036,14 +2032,14 @@ static void ir_tasklet_func(unsigned long data)
dma_region_sync_for_cpu(&video->packet_buf,
(unsigned long) p - (unsigned long) video->packet_buf.kvirt,
sizeof(struct packet));
packet_length = le16_to_cpu(p->data_length);
packet_time = le16_to_cpu(p->timestamp);
irq_printk("received packet %02d, timestamp=%04x, length=%04x, sof=%02x%02x\n", video->current_packet,
packet_time, packet_length,
p->data[0], p->data[1]);
/* get the descriptor based on packet_buffer cursor */
f = video->frames[video->current_packet / MAX_PACKETS];
block = &(f->descriptor_pool[video->current_packet % MAX_PACKETS]);
......@@ -2053,14 +2049,14 @@ static void ir_tasklet_func(unsigned long data)
/* get the current frame */
f = video->frames[video->active_frame];
/* exclude empty packet */
if (packet_length > 8 && xferstatus == 0x11) {
/* check for start of frame */
/* DRD> Changed to check section type ([0]>>5==0)
and dif sequence ([1]>>4==0) */
sof = ( (p->data[0] >> 5) == 0 && (p->data[1] >> 4) == 0);
dbc = (int) (p->cip_h1 >> 24);
if ( video->continuity_counter != -1 && dbc > ((video->continuity_counter + 1) % 256) )
{
......@@ -2071,12 +2067,12 @@ static void ir_tasklet_func(unsigned long data)
video->first_clear_frame = -1;
}
video->continuity_counter = dbc;
if (!video->first_frame) {
if (sof) {
video->first_frame = 1;
}
} else if (sof) {
/* close current frame */
frame_reset(f); /* f->state = STATE_CLEAR */
......@@ -2089,7 +2085,7 @@ static void ir_tasklet_func(unsigned long data)
}
if (video->first_clear_frame == -1)
video->first_clear_frame = video->active_frame;
/* get the next frame */
video->active_frame = (video->active_frame + 1) % video->n_frames;
f = video->frames[video->active_frame];
......@@ -2101,22 +2097,22 @@ static void ir_tasklet_func(unsigned long data)
/* open next frame */
f->state = FRAME_READY;
}
/* copy to buffer */
if (f->n_packets > (video->frame_size / 480)) {
printk(KERN_ERR "frame buffer overflow during receive\n");
}
frame_put_packet(f, p);
} /* first_frame */
}
/* stop, end of ready packets */
else if (xferstatus == 0) {
break;
}
/* reset xferStatus & resCount */
block->u.in.il.q[3] = cpu_to_le32(512);
......@@ -2127,7 +2123,7 @@ static void ir_tasklet_func(unsigned long data)
next_dma = ((unsigned long) block - (unsigned long) f->descriptor_pool) + f->descriptor_pool_dma;
next->u.in.il.q[0] |= 3 << 20; /* enable interrupt */
next->u.in.il.q[2] = 0; /* disable branch */
/* link previous to next */
prev_i = (next_i == 0) ? (MAX_PACKETS * video->n_frames - 1) : (next_i - 1);
f = video->frames[prev_i / MAX_PACKETS];
......@@ -2145,20 +2141,20 @@ static void ir_tasklet_func(unsigned long data)
/* advance packet_buffer cursor */
video->current_packet = (video->current_packet + 1) % (MAX_PACKETS * video->n_frames);
} /* for all packets */
wake = 1; /* why the hell not? */
} /* receive interrupt */
if (wake) {
kill_fasync(&video->fasync, SIGIO, POLL_IN);
/* wake readers/writers/ioctl'ers */
wake_up_interruptible(&video->waitq);
}
out:
spin_unlock(&video->spinlock);
}
......@@ -2216,13 +2212,13 @@ static int dv1394_init(struct ti_ohci *ohci, enum pal_or_ntsc format, enum modes
printk(KERN_ERR "dv1394: cannot allocate video_card\n");
goto err;
}
memset(video, 0, sizeof(struct video_card));
video->ohci = ohci;
/* lower 2 bits of id indicate which of four "plugs"
per host */
video->id = ohci->host->id << 2;
if (format == DV1394_NTSC)
video->id |= mode;
else
......@@ -2234,16 +2230,16 @@ static int dv1394_init(struct ti_ohci *ohci, enum pal_or_ntsc format, enum modes
video->ohci_IsoXmitContextControlSet = 0;
video->ohci_IsoXmitContextControlClear = 0;
video->ohci_IsoXmitCommandPtr = 0;
video->ohci_IsoRcvContextControlSet = 0;
video->ohci_IsoRcvContextControlClear = 0;
video->ohci_IsoRcvCommandPtr = 0;
video->ohci_IsoRcvContextMatch = 0;
video->n_frames = 0; /* flag that video is not initialized */
video->channel = 63; /* default to broadcast channel */
video->active_frame = -1;
/* initialize the following */
video->pal_or_ntsc = format;
video->cip_n = 0; /* 0 = use builtin default */
......@@ -2270,7 +2266,7 @@ static int dv1394_init(struct ti_ohci *ohci, enum pal_or_ntsc format, enum modes
INIT_LIST_HEAD(&video->list);
list_add_tail(&video->list, &dv1394_cards);
spin_unlock_irqrestore(&dv1394_cards_lock, flags);
if (devfs_mk_cdev(MKDEV(IEEE1394_MAJOR,
IEEE1394_MINOR_BLOCK_DV1394*16 + video->id),
S_IFCHR|S_IRUGO|S_IWUGO,
......@@ -2281,7 +2277,7 @@ static int dv1394_init(struct ti_ohci *ohci, enum pal_or_ntsc format, enum modes
goto err_free;
debug_printk("dv1394: dv1394_init() OK on ID %d\n", video->id);
return 0;
err_free:
......@@ -2293,7 +2289,7 @@ static int dv1394_init(struct ti_ohci *ohci, enum pal_or_ntsc format, enum modes
static void dv1394_un_init(struct video_card *video)
{
char buf[32];
/* obviously nobody has the driver open at this point */
do_dv1394_shutdown(video, 1);
snprintf(buf, sizeof(buf), "dv/host%d/%s/%s", (video->id >> 2),
......@@ -2305,13 +2301,13 @@ static void dv1394_un_init(struct video_card *video)
kfree(video);
}
static void dv1394_remove_host (struct hpsb_host *host)
{
struct video_card *video;
unsigned long flags;
int id = host->id;
/* We only work with the OHCI-1394 driver */
if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME))
return;
......@@ -2355,7 +2351,7 @@ static void dv1394_add_host (struct hpsb_host *host)
devfs_mk_dir("ieee1394/dv/host%d", id);
devfs_mk_dir("ieee1394/dv/host%d/NTSC", id);
devfs_mk_dir("ieee1394/dv/host%d/PAL", id);
dv1394_init(ohci, DV1394_NTSC, MODE_RECEIVE);
dv1394_init(ohci, DV1394_NTSC, MODE_TRANSMIT);
dv1394_init(ohci, DV1394_PAL, MODE_RECEIVE);
......@@ -2373,7 +2369,7 @@ static void dv1394_host_reset(struct hpsb_host *host)
struct ti_ohci *ohci;
struct video_card *video = NULL, *tmp_vid;
unsigned long flags;
/* We only work with the OHCI-1394 driver */
if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME))
return;
......@@ -2394,7 +2390,7 @@ static void dv1394_host_reset(struct hpsb_host *host)
if (!video)
return;
spin_lock_irqsave(&video->spinlock, flags);
if (!video->dma_running)
......@@ -2403,7 +2399,7 @@ static void dv1394_host_reset(struct hpsb_host *host)
/* check IT context */
if (video->ohci_it_ctx != -1) {
u32 ctx;
ctx = reg_read(video->ohci, video->ohci_IsoXmitContextControlSet);
/* if (RUN but not ACTIVE) */
......@@ -2415,17 +2411,17 @@ static void dv1394_host_reset(struct hpsb_host *host)
/* to be safe, assume a frame has been dropped. User-space programs
should handle this condition like an underflow. */
video->dropped_frames++;
/* for some reason you must clear, then re-set the RUN bit to restart DMA */
/* clear RUN */
reg_write(video->ohci, video->ohci_IsoXmitContextControlClear, (1 << 15));
flush_pci_write(video->ohci);
/* set RUN */
reg_write(video->ohci, video->ohci_IsoXmitContextControlSet, (1 << 15));
flush_pci_write(video->ohci);
/* set the WAKE bit (just in case; this isn't strictly necessary) */
reg_write(video->ohci, video->ohci_IsoXmitContextControlSet, (1 << 12));
flush_pci_write(video->ohci);
......@@ -2435,11 +2431,11 @@ static void dv1394_host_reset(struct hpsb_host *host)
reg_read(video->ohci, video->ohci_IsoXmitCommandPtr));
}
}
/* check IR context */
if (video->ohci_ir_ctx != -1) {
u32 ctx;
ctx = reg_read(video->ohci, video->ohci_IsoRcvContextControlSet);
/* if (RUN but not ACTIVE) */
......@@ -2454,15 +2450,15 @@ static void dv1394_host_reset(struct hpsb_host *host)
/* for some reason you must clear, then re-set the RUN bit to restart DMA */
/* XXX this doesn't work for me, I can't get IR DMA to restart :[ */
/* clear RUN */
reg_write(video->ohci, video->ohci_IsoRcvContextControlClear, (1 << 15));
flush_pci_write(video->ohci);
/* set RUN */
reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, (1 << 15));
flush_pci_write(video->ohci);
/* set the WAKE bit (just in case; this isn't strictly necessary) */
reg_write(video->ohci, video->ohci_IsoRcvContextControlSet, (1 << 12));
flush_pci_write(video->ohci);
......@@ -2475,7 +2471,7 @@ static void dv1394_host_reset(struct hpsb_host *host)
out:
spin_unlock_irqrestore(&video->spinlock, flags);
/* wake readers/writers/ioctl'ers */
wake_up_interruptible(&video->waitq);
}
......@@ -2616,6 +2612,7 @@ static int __init dv1394_init_module(void)
cdev_init(&dv1394_cdev, &dv1394_fops);
dv1394_cdev.owner = THIS_MODULE;
kobject_set_name(&dv1394_cdev.kobj, "dv1394");
ret = cdev_add(&dv1394_cdev, IEEE1394_DV1394_DEV, 16);
if (ret) {
printk(KERN_ERR "dv1394: unable to register character device\n");
......
......@@ -49,7 +49,7 @@
To set the DV output parameters (e.g. whether you want NTSC or PAL
video), use the DV1394_INIT ioctl, passing in the parameters you
want in a struct dv1394_init.
Example 1:
To play a raw .DV file: cat foo.DV > /dev/dv1394
(cat will use write() internally)
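A rough sketch of such a write()-based player follows (field names track the
struct dv1394_init excerpt further below; the n_frames member name, the
120000-byte NTSC frame size, the read_next_frame() helper and the usual
unistd/ioctl headers are assumptions made purely for illustration):

	int fd = open("/dev/dv1394", O_RDWR);
	struct dv1394_init init = {
		.api_version = DV1394_API_VERSION,
		.channel     = 63,            /* default broadcast channel */
		.n_frames    = 4,             /* ringbuffer depth (assumed name) */
		.format      = DV1394_NTSC,
		.cip_n       = 0,             /* 0 = builtin empty-packet defaults */
		.cip_d       = 0,
	};
	unsigned char frame[120000];          /* one NTSC DV frame, size assumed */

	ioctl(fd, DV1394_INIT, &init);
	while (read_next_frame(frame))        /* hypothetical frame source */
		write(fd, frame, sizeof(frame));
	close(fd);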
......@@ -72,9 +72,9 @@
2)
For more control over buffering, and to avoid unnecessary copies
of the DV data, you can use the more sophisticated mmap() interface.
First, call the DV1394_INIT ioctl to specify your parameters,
including the number of frames in the ringbuffer. Then, calling mmap()
on the dv1394 device will give you direct access to the ringbuffer
from which the DV card reads your frame data.
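Continuing the sketch above, the mapping itself might look like this (the
n_frames * frame_size mapping length is an assumption based on this
description, not a guarantee from this header):

	size_t map_len = init.n_frames * 120000;   /* NTSC frame size assumed */
	unsigned char *ring = mmap(NULL, map_len, PROT_READ | PROT_WRITE,
				   MAP_SHARED, fd, 0);
	if (ring == MAP_FAILED)
		/* fall back to the write() interface */;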
......@@ -99,7 +99,7 @@
*--------------------------------------*
| CLEAR | DV data | DV data | CLEAR |
*--------------------------------------*
<ACTIVE>
transmission goes in this direction --->>>
......@@ -110,10 +110,10 @@
will continue to transmit frame 2, and will increase the dropped_frames
counter each time it repeats the transmission).
If you called DV1394_GET_STATUS at this instant, you would
receive the following values:
n_frames = 4
active_frame = 1
first_clear_frame = 3
......@@ -144,9 +144,9 @@
(checks of system call return values omitted for brevity; always
check return values in your code!)
while ( frames left ) {
struct pollfd *pfd = ...;
pfd->fd = dv1394_fd;
......@@ -154,12 +154,12 @@
pfd->events = POLLOUT | POLLIN; (OUT for transmit, IN for receive)
(add other sources of I/O here)
poll(pfd, 1, -1); (or select(); add a timeout if you want)
if (pfd->revents) {
struct dv1394_status status;
ioctl(dv1394_fd, DV1394_GET_STATUS, &status);
if (status.dropped_frames > 0) {
......@@ -183,7 +183,7 @@
should close the dv1394 file descriptor (and munmap() all
ringbuffer mappings, if you are using them), then re-open the
dv1394 device (and re-map the ringbuffer).
*/
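A sketch of that recovery sequence, reusing fd, init, ring and map_len from
the earlier sketches (same assumptions apply):

	munmap(ring, map_len);
	close(fd);
	fd = open("/dev/dv1394", O_RDWR);
	ioctl(fd, DV1394_INIT, &init);
	ring = mmap(NULL, map_len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);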
......@@ -215,7 +215,7 @@ enum pal_or_ntsc {
struct dv1394_init {
/* DV1394_API_VERSION */
unsigned int api_version;
/* isochronous transmission channel to use */
unsigned int channel;
......@@ -227,7 +227,7 @@ struct dv1394_init {
enum pal_or_ntsc format;
/* the following are used only for transmission */
/* set these to zero unless you want a
non-default empty packet rate (see below) */
unsigned long cip_n;
......@@ -244,7 +244,7 @@ struct dv1394_init {
would imply a different size for the ringbuffer). If you need a
different buffer size, simply close and re-open the device, then
initialize it with your new settings. */
/* Q: What are cip_n and cip_d? */
/*
......@@ -261,13 +261,13 @@ struct dv1394_init {
The default empty packet insertion rate seems to work for many people; if
your DV output is stable, you can simply ignore this discussion. However,
we have exposed the empty packet rate as a parameter to support devices that
do not work with the default rate.
The decision to insert an empty packet is made with a numerator/denominator
algorithm. Empty packets are produced at an average rate of CIP_N / CIP_D.
You can alter the empty packet rate by passing non-zero values for cip_n
and cip_d to the INIT ioctl.
*/
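The averaging can be pictured with a small accumulator; this is only an
illustration of the cip_n/cip_d idea, not the driver's actual code:

	static unsigned long acc;

	static int want_empty_packet(unsigned long cip_n, unsigned long cip_d)
	{
		acc += cip_n;
		if (acc >= cip_d) {
			acc -= cip_d;
			return 1;	/* insert an empty packet this time */
		}
		return 0;		/* send a data-bearing packet */
	}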
......
/*
* eth1394.c -- Ethernet driver for Linux IEEE-1394 Subsystem
*
*
* Copyright (C) 2001-2003 Ben Collins <bcollins@debian.org>
* 2000 Bonin Franck <boninf@free.fr>
* 2003 Steve Kinneberg <kinnebergsteve@acmsystems.com>
......@@ -89,7 +89,7 @@
#define TRACE() printk(KERN_ERR "%s:%s[%d] ---- TRACE\n", driver_name, __FUNCTION__, __LINE__)
static char version[] __devinitdata =
"$Rev: 1175 $ Ben Collins <bcollins@debian.org>";
"$Rev: 1198 $ Ben Collins <bcollins@debian.org>";
struct fragment_info {
struct list_head list;
......@@ -216,7 +216,7 @@ static struct hpsb_highlevel eth1394_highlevel = {
/* This is called after an "ifup" */
static int ether1394_open (struct net_device *dev)
{
struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
struct eth1394_priv *priv = dev->priv;
int ret = 0;
/* Something bad happened, don't even try */
......@@ -278,7 +278,7 @@ static void ether1394_tx_timeout (struct net_device *dev)
static int ether1394_change_mtu(struct net_device *dev, int new_mtu)
{
struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
struct eth1394_priv *priv = dev->priv;
if ((new_mtu < 68) ||
(new_mtu > min(ETH1394_DATA_LEN,
......@@ -479,7 +479,7 @@ static void ether1394_reset_priv (struct net_device *dev, int set_mtu)
{
unsigned long flags;
int i;
struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
struct eth1394_priv *priv = dev->priv;
struct hpsb_host *host = priv->host;
u64 guid = *((u64*)&(host->csr.rom->bus_info_data[3]));
u16 maxpayload = 1 << (host->csr.max_rec + 1);
......@@ -652,7 +652,7 @@ static void ether1394_add_host (struct hpsb_host *host)
static void ether1394_remove_host (struct hpsb_host *host)
{
struct eth1394_host_info *hi;
hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
if (hi != NULL) {
struct eth1394_priv *priv = (struct eth1394_priv *)hi->dev->priv;
......@@ -660,7 +660,7 @@ static void ether1394_remove_host (struct hpsb_host *host)
hpsb_unregister_addrspace(&eth1394_highlevel, host,
priv->local_fifo);
if (priv->iso != NULL)
hpsb_iso_shutdown(priv->iso);
if (hi->dev) {
......@@ -731,18 +731,16 @@ static int ether1394_header(struct sk_buff *skb, struct net_device *dev,
eth->h_proto = htons(type);
if (dev->flags & (IFF_LOOPBACK|IFF_NOARP))
{
if (dev->flags & (IFF_LOOPBACK|IFF_NOARP)) {
memset(eth->h_dest, 0, dev->addr_len);
return(dev->hard_header_len);
}
if (daddr)
{
if (daddr) {
memcpy(eth->h_dest,daddr,dev->addr_len);
return dev->hard_header_len;
}
return -dev->hard_header_len;
}
......@@ -760,15 +758,15 @@ static int ether1394_rebuild_header(struct sk_buff *skb)
struct eth1394hdr *eth = (struct eth1394hdr *)skb->data;
struct net_device *dev = skb->dev;
switch (eth->h_proto)
{
switch (eth->h_proto) {
#ifdef CONFIG_INET
case __constant_htons(ETH_P_IP):
return arp_find((unsigned char*)&eth->h_dest, skb);
#endif
default:
ETH1394_PRINT(KERN_DEBUG, dev->name,
"unable to resolve type %04x addresses.\n",
"unable to resolve type %04x addresses.\n",
eth->h_proto);
break;
}
......@@ -797,7 +795,7 @@ static int ether1394_header_cache(struct neighbour *neigh, struct hh_cache *hh)
eth->h_proto = type;
memcpy(eth->h_dest, neigh->ha, dev->addr_len);
hh->hh_len = ETH1394_HLEN;
return 0;
}
......@@ -867,7 +865,7 @@ static inline u16 ether1394_parse_encap(struct sk_buff *skb,
nodeid_t srcid, nodeid_t destid,
u16 ether_type)
{
struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
struct eth1394_priv *priv = dev->priv;
u64 dest_hw;
unsigned short ret = 0;
......@@ -1010,7 +1008,7 @@ static inline int new_fragment(struct list_head *frag_info, int offset, int len)
}
new = kmalloc(sizeof(struct fragment_info), GFP_ATOMIC);
if (!new)
return -ENOMEM;
new->offset = offset;
......@@ -1192,7 +1190,7 @@ static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
purge_partial_datagram(pdgl->prev);
pdg->sz--;
}
retval = new_partial_datagram(dev, pdgl, dgl, dg_size,
buf + hdr_len, fg_off,
fg_len);
......@@ -1374,7 +1372,7 @@ static void ether1394_iso(struct hpsb_iso *iso)
* arphdr) is the same format as the ip1394 header, so they overlap. The rest
* needs to be munged a bit. The remainder of the arphdr is formatted based
* on hwaddr len and ipaddr len. We know what they'll be, so it's easy to
* judge.
*
* Now that the EUI is used for the hardware address all we need to do to make
* this work for 1394 is to insert 2 quadlets that contain max_rec size,
......@@ -1452,7 +1450,7 @@ static inline unsigned int ether1394_encapsulate(struct sk_buff *skb,
hdr->common.lf = ETH1394_HDR_LF_IF;
hdr->sf.fg_off = 0;
break;
default:
hdr->sf.fg_off += adj_max_payload;
bufhdr = (union eth1394_hdr *)skb_pull(skb, adj_max_payload);
......@@ -1499,7 +1497,7 @@ static inline int ether1394_prep_write_packet(struct hpsb_packet *p,
ETH1394_PRINT_G(KERN_ERR, "No more tlabels left while sending "
"to node " NODE_BUS_FMT "\n", NODE_BUS_ARGS(host, node));
return -1;
}
}
p->header[0] = (p->node_id << 16) | (p->tlabel << 10)
| (1 << 8) | (TCODE_WRITEB << 4);
......@@ -1538,7 +1536,6 @@ static inline void ether1394_free_packet(struct hpsb_packet *packet)
{
if (packet->tcode != TCODE_STREAM_DATA)
hpsb_free_tlabel(packet);
packet->data = NULL;
hpsb_free_packet(packet);
}
......@@ -1583,9 +1580,9 @@ static inline void ether1394_dg_complete(struct packet_task *ptask, int fail)
{
struct sk_buff *skb = ptask->skb;
struct net_device *dev = skb->dev;
struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
struct eth1394_priv *priv = dev->priv;
unsigned long flags;
/* Statistics */
spin_lock_irqsave(&priv->lock, flags);
if (fail) {
......@@ -1616,8 +1613,7 @@ static void ether1394_complete_cb(void *__ptask)
ether1394_free_packet(packet);
ptask->outstanding_pkts--;
if (ptask->outstanding_pkts > 0 && !fail)
{
if (ptask->outstanding_pkts > 0 && !fail) {
int tx_len;
/* Add the encapsulation header to the fragment */
......@@ -1637,7 +1633,7 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
{
int kmflags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
struct eth1394hdr *eth;
struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
struct eth1394_priv *priv = dev->priv;
int proto;
unsigned long flags;
nodeid_t dest_node;
......@@ -1797,7 +1793,7 @@ static int ether1394_ethtool_ioctl(struct net_device *dev, void *useraddr)
case ETHTOOL_GDRVINFO: {
struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
strcpy (info.driver, driver_name);
strcpy (info.version, "$Rev: 1175 $");
strcpy (info.version, "$Rev: 1198 $");
/* FIXME XXX provide sane businfo */
strcpy (info.bus_info, "ieee1394");
if (copy_to_user (useraddr, &info, sizeof (info)))
......
......@@ -493,7 +493,7 @@ int hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
return 0;
}
void hpsb_unlisten_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
unsigned int channel)
{
if (channel > 63) {
......
......@@ -5,7 +5,7 @@
struct hpsb_address_serve {
struct list_head host_list; /* per host list */
struct list_head hl_list; /* hpsb_highlevel list */
struct hpsb_address_ops *op;
......@@ -19,7 +19,7 @@ struct hpsb_address_serve {
/*
* The above structs are internal to highlevel driver handling. Only the
* following structures are of interest to actual highlevel drivers.
*/
struct hpsb_highlevel {
......@@ -68,8 +68,8 @@ struct hpsb_highlevel {
struct hpsb_address_ops {
/*
* Null function pointers will make the respective operation complete
* with RCODE_TYPE_ERROR. Makes for easy-to-implement read-only
* registers (just leave everything but read NULL).
*
* All functions shall return appropriate IEEE 1394 rcodes.
......@@ -77,7 +77,7 @@ struct hpsb_address_ops {
/* These functions have to implement block reads for themselves. */
/* These functions either return a response code
or a negative number. In the first case a response will be generated; in the
latter case, no response will be sent and the driver that handled the request
will send the response itself.
*/
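As a rough illustration of the read-only pattern described above (the
callback prototype below is an assumption; check the full struct
hpsb_address_ops declaration in this header for the exact signatures):

	static int my_reg_read(struct hpsb_host *host, int nodeid, quadlet_t *buf,
			       u64 addr, size_t length, u16 flags)
	{
		/* copy register contents into *buf ... */
		return RCODE_COMPLETE;
	}

	static struct hpsb_address_ops my_readonly_ops = {
		.read = my_reg_read,
		/* all other ops left NULL: they complete with RCODE_TYPE_ERROR */
	};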
......@@ -104,7 +104,7 @@ void highlevel_host_reset(struct hpsb_host *host);
a packet arrives. The flags argument contains the second word of the first header
quadlet of the incoming packet (containing transaction label, retry code,
transaction code and priority). These functions either return a response code
or a negative number. In the first case a response will be generated; in the
latter case, no response will be sent and the driver that handled the request
will send the response itself.
*/
......@@ -155,7 +155,7 @@ int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
* Enable or disable receiving a certain isochronous channel through the
* iso_receive op.
*/
int hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
unsigned int channel);
void hpsb_unlisten_channel(struct hpsb_highlevel *hl, struct hpsb_host *host,
unsigned int channel);
......
......@@ -126,9 +126,7 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
h->hostdata = h + 1;
h->driver = drv;
INIT_LIST_HEAD(&h->pending_packets);
spin_lock_init(&h->pending_pkt_lock);
skb_queue_head_init(&h->pending_packet_queue);
INIT_LIST_HEAD(&h->addr_space);
init_timer(&h->delayed_reset);
......
......@@ -5,6 +5,8 @@
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <asm/semaphore.h>
#include "ieee1394_types.h"
......@@ -21,8 +23,8 @@ struct hpsb_host {
atomic_t generation;
struct list_head pending_packets;
spinlock_t pending_pkt_lock;
struct sk_buff_head pending_packet_queue;
struct timer_list timeout;
unsigned long timeout_interval;
......@@ -164,7 +166,7 @@ struct hpsb_host_driver {
* called. Return 0 on success, negative errno on failure.
* NOTE: The function must be callable in interrupt context.
*/
int (*transmit_packet) (struct hpsb_host *host,
struct hpsb_packet *packet);
/* This function requests miscellaneous services from the driver, see
......
......@@ -39,7 +39,7 @@
#define ACK_TARDY 0xb
#define ACK_CONFLICT_ERROR 0xc
#define ACK_DATA_ERROR 0xd
#define ACK_TYPE_ERROR 0xe
#define ACK_ADDRESS_ERROR 0xf
/* Non-standard "ACK codes" for internal use */
......@@ -74,7 +74,7 @@ extern const char *hpsb_speedto_str[];
#define SELFID_PORT_CHILD 0x3
#define SELFID_PORT_PARENT 0x2
#define SELFID_PORT_NCONN 0x1
#define SELFID_PORT_NONE 0x0
#include <asm/byteorder.h>
......
......@@ -31,6 +31,8 @@
#include <linux/moduleparam.h>
#include <linux/bitops.h>
#include <linux/kdev_t.h>
#include <linux/skbuff.h>
#include <asm/byteorder.h>
#include <asm/semaphore.h>
......@@ -56,8 +58,6 @@ MODULE_PARM_DESC(disable_nodemgr, "Disable nodemgr functionality.");
/* We are GPL, so treat us special */
MODULE_LICENSE("GPL");
static kmem_cache_t *hpsb_packet_cache;
/* Some globals used */
const char *hpsb_speedto_str[] = { "S100", "S200", "S400", "S800", "S1600", "S3200" };
......@@ -122,30 +122,27 @@ void hpsb_set_packet_complete_task(struct hpsb_packet *packet,
struct hpsb_packet *hpsb_alloc_packet(size_t data_size)
{
struct hpsb_packet *packet = NULL;
void *data = NULL;
int gfp_flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
struct sk_buff *skb;
packet = kmem_cache_alloc(hpsb_packet_cache, gfp_flags);
if (packet == NULL)
data_size = ((data_size + 3) & ~3);
skb = alloc_skb(data_size + sizeof(*packet), GFP_ATOMIC);
if (skb == NULL)
return NULL;
memset(packet, 0, sizeof(*packet));
memset(skb->data, 0, data_size + sizeof(*packet));
packet = (struct hpsb_packet *)skb->data;
packet->skb = skb;
packet->header = packet->embedded_header;
INIT_LIST_HEAD(&packet->list);
packet->state = hpsb_unused;
packet->generation = -1;
INIT_LIST_HEAD(&packet->driver_list);
atomic_set(&packet->refcnt, 1);
if (data_size) {
data_size = (data_size + 3) & ~3;
data = kmalloc(data_size + 8, gfp_flags);
if (data == NULL) {
kmem_cache_free(hpsb_packet_cache, packet);
return NULL;
}
packet->data = data;
packet->data = (quadlet_t *)(skb->data + sizeof(*packet));
packet->data_size = data_size;
}
......@@ -162,8 +159,8 @@ struct hpsb_packet *hpsb_alloc_packet(size_t data_size)
void hpsb_free_packet(struct hpsb_packet *packet)
{
if (packet && atomic_dec_and_test(&packet->refcnt)) {
kfree(packet->data);
kmem_cache_free(hpsb_packet_cache, packet);
BUG_ON(!list_empty(&packet->driver_list));
kfree_skb(packet->skb);
}
}
......@@ -219,13 +216,13 @@ static int check_selfids(struct hpsb_host *host)
if (!sid->extended) {
nodeid++;
esid_seq = 0;
if (sid->phy_id != nodeid) {
HPSB_INFO("SelfIDs failed monotony check with "
"%d", sid->phy_id);
return 0;
}
if (sid->link_active) {
host->nodes_active++;
if (sid->contender)
......@@ -234,7 +231,7 @@ static int check_selfids(struct hpsb_host *host)
} else {
esid = (struct ext_selfid *)sid;
if ((esid->phy_id != nodeid)
|| (esid->seq_nr != esid_seq)) {
HPSB_INFO("SelfIDs failed monotony check with "
"%d/%d", esid->phy_id, esid->seq_nr);
......@@ -244,24 +241,24 @@ static int check_selfids(struct hpsb_host *host)
}
sid++;
}
esid = (struct ext_selfid *)(sid - 1);
while (esid->extended) {
if ((esid->porta == 0x2) || (esid->portb == 0x2)
|| (esid->portc == 0x2) || (esid->portd == 0x2)
|| (esid->porte == 0x2) || (esid->portf == 0x2)
|| (esid->portg == 0x2) || (esid->porth == 0x2)) {
HPSB_INFO("SelfIDs failed root check on "
"extended SelfID");
return 0;
HPSB_INFO("SelfIDs failed root check on "
"extended SelfID");
return 0;
}
esid--;
}
sid = (struct selfid *)esid;
if ((sid->port0 == 0x2) || (sid->port1 == 0x2) || (sid->port2 == 0x2)) {
HPSB_INFO("SelfIDs failed root check");
return 0;
HPSB_INFO("SelfIDs failed root check");
return 0;
}
host->node_count = nodeid + 1;
......@@ -400,7 +397,7 @@ void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
}
void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
int ackcode)
{
packet->ack_code = ackcode;
......@@ -413,7 +410,7 @@ void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
if (ackcode != ACK_PENDING || !packet->expect_response) {
atomic_dec(&packet->refcnt);
list_del(&packet->list);
skb_unlink(packet->skb);
packet->state = hpsb_complete;
queue_packet_complete(packet);
return;
......@@ -505,17 +502,17 @@ int hpsb_send_packet(struct hpsb_packet *packet)
packet->state = hpsb_queued;
if (!packet->no_waiter || packet->expect_response) {
unsigned long flags;
/* This just seems silly to me */
WARN_ON(packet->no_waiter && packet->expect_response);
if (!packet->no_waiter || packet->expect_response) {
atomic_inc(&packet->refcnt);
spin_lock_irqsave(&host->pending_pkt_lock, flags);
list_add_tail(&packet->list, &host->pending_packets);
spin_unlock_irqrestore(&host->pending_pkt_lock, flags);
skb_queue_tail(&host->pending_packet_queue, packet->skb);
}
if (packet->node_id == host->node_id)
{ /* it is a local request, so handle it locally */
if (packet->node_id == host->node_id) {
/* it is a local request, so handle it locally */
quadlet_t *data;
size_t size = packet->data_size + packet->header_size;
......@@ -547,6 +544,7 @@ int hpsb_send_packet(struct hpsb_packet *packet)
+ NODEID_TO_NODE(packet->node_id)];
}
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
switch (packet->speed_code) {
case 2:
dump_packet("send packet 400:", packet->header,
......@@ -560,6 +558,7 @@ int hpsb_send_packet(struct hpsb_packet *packet)
dump_packet("send packet 100:", packet->header,
packet->header_size);
}
#endif
return host->driver->transmit_packet(host, packet);
}
......@@ -595,80 +594,78 @@ static void send_packet_nocare(struct hpsb_packet *packet)
}
void handle_packet_response(struct hpsb_host *host, int tcode, quadlet_t *data,
size_t size)
static void handle_packet_response(struct hpsb_host *host, int tcode,
quadlet_t *data, size_t size)
{
struct hpsb_packet *packet = NULL;
struct list_head *lh;
struct sk_buff *skb;
int tcode_match = 0;
int tlabel;
unsigned long flags;
tlabel = (data[0] >> 10) & 0x3f;
spin_lock_irqsave(&host->pending_pkt_lock, flags);
spin_lock_irqsave(&host->pending_packet_queue.lock, flags);
list_for_each(lh, &host->pending_packets) {
packet = list_entry(lh, struct hpsb_packet, list);
skb_queue_walk(&host->pending_packet_queue, skb) {
packet = (struct hpsb_packet *)skb->data;
if ((packet->tlabel == tlabel)
&& (packet->node_id == (data[1] >> 16))){
break;
}
packet = NULL;
}
if (lh == &host->pending_packets) {
if (packet == NULL) {
HPSB_DEBUG("unsolicited response packet received - no tlabel match");
dump_packet("contents:", data, 16);
spin_unlock_irqrestore(&host->pending_pkt_lock, flags);
spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
return;
}
switch (packet->tcode) {
case TCODE_WRITEQ:
case TCODE_WRITEB:
if (tcode == TCODE_WRITE_RESPONSE) tcode_match = 1;
if (tcode != TCODE_WRITE_RESPONSE)
break;
tcode_match = 1;
memcpy(packet->header, data, 12);
break;
case TCODE_READQ:
if (tcode == TCODE_READQ_RESPONSE) tcode_match = 1;
if (tcode != TCODE_READQ_RESPONSE)
break;
tcode_match = 1;
memcpy(packet->header, data, 16);
break;
case TCODE_READB:
if (tcode == TCODE_READB_RESPONSE) tcode_match = 1;
if (tcode != TCODE_READB_RESPONSE)
break;
tcode_match = 1;
BUG_ON(packet->skb->len - sizeof(*packet) < size - 16);
memcpy(packet->header, data, 16);
memcpy(packet->data, data + 4, size - 16);
break;
case TCODE_LOCK_REQUEST:
if (tcode == TCODE_LOCK_RESPONSE) tcode_match = 1;
if (tcode != TCODE_LOCK_RESPONSE)
break;
tcode_match = 1;
size = min((size - 16), (size_t)8);
BUG_ON(packet->skb->len - sizeof(*packet) < size);
memcpy(packet->header, data, 16);
memcpy(packet->data, data + 4, size);
break;
}
if (!tcode_match || (packet->tlabel != tlabel)
|| (packet->node_id != (data[1] >> 16))) {
if (!tcode_match) {
HPSB_INFO("unsolicited response packet received - tcode mismatch");
dump_packet("contents:", data, 16);
spin_unlock_irqrestore(&host->pending_pkt_lock, flags);
spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
return;
}
list_del(&packet->list);
spin_unlock_irqrestore(&host->pending_pkt_lock, flags);
/* FIXME - update size fields? */
switch (tcode) {
case TCODE_WRITE_RESPONSE:
memcpy(packet->header, data, 12);
break;
case TCODE_READQ_RESPONSE:
memcpy(packet->header, data, 16);
break;
case TCODE_READB_RESPONSE:
memcpy(packet->header, data, 16);
memcpy(packet->data, data + 4, size - 16);
break;
case TCODE_LOCK_RESPONSE:
memcpy(packet->header, data, 16);
memcpy(packet->data, data + 4, (size - 16) > 8 ? 8 : size - 16);
break;
}
__skb_unlink(skb, skb->list);
spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
if (packet->state == hpsb_queued) {
packet->sendtime = jiffies;
......@@ -685,10 +682,8 @@ static struct hpsb_packet *create_reply_packet(struct hpsb_host *host,
{
struct hpsb_packet *p;
dsize += (dsize % 4 ? 4 - (dsize % 4) : 0);
p = hpsb_alloc_packet(dsize);
if (p == NULL) {
if (unlikely(p == NULL)) {
/* FIXME - send data_error response */
return NULL;
}
......@@ -702,9 +697,8 @@ static struct hpsb_packet *create_reply_packet(struct hpsb_host *host,
p->generation = get_hpsb_generation(host);
if (dsize % 4) {
p->data[dsize / 4] = 0;
}
if (dsize % 4)
p->data[dsize / 4] = 0;
return p;
}
......@@ -851,11 +845,11 @@ static void handle_incoming_packet(struct hpsb_host *host, int tcode,
fill_async_lock_resp(packet, rcode, extcode, 4);
break;
case 8:
if ((extcode != EXTCODE_FETCH_ADD)
&& (extcode != EXTCODE_LITTLE_ADD)) {
rcode = highlevel_lock(host, source,
packet->data, addr,
data[5], data[4],
extcode, flags);
fill_async_lock_resp(packet, rcode, extcode, 4);
} else {
......@@ -870,7 +864,7 @@ static void handle_incoming_packet(struct hpsb_host *host, int tcode,
rcode = highlevel_lock64(host, source,
(octlet_t *)packet->data, addr,
*(octlet_t *)(data + 6),
*(octlet_t *)(data + 4),
extcode, flags);
fill_async_lock_resp(packet, rcode, extcode, 8);
break;
......@@ -932,7 +926,7 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
break;
default:
HPSB_NOTICE("received packet with bogus transaction code %d",
HPSB_NOTICE("received packet with bogus transaction code %d",
tcode);
break;
}
......@@ -941,74 +935,75 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
void abort_requests(struct hpsb_host *host)
{
unsigned long flags;
struct hpsb_packet *packet, *packet_next;
LIST_HEAD(llist);
struct hpsb_packet *packet;
struct sk_buff *skb;
host->driver->devctl(host, CANCEL_REQUESTS, 0);
spin_lock_irqsave(&host->pending_pkt_lock, flags);
list_splice(&host->pending_packets, &llist);
INIT_LIST_HEAD(&host->pending_packets);
spin_unlock_irqrestore(&host->pending_pkt_lock, flags);
while ((skb = skb_dequeue(&host->pending_packet_queue)) != NULL) {
packet = (struct hpsb_packet *)skb->data;
list_for_each_entry_safe(packet, packet_next, &llist, list) {
list_del(&packet->list);
packet->state = hpsb_complete;
packet->ack_code = ACKX_ABORTED;
queue_packet_complete(packet);
}
}
}
void abort_timedouts(unsigned long __opaque)
{
struct hpsb_host *host = (struct hpsb_host *)__opaque;
unsigned long flags;
struct hpsb_packet *packet, *packet_next;
unsigned long expire;
LIST_HEAD(expiredlist);
unsigned long flags;
struct hpsb_packet *packet;
struct sk_buff *skb;
unsigned long expire;
spin_lock_irqsave(&host->csr.lock, flags);
expire = host->csr.expire;
spin_unlock_irqrestore(&host->csr.lock, flags);
spin_lock_irqsave(&host->pending_pkt_lock, flags);
list_for_each_entry_safe(packet, packet_next, &host->pending_packets, list) {
if (time_before(packet->sendtime + expire, jiffies)) {
list_del(&packet->list);
list_add(&packet->list, &expiredlist);
}
}
spin_unlock_irqrestore(&host->csr.lock, flags);
/* Hold the lock around this, since we aren't dequeuing all
* packets, just ones we need. */
spin_lock_irqsave(&host->pending_packet_queue.lock, flags);
while (!skb_queue_empty(&host->pending_packet_queue)) {
skb = skb_peek(&host->pending_packet_queue);
packet = (struct hpsb_packet *)skb->data;
if (time_before(packet->sendtime + expire, jiffies)) {
__skb_unlink(skb, skb->list);
packet->state = hpsb_complete;
packet->ack_code = ACKX_TIMEOUT;
queue_packet_complete(packet);
} else {
/* Since packets are added to the tail, the oldest
* ones are first, always. When we get to one that
* isn't timed out, the rest aren't either. */
break;
}
}
if (!list_empty(&host->pending_packets))
if (!skb_queue_empty(&host->pending_packet_queue))
mod_timer(&host->timeout, jiffies + host->timeout_interval);
spin_unlock_irqrestore(&host->pending_pkt_lock, flags);
list_for_each_entry_safe(packet, packet_next, &expiredlist, list) {
list_del(&packet->list);
packet->state = hpsb_complete;
packet->ack_code = ACKX_TIMEOUT;
queue_packet_complete(packet);
}
spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
}
/* Kernel thread and vars, which handles packets that are completed. Only
* packets that have a "complete" function are sent here. This way, the
* completion is run out of kernel context, and doesn't block the rest of
* the stack. */
static int khpsbpkt_pid = -1;
static DECLARE_COMPLETION(khpsbpkt_complete);
static LIST_HEAD(hpsbpkt_list);
struct sk_buff_head hpsbpkt_queue;
static DECLARE_MUTEX_LOCKED(khpsbpkt_sig);
static spinlock_t khpsbpkt_lock = SPIN_LOCK_UNLOCKED;
static void queue_packet_complete(struct hpsb_packet *packet)
{
if (packet->complete_routine != NULL) {
unsigned long flags;
spin_lock_irqsave(&khpsbpkt_lock, flags);
list_add_tail(&packet->list, &hpsbpkt_list);
spin_unlock_irqrestore(&khpsbpkt_lock, flags);
skb_queue_tail(&hpsbpkt_queue, packet->skb);
/* Signal the kernel thread to handle this */
up(&khpsbpkt_sig);
......@@ -1018,24 +1013,24 @@ static void queue_packet_complete(struct hpsb_packet *packet)
static int hpsbpkt_thread(void *__hi)
{
struct hpsb_packet *packet, *next;
unsigned long flags;
struct sk_buff *skb;
struct hpsb_packet *packet;
void (*complete_routine)(void*);
void *complete_data;
daemonize("khpsbpkt");
allow_signal(SIGTERM);
while (!down_interruptible(&khpsbpkt_sig)) {
spin_lock_irqsave(&khpsbpkt_lock, flags);
list_for_each_entry_safe(packet, next, &hpsbpkt_list, list) {
void (*complete_routine)(void*) = packet->complete_routine;
void *complete_data = packet->complete_data;
while ((skb = skb_dequeue(&hpsbpkt_queue)) != NULL) {
packet = (struct hpsb_packet *)skb->data;
complete_routine = packet->complete_routine;
complete_data = packet->complete_data;
list_del(&packet->list);
packet->complete_routine = packet->complete_data = NULL;
complete_routine(complete_data);
}
spin_unlock_irqrestore(&khpsbpkt_lock, flags);
}
complete_and_exit(&khpsbpkt_complete, 0);
......@@ -1046,6 +1041,8 @@ static int __init ieee1394_init(void)
{
int i;
skb_queue_head_init(&hpsbpkt_queue);
if (hpsb_init_config_roms()) {
HPSB_ERR("Failed to initialize some config rom entries.\n");
HPSB_ERR("Some features may not be available\n");
......@@ -1066,9 +1063,6 @@ static int __init ieee1394_init(void)
devfs_mk_dir("ieee1394");
hpsb_packet_cache = kmem_cache_create("hpsb_packet", sizeof(struct hpsb_packet),
0, SLAB_HWCACHE_ALIGN, NULL, NULL);
bus_register(&ieee1394_bus_type);
for (i = 0; fw_bus_attrs[i]; i++)
bus_create_file(&ieee1394_bus_type, fw_bus_attrs[i]);
......@@ -1104,8 +1098,6 @@ static void __exit ieee1394_cleanup(void)
wait_for_completion(&khpsbpkt_complete);
}
kmem_cache_destroy(hpsb_packet_cache);
hpsb_cleanup_config_roms();
unregister_chrdev_region(IEEE1394_CORE_DEV, 256);
......
......@@ -12,9 +12,13 @@
struct hpsb_packet {
/* This struct is basically read-only for hosts with the exception of
* the data buffer contents and xnext - see below. */
struct list_head list;
/* This can be used for host driver internal linking. */
/* This can be used for host driver internal linking.
*
* NOTE: This must be left in init state when the driver is done
* with it (e.g. by using list_del_init()), since the core does
* some sanity checks to make sure the packet is not on a
* driver_list when freeing it. */
struct list_head driver_list;
nodeid_t node_id;
......@@ -27,10 +31,9 @@ struct hpsb_packet {
* queued = queued for sending
* pending = sent, waiting for response
* complete = processing completed, successful or not
* incoming = incoming packet
*/
enum {
hpsb_unused, hpsb_queued, hpsb_pending, hpsb_complete, hpsb_incoming
enum {
hpsb_unused, hpsb_queued, hpsb_pending, hpsb_complete
} __attribute__((packed)) state;
/* These are core internal. */
......@@ -67,6 +70,9 @@ struct hpsb_packet {
void (*complete_routine)(void *);
void *complete_data;
/* XXX This is just a hack at the moment */
struct sk_buff *skb;
/* Store jiffies for implementing bus timeouts. */
unsigned long sendtime;
......@@ -141,7 +147,7 @@ int hpsb_bus_reset(struct hpsb_host *host);
*/
void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid);
/*
* Notify completion of SelfID stage to the core and report new physical ID
* and whether host is root now.
*/
......
......@@ -67,7 +67,7 @@ static void fill_async_writeblock(struct hpsb_packet *packet, u64 addr, int leng
packet->data_size = length + (length % 4 ? 4 - (length % 4) : 0);
}
static void fill_async_lock(struct hpsb_packet *packet, u64 addr, int extcode,
int length)
{
PREP_ASYNC_HEAD_ADDRESS(TCODE_LOCK_REQUEST);
......@@ -89,10 +89,10 @@ static void fill_iso_packet(struct hpsb_packet *packet, int length, int channel,
packet->tcode = TCODE_ISO_DATA;
}
static void fill_phy_packet(struct hpsb_packet *packet, quadlet_t data)
{
packet->header[0] = data;
packet->header[1] = ~data;
packet->header_size = 8;
packet->data_size = 0;
packet->expect_response = 0;
......@@ -145,7 +145,7 @@ int hpsb_get_tlabel(struct hpsb_packet *packet)
}
spin_lock_irqsave(&tp->lock, flags);
packet->tlabel = find_next_zero_bit(tp->pool, 64, tp->next);
if (packet->tlabel > 63)
packet->tlabel = find_first_zero_bit(tp->pool, 64);
......@@ -158,7 +158,7 @@ int hpsb_get_tlabel(struct hpsb_packet *packet)
return 0;
}
/**
* hpsb_free_tlabel - free an allocated transaction label
* @packet: packet whose tlabel/tpool needs to be cleared
*
......@@ -173,7 +173,7 @@ void hpsb_free_tlabel(struct hpsb_packet *packet)
{
unsigned long flags;
struct hpsb_tlabel_pool *tp;
tp = &packet->host->tpool[packet->node_id & NODE_MASK];
BUG_ON(packet->tlabel > 63 || packet->tlabel < 0);
......@@ -204,7 +204,7 @@ int hpsb_packet_success(struct hpsb_packet *packet)
return -EINVAL;
default:
HPSB_ERR("received reserved rcode %d from node %d",
(packet->header[1] >> 12) & 0xf,
packet->node_id);
return -EAGAIN;
}
......@@ -268,7 +268,7 @@ struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node,
if (length == 0)
return NULL;
packet = hpsb_alloc_packet((length + 3) & ~3);
packet = hpsb_alloc_packet(length);
if (!packet)
return NULL;
......@@ -296,7 +296,7 @@ struct hpsb_packet *hpsb_make_writepacket (struct hpsb_host *host, nodeid_t node
if (length == 0)
return NULL;
packet = hpsb_alloc_packet((length + 3) & ~3);
packet = hpsb_alloc_packet(length);
if (!packet)
return NULL;
......@@ -330,7 +330,7 @@ struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer, i
if (length == 0)
return NULL;
packet = hpsb_alloc_packet((length + 3) & ~3);
packet = hpsb_alloc_packet(length);
if (!packet)
return NULL;
......@@ -338,7 +338,7 @@ struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer, i
packet->data[length >> 2] = 0;
}
packet->host = host;
if (hpsb_get_tlabel(packet)) {
hpsb_free_packet(packet);
return NULL;
......@@ -430,17 +430,17 @@ struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host, nodeid_t node
}
struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host,
quadlet_t data)
{
struct hpsb_packet *p;
p = hpsb_alloc_packet(0);
if (!p) return NULL;
p->host = host;
fill_phy_packet(p, data);
return p;
}
struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host,
......@@ -470,7 +470,7 @@ int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
{
struct hpsb_packet *packet;
int retval = 0;
if (length == 0)
return -EINVAL;
......@@ -544,7 +544,7 @@ int hpsb_lock(struct hpsb_host *host, nodeid_t node, unsigned int generation,
BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet
packet = hpsb_make_lockpacket(host, node, addr, extcode, data, arg);
if (!packet)
return -ENOMEM;
......@@ -607,7 +607,7 @@ int hpsb_send_gasp(struct hpsb_host *host, int channel, unsigned int generation,
HPSB_VERBOSE("Send GASP: channel = %d, length = %Zd", channel, length);
length += 8;
packet = hpsb_make_streampacket(host, NULL, length, channel, 3, 0);
if (!packet)
return -ENOMEM;
......
......@@ -274,7 +274,7 @@ int hpsb_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
cycle %= 8000;
isoctl_args[0] = cycle;
if (tag_mask < 0)
/* match all tags */
tag_mask = 0xF;
......@@ -358,7 +358,7 @@ int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, u8 tag
}
}
out:
spin_unlock_irqrestore(&iso->lock, flags);
return rv;
}
......
......@@ -88,7 +88,7 @@ static struct csr1212_bus_ops nodemgr_csr_ops = {
};
/*
* Basically what we do here is start off retrieving the bus_info block.
* From there will fill in some info about the node, verify it is of IEEE
* 1394 type, and that the crc checks out ok. After that we start off with
......@@ -102,7 +102,7 @@ static struct csr1212_bus_ops nodemgr_csr_ops = {
* that's easy to parse by the protocol interface.
*/
/*
* The nodemgr relies heavily on the Driver Model for device callbacks and
* driver/device mappings. The old nodemgr used to handle all this itself,
* but now we are much simpler because of the LDM.
......@@ -273,7 +273,7 @@ static ssize_t fw_show_ne_bus_options(struct device *dev, char *buf)
ne->busopt.irmc,
ne->busopt.cmc, ne->busopt.isc, ne->busopt.bmc,
ne->busopt.pmc, ne->busopt.generation, ne->busopt.lnkspd,
ne->busopt.max_rec,
ne->busopt.max_rom,
ne->busopt.cyc_clk_acc);
}
......@@ -328,7 +328,7 @@ static ssize_t fw_get_ignore_driver(struct device *dev, char *buf)
struct unit_directory *ud = container_of(dev, struct unit_directory, device);
return sprintf(buf, "%d\n", ud->ignore_driver);
}
}
static DEVICE_ATTR(ignore_driver, S_IWUSR | S_IRUGO, fw_get_ignore_driver, fw_set_ignore_driver);
......@@ -356,7 +356,6 @@ static int nodemgr_rescan_bus_thread(void *__unused)
{
/* No userlevel access needed */
daemonize("kfwrescan");
allow_signal(SIGTERM);
bus_rescan_devices(&ieee1394_bus_type);
......@@ -726,7 +725,7 @@ static void nodemgr_update_bus_options(struct node_entry *ne)
ne->busopt.max_rom = (busoptions >> 8) & 0x3;
ne->busopt.generation = (busoptions >> 4) & 0xf;
ne->busopt.lnkspd = busoptions & 0x7;
HPSB_VERBOSE("NodeMgr: raw=0x%08x irmc=%d cmc=%d isc=%d bmc=%d pmc=%d "
"cyc_clk_acc=%d max_rec=%d max_rom=%d gen=%d lspd=%d",
busoptions, ne->busopt.irmc, ne->busopt.cmc,
......@@ -1012,7 +1011,7 @@ static void nodemgr_process_root_directory(struct host_info *hi, struct node_ent
case CSR1212_KV_ID_UNIT:
nodemgr_process_unit_directory(hi, ne, kv, &ud_id, NULL);
break;
case CSR1212_KV_ID_DESCRIPTOR:
if (last_key_id == CSR1212_KV_ID_VENDOR) {
......@@ -1056,13 +1055,14 @@ static int nodemgr_hotplug(struct class_device *cdev, char **envp, int num_envp,
#define PUT_ENVP(fmt,val) \
do { \
int printed; \
envp[i++] = buffer; \
length += snprintf(buffer, buffer_size - length, \
printed = snprintf(buffer, buffer_size - length, \
fmt, val); \
if ((buffer_size - length <= 0) || (i >= num_envp)) \
if ((buffer_size - (length+printed) <= 0) || (i >= num_envp)) \
return -ENOMEM; \
++length; \
buffer += length; \
length += printed+1; \
buffer += printed+1; \
} while (0)
PUT_ENVP("VENDOR_ID=%06x", ud->vendor_id);
......@@ -1084,7 +1084,7 @@ static int nodemgr_hotplug(struct class_device *cdev, char **envp, int num_envp,
char *buffer, int buffer_size)
{
return -ENODEV;
}
}
#endif /* CONFIG_HOTPLUG */
......@@ -1150,7 +1150,6 @@ static void nodemgr_update_node(struct node_entry *ne, struct csr1212_csr *csr,
ne->generation = generation;
}
static void nodemgr_node_scan_one(struct host_info *hi,
......@@ -1381,8 +1380,9 @@ static void nodemgr_node_probe(struct host_info *hi, int generation)
static int nodemgr_do_irm_duties(struct hpsb_host *host, int cycles)
{
quadlet_t bc;
if (!host->is_irm)
/* if irm_id == -1 then there is no IRM on this bus */
if (!host->is_irm || host->irm_id == (nodeid_t)-1)
return 1;
host->csr.broadcast_channel |= 0x40000000; /* set validity bit */
......@@ -1467,7 +1467,6 @@ static int nodemgr_host_thread(void *__hi)
/* No userlevel access needed */
daemonize(hi->daemon_name);
allow_signal(SIGTERM);
/* Setup our device-model entries */
nodemgr_create_host_dev_files(host);
......@@ -1611,7 +1610,7 @@ int hpsb_node_read(struct node_entry *ne, u64 addr,
addr, buffer, length);
}
int hpsb_node_write(struct node_entry *ne, u64 addr,
quadlet_t *buffer, size_t length)
{
unsigned int generation = ne->generation;
......@@ -1621,7 +1620,7 @@ int hpsb_node_write(struct node_entry *ne, u64 addr,
addr, buffer, length);
}
int hpsb_node_lock(struct node_entry *ne, u64 addr,
int extcode, quadlet_t *data, quadlet_t arg)
{
unsigned int generation = ne->generation;
......
......@@ -169,7 +169,7 @@ struct hpsb_host *hpsb_get_host_by_ne(struct node_entry *ne);
/*
* This will fill in the given, pre-initialised hpsb_packet with the current
* information from the node entry (host, node ID, generation number). It will
* return false if the node owning the GUID is not accessible (and will not
* modify the hpsb_packet) and return true otherwise.
*
* Note that packet sending may still fail in hpsb_send_packet if a bus reset
......@@ -181,9 +181,9 @@ void hpsb_node_fill_packet(struct node_entry *ne, struct hpsb_packet *pkt);
int hpsb_node_read(struct node_entry *ne, u64 addr,
quadlet_t *buffer, size_t length);
int hpsb_node_write(struct node_entry *ne, u64 addr,
quadlet_t *buffer, size_t length);
int hpsb_node_lock(struct node_entry *ne, u64 addr,
int extcode, quadlet_t *data, quadlet_t arg);
......
......@@ -32,7 +32,7 @@
* Things implemented, but still in test phase:
* . Iso Transmit
* . Async Stream Packets Transmit (Receive done via Iso interface)
*
*
* Things not implemented:
* . DMA error recovery
*
......@@ -41,7 +41,7 @@
* added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
*/
/*
* Acknowledgments:
*
* Adam J Richter <adam@yggdrasil.com>
......@@ -162,7 +162,7 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
static char version[] __devinitdata =
"$Rev: 1172 $ Ben Collins <bcollins@debian.org>";
"$Rev: 1203 $ Ben Collins <bcollins@debian.org>";
/* Module Parameters */
static int phys_dma = 1;
......@@ -185,7 +185,7 @@ static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
static void ohci1394_pci_remove(struct pci_dev *pdev);
#ifndef __LITTLE_ENDIAN
static unsigned hdr_sizes[] =
{
3, /* TCODE_WRITEQ */
4, /* TCODE_WRITEB */
......@@ -221,7 +221,7 @@ static inline void packet_swab(quadlet_t *data, int tcode)
* IEEE-1394 functionality section *
***********************************/
static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
{
int i;
unsigned long flags;
......@@ -243,9 +243,9 @@ static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
if (i >= OHCI_LOOP_COUNT)
PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
r, r & 0x80000000, i);
spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
return (r & 0x00ff0000) >> 16;
}
......@@ -303,7 +303,7 @@ static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
else
q0 = q[0];
if ((self_id_count & 0x80000000) ||
((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
PRINT(KERN_ERR,
"Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
......@@ -335,7 +335,7 @@ static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
q0 = q[0];
q1 = q[1];
}
if (q0 == ~q1) {
DBGMSG ("SelfID packet 0x%x received", q0);
hpsb_selfid_received(host, cpu_to_be32(q0));
......@@ -358,7 +358,7 @@ static void ohci_soft_reset(struct ti_ohci *ohci) {
int i;
reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
for (i = 0; i < OHCI_LOOP_COUNT; i++) {
if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
break;
......@@ -367,32 +367,6 @@ static void ohci_soft_reset(struct ti_ohci *ohci) {
DBGMSG ("Soft reset finished");
}
static int run_context(struct ti_ohci *ohci, int reg, char *msg)
{
u32 nodeId;
/* check that the node id is valid */
nodeId = reg_read(ohci, OHCI1394_NodeID);
if (!(nodeId&0x80000000)) {
PRINT(KERN_ERR,
"Running dma failed because Node ID is not valid");
return -1;
}
/* check that the node number != 63 */
if ((nodeId&0x3f)==63) {
PRINT(KERN_ERR,
"Running dma failed because Node ID == 63");
return -1;
}
/* Run the dma context */
reg_write(ohci, reg, 0x8000);
if (msg) PRINT(KERN_DEBUG, "%s", msg);
return 0;
}
/* Generate the dma receive prgs and start the context */
static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
......@@ -404,7 +378,7 @@ static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
for (i=0; i<d->num_desc; i++) {
u32 c;
c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
if (generate_irq)
c |= DMA_CTL_IRQ;
......@@ -433,7 +407,7 @@ static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
/* Set bufferFill, isochHeader, multichannel for IR context */
reg_write(ohci, d->ctrlSet, 0xd0000000);
/* Set the context match register to match on all tags */
reg_write(ohci, d->ctxtMatch, 0xf0000000);
......@@ -505,7 +479,7 @@ static void ohci_initialize(struct ti_ohci *ohci)
spin_lock_init(&ohci->phy_reg_lock);
spin_lock_init(&ohci->event_lock);
/* Put some defaults to these undefined bus options */
buf = reg_read(ohci, OHCI1394_BusOptions);
buf |= 0xE0000000; /* Enable IRMC, CMC and ISC */
......@@ -521,7 +495,7 @@ static void ohci_initialize(struct ti_ohci *ohci)
/* Clear link control register */
reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
/* Enable cycle timer and cycle master and set the IRM
* contender bit in our self ID packets. */
reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_CycleTimerEnable |
......@@ -539,10 +513,10 @@ static void ohci_initialize(struct ti_ohci *ohci)
reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);
/* Now get our max packet size */
ohci->max_packet_size =
1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);
/* Don't accept phy packets into AR request context */
reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);
/* Clear the interrupt mask */
......@@ -561,15 +535,15 @@ static void ohci_initialize(struct ti_ohci *ohci)
initialize_dma_trm_ctx(&ohci->at_req_context);
initialize_dma_trm_ctx(&ohci->at_resp_context);
/*
* Accept AT requests from all nodes. This probably
* will have to be controlled from the subsystem
* on a per node basis.
*/
reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0x80000000);
/* Specify AT retries */
reg_write(ohci, OHCI1394_ATRetries,
OHCI1394_MAX_AT_REQ_RETRIES |
(OHCI1394_MAX_AT_RESP_RETRIES<<4) |
(OHCI1394_MAX_PHYS_RESP_RETRIES<<8));
......@@ -580,8 +554,8 @@ static void ohci_initialize(struct ti_ohci *ohci)
/* Enable interrupts */
reg_write(ohci, OHCI1394_IntMaskSet,
OHCI1394_unrecoverableError |
OHCI1394_masterIntEnable |
OHCI1394_busReset |
OHCI1394_selfIDComplete |
OHCI1394_RSPkt |
OHCI1394_RQPkt |
......@@ -620,13 +594,46 @@ static void ohci_initialize(struct ti_ohci *ohci)
if (status & 0x20)
set_phy_reg(ohci, 8, status & ~1);
}
/* Serial EEPROM Sanity check. */
if ((ohci->max_packet_size < 512) ||
(ohci->max_packet_size > 4096)) {
/* Serial EEPROM contents are suspect, set a sane max packet
* size and print the raw contents for bug reports if verbose
* debug is enabled. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
int i;
#endif
PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
"attempting to setting max_packet_size to 512 bytes");
reg_write(ohci, OHCI1394_BusOptions,
(reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
ohci->max_packet_size = 512;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
PRINT(KERN_DEBUG, " EEPROM Present: %d",
(reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);
for (i = 0;
((i < 1000) &&
(reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
udelay(10);
for (i = 0; i < 0x20; i++) {
reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
PRINT(KERN_DEBUG, " EEPROM %02x: %02x", i,
(reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
}
#endif
}
}
/*
* Insert a packet in the DMA fifo and generate the DMA prg
* FIXME: rewrite the program in order to accept packets crossing
* page boundaries.
* check also that a single dma descriptor doesn't cross a
* page boundary.
*/
static void insert_packet(struct ti_ohci *ohci,
......@@ -644,13 +651,13 @@ static void insert_packet(struct ti_ohci *ohci,
d->prg_cpu[idx]->begin.branchAddress = 0;
if (d->type == DMA_CTX_ASYNC_RESP) {
/*
* For response packets, we need to put a timeout value in
* the 16 lower bits of the status... let's try 1 sec timeout
*/
cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
d->prg_cpu[idx]->begin.status = cpu_to_le32(
(((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
((cycleTimer&0x01fff000)>>12));
DBGMSG("cycleTimer: %08x timeStamp: %08x",
......@@ -674,7 +681,7 @@ static void insert_packet(struct ti_ohci *ohci,
} else {
/* Sending a normal async request or response */
d->prg_cpu[idx]->data[1] =
(packet->header[1] & 0xFFFF) |
(packet->header[0] & 0xFFFF0000);
d->prg_cpu[idx]->data[2] = packet->header[2];
d->prg_cpu[idx]->data[3] = packet->header[3];
......@@ -694,10 +701,10 @@ static void insert_packet(struct ti_ohci *ohci,
}
d->prg_cpu[idx]->end.control =
cpu_to_le32(DMA_CTL_OUTPUT_LAST |
DMA_CTL_IRQ |
DMA_CTL_BRANCH |
packet->data_size);
/*
* Check that the packet data buffer
* does not cross a page boundary.
*
......@@ -706,7 +713,7 @@ static void insert_packet(struct ti_ohci *ohci,
* problem.
*/
#if 0
if (cross_bound((unsigned long)packet->data,
packet->data_size)>0) {
/* FIXME: do something about it */
PRINT(KERN_ERR,
......@@ -723,28 +730,28 @@ static void insert_packet(struct ti_ohci *ohci,
d->prg_cpu[idx]->end.branchAddress = 0;
d->prg_cpu[idx]->end.status = 0;
if (d->branchAddrPtr)
*(d->branchAddrPtr) =
cpu_to_le32(d->prg_bus[idx] | 0x3);
d->branchAddrPtr =
&(d->prg_cpu[idx]->end.branchAddress);
} else { /* quadlet transmit */
if (packet->type == hpsb_raw)
d->prg_cpu[idx]->begin.control =
cpu_to_le32(DMA_CTL_OUTPUT_LAST |
DMA_CTL_IMMEDIATE |
DMA_CTL_IRQ |
DMA_CTL_BRANCH |
(packet->header_size + 4));
else
d->prg_cpu[idx]->begin.control =
cpu_to_le32(DMA_CTL_OUTPUT_LAST |
DMA_CTL_IMMEDIATE |
DMA_CTL_IRQ |
DMA_CTL_BRANCH |
packet->header_size);
if (d->branchAddrPtr)
*(d->branchAddrPtr) =
cpu_to_le32(d->prg_bus[idx] | 0x2);
d->branchAddrPtr =
......@@ -756,11 +763,11 @@ static void insert_packet(struct ti_ohci *ohci,
(packet->header[0] & 0xFFFF);
d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
packet_swab(d->prg_cpu[idx]->data, packet->tcode);
d->prg_cpu[idx]->begin.control =
cpu_to_le32(DMA_CTL_OUTPUT_MORE |
DMA_CTL_IMMEDIATE | 0x8);
d->prg_cpu[idx]->end.control =
cpu_to_le32(DMA_CTL_OUTPUT_LAST |
DMA_CTL_UPDATE |
DMA_CTL_IRQ |
......@@ -790,7 +797,7 @@ static void insert_packet(struct ti_ohci *ohci,
d->prg_cpu[idx]->end.address,
d->prg_cpu[idx]->end.branchAddress,
d->prg_cpu[idx]->end.status);
if (d->branchAddrPtr)
*(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
}
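/*
 * Sketch of the chaining done above (an interpretation, not new driver code):
 * the low four bits OR'ed into the branch address are the OHCI "Z" value,
 * i.e. the number of 16-byte descriptors in the program block being branched
 * to (3 for a block-transmit program, 2 for an immediate-only one).  Each new
 * packet patches the previous packet's branchAddress and then exposes its
 * own, so the context keeps walking the FIFO without further CPU help:
 *
 *	*prev_branch = cpu_to_le32(this_prg_bus | z);
 *	prev_branch  = &this_prg->end.branchAddress;
 */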
......@@ -798,7 +805,7 @@ static void insert_packet(struct ti_ohci *ohci,
/* queue the packet in the appropriate context queue */
list_add_tail(&packet->driver_list, &d->fifo_list);
d->prg_ind = (d->prg_ind + 1) % d->num_desc;
}
/*
......@@ -806,46 +813,54 @@ static void insert_packet(struct ti_ohci *ohci,
* and runs or wakes up the DMA prg if necessary.
*
* The function MUST be called with the d->lock held.
*/
static int dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
struct hpsb_packet *p;
int idx,z;
struct hpsb_packet *packet, *ptmp;
int idx = d->prg_ind;
int z = 0;
if (list_empty(&d->pending_list) || d->free_prgs == 0)
return 0;
/* insert the packets into the dma fifo */
list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
if (!d->free_prgs)
break;
p = driver_packet(d->pending_list.next);
idx = d->prg_ind;
z = (p->data_size) ? 3 : 2;
/* For the first packet only */
if (!z)
z = (packet->data_size) ? 3 : 2;
/* insert the packets into the dma fifo */
while (d->free_prgs > 0 && !list_empty(&d->pending_list)) {
struct hpsb_packet *p = driver_packet(d->pending_list.next);
list_del(&p->driver_list);
insert_packet(ohci, d, p);
/* Insert the packet */
list_del_init(&packet->driver_list);
insert_packet(ohci, d, packet);
}
if (d->free_prgs == 0)
DBGMSG("Transmit DMA FIFO ctx=%d is full... waiting", d->ctx);
/* Nothing must have been done, either no free_prgs or no packets */
if (z == 0)
return;
/* Is the context running ? (should be unless it is
/* Is the context running ? (should be unless it is
the first packet to be sent in this context) */
if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
reg_write(ohci, d->cmdPtr, d->prg_bus[idx]|z);
run_context(ohci, d->ctrlSet, NULL);
}
else {
reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);
/* Check that the node id is valid, and not 63 */
if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
else
reg_write(ohci, d->ctrlSet, 0x8000);
} else {
/* Wake up the dma context if necessary */
if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
if (!(reg_read(ohci, d->ctrlSet) & 0x400))
DBGMSG("Waking transmit DMA ctx=%d",d->ctx);
}
/* do this always, to avoid race condition */
reg_write(ohci, d->ctrlSet, 0x1000);
}
return 1;
return;
}
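/*
 * A minimal usage sketch (assumed caller, modelled on ohci_transmit): queue
 * the packet on the pending list and flush while holding d->lock, as the
 * comment above the function requires.
 */
static void queue_and_flush_sketch(struct ti_ohci *ohci, struct dma_trm_ctx *d,
				   struct hpsb_packet *packet)
{
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_add_tail(&packet->driver_list, &d->pending_list);	/* park the packet */
	dma_trm_flush(ohci, d);					/* start or wake the context */
	spin_unlock_irqrestore(&d->lock, flags);
}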
/* Transmission of an async or iso packet */
......@@ -871,7 +886,7 @@ static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
* interrupt context, so we bail out if that is the
* case. I don't see anyone sending ISO packets from
* interrupt context anyway... */
if (ohci->it_legacy_context.ohci == NULL) {
if (in_interrupt()) {
PRINT(KERN_ERR,
......@@ -889,11 +904,11 @@ static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
initialize_dma_trm_ctx(&ohci->it_legacy_context);
}
d = &ohci->it_legacy_context;
} else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
d = &ohci->at_resp_context;
else
d = &ohci->at_req_context;
spin_lock_irqsave(&d->lock,flags);
......@@ -986,7 +1001,7 @@ static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
* enable cycleTimer, cycleMaster
*/
DBGMSG("Cycle master enabled");
reg_write(ohci, OHCI1394_LinkControlSet,
OHCI1394_LinkControl_CycleTimerEnable |
OHCI1394_LinkControl_CycleMaster);
}
......@@ -1011,7 +1026,7 @@ static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
if (arg<0 || arg>63) {
PRINT(KERN_ERR,
"%s: IS0 listen channel %d is out of range",
"%s: IS0 listen channel %d is out of range",
__FUNCTION__, arg);
return -EFAULT;
}
......@@ -1038,7 +1053,7 @@ static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
if (ohci->ISO_channel_usage & mask) {
PRINT(KERN_ERR,
"%s: IS0 listen channel %d is already used",
"%s: IS0 listen channel %d is already used",
__FUNCTION__, arg);
spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
return -EFAULT;
......@@ -1047,12 +1062,12 @@ static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
ohci->ISO_channel_usage |= mask;
ohci->ir_legacy_channels |= mask;
if (arg>31)
reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
1<<(arg-32));
else
reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
1<<arg);
spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
DBGMSG("Listening enabled on channel %d", arg);
......@@ -1064,32 +1079,32 @@ static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
if (arg<0 || arg>63) {
PRINT(KERN_ERR,
"%s: IS0 unlisten channel %d is out of range",
"%s: IS0 unlisten channel %d is out of range",
__FUNCTION__, arg);
return -EFAULT;
}
mask = (u64)0x1<<arg;
spin_lock_irqsave(&ohci->IR_channel_lock, flags);
if (!(ohci->ISO_channel_usage & mask)) {
PRINT(KERN_ERR,
"%s: IS0 unlisten channel %d is not used",
"%s: IS0 unlisten channel %d is not used",
__FUNCTION__, arg);
spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
return -EFAULT;
}
ohci->ISO_channel_usage &= ~mask;
ohci->ir_legacy_channels &= ~mask;
if (arg>31)
reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
1<<(arg-32));
else
reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
1<<arg);
spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
DBGMSG("Listening disabled on channel %d", arg);
......@@ -1215,7 +1230,7 @@ static int ohci_iso_recv_init(struct hpsb_iso *iso)
/* iso->irq_interval is in packets - translate that to blocks */
if (iso->irq_interval == 1)
recv->block_irq_interval = 1;
else
recv->block_irq_interval = iso->irq_interval *
((recv->nblocks+1)/iso->buf_packets);
......@@ -1241,7 +1256,7 @@ static int ohci_iso_recv_init(struct hpsb_iso *iso)
for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
recv->buf_stride *= 2);
if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
recv->buf_stride > PAGE_SIZE) {
/* this shouldn't happen, but anyway... */
......@@ -1285,7 +1300,7 @@ static int ohci_iso_recv_init(struct hpsb_iso *iso)
reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
}
/* write the DMA program */
ohci_iso_recv_program(iso);
......@@ -1293,7 +1308,7 @@ static int ohci_iso_recv_init(struct hpsb_iso *iso)
" (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
recv->dma_mode == BUFFER_FILL_MODE ?
"buffer-fill" : "packet-per-buffer",
iso->buf_size/PAGE_SIZE, iso->buf_size,
recv->nblocks, recv->buf_stride, recv->block_irq_interval);
return 0;
......@@ -1309,7 +1324,7 @@ static void ohci_iso_recv_stop(struct hpsb_iso *iso)
/* disable interrupts */
reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);
/* halt DMA */
ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
}
......@@ -1457,20 +1472,20 @@ static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, in
if (cycle != -1) {
u32 seconds;
/* enable cycleMatch */
reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));
/* set starting cycle */
cycle &= 0x1FFF;
/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
just snarf them from the current time */
seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
/* advance one second to give some extra time for DMA to start */
seconds += 1;
cycle |= (seconds & 3) << 13;
contextMatch |= cycle << 12;
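/*
 * A hedged sketch of the cycleMatch value assembled above: 'cycle' is only
 * modulo 8000, the two missing seconds bits come from the current cycle
 * timer, advanced by one second so the DMA program has time to start before
 * the match fires.  The helper name is illustrative only.
 */
static inline u32 build_cycle_match(struct ti_ohci *ohci, int cycle)
{
	u32 seconds = reg_read(ohci, OHCI1394_IsochronousCycleTimer) >> 25;

	cycle &= 0x1FFF;			/* 13-bit cycle number */
	cycle |= ((seconds + 1) & 3) << 13;	/* two seconds bits, one second ahead */
	return cycle;				/* caller shifts this into contextMatch */
}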
......@@ -1535,7 +1550,7 @@ static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
next->control |= cpu_to_le32(3 << 20);
next->status = cpu_to_le32(recv->buf_stride);
/* link prev to next */
prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
sizeof(struct dma_cmd) * next_i)
| 1); /* Z=1 */
......@@ -1593,15 +1608,15 @@ static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso
int wake = 0;
int runaway = 0;
struct ti_ohci *ohci = recv->ohci;
while (1) {
/* we expect the next parsable packet to begin at recv->dma_offset */
/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */
unsigned int offset;
unsigned short len, cycle;
unsigned char channel, tag, sy;
unsigned char *p = iso->data_buf.kvirt;
unsigned int this_block = recv->dma_offset/recv->buf_stride;
......@@ -1619,26 +1634,26 @@ static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso
break;
wake = 1;
/* parse data length, tag, channel, and sy */
/* note: we keep our own local copies of 'len' and 'offset'
so the user can't mess with them by poking in the mmap area */
len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);
if (len > 4096) {
PRINT(KERN_ERR,
"IR DMA error - bogus 'len' value %u\n", len);
}
channel = p[recv->dma_offset+1] & 0x3F;
tag = p[recv->dma_offset+1] >> 6;
sy = p[recv->dma_offset+0] & 0xF;
/* advance to data payload */
recv->dma_offset += 4;
/* check for wrap-around */
if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
recv->dma_offset -= recv->buf_stride*recv->nblocks;
......@@ -1651,7 +1666,7 @@ static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso
recv->dma_offset += len;
/* payload is padded to 4 bytes */
if (len % 4) {
recv->dma_offset += 4 - (len%4);
}
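/*
 * A sketch of the 4-byte iso header parsed in the loop above (buffer-fill
 * mode, OHCI spec section 10.6.1.1).  Field positions mirror the byte
 * accesses in the code; the helper itself is illustrative only.
 */
static void parse_bufferfill_header(const unsigned char *p, unsigned int *len,
				    unsigned char *channel, unsigned char *tag,
				    unsigned char *sy)
{
	*len = p[2] | (p[3] << 8);	/* little-endian 16-bit data length */
	*channel = p[1] & 0x3F;
	*tag = p[1] >> 6;
	*sy = p[0] & 0xF;
	/* the payload follows at p + 4 and is padded to the next 4-byte boundary */
}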
......@@ -1700,13 +1715,13 @@ static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_
/* loop over all blocks */
for (loop = 0; loop < recv->nblocks; loop++) {
/* check block_dma to see if it's done */
struct dma_cmd *im = &recv->block[recv->block_dma];
/* check the DMA descriptor for new writes to xferStatus */
u16 xferstatus = le32_to_cpu(im->status) >> 16;
/* rescount is the number of bytes *remaining to be written* in the block */
u16 rescount = le32_to_cpu(im->status) & 0xFFFF;
......@@ -1728,12 +1743,12 @@ static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_
we can't touch it until it's done */
break;
}
/* OK, the block is finished... */
/* sync our view of the block */
dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);
/* reset the DMA descriptor */
im->status = recv->buf_stride;
......@@ -1756,11 +1771,11 @@ static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_is
int count;
int wake = 0;
struct ti_ohci *ohci = recv->ohci;
/* loop over the entire buffer */
for (count = 0; count < recv->nblocks; count++) {
u32 packet_len = 0;
/* pointer to the DMA descriptor */
struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;
......@@ -1774,10 +1789,10 @@ static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_is
/* this packet hasn't come in yet; we are done for now */
goto out;
}
if (event == 0x11) {
/* packet received successfully! */
/* rescount is the number of bytes *remaining* in the packet buffer,
after the packet was written */
packet_len = recv->buf_stride - rescount;
......@@ -1790,7 +1805,7 @@ static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_is
/* sync our view of the buffer */
dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);
/* record the per-packet info */
{
/* iso header is 8 bytes ahead of the data payload */
......@@ -1806,7 +1821,7 @@ static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_is
/* skip iso header */
offset += 8;
packet_len -= 8;
cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
channel = hdr[5] & 0x3F;
tag = hdr[5] >> 6;
......@@ -1814,7 +1829,7 @@ static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_is
hpsb_iso_packet_received(iso, offset, packet_len, cycle, channel, tag, sy);
}
/* reset the DMA descriptor */
il->status = recv->buf_stride;
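/*
 * In packet-per-buffer mode each block holds exactly one packet, so the
 * received length is the block size minus the residual count left in the
 * descriptor.  A hedged sketch of that bookkeeping (the first 8 bytes are
 * the iso header plus timestamp, which the code above then skips):
 */
static inline unsigned int ppb_packet_len(struct dma_cmd *il, unsigned int buf_stride)
{
	u16 rescount = le32_to_cpu(il->status) & 0xFFFF;	/* bytes NOT written */

	return buf_stride - rescount;				/* bytes actually received */
}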
......@@ -1958,7 +1973,7 @@ static void ohci_iso_xmit_task(unsigned long data)
/* DMA descriptor */
struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);
/* check for new writes to xferStatus */
u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
u8 event = xferstatus & 0x1F;
......@@ -1971,16 +1986,16 @@ static void ohci_iso_xmit_task(unsigned long data)
if (event != 0x11)
PRINT(KERN_ERR,
"IT DMA error - OHCI error code 0x%02x\n", event);
/* at least one packet went out, so wake up the writer */
wake = 1;
/* parse cycle */
cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;
/* tell the subsystem the packet has gone out */
hpsb_iso_packet_sent(iso, cycle, event != 0x11);
/* reset the DMA descriptor for next time */
cmd->output_last.status = 0;
}
......@@ -2101,14 +2116,14 @@ static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
/* cycle match */
if (cycle != -1) {
u32 start = cycle & 0x1FFF;
/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
just snarf them from the current time */
u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
/* advance one second to give some extra time for DMA to start */
seconds += 1;
start |= (seconds & 3) << 13;
reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
......@@ -2201,6 +2216,7 @@ static void dma_trm_reset(struct dma_trm_ctx *d)
unsigned long flags;
LIST_HEAD(packet_list);
struct ti_ohci *ohci = d->ohci;
struct hpsb_packet *packet, *ptmp;
ohci1394_stop_context(ohci, d->ctrlClear, NULL);
......@@ -2221,19 +2237,20 @@ static void dma_trm_reset(struct dma_trm_ctx *d)
spin_unlock_irqrestore(&d->lock, flags);
/* Now process subsystem callbacks for the packets from the
* context. */
if (list_empty(&packet_list))
return;
while (!list_empty(&packet_list)) {
struct hpsb_packet *p = driver_packet(packet_list.next);
PRINT(KERN_INFO,
"AT dma reset ctx=%d, aborting transmission", d->ctx);
list_del(&p->driver_list);
hpsb_packet_sent(ohci->host, p, ACKX_ABORTED);
PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);
/* Now process subsystem callbacks for the packets from this
* context. */
list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
list_del_init(&packet->driver_list);
hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
}
}
static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
quadlet_t rx_event,
quadlet_t tx_event)
{
......@@ -2393,7 +2410,8 @@ static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
ohci1394_stop_context(ohci, d->ctrlClear,
"reqTxComplete");
else
tasklet_schedule(&d->task);
dma_trm_tasklet((unsigned long)d);
//tasklet_schedule(&d->task);
event &= ~OHCI1394_reqTxComplete;
}
if (event & OHCI1394_respTxComplete) {
......@@ -2436,7 +2454,7 @@ static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
event &= ~OHCI1394_isochRx;
}
if (event & OHCI1394_isochTx) {
quadlet_t tx_event;
tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
......@@ -2459,7 +2477,7 @@ static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
isroot = (node_id & 0x40000000) != 0;
DBGMSG("SelfID interrupt received "
"(phyid %d, %s)", phyid,
"(phyid %d, %s)", phyid,
(isroot ? "root" : "not root"));
handle_selfid(ohci, host, phyid, isroot);
......@@ -2535,10 +2553,10 @@ static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
#define cond_le32_to_cpu(data, noswap) \
(noswap ? data : le32_to_cpu(data))
static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
-1, 0, -1, 0, -1, -1, 16, -1};
/*
* Determine the length of a packet in the buffer
* Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
*/
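/*
 * A hedged reading of the TCODE_SIZE table above: each entry is the fixed
 * header length in bytes for that tcode, 0 marks block tcodes whose total
 * length also depends on the data_length field in the header, and -1 marks
 * tcodes that should never appear in the receive buffer.  Illustration
 * only, not the driver's actual length routine.
 */
static int header_len_for_tcode(int tcode)
{
	int len = TCODE_SIZE[tcode & 0xf];

	if (len < 0)
		return -1;	/* unexpected tcode */
	return len;		/* 0 => block packet, add data_length separately */
}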
......@@ -2669,7 +2687,7 @@ static void dma_rcv_tasklet (unsigned long data)
offset=0;
}
}
/* We get one phy packet to the async descriptor for each
* bus reset. We always ignore it. */
if (tcode != OHCI1394_TCODE_PHY) {
......@@ -2687,7 +2705,7 @@ static void dma_rcv_tasklet (unsigned long data)
ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
== 0x11) ? 1 : 0;
hpsb_packet_received(ohci->host, d->spb,
length-4, ack);
}
#ifdef OHCI1394_DEBUG
......@@ -2713,24 +2731,23 @@ static void dma_trm_tasklet (unsigned long data)
{
struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
struct hpsb_packet *packet;
struct hpsb_packet *packet, *ptmp;
unsigned long flags;
u32 status, ack;
size_t datasize;
spin_lock_irqsave(&d->lock, flags);
while (!list_empty(&d->fifo_list)) {
packet = driver_packet(d->fifo_list.next);
list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
datasize = packet->data_size;
if (datasize && packet->type != hpsb_raw)
status = le32_to_cpu(
d->prg_cpu[d->sent_ind]->end.status) >> 16;
else
status = le32_to_cpu(
d->prg_cpu[d->sent_ind]->begin.status) >> 16;
if (status == 0)
/* this packet hasn't been sent yet*/
break;
......@@ -2738,34 +2755,34 @@ static void dma_trm_tasklet (unsigned long data)
if (datasize)
if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
DBGMSG("Stream packet sent to channel %d tcode=0x%X "
"ack=0x%X spd=%d dataLength=%d ctx=%d",
"ack=0x%X spd=%d dataLength=%d ctx=%d",
(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
status&0x1f, (status>>5)&0x3,
le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
d->ctx);
else
DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
"0x%02X ack=0x%X spd=%d dataLength=%d ctx=%d",
"0x%02X ack=0x%X spd=%d dataLength=%d ctx=%d",
(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
status&0x1f, (status>>5)&0x3,
le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
d->ctx);
else
DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
"0x%02X ack=0x%X spd=%d data=0x%08X ctx=%d",
"0x%02X ack=0x%X spd=%d data=0x%08X ctx=%d",
(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
>>16)&0x3f,
(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
>>4)&0xf,
(le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
>>10)&0x3f,
status&0x1f, (status>>5)&0x3,
le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
d->ctx);
#endif
if (status & 0x10) {
ack = status & 0xf;
......@@ -2818,11 +2835,11 @@ static void dma_trm_tasklet (unsigned long data)
}
}
list_del(&packet->driver_list);
list_del_init(&packet->driver_list);
hpsb_packet_sent(ohci->host, packet, ack);
if (datasize) {
pci_unmap_single(ohci->dev,
cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
datasize, PCI_DMA_TODEVICE);
OHCI_DMA_FREE("single Xmit data packet");
......@@ -2867,7 +2884,7 @@ static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
for (i=0; i<d->num_desc; i++)
if (d->buf_cpu[i] && d->buf_bus[i]) {
pci_free_consistent(
ohci->dev, d->buf_size,
d->buf_cpu[i], d->buf_bus[i]);
OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
}
......@@ -2875,7 +2892,7 @@ static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
kfree(d->buf_bus);
}
if (d->prg_cpu) {
for (i=0; i<d->num_desc; i++)
if (d->prg_cpu[i] && d->prg_bus[i]) {
pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
......@@ -2921,7 +2938,7 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
memset(d->buf_cpu, 0, d->num_desc * sizeof(quadlet_t*));
memset(d->buf_bus, 0, d->num_desc * sizeof(dma_addr_t));
d->prg_cpu = kmalloc(d->num_desc * sizeof(struct dma_cmd*),
GFP_KERNEL);
d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
......@@ -2946,11 +2963,11 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
OHCI_DMA_ALLOC("dma_rcv prg pool");
for (i=0; i<d->num_desc; i++) {
d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
d->buf_size,
d->buf_bus+i);
OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
if (d->buf_cpu[i] != NULL) {
memset(d->buf_cpu[i], 0, d->buf_size);
} else {
......@@ -3015,7 +3032,7 @@ static void free_dma_trm_ctx(struct dma_trm_ctx *d)
DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
if (d->prg_cpu) {
for (i=0; i<d->num_desc; i++)
if (d->prg_cpu[i] && d->prg_bus[i]) {
pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
......@@ -3045,7 +3062,7 @@ alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
d->ctrlClear = 0;
d->cmdPtr = 0;
d->prg_cpu = kmalloc(d->num_desc * sizeof(struct at_dma_prg*),
GFP_KERNEL);
d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
......@@ -3194,7 +3211,7 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
* noByteSwapData registers to see if they were not cleared to
* zero. Should this work? Obviously it's not defined what these
* registers will read when they aren't supported. Bleh! */
if (dev->vendor == PCI_VENDOR_ID_APPLE &&
dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
ohci->no_swap_incoming = 1;
ohci->selfid_swap = 0;
......@@ -3217,7 +3234,7 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
/* We hardwire the MMIO length, since some CardBus adaptors
* fail to report the right length. Anyway, the ohci spec
* clearly says it's 2kb, so this shouldn't be a problem. */
ohci_base = pci_resource_start(dev, 0);
if (pci_resource_len(dev, 0) != OHCI1394_REGISTER_SIZE)
PRINT(KERN_WARNING, "Unexpected PCI resource length of %lx!",
......@@ -3248,7 +3265,7 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
/* self-id dma buffer allocation */
ohci->selfid_buf_cpu =
pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
&ohci->selfid_buf_bus);
OHCI_DMA_ALLOC("consistent selfid_buf");
......@@ -3259,8 +3276,8 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
"8Kb boundary... may cause problems on some CXD3222 chip",
ohci->selfid_buf_cpu);
"8Kb boundary... may cause problems on some CXD3222 chip",
ohci->selfid_buf_cpu);
/* No self-id errors at startup */
ohci->self_id_errors = 0;
......@@ -3423,7 +3440,7 @@ static void ohci1394_pci_remove(struct pci_dev *pdev)
free_dma_trm_ctx(&ohci->it_legacy_context);
case OHCI_INIT_HAVE_SELFID_BUFFER:
pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
ohci->selfid_buf_cpu,
ohci->selfid_buf_bus);
OHCI_DMA_FREE("consistent selfid_buf");
......@@ -3544,7 +3561,7 @@ int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
/* stop the channel program if it's still running */
reg_write(ohci, reg, 0x8000);
/* Wait until it effectively stops */
while (reg_read(ohci, reg) & 0x400) {
i++;
......
......@@ -110,7 +110,7 @@ struct dma_rcv_ctx {
int ctxtMatch;
};
/* DMA transmit context */
struct dma_trm_ctx {
struct ti_ohci *ohci;
enum context_type type;
......@@ -151,7 +151,7 @@ struct ohci1394_iso_tasklet {
struct ti_ohci {
struct pci_dev *dev;
enum {
OHCI_INIT_ALLOC_HOST,
OHCI_INIT_HAVE_MEM_REGION,
OHCI_INIT_HAVE_IOMAPPING,
......@@ -161,17 +161,17 @@ struct ti_ohci {
OHCI_INIT_HAVE_IRQ,
OHCI_INIT_DONE,
} init_state;
/* remapped memory spaces */
void *registers;
/* dma buffer for self-id packets */
quadlet_t *selfid_buf_cpu;
dma_addr_t selfid_buf_bus;
/* buffer for csr config rom */
quadlet_t *csr_config_rom_cpu;
dma_addr_t csr_config_rom_bus;
int csr_config_rom_length;
unsigned int max_packet_size;
......@@ -198,7 +198,7 @@ struct ti_ohci {
struct dma_rcv_ctx ir_legacy_context;
struct ohci1394_iso_tasklet ir_legacy_tasklet;
/* iso transmit */
int nb_iso_xmit_ctx;
unsigned long it_ctx_usage; /* use test_and_set_bit() for atomicity */
......@@ -260,7 +260,7 @@ static inline u32 reg_read(const struct ti_ohci *ohci, int offset)
/* 2 KiloBytes of register space */
#define OHCI1394_REGISTER_SIZE 0x800
/* Offsets relative to context bases defined below */
......@@ -440,9 +440,9 @@ static inline u32 reg_read(const struct ti_ohci *ohci, int offset)
#define OHCI1394_TCODE_PHY 0xE
void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet,
int type,
void (*func)(unsigned long),
unsigned long data);
int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
struct ohci1394_iso_tasklet *tasklet);
......
......@@ -23,7 +23,7 @@
* Contributions:
*
* Manfred Weihs <weihs@ict.tuwien.ac.at>
* reading bus info block (containing GUID) from serial
* eeprom via i2c and storing it in config ROM
* Reworked code for initiating bus resets
* (long, short, with or without hold-off)
......@@ -139,7 +139,7 @@ static struct i2c_algo_bit_data bit_data = {
.udelay = 5,
.mdelay = 5,
.timeout = 100,
};
static struct i2c_adapter bit_ops = {
.id = 0xAA, //FIXME: probably we should get an id in i2c-id.h
......@@ -195,19 +195,19 @@ static void free_pcl(struct ti_lynx *lynx, pcl_t pclid)
if (lynx->pcl_bmap[off] & 1<<bit) {
lynx->pcl_bmap[off] &= ~(1<<bit);
} else {
PRINT(KERN_ERR, lynx->id,
"attempted to free unallocated PCL %d", pclid);
}
spin_unlock(&lynx->lock);
}
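/*
 * A hedged sketch of the matching allocator for the bitmap that free_pcl()
 * above clears: scan for a clear bit, set it, and return off * 8 + bit as
 * the pcl_t handle.  The driver's real alloc_pcl() may differ in detail.
 */
static int alloc_pcl_sketch(struct ti_lynx *lynx)
{
	int off, bit;

	spin_lock(&lynx->lock);
	for (off = 0; off < LOCALRAM_SIZE / 1024; off++)
		for (bit = 0; bit < 8; bit++)
			if (!(lynx->pcl_bmap[off] & (1 << bit))) {
				lynx->pcl_bmap[off] |= 1 << bit;	/* mark PCL used */
				spin_unlock(&lynx->lock);
				return off * 8 + bit;
			}
	spin_unlock(&lynx->lock);
	return -1;	/* no free PCL */
}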
/* functions useful for debugging */
static void pretty_print_pcl(const struct ti_pcl *pcl)
{
int i;
printk("PCL next %08x, userdata %08x, status %08x, remtrans %08x, nextbuf %08x\n",
pcl->next, pcl->user_data, pcl->pcl_status,
pcl->remaining_transfer_count, pcl->next_data_buffer);
printk("PCL");
......@@ -218,7 +218,7 @@ static void pretty_print_pcl(const struct ti_pcl *pcl)
}
printk("\n");
}
static void print_pcl(const struct ti_lynx *lynx, pcl_t pclid)
{
struct ti_pcl pcl;
......@@ -419,7 +419,7 @@ static void handle_selfid(struct ti_lynx *lynx, struct hpsb_host *host)
cpu_to_be32s(&q[i]);
i--;
}
if (!lynx->phyic.reg_1394a) {
lsid = generate_own_selfid(lynx, host);
}
......@@ -437,7 +437,7 @@ static void handle_selfid(struct ti_lynx *lynx, struct hpsb_host *host)
while (size > 0) {
struct selfid *sid = (struct selfid *)q;
if (!lynx->phyic.reg_1394a && !sid->extended
&& (sid->phy_id == (phyid + 1))) {
hpsb_selfid_received(host, lsid);
}
......@@ -484,8 +484,7 @@ static void send_next(struct ti_lynx *lynx, int what)
}
packet = driver_packet(d->queue.next);
list_del(&packet->driver_list);
list_add_tail(&packet->driver_list, &d->pcl_queue);
list_move_tail(&packet->driver_list, &d->pcl_queue);
d->header_dma = pci_map_single(lynx->dev, packet->header,
packet->header_size, PCI_DMA_TODEVICE);
......@@ -500,11 +499,9 @@ static void send_next(struct ti_lynx *lynx, int what)
pcl.next = PCL_NEXT_INVALID;
pcl.async_error_next = PCL_NEXT_INVALID;
pcl.pcl_status = 0;
#ifdef __BIG_ENDIAN
pcl.buffer[0].control = packet->speed_code << 14 | packet->header_size;
#else
pcl.buffer[0].control = packet->speed_code << 14 | packet->header_size
| PCL_BIGENDIAN;
#ifdef __BIG_ENDIAN
pcl.buffer[0].control |= PCL_BIGENDIAN;
#endif
pcl.buffer[0].pointer = d->header_dma;
pcl.buffer[1].control = PCL_LAST_BUFF | packet->data_size;
......@@ -520,7 +517,7 @@ static void send_next(struct ti_lynx *lynx, int what)
case hpsb_raw:
pcl.buffer[0].control |= PCL_CMD_UNFXMT;
break;
}
}
put_pcl(lynx, d->pcl, &pcl);
run_pcl(lynx, d->pcl_start, d->channel);
......@@ -727,16 +724,16 @@ static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
case GET_CYCLE_COUNTER:
retval = reg_read(lynx, CYCLE_TIMER);
break;
case SET_CYCLE_COUNTER:
reg_write(lynx, CYCLE_TIMER, arg);
break;
case SET_BUS_ID:
reg_write(lynx, LINK_ID,
reg_write(lynx, LINK_ID,
break;
case ACT_CYCLE_MASTER:
if (arg) {
reg_set_bits(lynx, LINK_CONTROL,
......@@ -767,7 +764,7 @@ static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
get_pcl(lynx, lynx->async.pcl, &pcl);
packet = driver_packet(lynx->async.pcl_queue.next);
list_del(&packet->driver_list);
list_del_init(&packet->driver_list);
pci_unmap_single(lynx->dev, lynx->async.header_dma,
packet->header_size, PCI_DMA_TODEVICE);
......@@ -795,7 +792,7 @@ static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
while (!list_empty(&packet_list)) {
packet = driver_packet(packet_list.next);
list_del(&packet->driver_list);
list_del_init(&packet->driver_list);
hpsb_packet_sent(host, packet, ACKX_ABORTED);
}
......@@ -803,7 +800,7 @@ static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
case ISO_LISTEN_CHANNEL:
spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
if (lynx->iso_rcv.chan_count++ == 0) {
reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
DMA_WORD1_CMP_ENABLE_MASTER);
......@@ -875,7 +872,7 @@ static int mem_open(struct inode *inode, struct file *file)
int cid = iminor(inode);
enum { t_rom, t_aux, t_ram } type;
struct memdata *md;
if (cid < PCILYNX_MINOR_AUX_START) {
/* just for completeness */
return -ENXIO;
......@@ -976,10 +973,10 @@ loff_t mem_llseek(struct file *file, loff_t offs, int orig)
return newoffs;
}
/*
 * do not DMA if count is too small because this will have a serious impact
 * on performance - the value 2400 was found by experiment and may not work
 * everywhere as well as here - use the mem_mindma module option to change it
 */
static short mem_mindma = 2400;
module_param(mem_mindma, short, 0444);
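/*
 * A hedged sketch of how such a threshold is typically applied in the read
 * path: small requests are copied via programmed I/O, larger ones go through
 * DMA.  Illustrative only; the driver's real mem_read()/DMA path is not
 * reproduced here.
 */
static inline int worth_dma(size_t count)
{
	return count >= (size_t)mem_mindma;	/* 2400 bytes by default, tunable module option */
}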
......@@ -1123,7 +1120,7 @@ static ssize_t mem_read(struct file *file, char *buffer, size_t count,
}
static ssize_t mem_write(struct file *file, const char *buffer, size_t count,
loff_t *offset)
{
struct memdata *md = (struct memdata *)file->private_data;
......@@ -1292,7 +1289,7 @@ static irqreturn_t lynx_irq_handler(int irq, void *dev_id,
get_pcl(lynx, lynx->async.pcl, &pcl);
packet = driver_packet(lynx->async.pcl_queue.next);
list_del(&packet->driver_list);
list_del_init(&packet->driver_list);
pci_unmap_single(lynx->dev, lynx->async.header_dma,
packet->header_size, PCI_DMA_TODEVICE);
......@@ -1338,7 +1335,7 @@ static irqreturn_t lynx_irq_handler(int irq, void *dev_id,
get_pcl(lynx, lynx->iso_send.pcl, &pcl);
packet = driver_packet(lynx->iso_send.pcl_queue.next);
list_del(&packet->driver_list);
list_del_init(&packet->driver_list);
pci_unmap_single(lynx->dev, lynx->iso_send.header_dma,
packet->header_size, PCI_DMA_TODEVICE);
......@@ -1375,7 +1372,7 @@ static irqreturn_t lynx_irq_handler(int irq, void *dev_id,
int stat = reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ASYNC_RCV));
PRINTD(KERN_DEBUG, lynx->id, "received packet size %d",
stat & 0x1fff);
if (stat & DMA_CHAN_STAT_SELFID) {
lynx->selfid_size = stat & 0x1fff;
......@@ -1417,7 +1414,7 @@ static void iso_rcv_bh(struct ti_lynx *lynx)
lynx->iso_rcv.stat[idx]);
}
if (lynx->iso_rcv.stat[idx]
& (DMA_CHAN_STAT_PCIERR | DMA_CHAN_STAT_PKTERR)) {
PRINT(KERN_INFO, lynx->id,
"iso receive error on %d to 0x%p", idx, data);
......@@ -1460,7 +1457,7 @@ static void remove_card(struct pci_dev *dev)
reg_write(lynx, PCI_INT_ENABLE, 0);
free_irq(lynx->dev->irq, lynx);
/* Disable IRM Contender */
/* Disable IRM Contender and LCtrl */
if (lynx->phyic.reg_1394a)
set_phy_reg(lynx, 4, ~0xc0 & get_phy_reg(lynx, 4));
......@@ -1558,7 +1555,7 @@ static int __devinit add_card(struct pci_dev *dev,
if (lynx->pcl_mem != NULL) {
lynx->state = have_pcl_mem;
PRINT(KERN_INFO, lynx->id,
"allocated PCL memory %d Bytes @ 0x%p", LOCALRAM_SIZE,
lynx->pcl_mem);
} else {
......@@ -1668,7 +1665,7 @@ static int __devinit add_card(struct pci_dev *dev,
lynx->async.channel = CHANNEL_ASYNC_SEND;
lynx->iso_send.queue_lock = SPIN_LOCK_UNLOCKED;
lynx->iso_send.channel = CHANNEL_ISO_SEND;
PRINT(KERN_INFO, lynx->id, "remapped memory spaces reg 0x%p, rom 0x%p, "
"ram 0x%p, aux 0x%p", lynx->registers, lynx->local_rom,
lynx->local_ram, lynx->aux_port);
......@@ -1698,17 +1695,17 @@ static int __devinit add_card(struct pci_dev *dev,
pcl.next = PCL_NEXT_INVALID;
pcl.async_error_next = PCL_NEXT_INVALID;
#ifdef __BIG_ENDIAN
pcl.buffer[0].control = PCL_CMD_RCV | 16;
pcl.buffer[1].control = PCL_LAST_BUFF | 4080;
#else
pcl.buffer[0].control = PCL_CMD_RCV | PCL_BIGENDIAN | 16;
pcl.buffer[1].control = PCL_LAST_BUFF | 4080;
#ifdef __BIG_ENDIAN
pcl.buffer[0].control |= PCL_BIGENDIAN;
#endif
pcl.buffer[1].control = PCL_LAST_BUFF | 4080;
pcl.buffer[0].pointer = lynx->rcv_page_dma;
pcl.buffer[1].pointer = lynx->rcv_page_dma + 16;
put_pcl(lynx, lynx->rcv_pcl, &pcl);
pcl.next = pcl_bus(lynx, lynx->async.pcl);
pcl.async_error_next = pcl_bus(lynx, lynx->async.pcl);
put_pcl(lynx, lynx->async.pcl_start, &pcl);
......@@ -1729,7 +1726,7 @@ static int __devinit add_card(struct pci_dev *dev,
int page = i / ISORCV_PER_PAGE;
int sec = i % ISORCV_PER_PAGE;
pcl.buffer[0].pointer = lynx->iso_rcv.page_dma[page]
+ sec * MAX_ISORCV_SIZE;
pcl.buffer[1].pointer = pcl.buffer[0].pointer + 4;
put_pcl(lynx, lynx->iso_rcv.pcl[i], &pcl);
......@@ -1755,11 +1752,11 @@ static int __devinit add_card(struct pci_dev *dev,
reg_write(lynx, LINK_INT_ENABLE, LINK_INT_PHY_TIMEOUT
| LINK_INT_PHY_REG_RCVD | LINK_INT_PHY_BUSRESET
| LINK_INT_ISO_STUCK | LINK_INT_ASYNC_STUCK
| LINK_INT_SENT_REJECT | LINK_INT_TX_INVALID_TC
| LINK_INT_GRF_OVERFLOW | LINK_INT_ITF_UNDERFLOW
| LINK_INT_ATF_UNDERFLOW);
reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ASYNC_RCV), 0xa<<4);
reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
......@@ -1786,14 +1783,14 @@ static int __devinit add_card(struct pci_dev *dev,
/* attempt to enable contender bit -FIXME- would this work
* elsewhere? */
reg_set_bits(lynx, GPIO_CTRL_A, 0x1);
reg_write(lynx, GPIO_DATA_BASE + 0x3c, 0x1);
} else {
/* set the contender bit in the extended PHY register
/* set the contender and LCtrl bit in the extended PHY register
* set. (Should check that bits 0,1,2 (=0xE0) are set
* in register 2?)
*/
i = get_phy_reg(lynx, 4);
if (i != -1) set_phy_reg(lynx, 4, i | 0x40);
if (i != -1) set_phy_reg(lynx, 4, i | 0xc0);
}
......@@ -1820,7 +1817,7 @@ static int __devinit add_card(struct pci_dev *dev,
{
/* do i2c stuff */
unsigned char i2c_cmd = 0x10;
struct i2c_msg msg[2] = { { 0x50, 0, 1, &i2c_cmd },
{ 0x50, I2C_M_RD, 20, (unsigned char*) lynx->bus_info_block }
};
......
......@@ -47,7 +47,7 @@ struct ti_lynx {
enum { clear, have_intr, have_aux_buf, have_pcl_mem,
have_1394_buffers, have_iomappings, is_host } state;
/* remapped memory spaces */
void *registers;
void *local_rom;
......@@ -66,9 +66,9 @@ struct ti_lynx {
#endif
/*
* use local RAM of LOCALRAM_SIZE bytes for PCLs, which allows for
* LOCALRAM_SIZE * 8 PCLs (each sized 128 bytes);
* the following is an allocation bitmap
*/
u8 pcl_bmap[LOCALRAM_SIZE / 1024];
......@@ -167,7 +167,7 @@ static inline void reg_clear_bits(const struct ti_lynx *lynx, int offset,
#define SERIAL_EEPROM_CONTROL 0x44
#define PCI_INT_STATUS 0x48
#define PCI_INT_ENABLE 0x4c
/* status and enable have identical bit numbers */
#define PCI_INT_INT_PEND (1<<31)
#define PCI_INT_FORCED_INT (1<<30)
......@@ -199,7 +199,7 @@ static inline void reg_clear_bits(const struct ti_lynx *lynx, int offset,
#define LBUS_ADDR_SEL_RAM (0x0<<16)
#define LBUS_ADDR_SEL_ROM (0x1<<16)
#define LBUS_ADDR_SEL_AUX (0x2<<16)
#define LBUS_ADDR_SEL_ZV (0x3<<16)
#define GPIO_CTRL_A 0xb8
#define GPIO_CTRL_B 0xbc
......@@ -208,14 +208,14 @@ static inline void reg_clear_bits(const struct ti_lynx *lynx, int offset,
#define DMA_BREG(base, chan) (base + chan * 0x20)
#define DMA_SREG(base, chan) (base + chan * 0x10)
#define DMA0_PREV_PCL 0x100
#define DMA1_PREV_PCL 0x120
#define DMA2_PREV_PCL 0x140
#define DMA3_PREV_PCL 0x160
#define DMA4_PREV_PCL 0x180
#define DMA_PREV_PCL(chan) (DMA_BREG(DMA0_PREV_PCL, chan))
#define DMA0_CURRENT_PCL 0x104
#define DMA1_CURRENT_PCL 0x124
#define DMA2_CURRENT_PCL 0x144
#define DMA3_CURRENT_PCL 0x164
......@@ -237,14 +237,14 @@ static inline void reg_clear_bits(const struct ti_lynx *lynx, int offset,
#define DMA_CHAN_STAT_SPECIALACK (1<<14)
#define DMA0_CHAN_CTRL 0x110
#define DMA1_CHAN_CTRL 0x130
#define DMA2_CHAN_CTRL 0x150
#define DMA3_CHAN_CTRL 0x170
#define DMA4_CHAN_CTRL 0x190
#define DMA_CHAN_CTRL(chan) (DMA_BREG(DMA0_CHAN_CTRL, chan))
/* CHAN_CTRL registers share bits */
#define DMA_CHAN_CTRL_ENABLE (1<<31)
#define DMA_CHAN_CTRL_BUSY (1<<30)
#define DMA_CHAN_CTRL_LINK (1<<29)
......@@ -353,7 +353,7 @@ static inline void reg_clear_bits(const struct ti_lynx *lynx, int offset,
#define LINK_INT_GRF_OVERFLOW (1<<5)
#define LINK_INT_ITF_UNDERFLOW (1<<4)
#define LINK_INT_ATF_UNDERFLOW (1<<3)
#define LINK_INT_ISOARB_FAILED (1<<0)
/* PHY specifics */
#define PHY_VENDORID_TI 0x800028
......
......@@ -33,7 +33,7 @@ struct file_info {
spinlock_t reqlists_lock;
wait_queue_head_t poll_wait_complete;
struct list_head addr_list;
u8 *fcp_buffer;
......
......@@ -235,10 +235,10 @@ static void remove_host(struct hpsb_host *host)
if (hi != NULL) {
list_del(&hi->list);
host_count--;
/*
FIXME: address ranges should be removed
and fileinfo states should be initialized
(including setting generation to
internal-generation ...)
*/
}
......@@ -339,7 +339,7 @@ static void iso_receive(struct hpsb_host *host, int channel, quadlet_t *data,
req->req.misc = 0;
req->req.recvb = ptr2int(fi->iso_buffer);
req->req.length = min(length, fi->iso_buffer_length);
list_add_tail(&req->list, &reqs);
}
}
......@@ -399,7 +399,7 @@ static void fcp_request(struct hpsb_host *host, int nodeid, int direction,
req->req.misc = nodeid | (direction << 16);
req->req.recvb = ptr2int(fi->fcp_buffer);
req->req.length = length;
list_add_tail(&req->list, &reqs);
}
}
......@@ -502,7 +502,7 @@ static int state_initialized(struct file_info *fi, struct pending_request *req)
if (khl != NULL) {
req->req.misc = host_count;
req->data = (quadlet_t *)khl;
list_for_each_entry(hi, &host_info_list, list) {
khl->nodes = hi->host->node_count;
strcpy(khl->name, hi->host->driver->name);
......@@ -536,7 +536,7 @@ static int state_initialized(struct file_info *fi, struct pending_request *req)
req->req.error = RAW1394_ERROR_NONE;
req->req.generation = get_hpsb_generation(fi->host);
req->req.misc = (fi->host->node_id << 16)
| fi->host->node_count;
if (fi->protocol_version > 3) {
req->req.misc |= NODEID_TO_NODE(fi->host->irm_id) << 8;
......@@ -635,7 +635,7 @@ static int handle_async_request(struct file_info *fi,
req->data = &packet->header[3];
else
req->data = packet->data;
break;
case RAW1394_REQ_ASYNC_WRITE:
......@@ -655,7 +655,7 @@ static int handle_async_request(struct file_info *fi,
req->req.length))
req->req.error = RAW1394_ERROR_MEMFAULT;
}
req->req.length = 0;
break;
......@@ -670,7 +670,7 @@ static int handle_async_request(struct file_info *fi,
if (copy_from_user(packet->data, int2ptr(req->req.sendb),
req->req.length))
req->req.error = RAW1394_ERROR_MEMFAULT;
req->req.length = 0;
break;
......@@ -807,13 +807,12 @@ static int handle_async_send(struct file_info *fi, struct pending_request *req)
int expect_response = req->req.misc >> 16;
if ((header_length > req->req.length) ||
(header_length < 12)) {
req->req.error = RAW1394_ERROR_INVALID_ARG;
req->req.length = 0;
queue_complete_req(req);
return sizeof(struct raw1394_request);
}
}
packet = hpsb_alloc_packet(req->req.length-header_length);
req->packet = packet;
......@@ -886,7 +885,7 @@ static int arm_read (struct hpsb_host *host, int nodeid, quadlet_t *buffer,
entry = fi->addr_list.next;
while (entry != &(fi->addr_list)) {
arm_addr = list_entry(entry, struct arm_addr, addr_list);
if (((arm_addr->start) <= (addr)) &&
((arm_addr->end) >= (addr+length))) {
found = 1;
break;
......@@ -914,7 +913,7 @@ static int arm_read (struct hpsb_host *host, int nodeid, quadlet_t *buffer,
if (rcode == -1) {
if (arm_addr->access_rights & ARM_READ) {
if (!(arm_addr->client_transactions & ARM_READ)) {
memcpy(buffer,(arm_addr->addr_space_buffer)+(addr-(arm_addr->start)),
length);
DBGMSG("arm_read -> (rcode_complete)");
rcode = RCODE_COMPLETE;
......@@ -930,7 +929,7 @@ static int arm_read (struct hpsb_host *host, int nodeid, quadlet_t *buffer,
if (!req) {
DBGMSG("arm_read -> rcode_conflict_error");
spin_unlock(&host_info_lock);
return(RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
The request may be retried */
}
if (rcode == RCODE_COMPLETE) {
......@@ -946,7 +945,7 @@ static int arm_read (struct hpsb_host *host, int nodeid, quadlet_t *buffer,
free_pending_request(req);
DBGMSG("arm_read -> rcode_conflict_error");
spin_unlock(&host_info_lock);
return(RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
The request may be retried */
}
req->free_data=1;
......@@ -958,19 +957,19 @@ static int arm_read (struct hpsb_host *host, int nodeid, quadlet_t *buffer,
req->req.recvb = arm_addr->recvb;
req->req.length = size;
arm_req_resp = (struct arm_request_response *) (req->data);
arm_req = (struct arm_request *) ((byte_t *)(req->data) +
(sizeof (struct arm_request_response)));
arm_resp = (struct arm_response *) ((byte_t *)(arm_req) +
(sizeof(struct arm_request)));
arm_req->buffer = NULL;
arm_resp->buffer = NULL;
if (rcode == RCODE_COMPLETE) {
arm_resp->buffer = ((byte_t *)(arm_resp) +
(sizeof(struct arm_response)));
memcpy (arm_resp->buffer,
(arm_addr->addr_space_buffer)+(addr-(arm_addr->start)),
length);
arm_resp->buffer = int2ptr((arm_addr->recvb) +
sizeof (struct arm_request_response) +
sizeof (struct arm_request) +
sizeof (struct arm_response));
......@@ -985,9 +984,9 @@ static int arm_read (struct hpsb_host *host, int nodeid, quadlet_t *buffer,
arm_req->destination_nodeid = host->node_id;
arm_req->tlabel = (flags >> 10) & 0x3f;
arm_req->tcode = (flags >> 4) & 0x0f;
arm_req_resp->request = int2ptr((arm_addr->recvb) +
sizeof (struct arm_request_response));
arm_req_resp->response = int2ptr((arm_addr->recvb) +
sizeof (struct arm_request_response) +
sizeof (struct arm_request));
queue_complete_req(req);
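/*
 * The pointer arithmetic above keeps re-deriving one layout; a sketch of it
 * (interpretation only): req->data is a single allocation holding
 *
 *	struct arm_request_response   at offset 0
 *	struct arm_request            immediately after it
 *	struct arm_response           after that
 *	payload bytes (read data)     at the end
 *
 * and the embedded request/response/buffer pointers are then rewritten as
 * offsets from arm_addr->recvb, so user space sees addresses inside its own
 * receive buffer rather than kernel pointers.
 */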
......@@ -1005,7 +1004,7 @@ static int arm_write (struct hpsb_host *host, int nodeid, int destid,
struct list_head *entry;
struct arm_addr *arm_addr = NULL;
struct arm_request *arm_req = NULL;
struct arm_response *arm_resp = NULL;
int found=0, size=0, rcode=-1, length_conflict=0;
struct arm_request_response *arm_req_resp = NULL;
......@@ -1020,7 +1019,7 @@ static int arm_write (struct hpsb_host *host, int nodeid, int destid,
entry = fi->addr_list.next;
while (entry != &(fi->addr_list)) {
arm_addr = list_entry(entry, struct arm_addr, addr_list);
if (((arm_addr->start) <= (addr)) &&
((arm_addr->end) >= (addr+length))) {
found = 1;
break;
......@@ -1065,7 +1064,7 @@ static int arm_write (struct hpsb_host *host, int nodeid, int destid,
if (!req) {
DBGMSG("arm_write -> rcode_conflict_error");
spin_unlock(&host_info_lock);
return(RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
The request may be retried */
}
size = sizeof(struct arm_request)+sizeof(struct arm_response) +
......@@ -1076,7 +1075,7 @@ static int arm_write (struct hpsb_host *host, int nodeid, int destid,
free_pending_request(req);
DBGMSG("arm_write -> rcode_conflict_error");
spin_unlock(&host_info_lock);
return(RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
The request may be retried */
}
req->free_data=1;
......@@ -1088,15 +1087,15 @@ static int arm_write (struct hpsb_host *host, int nodeid, int destid,
req->req.recvb = arm_addr->recvb;
req->req.length = size;
arm_req_resp = (struct arm_request_response *) (req->data);
arm_req = (struct arm_request *) ((byte_t *)(req->data) +
(sizeof (struct arm_request_response)));
arm_resp = (struct arm_response *) ((byte_t *)(arm_req) +
(sizeof(struct arm_request)));
arm_req->buffer = ((byte_t *)(arm_resp) +
(sizeof(struct arm_response)));
arm_resp->buffer = NULL;
memcpy (arm_req->buffer, data, length);
arm_req->buffer = int2ptr((arm_addr->recvb) +
sizeof (struct arm_request_response) +
sizeof (struct arm_request) +
sizeof (struct arm_response));
......@@ -1110,9 +1109,9 @@ static int arm_write (struct hpsb_host *host, int nodeid, int destid,
arm_req->tcode = (flags >> 4) & 0x0f;
arm_resp->buffer_length = 0;
arm_resp->response_code = rcode;
arm_req_resp->request = int2ptr((arm_addr->recvb) +
sizeof (struct arm_request_response));
arm_req_resp->response = int2ptr((arm_addr->recvb) +
sizeof (struct arm_request_response) +
sizeof (struct arm_request));
queue_complete_req(req);
......@@ -1130,7 +1129,7 @@ static int arm_lock (struct hpsb_host *host, int nodeid, quadlet_t *store,
struct list_head *entry;
struct arm_addr *arm_addr = NULL;
struct arm_request *arm_req = NULL;
struct arm_response *arm_resp = NULL;
int found=0, size=0, rcode=-1;
quadlet_t old, new;
struct arm_request_response *arm_req_resp = NULL;
......@@ -1138,12 +1137,12 @@ static int arm_lock (struct hpsb_host *host, int nodeid, quadlet_t *store,
if (((ext_tcode & 0xFF) == EXTCODE_FETCH_ADD) ||
((ext_tcode & 0xFF) == EXTCODE_LITTLE_ADD)) {
DBGMSG("arm_lock called by node: %X "
"addr: %4.4x %8.8x extcode: %2.2X data: %8.8X",
"addr: %4.4x %8.8x extcode: %2.2X data: %8.8X",
nodeid, (u16) ((addr >>32) & 0xFFFF), (u32) (addr & 0xFFFFFFFF),
ext_tcode & 0xFF , be32_to_cpu(data));
} else {
DBGMSG("arm_lock called by node: %X "
"addr: %4.4x %8.8x extcode: %2.2X data: %8.8X arg: %8.8X",
"addr: %4.4x %8.8x extcode: %2.2X data: %8.8X arg: %8.8X",
nodeid, (u16) ((addr >>32) & 0xFFFF), (u32) (addr & 0xFFFFFFFF),
ext_tcode & 0xFF , be32_to_cpu(data), be32_to_cpu(arg));
}
......@@ -1154,7 +1153,7 @@ static int arm_lock (struct hpsb_host *host, int nodeid, quadlet_t *store,
entry = fi->addr_list.next;
while (entry != &(fi->addr_list)) {
arm_addr = list_entry(entry, struct arm_addr, addr_list);
if (((arm_addr->start) <= (addr)) &&
((arm_addr->end) >= (addr+sizeof(*store)))) {
found = 1;
break;
......@@ -1199,7 +1198,7 @@ static int arm_lock (struct hpsb_host *host, int nodeid, quadlet_t *store,
break;
case (EXTCODE_BOUNDED_ADD):
if (old != arg) {
new = cpu_to_be32(be32_to_cpu(data) +
be32_to_cpu(old));
} else {
new = old;
......@@ -1207,7 +1206,7 @@ static int arm_lock (struct hpsb_host *host, int nodeid, quadlet_t *store,
break;
case (EXTCODE_WRAP_ADD):
if (old != arg) {
new = cpu_to_be32(be32_to_cpu(data) +
be32_to_cpu(old));
} else {
new = data;
......@@ -1224,7 +1223,7 @@ static int arm_lock (struct hpsb_host *host, int nodeid, quadlet_t *store,
rcode = RCODE_COMPLETE;
memcpy (store, &old, sizeof(*store));
memcpy ((arm_addr->addr_space_buffer)+
(addr-(arm_addr->start)),
&new, sizeof(*store));
}
}
......@@ -1239,31 +1238,31 @@ static int arm_lock (struct hpsb_host *host, int nodeid, quadlet_t *store,
if (!req) {
DBGMSG("arm_lock -> rcode_conflict_error");
spin_unlock(&host_info_lock);
return(RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
The request may be retried */
}
size = sizeof(struct arm_request)+sizeof(struct arm_response) +
3 * sizeof(*store) +
sizeof (struct arm_request_response); /* maximum */
req->data = kmalloc(size, SLAB_ATOMIC);
if (!(req->data)) {
free_pending_request(req);
DBGMSG("arm_lock -> rcode_conflict_error");
spin_unlock(&host_info_lock);
return(RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
The request may be retried */
}
req->free_data=1;
arm_req_resp = (struct arm_request_response *) (req->data);
arm_req = (struct arm_request *) ((byte_t *)(req->data) +
(sizeof (struct arm_request_response)));
arm_resp = (struct arm_response *) ((byte_t *)(arm_req) +
(sizeof(struct arm_request)));
arm_req->buffer = ((byte_t *)(arm_resp) +
(sizeof(struct arm_response)));
arm_resp->buffer = ((byte_t *)(arm_req->buffer) +
(2* sizeof(*store)));
if ((ext_tcode == EXTCODE_FETCH_ADD) ||
(ext_tcode == EXTCODE_LITTLE_ADD)) {
arm_req->buffer_length = sizeof(*store);
memcpy (arm_req->buffer, &data, sizeof(*store));
......@@ -1271,7 +1270,7 @@ static int arm_lock (struct hpsb_host *host, int nodeid, quadlet_t *store,
} else {
arm_req->buffer_length = 2 * sizeof(*store);
memcpy (arm_req->buffer, &arg, sizeof(*store));
memcpy (((arm_req->buffer) + sizeof(*store)),
&data, sizeof(*store));
}
if (rcode == RCODE_COMPLETE) {
......@@ -1284,7 +1283,7 @@ static int arm_lock (struct hpsb_host *host, int nodeid, quadlet_t *store,
req->file_info = fi;
req->req.type = RAW1394_REQ_ARM;
req->req.generation = get_hpsb_generation(host);
req->req.misc = ( (((sizeof(*store)) << 16) & (0xFFFF0000)) |
(ARM_LOCK & 0xFF));
req->req.tag = arm_addr->arm_tag;
req->req.recvb = arm_addr->recvb;
......@@ -1297,16 +1296,16 @@ static int arm_lock (struct hpsb_host *host, int nodeid, quadlet_t *store,
arm_req->tlabel = (flags >> 10) & 0x3f;
arm_req->tcode = (flags >> 4) & 0x0f;
arm_resp->response_code = rcode;
arm_req_resp->request = int2ptr((arm_addr->recvb) +
sizeof (struct arm_request_response));
arm_req_resp->response = int2ptr((arm_addr->recvb) +
sizeof (struct arm_request_response) +
sizeof (struct arm_request));
arm_req->buffer = int2ptr((arm_addr->recvb) +
sizeof (struct arm_request_response) +
sizeof (struct arm_request) +
sizeof (struct arm_response));
arm_resp->buffer = int2ptr((arm_addr->recvb) +
sizeof (struct arm_request_response) +
sizeof (struct arm_request) +
sizeof (struct arm_response) +
......@@ -1336,20 +1335,20 @@ static int arm_lock64 (struct hpsb_host *host, int nodeid, octlet_t *store,
DBGMSG("arm_lock64 called by node: %X "
"addr: %4.4x %8.8x extcode: %2.2X data: %8.8X %8.8X ",
nodeid, (u16) ((addr >>32) & 0xFFFF),
(u32) (addr & 0xFFFFFFFF),
ext_tcode & 0xFF ,
(u32) ((be64_to_cpu(data) >> 32) & 0xFFFFFFFF),
(u32) (be64_to_cpu(data) & 0xFFFFFFFF));
} else {
DBGMSG("arm_lock64 called by node: %X "
"addr: %4.4x %8.8x extcode: %2.2X data: %8.8X %8.8X arg: "
"%8.8X %8.8X ",
nodeid, (u16) ((addr >>32) & 0xFFFF),
(u32) (addr & 0xFFFFFFFF),
ext_tcode & 0xFF ,
(u32) ((be64_to_cpu(data) >> 32) & 0xFFFFFFFF),
(u32) (be64_to_cpu(data) & 0xFFFFFFFF),
(u32) ((be64_to_cpu(arg) >> 32) & 0xFFFFFFFF),
(u32) (be64_to_cpu(arg) & 0xFFFFFFFF));
}
spin_lock(&host_info_lock);
......@@ -1359,7 +1358,7 @@ static int arm_lock64 (struct hpsb_host *host, int nodeid, octlet_t *store,
entry = fi->addr_list.next;
while (entry != &(fi->addr_list)) {
arm_addr = list_entry(entry, struct arm_addr, addr_list);
if (((arm_addr->start) <= (addr)) &&
((arm_addr->end) >= (addr+sizeof(*store)))) {
found = 1;
break;
......@@ -1404,7 +1403,7 @@ static int arm_lock64 (struct hpsb_host *host, int nodeid, octlet_t *store,
break;
case (EXTCODE_BOUNDED_ADD):
if (old != arg) {
new = cpu_to_be64(be64_to_cpu(data) +
be64_to_cpu(old));
} else {
new = old;
......@@ -1412,7 +1411,7 @@ static int arm_lock64 (struct hpsb_host *host, int nodeid, octlet_t *store,
break;
case (EXTCODE_WRAP_ADD):
if (old != arg) {
new = cpu_to_be64(be64_to_cpu(data) +
be64_to_cpu(old));
} else {
new = data;
......@@ -1429,9 +1428,9 @@ static int arm_lock64 (struct hpsb_host *host, int nodeid, octlet_t *store,
rcode = RCODE_COMPLETE;
memcpy (store, &old, sizeof(*store));
memcpy ((arm_addr->addr_space_buffer)+
(addr-(arm_addr->start)),
&new, sizeof(*store));
}
}
}
} else {
rcode = RCODE_TYPE_ERROR; /* function not allowed */
......@@ -1444,7 +1443,7 @@ static int arm_lock64 (struct hpsb_host *host, int nodeid, octlet_t *store,
if (!req) {
spin_unlock(&host_info_lock);
DBGMSG("arm_lock64 -> rcode_conflict_error");
return(RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
The request may be retried */
}
size = sizeof(struct arm_request)+sizeof(struct arm_response) +
......@@ -1455,20 +1454,20 @@ static int arm_lock64 (struct hpsb_host *host, int nodeid, octlet_t *store,
free_pending_request(req);
spin_unlock(&host_info_lock);
DBGMSG("arm_lock64 -> rcode_conflict_error");
return(RCODE_CONFLICT_ERROR); /* A resource conflict was detected.
The request may be retried */
}
req->free_data=1;
arm_req_resp = (struct arm_request_response *) (req->data);
arm_req = (struct arm_request *) ((byte_t *)(req->data) +
(sizeof (struct arm_request_response)));
arm_resp = (struct arm_response *) ((byte_t *)(arm_req) +
(sizeof(struct arm_request)));
arm_req->buffer = ((byte_t *)(arm_resp) +
(sizeof(struct arm_response)));
arm_resp->buffer = ((byte_t *)(arm_req->buffer) +
(2* sizeof(*store)));
if ((ext_tcode == EXTCODE_FETCH_ADD) ||
(ext_tcode == EXTCODE_LITTLE_ADD)) {
arm_req->buffer_length = sizeof(*store);
memcpy (arm_req->buffer, &data, sizeof(*store));
......@@ -1476,7 +1475,7 @@ static int arm_lock64 (struct hpsb_host *host, int nodeid, octlet_t *store,
} else {
arm_req->buffer_length = 2 * sizeof(*store);
memcpy (arm_req->buffer, &arg, sizeof(*store));
memcpy (((arm_req->buffer) + sizeof(*store)),
memcpy (((arm_req->buffer) + sizeof(*store)),
&data, sizeof(*store));
}
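A rough sketch of the layout built up in req->data above (offsets only, assuming the struct sizes used by this driver; the int2ptr() assignments further down rebase the embedded pointers onto the user-visible arm_addr->recvb area):

/* Illustrative layout of req->data as assembled above:
 *
 *   0                                        struct arm_request_response
 *   + sizeof(struct arm_request_response)    struct arm_request
 *   + sizeof(struct arm_request)             struct arm_response
 *   + sizeof(struct arm_response)            request buffer: data, or
 *                                            arg followed by data for the
 *                                            two-operand lock codes
 *   + 2 * sizeof(octlet_t)                   response buffer
 */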
if (rcode == RCODE_COMPLETE) {
......@@ -1489,7 +1488,7 @@ static int arm_lock64 (struct hpsb_host *host, int nodeid, octlet_t *store,
req->file_info = fi;
req->req.type = RAW1394_REQ_ARM;
req->req.generation = get_hpsb_generation(host);
req->req.misc = ( (((sizeof(*store)) << 16) & (0xFFFF0000)) |
req->req.misc = ( (((sizeof(*store)) << 16) & (0xFFFF0000)) |
(ARM_LOCK & 0xFF));
req->req.tag = arm_addr->arm_tag;
req->req.recvb = arm_addr->recvb;
......@@ -1502,16 +1501,16 @@ static int arm_lock64 (struct hpsb_host *host, int nodeid, octlet_t *store,
arm_req->tlabel = (flags >> 10) & 0x3f;
arm_req->tcode = (flags >> 4) & 0x0f;
arm_resp->response_code = rcode;
arm_req_resp->request = int2ptr((arm_addr->recvb) +
arm_req_resp->request = int2ptr((arm_addr->recvb) +
sizeof (struct arm_request_response));
arm_req_resp->response = int2ptr((arm_addr->recvb) +
arm_req_resp->response = int2ptr((arm_addr->recvb) +
sizeof (struct arm_request_response) +
sizeof (struct arm_request));
arm_req->buffer = int2ptr((arm_addr->recvb) +
arm_req->buffer = int2ptr((arm_addr->recvb) +
sizeof (struct arm_request_response) +
sizeof (struct arm_request) +
sizeof (struct arm_response));
arm_resp->buffer = int2ptr((arm_addr->recvb) +
arm_resp->buffer = int2ptr((arm_addr->recvb) +
sizeof (struct arm_request_response) +
sizeof (struct arm_request) +
sizeof (struct arm_response) +
......@@ -1548,11 +1547,11 @@ static int arm_register(struct file_info *fi, struct pending_request *req)
return (-EINVAL);
}
/* addr-list-entry for fileinfo */
addr = (struct arm_addr *)kmalloc(sizeof(struct arm_addr), SLAB_KERNEL);
addr = (struct arm_addr *)kmalloc(sizeof(struct arm_addr), SLAB_KERNEL);
if (!addr) {
req->req.length = 0;
return (-ENOMEM);
}
}
/* allocation of addr_space_buffer */
addr->addr_space_buffer = (u8 *)vmalloc(req->req.length);
if (!(addr->addr_space_buffer)) {
......@@ -1593,7 +1592,7 @@ static int arm_register(struct file_info *fi, struct pending_request *req)
entry = fi_hlp->addr_list.next;
while (entry != &(fi_hlp->addr_list)) {
arm_addr = list_entry(entry, struct arm_addr, addr_list);
if ( (arm_addr->start == addr->start) &&
if ( (arm_addr->start == addr->start) &&
(arm_addr->end == addr->end)) {
DBGMSG("same host ownes same "
"addressrange -> EALREADY");
......@@ -1620,7 +1619,7 @@ static int arm_register(struct file_info *fi, struct pending_request *req)
entry = fi_hlp->addr_list.next;
while (entry != &(fi_hlp->addr_list)) {
arm_addr = list_entry(entry, struct arm_addr, addr_list);
if ( (arm_addr->start == addr->start) &&
if ( (arm_addr->start == addr->start) &&
(arm_addr->end == addr->end)) {
DBGMSG("another host ownes same "
"addressrange");
......@@ -1662,7 +1661,7 @@ static int arm_register(struct file_info *fi, struct pending_request *req)
vfree(addr->addr_space_buffer);
kfree(addr);
spin_unlock_irqrestore(&host_info_lock, flags);
return (-EALREADY);
return (-EALREADY);
}
spin_unlock_irqrestore(&host_info_lock, flags);
free_pending_request(req); /* immediate success or fail */
......@@ -1703,16 +1702,16 @@ static int arm_unregister(struct file_info *fi, struct pending_request *req)
}
DBGMSG("arm_Unregister addr found");
another_host = 0;
/* another host with valid address-entry containing
/* another host with valid address-entry containing
same addressrange */
list_for_each_entry(hi, &host_info_list, list) {
if (hi->host != fi->host) {
list_for_each_entry(fi_hlp, &hi->file_info_list, list) {
entry = fi_hlp->addr_list.next;
while (entry != &(fi_hlp->addr_list)) {
arm_addr = list_entry(entry,
arm_addr = list_entry(entry,
struct arm_addr, addr_list);
if (arm_addr->start ==
if (arm_addr->start ==
addr->start) {
DBGMSG("another host ownes "
"same addressrange");
......@@ -1735,7 +1734,7 @@ static int arm_unregister(struct file_info *fi, struct pending_request *req)
free_pending_request(req); /* immediate success or fail */
spin_unlock_irqrestore(&host_info_lock, flags);
return sizeof(struct raw1394_request);
}
}
retval = hpsb_unregister_addrspace(&raw1394_highlevel, fi->host, addr->start);
if (!retval) {
printk(KERN_ERR "raw1394: arm_Unregister failed -> EINVAL\n");
......@@ -1863,7 +1862,7 @@ static int reset_notification(struct file_info *fi, struct pending_request *req)
fi->notification=(u8)req->req.misc;
free_pending_request(req); /* we have to free the request, because we queue no response, and therefore nobody will free it */
return sizeof(struct raw1394_request);
}
}
/* error EINVAL (22) invalid argument */
return (-EINVAL);
}
......@@ -1905,7 +1904,7 @@ static int get_config_rom(struct file_info *fi, struct pending_request *req)
status = csr1212_read(fi->host->csr.rom, CSR1212_CONFIG_ROM_SPACE_OFFSET,
data, req->req.length);
if (copy_to_user(int2ptr(req->req.recvb), data,
if (copy_to_user(int2ptr(req->req.recvb), data,
req->req.length))
ret = -EFAULT;
if (copy_to_user(int2ptr(req->req.tag), &fi->host->csr.rom->cache_head->len,
......@@ -1914,7 +1913,7 @@ static int get_config_rom(struct file_info *fi, struct pending_request *req)
if (copy_to_user(int2ptr(req->req.address), &fi->host->csr.generation,
sizeof(fi->host->csr.generation)))
ret = -EFAULT;
if (copy_to_user(int2ptr(req->req.sendb), &status,
if (copy_to_user(int2ptr(req->req.sendb), &status,
sizeof(status)))
ret = -EFAULT;
kfree(data);
......@@ -1929,14 +1928,14 @@ static int update_config_rom(struct file_info *fi, struct pending_request *req)
int ret=sizeof(struct raw1394_request);
quadlet_t *data = kmalloc(req->req.length, SLAB_KERNEL);
if (!data) return -ENOMEM;
if (copy_from_user(data,int2ptr(req->req.sendb),
if (copy_from_user(data,int2ptr(req->req.sendb),
req->req.length)) {
ret= -EFAULT;
} else {
int status = hpsb_update_config_rom(fi->host,
data, req->req.length,
int status = hpsb_update_config_rom(fi->host,
data, req->req.length,
(unsigned char) req->req.misc);
if (copy_to_user(int2ptr(req->req.recvb),
if (copy_to_user(int2ptr(req->req.recvb),
&status, sizeof(status)))
ret = -ENOMEM;
}
......@@ -2033,7 +2032,7 @@ static int modify_config_rom(struct file_info *fi, struct pending_request *req)
if (ret == CSR1212_SUCCESS) {
ret = hpsb_update_config_rom_image(fi->host);
if (ret >= 0 && copy_to_user(int2ptr(req->req.recvb),
if (ret >= 0 && copy_to_user(int2ptr(req->req.recvb),
&dr, sizeof(dr))) {
ret = -ENOMEM;
}
......@@ -2044,7 +2043,7 @@ static int modify_config_rom(struct file_info *fi, struct pending_request *req)
if (ret >= 0) {
/* we have to free the request, because we queue no response,
* and therefore nobody will free it */
* and therefore nobody will free it */
free_pending_request(req);
return sizeof(struct raw1394_request);
} else {
......@@ -2362,7 +2361,7 @@ static int raw1394_iso_recv_packets(struct file_info *fi, void *uaddr)
&fi->iso_handle->infos[packet],
sizeof(struct raw1394_iso_packet_info)))
return -EFAULT;
packet = (packet + 1) % fi->iso_handle->buf_packets;
}
......@@ -2534,7 +2533,7 @@ static int raw1394_open(struct inode *inode, struct file *file)
fi = kmalloc(sizeof(struct file_info), SLAB_KERNEL);
if (fi == NULL)
return -ENOMEM;
memset(fi, 0, sizeof(struct file_info));
fi->notification = (u8) RAW1394_NOTIFY_ON; /* busreset notification */
......@@ -2588,16 +2587,16 @@ static int raw1394_release(struct inode *inode, struct file *file)
another_host = 0;
lh = fi->addr_list.next;
addr = list_entry(lh, struct arm_addr, addr_list);
/* another host with valid address-entry containing
/* another host with valid address-entry containing
same addressrange? */
list_for_each_entry(hi, &host_info_list, list) {
if (hi->host != fi->host) {
list_for_each_entry(fi_hlp, &hi->file_info_list, list) {
entry = fi_hlp->addr_list.next;
while (entry != &(fi_hlp->addr_list)) {
arm_addr = list_entry(entry,
arm_addr = list_entry(entry,
struct arm_addr, addr_list);
if (arm_addr->start ==
if (arm_addr->start ==
addr->start) {
DBGMSG("raw1394_release: "
"another host ownes "
......@@ -2726,13 +2725,13 @@ static struct hpsb_highlevel raw1394_highlevel = {
static struct cdev raw1394_cdev;
static struct file_operations raw1394_fops = {
.owner = THIS_MODULE,
.read = raw1394_read,
.read = raw1394_read,
.write = raw1394_write,
.mmap = raw1394_mmap,
.ioctl = raw1394_ioctl,
.poll = raw1394_poll,
.open = raw1394_open,
.release = raw1394_release,
.poll = raw1394_poll,
.open = raw1394_open,
.release = raw1394_release,
};
static int __init init_raw1394(void)
......@@ -2746,9 +2745,9 @@ static int __init init_raw1394(void)
cdev_init(&raw1394_cdev, &raw1394_fops);
raw1394_cdev.owner = THIS_MODULE;
kobject_set_name(&raw1394_cdev.kobj, RAW1394_DEVICE_NAME);
ret = cdev_add(&raw1394_cdev, IEEE1394_RAW1394_DEV, 1);
if (ret) {
/* jmc: leaves reference to (static) raw1394_cdev */
HPSB_ERR("raw1394 failed to register minor device block");
devfs_remove(RAW1394_DEVICE_NAME);
hpsb_unregister_highlevel(&raw1394_highlevel);
......
......@@ -78,7 +78,7 @@
#include "sbp2.h"
static char version[] __devinitdata =
"$Rev: 1170 $ Ben Collins <bcollins@debian.org>";
"$Rev: 1205 $ Ben Collins <bcollins@debian.org>";
/*
* Module load parameter definitions
......@@ -137,7 +137,7 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device (default = 1)"
* if your sbp2 device is not properly handling the SCSI inquiry command.
* This hack makes the inquiry look more like a typical MS Windows
* inquiry.
*
*
* If force_inquiry_hack=1 is required for your device to work,
* please submit the logged sbp2_firmware_revision value of this device to
* the linux1394-devel mailing list.
......@@ -206,7 +206,7 @@ static u32 global_outstanding_dmas = 0;
#define SBP2_INFO(fmt, args...) HPSB_INFO("sbp2: "fmt, ## args)
#define SBP2_NOTICE(fmt, args...) HPSB_NOTICE("sbp2: "fmt, ## args)
#define SBP2_WARN(fmt, args...) HPSB_WARN("sbp2: "fmt, ## args)
#else
#else
#define SBP2_DEBUG(fmt, args...)
#define SBP2_INFO(fmt, args...) HPSB_INFO("sbp2: "fmt, ## args)
#define SBP2_NOTICE(fmt, args...) HPSB_NOTICE("sbp2: "fmt, ## args)
......@@ -226,7 +226,7 @@ static void sbp2scsi_complete_all_commands(struct scsi_id_instance_data *scsi_id
static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
u32 scsi_status, Scsi_Cmnd *SCpnt,
void (*done)(Scsi_Cmnd *));
static Scsi_Host_Template scsi_driver_template;
const u8 sbp2_speedto_max_payload[] = { 0x7, 0x8, 0x9, 0xA, 0xB, 0xC };
......@@ -409,7 +409,7 @@ static int sbp2util_create_command_orb_pool(struct scsi_id_instance_data *scsi_i
struct sbp2_command_info *command;
orbs = serialize_io ? 2 : SBP2_MAX_CMDS;
spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
for (i = 0; i < orbs; i++) {
command = (struct sbp2_command_info *)
......@@ -445,7 +445,7 @@ static void sbp2util_remove_command_orb_pool(struct scsi_id_instance_data *scsi_
struct list_head *lh, *next;
struct sbp2_command_info *command;
unsigned long flags;
spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
if (!list_empty(&scsi_id->sbp2_command_orb_completed)) {
list_for_each_safe(lh, next, &scsi_id->sbp2_command_orb_completed) {
......@@ -468,7 +468,7 @@ static void sbp2util_remove_command_orb_pool(struct scsi_id_instance_data *scsi_
return;
}
/*
/*
* This function finds the sbp2_command for a given outstanding command
* orb. Only looks at the inuse list.
*/
......@@ -494,7 +494,7 @@ static struct sbp2_command_info *sbp2util_find_command_for_orb(
return(NULL);
}
/*
/*
* This function finds the sbp2_command for a given outstanding SCpnt.
* Only looks at the inuse list.
*/
......@@ -520,8 +520,8 @@ static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(struct scsi_id_
* This function allocates a command orb used to send a scsi command.
*/
static struct sbp2_command_info *sbp2util_allocate_command_orb(
struct scsi_id_instance_data *scsi_id,
Scsi_Cmnd *Current_SCpnt,
struct scsi_id_instance_data *scsi_id,
Scsi_Cmnd *Current_SCpnt,
void (*Current_done)(Scsi_Cmnd *))
{
struct list_head *lh;
......@@ -647,8 +647,8 @@ static int sbp2_update(struct unit_directory *ud)
SBP2_DEBUG("sbp2_update");
if (sbp2_reconnect_device(scsi_id)) {
/*
/*
* Ok, reconnect has failed. Perhaps we didn't
* reconnect fast enough. Try doing a regular login, but
* first do a logout just in case of any weirdness.
......@@ -658,7 +658,7 @@ static int sbp2_update(struct unit_directory *ud)
if (sbp2_login_device(scsi_id)) {
/* Login failed too, just fail, and the backend
* will call our sbp2_remove for us */
SBP2_INFO("sbp2_reconnect_device failed!");
SBP2_ERR("Failed to reconnect to sbp2 device!");
return -EBUSY;
}
}
......@@ -851,7 +851,7 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
scsi_id->query_logins_orb_dma);
SBP2_DMA_FREE("query logins ORB DMA");
}
if (scsi_id->logout_orb) {
pci_free_consistent(hi->host->pdev,
sizeof(struct sbp2_logout_orb),
......@@ -905,7 +905,6 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
* allows someone else to login instead. One second makes sense. */
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ);
/*
* Login to the sbp-2 device
......@@ -920,12 +919,12 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
* Set max retries to something large on the device
*/
sbp2_set_busy_timeout(scsi_id);
/*
* Do a SBP-2 fetch agent reset
*/
sbp2_agent_reset(scsi_id, 1);
/*
* Get the max speed and packet size that we can use
*/
......@@ -1157,14 +1156,14 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
max_logins = RESPONSE_GET_MAX_LOGINS(scsi_id->query_logins_response->length_max_logins);
SBP2_DEBUG("Maximum concurrent logins supported: %d", max_logins);
active_logins = RESPONSE_GET_ACTIVE_LOGINS(scsi_id->query_logins_response->length_max_logins);
SBP2_DEBUG("Number of active logins: %d", active_logins);
if (active_logins >= max_logins) {
return(-EIO);
}
return 0;
}
......@@ -1172,7 +1171,7 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
* This function is called in order to login to a particular SBP-2 device,
* after a bus reset.
*/
static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
{
struct sbp2scsi_host_info *hi = scsi_id->hi;
quadlet_t data[2];
......@@ -1192,7 +1191,7 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
}
/* Set-up login ORB, assume no password */
scsi_id->login_orb->password_hi = 0;
scsi_id->login_orb->password_hi = 0;
scsi_id->login_orb->password_lo = 0;
SBP2_DEBUG("sbp2_login_device: password_hi/lo initialized");
......@@ -1216,7 +1215,7 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
ORB_SET_LOGIN_RESP_LENGTH(sizeof(struct sbp2_login_response));
SBP2_DEBUG("sbp2_login_device: passwd_resp_lengths initialized");
scsi_id->login_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
scsi_id->login_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id);
scsi_id->login_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) |
SBP2_STATUS_FIFO_ADDRESS_HI);
......@@ -1229,7 +1228,7 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
SBP2_DEBUG("sbp2_login_device: orb byte-swapped");
sbp2util_packet_dump(scsi_id->login_orb, sizeof(struct sbp2_login_orb),
sbp2util_packet_dump(scsi_id->login_orb, sizeof(struct sbp2_login_orb),
"sbp2 login orb", scsi_id->login_orb_dma);
/*
......@@ -1255,7 +1254,7 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
SBP2_DEBUG("sbp2_login_device: written");
/*
* Wait for login status (up to 20 seconds)...
* Wait for login status (up to 20 seconds)...
*/
if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, 20*HZ)) {
SBP2_ERR("Error logging into SBP-2 device - login timed-out");
......@@ -1310,10 +1309,11 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
* This function is called in order to logout from a particular SBP-2
* device, usually called during driver unload.
*/
static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id)
static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id)
{
struct sbp2scsi_host_info *hi = scsi_id->hi;
quadlet_t data[2];
int error;
SBP2_DEBUG("sbp2_logout_device");
......@@ -1332,7 +1332,7 @@ static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id)
scsi_id->logout_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
scsi_id->logout_orb->reserved5 = 0x0;
scsi_id->logout_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
scsi_id->logout_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id);
scsi_id->logout_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) |
SBP2_STATUS_FIFO_ADDRESS_HI);
......@@ -1342,7 +1342,7 @@ static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id)
*/
sbp2util_cpu_to_be32_buffer(scsi_id->logout_orb, sizeof(struct sbp2_logout_orb));
sbp2util_packet_dump(scsi_id->logout_orb, sizeof(struct sbp2_logout_orb),
sbp2util_packet_dump(scsi_id->logout_orb, sizeof(struct sbp2_logout_orb),
"sbp2 logout orb", scsi_id->logout_orb_dma);
/*
......@@ -1354,10 +1354,15 @@ static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id)
atomic_set(&scsi_id->sbp2_login_complete, 0);
hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8);
error = hpsb_node_write(scsi_id->ne,
scsi_id->sbp2_management_agent_addr,
data, 8);
if (error)
return error;
/* Wait for device to logout...1 second. */
sbp2util_down_timeout(&scsi_id->sbp2_login_complete, HZ);
if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, HZ))
return -EIO;
SBP2_INFO("Logged out of SBP-2 device");
......@@ -1369,10 +1374,11 @@ static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id)
* This function is called in order to reconnect to a particular SBP-2
* device, after a bus reset.
*/
static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
{
struct sbp2scsi_host_info *hi = scsi_id->hi;
quadlet_t data[2];
int error;
SBP2_DEBUG("sbp2_reconnect_device");
......@@ -1392,7 +1398,7 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
scsi_id->reconnect_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
scsi_id->reconnect_orb->reserved5 = 0x0;
scsi_id->reconnect_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
scsi_id->reconnect_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id);
scsi_id->reconnect_orb->status_FIFO_hi =
(ORB_SET_NODE_ID(hi->host->node_id) | SBP2_STATUS_FIFO_ADDRESS_HI);
......@@ -1402,7 +1408,7 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
*/
sbp2util_cpu_to_be32_buffer(scsi_id->reconnect_orb, sizeof(struct sbp2_reconnect_orb));
sbp2util_packet_dump(scsi_id->reconnect_orb, sizeof(struct sbp2_reconnect_orb),
sbp2util_packet_dump(scsi_id->reconnect_orb, sizeof(struct sbp2_reconnect_orb),
"sbp2 reconnect orb", scsi_id->reconnect_orb_dma);
/*
......@@ -1419,7 +1425,11 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
atomic_set(&scsi_id->sbp2_login_complete, 0);
hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8);
error = hpsb_node_write(scsi_id->ne,
scsi_id->sbp2_management_agent_addr,
data, 8);
if (error)
return error;
/*
* Wait for reconnect status (up to 1 second)...
......@@ -1448,7 +1458,7 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
return(-EIO);
}
SBP2_INFO("Reconnected to SBP-2 device");
HPSB_DEBUG("Reconnected to SBP-2 device");
return(0);
......@@ -1456,7 +1466,7 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
/*
* This function is called in order to set the busy timeout (number of
* retries to attempt) on the sbp2 device.
* retries to attempt) on the sbp2 device.
*/
static int sbp2_set_busy_timeout(struct scsi_id_instance_data *scsi_id)
{
......@@ -1480,7 +1490,7 @@ static int sbp2_set_busy_timeout(struct scsi_id_instance_data *scsi_id)
/*
* This function is called to parse sbp2 device's config rom unit
* directory. Used to determine things like sbp2 management agent offset,
* and command set used (SCSI or RBC).
* and command set used (SCSI or RBC).
*/
static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
struct unit_directory *ud)
......@@ -1638,18 +1648,18 @@ static int sbp2_max_speed_and_size(struct scsi_id_instance_data *scsi_id)
scsi_id->max_payload_size = min(sbp2_speedto_max_payload[scsi_id->speed_code],
(u8)(hi->host->csr.max_rec - 1));
SBP2_ERR("Node " NODE_BUS_FMT ": Max speed [%s] - Max payload [%u]",
NODE_BUS_ARGS(hi->host, scsi_id->ne->nodeid),
hpsb_speedto_str[scsi_id->speed_code],
1 << ((u32)scsi_id->max_payload_size + 2));
HPSB_DEBUG("Node " NODE_BUS_FMT ": Max speed [%s] - Max payload [%u]",
NODE_BUS_ARGS(hi->host, scsi_id->ne->nodeid),
hpsb_speedto_str[scsi_id->speed_code],
1 << ((u32)scsi_id->max_payload_size + 2));
return(0);
}
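The byte count printed above comes from treating max_payload_size as an exponent; a minimal sketch of that conversion (helper name illustrative):

/* Illustrative helper: the payload code is an exponent, so the byte count
 * printed above is 1 << (code + 2).  S100 (code 0x7) gives 512 bytes,
 * S400 (code 0x9) gives 2048 bytes. */
static unsigned int sbp2_payload_bytes(unsigned char max_payload_size)
{
	return 1u << ((unsigned int)max_payload_size + 2);
}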
/*
* This function is called in order to perform a SBP-2 agent reset.
* This function is called in order to perform a SBP-2 agent reset.
*/
static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait)
static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait)
{
quadlet_t data;
u64 addr;
......@@ -1690,7 +1700,7 @@ static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
unchar *scsi_cmd,
unsigned int scsi_use_sg,
unsigned int scsi_request_bufflen,
void *scsi_request_buffer,
void *scsi_request_buffer,
unsigned char scsi_dir)
{
struct sbp2scsi_host_info *hi = scsi_id->hi;
......@@ -1734,7 +1744,7 @@ static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
case SCSI_DATA_UNKNOWN:
default:
SBP2_ERR("SCSI data transfer direction not specified. "
"Update the SBP2 direction table in sbp2.h if "
"Update the SBP2 direction table in sbp2.h if "
"necessary for your application");
print_command (scsi_cmd);
orb_direction = sbp2scsi_direction_table[*scsi_cmd];
......@@ -1805,12 +1815,12 @@ static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
while (sg_len) {
scatter_gather_element[sg_count].segment_base_lo = sg_addr;
if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
scatter_gather_element[sg_count].length_segment_base_hi =
scatter_gather_element[sg_count].length_segment_base_hi =
PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
} else {
scatter_gather_element[sg_count].length_segment_base_hi =
scatter_gather_element[sg_count].length_segment_base_hi =
PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
sg_len = 0;
}
......@@ -1821,14 +1831,14 @@ static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
/* Number of page table (s/g) elements */
command_orb->misc |= ORB_SET_DATA_SIZE(sg_count);
sbp2util_packet_dump(scatter_gather_element,
(sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
sbp2util_packet_dump(scatter_gather_element,
(sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
"sbp2 s/g list", command->sge_dma);
/*
* Byte swap page tables if necessary
*/
sbp2util_cpu_to_be32_buffer(scatter_gather_element,
sbp2util_cpu_to_be32_buffer(scatter_gather_element,
(sizeof(struct sbp2_unrestricted_page_table)) *
sg_count);
......@@ -1871,7 +1881,7 @@ static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
/*
* Need to turn this into page tables, since the
* buffer is too large.
*/
*/
command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
command_orb->data_descriptor_lo = command->sge_dma;
......@@ -1889,12 +1899,12 @@ static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
while (sg_len) {
scatter_gather_element[sg_count].segment_base_lo = sg_addr;
if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
scatter_gather_element[sg_count].length_segment_base_hi =
scatter_gather_element[sg_count].length_segment_base_hi =
PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
} else {
scatter_gather_element[sg_count].length_segment_base_hi =
scatter_gather_element[sg_count].length_segment_base_hi =
PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
sg_len = 0;
}
......@@ -1904,14 +1914,14 @@ static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
/* Number of page table (s/g) elements */
command_orb->misc |= ORB_SET_DATA_SIZE(sg_count);
sbp2util_packet_dump(scatter_gather_element,
(sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
sbp2util_packet_dump(scatter_gather_element,
(sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
"sbp2 s/g list", command->sge_dma);
/*
* Byte swap page tables if necessary
*/
sbp2util_cpu_to_be32_buffer(scatter_gather_element,
sbp2util_cpu_to_be32_buffer(scatter_gather_element,
(sizeof(struct sbp2_unrestricted_page_table)) *
sg_count);
......@@ -1932,9 +1942,9 @@ static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
return(0);
}
/*
* This function is called in order to begin a regular SBP-2 command.
* This function is called in order to begin a regular SBP-2 command.
*/
static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
struct sbp2_command_info *command)
......@@ -2019,7 +2029,7 @@ static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
}
/*
* This function is called in order to begin a regular SBP-2 command.
* This function is called in order to begin a regular SBP-2 command.
*/
static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
......@@ -2046,8 +2056,8 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
/*
* The scsi stack sends down a request_bufflen which does not match the
* length field in the scsi cdb. This causes some sbp2 devices to
* reject this inquiry command. Fix the request_bufflen.
* length field in the scsi cdb. This causes some sbp2 devices to
* reject this inquiry command. Fix the request_bufflen.
*/
if (*cmd == INQUIRY) {
if (force_inquiry_hack || scsi_id->workarounds & SBP2_BREAKAGE_INQUIRY_HACK)
......@@ -2061,14 +2071,14 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
*/
sbp2_create_command_orb(scsi_id, command, cmd, SCpnt->use_sg,
request_bufflen, SCpnt->request_buffer,
SCpnt->sc_data_direction);
SCpnt->sc_data_direction);
/*
* Update our cdb if necessary (to handle sbp2 RBC command set
* differences). This is where the command set hacks go! =)
*/
sbp2_check_sbp2_command(scsi_id, command->command_orb.cdb);
sbp2util_packet_dump(&command->command_orb, sizeof(struct sbp2_command_orb),
sbp2util_packet_dump(&command->command_orb, sizeof(struct sbp2_command_orb),
"sbp2 command orb", command->command_orb_dma);
/*
......@@ -2080,7 +2090,7 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
* Link up the orb, and ring the doorbell if needed
*/
sbp2_link_orb_command(scsi_id, command);
return(0);
}
......@@ -2097,13 +2107,13 @@ static void sbp2_check_sbp2_command(struct scsi_id_instance_data *scsi_id, uncha
SBP2_DEBUG("sbp2_check_sbp2_command");
switch (*cmd) {
case READ_6:
if (sbp2_command_conversion_device_type(device_type)) {
SBP2_DEBUG("Convert READ_6 to READ_10");
/*
* Need to turn read_6 into read_10
*/
......@@ -2117,7 +2127,7 @@ static void sbp2_check_sbp2_command(struct scsi_id_instance_data *scsi_id, uncha
new_cmd[7] = 0x0;
new_cmd[8] = cmd[4];
new_cmd[9] = cmd[5];
memcpy(cmd, new_cmd, 10);
}
......@@ -2129,7 +2139,7 @@ static void sbp2_check_sbp2_command(struct scsi_id_instance_data *scsi_id, uncha
if (sbp2_command_conversion_device_type(device_type)) {
SBP2_DEBUG("Convert WRITE_6 to WRITE_10");
/*
* Need to turn write_6 into write_10
*/
......@@ -2143,7 +2153,7 @@ static void sbp2_check_sbp2_command(struct scsi_id_instance_data *scsi_id, uncha
new_cmd[7] = 0x0;
new_cmd[8] = cmd[4];
new_cmd[9] = cmd[5];
memcpy(cmd, new_cmd, 10);
}
......@@ -2169,7 +2179,7 @@ static void sbp2_check_sbp2_command(struct scsi_id_instance_data *scsi_id, uncha
new_cmd[7] = 0x0;
new_cmd[8] = cmd[4];
new_cmd[9] = cmd[5];
memcpy(cmd, new_cmd, 10);
}
......@@ -2232,7 +2242,7 @@ static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id,
SBP2_DEBUG("sbp2_check_sbp2_response");
switch (SCpnt->cmnd[0]) {
case INQUIRY:
/*
......@@ -2270,7 +2280,7 @@ static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id,
case MODE_SENSE:
if (sbp2_command_conversion_device_type(device_type)) {
SBP2_DEBUG("Modify mode sense response (10 byte version)");
scsi_buf[0] = scsi_buf[1]; /* Mode data length */
......@@ -2278,7 +2288,6 @@ static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id,
scsi_buf[2] = scsi_buf[3]; /* Device specific parameter */
scsi_buf[3] = scsi_buf[7]; /* Block descriptor length */
memcpy(scsi_buf + 4, scsi_buf + 8, scsi_buf[0]);
}
break;
......@@ -2342,7 +2351,7 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
}
/*
* Put response into scsi_id status fifo...
* Put response into scsi_id status fifo...
*/
memcpy(&scsi_id->status_block, data, length);
......@@ -2394,7 +2403,7 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
if (STATUS_GET_DEAD_BIT(scsi_id->status_block.ORB_offset_hi_misc)) {
/*
* Initiate a fetch agent reset.
* Initiate a fetch agent reset.
*/
SBP2_DEBUG("Dead bit set - initiating fetch agent reset");
sbp2_agent_reset(scsi_id, 0);
......@@ -2405,7 +2414,7 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
/*
* Check here to see if there are no commands in-use. If there are none, we can
* null out last orb so that next time around we write directly to the orb pointer...
* null out last orb so that next time around we write directly to the orb pointer...
* Quick start saves one 1394 bus transaction.
*/
if (list_empty(&scsi_id->sbp2_command_orb_inuse)) {
......@@ -2413,8 +2422,8 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
}
} else {
/*
/*
* It's probably a login/logout/reconnect status.
*/
if ((scsi_id->login_orb_dma == scsi_id->status_block.ORB_offset_lo) ||
......@@ -2443,10 +2452,10 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
**************************************/
/*
* This routine is the main request entry routine for doing I/O. It is
* This routine is the main request entry routine for doing I/O. It is
* called from the scsi stack directly.
*/
static int sbp2scsi_queuecommand (Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
static int sbp2scsi_queuecommand (Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
{
struct scsi_id_instance_data *scsi_id =
(struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
......@@ -2521,7 +2530,7 @@ static int sbp2scsi_queuecommand (Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
* This function is called in order to complete all outstanding SBP-2
* commands (in case of resets, etc.).
*/
static void sbp2scsi_complete_all_commands(struct scsi_id_instance_data *scsi_id,
static void sbp2scsi_complete_all_commands(struct scsi_id_instance_data *scsi_id,
u32 status)
{
struct sbp2scsi_host_info *hi = scsi_id->hi;
......@@ -2581,7 +2590,7 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
SBP2_ERR("Bus reset in progress - retry command later");
return;
}
/*
* Switch on scsi status
*/
......@@ -2650,7 +2659,7 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
* or hot-plug...
*/
#if 0
if ((scsi_status == SBP2_SCSI_STATUS_CHECK_CONDITION) &&
if ((scsi_status == SBP2_SCSI_STATUS_CHECK_CONDITION) &&
(SCpnt->sense_buffer[2] == UNIT_ATTENTION)) {
SBP2_DEBUG("UNIT ATTENTION - return busy");
SCpnt->result = DID_BUS_BUSY << 16;
......@@ -2680,7 +2689,7 @@ static int sbp2scsi_slave_configure (struct scsi_device *sdev)
* Called by scsi stack when something has really gone wrong. Usually
* called when a command has timed-out for some reason.
*/
static int sbp2scsi_abort (Scsi_Cmnd *SCpnt)
static int sbp2scsi_abort (Scsi_Cmnd *SCpnt)
{
struct scsi_id_instance_data *scsi_id =
(struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
......@@ -2689,7 +2698,7 @@ static int sbp2scsi_abort (Scsi_Cmnd *SCpnt)
SBP2_ERR("aborting sbp2 command");
print_command (SCpnt->cmnd);
if (scsi_id) {
/*
......@@ -2716,10 +2725,10 @@ static int sbp2scsi_abort (Scsi_Cmnd *SCpnt)
}
/*
* Initiate a fetch agent reset.
* Initiate a fetch agent reset.
*/
sbp2_agent_reset(scsi_id, 0);
sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY);
sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY);
}
return(SUCCESS);
......@@ -2728,7 +2737,7 @@ static int sbp2scsi_abort (Scsi_Cmnd *SCpnt)
/*
* Called by scsi stack when something has really gone wrong.
*/
static int sbp2scsi_reset (Scsi_Cmnd *SCpnt)
static int sbp2scsi_reset (Scsi_Cmnd *SCpnt)
{
struct scsi_id_instance_data *scsi_id =
(struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
......
......@@ -271,7 +271,7 @@ struct sbp2_status_block {
#endif
/*
* SCSI direction table...
* SCSI direction table...
* (now used as a back-up in case the direction passed down from above is "unknown")
*
* DIN = IN data direction
......@@ -285,7 +285,7 @@ struct sbp2_status_block {
#define DIN ORB_DIRECTION_READ_FROM_MEDIA
#define DOU ORB_DIRECTION_WRITE_TO_MEDIA
#define DNO ORB_DIRECTION_NO_DATA_TRANSFER
#define DUN DIN
#define DUN DIN
static unchar sbp2scsi_direction_table[0x100] = {
DNO,DNO,DIN,DIN,DOU,DIN,DIN,DOU,DIN,DUN,DOU,DOU,DUN,DUN,DUN,DIN,
......@@ -316,8 +316,8 @@ enum cmd_dma_types {
CMD_DMA_SINGLE
};
/*
* Encapsulates all the info necessary for an outstanding command.
/*
* Encapsulates all the info necessary for an outstanding command.
*/
struct sbp2_command_info {
......@@ -386,12 +386,12 @@ struct scsi_id_instance_data {
u32 sbp2_device_type_and_lun;
u32 sbp2_firmware_revision;
/*
/*
* Variable used for logins, reconnects, logouts, query logins
*/
atomic_t sbp2_login_complete;
/*
/*
* Pool of command orbs, so we can have more than one overlapped command per id
*/
spinlock_t sbp2_command_orb_lock;
......@@ -433,8 +433,8 @@ static int sbp2util_create_command_orb_pool(struct scsi_id_instance_data *scsi_i
static void sbp2util_remove_command_orb_pool(struct scsi_id_instance_data *scsi_id);
static struct sbp2_command_info *sbp2util_find_command_for_orb(struct scsi_id_instance_data *scsi_id, dma_addr_t orb);
static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(struct scsi_id_instance_data *scsi_id, void *SCpnt);
static struct sbp2_command_info *sbp2util_allocate_command_orb(struct scsi_id_instance_data *scsi_id,
Scsi_Cmnd *Current_SCpnt,
static struct sbp2_command_info *sbp2util_allocate_command_orb(struct scsi_id_instance_data *scsi_id,
Scsi_Cmnd *Current_SCpnt,
void (*Current_done)(Scsi_Cmnd *));
static void sbp2util_mark_command_completed(struct scsi_id_instance_data *scsi_id,
struct sbp2_command_info *command);
......@@ -455,8 +455,8 @@ static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid, quadlet_
*/
static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id);
static int sbp2_login_device(struct scsi_id_instance_data *scsi_id);
static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id);
static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id);
static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id);
static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id);
static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int destid,
quadlet_t *data, u64 addr, size_t length, u16 flags);
static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait);
......@@ -465,7 +465,7 @@ static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
unchar *scsi_cmd,
unsigned int scsi_use_sg,
unsigned int scsi_request_bufflen,
void *scsi_request_buffer,
void *scsi_request_buffer,
unsigned char scsi_dir);
static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
struct sbp2_command_info *command);
......
......@@ -102,7 +102,7 @@ struct dma_iso_ctx {
unsigned int *buffer_status;
struct timeval *buffer_time; /* time when the buffer was received */
unsigned int *last_used_cmd; /* For ISO Transmit with
unsigned int *last_used_cmd; /* For ISO Transmit with
variable sized packets only ! */
int ctrlClear;
int ctrlSet;
......@@ -154,7 +154,7 @@ static struct hpsb_highlevel video1394_highlevel;
static int free_dma_iso_ctx(struct dma_iso_ctx *d)
{
int i;
DBGMSG(d->ohci->host->id, "Freeing dma_iso_ctx %d", d->ctx);
ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
......@@ -260,7 +260,7 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
d->cmdPtr = OHCI1394_IsoRcvCommandPtr+32*d->ctx;
d->ctxMatch = OHCI1394_IsoRcvContextMatch+32*d->ctx;
d->ir_prg = kmalloc(d->num_desc * sizeof(struct dma_cmd *),
d->ir_prg = kmalloc(d->num_desc * sizeof(struct dma_cmd *),
GFP_KERNEL);
if (d->ir_prg == NULL) {
......@@ -273,7 +273,7 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
d->nb_cmd = d->buf_size / PAGE_SIZE + 1;
d->left_size = (d->frame_size % PAGE_SIZE) ?
d->frame_size % PAGE_SIZE : PAGE_SIZE;
for (i = 0;i < d->num_desc; i++) {
if (dma_prog_region_alloc(&d->prg_reg[i], d->nb_cmd *
sizeof(struct dma_cmd), ohci->dev)) {
......@@ -289,21 +289,21 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
d->ctrlClear = OHCI1394_IsoXmitContextControlClear+16*d->ctx;
d->cmdPtr = OHCI1394_IsoXmitCommandPtr+16*d->ctx;
d->it_prg = kmalloc(d->num_desc * sizeof(struct it_dma_prg *),
d->it_prg = kmalloc(d->num_desc * sizeof(struct it_dma_prg *),
GFP_KERNEL);
if (d->it_prg == NULL) {
PRINT(KERN_ERR, ohci->host->id,
PRINT(KERN_ERR, ohci->host->id,
"Failed to allocate dma it prg");
free_dma_iso_ctx(d);
return NULL;
}
memset(d->it_prg, 0, d->num_desc*sizeof(struct it_dma_prg *));
d->packet_size = packet_size;
if (PAGE_SIZE % packet_size || packet_size>4096) {
PRINT(KERN_ERR, ohci->host->id,
PRINT(KERN_ERR, ohci->host->id,
"Packet size %d (page_size: %ld) "
"not yet supported\n",
packet_size, PAGE_SIZE);
......@@ -362,7 +362,7 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
memset(d->buffer_time, 0, d->num_desc * sizeof(struct timeval));
memset(d->last_used_cmd, 0, d->num_desc * sizeof(unsigned int));
memset(d->next_buffer, -1, d->num_desc * sizeof(int));
spin_lock_init(&d->lock);
PRINT(KERN_INFO, ohci->host->id, "Iso %s DMA: %d buffers "
......@@ -412,9 +412,9 @@ static void initialize_dma_ir_prg(struct dma_iso_ctx *d, int n, int flags)
(unsigned long)d->dma.kvirt));
ir_prg[1].branchAddress = cpu_to_le32((dma_prog_region_offset_to_bus(ir_reg,
2 * sizeof(struct dma_cmd)) & 0xfffffff0) | 0x1);
for (i = 2; i < d->nb_cmd - 1; i++) {
ir_prg[i].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
ir_prg[i].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
DMA_CTL_BRANCH | PAGE_SIZE);
ir_prg[i].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma,
(buf+(i-1)*PAGE_SIZE) -
......@@ -426,21 +426,21 @@ static void initialize_dma_ir_prg(struct dma_iso_ctx *d, int n, int flags)
}
/* The last descriptor will generate an interrupt */
ir_prg[i].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
ir_prg[i].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
DMA_CTL_IRQ | DMA_CTL_BRANCH | d->left_size);
ir_prg[i].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma,
(buf+(i-1)*PAGE_SIZE) -
(unsigned long)d->dma.kvirt));
} else {
} else {
/* Only one DMA page is used. Read d->left_size immediately and */
/* generate an interrupt as this is also the last page. */
ir_prg[1].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
ir_prg[1].control = cpu_to_le32(DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE |
DMA_CTL_IRQ | DMA_CTL_BRANCH | (d->left_size-4));
ir_prg[1].address = cpu_to_le32(dma_region_offset_to_bus(&d->dma,
(buf + 4) - (unsigned long)d->dma.kvirt));
}
}
static void initialize_dma_ir_ctx(struct dma_iso_ctx *d, int tag, int flags)
{
struct ti_ohci *ohci = (struct ti_ohci *)d->ohci;
......@@ -462,13 +462,13 @@ static void initialize_dma_ir_ctx(struct dma_iso_ctx *d, int tag, int flags)
reg_write(ohci, d->ctrlSet, 0x80000000);
/* Set isoch header */
if (flags & VIDEO1394_INCLUDE_ISO_HEADERS)
if (flags & VIDEO1394_INCLUDE_ISO_HEADERS)
reg_write(ohci, d->ctrlSet, 0x40000000);
/* Set the context match register to match on all tags,
/* Set the context match register to match on all tags,
sync for sync tag, and listen to d->channel */
reg_write(ohci, d->ctxMatch, 0xf0000000|((tag&0xf)<<8)|d->channel);
/* Set up isoRecvIntMask to generate interrupts */
reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1<<d->ctx);
}
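A hedged sketch of the IsoRcvContextMatch value written above, with bit positions as given in the OHCI 1394 specification (helper name illustrative, not part of the driver):

/* 0xf0000000 | ((tag & 0xf) << 8) | d->channel, decoded:
 * bits 31-28: which iso tag values to accept (0xf = all four),
 * bits 11-8 : sync value the context waits for,
 * bits  5-0 : iso channel number to listen on. */
static unsigned int ir_context_match(unsigned int tag_mask, unsigned int sync,
				     unsigned int channel)
{
	return ((tag_mask & 0xf) << 28) | ((sync & 0xf) << 8) | (channel & 0x3f);
}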
......@@ -524,9 +524,9 @@ static inline void put_timestamp(struct ti_ohci *ohci, struct dma_iso_ctx * d,
timeStamp = ((cycleTimer & 0x0fff) + d->syt_offset); /* 11059 = 450 us */
timeStamp = (timeStamp % 3072 + ((timeStamp / 3072) << 12)
+ (cycleTimer & 0xf000)) & 0xffff;
buf[6] = timeStamp >> 8;
buf[7] = timeStamp & 0xff;
buf[6] = timeStamp >> 8;
buf[7] = timeStamp & 0xff;
/* if first packet is empty packet, then put timestamp into the next full one too */
if ( (le32_to_cpu(d->it_prg[n][0].data[1]) >>16) == 0x008) {
......@@ -557,7 +557,7 @@ static inline void put_timestamp(struct ti_ohci *ohci, struct dma_iso_ctx * d,
#if 0
printk("curr: %d, next: %d, cycleTimer: %08x timeStamp: %08x\n",
curr, n, cycleTimer, timeStamp);
#endif
#endif
}
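The arithmetic in put_timestamp() above folds the adjusted cycle offset back into SYT form; a minimal sketch of that packing, assuming the usual IEC 61883 SYT layout (helper name illustrative):

/* SYT is a 16-bit field: bits 15-12 carry the low four bits of the bus
 * cycle count, bits 11-0 carry the cycle offset (0..3071 ticks), which is
 * why the code above reduces the sum modulo 3072 and shifts the carry up
 * by 12 bits before adding in (cycleTimer & 0xf000). */
static unsigned int syt_pack(unsigned int cycle_count, unsigned int cycle_offset)
{
	return ((cycle_count & 0xf) << 12) | (cycle_offset % 3072);
}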
void wakeup_dma_it_ctx(unsigned long l)
......@@ -569,7 +569,7 @@ void wakeup_dma_it_ctx(unsigned long l)
spin_lock(&d->lock);
for (i = 0; i < d->num_desc; i++) {
if (d->it_prg[i][d->last_used_cmd[i]].end.status &
if (d->it_prg[i][d->last_used_cmd[i]].end.status &
cpu_to_le32(0xFFFF0000)) {
int next = d->next_buffer[i];
put_timestamp(ohci, d, next);
......@@ -592,23 +592,23 @@ static void initialize_dma_it_prg(struct dma_iso_ctx *d, int n, int sync_tag)
int i;
d->last_used_cmd[n] = d->nb_cmd - 1;
for (i=0;i<d->nb_cmd;i++) {
it_prg[i].begin.control = cpu_to_le32(DMA_CTL_OUTPUT_MORE |
DMA_CTL_IMMEDIATE | 8) ;
it_prg[i].begin.address = 0;
it_prg[i].begin.status = 0;
it_prg[i].data[0] = cpu_to_le32(
(IEEE1394_SPEED_100 << 16)
(IEEE1394_SPEED_100 << 16)
| (/* tag */ 1 << 14)
| (d->channel << 8)
| (d->channel << 8)
| (TCODE_ISO_DATA << 4));
if (i==0) it_prg[i].data[0] |= cpu_to_le32(sync_tag);
it_prg[i].data[1] = cpu_to_le32(d->packet_size << 16);
it_prg[i].data[2] = 0;
it_prg[i].data[3] = 0;
it_prg[i].end.control = cpu_to_le32(DMA_CTL_OUTPUT_LAST |
DMA_CTL_BRANCH);
it_prg[i].end.address =
......@@ -617,15 +617,15 @@ static void initialize_dma_it_prg(struct dma_iso_ctx *d, int n, int sync_tag)
if (i<d->nb_cmd-1) {
it_prg[i].end.control |= cpu_to_le32(d->packet_size);
it_prg[i].begin.branchAddress =
it_prg[i].begin.branchAddress =
cpu_to_le32((dma_prog_region_offset_to_bus(it_reg, (i + 1) *
sizeof(struct it_dma_prg)) & 0xfffffff0) | 0x3);
it_prg[i].end.branchAddress =
it_prg[i].end.branchAddress =
cpu_to_le32((dma_prog_region_offset_to_bus(it_reg, (i + 1) *
sizeof(struct it_dma_prg)) & 0xfffffff0) | 0x3);
} else {
/* the last prg generates an interrupt */
it_prg[i].end.control |= cpu_to_le32(DMA_CTL_UPDATE |
it_prg[i].end.control |= cpu_to_le32(DMA_CTL_UPDATE |
DMA_CTL_IRQ | d->left_size);
/* the last prg doesn't branch */
it_prg[i].begin.branchAddress = 0;
......@@ -657,7 +657,7 @@ static void initialize_dma_it_prg_var_packet_queue(
} else {
size = packet_sizes[i];
}
it_prg[i].data[1] = cpu_to_le32(size << 16);
it_prg[i].data[1] = cpu_to_le32(size << 16);
it_prg[i].end.control = cpu_to_le32(DMA_CTL_OUTPUT_LAST | DMA_CTL_BRANCH);
if (i < d->nb_cmd-1 && packet_sizes[i+1] != 0) {
......@@ -670,7 +670,7 @@ static void initialize_dma_it_prg_var_packet_queue(
sizeof(struct it_dma_prg)) & 0xfffffff0) | 0x3);
} else {
/* the last prg generates an interrupt */
it_prg[i].end.control |= cpu_to_le32(DMA_CTL_UPDATE |
it_prg[i].end.control |= cpu_to_le32(DMA_CTL_UPDATE |
DMA_CTL_IRQ | size);
/* the last prg doesn't branch */
it_prg[i].begin.branchAddress = 0;
......@@ -694,7 +694,7 @@ static void initialize_dma_it_ctx(struct dma_iso_ctx *d, int sync_tag,
for (i=0;i<d->num_desc;i++)
initialize_dma_it_prg(d, i, sync_tag);
/* Set up isoRecvIntMask to generate interrupts */
reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1<<d->ctx);
}
......@@ -731,9 +731,9 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
mask = mask << 1;
}
}
if (v.channel<0 || v.channel>(ISO_CHANNELS-1)) {
PRINT(KERN_ERR, ohci->host->id,
PRINT(KERN_ERR, ohci->host->id,
"Iso channel %d out of bounds", v.channel);
return -EFAULT;
}
......@@ -743,7 +743,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
(u32)(ohci->ISO_channel_usage>>32),
(u32)(ohci->ISO_channel_usage&0xffffffff));
if (ohci->ISO_channel_usage & mask) {
PRINT(KERN_ERR, ohci->host->id,
PRINT(KERN_ERR, ohci->host->id,
"Channel %d is already taken", v.channel);
return -EFAULT;
}
......@@ -762,19 +762,19 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
}
if (v.nb_buffers * v.buf_size > VIDEO1394_MAX_SIZE) {
PRINT(KERN_ERR, ohci->host->id,
"%d buffers of size %d bytes is too big",
PRINT(KERN_ERR, ohci->host->id,
"%d buffers of size %d bytes is too big",
v.nb_buffers, v.buf_size);
return -EFAULT;
}
if (cmd == VIDEO1394_IOC_LISTEN_CHANNEL) {
d = alloc_dma_iso_ctx(ohci, OHCI_ISO_RECEIVE,
v.nb_buffers, v.buf_size,
v.nb_buffers, v.buf_size,
v.channel, 0);
if (d == NULL) {
PRINT(KERN_ERR, ohci->host->id,
PRINT(KERN_ERR, ohci->host->id,
"Couldn't allocate ir context");
return -EFAULT;
}
......@@ -785,21 +785,21 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
v.buf_size = d->buf_size;
list_add_tail(&d->link, &ctx->context_list);
PRINT(KERN_INFO, ohci->host->id,
PRINT(KERN_INFO, ohci->host->id,
"iso context %d listen on channel %d",
d->ctx, v.channel);
}
else {
d = alloc_dma_iso_ctx(ohci, OHCI_ISO_TRANSMIT,
v.nb_buffers, v.buf_size,
v.nb_buffers, v.buf_size,
v.channel, v.packet_size);
if (d == NULL) {
PRINT(KERN_ERR, ohci->host->id,
PRINT(KERN_ERR, ohci->host->id,
"Couldn't allocate it context");
return -EFAULT;
}
initialize_dma_it_ctx(d, v.sync_tag,
initialize_dma_it_ctx(d, v.sync_tag,
v.syt_offset, v.flags);
ctx->current_ctx = d;
......@@ -808,7 +808,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
list_add_tail(&d->link, &ctx->context_list);
PRINT(KERN_INFO, ohci->host->id,
PRINT(KERN_INFO, ohci->host->id,
"Iso context %d talk on channel %d", d->ctx,
v.channel);
}
......@@ -818,7 +818,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
return 0;
}
case VIDEO1394_IOC_UNLISTEN_CHANNEL:
case VIDEO1394_IOC_UNLISTEN_CHANNEL:
case VIDEO1394_IOC_UNTALK_CHANNEL:
{
int channel;
......@@ -829,13 +829,13 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
return -EFAULT;
if (channel<0 || channel>(ISO_CHANNELS-1)) {
PRINT(KERN_ERR, ohci->host->id,
PRINT(KERN_ERR, ohci->host->id,
"Iso channel %d out of bound", channel);
return -EFAULT;
}
mask = (u64)0x1<<channel;
if (!(ohci->ISO_channel_usage & mask)) {
PRINT(KERN_ERR, ohci->host->id,
PRINT(KERN_ERR, ohci->host->id,
"Channel %d is not being used", channel);
return -EFAULT;
}
......@@ -852,7 +852,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
PRINT(KERN_INFO, ohci->host->id, "Iso context %d "
"stop talking on channel %d", d->ctx, channel);
free_dma_iso_ctx(d);
return 0;
}
case VIDEO1394_IOC_LISTEN_QUEUE_BUFFER:
......@@ -866,20 +866,20 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
d = find_ctx(&ctx->context_list, OHCI_ISO_RECEIVE, v.channel);
if ((v.buffer<0) || (v.buffer>d->num_desc)) {
PRINT(KERN_ERR, ohci->host->id,
PRINT(KERN_ERR, ohci->host->id,
"Buffer %d out of range",v.buffer);
return -EFAULT;
}
spin_lock_irqsave(&d->lock,flags);
if (d->buffer_status[v.buffer]==VIDEO1394_BUFFER_QUEUED) {
PRINT(KERN_ERR, ohci->host->id,
PRINT(KERN_ERR, ohci->host->id,
"Buffer %d is already used",v.buffer);
spin_unlock_irqrestore(&d->lock,flags);
return -EFAULT;
}
d->buffer_status[v.buffer]=VIDEO1394_BUFFER_QUEUED;
if (d->last_buffer>=0)
......@@ -893,7 +893,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
spin_unlock_irqrestore(&d->lock,flags);
if (!(reg_read(ohci, d->ctrlSet) & 0x8000))
if (!(reg_read(ohci, d->ctrlSet) & 0x8000))
{
DBGMSG(ohci->host->id, "Starting iso DMA ctx=%d",d->ctx);
......@@ -907,13 +907,13 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
else {
/* Wake up dma context if necessary */
if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
PRINT(KERN_INFO, ohci->host->id,
PRINT(KERN_INFO, ohci->host->id,
"Waking up iso dma ctx=%d", d->ctx);
reg_write(ohci, d->ctrlSet, 0x1000);
}
}
return 0;
}
case VIDEO1394_IOC_LISTEN_WAIT_BUFFER:
case VIDEO1394_IOC_LISTEN_POLL_BUFFER:
......@@ -928,13 +928,13 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
d = find_ctx(&ctx->context_list, OHCI_ISO_RECEIVE, v.channel);
if ((v.buffer<0) || (v.buffer>d->num_desc)) {
PRINT(KERN_ERR, ohci->host->id,
PRINT(KERN_ERR, ohci->host->id,
"Buffer %d out of range",v.buffer);
return -EFAULT;
}
/*
* I change the way it works so that it returns
* I change the way it works so that it returns
* the last received frame.
*/
spin_lock_irqsave(&d->lock, flags);
......@@ -961,7 +961,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
}
}
#else
if (wait_event_interruptible(d->waitq,
if (wait_event_interruptible(d->waitq,
d->buffer_status[v.buffer]
== VIDEO1394_BUFFER_READY)
== -ERESTARTSYS)
......@@ -970,7 +970,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
d->buffer_status[v.buffer]=VIDEO1394_BUFFER_FREE;
break;
default:
PRINT(KERN_ERR, ohci->host->id,
PRINT(KERN_ERR, ohci->host->id,
"Buffer %d is not queued",v.buffer);
spin_unlock_irqrestore(&d->lock, flags);
return -EFAULT;
......@@ -1011,16 +1011,16 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
d = find_ctx(&ctx->context_list, OHCI_ISO_TRANSMIT, v.channel);
if ((v.buffer<0) || (v.buffer>d->num_desc)) {
PRINT(KERN_ERR, ohci->host->id,
PRINT(KERN_ERR, ohci->host->id,
"Buffer %d out of range",v.buffer);
return -EFAULT;
}
if (d->flags & VIDEO1394_VARIABLE_PACKET_SIZE) {
unsigned int *psizes;
int buf_size = d->nb_cmd * sizeof(unsigned int);
if (copy_from_user(&qv, (void *)arg, sizeof(qv)))
if (copy_from_user(&qv, (void *)arg, sizeof(qv)))
return -EFAULT;
psizes = kmalloc(buf_size, GFP_KERNEL);
......@@ -1038,14 +1038,14 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
spin_lock_irqsave(&d->lock,flags);
if (d->buffer_status[v.buffer]!=VIDEO1394_BUFFER_FREE) {
PRINT(KERN_ERR, ohci->host->id,
PRINT(KERN_ERR, ohci->host->id,
"Buffer %d is already used",v.buffer);
spin_unlock_irqrestore(&d->lock,flags);
if (qv.packet_sizes)
kfree(qv.packet_sizes);
return -EFAULT;
}
if (d->flags & VIDEO1394_VARIABLE_PACKET_SIZE) {
initialize_dma_it_prg_var_packet_queue(
d, v.buffer, qv.packet_sizes,
......@@ -1056,7 +1056,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
if (d->last_buffer >= 0) {
d->it_prg[d->last_buffer]
[ d->last_used_cmd[d->last_buffer] ].end.branchAddress =
[ d->last_used_cmd[d->last_buffer] ].end.branchAddress =
cpu_to_le32((dma_prog_region_offset_to_bus(&d->prg_reg[v.buffer],
0) & 0xfffffff0) | 0x3);
......@@ -1073,7 +1073,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
spin_unlock_irqrestore(&d->lock,flags);
if (!(reg_read(ohci, d->ctrlSet) & 0x8000))
if (!(reg_read(ohci, d->ctrlSet) & 0x8000))
{
DBGMSG(ohci->host->id, "Starting iso transmit DMA ctx=%d",
d->ctx);
......@@ -1089,8 +1089,8 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
else {
/* Wake up dma context if necessary */
if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
PRINT(KERN_INFO, ohci->host->id,
"Waking up iso transmit dma ctx=%d",
PRINT(KERN_INFO, ohci->host->id,
"Waking up iso transmit dma ctx=%d",
d->ctx);
put_timestamp(ohci, d, d->last_buffer);
reg_write(ohci, d->ctrlSet, 0x1000);
......@@ -1101,7 +1101,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
kfree(qv.packet_sizes);
return 0;
}
case VIDEO1394_IOC_TALK_WAIT_BUFFER:
{
......@@ -1114,7 +1114,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
d = find_ctx(&ctx->context_list, OHCI_ISO_TRANSMIT, v.channel);
if ((v.buffer<0) || (v.buffer>d->num_desc)) {
PRINT(KERN_ERR, ohci->host->id,
PRINT(KERN_ERR, ohci->host->id,
"Buffer %d out of range",v.buffer);
return -EFAULT;
}
......@@ -1131,7 +1131,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
if (signal_pending(current)) return -EINTR;
}
#else
if (wait_event_interruptible(d->waitq,
if (wait_event_interruptible(d->waitq,
d->buffer_status[v.buffer]
== VIDEO1394_BUFFER_READY)
== -ERESTARTSYS)
......@@ -1140,7 +1140,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
d->buffer_status[v.buffer]=VIDEO1394_BUFFER_FREE;
return 0;
default:
PRINT(KERN_ERR, ohci->host->id,
PRINT(KERN_ERR, ohci->host->id,
"Buffer %d is not queued",v.buffer);
return -EFAULT;
}
......@@ -1153,7 +1153,7 @@ static int video1394_ioctl(struct inode *inode, struct file *file,
/*
* This maps the vmalloced and reserved buffer to user space.
*
* FIXME:
* FIXME:
* - PAGE_READONLY should suffice!?
* - remap_page_range is kind of inefficient for page by page remapping.
* But e.g. pte_alloc() does not work in modules ... :-(
......@@ -1211,7 +1211,7 @@ static int video1394_release(struct inode *inode, struct file *file)
struct dma_iso_ctx *d;
d = list_entry(lh, struct dma_iso_ctx, link);
mask = (u64) 1 << d->channel;
if (!(ohci->ISO_channel_usage & mask))
PRINT(KERN_ERR, ohci->host->id, "On release: Channel %d "
"is not being used", d->channel);
......@@ -1226,7 +1226,7 @@ static int video1394_release(struct inode *inode, struct file *file)
kfree(ctx);
file->private_data = NULL;
unlock_kernel();
return 0;
}
......@@ -1285,7 +1285,7 @@ static void video1394_add_host (struct hpsb_host *host)
hpsb_set_hostinfo(&video1394_highlevel, host, ohci);
hpsb_set_hostinfo_key(&video1394_highlevel, host, ohci->host->id);
minor = IEEE1394_MINOR_BLOCK_VIDEO1394 * 16 + ohci->host->id;
minor = IEEE1394_MINOR_BLOCK_VIDEO1394 * 16 + ohci->host->id;
devfs_mk_cdev(MKDEV(IEEE1394_MAJOR, minor),
S_IFCHR | S_IRUSR | S_IWUSR,
"%s/%d", VIDEO1394_DRIVER_NAME, ohci->host->id);
......@@ -1438,7 +1438,7 @@ static void __exit video1394_exit_module (void)
ret |= unregister_ioctl32_conversion(VIDEO1394_IOC32_TALK_WAIT_BUFFER);
ret |= unregister_ioctl32_conversion(VIDEO1394_IOC32_LISTEN_POLL_BUFFER);
if (ret)
PRINT_G(KERN_INFO, "Error unregistering ioctl32 translations");
PRINT_G(KERN_CRIT, "Error unregistering ioctl32 translations");
#endif
hpsb_unregister_protocol(&video1394_driver);
......@@ -1457,6 +1457,7 @@ static int __init video1394_init_module (void)
cdev_init(&video1394_cdev, &video1394_fops);
video1394_cdev.owner = THIS_MODULE;
kobject_set_name(&video1394_cdev.kobj, VIDEO1394_DRIVER_NAME);
ret = cdev_add(&video1394_cdev, IEEE1394_VIDEO1394_DEV, 16);
if (ret) {
PRINT_G(KERN_ERR, "video1394: unable to get minor device block");
......
......@@ -42,7 +42,7 @@ struct video1394_mmap {
unsigned int sync_tag;
unsigned int nb_buffers;
unsigned int buf_size;
unsigned int packet_size; /* For VARIABLE_PACKET_SIZE:
unsigned int packet_size; /* For VARIABLE_PACKET_SIZE:
Maximum packet size */
unsigned int fps;
unsigned int syt_offset;
......@@ -53,7 +53,7 @@ struct video1394_mmap {
struct video1394_queue_variable {
unsigned int channel;
unsigned int buffer;
unsigned int* packet_sizes; /* Buffer of size:
unsigned int* packet_sizes; /* Buffer of size:
buf_size / packet_size */
};
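A hedged user-space sketch of how this structure is typically filled and queued (the ioctl name VIDEO1394_IOC_TALK_QUEUE_BUFFER and the helper below are assumptions based on this header's naming scheme; error handling kept minimal):

#include <stdio.h>
#include <sys/ioctl.h>
#include "video1394.h"   /* assumed install location of this header */

/* Illustration only: queue one transmit buffer with per-packet sizes.
 * The caller supplies the open device fd, the talk channel, the mmap'ed
 * buffer index, and an array with one size entry per packet slot. */
static int queue_variable_buffer(int fd, unsigned int channel,
				 unsigned int buffer, unsigned int *sizes)
{
	struct video1394_queue_variable qv;

	qv.channel = channel;
	qv.buffer = buffer;
	qv.packet_sizes = sizes;

	if (ioctl(fd, VIDEO1394_IOC_TALK_QUEUE_BUFFER, &qv) < 0) {
		perror("VIDEO1394_IOC_TALK_QUEUE_BUFFER");
		return -1;
	}
	return 0;
}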
......