Commit 34641a58 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bcollins/linux1394-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/bcollins/linux1394-2.6: (28 commits)
  eth1394: replace __constant_htons by htons
  ieee1394: adjust code formatting in highlevel.c
  ieee1394: hl_irqs_lock is taken in hardware interrupt context
  ieee1394_core: switch to kthread API
  ieee1394: sbp2: Kconfig fix
  ieee1394: add preprocessor constant for invalid csr address
  sbp2: fix deregistration of status fifo address space
  [PATCH] eth1394: endian fixes
  Fix broken suspend/resume in ohci1394
  sbp2: use __attribute__((packed)) for on-the-wire structures
  sbp2: provide helptext for CONFIG_IEEE1394_SBP2_PHYS_DMA and mark it experimental
  Update feature removal of obsolete raw1394 ISO requests.
  sbp2: fix S800 transfers if phys_dma is off
  sbp2: remove ohci1394 specific constant
  ohci1394: make phys_dma parameter read-only
  ohci1394: set address range properties
  ieee1394: extend lowlevel API for address range properties
  sbp2: log number of supported concurrent logins
  sbp2: remove manipulation of inquiry response
  ieee1394: save RAM by using a single tlabel for broadcast transactions
  ...
parents 2eec0e08 7136b807
......@@ -49,11 +49,11 @@ Who: Paul E. McKenney <paulmck@us.ibm.com>
---------------------------
What: raw1394: requests of type RAW1394_REQ_ISO_SEND, RAW1394_REQ_ISO_LISTEN
When: November 2005
When: November 2006
Why: Deprecated in favour of the new ioctl-based rawiso interface, which is
more efficient. You should really be using libraw1394 for raw1394
access anyway.
Who: Jody McIntyre <scjody@steamballoon.com>
Who: Jody McIntyre <scjody@modernduck.com>
---------------------------
......
......@@ -128,8 +128,17 @@ config IEEE1394_SBP2
1394 bus. SBP-2 devices include harddrives and DVD devices.
config IEEE1394_SBP2_PHYS_DMA
bool "Enable Phys DMA support for SBP2 (Debug)"
depends on IEEE1394 && IEEE1394_SBP2
bool "Enable replacement for physical DMA in SBP2"
depends on IEEE1394 && IEEE1394_SBP2 && EXPERIMENTAL && (X86_32 || PPC_32)
help
This builds sbp2 for use with non-OHCI host adapters which do not
support physical DMA or for when ohci1394 is run with phys_dma=0.
Physical DMA is data movement without assistance of the drivers'
interrupt handlers. This option includes the interrupt handlers
that are required in absence of this hardware feature.
This option is buggy and currently broken on some architectures.
If unsure, say N.
config IEEE1394_ETH1394
tristate "Ethernet over 1394"
......
......@@ -779,7 +779,7 @@ static int csr1212_append_new_cache(struct csr1212_csr *csr, size_t romsize)
romsize = (romsize + (csr->max_rom - 1)) & ~(csr->max_rom - 1);
csr_addr = csr->ops->allocate_addr_range(romsize, csr->max_rom, csr->private);
if (csr_addr == ~0ULL) {
if (csr_addr == CSR1212_INVALID_ADDR_SPACE) {
return CSR1212_ENOMEM;
}
if (csr_addr < CSR1212_REGISTER_SPACE_BASE) {
......
......@@ -192,6 +192,7 @@
#define CSR1212_EXTENDED_ROM_SIZE (0x10000 * sizeof(u_int32_t))
#define CSR1212_INVALID_ADDR_SPACE -1
/* Config ROM image structures */
struct csr1212_bus_info_block_img {
......
......@@ -145,12 +145,12 @@ void dma_region_free(struct dma_region *dma)
/* find the scatterlist index and remaining offset corresponding to a
given offset from the beginning of the buffer */
static inline int dma_region_find(struct dma_region *dma, unsigned long offset,
unsigned long *rem)
unsigned int start, unsigned long *rem)
{
int i;
unsigned long off = offset;
for (i = 0; i < dma->n_dma_pages; i++) {
for (i = start; i < dma->n_dma_pages; i++) {
if (off < sg_dma_len(&dma->sglist[i])) {
*rem = off;
break;
......@@ -170,7 +170,7 @@ dma_addr_t dma_region_offset_to_bus(struct dma_region * dma,
unsigned long rem = 0;
struct scatterlist *sg =
&dma->sglist[dma_region_find(dma, offset, &rem)];
&dma->sglist[dma_region_find(dma, offset, 0, &rem)];
return sg_dma_address(sg) + rem;
}
......@@ -178,13 +178,13 @@ void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
unsigned long len)
{
int first, last;
unsigned long rem;
unsigned long rem = 0;
if (!len)
len = 1;
first = dma_region_find(dma, offset, &rem);
last = dma_region_find(dma, offset + len - 1, &rem);
first = dma_region_find(dma, offset, 0, &rem);
last = dma_region_find(dma, rem + len - 1, first, &rem);
pci_dma_sync_sg_for_cpu(dma->dev, &dma->sglist[first], last - first + 1,
dma->direction);
......@@ -194,13 +194,13 @@ void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset,
unsigned long len)
{
int first, last;
unsigned long rem;
unsigned long rem = 0;
if (!len)
len = 1;
first = dma_region_find(dma, offset, &rem);
last = dma_region_find(dma, offset + len - 1, &rem);
first = dma_region_find(dma, offset, 0, &rem);
last = dma_region_find(dma, rem + len - 1, first, &rem);
pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first],
last - first + 1, dma->direction);
......
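The dma_region_find() change above threads a start index through the helper so that the second lookup in each sync routine resumes at the segment found by the first, instead of rescanning the scatterlist from the beginning. A minimal user-space sketch of that incremental search (names and segment sizes are illustrative, not the kernel's):

    #include <stdio.h>

    /* Illustrative stand-in for the scatterlist: per-segment byte lengths. */
    static const unsigned long seg_len[] = { 4096, 4096, 4096, 4096 };
    #define NSEGS (sizeof(seg_len) / sizeof(seg_len[0]))

    /* Find the segment containing 'offset', scanning from 'start' onward;
     * store the remaining offset within that segment in *rem. */
    static unsigned int region_find(unsigned long offset, unsigned int start,
                                    unsigned long *rem)
    {
        unsigned int i;
        unsigned long off = offset;

        for (i = start; i < NSEGS; i++) {
            if (off < seg_len[i])
                break;
            off -= seg_len[i];
        }
        *rem = off;
        return i;
    }

    int main(void)
    {
        unsigned long rem = 0, offset = 5000, len = 6000;

        /* Same pattern as the sync helpers: the second search starts at
         * 'first' and uses the remainder within that segment. */
        unsigned int first = region_find(offset, 0, &rem);
        unsigned int last = region_find(rem + len - 1, first, &rem);

        printf("first=%u last=%u\n", first, last);
        return 0;
    }

With offset 5000 and length 6000 over 4 KiB segments this prints first=1 last=2, the same result a full scan from segment 0 would give.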
......@@ -367,7 +367,7 @@ static int eth1394_probe(struct device *dev)
spin_lock_init(&node_info->pdg.lock);
INIT_LIST_HEAD(&node_info->pdg.list);
node_info->pdg.sz = 0;
node_info->fifo = ETHER1394_INVALID_ADDR;
node_info->fifo = CSR1212_INVALID_ADDR_SPACE;
ud->device.driver_data = node_info;
new_node->ud = ud;
......@@ -502,10 +502,8 @@ static void ether1394_reset_priv (struct net_device *dev, int set_mtu)
/* Determine speed limit */
for (i = 0; i < host->node_count; i++)
if (max_speed > host->speed_map[NODEID_TO_NODE(host->node_id) *
64 + i])
max_speed = host->speed_map[NODEID_TO_NODE(host->node_id) *
64 + i];
if (max_speed > host->speed[i])
max_speed = host->speed[i];
priv->bc_sspd = max_speed;
/* We'll use our maxpayload as the default mtu */
......@@ -568,13 +566,11 @@ static void ether1394_add_host (struct hpsb_host *host)
if (!(host->config_roms & HPSB_CONFIG_ROM_ENTRY_IP1394))
return;
fifo_addr = hpsb_allocate_and_register_addrspace(&eth1394_highlevel,
host,
&addr_ops,
ETHER1394_REGION_ADDR_LEN,
ETHER1394_REGION_ADDR_LEN,
-1, -1);
if (fifo_addr == ~0ULL)
fifo_addr = hpsb_allocate_and_register_addrspace(
&eth1394_highlevel, host, &addr_ops,
ETHER1394_REGION_ADDR_LEN, ETHER1394_REGION_ADDR_LEN,
CSR1212_INVALID_ADDR_SPACE, CSR1212_INVALID_ADDR_SPACE);
if (fifo_addr == CSR1212_INVALID_ADDR_SPACE)
goto out;
/* We should really have our own alloc_hpsbdev() function in
......@@ -774,7 +770,7 @@ static int ether1394_rebuild_header(struct sk_buff *skb)
default:
ETH1394_PRINT(KERN_DEBUG, dev->name,
"unable to resolve type %04x addresses.\n",
eth->h_proto);
ntohs(eth->h_proto));
break;
}
......@@ -796,9 +792,8 @@ static int ether1394_header_cache(struct neighbour *neigh, struct hh_cache *hh)
(16 - ETH1394_HLEN));
struct net_device *dev = neigh->dev;
if (type == __constant_htons(ETH_P_802_3)) {
if (type == htons(ETH_P_802_3))
return -1;
}
eth->h_proto = type;
memcpy(eth->h_dest, neigh->ha, dev->addr_len);
......@@ -887,7 +882,7 @@ static inline u16 ether1394_parse_encap(struct sk_buff *skb,
/* If this is an ARP packet, convert it. First, we want to make
* use of some of the fields, since they tell us a little bit
* about the sending machine. */
if (ether_type == __constant_htons (ETH_P_ARP)) {
if (ether_type == htons(ETH_P_ARP)) {
struct eth1394_arp *arp1394 = (struct eth1394_arp*)skb->data;
struct arphdr *arp = (struct arphdr *)skb->data;
unsigned char *arp_ptr = (unsigned char *)(arp + 1);
......@@ -935,7 +930,7 @@ static inline u16 ether1394_parse_encap(struct sk_buff *skb,
*(u32*)arp_ptr = arp1394->sip; /* move sender IP addr */
arp_ptr += arp->ar_pln; /* skip over sender IP addr */
if (arp->ar_op == 1)
if (arp->ar_op == htons(ARPOP_REQUEST))
/* just set ARP req target unique ID to 0 */
*((u64*)arp_ptr) = 0;
else
......@@ -943,8 +938,8 @@ static inline u16 ether1394_parse_encap(struct sk_buff *skb,
}
/* Now add the ethernet header. */
if (dev->hard_header (skb, dev, __constant_ntohs (ether_type),
&dest_hw, NULL, skb->len) >= 0)
if (dev->hard_header(skb, dev, ntohs(ether_type), &dest_hw, NULL,
skb->len) >= 0)
ret = ether1394_type_trans(skb, dev);
return ret;
......@@ -1395,7 +1390,7 @@ static inline void ether1394_arp_to_1394arp(struct sk_buff *skb,
/* We need to encapsulate the standard header with our own. We use the
* ethernet header's proto for our own. */
static inline unsigned int ether1394_encapsulate_prep(unsigned int max_payload,
int proto,
__be16 proto,
union eth1394_hdr *hdr,
u16 dg_size, u16 dgl)
{
......@@ -1514,8 +1509,8 @@ static inline void ether1394_prep_gasp_packet(struct hpsb_packet *p,
p->data = ((quadlet_t*)skb->data) - 2;
p->data[0] = cpu_to_be32((priv->host->node_id << 16) |
ETHER1394_GASP_SPECIFIER_ID_HI);
p->data[1] = __constant_cpu_to_be32((ETHER1394_GASP_SPECIFIER_ID_LO << 24) |
ETHER1394_GASP_VERSION);
p->data[1] = cpu_to_be32((ETHER1394_GASP_SPECIFIER_ID_LO << 24) |
ETHER1394_GASP_VERSION);
/* Setting the node id to ALL_NODES (not LOCAL_BUS | ALL_NODES)
* prevents hpsb_send_packet() from setting the speed to an arbitrary
......@@ -1626,7 +1621,7 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
gfp_t kmflags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
struct eth1394hdr *eth;
struct eth1394_priv *priv = netdev_priv(dev);
int proto;
__be16 proto;
unsigned long flags;
nodeid_t dest_node;
eth1394_tx_type tx_type;
......@@ -1670,9 +1665,9 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
/* Set the transmission type for the packet. ARP packets and IP
* broadcast packets are sent via GASP. */
if (memcmp(eth->h_dest, dev->broadcast, ETH1394_ALEN) == 0 ||
proto == __constant_htons(ETH_P_ARP) ||
(proto == __constant_htons(ETH_P_IP) &&
IN_MULTICAST(__constant_ntohl(skb->nh.iph->daddr)))) {
proto == htons(ETH_P_ARP) ||
(proto == htons(ETH_P_IP) &&
IN_MULTICAST(ntohl(skb->nh.iph->daddr)))) {
tx_type = ETH1394_GASP;
dest_node = LOCAL_BUS | ALL_NODES;
max_payload = priv->bc_maxpayload - ETHER1394_GASP_OVERHEAD;
......@@ -1688,7 +1683,7 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
goto fail;
}
node_info = (struct eth1394_node_info*)node->ud->device.driver_data;
if (node_info->fifo == ETHER1394_INVALID_ADDR) {
if (node_info->fifo == CSR1212_INVALID_ADDR_SPACE) {
ret = -EAGAIN;
goto fail;
}
......@@ -1704,7 +1699,7 @@ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev)
}
/* If this is an ARP packet, convert it */
if (proto == __constant_htons (ETH_P_ARP))
if (proto == htons(ETH_P_ARP))
ether1394_arp_to_1394arp (skb, dev);
ptask->hdr.words.word1 = 0;
......
......@@ -32,8 +32,6 @@
* S3200 (per Table 16-3 of IEEE 1394b-2002). */
#define ETHER1394_REGION_ADDR_LEN 4096
#define ETHER1394_INVALID_ADDR ~0ULL
/* GASP identifier numbers for IPv4 over IEEE 1394 */
#define ETHER1394_GASP_SPECIFIER_ID 0x00005E
#define ETHER1394_GASP_SPECIFIER_ID_HI ((ETHER1394_GASP_SPECIFIER_ID >> 8) & 0xffff)
......
......@@ -19,6 +19,7 @@
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include "csr1212.h"
#include "ieee1394.h"
......@@ -105,7 +106,7 @@ static int alloc_hostnum_cb(struct hpsb_host *host, void *__data)
* Return Value: a pointer to the &hpsb_host if successful, %NULL if
* no memory was available.
*/
static DECLARE_MUTEX(host_num_alloc);
static DEFINE_MUTEX(host_num_alloc);
struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
struct device *dev)
......@@ -148,7 +149,7 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
h->topology_map = h->csr.topology_map + 3;
h->speed_map = (u8 *)(h->csr.speed_map + 2);
down(&host_num_alloc);
mutex_lock(&host_num_alloc);
while (nodemgr_for_each_host(&hostnum, alloc_hostnum_cb))
hostnum++;
......@@ -167,7 +168,7 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
class_device_register(&h->class_dev);
get_device(&h->device);
up(&host_num_alloc);
mutex_unlock(&host_num_alloc);
return h;
}
......
......@@ -30,13 +30,14 @@ struct hpsb_host {
unsigned char iso_listen_count[64];
int node_count; /* number of identified nodes on this bus */
int selfid_count; /* total number of SelfIDs received */
int nodes_active; /* number of nodes that are actually active */
int node_count; /* number of identified nodes on this bus */
int selfid_count; /* total number of SelfIDs received */
int nodes_active; /* number of nodes with active link layer */
u8 speed[ALL_NODES]; /* speed between each node and local node */
nodeid_t node_id; /* node ID of this host */
nodeid_t irm_id; /* ID of this bus' isochronous resource manager */
nodeid_t busmgr_id; /* ID of this bus' bus manager */
nodeid_t node_id; /* node ID of this host */
nodeid_t irm_id; /* ID of this bus' isochronous resource manager */
nodeid_t busmgr_id; /* ID of this bus' bus manager */
/* this nodes state */
unsigned in_bus_reset:1;
......@@ -55,7 +56,7 @@ struct hpsb_host {
struct csr_control csr;
/* Per node tlabel pool allocation */
struct hpsb_tlabel_pool tpool[64];
struct hpsb_tlabel_pool tpool[ALL_NODES];
struct hpsb_host_driver *driver;
......@@ -72,6 +73,8 @@ struct hpsb_host {
unsigned int config_roms;
struct list_head addr_space;
u64 low_addr_space; /* upper bound of physical DMA area */
u64 middle_addr_space; /* upper bound of posted write area */
};
......
......@@ -33,6 +33,7 @@
#include <linux/kdev_t.h>
#include <linux/skbuff.h>
#include <linux/suspend.h>
#include <linux/kthread.h>
#include <asm/byteorder.h>
#include <asm/semaphore.h>
......@@ -285,9 +286,9 @@ static int check_selfids(struct hpsb_host *host)
static void build_speed_map(struct hpsb_host *host, int nodecount)
{
u8 speedcap[nodecount];
u8 cldcnt[nodecount];
u8 *map = host->speed_map;
u8 *speedcap = host->speed;
struct selfid *sid;
struct ext_selfid *esid;
int i, j, n;
......@@ -354,6 +355,11 @@ static void build_speed_map(struct hpsb_host *host, int nodecount)
}
}
}
/* assume maximum speed for 1394b PHYs, nodemgr will correct it */
for (n = 0; n < nodecount; n++)
if (speedcap[n] == 3)
speedcap[n] = IEEE1394_SPEED_MAX;
}
......@@ -554,11 +560,10 @@ int hpsb_send_packet(struct hpsb_packet *packet)
return 0;
}
if (packet->type == hpsb_async && packet->node_id != ALL_NODES) {
if (packet->type == hpsb_async &&
NODEID_TO_NODE(packet->node_id) != ALL_NODES)
packet->speed_code =
host->speed_map[NODEID_TO_NODE(host->node_id) * 64
+ NODEID_TO_NODE(packet->node_id)];
}
host->speed[NODEID_TO_NODE(packet->node_id)];
dump_packet("send packet", packet->header, packet->header_size, packet->speed_code);
......@@ -997,11 +1002,8 @@ void abort_timedouts(unsigned long __opaque)
* packets that have a "complete" function are sent here. This way, the
* completion is run out of kernel context, and doesn't block the rest of
* the stack. */
static int khpsbpkt_pid = -1, khpsbpkt_kill;
static DECLARE_COMPLETION(khpsbpkt_complete);
static struct task_struct *khpsbpkt_thread;
static struct sk_buff_head hpsbpkt_queue;
static DECLARE_MUTEX_LOCKED(khpsbpkt_sig);
static void queue_packet_complete(struct hpsb_packet *packet)
{
......@@ -1011,9 +1013,7 @@ static void queue_packet_complete(struct hpsb_packet *packet)
}
if (packet->complete_routine != NULL) {
skb_queue_tail(&hpsbpkt_queue, packet->skb);
/* Signal the kernel thread to handle this */
up(&khpsbpkt_sig);
wake_up_process(khpsbpkt_thread);
}
return;
}
......@@ -1025,19 +1025,9 @@ static int hpsbpkt_thread(void *__hi)
void (*complete_routine)(void*);
void *complete_data;
daemonize("khpsbpkt");
current->flags |= PF_NOFREEZE;
while (1) {
if (down_interruptible(&khpsbpkt_sig)) {
printk("khpsbpkt: received unexpected signal?!\n" );
break;
}
if (khpsbpkt_kill)
break;
while (!kthread_should_stop()) {
while ((skb = skb_dequeue(&hpsbpkt_queue)) != NULL) {
packet = (struct hpsb_packet *)skb->data;
......@@ -1048,9 +1038,13 @@ static int hpsbpkt_thread(void *__hi)
complete_routine(complete_data);
}
}
complete_and_exit(&khpsbpkt_complete, 0);
set_current_state(TASK_INTERRUPTIBLE);
if (!skb_peek(&hpsbpkt_queue))
schedule();
__set_current_state(TASK_RUNNING);
}
return 0;
}
static int __init ieee1394_init(void)
......@@ -1065,10 +1059,10 @@ static int __init ieee1394_init(void)
HPSB_ERR("Some features may not be available\n");
}
khpsbpkt_pid = kernel_thread(hpsbpkt_thread, NULL, CLONE_KERNEL);
if (khpsbpkt_pid < 0) {
khpsbpkt_thread = kthread_run(hpsbpkt_thread, NULL, "khpsbpkt");
if (IS_ERR(khpsbpkt_thread)) {
HPSB_ERR("Failed to start hpsbpkt thread!\n");
ret = -ENOMEM;
ret = PTR_ERR(khpsbpkt_thread);
goto exit_cleanup_config_roms;
}
......@@ -1148,10 +1142,7 @@ static int __init ieee1394_init(void)
release_chrdev:
unregister_chrdev_region(IEEE1394_CORE_DEV, 256);
exit_release_kernel_thread:
if (khpsbpkt_pid >= 0) {
kill_proc(khpsbpkt_pid, SIGTERM, 1);
wait_for_completion(&khpsbpkt_complete);
}
kthread_stop(khpsbpkt_thread);
exit_cleanup_config_roms:
hpsb_cleanup_config_roms();
return ret;
......@@ -1172,12 +1163,7 @@ static void __exit ieee1394_cleanup(void)
bus_remove_file(&ieee1394_bus_type, fw_bus_attrs[i]);
bus_unregister(&ieee1394_bus_type);
if (khpsbpkt_pid >= 0) {
khpsbpkt_kill = 1;
mb();
up(&khpsbpkt_sig);
wait_for_completion(&khpsbpkt_complete);
}
kthread_stop(khpsbpkt_thread);
hpsb_cleanup_config_roms();
......
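The hunks above convert the khpsbpkt helper from a raw kernel_thread() driven by a semaphore and a completion to the kthread API. A stripped-down sketch of that pattern as a standalone module (names such as demo_worker are made up, not part of the driver):

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/module.h>
    #include <linux/sched.h>

    static struct task_struct *worker;

    static int worker_fn(void *data)
    {
        while (!kthread_should_stop()) {
            /* ... drain the work queue here ... */

            /* Sleep until wake_up_process() or kthread_stop() is called. */
            set_current_state(TASK_INTERRUPTIBLE);
            if (!kthread_should_stop())
                schedule();
            __set_current_state(TASK_RUNNING);
        }
        return 0;
    }

    static int __init demo_init(void)
    {
        worker = kthread_run(worker_fn, NULL, "demo_worker");
        return IS_ERR(worker) ? PTR_ERR(worker) : 0;
    }

    static void __exit demo_exit(void)
    {
        kthread_stop(worker);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");

Compared with the old code, no pid, kill flag, or completion is needed: kthread_should_stop() replaces the khpsbpkt_kill check, and kthread_stop() both signals termination and waits for the thread to exit.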
......@@ -136,8 +136,11 @@ int hpsb_get_tlabel(struct hpsb_packet *packet)
{
unsigned long flags;
struct hpsb_tlabel_pool *tp;
int n = NODEID_TO_NODE(packet->node_id);
tp = &packet->host->tpool[packet->node_id & NODE_MASK];
if (unlikely(n == ALL_NODES))
return 0;
tp = &packet->host->tpool[n];
if (irqs_disabled() || in_atomic()) {
if (down_trylock(&tp->count))
......@@ -175,8 +178,11 @@ void hpsb_free_tlabel(struct hpsb_packet *packet)
{
unsigned long flags;
struct hpsb_tlabel_pool *tp;
int n = NODEID_TO_NODE(packet->node_id);
tp = &packet->host->tpool[packet->node_id & NODE_MASK];
if (unlikely(n == ALL_NODES))
return;
tp = &packet->host->tpool[n];
BUG_ON(packet->tlabel > 63 || packet->tlabel < 0);
......
......@@ -38,6 +38,7 @@ struct nodemgr_csr_info {
struct hpsb_host *host;
nodeid_t nodeid;
unsigned int generation;
unsigned int speed_unverified:1;
};
......@@ -57,23 +58,75 @@ static char *nodemgr_find_oui_name(int oui)
return NULL;
}
/*
* Correct the speed map entry. This is necessary
* - for nodes with link speed < phy speed,
* - for 1394b nodes with negotiated phy port speed < IEEE1394_SPEED_MAX.
* A possible speed is determined by trial and error, using quadlet reads.
*/
static int nodemgr_check_speed(struct nodemgr_csr_info *ci, u64 addr,
quadlet_t *buffer)
{
quadlet_t q;
u8 i, *speed, old_speed, good_speed;
int ret;
speed = ci->host->speed + NODEID_TO_NODE(ci->nodeid);
old_speed = *speed;
good_speed = IEEE1394_SPEED_MAX + 1;
/* Try every speed from S100 to old_speed.
* If we did it the other way around, a too low speed could be caught
* if the retry succeeded for some other reason, e.g. because the link
* just finished its initialization. */
for (i = IEEE1394_SPEED_100; i <= old_speed; i++) {
*speed = i;
ret = hpsb_read(ci->host, ci->nodeid, ci->generation, addr,
&q, sizeof(quadlet_t));
if (ret)
break;
*buffer = q;
good_speed = i;
}
if (good_speed <= IEEE1394_SPEED_MAX) {
HPSB_DEBUG("Speed probe of node " NODE_BUS_FMT " yields %s",
NODE_BUS_ARGS(ci->host, ci->nodeid),
hpsb_speedto_str[good_speed]);
*speed = good_speed;
ci->speed_unverified = 0;
return 0;
}
*speed = old_speed;
return ret;
}
static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr, u16 length,
void *buffer, void *__ci)
{
struct nodemgr_csr_info *ci = (struct nodemgr_csr_info*)__ci;
int i, ret = 0;
int i, ret;
for (i = 1; ; i++) {
ret = hpsb_read(ci->host, ci->nodeid, ci->generation, addr,
buffer, length);
if (!ret || i == 3)
if (!ret) {
ci->speed_unverified = 0;
break;
}
/* Give up after 3rd failure. */
if (i == 3)
break;
/* The ieee1394_core guessed the node's speed capability from
* the self ID. Check whether a lower speed works. */
if (ci->speed_unverified && length == sizeof(quadlet_t)) {
ret = nodemgr_check_speed(ci, addr, buffer);
if (!ret)
break;
}
if (msleep_interruptible(334))
return -EINTR;
}
return ret;
}
......@@ -1204,6 +1257,8 @@ static void nodemgr_node_scan_one(struct host_info *hi,
ci->host = host;
ci->nodeid = nodeid;
ci->generation = generation;
ci->speed_unverified =
host->speed[NODEID_TO_NODE(nodeid)] > IEEE1394_SPEED_100;
/* We need to detect when the ConfigROM's generation has changed,
* so we only update the node's info when it needs to be. */
......
......@@ -163,7 +163,7 @@ printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id ,
/* Module Parameters */
static int phys_dma = 1;
module_param(phys_dma, int, 0644);
module_param(phys_dma, int, 0444);
MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
static void dma_trm_tasklet(unsigned long data);
......@@ -553,7 +553,8 @@ static void ohci_initialize(struct ti_ohci *ohci)
* register content.
* To actually enable physical responses is the job of our interrupt
* handler which programs the physical request filter. */
reg_write(ohci, OHCI1394_PhyUpperBound, 0x01000000);
reg_write(ohci, OHCI1394_PhyUpperBound,
OHCI1394_PHYS_UPPER_BOUND_PROGRAMMED >> 16);
DBGMSG("physUpperBoundOffset=%08x",
reg_read(ohci, OHCI1394_PhyUpperBound));
......@@ -580,6 +581,7 @@ static void ohci_initialize(struct ti_ohci *ohci)
OHCI1394_isochRx |
OHCI1394_isochTx |
OHCI1394_postedWriteErr |
OHCI1394_cycleTooLong |
OHCI1394_cycleInconsistent);
/* Enable link */
......@@ -2382,6 +2384,15 @@ static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
PRINT(KERN_ERR, "physical posted write error");
/* no recovery strategy yet, had to involve protocol drivers */
}
if (event & OHCI1394_cycleTooLong) {
if (printk_ratelimit())
PRINT(KERN_WARNING, "isochronous cycle too long");
else
DBGMSG("OHCI1394_cycleTooLong");
reg_write(ohci, OHCI1394_LinkControlSet,
OHCI1394_LinkControl_CycleMaster);
event &= ~OHCI1394_cycleTooLong;
}
if (event & OHCI1394_cycleInconsistent) {
/* We subscribe to the cycleInconsistent event only to
* clear the corresponding event bit... otherwise,
......@@ -3400,6 +3411,14 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;
if (phys_dma) {
host->low_addr_space =
(u64) reg_read(ohci, OHCI1394_PhyUpperBound) << 16;
if (!host->low_addr_space)
host->low_addr_space = OHCI1394_PHYS_UPPER_BOUND_FIXED;
}
host->middle_addr_space = OHCI1394_MIDDLE_ADDRESS_SPACE;
/* Tell the highlevel this host is ready */
if (hpsb_add_host(host))
FAIL(-ENOMEM, "Failed to register host with highlevel");
......@@ -3458,24 +3477,13 @@ static void ohci1394_pci_remove(struct pci_dev *pdev)
case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
/* The ohci_soft_reset() stops all DMA contexts, so we
* dont need to do this. */
/* Free AR dma */
free_dma_rcv_ctx(&ohci->ar_req_context);
free_dma_rcv_ctx(&ohci->ar_resp_context);
/* Free AT dma */
free_dma_trm_ctx(&ohci->at_req_context);
free_dma_trm_ctx(&ohci->at_resp_context);
/* Free IR dma */
free_dma_rcv_ctx(&ohci->ir_legacy_context);
/* Free IT dma */
free_dma_trm_ctx(&ohci->it_legacy_context);
/* Free IR legacy dma */
free_dma_rcv_ctx(&ohci->ir_legacy_context);
case OHCI_INIT_HAVE_SELFID_BUFFER:
pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
ohci->selfid_buf_cpu,
......@@ -3535,6 +3543,7 @@ static int ohci1394_pci_resume (struct pci_dev *pdev)
}
#endif /* CONFIG_PPC_PMAC */
pci_restore_state(pdev);
pci_enable_device(pdev);
return 0;
......@@ -3554,6 +3563,8 @@ static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
}
#endif
pci_save_state(pdev);
return 0;
}
......
......@@ -443,6 +443,16 @@ static inline u32 reg_read(const struct ti_ohci *ohci, int offset)
#define OHCI1394_TCODE_PHY 0xE
/* Node offset map (phys DMA area, posted write area).
* The value of OHCI1394_PHYS_UPPER_BOUND_PROGRAMMED may be modified but must
* be lower than OHCI1394_MIDDLE_ADDRESS_SPACE.
* OHCI1394_PHYS_UPPER_BOUND_FIXED and OHCI1394_MIDDLE_ADDRESS_SPACE are
* constants given by the OHCI spec.
*/
#define OHCI1394_PHYS_UPPER_BOUND_FIXED 0x000100000000ULL /* 4 GB */
#define OHCI1394_PHYS_UPPER_BOUND_PROGRAMMED 0x010000000000ULL /* 1 TB */
#define OHCI1394_MIDDLE_ADDRESS_SPACE 0xffff00000000ULL
void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet,
int type,
void (*func)(unsigned long),
......
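The new constants pair with the PhyUpperBound handling above: the register holds bits 47:16 of the bound, which is why ohci_initialize() writes OHCI1394_PHYS_UPPER_BOUND_PROGRAMMED >> 16 and the probe path shifts the value read back left by 16 into host->low_addr_space. A standalone arithmetic check (illustrative only):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint64_t fixed      = 0x000100000000ULL; /* 4 GB; used when the register reads back 0 */
        const uint64_t programmed = 0x010000000000ULL; /* 1 TB */

        /* Value written to PhyUpperBound: bits 47:16 of the bound. */
        uint32_t reg = (uint32_t)(programmed >> 16);
        assert(reg == 0x01000000);

        /* Shifting the readback left by 16 recovers the bound, as done
         * when filling host->low_addr_space in the probe path. */
        assert(((uint64_t)reg << 16) == programmed);

        printf("reg=0x%08x fixed=0x%012llx programmed=0x%012llx\n",
               reg, (unsigned long long)fixed, (unsigned long long)programmed);
        return 0;
    }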
......@@ -408,34 +408,34 @@ static void fcp_request(struct hpsb_host *host, int nodeid, int direction,
#ifdef CONFIG_COMPAT
struct compat_raw1394_req {
__u32 type;
__s32 error;
__u32 misc;
__u32 type;
__s32 error;
__u32 misc;
__u32 generation;
__u32 length;
__u32 generation;
__u32 length;
__u64 address;
__u64 address;
__u64 tag;
__u64 tag;
__u64 sendb;
__u64 recvb;
} __attribute__((packed));
__u64 sendb;
__u64 recvb;
} __attribute__((packed));
static const char __user *raw1394_compat_write(const char __user *buf)
{
struct compat_raw1394_req __user *cr = (typeof(cr)) buf;
struct compat_raw1394_req __user *cr = (typeof(cr)) buf;
struct raw1394_request __user *r;
r = compat_alloc_user_space(sizeof(struct raw1394_request));
#define C(x) __copy_in_user(&r->x, &cr->x, sizeof(r->x))
if (copy_in_user(r, cr, sizeof(struct compat_raw1394_req)) ||
C(address) ||
C(tag) ||
C(sendb) ||
C(recvb))
C(address) ||
C(tag) ||
C(sendb) ||
C(recvb))
return ERR_PTR(-EFAULT);
return (const char __user *)r;
}
......@@ -443,11 +443,11 @@ static const char __user *raw1394_compat_write(const char __user *buf)
#define P(x) __put_user(r->x, &cr->x)
static int
static int
raw1394_compat_read(const char __user *buf, struct raw1394_request *r)
{
struct compat_raw1394_req __user *cr = (typeof(cr)) r;
if (!access_ok(VERIFY_WRITE,cr,sizeof(struct compat_raw1394_req)) ||
struct compat_raw1394_req __user *cr = (typeof(cr)) r;
if (!access_ok(VERIFY_WRITE, cr, sizeof(struct compat_raw1394_req)) ||
P(type) ||
P(error) ||
P(misc) ||
......@@ -512,18 +512,17 @@ static ssize_t raw1394_read(struct file *file, char __user * buffer,
}
#ifdef CONFIG_COMPAT
if (count == sizeof(struct compat_raw1394_req) &&
sizeof(struct compat_raw1394_req) !=
sizeof(struct raw1394_request)) {
if (count == sizeof(struct compat_raw1394_req) &&
sizeof(struct compat_raw1394_req) !=
sizeof(struct raw1394_request)) {
ret = raw1394_compat_read(buffer, &req->req);
} else
} else
#endif
{
if (copy_to_user(buffer, &req->req, sizeof(req->req))) {
ret = -EFAULT;
goto out;
}
}
ret = (ssize_t) sizeof(struct raw1394_request);
}
out:
......@@ -2348,7 +2347,6 @@ static int state_connected(struct file_info *fi, struct pending_request *req)
return handle_async_request(fi, req, node);
}
static ssize_t raw1394_write(struct file *file, const char __user * buffer,
size_t count, loff_t * offset_is_ignored)
{
......@@ -2357,9 +2355,9 @@ static ssize_t raw1394_write(struct file *file, const char __user * buffer,
ssize_t retval = 0;
#ifdef CONFIG_COMPAT
if (count == sizeof(struct compat_raw1394_req) &&
sizeof(struct compat_raw1394_req) !=
sizeof(struct raw1394_request)) {
if (count == sizeof(struct compat_raw1394_req) &&
sizeof(struct compat_raw1394_req) !=
sizeof(struct raw1394_request)) {
buffer = raw1394_compat_write(buffer);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
......
......@@ -127,10 +127,12 @@ MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = "
* talking to a single sbp2 device at the same time (filesystem coherency,
* etc.). If you're running an sbp2 device that supports multiple logins,
* and you're either running read-only filesystems or some sort of special
* filesystem supporting multiple hosts (one such filesystem is OpenGFS,
* see opengfs.sourceforge.net for more info), then set exclusive_login
* to zero. Note: The Oxsemi OXFW911 sbp2 chipset supports up to four
* concurrent logins.
* filesystem supporting multiple hosts, e.g. OpenGFS, Oracle Cluster
* File System, or Lustre, then set exclusive_login to zero.
*
* So far only bridges from Oxford Semiconductor are known to support
* concurrent logins. Depending on firmware, four or two concurrent logins
* are possible on OXFW911 and newer Oxsemi bridges.
*/
static int exclusive_login = 1;
module_param(exclusive_login, int, 0644);
......@@ -306,8 +308,9 @@ static const struct {
u32 model_id;
unsigned workarounds;
} sbp2_workarounds_table[] = {
/* TSB42AA9 */ {
/* DViCO Momobay CX-1 with TSB42AA9 bridge */ {
.firmware_revision = 0x002800,
.model_id = 0x001010,
.workarounds = SBP2_WORKAROUND_INQUIRY_36 |
SBP2_WORKAROUND_MODE_SENSE_8,
},
......@@ -791,12 +794,12 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
scsi_id->ud = ud;
scsi_id->speed_code = IEEE1394_SPEED_100;
scsi_id->max_payload_size = sbp2_speedto_max_payload[IEEE1394_SPEED_100];
scsi_id->status_fifo_addr = CSR1212_INVALID_ADDR_SPACE;
atomic_set(&scsi_id->sbp2_login_complete, 0);
INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_inuse);
INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_completed);
INIT_LIST_HEAD(&scsi_id->scsi_list);
spin_lock_init(&scsi_id->sbp2_command_orb_lock);
scsi_id->sbp2_lun = 0;
ud->device.driver_data = scsi_id;
......@@ -844,8 +847,8 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
scsi_id->status_fifo_addr = hpsb_allocate_and_register_addrspace(
&sbp2_highlevel, ud->ne->host, &sbp2_ops,
sizeof(struct sbp2_status_block), sizeof(quadlet_t),
0x010000000000ULL, CSR1212_ALL_SPACE_END);
if (scsi_id->status_fifo_addr == ~0ULL) {
ud->ne->host->low_addr_space, CSR1212_ALL_SPACE_END);
if (scsi_id->status_fifo_addr == CSR1212_INVALID_ADDR_SPACE) {
SBP2_ERR("failed to allocate status FIFO address range");
goto failed_alloc;
}
......@@ -1087,9 +1090,9 @@ static void sbp2_remove_device(struct scsi_id_instance_data *scsi_id)
SBP2_DMA_FREE("single query logins data");
}
if (scsi_id->status_fifo_addr)
if (scsi_id->status_fifo_addr != CSR1212_INVALID_ADDR_SPACE)
hpsb_unregister_addrspace(&sbp2_highlevel, hi->host,
scsi_id->status_fifo_addr);
scsi_id->status_fifo_addr);
scsi_id->ud->device.driver_data = NULL;
......@@ -1213,13 +1216,11 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
SBP2_DEBUG("length_max_logins = %x",
(unsigned int)scsi_id->query_logins_response->length_max_logins);
SBP2_DEBUG("Query logins to SBP-2 device successful");
max_logins = RESPONSE_GET_MAX_LOGINS(scsi_id->query_logins_response->length_max_logins);
SBP2_DEBUG("Maximum concurrent logins supported: %d", max_logins);
SBP2_INFO("Maximum concurrent logins supported: %d", max_logins);
active_logins = RESPONSE_GET_ACTIVE_LOGINS(scsi_id->query_logins_response->length_max_logins);
SBP2_DEBUG("Number of active logins: %d", active_logins);
SBP2_INFO("Number of active logins: %d", active_logins);
if (active_logins >= max_logins) {
return -EIO;
......@@ -1648,6 +1649,8 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
}
}
#define SBP2_PAYLOAD_TO_BYTES(p) (1 << ((p) + 2))
/*
* This function is called in order to determine the max speed and packet
* size we can use in our ORBs. Note, that we (the driver and host) only
......@@ -1660,13 +1663,12 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
static int sbp2_max_speed_and_size(struct scsi_id_instance_data *scsi_id)
{
struct sbp2scsi_host_info *hi = scsi_id->hi;
u8 payload;
SBP2_DEBUG_ENTER();
/* Initial setting comes from the hosts speed map */
scsi_id->speed_code =
hi->host->speed_map[NODEID_TO_NODE(hi->host->node_id) * 64 +
NODEID_TO_NODE(scsi_id->ne->nodeid)];
hi->host->speed[NODEID_TO_NODE(scsi_id->ne->nodeid)];
/* Bump down our speed if the user requested it */
if (scsi_id->speed_code > max_speed) {
......@@ -1677,15 +1679,22 @@ static int sbp2_max_speed_and_size(struct scsi_id_instance_data *scsi_id)
/* Payload size is the lesser of what our speed supports and what
* our host supports. */
scsi_id->max_payload_size =
min(sbp2_speedto_max_payload[scsi_id->speed_code],
(u8) (hi->host->csr.max_rec - 1));
payload = min(sbp2_speedto_max_payload[scsi_id->speed_code],
(u8) (hi->host->csr.max_rec - 1));
/* If physical DMA is off, work around limitation in ohci1394:
* packet size must not exceed PAGE_SIZE */
if (scsi_id->ne->host->low_addr_space < (1ULL << 32))
while (SBP2_PAYLOAD_TO_BYTES(payload) + 24 > PAGE_SIZE &&
payload)
payload--;
HPSB_DEBUG("Node " NODE_BUS_FMT ": Max speed [%s] - Max payload [%u]",
NODE_BUS_ARGS(hi->host, scsi_id->ne->nodeid),
hpsb_speedto_str[scsi_id->speed_code],
1 << ((u32) scsi_id->max_payload_size + 2));
SBP2_PAYLOAD_TO_BYTES(payload));
scsi_id->max_payload_size = payload;
return 0;
}
......@@ -2112,33 +2121,6 @@ static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense
return sbp2_status[8] & 0x3f; /* return scsi status */
}
/*
* This function is called after a command is completed, in order to do any necessary SBP-2
* response data translations for the SCSI stack
*/
static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id,
struct scsi_cmnd *SCpnt)
{
u8 *scsi_buf = SCpnt->request_buffer;
SBP2_DEBUG_ENTER();
if (SCpnt->cmnd[0] == INQUIRY && (SCpnt->cmnd[1] & 3) == 0) {
/*
* Make sure data length is ok. Minimum length is 36 bytes
*/
if (scsi_buf[4] == 0) {
scsi_buf[4] = 36 - 5;
}
/*
* Fix ansi revision and response data format
*/
scsi_buf[2] |= 2;
scsi_buf[3] = (scsi_buf[3] & 0xf0) | 2;
}
}
/*
* This function deals with status writes from the SBP-2 device
*/
......@@ -2477,13 +2459,6 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
SCpnt->result = DID_ERROR << 16;
}
/*
* Take care of any sbp2 response data mucking here (RBC stuff, etc.)
*/
if (SCpnt->result == DID_OK << 16) {
sbp2_check_sbp2_response(scsi_id, SCpnt);
}
/*
* If a bus reset is in progress and there was an error, complete
* the command as busy so that it will get retried.
......
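The new SBP2_PAYLOAD_TO_BYTES() macro and the shrink loop in sbp2_max_speed_and_size() implement the "fix S800 transfers if phys_dma is off" change from the shortlog: without physical DMA, ohci1394 cannot handle packets larger than a page, so the payload code is lowered until the data block plus 24 bytes of header overhead fits. A small standalone illustration (the 4 KiB page size and the starting code of 10 are assumptions for the example):

    #include <stdio.h>

    #define SBP2_PAYLOAD_TO_BYTES(p) (1 << ((p) + 2))

    int main(void)
    {
        const unsigned long page_size = 4096; /* assumed, e.g. x86 */
        unsigned char payload = 10;           /* assumed start: 1 << 12 = 4096 bytes */

        /* Same shrink loop as the hunk above: keep the data block plus
         * 24 bytes of header overhead within a single page. */
        while (SBP2_PAYLOAD_TO_BYTES(payload) + 24 > page_size && payload)
            payload--;

        printf("payload code %u -> %d bytes\n",
               payload, SBP2_PAYLOAD_TO_BYTES(payload));
        return 0;
    }

Starting from a code of 10 (4096 bytes), the loop stops at 9 (2048 bytes), since 4096 + 24 does not fit in one 4 KiB page.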
......@@ -52,7 +52,7 @@ struct sbp2_command_orb {
u32 data_descriptor_lo;
u32 misc;
u8 cdb[12];
};
} __attribute__((packed));
#define SBP2_LOGIN_REQUEST 0x0
#define SBP2_QUERY_LOGINS_REQUEST 0x1
......@@ -80,7 +80,7 @@ struct sbp2_login_orb {
u32 passwd_resp_lengths;
u32 status_fifo_hi;
u32 status_fifo_lo;
};
} __attribute__((packed));
#define RESPONSE_GET_LOGIN_ID(value) (value & 0xffff)
#define RESPONSE_GET_LENGTH(value) ((value >> 16) & 0xffff)
......@@ -91,7 +91,7 @@ struct sbp2_login_response {
u32 command_block_agent_hi;
u32 command_block_agent_lo;
u32 reconnect_hold;
};
} __attribute__((packed));
#define ORB_SET_LOGIN_ID(value) (value & 0xffff)
......@@ -106,7 +106,7 @@ struct sbp2_query_logins_orb {
u32 reserved_resp_length;
u32 status_fifo_hi;
u32 status_fifo_lo;
};
} __attribute__((packed));
#define RESPONSE_GET_MAX_LOGINS(value) (value & 0xffff)
#define RESPONSE_GET_ACTIVE_LOGINS(value) ((RESPONSE_GET_LENGTH(value) - 4) / 12)
......@@ -116,7 +116,7 @@ struct sbp2_query_logins_response {
u32 misc_IDs;
u32 initiator_misc_hi;
u32 initiator_misc_lo;
};
} __attribute__((packed));
struct sbp2_reconnect_orb {
u32 reserved1;
......@@ -127,7 +127,7 @@ struct sbp2_reconnect_orb {
u32 reserved5;
u32 status_fifo_hi;
u32 status_fifo_lo;
};
} __attribute__((packed));
struct sbp2_logout_orb {
u32 reserved1;
......@@ -138,7 +138,7 @@ struct sbp2_logout_orb {
u32 reserved5;
u32 status_fifo_hi;
u32 status_fifo_lo;
};
} __attribute__((packed));
#define PAGE_TABLE_SET_SEGMENT_BASE_HI(value) (value & 0xffff)
#define PAGE_TABLE_SET_SEGMENT_LENGTH(value) ((value & 0xffff) << 16)
......@@ -146,7 +146,7 @@ struct sbp2_logout_orb {
struct sbp2_unrestricted_page_table {
u32 length_segment_base_hi;
u32 segment_base_lo;
};
} __attribute__((packed));
#define RESP_STATUS_REQUEST_COMPLETE 0x0
#define RESP_STATUS_TRANSPORT_FAILURE 0x1
......@@ -191,7 +191,7 @@ struct sbp2_status_block {
u32 ORB_offset_hi_misc;
u32 ORB_offset_lo;
u8 command_set_dependent[24];
};
} __attribute__((packed));
/*
* Miscellaneous SBP2 related config rom defines
......@@ -395,9 +395,8 @@ static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
struct scsi_cmnd *SCpnt,
void (*done)(struct scsi_cmnd *));
static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense_data);
static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id,
struct scsi_cmnd *SCpnt);
static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status,
unchar *sense_data);
static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
struct unit_directory *ud);
static int sbp2_set_busy_timeout(struct scsi_id_instance_data *scsi_id);
......
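The __attribute__((packed)) annotations added to the ORB and status block definitions make the on-the-wire layout independent of the compiler's padding rules. A small standalone illustration of what packing changes (the struct here is made up, not one of the sbp2 definitions):

    #include <stdint.h>
    #include <stdio.h>

    struct wire_plain {             /* compiler may pad 'tag' to align 'value' */
        uint8_t  tag;
        uint32_t value;
    };

    struct wire_packed {            /* packed: exactly the bytes sent on the wire */
        uint8_t  tag;
        uint32_t value;
    } __attribute__((packed));

    int main(void)
    {
        printf("plain=%zu packed=%zu\n",
               sizeof(struct wire_plain), sizeof(struct wire_packed));
        /* Typically prints "plain=8 packed=5" on common ABIs. */
        return 0;
    }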
......@@ -331,7 +331,7 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
spin_lock_init(&d->lock);
PRINT(KERN_INFO, ohci->host->id, "Iso %s DMA: %d buffers "
DBGMSG(ohci->host->id, "Iso %s DMA: %d buffers "
"of size %d allocated for a frame size %d, each with %d prgs",
(type == OHCI_ISO_RECEIVE) ? "receive" : "transmit",
d->num_desc - 1, d->buf_size, d->frame_size, d->nb_cmd);
......@@ -759,7 +759,7 @@ static int __video1394_ioctl(struct file *file,
} else {
mask = (u64)0x1<<v.channel;
}
PRINT(KERN_INFO, ohci->host->id, "mask: %08X%08X usage: %08X%08X\n",
DBGMSG(ohci->host->id, "mask: %08X%08X usage: %08X%08X\n",
(u32)(mask>>32),(u32)(mask&0xffffffff),
(u32)(ohci->ISO_channel_usage>>32),
(u32)(ohci->ISO_channel_usage&0xffffffff));
......@@ -805,7 +805,7 @@ static int __video1394_ioctl(struct file *file,
v.buf_size = d->buf_size;
list_add_tail(&d->link, &ctx->context_list);
PRINT(KERN_INFO, ohci->host->id,
DBGMSG(ohci->host->id,
"iso context %d listen on channel %d",
d->ctx, v.channel);
}
......@@ -828,7 +828,7 @@ static int __video1394_ioctl(struct file *file,
list_add_tail(&d->link, &ctx->context_list);
PRINT(KERN_INFO, ohci->host->id,
DBGMSG(ohci->host->id,
"Iso context %d talk on channel %d", d->ctx,
v.channel);
}
......@@ -873,7 +873,7 @@ static int __video1394_ioctl(struct file *file,
d = find_ctx(&ctx->context_list, OHCI_ISO_TRANSMIT, channel);
if (d == NULL) return -ESRCH;
PRINT(KERN_INFO, ohci->host->id, "Iso context %d "
DBGMSG(ohci->host->id, "Iso context %d "
"stop talking on channel %d", d->ctx, channel);
free_dma_iso_ctx(d);
......@@ -935,7 +935,7 @@ static int __video1394_ioctl(struct file *file,
else {
/* Wake up dma context if necessary */
if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
PRINT(KERN_INFO, ohci->host->id,
DBGMSG(ohci->host->id,
"Waking up iso dma ctx=%d", d->ctx);
reg_write(ohci, d->ctrlSet, 0x1000);
}
......@@ -1106,7 +1106,7 @@ static int __video1394_ioctl(struct file *file,
else {
/* Wake up dma context if necessary */
if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
PRINT(KERN_INFO, ohci->host->id,
DBGMSG(ohci->host->id,
"Waking up iso transmit dma ctx=%d",
d->ctx);
put_timestamp(ohci, d, d->last_buffer);
......@@ -1232,7 +1232,7 @@ static int video1394_release(struct inode *inode, struct file *file)
"is not being used", d->channel);
else
ohci->ISO_channel_usage &= ~mask;
PRINT(KERN_INFO, ohci->host->id, "On release: Iso %s context "
DBGMSG(ohci->host->id, "On release: Iso %s context "
"%d stop listening on channel %d",
d->type == OHCI_ISO_RECEIVE ? "receive" : "transmit",
d->ctx, d->channel);
......