Commit 1316ff5d authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6: (25 commits)
  firewire: fw-cdev: reorder wakeup vs. spinlock
  firewire: in-code doc updates.
  firewire: a header cleanup
  firewire: adopt read cycle timer ABI from raw1394
  firewire: fw-ohci: check for misconfigured bus (phyID == 63)
  firewire: fw-ohci: missing dma_unmap_single
  firewire: fw-ohci: log posted write errors
  firewire: fw-ohci: reorder includes
  firewire: fw-ohci: fix includes
  firewire: fw-ohci: enforce read order for selfID generation
  firewire: fw-sbp2: use an own workqueue (fix system responsiveness)
  firewire: fw-sbp2: expose module parameter for workarounds
  firewire: fw-sbp2: add support for multiple logical units per target
  firewire: fw-sbp2: always enable IRQs before calling command ORB callback
  firewire: fw-core: local variable shadows a global one
  firewire: optimize fw_core_add_address_handler
  ieee1394: ieee1394_core.c: use DEFINE_SPINLOCK for spinlock definition
  ieee1394: csr1212: proper refcounting
  ieee1394: nodemgr: fix leak of struct csr1212_keyval
  ieee1394: pcilynx: I2C cleanups
  ...
parents f563d53c 83431cba
@@ -25,11 +25,14 @@
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/poll.h>
#include <linux/preempt.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/idr.h>
#include <linux/compat.h>
#include <linux/firewire-cdev.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include "fw-transaction.h"
#include "fw-topology.h"
@@ -140,11 +143,10 @@ static void queue_event(struct client *client, struct event *event,
        event->v[1].size = size1;

        spin_lock_irqsave(&client->lock, flags);
        list_add_tail(&event->link, &client->event_list);
-       wake_up_interruptible(&client->wait);
        spin_unlock_irqrestore(&client->lock, flags);

        wake_up_interruptible(&client->wait);
}
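The hunk above moves the wake_up_interruptible() call out from under client->lock. As a rough illustration of the resulting pattern (not the driver's actual code, names are invented for the example), a producer that publishes an item under a spinlock and only then wakes the consumer could look like this:

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct example_queue {
        spinlock_t lock;
        struct list_head items;
        wait_queue_head_t wait;
};

static void example_enqueue(struct example_queue *q, struct list_head *item)
{
        unsigned long flags;

        /* Publish the item while holding the lock... */
        spin_lock_irqsave(&q->lock, flags);
        list_add_tail(item, &q->items);
        spin_unlock_irqrestore(&q->lock, flags);

        /* ...but issue the wakeup only after the lock is dropped, so a
         * woken reader does not immediately block on q->lock again. */
        wake_up_interruptible(&q->wait);
}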
static int
@@ -621,20 +623,19 @@ iso_callback(struct fw_iso_context *context, u32 cycle,
             size_t header_length, void *header, void *data)
{
        struct client *client = data;
-       struct iso_interrupt *interrupt;
        struct iso_interrupt *irq;

-       interrupt = kzalloc(sizeof(*interrupt) + header_length, GFP_ATOMIC);
-       if (interrupt == NULL)
        irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC);
        if (irq == NULL)
                return;

-       interrupt->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
-       interrupt->interrupt.closure = client->iso_closure;
-       interrupt->interrupt.cycle = cycle;
-       interrupt->interrupt.header_length = header_length;
-       memcpy(interrupt->interrupt.header, header, header_length);
-       queue_event(client, &interrupt->event,
-                   &interrupt->interrupt,
-                   sizeof(interrupt->interrupt) + header_length, NULL, 0);
        irq->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
        irq->interrupt.closure = client->iso_closure;
        irq->interrupt.cycle = cycle;
        irq->interrupt.header_length = header_length;
        memcpy(irq->interrupt.header, header, header_length);
        queue_event(client, &irq->event, &irq->interrupt,
                    sizeof(irq->interrupt) + header_length, NULL, 0);
}
static int ioctl_create_iso_context(struct client *client, void *buffer)
@@ -812,6 +813,28 @@ static int ioctl_stop_iso(struct client *client, void *buffer)
        return fw_iso_context_stop(client->iso_context);
}

static int ioctl_get_cycle_timer(struct client *client, void *buffer)
{
        struct fw_cdev_get_cycle_timer *request = buffer;
        struct fw_card *card = client->device->card;
        unsigned long long bus_time;
        struct timeval tv;
        unsigned long flags;

        preempt_disable();
        local_irq_save(flags);

        bus_time = card->driver->get_bus_time(card);
        do_gettimeofday(&tv);

        local_irq_restore(flags);
        preempt_enable();

        request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
        request->cycle_timer = bus_time & 0xffffffff;
        return 0;
}

static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
        ioctl_get_info,
        ioctl_send_request,
@@ -825,6 +848,7 @@ static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
        ioctl_queue_iso,
        ioctl_start_iso,
        ioctl_stop_iso,
        ioctl_get_cycle_timer,
};

static int
......
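For context on how the new ioctl above is reached from user space: the handler's slot in ioctl_handlers[] corresponds to the ioctl's command number, which is why new entries are only appended. A minimal user-space sketch follows; it assumes the FW_CDEV_IOC_GET_CYCLE_TIMER request macro and the struct fw_cdev_get_cycle_timer layout from <linux/firewire-cdev.h>, and uses /dev/fw0 only as an example device node.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/firewire-cdev.h>

int main(void)
{
        struct fw_cdev_get_cycle_timer ctr;
        int fd = open("/dev/fw0", O_RDWR);      /* example device node */

        if (fd < 0 || ioctl(fd, FW_CDEV_IOC_GET_CYCLE_TIMER, &ctr) < 0) {
                perror("get_cycle_timer");
                return 1;
        }
        /* local_time is gettimeofday() in microseconds, cycle_timer is the
         * raw 32-bit isochronous cycle timer register value. */
        printf("local_time=%llu us cycle_timer=0x%08x\n",
               (unsigned long long)ctr.local_time, ctr.cycle_timer);
        close(fd);
        return 0;
}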
@@ -102,11 +102,6 @@ fw_unit(struct device *dev)
#define CSR_INSTANCE            0x18
#define CSR_DIRECTORY_ID        0x20

-#define SBP2_COMMAND_SET_SPECIFIER      0x38
-#define SBP2_COMMAND_SET                0x39
-#define SBP2_COMMAND_SET_REVISION       0x3b
-#define SBP2_FIRMWARE_REVISION          0x3c

struct fw_csr_iterator {
        u32 *p;
        u32 *end;
......
@@ -18,21 +18,23 @@
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/poll.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>

-#include <asm/uaccess.h>
-#include <asm/semaphore.h>
#include <asm/page.h>
#include <asm/system.h>

-#include "fw-transaction.h"
#include "fw-ohci.h"
#include "fw-transaction.h"

#define DESCRIPTOR_OUTPUT_MORE          0
#define DESCRIPTOR_OUTPUT_LAST          (1 << 12)
@@ -678,6 +680,9 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
        /* FIXME: Document how the locking works. */
        if (ohci->generation != packet->generation) {
                if (packet->payload_length > 0)
                        dma_unmap_single(ohci->card.device, payload_bus,
                                         packet->payload_length, DMA_TO_DEVICE);
                packet->ack = RCODE_GENERATION;
                return -1;
        }
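The dma_unmap_single() added above restores the usual pairing rule: every successful dma_map_single() must be matched by exactly one unmap, including on early-exit error paths. A generic sketch of that pattern, with invented helper names:

#include <linux/dma-mapping.h>
#include <linux/types.h>

/* Hypothetical helper: map a payload, back out cleanly on error. */
static int example_queue_payload(struct device *dev, void *buf, size_t len,
                                 bool device_gone)
{
        dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(bus))
                return -ENOMEM;

        if (device_gone) {
                /* Error path: undo the mapping before bailing out. */
                dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
                return -ENODEV;
        }

        /* ...hand "bus" to the hardware here... */
        return 0;
}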
@@ -912,10 +917,15 @@ static void bus_reset_tasklet(unsigned long data)
        reg = reg_read(ohci, OHCI1394_NodeID);
        if (!(reg & OHCI1394_NodeID_idValid)) {
-               fw_error("node ID not valid, new bus reset in progress\n");
                fw_notify("node ID not valid, new bus reset in progress\n");
                return;
        }
        if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
                fw_notify("malconfigured bus\n");
                return;
        }
-       ohci->node_id = reg & 0xffff;
        ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
                               OHCI1394_NodeID_nodeNumber);

        /*
         * The count in the SelfIDCount register is the number of
@@ -926,12 +936,14 @@ static void bus_reset_tasklet(unsigned long data)
        self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff;
        generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
        rmb();

        for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
                if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1])
                        fw_error("inconsistent self IDs\n");
                ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]);
        }
        rmb();

        /*
         * Check the consistency of the self IDs we just read. The
@@ -1046,6 +1058,9 @@ static irqreturn_t irq_handler(int irq, void *data)
                iso_event &= ~(1 << i);
        }

        if (unlikely(event & OHCI1394_postedWriteErr))
                fw_error("PCI posted write error\n");

        if (event & OHCI1394_cycle64Seconds) {
                cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
                if ((cycle_time & 0x80000000) == 0)
@@ -1119,8 +1134,8 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
                  OHCI1394_RQPkt | OHCI1394_RSPkt |
                  OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
                  OHCI1394_isochRx | OHCI1394_isochTx |
-                 OHCI1394_masterIntEnable |
-                 OHCI1394_cycle64Seconds);
                  OHCI1394_postedWriteErr | OHCI1394_cycle64Seconds |
                  OHCI1394_masterIntEnable);

        /* Activate link_on bit and contender bit in our self ID packets.*/
        if (ohci_update_phy_reg(card, 4, 0,
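About the rmb() calls added in bus_reset_tasklet() above: the self-ID buffer is written by the controller via DMA, so the generation word read from self_id_cpu[0] and the subsequent self-ID reads must not be reordered by the CPU, which would defeat the later generation consistency check. A rough sketch of that read-side ordering (generic names and an invented buffer layout, not the driver's code):

#include <linux/types.h>
#include <asm/system.h>         /* rmb() on this kernel generation */

/* Hypothetical DMA-updated buffer: word 0 carries a generation count. */
static u32 example_read_snapshot(const u32 *dma_buf, u32 *out, int count)
{
        int i;
        u32 generation = dma_buf[0] >> 16;

        rmb();          /* read the generation before the payload words */
        for (i = 0; i < count; i++)
                out[i] = dma_buf[1 + i];
        rmb();          /* finish the payload reads before re-checking */

        /* The caller compares "generation" against a value read later to
         * detect that the buffer changed underneath us during the copy. */
        return generation;
}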
......
@@ -59,6 +59,8 @@
#define OHCI1394_LinkControl_cycleSource        (1 << 22)
#define OHCI1394_NodeID                 0x0E8
#define  OHCI1394_NodeID_idValid        0x80000000
#define  OHCI1394_NodeID_nodeNumber     0x0000003f
#define  OHCI1394_NodeID_busNumber      0x0000ffc0
#define OHCI1394_PhyControl             0x0EC
#define  OHCI1394_PhyControl_Read(addr) (((addr) << 8) | 0x00008000)
#define  OHCI1394_PhyControl_ReadDone   0x80000000
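The two new masks describe the NodeID register layout used in bus_reset_tasklet() above: bits 0-5 hold the node number and bits 6-15 the bus number. A small sketch of pulling both fields out of a register read; the shift value simply follows from the 0x0000ffc0 mask and the snippet is illustrative only:

#include <linux/types.h>

#define OHCI1394_NodeID_nodeNumber      0x0000003f
#define OHCI1394_NodeID_busNumber       0x0000ffc0

static void example_decode_node_id(u32 reg, int *bus, int *node)
{
        *node = reg & OHCI1394_NodeID_nodeNumber;        /* bits 0..5  */
        *bus  = (reg & OHCI1394_NodeID_busNumber) >> 6;  /* bits 6..15 */
}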
......
@@ -37,11 +37,12 @@
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/string.h>
#include <linux/stringify.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
@@ -61,36 +62,94 @@ module_param_named(exclusive_login, sbp2_param_exclusive_login, bool, 0644);
MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
                 "(default = Y, use N for concurrent initiators)");
/*
* Flags for firmware oddities
*
* - 128kB max transfer
* Limit transfer size. Necessary for some old bridges.
*
* - 36 byte inquiry
* When scsi_mod probes the device, let the inquiry command look like that
* from MS Windows.
*
* - skip mode page 8
* Suppress sending of mode_sense for mode page 8 if the device pretends to
* support the SCSI Primary Block commands instead of Reduced Block Commands.
*
* - fix capacity
* Tell sd_mod to correct the last sector number reported by read_capacity.
* Avoids access beyond actual disk limits on devices with an off-by-one bug.
* Don't use this with devices which don't have this bug.
*
* - override internal blacklist
* Instead of adding to the built-in blacklist, use only the workarounds
* specified in the module load parameter.
* Useful if a blacklist entry interfered with a non-broken device.
*/
#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1
#define SBP2_WORKAROUND_INQUIRY_36 0x2
#define SBP2_WORKAROUND_MODE_SENSE_8 0x4
#define SBP2_WORKAROUND_FIX_CAPACITY 0x8
#define SBP2_WORKAROUND_OVERRIDE 0x100
static int sbp2_param_workarounds;
module_param_named(workarounds, sbp2_param_workarounds, int, 0644);
MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS)
", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36)
", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
", or a combination)");
/* I don't know why the SCSI stack doesn't define something like this... */
typedef void (*scsi_done_fn_t)(struct scsi_cmnd *);

static const char sbp2_driver_name[] = "sbp2";

-struct sbp2_device {
-       struct kref kref;
-       struct fw_unit *unit;
/*
 * We create one struct sbp2_logical_unit per SBP-2 Logical Unit Number Entry
 * and one struct scsi_device per sbp2_logical_unit.
 */
struct sbp2_logical_unit {
        struct sbp2_target *tgt;
        struct list_head link;
        struct scsi_device *sdev;
        struct fw_address_handler address_handler;
        struct list_head orb_list;

-       u64 management_agent_address;
        u64 command_block_agent_address;
-       u32 workarounds;
        u16 lun;
        int login_id;

        /*
-        * We cache these addresses and only update them once we've
-        * logged in or reconnected to the sbp2 device. That way, any
-        * IO to the device will automatically fail and get retried if
-        * it happens in a window where the device is not ready to
-        * handle it (e.g. after a bus reset but before we reconnect).
         * The generation is updated once we've logged in or reconnected
         * to the logical unit. Thus, I/O to the device will automatically
         * fail and get retried if it happens in a window where the device
         * is not ready, e.g. after a bus reset but before we reconnect.
         */
-       int node_id;
-       int address_high;
        int generation;
        int retries;
        struct delayed_work work;
};

/*
 * We create one struct sbp2_target per IEEE 1212 Unit Directory
 * and one struct Scsi_Host per sbp2_target.
 */
struct sbp2_target {
        struct kref kref;
        struct fw_unit *unit;

        u64 management_agent_address;
        int directory_id;
        int node_id;
        int address_high;
        unsigned workarounds;
        struct list_head lu_list;
};

#define SBP2_MAX_SG_ELEMENT_LENGTH      0xf000
#define SBP2_MAX_SECTORS                255     /* Max sectors supported */
#define SBP2_ORB_TIMEOUT                2000    /* Timeout in ms */
@@ -101,17 +160,9 @@ struct sbp2_device {
#define SBP2_DIRECTION_FROM_MEDIA       0x1

/* Unit directory keys */
-#define SBP2_COMMAND_SET_SPECIFIER      0x38
-#define SBP2_COMMAND_SET                0x39
-#define SBP2_COMMAND_SET_REVISION       0x3b
-#define SBP2_FIRMWARE_REVISION          0x3c
#define SBP2_CSR_FIRMWARE_REVISION      0x3c
#define SBP2_CSR_LOGICAL_UNIT_NUMBER    0x14
#define SBP2_CSR_LOGICAL_UNIT_DIRECTORY 0xd4

-/* Flags for detected oddities and brokeness */
-#define SBP2_WORKAROUND_128K_MAX_TRANS  0x1
-#define SBP2_WORKAROUND_INQUIRY_36      0x2
-#define SBP2_WORKAROUND_MODE_SENSE_8    0x4
-#define SBP2_WORKAROUND_FIX_CAPACITY    0x8
-#define SBP2_WORKAROUND_OVERRIDE        0x100

/* Management orb opcodes */
#define SBP2_LOGIN_REQUEST              0x0
@@ -219,7 +270,7 @@ struct sbp2_command_orb {
        } request;
        struct scsi_cmnd *cmd;
        scsi_done_fn_t done;
-       struct fw_unit *unit;
        struct sbp2_logical_unit *lu;

        struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8)));
        dma_addr_t page_table_bus;
@@ -295,7 +346,7 @@ sbp2_status_write(struct fw_card *card, struct fw_request *request,
                  unsigned long long offset,
                  void *payload, size_t length, void *callback_data)
{
-       struct sbp2_device *sd = callback_data;
        struct sbp2_logical_unit *lu = callback_data;
        struct sbp2_orb *orb;
        struct sbp2_status status;
        size_t header_size;
@@ -319,7 +370,7 @@ sbp2_status_write(struct fw_card *card, struct fw_request *request,
        /* Lookup the orb corresponding to this status write. */
        spin_lock_irqsave(&card->lock, flags);
-       list_for_each_entry(orb, &sd->orb_list, link) {
        list_for_each_entry(orb, &lu->orb_list, link) {
                if (STATUS_GET_ORB_HIGH(status) == 0 &&
                    STATUS_GET_ORB_LOW(status) == orb->request_bus) {
                        orb->rcode = RCODE_COMPLETE;
@@ -329,7 +380,7 @@ sbp2_status_write(struct fw_card *card, struct fw_request *request,
        }
        spin_unlock_irqrestore(&card->lock, flags);

-       if (&orb->link != &sd->orb_list)
        if (&orb->link != &lu->orb_list)
                orb->callback(orb, &status);
        else
                fw_error("status write for unknown orb\n");
@@ -361,20 +412,20 @@ complete_transaction(struct fw_card *card, int rcode,
        orb->rcode = rcode;
        if (orb->rcode != RCODE_COMPLETE) {
                list_del(&orb->link);
                spin_unlock_irqrestore(&card->lock, flags);
                orb->callback(orb, NULL);
        } else {
                spin_unlock_irqrestore(&card->lock, flags);
        }
-       spin_unlock_irqrestore(&card->lock, flags);

        kref_put(&orb->kref, free_orb);
}

static void
-sbp2_send_orb(struct sbp2_orb *orb, struct fw_unit *unit,
sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
              int node_id, int generation, u64 offset)
{
-       struct fw_device *device = fw_device(unit->device.parent);
-       struct sbp2_device *sd = unit->device.driver_data;
        struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
        unsigned long flags;

        orb->pointer.high = 0;
@@ -382,7 +433,7 @@ sbp2_send_orb(struct sbp2_orb *orb, struct fw_unit *unit,
        fw_memcpy_to_be32(&orb->pointer, &orb->pointer, sizeof(orb->pointer));

        spin_lock_irqsave(&device->card->lock, flags);
-       list_add_tail(&orb->link, &sd->orb_list);
        list_add_tail(&orb->link, &lu->orb_list);
        spin_unlock_irqrestore(&device->card->lock, flags);

        /* Take a ref for the orb list and for the transaction callback. */
@@ -395,10 +446,9 @@ sbp2_send_orb(struct sbp2_orb *orb, struct fw_unit *unit,
                        complete_transaction, orb);
}

-static int sbp2_cancel_orbs(struct fw_unit *unit)
static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
{
-       struct fw_device *device = fw_device(unit->device.parent);
-       struct sbp2_device *sd = unit->device.driver_data;
        struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
        struct sbp2_orb *orb, *next;
        struct list_head list;
        unsigned long flags;
@@ -406,7 +456,7 @@ static int sbp2_cancel_orbs(struct fw_unit *unit)
        INIT_LIST_HEAD(&list);
        spin_lock_irqsave(&device->card->lock, flags);
-       list_splice_init(&sd->orb_list, &list);
        list_splice_init(&lu->orb_list, &list);
        spin_unlock_irqrestore(&device->card->lock, flags);

        list_for_each_entry_safe(orb, next, &list, link) {
...@@ -433,11 +483,11 @@ complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) ...@@ -433,11 +483,11 @@ complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
} }
static int static int
sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation, sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
int function, int lun, void *response) int generation, int function, int lun_or_login_id,
void *response)
{ {
struct fw_device *device = fw_device(unit->device.parent); struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
struct sbp2_device *sd = unit->device.driver_data;
struct sbp2_management_orb *orb; struct sbp2_management_orb *orb;
int retval = -ENOMEM; int retval = -ENOMEM;
...@@ -458,12 +508,12 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation, ...@@ -458,12 +508,12 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
orb->request.misc = orb->request.misc =
MANAGEMENT_ORB_NOTIFY | MANAGEMENT_ORB_NOTIFY |
MANAGEMENT_ORB_FUNCTION(function) | MANAGEMENT_ORB_FUNCTION(function) |
MANAGEMENT_ORB_LUN(lun); MANAGEMENT_ORB_LUN(lun_or_login_id);
orb->request.length = orb->request.length =
MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response)); MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response));
orb->request.status_fifo.high = sd->address_handler.offset >> 32; orb->request.status_fifo.high = lu->address_handler.offset >> 32;
orb->request.status_fifo.low = sd->address_handler.offset; orb->request.status_fifo.low = lu->address_handler.offset;
if (function == SBP2_LOGIN_REQUEST) { if (function == SBP2_LOGIN_REQUEST) {
orb->request.misc |= orb->request.misc |=
...@@ -482,14 +532,14 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation, ...@@ -482,14 +532,14 @@ sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
if (dma_mapping_error(orb->base.request_bus)) if (dma_mapping_error(orb->base.request_bus))
goto fail_mapping_request; goto fail_mapping_request;
sbp2_send_orb(&orb->base, unit, sbp2_send_orb(&orb->base, lu, node_id, generation,
node_id, generation, sd->management_agent_address); lu->tgt->management_agent_address);
wait_for_completion_timeout(&orb->done, wait_for_completion_timeout(&orb->done,
msecs_to_jiffies(SBP2_ORB_TIMEOUT)); msecs_to_jiffies(SBP2_ORB_TIMEOUT));
retval = -EIO; retval = -EIO;
if (sbp2_cancel_orbs(unit) == 0) { if (sbp2_cancel_orbs(lu) == 0) {
fw_error("orb reply timed out, rcode=0x%02x\n", fw_error("orb reply timed out, rcode=0x%02x\n",
orb->base.rcode); orb->base.rcode);
goto out; goto out;
...@@ -534,10 +584,9 @@ complete_agent_reset_write(struct fw_card *card, int rcode, ...@@ -534,10 +584,9 @@ complete_agent_reset_write(struct fw_card *card, int rcode,
kfree(t); kfree(t);
} }
static int sbp2_agent_reset(struct fw_unit *unit) static int sbp2_agent_reset(struct sbp2_logical_unit *lu)
{ {
struct fw_device *device = fw_device(unit->device.parent); struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
struct sbp2_device *sd = unit->device.driver_data;
struct fw_transaction *t; struct fw_transaction *t;
static u32 zero; static u32 zero;
...@@ -546,181 +595,272 @@ static int sbp2_agent_reset(struct fw_unit *unit) ...@@ -546,181 +595,272 @@ static int sbp2_agent_reset(struct fw_unit *unit)
return -ENOMEM; return -ENOMEM;
fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST, fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
sd->node_id, sd->generation, device->max_speed, lu->tgt->node_id, lu->generation, device->max_speed,
sd->command_block_agent_address + SBP2_AGENT_RESET, lu->command_block_agent_address + SBP2_AGENT_RESET,
&zero, sizeof(zero), complete_agent_reset_write, t); &zero, sizeof(zero), complete_agent_reset_write, t);
return 0; return 0;
} }
static void sbp2_reconnect(struct work_struct *work); static void sbp2_release_target(struct kref *kref)
static struct scsi_host_template scsi_driver_template;
static void release_sbp2_device(struct kref *kref)
{ {
struct sbp2_device *sd = container_of(kref, struct sbp2_device, kref); struct sbp2_target *tgt = container_of(kref, struct sbp2_target, kref);
struct Scsi_Host *host = struct sbp2_logical_unit *lu, *next;
container_of((void *)sd, struct Scsi_Host, hostdata[0]); struct Scsi_Host *shost =
container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
scsi_remove_host(host);
sbp2_send_management_orb(sd->unit, sd->node_id, sd->generation, list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
SBP2_LOGOUT_REQUEST, sd->login_id, NULL); if (lu->sdev)
fw_core_remove_address_handler(&sd->address_handler); scsi_remove_device(lu->sdev);
fw_notify("removed sbp2 unit %s\n", sd->unit->device.bus_id);
put_device(&sd->unit->device); sbp2_send_management_orb(lu, tgt->node_id, lu->generation,
scsi_host_put(host); SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
fw_core_remove_address_handler(&lu->address_handler);
list_del(&lu->link);
kfree(lu);
}
scsi_remove_host(shost);
fw_notify("released %s\n", tgt->unit->device.bus_id);
put_device(&tgt->unit->device);
scsi_host_put(shost);
} }
static struct workqueue_struct *sbp2_wq;
static void sbp2_reconnect(struct work_struct *work);
static void sbp2_login(struct work_struct *work) static void sbp2_login(struct work_struct *work)
{ {
struct sbp2_device *sd = struct sbp2_logical_unit *lu =
container_of(work, struct sbp2_device, work.work); container_of(work, struct sbp2_logical_unit, work.work);
struct Scsi_Host *host = struct Scsi_Host *shost =
container_of((void *)sd, struct Scsi_Host, hostdata[0]); container_of((void *)lu->tgt, struct Scsi_Host, hostdata[0]);
struct fw_unit *unit = sd->unit; struct scsi_device *sdev;
struct scsi_lun eight_bytes_lun;
struct fw_unit *unit = lu->tgt->unit;
struct fw_device *device = fw_device(unit->device.parent); struct fw_device *device = fw_device(unit->device.parent);
struct sbp2_login_response response; struct sbp2_login_response response;
int generation, node_id, local_node_id, lun, retval; int generation, node_id, local_node_id;
/* FIXME: Make this work for multi-lun devices. */
lun = 0;
generation = device->card->generation; generation = device->card->generation;
node_id = device->node->node_id; node_id = device->node->node_id;
local_node_id = device->card->local_node->node_id; local_node_id = device->card->local_node->node_id;
if (sbp2_send_management_orb(unit, node_id, generation, if (sbp2_send_management_orb(lu, node_id, generation,
SBP2_LOGIN_REQUEST, lun, &response) < 0) { SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) {
if (sd->retries++ < 5) { if (lu->retries++ < 5) {
schedule_delayed_work(&sd->work, DIV_ROUND_UP(HZ, 5)); queue_delayed_work(sbp2_wq, &lu->work,
DIV_ROUND_UP(HZ, 5));
} else { } else {
fw_error("failed to login to %s\n", fw_error("failed to login to %s LUN %04x\n",
unit->device.bus_id); unit->device.bus_id, lu->lun);
kref_put(&sd->kref, release_sbp2_device); kref_put(&lu->tgt->kref, sbp2_release_target);
} }
return; return;
} }
sd->generation = generation; lu->generation = generation;
sd->node_id = node_id; lu->tgt->node_id = node_id;
sd->address_high = local_node_id << 16; lu->tgt->address_high = local_node_id << 16;
/* Get command block agent offset and login id. */ /* Get command block agent offset and login id. */
sd->command_block_agent_address = lu->command_block_agent_address =
((u64) (response.command_block_agent.high & 0xffff) << 32) | ((u64) (response.command_block_agent.high & 0xffff) << 32) |
response.command_block_agent.low; response.command_block_agent.low;
sd->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response); lu->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response);
fw_notify("logged in to sbp2 unit %s (%d retries)\n", fw_notify("logged in to %s LUN %04x (%d retries)\n",
unit->device.bus_id, sd->retries); unit->device.bus_id, lu->lun, lu->retries);
fw_notify(" - management_agent_address: 0x%012llx\n",
(unsigned long long) sd->management_agent_address);
fw_notify(" - command_block_agent_address: 0x%012llx\n",
(unsigned long long) sd->command_block_agent_address);
fw_notify(" - status write address: 0x%012llx\n",
(unsigned long long) sd->address_handler.offset);
#if 0 #if 0
/* FIXME: The linux1394 sbp2 does this last step. */ /* FIXME: The linux1394 sbp2 does this last step. */
sbp2_set_busy_timeout(scsi_id); sbp2_set_busy_timeout(scsi_id);
#endif #endif
PREPARE_DELAYED_WORK(&sd->work, sbp2_reconnect); PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
sbp2_agent_reset(unit); sbp2_agent_reset(lu);
memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun));
eight_bytes_lun.scsi_lun[0] = (lu->lun >> 8) & 0xff;
eight_bytes_lun.scsi_lun[1] = lu->lun & 0xff;
/* FIXME: Loop over luns here. */ sdev = __scsi_add_device(shost, 0, 0,
lun = 0; scsilun_to_int(&eight_bytes_lun), lu);
retval = scsi_add_device(host, 0, 0, lun); if (IS_ERR(sdev)) {
if (retval < 0) { sbp2_send_management_orb(lu, node_id, generation,
sbp2_send_management_orb(unit, sd->node_id, sd->generation, SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
SBP2_LOGOUT_REQUEST, sd->login_id,
NULL);
/* /*
* Set this back to sbp2_login so we fall back and * Set this back to sbp2_login so we fall back and
* retry login on bus reset. * retry login on bus reset.
*/ */
PREPARE_DELAYED_WORK(&sd->work, sbp2_login); PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
} else {
lu->sdev = sdev;
scsi_device_put(sdev);
} }
kref_put(&sd->kref, release_sbp2_device); kref_put(&lu->tgt->kref, sbp2_release_target);
} }
static int sbp2_probe(struct device *dev) static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
{ {
struct fw_unit *unit = fw_unit(dev); struct sbp2_logical_unit *lu;
struct fw_device *device = fw_device(unit->device.parent);
struct sbp2_device *sd;
struct fw_csr_iterator ci;
struct Scsi_Host *host;
int i, key, value, err;
u32 model, firmware_revision;
err = -ENOMEM; lu = kmalloc(sizeof(*lu), GFP_KERNEL);
host = scsi_host_alloc(&scsi_driver_template, sizeof(*sd)); if (!lu)
if (host == NULL) return -ENOMEM;
goto fail;
sd = (struct sbp2_device *) host->hostdata; lu->address_handler.length = 0x100;
unit->device.driver_data = sd; lu->address_handler.address_callback = sbp2_status_write;
sd->unit = unit; lu->address_handler.callback_data = lu;
INIT_LIST_HEAD(&sd->orb_list);
kref_init(&sd->kref);
sd->address_handler.length = 0x100; if (fw_core_add_address_handler(&lu->address_handler,
sd->address_handler.address_callback = sbp2_status_write; &fw_high_memory_region) < 0) {
sd->address_handler.callback_data = sd; kfree(lu);
return -ENOMEM;
}
err = fw_core_add_address_handler(&sd->address_handler, lu->tgt = tgt;
&fw_high_memory_region); lu->sdev = NULL;
if (err < 0) lu->lun = lun_entry & 0xffff;
goto fail_host; lu->retries = 0;
INIT_LIST_HEAD(&lu->orb_list);
INIT_DELAYED_WORK(&lu->work, sbp2_login);
err = fw_device_enable_phys_dma(device); list_add_tail(&lu->link, &tgt->lu_list);
if (err < 0) return 0;
goto fail_address_handler; }
err = scsi_add_host(host, &unit->device); static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, u32 *directory)
if (err < 0) {
goto fail_address_handler; struct fw_csr_iterator ci;
int key, value;
/* fw_csr_iterator_init(&ci, directory);
* Scan unit directory to get management agent address, while (fw_csr_iterator_next(&ci, &key, &value))
* firmware revison and model. Initialize firmware_revision if (key == SBP2_CSR_LOGICAL_UNIT_NUMBER &&
* and model to values that wont match anything in our table. sbp2_add_logical_unit(tgt, value) < 0)
*/ return -ENOMEM;
firmware_revision = 0xff000000; return 0;
model = 0xff000000; }
fw_csr_iterator_init(&ci, unit->directory);
static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
u32 *model, u32 *firmware_revision)
{
struct fw_csr_iterator ci;
int key, value;
fw_csr_iterator_init(&ci, directory);
while (fw_csr_iterator_next(&ci, &key, &value)) { while (fw_csr_iterator_next(&ci, &key, &value)) {
switch (key) { switch (key) {
case CSR_DEPENDENT_INFO | CSR_OFFSET: case CSR_DEPENDENT_INFO | CSR_OFFSET:
sd->management_agent_address = tgt->management_agent_address =
0xfffff0000000ULL + 4 * value; CSR_REGISTER_BASE + 4 * value;
break; break;
case SBP2_FIRMWARE_REVISION:
firmware_revision = value; case CSR_DIRECTORY_ID:
tgt->directory_id = value;
break; break;
case CSR_MODEL: case CSR_MODEL:
model = value; *model = value;
break;
case SBP2_CSR_FIRMWARE_REVISION:
*firmware_revision = value;
break;
case SBP2_CSR_LOGICAL_UNIT_NUMBER:
if (sbp2_add_logical_unit(tgt, value) < 0)
return -ENOMEM;
break;
case SBP2_CSR_LOGICAL_UNIT_DIRECTORY:
if (sbp2_scan_logical_unit_dir(tgt, ci.p + value) < 0)
return -ENOMEM;
break; break;
} }
} }
return 0;
}
static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
u32 firmware_revision)
{
int i;
unsigned w = sbp2_param_workarounds;
if (w)
fw_notify("Please notify linux1394-devel@lists.sourceforge.net "
"if you need the workarounds parameter for %s\n",
tgt->unit->device.bus_id);
if (w & SBP2_WORKAROUND_OVERRIDE)
goto out;
for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) { for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
if (sbp2_workarounds_table[i].firmware_revision != if (sbp2_workarounds_table[i].firmware_revision !=
(firmware_revision & 0xffffff00)) (firmware_revision & 0xffffff00))
continue; continue;
if (sbp2_workarounds_table[i].model != model && if (sbp2_workarounds_table[i].model != model &&
sbp2_workarounds_table[i].model != ~0) sbp2_workarounds_table[i].model != ~0)
continue; continue;
sd->workarounds |= sbp2_workarounds_table[i].workarounds;
w |= sbp2_workarounds_table[i].workarounds;
break; break;
} }
out:
if (sd->workarounds) if (w)
fw_notify("Workarounds for node %s: 0x%x " fw_notify("Workarounds for %s: 0x%x "
"(firmware_revision 0x%06x, model_id 0x%06x)\n", "(firmware_revision 0x%06x, model_id 0x%06x)\n",
unit->device.bus_id, tgt->unit->device.bus_id,
sd->workarounds, firmware_revision, model); w, firmware_revision, model);
tgt->workarounds = w;
}
static struct scsi_host_template scsi_driver_template;
static int sbp2_probe(struct device *dev)
{
struct fw_unit *unit = fw_unit(dev);
struct fw_device *device = fw_device(unit->device.parent);
struct sbp2_target *tgt;
struct sbp2_logical_unit *lu;
struct Scsi_Host *shost;
u32 model, firmware_revision;
shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt));
if (shost == NULL)
return -ENOMEM;
tgt = (struct sbp2_target *)shost->hostdata;
unit->device.driver_data = tgt;
tgt->unit = unit;
kref_init(&tgt->kref);
INIT_LIST_HEAD(&tgt->lu_list);
if (fw_device_enable_phys_dma(device) < 0)
goto fail_shost_put;
if (scsi_add_host(shost, &unit->device) < 0)
goto fail_shost_put;
/* Initialize to values that won't match anything in our table. */
firmware_revision = 0xff000000;
model = 0xff000000;
/* implicit directory ID */
tgt->directory_id = ((unit->directory - device->config_rom) * 4
+ CSR_CONFIG_ROM) & 0xffffff;
if (sbp2_scan_unit_dir(tgt, unit->directory, &model,
&firmware_revision) < 0)
goto fail_tgt_put;
sbp2_init_workarounds(tgt, model, firmware_revision);
get_device(&unit->device); get_device(&unit->device);
@@ -729,35 +869,34 @@ static int sbp2_probe(struct device *dev)
         * reschedule retries. Always get the ref before scheduling
         * work.
         */
-       INIT_DELAYED_WORK(&sd->work, sbp2_login);
-       if (schedule_delayed_work(&sd->work, 0))
-               kref_get(&sd->kref);
        list_for_each_entry(lu, &tgt->lu_list, link)
                if (queue_delayed_work(sbp2_wq, &lu->work, 0))
                        kref_get(&tgt->kref);

        return 0;

-fail_address_handler:
-       fw_core_remove_address_handler(&sd->address_handler);
-fail_host:
-       scsi_host_put(host);
-fail:
-       return err;
 fail_tgt_put:
        kref_put(&tgt->kref, sbp2_release_target);
        return -ENOMEM;

 fail_shost_put:
        scsi_host_put(shost);
        return -ENOMEM;
}

static int sbp2_remove(struct device *dev)
{
        struct fw_unit *unit = fw_unit(dev);
-       struct sbp2_device *sd = unit->device.driver_data;
        struct sbp2_target *tgt = unit->device.driver_data;

-       kref_put(&sd->kref, release_sbp2_device);
        kref_put(&tgt->kref, sbp2_release_target);

        return 0;
}
static void sbp2_reconnect(struct work_struct *work) static void sbp2_reconnect(struct work_struct *work)
{ {
struct sbp2_device *sd = struct sbp2_logical_unit *lu =
container_of(work, struct sbp2_device, work.work); container_of(work, struct sbp2_logical_unit, work.work);
struct fw_unit *unit = sd->unit; struct fw_unit *unit = lu->tgt->unit;
struct fw_device *device = fw_device(unit->device.parent); struct fw_device *device = fw_device(unit->device.parent);
int generation, node_id, local_node_id; int generation, node_id, local_node_id;
...@@ -765,40 +904,49 @@ static void sbp2_reconnect(struct work_struct *work) ...@@ -765,40 +904,49 @@ static void sbp2_reconnect(struct work_struct *work)
node_id = device->node->node_id; node_id = device->node->node_id;
local_node_id = device->card->local_node->node_id; local_node_id = device->card->local_node->node_id;
if (sbp2_send_management_orb(unit, node_id, generation, if (sbp2_send_management_orb(lu, node_id, generation,
SBP2_RECONNECT_REQUEST, SBP2_RECONNECT_REQUEST,
sd->login_id, NULL) < 0) { lu->login_id, NULL) < 0) {
if (sd->retries++ >= 5) { if (lu->retries++ >= 5) {
fw_error("failed to reconnect to %s\n", fw_error("failed to reconnect to %s\n",
unit->device.bus_id); unit->device.bus_id);
/* Fall back and try to log in again. */ /* Fall back and try to log in again. */
sd->retries = 0; lu->retries = 0;
PREPARE_DELAYED_WORK(&sd->work, sbp2_login); PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
} }
schedule_delayed_work(&sd->work, DIV_ROUND_UP(HZ, 5)); queue_delayed_work(sbp2_wq, &lu->work, DIV_ROUND_UP(HZ, 5));
return; return;
} }
sd->generation = generation; lu->generation = generation;
sd->node_id = node_id; lu->tgt->node_id = node_id;
sd->address_high = local_node_id << 16; lu->tgt->address_high = local_node_id << 16;
fw_notify("reconnected to unit %s (%d retries)\n", fw_notify("reconnected to %s LUN %04x (%d retries)\n",
unit->device.bus_id, sd->retries); unit->device.bus_id, lu->lun, lu->retries);
sbp2_agent_reset(unit);
sbp2_cancel_orbs(unit); sbp2_agent_reset(lu);
kref_put(&sd->kref, release_sbp2_device); sbp2_cancel_orbs(lu);
kref_put(&lu->tgt->kref, sbp2_release_target);
} }
static void sbp2_update(struct fw_unit *unit) static void sbp2_update(struct fw_unit *unit)
{ {
struct fw_device *device = fw_device(unit->device.parent); struct sbp2_target *tgt = unit->device.driver_data;
struct sbp2_device *sd = unit->device.driver_data; struct sbp2_logical_unit *lu;
sd->retries = 0; fw_device_enable_phys_dma(fw_device(unit->device.parent));
fw_device_enable_phys_dma(device);
if (schedule_delayed_work(&sd->work, 0)) /*
kref_get(&sd->kref); * Fw-core serializes sbp2_update() against sbp2_remove().
* Iteration over tgt->lu_list is therefore safe here.
*/
list_for_each_entry(lu, &tgt->lu_list, link) {
lu->retries = 0;
if (queue_delayed_work(sbp2_wq, &lu->work, 0))
kref_get(&tgt->kref);
}
} }
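Both sbp2_probe() and sbp2_update() above follow the rule spelled out in the probe comment: take a target reference before queueing work, and let the work function drop it when it finishes or gives up. A stripped-down sketch of that pattern, with invented names rather than the driver's own:

#include <linux/kref.h>
#include <linux/workqueue.h>

struct example_lu {
        struct kref *tgt_kref;          /* reference on the owning target */
        struct delayed_work work;
};

static void example_release(struct kref *kref)
{
        /* free the target here */
}

static void example_work(struct work_struct *work)
{
        struct example_lu *lu =
                container_of(work, struct example_lu, work.work);

        /* ... do the login/reconnect work ... */

        /* Drop the reference taken when the work was queued. */
        kref_put(lu->tgt_kref, example_release);
}

static void example_kick(struct workqueue_struct *wq, struct example_lu *lu)
{
        /* queue_delayed_work() returns nonzero only if the work was not
         * already pending, so the reference is taken at most once per run. */
        if (queue_delayed_work(wq, &lu->work, 0))
                kref_get(lu->tgt_kref);
}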
#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e #define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e
...@@ -868,13 +1016,12 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) ...@@ -868,13 +1016,12 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
{ {
struct sbp2_command_orb *orb = struct sbp2_command_orb *orb =
container_of(base_orb, struct sbp2_command_orb, base); container_of(base_orb, struct sbp2_command_orb, base);
struct fw_unit *unit = orb->unit; struct fw_device *device = fw_device(orb->lu->tgt->unit->device.parent);
struct fw_device *device = fw_device(unit->device.parent);
int result; int result;
if (status != NULL) { if (status != NULL) {
if (STATUS_GET_DEAD(*status)) if (STATUS_GET_DEAD(*status))
sbp2_agent_reset(unit); sbp2_agent_reset(orb->lu);
switch (STATUS_GET_RESPONSE(*status)) { switch (STATUS_GET_RESPONSE(*status)) {
case SBP2_STATUS_REQUEST_COMPLETE: case SBP2_STATUS_REQUEST_COMPLETE:
...@@ -918,12 +1065,10 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) ...@@ -918,12 +1065,10 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
orb->done(orb->cmd); orb->done(orb->cmd);
} }
static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb) static int
sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
struct sbp2_logical_unit *lu)
{ {
struct sbp2_device *sd =
(struct sbp2_device *)orb->cmd->device->host->hostdata;
struct fw_unit *unit = sd->unit;
struct fw_device *device = fw_device(unit->device.parent);
struct scatterlist *sg; struct scatterlist *sg;
int sg_len, l, i, j, count; int sg_len, l, i, j, count;
dma_addr_t sg_addr; dma_addr_t sg_addr;
...@@ -942,10 +1087,9 @@ static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb) ...@@ -942,10 +1087,9 @@ static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
* tables. * tables.
*/ */
if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) { if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) {
orb->request.data_descriptor.high = sd->address_high; orb->request.data_descriptor.high = lu->tgt->address_high;
orb->request.data_descriptor.low = sg_dma_address(sg); orb->request.data_descriptor.low = sg_dma_address(sg);
orb->request.misc |= orb->request.misc |= COMMAND_ORB_DATA_SIZE(sg_dma_len(sg));
COMMAND_ORB_DATA_SIZE(sg_dma_len(sg));
return 0; return 0;
} }
...@@ -989,7 +1133,7 @@ static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb) ...@@ -989,7 +1133,7 @@ static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
* initiator (i.e. us), but data_descriptor can refer to data * initiator (i.e. us), but data_descriptor can refer to data
* on other nodes so we need to put our ID in descriptor.high. * on other nodes so we need to put our ID in descriptor.high.
*/ */
orb->request.data_descriptor.high = sd->address_high; orb->request.data_descriptor.high = lu->tgt->address_high;
orb->request.data_descriptor.low = orb->page_table_bus; orb->request.data_descriptor.low = orb->page_table_bus;
orb->request.misc |= orb->request.misc |=
COMMAND_ORB_PAGE_TABLE_PRESENT | COMMAND_ORB_PAGE_TABLE_PRESENT |
...@@ -1008,12 +1152,11 @@ static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb) ...@@ -1008,12 +1152,11 @@ static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
{ {
struct sbp2_device *sd = struct sbp2_logical_unit *lu = cmd->device->hostdata;
(struct sbp2_device *)cmd->device->host->hostdata; struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
struct fw_unit *unit = sd->unit;
struct fw_device *device = fw_device(unit->device.parent);
struct sbp2_command_orb *orb; struct sbp2_command_orb *orb;
unsigned max_payload; unsigned max_payload;
int retval = SCSI_MLQUEUE_HOST_BUSY;
/* /*
* Bidirectional commands are not yet implemented, and unknown * Bidirectional commands are not yet implemented, and unknown
...@@ -1029,14 +1172,14 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) ...@@ -1029,14 +1172,14 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
orb = kzalloc(sizeof(*orb), GFP_ATOMIC); orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
if (orb == NULL) { if (orb == NULL) {
fw_notify("failed to alloc orb\n"); fw_notify("failed to alloc orb\n");
goto fail_alloc; return SCSI_MLQUEUE_HOST_BUSY;
} }
/* Initialize rcode to something not RCODE_COMPLETE. */ /* Initialize rcode to something not RCODE_COMPLETE. */
orb->base.rcode = -1; orb->base.rcode = -1;
kref_init(&orb->base.kref); kref_init(&orb->base.kref);
orb->unit = unit; orb->lu = lu;
orb->done = done; orb->done = done;
orb->cmd = cmd; orb->cmd = cmd;
...@@ -1062,8 +1205,8 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) ...@@ -1062,8 +1205,8 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
orb->request.misc |= orb->request.misc |=
COMMAND_ORB_DIRECTION(SBP2_DIRECTION_TO_MEDIA); COMMAND_ORB_DIRECTION(SBP2_DIRECTION_TO_MEDIA);
if (scsi_sg_count(cmd) && sbp2_command_orb_map_scatterlist(orb) < 0) if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0)
goto fail_mapping; goto out;
fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request)); fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));
...@@ -1076,49 +1219,47 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) ...@@ -1076,49 +1219,47 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
dma_map_single(device->card->device, &orb->request, dma_map_single(device->card->device, &orb->request,
sizeof(orb->request), DMA_TO_DEVICE); sizeof(orb->request), DMA_TO_DEVICE);
if (dma_mapping_error(orb->base.request_bus)) if (dma_mapping_error(orb->base.request_bus))
goto fail_mapping; goto out;
sbp2_send_orb(&orb->base, unit, sd->node_id, sd->generation,
sd->command_block_agent_address + SBP2_ORB_POINTER);
kref_put(&orb->base.kref, free_orb);
return 0;
fail_mapping: sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, lu->generation,
lu->command_block_agent_address + SBP2_ORB_POINTER);
retval = 0;
out:
kref_put(&orb->base.kref, free_orb); kref_put(&orb->base.kref, free_orb);
fail_alloc: return retval;
return SCSI_MLQUEUE_HOST_BUSY;
} }
static int sbp2_scsi_slave_alloc(struct scsi_device *sdev) static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
{ {
struct sbp2_device *sd = (struct sbp2_device *)sdev->host->hostdata; struct sbp2_logical_unit *lu = sdev->hostdata;
sdev->allow_restart = 1; sdev->allow_restart = 1;
if (sd->workarounds & SBP2_WORKAROUND_INQUIRY_36) if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
sdev->inquiry_len = 36; sdev->inquiry_len = 36;
return 0; return 0;
} }
static int sbp2_scsi_slave_configure(struct scsi_device *sdev) static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
{ {
struct sbp2_device *sd = (struct sbp2_device *)sdev->host->hostdata; struct sbp2_logical_unit *lu = sdev->hostdata;
struct fw_unit *unit = sd->unit;
sdev->use_10_for_rw = 1; sdev->use_10_for_rw = 1;
if (sdev->type == TYPE_ROM) if (sdev->type == TYPE_ROM)
sdev->use_10_for_ms = 1; sdev->use_10_for_ms = 1;
if (sdev->type == TYPE_DISK && if (sdev->type == TYPE_DISK &&
sd->workarounds & SBP2_WORKAROUND_MODE_SENSE_8) lu->tgt->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
sdev->skip_ms_page_8 = 1; sdev->skip_ms_page_8 = 1;
if (sd->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) {
fw_notify("setting fix_capacity for %s\n", unit->device.bus_id); if (lu->tgt->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
sdev->fix_capacity = 1; sdev->fix_capacity = 1;
}
if (sd->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS) if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512); blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
return 0; return 0;
} }
...@@ -1128,13 +1269,11 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev) ...@@ -1128,13 +1269,11 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
*/ */
static int sbp2_scsi_abort(struct scsi_cmnd *cmd) static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
{ {
struct sbp2_device *sd = struct sbp2_logical_unit *lu = cmd->device->hostdata;
(struct sbp2_device *)cmd->device->host->hostdata;
struct fw_unit *unit = sd->unit;
fw_notify("sbp2_scsi_abort\n"); fw_notify("sbp2_scsi_abort\n");
sbp2_agent_reset(unit); sbp2_agent_reset(lu);
sbp2_cancel_orbs(unit); sbp2_cancel_orbs(lu);
return SUCCESS; return SUCCESS;
} }
...@@ -1151,37 +1290,18 @@ sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_attribute *attr, ...@@ -1151,37 +1290,18 @@ sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_attribute *attr,
char *buf) char *buf)
{ {
struct scsi_device *sdev = to_scsi_device(dev); struct scsi_device *sdev = to_scsi_device(dev);
struct sbp2_device *sd; struct sbp2_logical_unit *lu;
struct fw_unit *unit;
struct fw_device *device; struct fw_device *device;
u32 directory_id;
struct fw_csr_iterator ci;
int key, value, lun;
if (!sdev) if (!sdev)
return 0; return 0;
sd = (struct sbp2_device *)sdev->host->hostdata;
unit = sd->unit;
device = fw_device(unit->device.parent);
/* implicit directory ID */
directory_id = ((unit->directory - device->config_rom) * 4
+ CSR_CONFIG_ROM) & 0xffffff;
/* explicit directory ID, overrides implicit ID if present */
fw_csr_iterator_init(&ci, unit->directory);
while (fw_csr_iterator_next(&ci, &key, &value))
if (key == CSR_DIRECTORY_ID) {
directory_id = value;
break;
}
/* FIXME: Make this work for multi-lun devices. */ lu = sdev->hostdata;
lun = 0; device = fw_device(lu->tgt->unit->device.parent);
return sprintf(buf, "%08x%08x:%06x:%04x\n", return sprintf(buf, "%08x%08x:%06x:%04x\n",
device->config_rom[3], device->config_rom[4], device->config_rom[3], device->config_rom[4],
directory_id, lun); lu->tgt->directory_id, lu->lun);
} }
static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL); static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);
@@ -1219,12 +1339,17 @@ MODULE_ALIAS("sbp2");
static int __init sbp2_init(void)
{
        sbp2_wq = create_singlethread_workqueue(KBUILD_MODNAME);
        if (!sbp2_wq)
                return -ENOMEM;

        return driver_register(&sbp2_driver.driver);
}

static void __exit sbp2_cleanup(void)
{
        driver_unregister(&sbp2_driver.driver);
        destroy_workqueue(sbp2_wq);
}

module_init(sbp2_init);
......
@@ -152,6 +152,10 @@ static void update_hop_count(struct fw_node *node)
        node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2);
}

static inline struct fw_node *fw_node(struct list_head *l)
{
        return list_entry(l, struct fw_node, link);
}

/**
 * build_tree - Build the tree representation of the topology
@@ -162,7 +166,7 @@ static void update_hop_count(struct fw_node *node)
 * This function builds the tree representation of the topology given
 * by the self IDs from the latest bus reset. During the construction
 * of the tree, the function checks that the self IDs are valid and
- * internally consistent. On succcess this funtions returns the
 * internally consistent. On succcess this function returns the
 * fw_node corresponding to the local card otherwise NULL.
 */
static struct fw_node *build_tree(struct fw_card *card,
@@ -211,6 +215,10 @@ static struct fw_node *build_tree(struct fw_card *card,
         */
        for (i = 0, h = &stack; i < child_port_count; i++)
                h = h->prev;
        /*
         * When the stack is empty, this yields an invalid value,
         * but that pointer will never be dereferenced.
         */
        child = fw_node(h);

        node = fw_node_create(q, port_count, card->color);
......
@@ -50,12 +50,6 @@ struct fw_node {
        struct fw_node *ports[0];
};

-static inline struct fw_node *
-fw_node(struct list_head *l)
-{
-       return list_entry(l, struct fw_node, link);
-}

static inline struct fw_node *
fw_node_get(struct fw_node *node)
{
......
@@ -410,7 +410,12 @@ EXPORT_SYMBOL(fw_unit_space_region);
 * controller. When a request is received that falls within the
 * specified address range, the specified callback is invoked. The
 * parameters passed to the callback give the details of the
- * particular request
 * particular request.
 *
 * Return value: 0 on success, non-zero otherwise.
 * The start offset of the handler's address region is determined by
 * fw_core_add_address_handler() and is returned in handler->offset.
 * The offset is quadlet-aligned.
 */
int
fw_core_add_address_handler(struct fw_address_handler *handler,
@@ -422,14 +427,15 @@ fw_core_add_address_handler(struct fw_address_handler *handler,
        spin_lock_irqsave(&address_handler_lock, flags);

-       handler->offset = region->start;
        handler->offset = roundup(region->start, 4);
        while (handler->offset + handler->length <= region->end) {
                other =
                    lookup_overlapping_address_handler(&address_handler_list,
                                                       handler->offset,
                                                       handler->length);
                if (other != NULL) {
-                       handler->offset += other->length;
                        handler->offset =
                            roundup(other->offset + other->length, 4);
                } else {
                        list_add_tail(&handler->link, &address_handler_list);
                        ret = 0;
......
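As a usage illustration for the documented behaviour above (the core picks the start offset and reports it back in handler->offset), a minimal caller might look like the sketch below. It mirrors what fw-sbp2.c does for its status FIFO; the names are invented, and the full callback parameter list is an assumption based on the fw_address_callback_t type of this era's fw-transaction.h.

#include <linux/kernel.h>
#include "fw-transaction.h"

static void example_callback(struct fw_card *card, struct fw_request *request,
                             int tcode, int destination, int source,
                             int generation, int speed,
                             unsigned long long offset,
                             void *payload, size_t length, void *callback_data)
{
        /* handle the request, then complete it */
        fw_send_response(card, request, RCODE_COMPLETE);
}

static struct fw_address_handler example_handler = {
        .length           = 0x100,
        .address_callback = example_callback,
};

static int example_register(void)
{
        if (fw_core_add_address_handler(&example_handler,
                                        &fw_high_memory_region) < 0)
                return -EBUSY;

        /* The core chose the (quadlet-aligned) start address for us: */
        printk(KERN_INFO "handler registered at offset 0x%012llx\n",
               (unsigned long long)example_handler.offset);
        return 0;
}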
@@ -218,12 +218,10 @@ static struct csr1212_keyval *csr1212_new_keyval(u8 type, u8 key)
        if (!kv)
                return NULL;

        atomic_set(&kv->refcnt, 1);
        kv->key.type = type;
        kv->key.id = key;
        kv->associate = NULL;
-       kv->refcnt = 1;
        kv->next = NULL;
        kv->prev = NULL;
        kv->offset = 0;
...@@ -326,12 +324,13 @@ void csr1212_associate_keyval(struct csr1212_keyval *kv, ...@@ -326,12 +324,13 @@ void csr1212_associate_keyval(struct csr1212_keyval *kv,
if (kv->associate) if (kv->associate)
csr1212_release_keyval(kv->associate); csr1212_release_keyval(kv->associate);
associate->refcnt++; csr1212_keep_keyval(associate);
kv->associate = associate; kv->associate = associate;
} }
int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir, static int __csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
struct csr1212_keyval *kv) struct csr1212_keyval *kv,
bool keep_keyval)
{ {
struct csr1212_dentry *dentry; struct csr1212_dentry *dentry;
...@@ -341,10 +340,10 @@ int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir, ...@@ -341,10 +340,10 @@ int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
if (!dentry) if (!dentry)
return -ENOMEM; return -ENOMEM;
if (keep_keyval)
csr1212_keep_keyval(kv);
dentry->kv = kv; dentry->kv = kv;
kv->refcnt++;
dentry->next = NULL; dentry->next = NULL;
dentry->prev = dir->value.directory.dentries_tail; dentry->prev = dir->value.directory.dentries_tail;
...@@ -358,6 +357,12 @@ int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir, ...@@ -358,6 +357,12 @@ int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
return CSR1212_SUCCESS; return CSR1212_SUCCESS;
} }
int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
struct csr1212_keyval *kv)
{
return __csr1212_attach_keyval_to_directory(dir, kv, true);
}
#define CSR1212_DESCRIPTOR_LEAF_DATA(kv) \ #define CSR1212_DESCRIPTOR_LEAF_DATA(kv) \
(&((kv)->value.leaf.data[1])) (&((kv)->value.leaf.data[1]))
...@@ -483,15 +488,18 @@ void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir, ...@@ -483,15 +488,18 @@ void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
/* This function is used to free the memory taken by a keyval. If the given /* This function is used to free the memory taken by a keyval. If the given
* keyval is a directory type, then any keyvals contained in that directory * keyval is a directory type, then any keyvals contained in that directory
 * will be destroyed as well if their respective refcnts are 0. By means of * will be destroyed as well if no one holds a reference on them. By means of
* list manipulation, this routine will descend a directory structure in a * list manipulation, this routine will descend a directory structure in a
* non-recursive manner. */ * non-recursive manner. */
static void csr1212_destroy_keyval(struct csr1212_keyval *kv) void csr1212_release_keyval(struct csr1212_keyval *kv)
{ {
struct csr1212_keyval *k, *a; struct csr1212_keyval *k, *a;
struct csr1212_dentry dentry; struct csr1212_dentry dentry;
struct csr1212_dentry *head, *tail; struct csr1212_dentry *head, *tail;
if (!atomic_dec_and_test(&kv->refcnt))
return;
dentry.kv = kv; dentry.kv = kv;
dentry.next = NULL; dentry.next = NULL;
dentry.prev = NULL; dentry.prev = NULL;
...@@ -503,9 +511,8 @@ static void csr1212_destroy_keyval(struct csr1212_keyval *kv) ...@@ -503,9 +511,8 @@ static void csr1212_destroy_keyval(struct csr1212_keyval *kv)
k = head->kv; k = head->kv;
while (k) { while (k) {
k->refcnt--; /* must not dec_and_test kv->refcnt again */
if (k != kv && !atomic_dec_and_test(&k->refcnt))
if (k->refcnt > 0)
break; break;
a = k->associate; a = k->associate;
...@@ -536,14 +543,6 @@ static void csr1212_destroy_keyval(struct csr1212_keyval *kv) ...@@ -536,14 +543,6 @@ static void csr1212_destroy_keyval(struct csr1212_keyval *kv)
} }
} }
void csr1212_release_keyval(struct csr1212_keyval *kv)
{
if (kv->refcnt > 1)
kv->refcnt--;
else
csr1212_destroy_keyval(kv);
}
void csr1212_destroy_csr(struct csr1212_csr *csr) void csr1212_destroy_csr(struct csr1212_csr *csr)
{ {
struct csr1212_csr_rom_cache *c, *oc; struct csr1212_csr_rom_cache *c, *oc;
...@@ -1126,6 +1125,7 @@ csr1212_parse_dir_entry(struct csr1212_keyval *dir, u32 ki, u32 kv_pos) ...@@ -1126,6 +1125,7 @@ csr1212_parse_dir_entry(struct csr1212_keyval *dir, u32 ki, u32 kv_pos)
int ret = CSR1212_SUCCESS; int ret = CSR1212_SUCCESS;
struct csr1212_keyval *k = NULL; struct csr1212_keyval *k = NULL;
u32 offset; u32 offset;
bool keep_keyval = true;
switch (CSR1212_KV_KEY_TYPE(ki)) { switch (CSR1212_KV_KEY_TYPE(ki)) {
case CSR1212_KV_TYPE_IMMEDIATE: case CSR1212_KV_TYPE_IMMEDIATE:
...@@ -1135,8 +1135,8 @@ csr1212_parse_dir_entry(struct csr1212_keyval *dir, u32 ki, u32 kv_pos) ...@@ -1135,8 +1135,8 @@ csr1212_parse_dir_entry(struct csr1212_keyval *dir, u32 ki, u32 kv_pos)
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
} }
/* Don't keep local reference when parsing. */
k->refcnt = 0; /* Don't keep local reference when parsing. */ keep_keyval = false;
break; break;
case CSR1212_KV_TYPE_CSR_OFFSET: case CSR1212_KV_TYPE_CSR_OFFSET:
...@@ -1146,7 +1146,8 @@ csr1212_parse_dir_entry(struct csr1212_keyval *dir, u32 ki, u32 kv_pos) ...@@ -1146,7 +1146,8 @@ csr1212_parse_dir_entry(struct csr1212_keyval *dir, u32 ki, u32 kv_pos)
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
} }
k->refcnt = 0; /* Don't keep local reference when parsing. */ /* Don't keep local reference when parsing. */
keep_keyval = false;
break; break;
default: default:
...@@ -1174,8 +1175,10 @@ csr1212_parse_dir_entry(struct csr1212_keyval *dir, u32 ki, u32 kv_pos) ...@@ -1174,8 +1175,10 @@ csr1212_parse_dir_entry(struct csr1212_keyval *dir, u32 ki, u32 kv_pos)
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
} }
k->refcnt = 0; /* Don't keep local reference when parsing. */ /* Don't keep local reference when parsing. */
k->valid = 0; /* Contents not read yet so it's not valid. */ keep_keyval = false;
/* Contents not read yet so it's not valid. */
k->valid = 0;
k->offset = offset; k->offset = offset;
k->prev = dir; k->prev = dir;
...@@ -1183,7 +1186,7 @@ csr1212_parse_dir_entry(struct csr1212_keyval *dir, u32 ki, u32 kv_pos) ...@@ -1183,7 +1186,7 @@ csr1212_parse_dir_entry(struct csr1212_keyval *dir, u32 ki, u32 kv_pos)
dir->next->prev = k; dir->next->prev = k;
dir->next = k; dir->next = k;
} }
ret = csr1212_attach_keyval_to_directory(dir, k); ret = __csr1212_attach_keyval_to_directory(dir, k, keep_keyval);
out: out:
if (ret != CSR1212_SUCCESS && k != NULL) if (ret != CSR1212_SUCCESS && k != NULL)
free_keyval(k); free_keyval(k);
......
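Taken together, the csr1212.c changes implement the usual create/attach/release ownership discipline. A hedged sketch of what a well-behaved caller looks like under the new rules; csr1212_new_immediate() is recalled from the csr1212 API and is not shown in this diff:

static int add_vendor_id(struct csr1212_keyval *dir)
{
	struct csr1212_keyval *kv;
	int ret;

	/* Creation hands the caller one reference (refcnt == 1). */
	kv = csr1212_new_immediate(CSR1212_KV_ID_VENDOR, 0x001234);
	if (!kv)
		return -ENOMEM;

	/* The public attach wrapper passes keep_keyval == true, so the
	 * directory takes its own reference on kv. */
	ret = csr1212_attach_keyval_to_directory(dir, kv);

	/* Drop the creation reference; kv now lives exactly as long as the
	 * directory (or anyone else) still references it, and is freed
	 * right here if the attach failed. */
	csr1212_release_keyval(kv);
	return ret;
}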
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <asm/atomic.h>
#define CSR1212_MALLOC(size) kmalloc((size), GFP_KERNEL) #define CSR1212_MALLOC(size) kmalloc((size), GFP_KERNEL)
#define CSR1212_FREE(ptr) kfree(ptr) #define CSR1212_FREE(ptr) kfree(ptr)
...@@ -149,7 +150,7 @@ struct csr1212_keyval { ...@@ -149,7 +150,7 @@ struct csr1212_keyval {
struct csr1212_directory directory; struct csr1212_directory directory;
} value; } value;
struct csr1212_keyval *associate; struct csr1212_keyval *associate;
int refcnt; atomic_t refcnt;
/* used in generating and/or parsing CSR image */ /* used in generating and/or parsing CSR image */
struct csr1212_keyval *next, *prev; /* flat list of CSR elements */ struct csr1212_keyval *next, *prev; /* flat list of CSR elements */
...@@ -350,7 +351,8 @@ csr1212_get_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv); ...@@ -350,7 +351,8 @@ csr1212_get_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv);
* need for code to retain a keyval that has been parsed. */ * need for code to retain a keyval that has been parsed. */
static inline void csr1212_keep_keyval(struct csr1212_keyval *kv) static inline void csr1212_keep_keyval(struct csr1212_keyval *kv)
{ {
kv->refcnt++; atomic_inc(&kv->refcnt);
smp_mb__after_atomic_inc();
} }
......
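Stripped of the csr1212 specifics, the header now follows the standard atomic refcount idiom. A generic, self-contained sketch of that idiom, for illustration only:

#include <asm/atomic.h>
#include <linux/slab.h>

struct obj {
	atomic_t refcnt;
	/* payload ... */
};

static void obj_get(struct obj *o)
{
	atomic_inc(&o->refcnt);
}

static void obj_put(struct obj *o)
{
	/* atomic_dec_and_test() returns true for exactly one caller, the
	 * one that drops the count to zero, so the free happens once. */
	if (atomic_dec_and_test(&o->refcnt))
		kfree(o);
}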
...@@ -1153,8 +1153,6 @@ static int ether1394_data_handler(struct net_device *dev, int srcid, int destid, ...@@ -1153,8 +1153,6 @@ static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
pdg->sz++; pdg->sz++;
lh = find_partial_datagram(pdgl, dgl); lh = find_partial_datagram(pdgl, dgl);
} else { } else {
struct partial_datagram *pd;
pd = list_entry(lh, struct partial_datagram, list); pd = list_entry(lh, struct partial_datagram, list);
if (fragment_overlap(&pd->frag_info, fg_off, fg_len)) { if (fragment_overlap(&pd->frag_info, fg_off, fg_len)) {
...@@ -1222,23 +1220,19 @@ static int ether1394_data_handler(struct net_device *dev, int srcid, int destid, ...@@ -1222,23 +1220,19 @@ static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
priv->stats.rx_errors++; priv->stats.rx_errors++;
priv->stats.rx_dropped++; priv->stats.rx_dropped++;
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
goto bad_proto; } else if (netif_rx(skb) == NET_RX_DROP) {
}
if (netif_rx(skb) == NET_RX_DROP) {
priv->stats.rx_errors++; priv->stats.rx_errors++;
priv->stats.rx_dropped++; priv->stats.rx_dropped++;
goto bad_proto; } else {
priv->stats.rx_packets++;
priv->stats.rx_bytes += skb->len;
} }
/* Statistics */ spin_unlock_irqrestore(&priv->lock, flags);
priv->stats.rx_packets++;
priv->stats.rx_bytes += skb->len;
bad_proto: bad_proto:
if (netif_queue_stopped(dev)) if (netif_queue_stopped(dev))
netif_wake_queue(dev); netif_wake_queue(dev);
spin_unlock_irqrestore(&priv->lock, flags);
dev->last_rx = jiffies; dev->last_rx = jiffies;
......
...@@ -488,7 +488,7 @@ void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot) ...@@ -488,7 +488,7 @@ void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
highlevel_host_reset(host); highlevel_host_reset(host);
} }
static spinlock_t pending_packets_lock = SPIN_LOCK_UNLOCKED; static DEFINE_SPINLOCK(pending_packets_lock);
/** /**
* hpsb_packet_sent - notify core of sending a packet * hpsb_packet_sent - notify core of sending a packet
......
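The same substitution applies to any statically allocated lock; the idiom in isolation, with a generic name:

#include <linux/spinlock.h>

/* Preferred: static initialization via the helper macro. */
static DEFINE_SPINLOCK(my_lock);

/* Replaces the deprecated spelling: */
/* static spinlock_t my_lock = SPIN_LOCK_UNLOCKED; */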
...@@ -1014,13 +1014,13 @@ static struct unit_directory *nodemgr_process_unit_directory ...@@ -1014,13 +1014,13 @@ static struct unit_directory *nodemgr_process_unit_directory
CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE(kv) == 0) { CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE(kv) == 0) {
switch (last_key_id) { switch (last_key_id) {
case CSR1212_KV_ID_VENDOR: case CSR1212_KV_ID_VENDOR:
ud->vendor_name_kv = kv;
csr1212_keep_keyval(kv); csr1212_keep_keyval(kv);
ud->vendor_name_kv = kv;
break; break;
case CSR1212_KV_ID_MODEL: case CSR1212_KV_ID_MODEL:
ud->model_name_kv = kv;
csr1212_keep_keyval(kv); csr1212_keep_keyval(kv);
ud->model_name_kv = kv;
break; break;
} }
...@@ -1112,7 +1112,7 @@ static void nodemgr_process_root_directory(struct host_info *hi, struct node_ent ...@@ -1112,7 +1112,7 @@ static void nodemgr_process_root_directory(struct host_info *hi, struct node_ent
{ {
unsigned int ud_id = 0; unsigned int ud_id = 0;
struct csr1212_dentry *dentry; struct csr1212_dentry *dentry;
struct csr1212_keyval *kv; struct csr1212_keyval *kv, *vendor_name_kv = NULL;
u8 last_key_id = 0; u8 last_key_id = 0;
ne->needs_probe = 0; ne->needs_probe = 0;
...@@ -1139,8 +1139,8 @@ static void nodemgr_process_root_directory(struct host_info *hi, struct node_ent ...@@ -1139,8 +1139,8 @@ static void nodemgr_process_root_directory(struct host_info *hi, struct node_ent
CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH(kv) == 0 && CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH(kv) == 0 &&
CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET(kv) == 0 && CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET(kv) == 0 &&
CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE(kv) == 0) { CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE(kv) == 0) {
ne->vendor_name_kv = kv;
csr1212_keep_keyval(kv); csr1212_keep_keyval(kv);
vendor_name_kv = kv;
} }
} }
break; break;
...@@ -1149,10 +1149,13 @@ static void nodemgr_process_root_directory(struct host_info *hi, struct node_ent ...@@ -1149,10 +1149,13 @@ static void nodemgr_process_root_directory(struct host_info *hi, struct node_ent
} }
if (ne->vendor_name_kv) { if (ne->vendor_name_kv) {
int error = device_create_file(&ne->device, kv = ne->vendor_name_kv;
&dev_attr_ne_vendor_name_kv); ne->vendor_name_kv = vendor_name_kv;
csr1212_release_keyval(kv);
if (error && error != -EEXIST) } else if (vendor_name_kv) {
ne->vendor_name_kv = vendor_name_kv;
if (device_create_file(&ne->device,
&dev_attr_ne_vendor_name_kv) != 0)
HPSB_ERR("Failed to add sysfs attribute"); HPSB_ERR("Failed to add sysfs attribute");
} }
} }
...@@ -1712,7 +1715,8 @@ static int nodemgr_host_thread(void *__hi) ...@@ -1712,7 +1715,8 @@ static int nodemgr_host_thread(void *__hi)
* to make sure things settle down. */ * to make sure things settle down. */
g = get_hpsb_generation(host); g = get_hpsb_generation(host);
for (i = 0; i < 4 ; i++) { for (i = 0; i < 4 ; i++) {
if (msleep_interruptible(63) || kthread_should_stop()) msleep_interruptible(63);
if (kthread_should_stop())
goto exit; goto exit;
/* Now get the generation in which the node ID's we collect /* Now get the generation in which the node ID's we collect
......
...@@ -121,16 +121,6 @@ static int bit_getsda(void *data) ...@@ -121,16 +121,6 @@ static int bit_getsda(void *data)
return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000010; return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000010;
} }
static int bit_reg(struct i2c_client *client)
{
return 0;
}
static int bit_unreg(struct i2c_client *client)
{
return 0;
}
static struct i2c_algo_bit_data bit_data = { static struct i2c_algo_bit_data bit_data = {
.setsda = bit_setsda, .setsda = bit_setsda,
.setscl = bit_setscl, .setscl = bit_setscl,
...@@ -140,14 +130,6 @@ static struct i2c_algo_bit_data bit_data = { ...@@ -140,14 +130,6 @@ static struct i2c_algo_bit_data bit_data = {
.timeout = 100, .timeout = 100,
}; };
static struct i2c_adapter bit_ops = {
.id = 0xAA, //FIXME: probably we should get an id in i2c-id.h
.client_register = bit_reg,
.client_unregister = bit_unreg,
.name = "PCILynx I2C",
};
/* /*
* PCL handling functions. * PCL handling functions.
...@@ -765,7 +747,6 @@ static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg) ...@@ -765,7 +747,6 @@ static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
} else { } else {
struct ti_pcl pcl; struct ti_pcl pcl;
u32 ack; u32 ack;
struct hpsb_packet *packet;
PRINT(KERN_INFO, lynx->id, "cancelling async packet, that was already in PCL"); PRINT(KERN_INFO, lynx->id, "cancelling async packet, that was already in PCL");
...@@ -1436,9 +1417,11 @@ static int __devinit add_card(struct pci_dev *dev, ...@@ -1436,9 +1417,11 @@ static int __devinit add_card(struct pci_dev *dev,
struct i2c_algo_bit_data i2c_adapter_data; struct i2c_algo_bit_data i2c_adapter_data;
error = -ENOMEM; error = -ENOMEM;
i2c_ad = kmemdup(&bit_ops, sizeof(*i2c_ad), GFP_KERNEL); i2c_ad = kzalloc(sizeof(*i2c_ad), GFP_KERNEL);
if (!i2c_ad) FAIL("failed to allocate I2C adapter memory"); if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");
i2c_ad->id = I2C_HW_B_PCILYNX;
strlcpy(i2c_ad->name, "PCILynx I2C", sizeof(i2c_ad->name));
i2c_adapter_data = bit_data; i2c_adapter_data = bit_data;
i2c_ad->algo_data = &i2c_adapter_data; i2c_ad->algo_data = &i2c_adapter_data;
i2c_adapter_data.data = lynx; i2c_adapter_data.data = lynx;
...@@ -1465,13 +1448,11 @@ static int __devinit add_card(struct pci_dev *dev, ...@@ -1465,13 +1448,11 @@ static int __devinit add_card(struct pci_dev *dev,
{ 0x50, I2C_M_RD, 20, (unsigned char*) lynx->bus_info_block } { 0x50, I2C_M_RD, 20, (unsigned char*) lynx->bus_info_block }
}; };
/* we use i2c_transfer, because i2c_smbus_read_block_data does not work properly and we /* we use i2c_transfer because we have no i2c_client
do it more efficiently in one transaction rather then using several reads */ at hand */
if (i2c_transfer(i2c_ad, msg, 2) < 0) { if (i2c_transfer(i2c_ad, msg, 2) < 0) {
PRINT(KERN_ERR, lynx->id, "unable to read bus info block from i2c"); PRINT(KERN_ERR, lynx->id, "unable to read bus info block from i2c");
} else { } else {
int i;
PRINT(KERN_INFO, lynx->id, "got bus info block from serial eeprom"); PRINT(KERN_INFO, lynx->id, "got bus info block from serial eeprom");
 /* FIXME: probably we should rewrite the max_rec, max_ROM(1394a), /* FIXME: probably we should rewrite the max_rec, max_ROM(1394a),
* generation(1394a) and link_spd(1394a) field and recalculate * generation(1394a) and link_spd(1394a) field and recalculate
......
...@@ -242,6 +242,8 @@ static int sbp2_max_speed_and_size(struct sbp2_lu *); ...@@ -242,6 +242,8 @@ static int sbp2_max_speed_and_size(struct sbp2_lu *);
static const u8 sbp2_speedto_max_payload[] = { 0x7, 0x8, 0x9, 0xA, 0xB, 0xC }; static const u8 sbp2_speedto_max_payload[] = { 0x7, 0x8, 0x9, 0xA, 0xB, 0xC };
static DEFINE_RWLOCK(sbp2_hi_logical_units_lock);
static struct hpsb_highlevel sbp2_highlevel = { static struct hpsb_highlevel sbp2_highlevel = {
.name = SBP2_DEVICE_NAME, .name = SBP2_DEVICE_NAME,
.host_reset = sbp2_host_reset, .host_reset = sbp2_host_reset,
...@@ -732,6 +734,7 @@ static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud) ...@@ -732,6 +734,7 @@ static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud)
struct sbp2_fwhost_info *hi; struct sbp2_fwhost_info *hi;
struct Scsi_Host *shost = NULL; struct Scsi_Host *shost = NULL;
struct sbp2_lu *lu = NULL; struct sbp2_lu *lu = NULL;
unsigned long flags;
lu = kzalloc(sizeof(*lu), GFP_KERNEL); lu = kzalloc(sizeof(*lu), GFP_KERNEL);
if (!lu) { if (!lu) {
...@@ -784,7 +787,9 @@ static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud) ...@@ -784,7 +787,9 @@ static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud)
lu->hi = hi; lu->hi = hi;
write_lock_irqsave(&sbp2_hi_logical_units_lock, flags);
list_add_tail(&lu->lu_list, &hi->logical_units); list_add_tail(&lu->lu_list, &hi->logical_units);
write_unlock_irqrestore(&sbp2_hi_logical_units_lock, flags);
/* Register the status FIFO address range. We could use the same FIFO /* Register the status FIFO address range. We could use the same FIFO
* for targets at different nodes. However we need different FIFOs per * for targets at different nodes. However we need different FIFOs per
...@@ -828,16 +833,20 @@ static void sbp2_host_reset(struct hpsb_host *host) ...@@ -828,16 +833,20 @@ static void sbp2_host_reset(struct hpsb_host *host)
{ {
struct sbp2_fwhost_info *hi; struct sbp2_fwhost_info *hi;
struct sbp2_lu *lu; struct sbp2_lu *lu;
unsigned long flags;
hi = hpsb_get_hostinfo(&sbp2_highlevel, host); hi = hpsb_get_hostinfo(&sbp2_highlevel, host);
if (!hi) if (!hi)
return; return;
read_lock_irqsave(&sbp2_hi_logical_units_lock, flags);
list_for_each_entry(lu, &hi->logical_units, lu_list) list_for_each_entry(lu, &hi->logical_units, lu_list)
if (likely(atomic_read(&lu->state) != if (likely(atomic_read(&lu->state) !=
SBP2LU_STATE_IN_SHUTDOWN)) { SBP2LU_STATE_IN_SHUTDOWN)) {
atomic_set(&lu->state, SBP2LU_STATE_IN_RESET); atomic_set(&lu->state, SBP2LU_STATE_IN_RESET);
scsi_block_requests(lu->shost); scsi_block_requests(lu->shost);
} }
read_unlock_irqrestore(&sbp2_hi_logical_units_lock, flags);
} }
static int sbp2_start_device(struct sbp2_lu *lu) static int sbp2_start_device(struct sbp2_lu *lu)
...@@ -919,6 +928,7 @@ static int sbp2_start_device(struct sbp2_lu *lu) ...@@ -919,6 +928,7 @@ static int sbp2_start_device(struct sbp2_lu *lu)
static void sbp2_remove_device(struct sbp2_lu *lu) static void sbp2_remove_device(struct sbp2_lu *lu)
{ {
struct sbp2_fwhost_info *hi; struct sbp2_fwhost_info *hi;
unsigned long flags;
if (!lu) if (!lu)
return; return;
...@@ -933,7 +943,9 @@ static void sbp2_remove_device(struct sbp2_lu *lu) ...@@ -933,7 +943,9 @@ static void sbp2_remove_device(struct sbp2_lu *lu)
flush_scheduled_work(); flush_scheduled_work();
sbp2util_remove_command_orb_pool(lu, hi->host); sbp2util_remove_command_orb_pool(lu, hi->host);
write_lock_irqsave(&sbp2_hi_logical_units_lock, flags);
list_del(&lu->lu_list); list_del(&lu->lu_list);
write_unlock_irqrestore(&sbp2_hi_logical_units_lock, flags);
if (lu->login_response) if (lu->login_response)
dma_free_coherent(hi->host->device.parent, dma_free_coherent(hi->host->device.parent,
...@@ -1707,6 +1719,7 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, ...@@ -1707,6 +1719,7 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid,
} }
/* Find the unit which wrote the status. */ /* Find the unit which wrote the status. */
read_lock_irqsave(&sbp2_hi_logical_units_lock, flags);
list_for_each_entry(lu_tmp, &hi->logical_units, lu_list) { list_for_each_entry(lu_tmp, &hi->logical_units, lu_list) {
if (lu_tmp->ne->nodeid == nodeid && if (lu_tmp->ne->nodeid == nodeid &&
lu_tmp->status_fifo_addr == addr) { lu_tmp->status_fifo_addr == addr) {
...@@ -1714,6 +1727,8 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, ...@@ -1714,6 +1727,8 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid,
break; break;
} }
} }
read_unlock_irqrestore(&sbp2_hi_logical_units_lock, flags);
if (unlikely(!lu)) { if (unlikely(!lu)) {
SBP2_ERR("lu is NULL - device is gone?"); SBP2_ERR("lu is NULL - device is gone?");
return RCODE_ADDRESS_ERROR; return RCODE_ADDRESS_ERROR;
......
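All of the sbp2 hunks apply one pattern: hi->logical_units is now guarded by sbp2_hi_logical_units_lock, with list_add_tail()/list_del() under the write lock and list_for_each_entry() under the read lock. A self-contained sketch of that pattern with generic names (not sbp2's actual structures):

#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_RWLOCK(units_lock);
static LIST_HEAD(units);

struct unit {
	struct list_head link;
	int id;
};

static void unit_add(struct unit *u)
{
	unsigned long flags;

	write_lock_irqsave(&units_lock, flags);
	list_add_tail(&u->link, &units);
	write_unlock_irqrestore(&units_lock, flags);
}

static struct unit *unit_find(int id)
{
	struct unit *u, *found = NULL;
	unsigned long flags;

	read_lock_irqsave(&units_lock, flags);
	list_for_each_entry(u, &units, link) {
		if (u->id == id) {
			found = u;
			break;
		}
	}
	read_unlock_irqrestore(&units_lock, flags);

	/* NOTE: returning the pointer after dropping the lock assumes some
	 * other rule keeps the unit alive; this sketch only shows the
	 * locking pattern around the list itself. */
	return found;
}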
...@@ -178,6 +178,7 @@ union fw_cdev_event { ...@@ -178,6 +178,7 @@ union fw_cdev_event {
#define FW_CDEV_IOC_QUEUE_ISO _IOWR('#', 0x09, struct fw_cdev_queue_iso) #define FW_CDEV_IOC_QUEUE_ISO _IOWR('#', 0x09, struct fw_cdev_queue_iso)
#define FW_CDEV_IOC_START_ISO _IOW('#', 0x0a, struct fw_cdev_start_iso) #define FW_CDEV_IOC_START_ISO _IOW('#', 0x0a, struct fw_cdev_start_iso)
#define FW_CDEV_IOC_STOP_ISO _IOW('#', 0x0b, struct fw_cdev_stop_iso) #define FW_CDEV_IOC_STOP_ISO _IOW('#', 0x0b, struct fw_cdev_stop_iso)
#define FW_CDEV_IOC_GET_CYCLE_TIMER _IOR('#', 0x0c, struct fw_cdev_get_cycle_timer)
/* FW_CDEV_VERSION History /* FW_CDEV_VERSION History
* *
...@@ -459,4 +460,18 @@ struct fw_cdev_stop_iso { ...@@ -459,4 +460,18 @@ struct fw_cdev_stop_iso {
__u32 handle; __u32 handle;
}; };
/**
* struct fw_cdev_get_cycle_timer - read cycle timer register
* @local_time: system time, in microseconds since the Epoch
* @cycle_timer: isochronous cycle timer, as per OHCI 1.1 clause 5.13
*
* The %FW_CDEV_IOC_GET_CYCLE_TIMER ioctl reads the isochronous cycle timer
 * and also the system clock. This allows one to express the receive time of an
* isochronous packet as a system time with microsecond accuracy.
*/
struct fw_cdev_get_cycle_timer {
__u64 local_time;
__u32 cycle_timer;
};
#endif /* _LINUX_FIREWIRE_CDEV_H */ #endif /* _LINUX_FIREWIRE_CDEV_H */
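A hedged userspace sketch of the new ioctl; the /dev/fw0 node name is an assumption, and the bit-field comment follows the OHCI 1.1 cycle timer layout rather than anything quoted from this patch:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

int main(void)
{
	struct fw_cdev_get_cycle_timer ctr;
	int fd = open("/dev/fw0", O_RDWR);	/* device node name is an assumption */

	if (fd < 0 || ioctl(fd, FW_CDEV_IOC_GET_CYCLE_TIMER, &ctr) < 0) {
		perror("FW_CDEV_IOC_GET_CYCLE_TIMER");
		return 1;
	}

	/* Per OHCI 1.1 clause 5.13: bits 31..25 cycleSeconds,
	 * bits 24..12 cycleCount, bits 11..0 cycleOffset. */
	printf("local_time=%llu us, seconds=%u cycles=%u offset=%u\n",
	       (unsigned long long)ctr.local_time,
	       ctr.cycle_timer >> 25,
	       (ctr.cycle_timer >> 12) & 0x1fff,
	       ctr.cycle_timer & 0xfff);

	close(fd);
	return 0;
}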