Commit 57e80e0b authored by Tyrel Datwyler, committed by Martin K. Petersen

scsi: ibmvfc: Define per-queue state/list locks

Define per-queue locks for protecting queue state and the event pool
sent/free lists. The evt list lock is initially redundant, but it allows
the follow-up patches to relax the queue locking around submissions and
completions.
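
For orientation, the shape of the change is easiest to see in isolation. The sketch below is not the driver's code (the demo_* names are invented for illustration); it shows the split this patch introduces: a pointer-based q_lock guards queue/CRQ state, while a separate l_lock guards only the sent/free event lists, so list bookkeeping no longer needs the host lock.

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_event {
	struct list_head queue_list;
};

struct demo_queue {
	spinlock_t _lock;		/* storage for the state lock */
	spinlock_t *q_lock;		/* guards queue/CRQ state */
	struct list_head sent;		/* events outstanding to the hypervisor */
	struct list_head free;		/* events available for reuse */
	spinlock_t l_lock;		/* guards only the sent/free lists */
};

static void demo_queue_init(struct demo_queue *q)
{
	spin_lock_init(&q->_lock);
	q->q_lock = &q->_lock;		/* indirection mirrors queue->q_lock */
	INIT_LIST_HEAD(&q->sent);
	INIT_LIST_HEAD(&q->free);
	spin_lock_init(&q->l_lock);
}

/* Submission path: track the event on the sent list under l_lock alone. */
static void demo_submit(struct demo_queue *q, struct demo_event *evt)
{
	unsigned long flags;

	spin_lock_irqsave(&q->l_lock, flags);
	list_add_tail(&evt->queue_list, &q->sent);
	spin_unlock_irqrestore(&q->l_lock, flags);
}

/* Completion path: unlink from sent, return the event to the free pool. */
static void demo_free_event(struct demo_queue *q, struct demo_event *evt)
{
	unsigned long flags;

	spin_lock_irqsave(&q->l_lock, flags);
	list_del(&evt->queue_list);
	list_add_tail(&evt->queue_list, &q->free);
	spin_unlock_irqrestore(&q->l_lock, flags);
}

The q_lock indirection (a pointer into the queue's own _lock) presumably leaves room for later queues to point q_lock at a shared lock; in this patch every queue simply uses its own.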

Link: https://lore.kernel.org/r/20210106201835.1053593-4-tyreld@linux.ibm.com
Reviewed-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: Tyrel Datwyler <tyreld@linux.ibm.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent e4b26f3d
--- a/drivers/scsi/ibmvfc/ibmvfc.c
+++ b/drivers/scsi/ibmvfc/ibmvfc.c
@@ -176,8 +176,9 @@ static void ibmvfc_trc_start(struct ibmvfc_event *evt)
 	struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
 	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
 	struct ibmvfc_trace_entry *entry;
+	int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;
 
-	entry = &vhost->trace[vhost->trace_index++];
+	entry = &vhost->trace[index];
 	entry->evt = evt;
 	entry->time = jiffies;
 	entry->fmt = evt->crq.format;
@@ -211,8 +212,10 @@ static void ibmvfc_trc_end(struct ibmvfc_event *evt)
 	struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
 	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
 	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
-	struct ibmvfc_trace_entry *entry = &vhost->trace[vhost->trace_index++];
+	struct ibmvfc_trace_entry *entry;
+	int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;
 
+	entry = &vhost->trace[index];
 	entry->evt = evt;
 	entry->time = jiffies;
 	entry->fmt = evt->crq.format;
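
The two hunks above also convert the trace-buffer index from a host_lock-protected bitfield to a lock-free atomic: trace_index becomes an atomic_t initialized to -1 (see the ibmvfc_alloc_mem() and ibmvfc.h hunks further down), so the first atomic_inc_return() yields 0, and masking with IBMVFC_TRACE_INDEX_MASK wraps within the power-of-two ring. A minimal standalone sketch of the same pattern, with invented demo_* names:

#include <linux/atomic.h>

#define DEMO_INDEX_BITS	8
#define DEMO_ENTRIES	(1 << DEMO_INDEX_BITS)
#define DEMO_INDEX_MASK	(DEMO_ENTRIES - 1)

static atomic_t demo_index = ATOMIC_INIT(-1);	/* first increment returns 0 */

static unsigned int demo_next_slot(void)
{
	/*
	 * atomic_inc_return() hands each caller a unique value; the
	 * power-of-two mask turns it into a wrapping ring-buffer slot,
	 * with no lock needed around the index.
	 */
	return atomic_inc_return(&demo_index) & DEMO_INDEX_MASK;
}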
@@ -805,6 +808,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
 
 	spin_lock_irqsave(vhost->host->host_lock, flags);
+	spin_lock(vhost->crq.q_lock);
 	vhost->state = IBMVFC_NO_CRQ;
 	vhost->logged_in = 0;
 
@@ -821,6 +825,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
 		dev_warn(vhost->dev, "Partner adapter not ready\n");
 	else if (rc != 0)
 		dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
+	spin_unlock(vhost->crq.q_lock);
 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
 
 	return rc;
@@ -853,10 +858,16 @@ static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
 static void ibmvfc_free_event(struct ibmvfc_event *evt)
 {
 	struct ibmvfc_event_pool *pool = &evt->queue->evt_pool;
+	unsigned long flags;
 
 	BUG_ON(!ibmvfc_valid_event(pool, evt));
 	BUG_ON(atomic_inc_return(&evt->free) != 1);
+
+	spin_lock_irqsave(&evt->queue->l_lock, flags);
 	list_add_tail(&evt->queue_list, &evt->queue->free);
+	if (evt->eh_comp)
+		complete(evt->eh_comp);
+	spin_unlock_irqrestore(&evt->queue->l_lock, flags);
 }
 
 /**
@@ -875,12 +886,27 @@ static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
 		cmnd->scsi_done(cmnd);
 	}
 
-	if (evt->eh_comp)
-		complete(evt->eh_comp);
-
 	ibmvfc_free_event(evt);
 }
 
+/**
+ * ibmvfc_complete_purge - Complete failed command list
+ * @purge_list:	list head of failed commands
+ *
+ * This function runs completions on commands to fail as a result of a
+ * host reset or platform migration. Caller must hold host_lock.
+ **/
+static void ibmvfc_complete_purge(struct list_head *purge_list)
+{
+	struct ibmvfc_event *evt, *pos;
+
+	list_for_each_entry_safe(evt, pos, purge_list, queue_list) {
+		list_del(&evt->queue_list);
+		ibmvfc_trc_end(evt);
+		evt->done(evt);
+	}
+}
+
 /**
  * ibmvfc_fail_request - Fail request with specified error code
  * @evt:	ibmvfc event struct
@@ -897,10 +923,7 @@ static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
 	} else
 		evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);
 
-	list_del(&evt->queue_list);
 	del_timer(&evt->timer);
-	ibmvfc_trc_end(evt);
-	evt->done(evt);
 }
 
 /**
@@ -914,10 +937,14 @@ static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
 static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
 {
 	struct ibmvfc_event *evt, *pos;
+	unsigned long flags;
 
 	ibmvfc_dbg(vhost, "Purging all requests\n");
+	spin_lock_irqsave(&vhost->crq.l_lock, flags);
 	list_for_each_entry_safe(evt, pos, &vhost->crq.sent, queue_list)
 		ibmvfc_fail_request(evt, error_code);
+	list_splice_init(&vhost->crq.sent, &vhost->purge);
+	spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
 }
 
 /**
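
Note the new purge flow these hunks set up: ibmvfc_fail_request() no longer unlinks or completes the event itself. Instead, ibmvfc_purge_requests() splices the whole sent list onto vhost->purge under the list lock, and the callers added later in this patch splice that onto a local list and run ibmvfc_complete_purge(), so the ->done() callbacks execute without the list lock held. A condensed sketch of the splice-then-complete pattern (invented demo_* names, not the driver's code):

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_event {
	struct list_head queue_list;
	void (*done)(struct demo_event *evt);
};

/* Step 1: under the list lock, move everything onto a holding list. */
static void demo_purge(spinlock_t *l_lock, struct list_head *sent,
		       struct list_head *purge)
{
	unsigned long flags;

	spin_lock_irqsave(l_lock, flags);
	list_splice_init(sent, purge);
	spin_unlock_irqrestore(l_lock, flags);
}

/* Step 2: later, run the completions with no list lock held. */
static void demo_complete_purge(struct list_head *purge)
{
	struct demo_event *evt, *pos;

	list_for_each_entry_safe(evt, pos, purge, queue_list) {
		list_del(&evt->queue_list);
		evt->done(evt);
	}
}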
@@ -1314,6 +1341,7 @@ static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
 
 	INIT_LIST_HEAD(&queue->sent);
 	INIT_LIST_HEAD(&queue->free);
+	spin_lock_init(&queue->l_lock);
 
 	for (i = 0; i < pool->size; ++i) {
 		struct ibmvfc_event *evt = &pool->events[i];
@@ -1368,11 +1396,14 @@ static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
 static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_queue *queue)
 {
 	struct ibmvfc_event *evt;
+	unsigned long flags;
 
+	spin_lock_irqsave(&queue->l_lock, flags);
 	BUG_ON(list_empty(&queue->free));
 	evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
 	atomic_set(&evt->free, 0);
 	list_del(&evt->queue_list);
+	spin_unlock_irqrestore(&queue->l_lock, flags);
 	return evt;
 }
 
@@ -1506,6 +1537,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
 			     struct ibmvfc_host *vhost, unsigned long timeout)
 {
 	__be64 *crq_as_u64 = (__be64 *) &evt->crq;
+	unsigned long flags;
 	int rc;
 
 	/* Copy the IU into the transfer area */
@@ -1517,7 +1549,6 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
 	else
 		BUG();
 
-	list_add_tail(&evt->queue_list, &evt->queue->sent);
 	timer_setup(&evt->timer, ibmvfc_timeout, 0);
 
 	if (timeout) {
@@ -1525,11 +1556,15 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
 		add_timer(&evt->timer);
 	}
 
+	spin_lock_irqsave(&evt->queue->l_lock, flags);
+	list_add_tail(&evt->queue_list, &evt->queue->sent);
+
 	mb();
 
 	if ((rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
 				  be64_to_cpu(crq_as_u64[1])))) {
 		list_del(&evt->queue_list);
+		spin_unlock_irqrestore(&evt->queue->l_lock, flags);
 		del_timer(&evt->timer);
 
 		/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
@@ -1554,8 +1589,10 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
 			evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR);
 			evt->done(evt);
 		}
-	} else
+	} else {
+		spin_unlock_irqrestore(&evt->queue->l_lock, flags);
 		ibmvfc_trc_start(evt);
+	}
 
 	return 0;
 }
@@ -1663,9 +1700,6 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
 		cmnd->scsi_done(cmnd);
 	}
 
-	if (evt->eh_comp)
-		complete(evt->eh_comp);
-
 	ibmvfc_free_event(evt);
 }
 
@@ -2219,28 +2253,28 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
 	ENTER;
 	do {
 		wait = 0;
-		spin_lock_irqsave(vhost->host->host_lock, flags);
+		spin_lock_irqsave(&vhost->crq.l_lock, flags);
 		list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
 			if (match(evt, device)) {
 				evt->eh_comp = &comp;
 				wait++;
 			}
 		}
-		spin_unlock_irqrestore(vhost->host->host_lock, flags);
+		spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
 
 		if (wait) {
 			timeout = wait_for_completion_timeout(&comp, timeout);
 
 			if (!timeout) {
 				wait = 0;
-				spin_lock_irqsave(vhost->host->host_lock, flags);
+				spin_lock_irqsave(&vhost->crq.l_lock, flags);
 				list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
 					if (match(evt, device)) {
 						evt->eh_comp = NULL;
 						wait++;
 					}
 				}
-				spin_unlock_irqrestore(vhost->host->host_lock, flags);
+				spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
 				if (wait)
 					dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
 				LEAVE;
@@ -2277,14 +2311,16 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
 	u16 status;
 
 	ENTER;
-	spin_lock_irqsave(vhost->host->host_lock, flags);
 	found_evt = NULL;
+	spin_lock_irqsave(vhost->host->host_lock, flags);
+	spin_lock(&vhost->crq.l_lock);
 	list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
 		if (evt->cmnd && evt->cmnd->device == sdev) {
 			found_evt = evt;
 			break;
 		}
 	}
+	spin_unlock(&vhost->crq.l_lock);
 
 	if (!found_evt) {
 		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
@@ -2414,14 +2450,16 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
 	unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT;
 	int rsp_code = 0;
 
-	spin_lock_irqsave(vhost->host->host_lock, flags);
 	found_evt = NULL;
+	spin_lock_irqsave(vhost->host->host_lock, flags);
+	spin_lock(&vhost->crq.l_lock);
 	list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
 		if (evt->cmnd && evt->cmnd->device == sdev) {
 			found_evt = evt;
 			break;
 		}
 	}
+	spin_unlock(&vhost->crq.l_lock);
 
 	if (!found_evt) {
 		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
@@ -2935,7 +2973,9 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
 	}
 
 	del_timer(&evt->timer);
+	spin_lock(&evt->queue->l_lock);
 	list_del(&evt->queue_list);
+	spin_unlock(&evt->queue->l_lock);
 	ibmvfc_trc_end(evt);
 	evt->done(evt);
 }
@@ -3328,6 +3368,7 @@ static void ibmvfc_tasklet(void *data)
 	int done = 0;
 
 	spin_lock_irqsave(vhost->host->host_lock, flags);
+	spin_lock(vhost->crq.q_lock);
 	while (!done) {
 		/* Pull all the valid messages off the async CRQ */
 		while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
@@ -3358,6 +3399,7 @@ static void ibmvfc_tasklet(void *data)
 			done = 1;
 	}
 
+	spin_unlock(vhost->crq.q_lock);
 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
 }
 
@@ -4734,6 +4776,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
 	struct ibmvfc_target *tgt;
 	unsigned long flags;
 	struct fc_rport *rport;
+	LIST_HEAD(purge);
 	int rc;
 
 	ibmvfc_log_ae(vhost, vhost->events_to_log);
@@ -4746,6 +4789,8 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
 		break;
 	case IBMVFC_HOST_ACTION_RESET:
 		vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
+		list_splice_init(&vhost->purge, &purge);
+		ibmvfc_complete_purge(&purge);
 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
 		rc = ibmvfc_reset_crq(vhost);
 		spin_lock_irqsave(vhost->host->host_lock, flags);
@@ -4759,6 +4804,8 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
 		break;
 	case IBMVFC_HOST_ACTION_REENABLE:
 		vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
+		list_splice_init(&vhost->purge, &purge);
+		ibmvfc_complete_purge(&purge);
 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
 		rc = ibmvfc_reenable_crq_queue(vhost);
 		spin_lock_irqsave(vhost->host->host_lock, flags);
@@ -4936,6 +4983,9 @@ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
 	size_t fmt_size;
 
 	ENTER;
+	spin_lock_init(&queue->_lock);
+	queue->q_lock = &queue->_lock;
+
 	switch (fmt) {
 	case IBMVFC_CRQ_FMT:
 		fmt_size = sizeof(*queue->msgs.crq);
@@ -5098,6 +5148,7 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
 	vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
 			       sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
+	atomic_set(&vhost->trace_index, -1);
 
 	if (!vhost->trace)
 		goto free_disc_buffer;
@@ -5214,6 +5265,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	vhost = shost_priv(shost);
 	INIT_LIST_HEAD(&vhost->targets);
+	INIT_LIST_HEAD(&vhost->purge);
 	sprintf(vhost->name, IBMVFC_NAME);
 	vhost->host = shost;
 	vhost->dev = dev;
@@ -5298,6 +5350,7 @@
 static int ibmvfc_remove(struct vio_dev *vdev)
 {
 	struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
+	LIST_HEAD(purge);
 	unsigned long flags;
 
 	ENTER;
@@ -5315,6 +5368,8 @@ static int ibmvfc_remove(struct vio_dev *vdev)
 	spin_lock_irqsave(vhost->host->host_lock, flags);
 	ibmvfc_purge_requests(vhost, DID_ERROR);
+	list_splice_init(&vhost->purge, &purge);
+	ibmvfc_complete_purge(&purge);
 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
 	ibmvfc_free_event_pool(vhost, &vhost->crq);
 
--- a/drivers/scsi/ibmvfc/ibmvfc.h
+++ b/drivers/scsi/ibmvfc/ibmvfc.h
@@ -768,10 +768,13 @@ struct ibmvfc_queue {
 	dma_addr_t msg_token;
 	enum ibmvfc_msg_fmt fmt;
 	int size, cur;
+	spinlock_t _lock;
+	spinlock_t *q_lock;
 
 	struct ibmvfc_event_pool evt_pool;
 	struct list_head sent;
 	struct list_head free;
+	spinlock_t l_lock;
 };
 
 enum ibmvfc_host_action {
@@ -808,11 +811,13 @@ struct ibmvfc_host {
 	enum ibmvfc_host_action action;
 #define IBMVFC_NUM_TRACE_INDEX_BITS	8
 #define IBMVFC_NUM_TRACE_ENTRIES	(1 << IBMVFC_NUM_TRACE_INDEX_BITS)
+#define IBMVFC_TRACE_INDEX_MASK		(IBMVFC_NUM_TRACE_ENTRIES - 1)
#define IBMVFC_TRACE_SIZE	(sizeof(struct ibmvfc_trace_entry) * IBMVFC_NUM_TRACE_ENTRIES)
 	struct ibmvfc_trace_entry *trace;
-	u32 trace_index:IBMVFC_NUM_TRACE_INDEX_BITS;
+	atomic_t trace_index;
 	int num_targets;
 	struct list_head targets;
+	struct list_head purge;
 	struct device *dev;
 	struct dma_pool *sg_pool;
 	mempool_t *tgt_pool;