Commit 1db2c9c0 authored by Andreas Herrmann's avatar Andreas Herrmann Committed by James Bottomley

[SCSI] zfcp: fix bug during adapter shutdown

Fixes a race between zfcp_fsf_req_dismiss_all and
zfcp_qdio_reqid_check. During adapter shutdown it could happen that a
request was cleaned up twice: first during its normal
completion, and a second time when dismiss_all was called. The fix is to
serialize access to the fsf request list between zfcp_fsf_req_dismiss_all
and zfcp_qdio_reqid_check, and to delete an fsf request from the list when
its completion is triggered. (Additionally, a rwlock was replaced by a
spinlock and fsf_req_cleanup was eliminated.)
Signed-off-by: Andreas Herrmann <aherrman@de.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
parent 64b29a13
...@@ -520,7 +520,7 @@ zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command, ...@@ -520,7 +520,7 @@ zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
out: out:
if (fsf_req != NULL) if (fsf_req != NULL)
zfcp_fsf_req_cleanup(fsf_req); zfcp_fsf_req_free(fsf_req);
if ((adapter != NULL) && (retval != -ENXIO)) if ((adapter != NULL) && (retval != -ENXIO))
zfcp_adapter_put(adapter); zfcp_adapter_put(adapter);
...@@ -1149,7 +1149,7 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device) ...@@ -1149,7 +1149,7 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
INIT_LIST_HEAD(&adapter->port_remove_lh); INIT_LIST_HEAD(&adapter->port_remove_lh);
/* initialize list of fsf requests */ /* initialize list of fsf requests */
rwlock_init(&adapter->fsf_req_list_lock); spin_lock_init(&adapter->fsf_req_list_lock);
INIT_LIST_HEAD(&adapter->fsf_req_list_head); INIT_LIST_HEAD(&adapter->fsf_req_list_head);
/* initialize abort lock */ /* initialize abort lock */
...@@ -1234,9 +1234,9 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter) ...@@ -1234,9 +1234,9 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev); zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev);
dev_set_drvdata(&adapter->ccw_device->dev, NULL); dev_set_drvdata(&adapter->ccw_device->dev, NULL);
/* sanity check: no pending FSF requests */ /* sanity check: no pending FSF requests */
read_lock_irqsave(&adapter->fsf_req_list_lock, flags); spin_lock_irqsave(&adapter->fsf_req_list_lock, flags);
retval = !list_empty(&adapter->fsf_req_list_head); retval = !list_empty(&adapter->fsf_req_list_head);
read_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
if (retval) { if (retval) {
ZFCP_LOG_NORMAL("bug: adapter %s (%p) still in use, " ZFCP_LOG_NORMAL("bug: adapter %s (%p) still in use, "
"%i requests outstanding\n", "%i requests outstanding\n",
......
...@@ -861,7 +861,7 @@ struct zfcp_adapter { ...@@ -861,7 +861,7 @@ struct zfcp_adapter {
u32 ports; /* number of remote ports */ u32 ports; /* number of remote ports */
struct timer_list scsi_er_timer; /* SCSI err recovery watch */ struct timer_list scsi_er_timer; /* SCSI err recovery watch */
struct list_head fsf_req_list_head; /* head of FSF req list */ struct list_head fsf_req_list_head; /* head of FSF req list */
rwlock_t fsf_req_list_lock; /* lock for ops on list of spinlock_t fsf_req_list_lock; /* lock for ops on list of
FSF requests */ FSF requests */
atomic_t fsf_reqs_active; /* # active FSF reqs */ atomic_t fsf_reqs_active; /* # active FSF reqs */
struct zfcp_qdio_queue request_queue; /* request queue */ struct zfcp_qdio_queue request_queue; /* request queue */
......
...@@ -891,7 +891,7 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action) ...@@ -891,7 +891,7 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
if (erp_action->fsf_req) { if (erp_action->fsf_req) {
/* take lock to ensure that request is not being deleted meanwhile */ /* take lock to ensure that request is not being deleted meanwhile */
write_lock(&adapter->fsf_req_list_lock); spin_lock(&adapter->fsf_req_list_lock);
/* check whether fsf req does still exist */ /* check whether fsf req does still exist */
list_for_each_entry(fsf_req, &adapter->fsf_req_list_head, list) list_for_each_entry(fsf_req, &adapter->fsf_req_list_head, list)
if (fsf_req == erp_action->fsf_req) if (fsf_req == erp_action->fsf_req)
...@@ -934,7 +934,7 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action) ...@@ -934,7 +934,7 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
*/ */
erp_action->fsf_req = NULL; erp_action->fsf_req = NULL;
} }
write_unlock(&adapter->fsf_req_list_lock); spin_unlock(&adapter->fsf_req_list_lock);
} else } else
debug_text_event(adapter->erp_dbf, 3, "a_ca_noreq"); debug_text_event(adapter->erp_dbf, 3, "a_ca_noreq");
......
...@@ -116,7 +116,7 @@ extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *, ...@@ -116,7 +116,7 @@ extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *,
struct timer_list*, int); struct timer_list*, int);
extern int zfcp_fsf_req_complete(struct zfcp_fsf_req *); extern int zfcp_fsf_req_complete(struct zfcp_fsf_req *);
extern void zfcp_fsf_incoming_els(struct zfcp_fsf_req *); extern void zfcp_fsf_incoming_els(struct zfcp_fsf_req *);
extern void zfcp_fsf_req_cleanup(struct zfcp_fsf_req *); extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_command_task_management( extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_command_task_management(
struct zfcp_adapter *, struct zfcp_unit *, u8, int); struct zfcp_adapter *, struct zfcp_unit *, u8, int);
extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command( extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(
......
...@@ -61,7 +61,6 @@ static int zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *); ...@@ -61,7 +61,6 @@ static int zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *);
static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *); static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *);
static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *); static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *);
static void zfcp_fsf_req_dismiss(struct zfcp_fsf_req *); static void zfcp_fsf_req_dismiss(struct zfcp_fsf_req *);
static void zfcp_fsf_req_free(struct zfcp_fsf_req *);
/* association between FSF command and FSF QTCB type */ /* association between FSF command and FSF QTCB type */
static u32 fsf_qtcb_type[] = { static u32 fsf_qtcb_type[] = {
...@@ -149,7 +148,7 @@ zfcp_fsf_req_alloc(mempool_t *pool, int req_flags) ...@@ -149,7 +148,7 @@ zfcp_fsf_req_alloc(mempool_t *pool, int req_flags)
* *
* locks: none * locks: none
*/ */
static void void
zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req) zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req)
{ {
if (likely(fsf_req->pool != NULL)) if (likely(fsf_req->pool != NULL))
...@@ -170,30 +169,21 @@ zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req) ...@@ -170,30 +169,21 @@ zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req)
int int
zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
{ {
int retval = 0;
struct zfcp_fsf_req *fsf_req, *tmp; struct zfcp_fsf_req *fsf_req, *tmp;
unsigned long flags;
LIST_HEAD(remove_queue);
list_for_each_entry_safe(fsf_req, tmp, &adapter->fsf_req_list_head, spin_lock_irqsave(&adapter->fsf_req_list_lock, flags);
list) list_splice_init(&adapter->fsf_req_list_head, &remove_queue);
zfcp_fsf_req_dismiss(fsf_req);
/* wait_event_timeout? */
while (!list_empty(&adapter->fsf_req_list_head)) {
ZFCP_LOG_DEBUG("fsf req list of adapter %s not yet empty\n",
zfcp_get_busid_by_adapter(adapter));
/* wait for woken intiators to clean up their requests */
msleep(jiffies_to_msecs(ZFCP_FSFREQ_CLEANUP_TIMEOUT));
}
/* consistency check */
if (atomic_read(&adapter->fsf_reqs_active)) {
ZFCP_LOG_NORMAL("bug: There are still %d FSF requests pending "
"on adapter %s after cleanup.\n",
atomic_read(&adapter->fsf_reqs_active),
zfcp_get_busid_by_adapter(adapter));
atomic_set(&adapter->fsf_reqs_active, 0); atomic_set(&adapter->fsf_reqs_active, 0);
spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
list_for_each_entry_safe(fsf_req, tmp, &remove_queue, list) {
list_del(&fsf_req->list);
zfcp_fsf_req_dismiss(fsf_req);
} }
return retval; return 0;
} }
/* /*
...@@ -226,10 +216,6 @@ zfcp_fsf_req_complete(struct zfcp_fsf_req *fsf_req) ...@@ -226,10 +216,6 @@ zfcp_fsf_req_complete(struct zfcp_fsf_req *fsf_req)
{ {
int retval = 0; int retval = 0;
int cleanup; int cleanup;
struct zfcp_adapter *adapter = fsf_req->adapter;
/* do some statistics */
atomic_dec(&adapter->fsf_reqs_active);
if (unlikely(fsf_req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) { if (unlikely(fsf_req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
ZFCP_LOG_DEBUG("Status read response received\n"); ZFCP_LOG_DEBUG("Status read response received\n");
...@@ -260,7 +246,7 @@ zfcp_fsf_req_complete(struct zfcp_fsf_req *fsf_req) ...@@ -260,7 +246,7 @@ zfcp_fsf_req_complete(struct zfcp_fsf_req *fsf_req)
* lock must not be held here since it will be * lock must not be held here since it will be
* grabed by the called routine, too * grabed by the called routine, too
*/ */
zfcp_fsf_req_cleanup(fsf_req); zfcp_fsf_req_free(fsf_req);
} else { } else {
/* notify initiator waiting for the requests completion */ /* notify initiator waiting for the requests completion */
ZFCP_LOG_TRACE("waking initiator of FSF request %p\n",fsf_req); ZFCP_LOG_TRACE("waking initiator of FSF request %p\n",fsf_req);
...@@ -936,7 +922,7 @@ zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req) ...@@ -936,7 +922,7 @@ zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req)
if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
mempool_free(status_buffer, adapter->pool.data_status_read); mempool_free(status_buffer, adapter->pool.data_status_read);
zfcp_fsf_req_cleanup(fsf_req); zfcp_fsf_req_free(fsf_req);
goto out; goto out;
} }
...@@ -1033,7 +1019,7 @@ zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req) ...@@ -1033,7 +1019,7 @@ zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req)
break; break;
} }
mempool_free(status_buffer, adapter->pool.data_status_read); mempool_free(status_buffer, adapter->pool.data_status_read);
zfcp_fsf_req_cleanup(fsf_req); zfcp_fsf_req_free(fsf_req);
/* /*
* recycle buffer and start new request repeat until outbound * recycle buffer and start new request repeat until outbound
* queue is empty or adapter shutdown is requested * queue is empty or adapter shutdown is requested
...@@ -2258,7 +2244,7 @@ zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter, ...@@ -2258,7 +2244,7 @@ zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter,
wait_event(fsf_req->completion_wq, wait_event(fsf_req->completion_wq,
fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
del_timer_sync(timer); del_timer_sync(timer);
zfcp_fsf_req_cleanup(fsf_req); zfcp_fsf_req_free(fsf_req);
out: out:
kfree(timer); kfree(timer);
return retval; return retval;
...@@ -4607,7 +4593,7 @@ zfcp_fsf_req_wait_and_cleanup(struct zfcp_fsf_req *fsf_req, ...@@ -4607,7 +4593,7 @@ zfcp_fsf_req_wait_and_cleanup(struct zfcp_fsf_req *fsf_req,
*status = fsf_req->status; *status = fsf_req->status;
/* cleanup request */ /* cleanup request */
zfcp_fsf_req_cleanup(fsf_req); zfcp_fsf_req_free(fsf_req);
out: out:
return retval; return retval;
} }
...@@ -4806,9 +4792,9 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) ...@@ -4806,9 +4792,9 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
inc_seq_no = 0; inc_seq_no = 0;
/* put allocated FSF request at list tail */ /* put allocated FSF request at list tail */
write_lock_irqsave(&adapter->fsf_req_list_lock, flags); spin_lock_irqsave(&adapter->fsf_req_list_lock, flags);
list_add_tail(&fsf_req->list, &adapter->fsf_req_list_head); list_add_tail(&fsf_req->list, &adapter->fsf_req_list_head);
write_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
/* figure out expiration time of timeout and start timeout */ /* figure out expiration time of timeout and start timeout */
if (unlikely(timer)) { if (unlikely(timer)) {
...@@ -4852,9 +4838,9 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) ...@@ -4852,9 +4838,9 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
*/ */
if (timer) if (timer)
del_timer(timer); del_timer(timer);
write_lock_irqsave(&adapter->fsf_req_list_lock, flags); spin_lock_irqsave(&adapter->fsf_req_list_lock, flags);
list_del(&fsf_req->list); list_del(&fsf_req->list);
write_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
/* /*
* adjust the number of free SBALs in request queue as well as * adjust the number of free SBALs in request queue as well as
* position of first one * position of first one
...@@ -4892,25 +4878,4 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) ...@@ -4892,25 +4878,4 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
return retval; return retval;
} }
/*
* function: zfcp_fsf_req_cleanup
*
* purpose: cleans up an FSF request and removes it from the specified list
*
* returns:
*
* assumption: no pending SB in SBALEs other than QTCB
*/
void
zfcp_fsf_req_cleanup(struct zfcp_fsf_req *fsf_req)
{
struct zfcp_adapter *adapter = fsf_req->adapter;
unsigned long flags;
write_lock_irqsave(&adapter->fsf_req_list_lock, flags);
list_del(&fsf_req->list);
write_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
zfcp_fsf_req_free(fsf_req);
}
#undef ZFCP_LOG_AREA #undef ZFCP_LOG_AREA
...@@ -446,37 +446,37 @@ int ...@@ -446,37 +446,37 @@ int
zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, void *sbale_addr) zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, void *sbale_addr)
{ {
struct zfcp_fsf_req *fsf_req; struct zfcp_fsf_req *fsf_req;
int retval = 0;
/* invalid (per convention used in this driver) */ /* invalid (per convention used in this driver) */
if (unlikely(!sbale_addr)) { if (unlikely(!sbale_addr)) {
ZFCP_LOG_NORMAL("bug: invalid reqid\n"); ZFCP_LOG_NORMAL("bug: invalid reqid\n");
retval = -EINVAL; return -EINVAL;
goto out;
} }
/* valid request id and thus (hopefully :) valid fsf_req address */ /* valid request id and thus (hopefully :) valid fsf_req address */
fsf_req = (struct zfcp_fsf_req *) sbale_addr; fsf_req = (struct zfcp_fsf_req *) sbale_addr;
/* serialize with zfcp_fsf_req_dismiss_all */
spin_lock(&adapter->fsf_req_list_lock);
if (list_empty(&adapter->fsf_req_list_head)) {
spin_unlock(&adapter->fsf_req_list_lock);
return 0;
}
list_del(&fsf_req->list);
atomic_dec(&adapter->fsf_reqs_active);
spin_unlock(&adapter->fsf_req_list_lock);
if (unlikely(adapter != fsf_req->adapter)) { if (unlikely(adapter != fsf_req->adapter)) {
ZFCP_LOG_NORMAL("bug: invalid reqid (fsf_req=%p, " ZFCP_LOG_NORMAL("bug: invalid reqid (fsf_req=%p, "
"fsf_req->adapter=%p, adapter=%p)\n", "fsf_req->adapter=%p, adapter=%p)\n",
fsf_req, fsf_req->adapter, adapter); fsf_req, fsf_req->adapter, adapter);
retval = -EINVAL; return -EINVAL;
goto out;
}
ZFCP_LOG_TRACE("fsf_req at %p, QTCB at %p\n", fsf_req, fsf_req->qtcb);
if (likely(fsf_req->qtcb)) {
ZFCP_LOG_TRACE("hex dump of QTCB:\n");
ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) fsf_req->qtcb,
sizeof(struct fsf_qtcb));
} }
/* finish the FSF request */ /* finish the FSF request */
zfcp_fsf_req_complete(fsf_req); zfcp_fsf_req_complete(fsf_req);
out:
return retval; return 0;
} }
/** /**
......
...@@ -575,7 +575,7 @@ zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) ...@@ -575,7 +575,7 @@ zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
*(u64 *) & new_fsf_req->qtcb->header.fsf_status_qual.word[0]; *(u64 *) & new_fsf_req->qtcb->header.fsf_status_qual.word[0];
dbf_fsf_qual[1] = dbf_fsf_qual[1] =
*(u64 *) & new_fsf_req->qtcb->header.fsf_status_qual.word[2]; *(u64 *) & new_fsf_req->qtcb->header.fsf_status_qual.word[2];
zfcp_fsf_req_cleanup(new_fsf_req); zfcp_fsf_req_free(new_fsf_req);
#else #else
retval = zfcp_fsf_req_wait_and_cleanup(new_fsf_req, retval = zfcp_fsf_req_wait_and_cleanup(new_fsf_req,
ZFCP_UNINTERRUPTIBLE, &status); ZFCP_UNINTERRUPTIBLE, &status);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment