Commit 860784c8 authored by Robert Jennings, committed by James Bottomley

[SCSI] ibmvscsi: requeue while CRQ closed

CRQ sends that fail with H_CLOSED should be returned with
SCSI_MLQUEUE_HOST_BUSY until firmware alerts the client of a CRQ
transport event.  The transport event will either reinitialize and
requeue the requests or fail and return the IO with DID_ERROR.
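
In the send path this amounts to a small check; the first hunk below adds
roughly the following to ibmvscsi_send_srp_event() (condensed here, with the
send_busy/send_error labels and the surrounding error path left out):

	/* ibmvscsi_send_crq() failed; decide whether to requeue or error out.
	 * H_CLOSED means the receive queue is closed: firmware will follow up
	 * with a transport event (0xFF), handled in ibmvscsi_handle_crq().
	 */
	if (rc == H_CLOSED) {
		dev_warn(hostdata->dev, "send warning. "
		         "Receive queue closed, will retry.\n");
		goto send_busy;		/* caller gets SCSI_MLQUEUE_HOST_BUSY */
	}
	dev_err(hostdata->dev, "send error %d\n", rc);
	atomic_inc(&hostdata->request_limit);
	goto send_error;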

To avoid failing the eh_* functions while re-attaching to the server
adapter, the error handlers now retry for a period of time while
ibmvscsi_send_srp_event() returns SCSI_MLQUEUE_HOST_BUSY.
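
Both handlers share the same retry shape: take the host lock, compute a
deadline from init_timeout, and loop while the send path reports
SCSI_MLQUEUE_HOST_BUSY, dropping the lock and sleeping briefly between
attempts.  A condensed sketch of that loop (event lookup/setup elided; see
the hunks below for the full versions):

	unsigned long wait_switch;

	spin_lock_irqsave(hostdata->host->host_lock, flags);
	wait_switch = jiffies + (init_timeout * HZ);
	do {
		/* look up / allocate and initialize the task-management
		 * event here (this is the part that must stay in the loop)
		 */
		rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
		if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
			break;

		/* CRQ still closed: release the lock, wait, then try again */
		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
		msleep(10);
		spin_lock_irqsave(hostdata->host->host_lock, flags);
	} while (time_before(jiffies, wait_switch));
	spin_unlock_irqrestore(hostdata->host->host_lock, flags);

	if (rsp_rc != 0)
		return FAILED;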

In ibmvscsi_eh_abort_handler() the retry loop includes the search of
the event list because the host lock is dropped while waiting to try
again after ibmvscsi_send_srp_event() fails; the event could have been
purged if a login was in progress when the function was called.

In ibmvscsi_eh_device_reset_handler() the loop includes the call to
get_event_struct() because a failing call to ibmvscsi_send_srp_event()
will have freed the event struct.
Signed-off-by: Robert Jennings <rcj@linux.vnet.ibm.com>
Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
parent dc8875e1
@@ -629,6 +629,16 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 		list_del(&evt_struct->list);
 		del_timer(&evt_struct->timer);
 
+		/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
+		 * Firmware will send a CRQ with a transport event (0xFF) to
+		 * tell this client what has happened to the transport. This
+		 * will be handled in ibmvscsi_handle_crq()
+		 */
+		if (rc == H_CLOSED) {
+			dev_warn(hostdata->dev, "send warning. "
+			         "Receive queue closed, will retry.\n");
+			goto send_busy;
+		}
 		dev_err(hostdata->dev, "send error %d\n", rc);
 		atomic_inc(&hostdata->request_limit);
 		goto send_error;
@@ -976,58 +986,74 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 	int rsp_rc;
 	unsigned long flags;
 	u16 lun = lun_from_dev(cmd->device);
+	unsigned long wait_switch = 0;
 
 	/* First, find this command in our sent list so we can figure
 	 * out the correct tag
 	 */
 	spin_lock_irqsave(hostdata->host->host_lock, flags);
-	found_evt = NULL;
-	list_for_each_entry(tmp_evt, &hostdata->sent, list) {
-		if (tmp_evt->cmnd == cmd) {
-			found_evt = tmp_evt;
-			break;
+	wait_switch = jiffies + (init_timeout * HZ);
+	do {
+		found_evt = NULL;
+		list_for_each_entry(tmp_evt, &hostdata->sent, list) {
+			if (tmp_evt->cmnd == cmd) {
+				found_evt = tmp_evt;
+				break;
+			}
 		}
-	}
 
-	if (!found_evt) {
-		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
-		return SUCCESS;
-	}
+		if (!found_evt) {
+			spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+			return SUCCESS;
+		}
 
-	evt = get_event_struct(&hostdata->pool);
-	if (evt == NULL) {
-		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
-		sdev_printk(KERN_ERR, cmd->device, "failed to allocate abort event\n");
-		return FAILED;
-	}
+		evt = get_event_struct(&hostdata->pool);
+		if (evt == NULL) {
+			spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+			sdev_printk(KERN_ERR, cmd->device,
+				    "failed to allocate abort event\n");
+			return FAILED;
+		}
 
-	init_event_struct(evt,
-			  sync_completion,
-			  VIOSRP_SRP_FORMAT,
-			  init_timeout);
+		init_event_struct(evt,
+				  sync_completion,
+				  VIOSRP_SRP_FORMAT,
+				  init_timeout);
 
-	tsk_mgmt = &evt->iu.srp.tsk_mgmt;
+		tsk_mgmt = &evt->iu.srp.tsk_mgmt;
 
-	/* Set up an abort SRP command */
-	memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
-	tsk_mgmt->opcode = SRP_TSK_MGMT;
-	tsk_mgmt->lun = ((u64) lun) << 48;
-	tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
-	tsk_mgmt->task_tag = (u64) found_evt;
+		/* Set up an abort SRP command */
+		memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
+		tsk_mgmt->opcode = SRP_TSK_MGMT;
+		tsk_mgmt->lun = ((u64) lun) << 48;
+		tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
+		tsk_mgmt->task_tag = (u64) found_evt;
 
-	sdev_printk(KERN_INFO, cmd->device, "aborting command. lun 0x%lx, tag 0x%lx\n",
-		    tsk_mgmt->lun, tsk_mgmt->task_tag);
+		evt->sync_srp = &srp_rsp;
+
+		init_completion(&evt->comp);
+
+		rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
+
+		if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
+			break;
+
+		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+		msleep(10);
+		spin_lock_irqsave(hostdata->host->host_lock, flags);
+	} while (time_before(jiffies, wait_switch));
 
-	evt->sync_srp = &srp_rsp;
-	init_completion(&evt->comp);
-	rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+
 	if (rsp_rc != 0) {
 		sdev_printk(KERN_ERR, cmd->device,
 			    "failed to send abort() event. rc=%d\n", rsp_rc);
 		return FAILED;
 	}
 
+	sdev_printk(KERN_INFO, cmd->device,
+		    "aborting command. lun 0x%lx, tag 0x%lx\n",
+		    (((u64) lun) << 48), (u64) found_evt);
+
 	wait_for_completion(&evt->comp);
 
 	/* make sure we got a good response */
@@ -1099,41 +1125,56 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 	int rsp_rc;
 	unsigned long flags;
 	u16 lun = lun_from_dev(cmd->device);
+	unsigned long wait_switch = 0;
 
 	spin_lock_irqsave(hostdata->host->host_lock, flags);
-	evt = get_event_struct(&hostdata->pool);
-	if (evt == NULL) {
-		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
-		sdev_printk(KERN_ERR, cmd->device, "failed to allocate reset event\n");
-		return FAILED;
-	}
+	wait_switch = jiffies + (init_timeout * HZ);
+	do {
+		evt = get_event_struct(&hostdata->pool);
+		if (evt == NULL) {
+			spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+			sdev_printk(KERN_ERR, cmd->device,
+				    "failed to allocate reset event\n");
+			return FAILED;
+		}
 
-	init_event_struct(evt,
-			  sync_completion,
-			  VIOSRP_SRP_FORMAT,
-			  init_timeout);
+		init_event_struct(evt,
+				  sync_completion,
+				  VIOSRP_SRP_FORMAT,
+				  init_timeout);
 
-	tsk_mgmt = &evt->iu.srp.tsk_mgmt;
+		tsk_mgmt = &evt->iu.srp.tsk_mgmt;
 
-	/* Set up a lun reset SRP command */
-	memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
-	tsk_mgmt->opcode = SRP_TSK_MGMT;
-	tsk_mgmt->lun = ((u64) lun) << 48;
-	tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
+		/* Set up a lun reset SRP command */
+		memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
+		tsk_mgmt->opcode = SRP_TSK_MGMT;
+		tsk_mgmt->lun = ((u64) lun) << 48;
+		tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
 
-	sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%lx\n",
-		    tsk_mgmt->lun);
+		evt->sync_srp = &srp_rsp;
+
+		init_completion(&evt->comp);
+
+		rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
+
+		if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
+			break;
+
+		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+		msleep(10);
+		spin_lock_irqsave(hostdata->host->host_lock, flags);
+	} while (time_before(jiffies, wait_switch));
 
-	evt->sync_srp = &srp_rsp;
-	init_completion(&evt->comp);
-	rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+
 	if (rsp_rc != 0) {
 		sdev_printk(KERN_ERR, cmd->device,
 			    "failed to send reset event. rc=%d\n", rsp_rc);
 		return FAILED;
 	}
 
+	sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%lx\n",
+		    (((u64) lun) << 48));
+
 	wait_for_completion(&evt->comp);
 
 	/* make sure we got a good response */