Commit 870fe24f authored by Darren Trapp, committed by Martin K. Petersen

scsi: qla2xxx: Return busy if rport going away

This patch adds a mechanism to return -EBUSY when the rport is going
away, to prevent exhausting the FC-NVMe layer's retry counter.
Signed-off-by: Darren Trapp <darren.trapp@cavium.com>
Signed-off-by: Himanshu Madhani <himanshu.madhani@cavium.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 1763c1fd
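
For context, here is a minimal, self-contained sketch of the gating pattern
this patch introduces. It is not driver code: the fc_port_demo struct and the
three helper names are hypothetical stand-ins, and only the
NVME_FLAG_RESETTING bit, the flag set/clear/test logic, and the -EBUSY return
mirror the actual change.

/*
 * Illustrative sketch (not driver code): a per-port "resetting" flag is
 * set when firmware completes I/O with a link-loss status and is checked
 * in the submission path, which returns -EBUSY so the transport stalls
 * and requeues instead of consuming its retry budget.
 */
#include <errno.h>
#include <stdio.h>

#define NVME_FLAG_RESETTING 1	/* same bit the patch adds in qla_def.h */

struct fc_port_demo {		/* hypothetical stand-in for fc_port */
	unsigned int nvme_flag;
};

/* Completion path: firmware reported the port went away mid-I/O. */
static void on_port_unavailable(struct fc_port_demo *fcport)
{
	fcport->nvme_flag |= NVME_FLAG_RESETTING;
}

/* Re-registration path: the rport is back, so clear the gate. */
static void on_register_remote(struct fc_port_demo *fcport)
{
	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;
}

/* Submission path: stall the queue rather than fail the command. */
static int post_cmd(struct fc_port_demo *fcport)
{
	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
		return -EBUSY;	/* retry later, retry counter untouched */
	return 0;		/* queue the command */
}

int main(void)
{
	struct fc_port_demo port = { 0 };

	printf("port up:         %d\n", post_cmd(&port));	/* 0 */
	on_port_unavailable(&port);
	printf("port going away: %d\n", post_cmd(&port));	/* -EBUSY */
	on_register_remote(&port);
	printf("port back:       %d\n", post_cmd(&port));	/* 0 */
	return 0;
}

In the driver itself the flag is set in qla24xx_nvme_iocb_entry() on
link-loss completion statuses, cleared in qla_nvme_register_remote(), and
tested in qla_nvme_post_cmd(), as the hunks below show.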
drivers/scsi/qla2xxx/qla_def.h
@@ -2356,6 +2356,7 @@ typedef struct fc_port {
 	uint8_t nvme_flag;
 #define NVME_FLAG_REGISTERED	4
 #define NVME_FLAG_DELETING	2
+#define NVME_FLAG_RESETTING	1
 	struct fc_port *conflict;
 	unsigned char logout_completed;
drivers/scsi/qla2xxx/qla_isr.c
@@ -1910,9 +1910,11 @@ qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
 		ret = QLA_SUCCESS;
 		break;
 
-	case CS_ABORTED:
 	case CS_RESET:
 	case CS_PORT_UNAVAILABLE:
+		fcport->nvme_flag |= NVME_FLAG_RESETTING;
+		/* fall through */
+	case CS_ABORTED:
 	case CS_PORT_LOGGED_OUT:
 	case CS_PORT_BUSY:
 		ql_log(ql_log_warn, fcport->vha, 0x5060,
drivers/scsi/qla2xxx/qla_nvme.c
@@ -36,6 +36,7 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
 		return 0;
 
 	INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port);
+	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;
 	memset(&req, 0, sizeof(struct nvme_fc_port_info));
 	req.port_name = wwn_to_u64(fcport->port_name);
@@ -193,9 +194,9 @@ static void qla_nvme_abort_work(struct work_struct *work)
 	rval = ha->isp_ops->abort_command(sp);
 
 	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
-	    "%s: %s command for sp=%p on fcport=%p rval=%x\n", __func__,
-	    (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
-	    sp, fcport, rval);
+	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
+	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
+	    sp, sp->handle, fcport, rval);
 }
 
 static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
@@ -327,7 +328,7 @@ static int qla2x00_start_nvme_mq(srb_t *sp)
 	}
 
 	if (index == req->num_outstanding_cmds) {
-		rval = -1;
+		rval = -EBUSY;
 		goto queuing_error;
 	}
 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
@@ -341,7 +342,7 @@ static int qla2x00_start_nvme_mq(srb_t *sp)
 		req->cnt = req->length - (req->ring_index - cnt);
 
 		if (req->cnt < (req_cnt + 2)){
-			rval = -1;
+			rval = -EBUSY;
 			goto queuing_error;
 		}
 	}
@@ -476,14 +477,15 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
 	fc_port_t *fcport;
 	struct srb_iocb *nvme;
 	struct scsi_qla_host *vha;
-	int rval = QLA_FUNCTION_FAILED;
+	int rval = -ENODEV;
 	srb_t *sp;
 	struct qla_qpair *qpair = hw_queue_handle;
 	struct nvme_private *priv;
 	struct qla_nvme_rport *qla_rport = rport->private;
 
-	if (!fd) {
-		ql_log(ql_log_warn, NULL, 0x2134, "NO NVMe FCP request\n");
+	if (!fd || !qpair) {
+		ql_log(ql_log_warn, NULL, 0x2134,
+		    "NO NVMe request or Queue Handle\n");
 		return rval;
 	}
@@ -495,13 +497,21 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
 	}
 
 	vha = fcport->vha;
-	if (!qpair)
-		return -EBUSY;
+
+	/*
+	 * If we know the dev is going away while the transport is still
+	 * sending IO's, return busy back to stall the IO queue. This happens
+	 * when the link goes away and fw hasn't notified us yet, but IO's
+	 * are being returned. If the dev comes back quickly we won't exhaust
+	 * the IO retry count at the core.
+	 */
+	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
+		return -EBUSY;
 
 	/* Alloc SRB structure */
 	sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
 	if (!sp)
-		return -EIO;
+		return -EBUSY;
 
 	atomic_set(&sp->ref_count, 1);
 	init_waitqueue_head(&sp->nvme_ls_waitq);
@@ -519,7 +529,6 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
 		    "qla2x00_start_nvme_mq failed = %d\n", rval);
 		atomic_dec(&sp->ref_count);
 		wake_up(&sp->nvme_ls_waitq);
-		return -EIO;
 	}
 
 	return rval;