Commit 89c72f42 authored by Quinn Tran, committed by Martin K. Petersen

scsi: qla2xxx: Add IOCB resource tracking

This patch tracks the number of IOCB resources used in the I/O fast
path. If the number of IOCBs in use reaches a high-water limit, the
driver returns the I/O as busy and lets the upper layer retry. This
prevents oversubscription of IOCB resources, where a future
error-recovery command would be unable to cut through. Enable IOCB
throttling by default.

Link: https://lore.kernel.org/r/20200904045128.23631-12-njavali@marvell.com
Reviewed-by: Himanshu Madhani <himanshu.madhani@oracle.com>
Signed-off-by: Quinn Tran <qutran@marvell.com>
Signed-off-by: Arun Easi <aeasi@marvell.com>
Signed-off-by: Nilesh Javali <njavali@marvell.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 6152d20f
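
The throttle is a two-level check at I/O submission time: first against
the submitting queue pair's share of the firmware IOCB pool, then, if
that share is exhausted, against a global high-water mark summed over
all queue pairs. The standalone C sketch below models that accounting;
the struct and function names (qpair_model, model_get_iocbs,
model_put_iocbs) are illustrative stand-ins for the driver's qla_qpair
bookkeeping, not part of the patch itself.

    /*
     * Simplified model of the two-level IOCB throttle this patch adds.
     * The real helpers are qla_get_iocbs()/qla_put_iocbs() below.
     */
    struct qpair_model {
    	unsigned int iocbs_used;	/* IOCBs currently reserved on this qpair */
    	unsigned int iocbs_qp_limit;	/* this qpair's share: limit / num_qps */
    	unsigned int iocbs_limit;	/* global 95% high-water mark */
    };

    /* Return 0 if the I/O may be queued, -1 if the caller should report busy. */
    static int model_get_iocbs(struct qpair_model *qps, int num_qps,
    			   int my_qp, unsigned int iocb_cnt)
    {
    	unsigned int used = 0;
    	int i;

    	/* Level 1: cheap check against this qpair's own share. */
    	if (qps[my_qp].iocbs_used + iocb_cnt < qps[my_qp].iocbs_qp_limit) {
    		qps[my_qp].iocbs_used += iocb_cnt;
    		return 0;
    	}

    	/* Level 2: unlocked (approximate) sum of all qpairs vs. global limit. */
    	for (i = 0; i < num_qps; i++)
    		used += qps[i].iocbs_used;

    	if (used + iocb_cnt < qps[my_qp].iocbs_limit) {
    		qps[my_qp].iocbs_used += iocb_cnt;
    		return 0;
    	}
    	return -1;	/* oversubscribed: keep headroom for error recovery */
    }

    /* Release on completion, or on any submission failure after the reserve. */
    static void model_put_iocbs(struct qpair_model *qp, unsigned int iocb_cnt)
    {
    	if (qp->iocbs_used >= iocb_cnt)
    		qp->iocbs_used -= iocb_cnt;
    	else
    		qp->iocbs_used = 0;	/* should not happen; clamp */
    }
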
@@ -624,6 +624,12 @@ enum {
 	TYPE_TGT_TMCMD,		/* task management */
 };

+struct iocb_resource {
+	u8 res_type;
+	u8 pad;
+	u16 iocb_cnt;
+};
+
 typedef struct srb {
 	/*
 	 * Do not move cmd_type field, it needs to
@@ -631,6 +637,7 @@ typedef struct srb {
 	 */
 	uint8_t cmd_type;
 	uint8_t pad[3];
+	struct iocb_resource iores;
 	struct kref cmd_kref;	/* need to migrate ref_count over to this */
 	void *priv;
 	wait_queue_head_t nvme_ls_waitq;
@@ -3577,6 +3584,15 @@ struct req_que {
 	uint8_t req_pkt[REQUEST_ENTRY_SIZE];
 };

+struct qla_fw_resources {
+	u16 iocbs_total;
+	u16 iocbs_limit;
+	u16 iocbs_qp_limit;
+	u16 iocbs_used;
+};
+
+#define QLA_IOCB_PCT_LIMIT 95
+
 /*Queue pair data structure */
 struct qla_qpair {
 	spinlock_t qp_lock;
@@ -3629,6 +3645,7 @@ struct qla_qpair {
 	uint64_t retry_term_jiff;
 	struct qla_tgt_counters tgt_counters;
 	uint16_t cpuid;
+	struct qla_fw_resources fwres ____cacheline_aligned;
 };

 /* Place holder for FW buffer parameters */
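Two pieces of state back the accounting: struct iocb_resource is
embedded in each srb and records how many IOCBs the command reserved,
so the completion path knows how much to hand back, while struct
qla_fw_resources holds the per-queue-pair totals and the running
iocbs_used counter. The fwres member is ____cacheline_aligned,
presumably so the frequently written iocbs_used counter does not share
a cache line with other hot qla_qpair fields.
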
@@ -261,6 +261,8 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
 	struct scsi_qla_host *vha = s->private;
 	uint16_t mb[MAX_IOCB_MB_REG];
 	int rc;
+	struct qla_hw_data *ha = vha->hw;
+	u16 iocbs_used, i;

 	rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);
 	if (rc != QLA_SUCCESS) {
@@ -285,6 +287,18 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
 		    mb[23]);
 	}

+	if (ql2xenforce_iocb_limit) {
+		/* lock is not required. It's an estimate. */
+		iocbs_used = ha->base_qpair->fwres.iocbs_used;
+		for (i = 0; i < ha->max_qpairs; i++) {
+			if (ha->queue_pair_map[i])
+				iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
+		}
+
+		seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n",
+			   iocbs_used, ha->base_qpair->fwres.iocbs_limit);
+	}
+
 	return 0;
 }
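The debugfs read sums iocbs_used across queue pairs without taking any
qpair lock; concurrent submissions can change the counters mid-walk,
so the printed value is explicitly an estimate. That is acceptable for
a diagnostic counter and keeps locking out of the fast path.
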
@@ -129,6 +129,8 @@ int qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *);
 void qla_rscn_replay(fc_port_t *fcport);
 void qla24xx_free_purex_item(struct purex_item *item);
 extern bool qla24xx_risc_firmware_invalid(uint32_t *);
+void qla_init_iocb_limit(scsi_qla_host_t *);

 /*
  * Global Data in qla_os.c source file.
@@ -175,6 +177,7 @@ extern int qla2xuseresexchforels;
 extern int ql2xexlogins;
 extern int ql2xdifbundlinginternalbuffers;
 extern int ql2xfulldump_on_mpifail;
+extern int ql2xenforce_iocb_limit;

 extern int qla2x00_loop_reset(scsi_qla_host_t *);
 extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -3632,6 +3632,31 @@ qla24xx_detect_sfp(scsi_qla_host_t *vha)
 	return ha->flags.lr_detected;
 }

+void qla_init_iocb_limit(scsi_qla_host_t *vha)
+{
+	u16 i, num_qps;
+	u32 limit;
+	struct qla_hw_data *ha = vha->hw;
+
+	num_qps = ha->num_qpairs + 1;
+	limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
+
+	ha->base_qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
+	ha->base_qpair->fwres.iocbs_limit = limit;
+	ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps;
+	ha->base_qpair->fwres.iocbs_used = 0;
+	for (i = 0; i < ha->max_qpairs; i++) {
+		if (ha->queue_pair_map[i]) {
+			ha->queue_pair_map[i]->fwres.iocbs_total =
+				ha->orig_fw_iocb_count;
+			ha->queue_pair_map[i]->fwres.iocbs_limit = limit;
+			ha->queue_pair_map[i]->fwres.iocbs_qp_limit =
+				limit / num_qps;
+			ha->queue_pair_map[i]->fwres.iocbs_used = 0;
+		}
+	}
+}
+
 /**
  * qla2x00_setup_chip() - Load and start RISC firmware.
  * @vha: HA context
@@ -3731,6 +3756,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
 		    MIN_MULTI_ID_FABRIC - 1;
 	}
 	qla2x00_get_resource_cnts(vha);
+	qla_init_iocb_limit(vha);

 	/*
 	 * Allocate the array of outstanding commands
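qla_init_iocb_limit() caps usable IOCBs at QLA_IOCB_PCT_LIMIT (95%) of
what the firmware reports and gives each queue pair an equal share of
that cap. With hypothetical numbers:

    orig_fw_iocb_count = 2048                    (firmware-reported; hypothetical)
    limit              = 2048 * 95 / 100 = 1945  (global high-water mark)
    num_qps            = ha->num_qpairs + 1 = 4  (base qpair + 3 extra; hypothetical)
    iocbs_qp_limit     = 1945 / 4        = 486   (per-qpair fast-path share)

The 5% headroom is what lets error-recovery commands "cut through" when
the initiator path is saturated.
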
@@ -378,3 +378,58 @@ qla2xxx_get_fc4_priority(struct scsi_qla_host *vha)

 	return (data >> 6) & BIT_0 ? FC4_PRIORITY_FCP : FC4_PRIORITY_NVME;
 }
+
+enum {
+	RESOURCE_NONE,
+	RESOURCE_INI,
+};
+
+static inline int
+qla_get_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
+{
+	u16 iocbs_used, i;
+	struct qla_hw_data *ha = qp->vha->hw;
+
+	if (!ql2xenforce_iocb_limit) {
+		iores->res_type = RESOURCE_NONE;
+		return 0;
+	}
+
+	if ((iores->iocb_cnt + qp->fwres.iocbs_used) < qp->fwres.iocbs_qp_limit) {
+		qp->fwres.iocbs_used += iores->iocb_cnt;
+		return 0;
+	} else {
+		/* no need to acquire qpair lock. It's just a rough calculation. */
+		iocbs_used = ha->base_qpair->fwres.iocbs_used;
+		for (i = 0; i < ha->max_qpairs; i++) {
+			if (ha->queue_pair_map[i])
+				iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
+		}
+
+		if ((iores->iocb_cnt + iocbs_used) < qp->fwres.iocbs_limit) {
+			qp->fwres.iocbs_used += iores->iocb_cnt;
+			return 0;
+		} else {
+			iores->res_type = RESOURCE_NONE;
+			return -ENOSPC;
+		}
+	}
+}
+
+static inline void
+qla_put_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
+{
+	switch (iores->res_type) {
+	case RESOURCE_NONE:
+		break;
+	default:
+		if (qp->fwres.iocbs_used >= iores->iocb_cnt) {
+			qp->fwres.iocbs_used -= iores->iocb_cnt;
+		} else {
+			/* should not happen */
+			qp->fwres.iocbs_used = 0;
+		}
+		break;
+	}
+	iores->res_type = RESOURCE_NONE;
+}
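
The helpers impose a simple reserve/release protocol on callers, which
the qla24xx_start_scsi() family of hunks below follows. A condensed
view (locking and the rest of the submission path elided):

    sp->iores.res_type = RESOURCE_INI;	/* initiator-mode I/O */
    sp->iores.iocb_cnt = req_cnt;		/* IOCBs this command will consume */
    if (qla_get_iocbs(sp->qpair, &sp->iores))
    	goto queuing_error;		/* over the limit: fail back as busy */
    /* ... build the IOCBs and ring the request queue ... */

    /* on any later failure, and again when the command completes: */
    qla_put_iocbs(sp->qpair, &sp->iores);	/* safe to call twice: resets res_type */

Because qla_put_iocbs() sets res_type back to RESOURCE_NONE, a second
call on the same srb is a no-op, which is why both the error paths and
the ISR completion paths can release unconditionally.
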
@@ -1637,6 +1637,12 @@ qla24xx_start_scsi(srb_t *sp)
 	tot_dsds = nseg;
 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

+	sp->iores.res_type = RESOURCE_INI;
+	sp->iores.iocb_cnt = req_cnt;
+	if (qla_get_iocbs(sp->qpair, &sp->iores))
+		goto queuing_error;
+
 	if (req->cnt < (req_cnt + 2)) {
 		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
 		    rd_reg_dword_relaxed(req->req_q_out);
@@ -1709,6 +1715,7 @@ qla24xx_start_scsi(srb_t *sp)
 	if (tot_dsds)
 		scsi_dma_unmap(cmd);

+	qla_put_iocbs(sp->qpair, &sp->iores);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);

 	return QLA_FUNCTION_FAILED;
@@ -1822,6 +1829,12 @@ qla24xx_dif_start_scsi(srb_t *sp)
 	/* Total Data and protection sg segment(s) */
 	tot_prot_dsds = nseg;
 	tot_dsds += nseg;
+
+	sp->iores.res_type = RESOURCE_INI;
+	sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+	if (qla_get_iocbs(sp->qpair, &sp->iores))
+		goto queuing_error;
+
 	if (req->cnt < (req_cnt + 2)) {
 		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
 		    rd_reg_dword_relaxed(req->req_q_out);
@@ -1896,6 +1909,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
 	}
 	/* Cleanup will be performed by the caller (queuecommand) */

+	qla_put_iocbs(sp->qpair, &sp->iores);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);

 	return QLA_FUNCTION_FAILED;
 }
@@ -1957,6 +1971,12 @@ qla2xxx_start_scsi_mq(srb_t *sp)
 	tot_dsds = nseg;
 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

+	sp->iores.res_type = RESOURCE_INI;
+	sp->iores.iocb_cnt = req_cnt;
+	if (qla_get_iocbs(sp->qpair, &sp->iores))
+		goto queuing_error;
+
 	if (req->cnt < (req_cnt + 2)) {
 		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
 		    rd_reg_dword_relaxed(req->req_q_out);
@@ -2029,6 +2049,7 @@ qla2xxx_start_scsi_mq(srb_t *sp)
 	if (tot_dsds)
 		scsi_dma_unmap(cmd);

+	qla_put_iocbs(sp->qpair, &sp->iores);
 	spin_unlock_irqrestore(&qpair->qp_lock, flags);

 	return QLA_FUNCTION_FAILED;
@@ -2157,6 +2178,12 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
 	/* Total Data and protection sg segment(s) */
 	tot_prot_dsds = nseg;
 	tot_dsds += nseg;
+
+	sp->iores.res_type = RESOURCE_INI;
+	sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+	if (qla_get_iocbs(sp->qpair, &sp->iores))
+		goto queuing_error;
+
 	if (req->cnt < (req_cnt + 2)) {
 		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
 		    rd_reg_dword_relaxed(req->req_q_out);
@@ -2234,6 +2261,7 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
 	}
 	/* Cleanup will be performed by the caller (queuecommand) */

+	qla_put_iocbs(sp->qpair, &sp->iores);
 	spin_unlock_irqrestore(&qpair->qp_lock, flags);

 	return QLA_FUNCTION_FAILED;
 }
@@ -2901,6 +2901,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 		}
 		return;
 	}
+	qla_put_iocbs(sp->qpair, &sp->iores);

 	if (sp->cmd_type != TYPE_SRB) {
 		req->outstanding_cmds[handle] = NULL;
@@ -3313,6 +3314,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
 	default:
 		sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
 		if (sp) {
+			qla_put_iocbs(sp->qpair, &sp->iores);
 			sp->done(sp, res);
 			return 0;
 		}
@@ -40,6 +40,11 @@ module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
 		 "Set this to take full dump on MPI hang.");

+int ql2xenforce_iocb_limit = 1;
+module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql2xenforce_iocb_limit,
+		 "Enforce IOCB throttling, to avoid FW congestion. (default: 1)");
+
 /*
  * CT6 CTX allocation cache
 */
@@ -3316,6 +3321,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		for (i = 0; i < ha->max_qpairs; i++)
 			qla2xxx_create_qpair(base_vha, 5, 0, startit);
 	}
+	qla_init_iocb_limit(base_vha);

 	if (ha->flags.running_gold_fw)
 		goto skip_dpc;
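
ql2xenforce_iocb_limit defaults to 1, matching "Enable IOCB throttling
by default" in the commit message. Because it is registered with
S_IRUGO | S_IWUSR, it can also be read and toggled at runtime via
/sys/module/qla2xxx/parameters/ql2xenforce_iocb_limit without
reloading the driver.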