Commit 5e5b511d authored by James Smart, committed by Martin K. Petersen

scsi: lpfc: Partition XRI buffer list across Hardware Queues

Once the IO buffer allocations were made shared, there was a single XRI
buffer list shared by all hardware queues.  A single shared list performs
poorly when accessed concurrently from the per-CPU hardware queues.

Create a separate XRI IO buffer get/put list for each Hardware Queue.  As
SGLs and associated IO buffers are allocated and posted to the firmware,
round-robin their assignment across all available Hardware Queues so that
the distribution is equitable.
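
For illustration, a minimal sketch of that round-robin placement, assuming
the per-queue fields introduced below (lpfc_io_buf_list_put, put_io_bufs,
total_io_bufs); the helper name is hypothetical and not part of this patch:

    /* Hypothetical sketch: spread freshly posted IO buffers evenly
     * across the hardware queues, one buffer per queue in turn.
     */
    static void
    example_spread_io_bufs(struct lpfc_hba *phba, struct list_head *cbuf)
    {
        struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
        struct lpfc_sli4_hdw_queue *qp;
        unsigned long iflag;
        int idx = 0;

        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, cbuf, list) {
            list_del_init(&lpfc_ncmd->list);
            qp = &phba->sli4_hba.hdwq[idx];

            spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
            list_add_tail(&lpfc_ncmd->list, &qp->lpfc_io_buf_list_put);
            qp->put_io_bufs++;
            qp->total_io_bufs++;
            spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);

            /* next buffer goes to the next queue */
            idx = (idx + 1) % phba->cfg_hdw_queue;
        }
    }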

Modify SCSI and NVME IO submit code paths to use the Hardware Queue logic
for XRI allocation.
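
The per-queue selection itself is small.  The SCSI path (see
lpfc_get_scsi_buf_s4 in the diff below) folds the submitting CPU onto the
configured queue count, while the NVME path uses the queue index handed
down by the nvme-fc layer.  A sketch, with a hypothetical helper name:

    /* Sketch of the CPU-to-Hardware-Queue mapping used by the SCSI
     * submit path, so each CPU consistently hits "its" XRI buffer list.
     */
    static inline struct lpfc_sli4_hdw_queue *
    example_cpu_to_hdwq(struct lpfc_hba *phba, uint32_t *idxp)
    {
        uint32_t cpu = smp_processor_id();
        uint32_t idx;

        if (cpu < phba->cfg_hdw_queue)
            idx = cpu;
        else
            idx = cpu % phba->cfg_hdw_queue;

        *idxp = idx;    /* cached in the IO buffer for the release path */
        return &phba->sli4_hba.hdwq[idx];
    }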

Add a debugfs interface to display Hardware Queue statistics.

Add a new empty_io_bufs counter to track when a CPU runs out of XRIs.
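
The counter is bumped on the allocation path when both per-queue lists come
up empty.  In outline (a condensed, hypothetical sketch of the
lpfc_get_scsi_buf_s4/lpfc_get_nvme_buf logic in the diff below, not the
verbatim code):

    static struct lpfc_nvme_buf *
    example_get_io_buf(struct lpfc_hba *phba, int idx)
    {
        struct lpfc_sli4_hdw_queue *qp = &phba->sli4_hba.hdwq[idx];
        struct lpfc_nvme_buf *buf = NULL;
        unsigned long iflag;

        spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag);
        if (list_empty(&qp->lpfc_io_buf_list_get)) {
            /* refill the get list from buffers freed on this queue */
            spin_lock(&qp->io_buf_list_put_lock);
            list_splice_init(&qp->lpfc_io_buf_list_put,
                             &qp->lpfc_io_buf_list_get);
            qp->get_io_bufs += qp->put_io_bufs;
            qp->put_io_bufs = 0;
            spin_unlock(&qp->io_buf_list_put_lock);
        }
        if (!list_empty(&qp->lpfc_io_buf_list_get)) {
            buf = list_first_entry(&qp->lpfc_io_buf_list_get,
                                   struct lpfc_nvme_buf, list);
            list_del_init(&buf->list);
            qp->get_io_bufs--;
        } else {
            qp->empty_io_bufs++;    /* the new starvation counter */
        }
        spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
        return buf;
    }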

Replace common_ variables/names with io_ to make meanings clearer.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent cdb42bec
@@ -965,13 +965,6 @@ struct lpfc_hba {
 	struct list_head lpfc_scsi_buf_list_get;
 	struct list_head lpfc_scsi_buf_list_put;
 	uint32_t total_scsi_bufs;
-	spinlock_t common_buf_list_get_lock;  /* Common buf alloc list lock */
-	spinlock_t common_buf_list_put_lock;  /* Common buf free list lock */
-	struct list_head lpfc_common_buf_list_get;
-	struct list_head lpfc_common_buf_list_put;
-	uint32_t total_common_bufs;
-	uint32_t get_common_bufs;
-	uint32_t put_common_bufs;
 	struct list_head lpfc_iocb_list;
 	uint32_t total_iocbq_bufs;
 	struct list_head active_rrq_list;
@@ -1045,6 +1038,7 @@ struct lpfc_hba {
 	struct dentry *debug_nvmeio_trc;
 	struct lpfc_debugfs_nvmeio_trc *nvmeio_trc;
+	struct dentry *debug_hdwqinfo;
 	atomic_t nvmeio_trc_cnt;
 	uint32_t nvmeio_trc_size;
 	uint32_t nvmeio_trc_output_idx;
...
@@ -337,7 +337,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 				"XRI Dist lpfc%d Total %d IO %d ELS %d\n",
 				phba->brd_no,
 				phba->sli4_hba.max_cfg_param.max_xri,
-				phba->sli4_hba.common_xri_max,
+				phba->sli4_hba.io_xri_max,
 				lpfc_sli4_get_els_iocb_cnt(phba));
 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
 			goto buffer_done;
...
@@ -515,10 +515,12 @@ int lpfc_sli4_read_config(struct lpfc_hba *);
 void lpfc_sli4_node_prep(struct lpfc_hba *);
 int lpfc_sli4_els_sgl_update(struct lpfc_hba *phba);
 int lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba);
-int lpfc_sli4_common_sgl_update(struct lpfc_hba *phba);
-int lpfc_sli4_post_common_sgl_list(struct lpfc_hba *phba,
+int lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *sglist);
+int lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf);
+int lpfc_sli4_io_sgl_update(struct lpfc_hba *phba);
+int lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
 			       struct list_head *blist, int xricnt);
-int lpfc_new_common_buf(struct lpfc_hba *phba, int num_to_alloc);
+int lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc);
 void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
 uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
 int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
...
@@ -378,6 +378,73 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
 	return len;
 }

+static int lpfc_debugfs_last_hdwq;
+
+/**
+ * lpfc_debugfs_hdwqinfo_data - Dump Hardware Queue info to a buffer
+ * @phba: The HBA to gather host buffer info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the Hardware Queue info from the @phba to @buf up to
+ * @size number of bytes. A header that describes the current hdwq state will be
+ * dumped to @buf first and then info on each hdwq entry will be dumped to @buf
+ * until @size bytes have been dumped or all the hdwq info has been dumped.
+ *
+ * Notes:
+ * This routine will rotate through each configured Hardware Queue each
+ * time called.
+ *
+ * Return Value:
+ * This routine returns the amount of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_hdwqinfo_data(struct lpfc_hba *phba, char *buf, int size)
+{
+	struct lpfc_sli4_hdw_queue *qp;
+	int len = 0;
+	int i, out;
+	unsigned long iflag;
+
+	if (phba->sli_rev != LPFC_SLI_REV4)
+		return 0;
+
+	if (!phba->sli4_hba.hdwq)
+		return 0;
+
+	for (i = 0; i < phba->cfg_hdw_queue; i++) {
+		if (len > (LPFC_HDWQINFO_SIZE - 80))
+			break;
+		qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_hdwq];
+
+		len += snprintf(buf + len, size - len, "HdwQ %d Info ", i);
+		spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag);
+		spin_lock(&qp->abts_nvme_buf_list_lock);
+		spin_lock(&qp->io_buf_list_get_lock);
+		spin_lock(&qp->io_buf_list_put_lock);
+		out = qp->total_io_bufs - (qp->get_io_bufs + qp->put_io_bufs +
+			qp->abts_scsi_io_bufs + qp->abts_nvme_io_bufs);
+		len += snprintf(buf + len, size - len,
+				"tot:%d get:%d put:%d mt:%d "
+				"ABTS scsi:%d nvme:%d Out:%d\n",
+				qp->total_io_bufs, qp->get_io_bufs, qp->put_io_bufs,
+				qp->empty_io_bufs, qp->abts_scsi_io_bufs,
+				qp->abts_nvme_io_bufs, out);
+		spin_unlock(&qp->io_buf_list_put_lock);
+		spin_unlock(&qp->io_buf_list_get_lock);
+		spin_unlock(&qp->abts_nvme_buf_list_lock);
+		spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag);
+
+		lpfc_debugfs_last_hdwq++;
+		if (lpfc_debugfs_last_hdwq >= phba->cfg_hdw_queue)
+			lpfc_debugfs_last_hdwq = 0;
+	}
+
+	return len;
+}
+
 static int lpfc_debugfs_last_hba_slim_off;

 /**
@@ -863,17 +930,17 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 		len += snprintf(buf + len, size - len, "\n");

 		cnt = 0;
-		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 		list_for_each_entry_safe(ctxp, next_ctxp,
 				&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
 				list) {
 			cnt++;
 		}
-		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 		if (cnt) {
 			len += snprintf(buf + len, size - len,
 					"ABORT: %d ctx entries\n", cnt);
-			spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+			spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 			list_for_each_entry_safe(ctxp, next_ctxp,
 					&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
 					list) {
@@ -885,7 +952,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 					ctxp->oxid, ctxp->state,
 					ctxp->flag);
 			}
-			spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+			spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 		}

 	/* Calculate outstanding IOs */
@@ -1619,6 +1686,48 @@ lpfc_debugfs_hbqinfo_open(struct inode *inode, struct file *file)
 	return rc;
 }

+/**
+ * lpfc_debugfs_hdwqinfo_open - Open the hdwqinfo debugfs buffer
+ * @inode: The inode pointer that contains a vport pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the vport from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this vport, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_hdwqinfo_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_hba *phba = inode->i_private;
+	struct lpfc_debug *debug;
+	int rc = -ENOMEM;
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	/* Round to page boundary */
+	debug->buffer = kmalloc(LPFC_HDWQINFO_SIZE, GFP_KERNEL);
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->len = lpfc_debugfs_hdwqinfo_data(phba, debug->buffer,
+						LPFC_HDWQINFO_SIZE);
+
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
 /**
  * lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer
  * @inode: The inode pointer that contains a vport pointer.
@@ -4819,6 +4928,15 @@ static const struct file_operations lpfc_debugfs_op_hbqinfo = {
 	.release =      lpfc_debugfs_release,
 };

+#undef lpfc_debugfs_op_hdwqinfo
+static const struct file_operations lpfc_debugfs_op_hdwqinfo = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_debugfs_hdwqinfo_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_debugfs_read,
+	.release =      lpfc_debugfs_release,
+};
+
 #undef lpfc_debugfs_op_dumpHBASlim
 static const struct file_operations lpfc_debugfs_op_dumpHBASlim = {
 	.owner =        THIS_MODULE,
@@ -5244,6 +5362,18 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
 						 phba->hba_debugfs_root,
 						 phba, &lpfc_debugfs_op_hbqinfo);

+		/* Setup hdwqinfo */
+		snprintf(name, sizeof(name), "hdwqinfo");
+		phba->debug_hdwqinfo =
+			debugfs_create_file(name, S_IFREG | 0644,
+					    phba->hba_debugfs_root,
+					    phba, &lpfc_debugfs_op_hdwqinfo);
+		if (!phba->debug_hdwqinfo) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "0411 Cant create debugfs hdwqinfo\n");
+			goto debug_failed;
+		}
+
 		/* Setup dumpHBASlim */
 		if (phba->sli_rev < LPFC_SLI_REV4) {
 			snprintf(name, sizeof(name), "dumpHBASlim");
@@ -5630,6 +5760,9 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
 		debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */
 		phba->debug_hbqinfo = NULL;

+		debugfs_remove(phba->debug_hdwqinfo); /* hdwqinfo */
+		phba->debug_hdwqinfo = NULL;
+
 		debugfs_remove(phba->debug_dumpHBASlim); /* HBASlim */
 		phba->debug_dumpHBASlim = NULL;
...
@@ -284,6 +284,9 @@ struct lpfc_idiag {
 #endif

+/* hdwqinfo output buffer size */
+#define LPFC_HDWQINFO_SIZE 8192
+
 enum {
 	DUMP_FCP,
 	DUMP_NVME,
...
This diff is collapsed.
@@ -58,7 +58,7 @@

 static struct lpfc_nvme_buf *
 lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
-		  int expedite);
+		  int idx, int expedite);

 static void
 lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_nvme_buf *);
@@ -1545,7 +1545,8 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 		}
 	}

-	lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, expedite);
+	lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp,
+				      lpfc_queue_info->index, expedite);
 	if (lpfc_ncmd == NULL) {
 		atomic_inc(&lport->xmt_fcp_noxri);
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
@@ -1913,24 +1914,26 @@ static struct nvme_fc_port_template lpfc_nvme_template = {
 };

 static inline struct lpfc_nvme_buf *
-lpfc_nvme_buf(struct lpfc_hba *phba)
+lpfc_nvme_buf(struct lpfc_hba *phba, int idx)
 {
+	struct lpfc_sli4_hdw_queue *qp;
 	struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;

+	qp = &phba->sli4_hba.hdwq[idx];
 	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
-				 &phba->lpfc_common_buf_list_get, list) {
+				 &qp->lpfc_io_buf_list_get, list) {
 		list_del_init(&lpfc_ncmd->list);
-		phba->get_common_bufs--;
+		qp->get_io_bufs--;
 		return lpfc_ncmd;
 	}
 	return NULL;
 }

 /**
- * lpfc_get_nvme_buf - Get a nvme buffer from lpfc_common_buf_list of the HBA
+ * lpfc_get_nvme_buf - Get a nvme buffer from io_buf_list of the HBA
  * @phba: The HBA for which this call is being executed.
  *
- * This routine removes a nvme buffer from head of @phba lpfc_common_buf_list
+ * This routine removes a nvme buffer from head of @hdwq io_buf_list
  * and returns to caller.
  *
  * Return codes:
@@ -1939,30 +1942,32 @@ lpfc_nvme_buf(struct lpfc_hba *phba)
 **/
 static struct lpfc_nvme_buf *
 lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
-		  int expedite)
+		  int idx, int expedite)
 {
 	struct lpfc_nvme_buf *lpfc_ncmd = NULL;
+	struct lpfc_sli4_hdw_queue *qp;
 	struct sli4_sge *sgl;
 	struct lpfc_iocbq *pwqeq;
 	union lpfc_wqe128 *wqe;
 	unsigned long iflag = 0;

-	spin_lock_irqsave(&phba->common_buf_list_get_lock, iflag);
-	if (phba->get_common_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
-		lpfc_ncmd = lpfc_nvme_buf(phba);
+	qp = &phba->sli4_hba.hdwq[idx];
+	spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag);
+	if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
+		lpfc_ncmd = lpfc_nvme_buf(phba, idx);
 	if (!lpfc_ncmd) {
-		spin_lock(&phba->common_buf_list_put_lock);
-		list_splice(&phba->lpfc_common_buf_list_put,
-			    &phba->lpfc_common_buf_list_get);
-		phba->get_common_bufs += phba->put_common_bufs;
-		INIT_LIST_HEAD(&phba->lpfc_common_buf_list_put);
-		phba->put_common_bufs = 0;
-		spin_unlock(&phba->common_buf_list_put_lock);
-		if (phba->get_common_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
+		spin_lock(&qp->io_buf_list_put_lock);
+		list_splice(&qp->lpfc_io_buf_list_put,
+			    &qp->lpfc_io_buf_list_get);
+		qp->get_io_bufs += qp->put_io_bufs;
+		INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
+		qp->put_io_bufs = 0;
+		spin_unlock(&qp->io_buf_list_put_lock);
+		if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
 		    expedite)
-			lpfc_ncmd = lpfc_nvme_buf(phba);
+			lpfc_ncmd = lpfc_nvme_buf(phba, idx);
 	}
-	spin_unlock_irqrestore(&phba->common_buf_list_get_lock, iflag);
+	spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);

 	if (lpfc_ncmd) {
 		pwqeq = &(lpfc_ncmd->cur_iocbq);
@@ -1975,6 +1980,7 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 		pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
 		lpfc_ncmd->start_time = jiffies;
 		lpfc_ncmd->flags = 0;
+		lpfc_ncmd->hdwq = idx;

 		/* Rsp SGE will be filled in when we rcv an IO
 		 * from the NVME Layer to be sent.
@@ -1993,7 +1999,10 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 			atomic_inc(&ndlp->cmd_pending);
 			lpfc_ncmd->flags |= LPFC_BUMP_QDEPTH;
 		}
-	}
+	} else
+		qp->empty_io_bufs++;
+
 	return lpfc_ncmd;
 }
@@ -2003,13 +2012,14 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
  * @lpfc_ncmd: The nvme buffer which is being released.
  *
  * This routine releases @lpfc_ncmd nvme buffer by adding it to tail of @phba
- * lpfc_common_buf_list list. For SLI4 XRI's are tied to the nvme buffer
+ * lpfc_io_buf_list list. For SLI4 XRI's are tied to the nvme buffer
  * and cannot be reused for at least RA_TOV amount of time if it was
  * aborted.
  **/
 static void
 lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
 {
+	struct lpfc_sli4_hdw_queue *qp;
 	unsigned long iflag = 0;

 	if ((lpfc_ncmd->flags & LPFC_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
@@ -2018,6 +2028,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
 	lpfc_ncmd->ndlp = NULL;
 	lpfc_ncmd->flags &= ~LPFC_BUMP_QDEPTH;

+	qp = &phba->sli4_hba.hdwq[lpfc_ncmd->hdwq];
 	if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
 				"6310 XB release deferred for "
@@ -2025,21 +2036,21 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
 				lpfc_ncmd->cur_iocbq.sli4_xritag,
 				lpfc_ncmd->cur_iocbq.iotag);

-		spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
-				  iflag);
+		spin_lock_irqsave(&qp->abts_nvme_buf_list_lock, iflag);
 		list_add_tail(&lpfc_ncmd->list,
-			      &phba->sli4_hba.lpfc_abts_nvme_buf_list);
-		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
-				       iflag);
+			      &qp->lpfc_abts_nvme_buf_list);
+		qp->abts_nvme_io_bufs++;
+		spin_unlock_irqrestore(&qp->abts_nvme_buf_list_lock, iflag);
 	} else {
 		/* MUST zero fields if buffer is reused by another protocol */
 		lpfc_ncmd->nvmeCmd = NULL;
 		lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
-		spin_lock_irqsave(&phba->common_buf_list_put_lock, iflag);
+
+		spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
 		list_add_tail(&lpfc_ncmd->list,
-			      &phba->lpfc_common_buf_list_put);
-		phba->put_common_bufs++;
-		spin_unlock_irqrestore(&phba->common_buf_list_put_lock, iflag);
+			      &qp->lpfc_io_buf_list_put);
+		qp->put_io_bufs++;
+		spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
 	}
 }
@@ -2517,27 +2528,28 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 **/
 void
 lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
-			   struct sli4_wcqe_xri_aborted *axri)
+			   struct sli4_wcqe_xri_aborted *axri, int idx)
 {
 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
 	struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd;
 	struct nvmefc_fcp_req *nvme_cmd = NULL;
 	struct lpfc_nodelist *ndlp;
+	struct lpfc_sli4_hdw_queue *qp;
 	unsigned long iflag = 0;

 	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
 		return;
+
+	qp = &phba->sli4_hba.hdwq[idx];
 	spin_lock_irqsave(&phba->hbalock, iflag);
-	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+	spin_lock(&qp->abts_nvme_buf_list_lock);
 	list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
-				 &phba->sli4_hba.lpfc_abts_nvme_buf_list,
-				 list) {
+				 &qp->lpfc_abts_nvme_buf_list, list) {
 		if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
 			list_del_init(&lpfc_ncmd->list);
+			qp->abts_nvme_io_bufs--;
 			lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
 			lpfc_ncmd->status = IOSTAT_SUCCESS;
-			spin_unlock(
-				&phba->sli4_hba.abts_nvme_buf_list_lock);
+			spin_unlock(&qp->abts_nvme_buf_list_lock);
 			spin_unlock_irqrestore(&phba->hbalock, iflag);

 			ndlp = lpfc_ncmd->ndlp;
@@ -2563,7 +2575,7 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
 			return;
 		}
 	}
-	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+	spin_unlock(&qp->abts_nvme_buf_list_lock);
 	spin_unlock_irqrestore(&phba->hbalock, iflag);

 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
...
@@ -84,6 +84,8 @@ struct lpfc_nvme_buf {
 	dma_addr_t dma_phys_sgl;
 	struct sli4_sge *dma_sgl;
 	struct lpfc_iocbq cur_iocbq;
+	uint16_t hdwq;
+	uint16_t cpu;

 	/* NVME specific fields */
 	struct nvmefc_fcp_req *nvmeCmd;
@@ -95,7 +97,6 @@ struct lpfc_nvme_buf {
 #define LPFC_SBUF_XBUSY		0x1	/* SLI4 hba reported XB on WCQE cmpl */
 #define LPFC_BUMP_QDEPTH	0x2	/* bumped queue depth counter */
 	uint16_t exch_busy;	/* SLI4 hba reported XB on complete WCQE */
-	uint16_t cpu;
 	uint16_t status;	/* From IOCB Word 7- ulpStatus */
 	uint32_t result;	/* From IOCB Word 4. */
...
@@ -226,15 +226,15 @@ lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
 			"6313 NVMET Defer ctx release xri x%x flg x%x\n",
 			ctxp->oxid, ctxp->flag);

-	spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
+	spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
 	if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
-		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
+		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
 				       iflag);
 		return;
 	}
 	ctxp->flag |= LPFC_NVMET_CTX_RLS;
 	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
-	spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
+	spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
 }

 /**
@@ -1162,9 +1162,9 @@ __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
 	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
 	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
 				&infop->nvmet_ctx_list, list) {
-		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 		list_del_init(&ctx_buf->list);
-		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);

 		__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
 		ctx_buf->sglq->state = SGL_FREED;
@@ -1502,7 +1502,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
 	}

 	spin_lock_irqsave(&phba->hbalock, iflag);
-	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 	list_for_each_entry_safe(ctxp, next_ctxp,
 				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
 				 list) {
@@ -1518,7 +1518,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
 			released = true;
 		}
 		ctxp->flag &= ~LPFC_NVMET_XBUSY;
-		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);

 		rrq_empty = list_empty(&phba->active_rrq_list);
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
@@ -1542,7 +1542,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
 		lpfc_worker_wake_up(phba);
 		return;
 	}
-	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 	spin_unlock_irqrestore(&phba->hbalock, iflag);
 }

@@ -1561,14 +1561,14 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
 	xri = be16_to_cpu(fc_hdr->fh_ox_id);

 	spin_lock_irqsave(&phba->hbalock, iflag);
-	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 	list_for_each_entry_safe(ctxp, next_ctxp,
 				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
 				 list) {
 		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
 			continue;

-		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 		spin_unlock_irqrestore(&phba->hbalock, iflag);

 		spin_lock_irqsave(&ctxp->ctxlock, iflag);
@@ -1589,7 +1589,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
 		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
 		return 0;
 	}
-	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
 	spin_unlock_irqrestore(&phba->hbalock, iflag);

 	lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
...
@@ -525,19 +525,26 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
 {
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_scsi_buf *psb, *next_psb;
+	struct lpfc_sli4_hdw_queue *qp;
 	unsigned long iflag = 0;
+	int idx;

 	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
 		return;
+
 	spin_lock_irqsave(&phba->hbalock, iflag);
-	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+		qp = &phba->sli4_hba.hdwq[idx];
+
+		spin_lock(&qp->abts_scsi_buf_list_lock);
 		list_for_each_entry_safe(psb, next_psb,
-				 &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
-			if (psb->rdata && psb->rdata->pnode
-			    && psb->rdata->pnode->vport == vport)
+					 &qp->lpfc_abts_scsi_buf_list, list) {
+			if (psb->rdata && psb->rdata->pnode &&
+			    psb->rdata->pnode->vport == vport)
 				psb->rdata = NULL;
 		}
-	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+		spin_unlock(&qp->abts_scsi_buf_list_lock);
+	}
 	spin_unlock_irqrestore(&phba->hbalock, iflag);
 }
@@ -551,11 +558,12 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
 **/
 void
 lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
-			  struct sli4_wcqe_xri_aborted *axri)
+			  struct sli4_wcqe_xri_aborted *axri, int idx)
 {
 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
 	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
 	struct lpfc_scsi_buf *psb, *next_psb;
+	struct lpfc_sli4_hdw_queue *qp;
 	unsigned long iflag = 0;
 	struct lpfc_iocbq *iocbq;
 	int i;
@@ -565,16 +573,19 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
 	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
 		return;

+	qp = &phba->sli4_hba.hdwq[idx];
 	spin_lock_irqsave(&phba->hbalock, iflag);
-	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+	spin_lock(&qp->abts_scsi_buf_list_lock);
 	list_for_each_entry_safe(psb, next_psb,
-				 &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
+				 &qp->lpfc_abts_scsi_buf_list, list) {
 		if (psb->cur_iocbq.sli4_xritag == xri) {
 			list_del(&psb->list);
+			qp->abts_scsi_io_bufs--;
 			psb->exch_busy = 0;
 			psb->status = IOSTAT_SUCCESS;
 			spin_unlock(
-				&phba->sli4_hba.abts_scsi_buf_list_lock);
+				&qp->abts_scsi_buf_list_lock);
 			if (psb->rdata && psb->rdata->pnode)
 				ndlp = psb->rdata->pnode;
 			else
@@ -593,7 +604,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
 			return;
 		}
 	}
-	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+	spin_unlock(&qp->abts_scsi_buf_list_lock);
 	for (i = 1; i <= phba->sli.last_iotag; i++) {
 		iocbq = phba->sli.iocbq_lookup[i];
@@ -652,10 +663,10 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 	return  lpfc_cmd;
 }
 /**
- * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_common_buf_list of the HBA
+ * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
  * @phba: The HBA for which this call is being executed.
  *
- * This routine removes a scsi buffer from head of @phba lpfc_common_buf_list
+ * This routine removes a scsi buffer from head of @hdwq io_buf_list
  * and returns to caller.
  *
  * Return codes:
@@ -666,48 +677,58 @@ static struct lpfc_scsi_buf*
 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
 	struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next;
+	struct lpfc_sli4_hdw_queue *qp;
 	unsigned long iflag = 0;
 	struct sli4_sge *sgl;
 	IOCB_t *iocb;
 	dma_addr_t pdma_phys_fcp_rsp;
 	dma_addr_t pdma_phys_fcp_cmd;
-	uint32_t sgl_size;
+	uint32_t sgl_size, cpu, idx;
 	int found = 0;

-	spin_lock_irqsave(&phba->common_buf_list_get_lock, iflag);
+	cpu = smp_processor_id();
+	if (cpu < phba->cfg_hdw_queue)
+		idx = cpu;
+	else
+		idx = cpu % phba->cfg_hdw_queue;
+
+	qp = &phba->sli4_hba.hdwq[idx];
+	spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag);
 	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
-				 &phba->lpfc_common_buf_list_get, list) {
+				 &qp->lpfc_io_buf_list_get, list) {
 		if (lpfc_test_rrq_active(phba, ndlp,
 					 lpfc_cmd->cur_iocbq.sli4_lxritag))
 			continue;
 		list_del_init(&lpfc_cmd->list);
-		phba->get_common_bufs--;
+		qp->get_io_bufs--;
 		found = 1;
 		break;
 	}
 	if (!found) {
-		spin_lock(&phba->common_buf_list_put_lock);
-		list_splice(&phba->lpfc_common_buf_list_put,
-			    &phba->lpfc_common_buf_list_get);
-		phba->get_common_bufs += phba->put_common_bufs;
-		INIT_LIST_HEAD(&phba->lpfc_common_buf_list_put);
-		phba->put_common_bufs = 0;
-		spin_unlock(&phba->common_buf_list_put_lock);
+		spin_lock(&qp->io_buf_list_put_lock);
+		list_splice(&qp->lpfc_io_buf_list_put,
+			    &qp->lpfc_io_buf_list_get);
+		qp->get_io_bufs += qp->put_io_bufs;
+		INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
+		qp->put_io_bufs = 0;
+		spin_unlock(&qp->io_buf_list_put_lock);
 		list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
-					 &phba->lpfc_common_buf_list_get,
+					 &qp->lpfc_io_buf_list_get,
 					 list) {
 			if (lpfc_test_rrq_active(
 				phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
 				continue;
 			list_del_init(&lpfc_cmd->list);
-			phba->get_common_bufs--;
+			qp->get_io_bufs--;
 			found = 1;
 			break;
 		}
 	}
-	spin_unlock_irqrestore(&phba->common_buf_list_get_lock, iflag);
-	if (!found)
+	spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
+	if (!found) {
+		qp->empty_io_bufs++;
 		return NULL;
+	}

 	sgl_size = phba->cfg_sg_dma_buf_size -
 		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
@@ -723,10 +744,11 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 	lpfc_cmd->flags = 0;
 	lpfc_cmd->start_time = jiffies;
 	lpfc_cmd->waitq = NULL;
-	lpfc_cmd->cpu = smp_processor_id();
+	lpfc_cmd->cpu = cpu;
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	lpfc_cmd->prot_data_type = 0;
 #endif
+	lpfc_cmd->hdwq = idx;

 	lpfc_cmd->fcp_cmnd = (lpfc_cmd->data + sgl_size);
 	lpfc_cmd->fcp_rsp = (struct fcp_rsp *)((uint8_t *)lpfc_cmd->fcp_cmnd +
@@ -825,35 +847,36 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
  * @phba: The Hba for which this call is being executed.
  * @psb: The scsi buffer which is being released.
  *
- * This routine releases @psb scsi buffer by adding it to tail of @phba
- * lpfc_common_buf_list list. For SLI4 XRI's are tied to the scsi buffer
+ * This routine releases @psb scsi buffer by adding it to tail of @hdwq
+ * io_buf_list list. For SLI4 XRI's are tied to the scsi buffer
  * and cannot be reused for at least RA_TOV amount of time if it was
  * aborted.
  **/
 static void
 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 {
+	struct lpfc_sli4_hdw_queue *qp;
 	unsigned long iflag = 0;

 	psb->seg_cnt = 0;
 	psb->prot_seg_cnt = 0;

+	qp = &phba->sli4_hba.hdwq[psb->hdwq];
 	if (psb->exch_busy) {
-		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
-				  iflag);
+		spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag);
 		psb->pCmd = NULL;
-		list_add_tail(&psb->list,
-			      &phba->sli4_hba.lpfc_abts_scsi_buf_list);
-		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
-				       iflag);
+		list_add_tail(&psb->list, &qp->lpfc_abts_scsi_buf_list);
+		qp->abts_scsi_io_bufs++;
+		spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag);
 	} else {
 		/* MUST zero fields if buffer is reused by another protocol */
 		psb->pCmd = NULL;
 		psb->cur_iocbq.iocb_cmpl = NULL;
-		spin_lock_irqsave(&phba->common_buf_list_put_lock, iflag);
-		list_add_tail(&psb->list, &phba->lpfc_common_buf_list_put);
-		phba->put_common_bufs++;
-		spin_unlock_irqrestore(&phba->common_buf_list_put_lock, iflag);
+
+		spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
+		list_add_tail(&psb->list, &qp->lpfc_io_buf_list_put);
+		qp->put_io_bufs++;
+		spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
 	}
 }
...
@@ -138,6 +138,8 @@ struct lpfc_scsi_buf {
 	dma_addr_t dma_phys_sgl;
 	struct ulp_bde64 *dma_sgl;
 	struct lpfc_iocbq cur_iocbq;
+	uint16_t hdwq;
+	uint16_t cpu;

 	/* SCSI specific fields */
 	struct scsi_cmnd *pCmd;
@@ -150,7 +152,6 @@ struct lpfc_scsi_buf {
 #define LPFC_SBUF_XBUSY		0x1	/* SLI4 hba reported XB on WCQE cmpl */
 #define LPFC_SBUF_BUMP_QDEPTH	0x2	/* bumped queue depth counter */
 	uint16_t exch_busy;	/* SLI4 hba reported XB on complete WCQE */
-	uint16_t cpu;
 	uint16_t status;	/* From IOCB Word 7- ulpStatus */
 	uint32_t result;	/* From IOCB Word 4. */
...
@@ -6023,7 +6023,7 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
 		list_add_tail(&rsrc_blks->list, ext_blk_list);
 		rsrc_start = rsrc_id;
 		if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
-			phba->sli4_hba.common_xri_start = rsrc_start +
+			phba->sli4_hba.io_xri_start = rsrc_start +
 				lpfc_sli4_get_iocb_cnt(phba);
 		}
@@ -7051,37 +7051,30 @@ lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
 }

 /**
- * lpfc_sli4_repost_common_sgl_list - Repost all the allocated nvme buffer sgls
+ * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine walks the list of nvme buffers that have been allocated and
  * repost them to the port by using SGL block post. This is needed after a
  * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
  * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
- * to the lpfc_common_buf_list. If the repost fails, reject all nvme buffers.
+ * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
  *
  * Returns: 0 = success, non-zero failure.
  **/
 int
-lpfc_sli4_repost_common_sgl_list(struct lpfc_hba *phba)
+lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
 {
 	LIST_HEAD(post_nblist);
 	int num_posted, rc = 0;

 	/* get all NVME buffers need to repost to a local list */
-	spin_lock_irq(&phba->common_buf_list_get_lock);
-	spin_lock(&phba->common_buf_list_put_lock);
-	list_splice_init(&phba->lpfc_common_buf_list_get, &post_nblist);
-	list_splice(&phba->lpfc_common_buf_list_put, &post_nblist);
-	phba->get_common_bufs = 0;
-	phba->put_common_bufs = 0;
-	spin_unlock(&phba->common_buf_list_put_lock);
-	spin_unlock_irq(&phba->common_buf_list_get_lock);
+	lpfc_io_buf_flush(phba, &post_nblist);

 	/* post the list of nvme buffer sgls to port if available */
 	if (!list_empty(&post_nblist)) {
-		num_posted = lpfc_sli4_post_common_sgl_list(
-			phba, &post_nblist, phba->sli4_hba.common_xri_cnt);
+		num_posted = lpfc_sli4_post_io_sgl_list(
+			phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
 		/* failed to post any nvme buffer, return error */
 		if (num_posted == 0)
 			rc = -EIO;
@@ -7551,7 +7544,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 		cnt += phba->sli4_hba.nvmet_xri_cnt;
 	} else {
 		/* update host common xri-sgl sizes and mappings */
-		rc = lpfc_sli4_common_sgl_update(phba);
+		rc = lpfc_sli4_io_sgl_update(phba);
 		if (unlikely(rc)) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
 					"6082 Failed to update nvme-sgl size "
@@ -7560,7 +7553,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 		}

 		/* register the allocated common sgl pool to the port */
-		rc = lpfc_sli4_repost_common_sgl_list(phba);
+		rc = lpfc_sli4_repost_io_sgl_list(phba);
 		if (unlikely(rc)) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
 					"6116 Error %d during nvme sgl post "
@@ -8562,7 +8555,6 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
 	if (rc)
 		goto exit;
-
 	/*
 	 * Initialize the bootstrap memory region to avoid stale data areas
 	 * in the mailbox post. Then copy the caller's mailbox contents to
@@ -10002,6 +9994,8 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
 struct lpfc_sli_ring *
 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
 {
+	struct lpfc_scsi_buf *lpfc_cmd;
+
 	if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
 		if (unlikely(!phba->sli4_hba.hdwq))
 			return NULL;
@@ -10010,11 +10004,8 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
 		 * be setup based on what work queue we used.
 		 */
 		if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
-			piocb->hba_wqidx =
-				lpfc_sli4_scmd_to_wqidx_distr(
-						phba, piocb->context1);
-			piocb->hba_wqidx = piocb->hba_wqidx %
-				phba->cfg_hdw_queue;
+			lpfc_cmd = (struct lpfc_scsi_buf *)piocb->context1;
+			piocb->hba_wqidx = lpfc_cmd->hdwq;
 		}
 		return phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq->pring;
 	} else {
@@ -12924,7 +12915,8 @@ void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
 				cq_event, struct lpfc_cq_event, list);
 		spin_unlock_irq(&phba->hbalock);
 		/* Notify aborted XRI for FCP work queue */
-		lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
+		lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri,
+					  cq_event->hdwq);
 		/* Free the event processed back to the free pool */
 		lpfc_sli4_cq_event_release(phba, cq_event);
 	}
@@ -13426,17 +13418,8 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
 	switch (cq->subtype) {
 	case LPFC_FCP:
-		cq_event = lpfc_cq_event_setup(
-			phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
-		if (!cq_event)
-			return false;
-		spin_lock_irqsave(&phba->hbalock, iflags);
-		list_add_tail(&cq_event->list,
-			      &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
-		/* Set the fcp xri abort event flag */
-		phba->hba_flag |= FCP_XRI_ABORT_EVENT;
-		spin_unlock_irqrestore(&phba->hbalock, iflags);
-		workposted = true;
+		lpfc_sli4_fcp_xri_aborted(phba, wcqe, cq->hdwq);
+		workposted = false;
 		break;
 	case LPFC_NVME_LS: /* NVME LS uses ELS resources */
 	case LPFC_ELS:
@@ -13444,6 +13427,7 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
 			phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
 		if (!cq_event)
 			return false;
+		cq_event->hdwq = cq->hdwq;
 		spin_lock_irqsave(&phba->hbalock, iflags);
 		list_add_tail(&cq_event->list,
 			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
@@ -13457,7 +13441,7 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
 		if (phba->nvmet_support)
 			lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
 		else
-			lpfc_sli4_nvme_xri_aborted(phba, wcqe);
+			lpfc_sli4_nvme_xri_aborted(phba, wcqe, cq->hdwq);
 		workposted = false;
 		break;
@@ -14073,7 +14057,8 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
 	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

 	/* First check for NVME/SCSI completion */
-	if (cqid == phba->sli4_hba.hdwq[qidx].nvme_cq_map) {
+	if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
+	    (cqid == phba->sli4_hba.hdwq[qidx].nvme_cq_map)) {
 		/* Process NVME / NVMET command completion */
 		cq = phba->sli4_hba.hdwq[qidx].nvme_cq;
 		goto process_cq;
@@ -16656,7 +16641,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
 }

 /**
- * lpfc_sli4_post_common_sgl_block - post a block of nvme sgl list to firmware
+ * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware
  * @phba: pointer to lpfc hba data structure.
  * @nblist: pointer to nvme buffer list.
  * @count: number of scsi buffers on the list.
@@ -16667,8 +16652,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
  *
  **/
 static int
-lpfc_sli4_post_common_sgl_block(struct lpfc_hba *phba,
-				struct list_head *nblist,
+lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
 			    int count)
 {
 	struct lpfc_nvme_buf *lpfc_ncmd;
@@ -16770,7 +16754,7 @@ lpfc_sli4_post_common_sgl_block(struct lpfc_hba *phba,
 }

 /**
- * lpfc_sli4_post_common_sgl_list - Post blocks of nvme buffer sgls from a list
+ * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
  * @phba: pointer to lpfc hba data structure.
  * @post_nblist: pointer to the nvme buffer list.
  *
@@ -16784,7 +16768,7 @@ lpfc_sli4_post_common_sgl_block(struct lpfc_hba *phba,
  * Returns: 0 = failure, non-zero number of successfully posted buffers.
  **/
 int
-lpfc_sli4_post_common_sgl_list(struct lpfc_hba *phba,
+lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
 			   struct list_head *post_nblist, int sb_count)
 {
 	struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
@@ -16793,7 +16777,6 @@ lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
 	dma_addr_t pdma_phys_sgl1;
 	int last_xritag = NO_XRI;
 	int cur_xritag;
-	unsigned long iflag;
 	LIST_HEAD(prep_nblist);
 	LIST_HEAD(blck_nblist);
 	LIST_HEAD(nvme_nblist);
@@ -16864,7 +16847,7 @@ lpfc_sli4_post_common_sgl_list(struct lpfc_hba *phba,
 			continue;

 		/* post block of NVME buffer list sgls */
-		status = lpfc_sli4_post_common_sgl_block(phba, &blck_nblist,
+		status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
 						     post_cnt);

 		/* don't reset xirtag due to hole in xri block */
@@ -16891,17 +16874,8 @@ lpfc_sli4_post_common_sgl_list(struct lpfc_hba *phba,
 		}
 	}
 	/* Push NVME buffers with sgl posted to the available list */
-	while (!list_empty(&nvme_nblist)) {
-		list_remove_head(&nvme_nblist, lpfc_ncmd,
-				 struct lpfc_nvme_buf, list);
-		lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
-		lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
-		spin_lock_irqsave(&phba->common_buf_list_put_lock, iflag);
-		list_add_tail(&lpfc_ncmd->list,
-			      &phba->lpfc_common_buf_list_put);
-		phba->put_common_bufs++;
-		spin_unlock_irqrestore(&phba->common_buf_list_put_lock, iflag);
-	}
+	lpfc_io_buf_replenish(phba, &nvme_nblist);
 	return num_posted;
 }
...
@@ -33,6 +33,7 @@ typedef enum _lpfc_ctx_cmd {

 struct lpfc_cq_event {
 	struct list_head list;
+	uint16_t hdwq;
 	union {
 		struct lpfc_mcqe		mcqe_cmpl;
 		struct lpfc_acqe_link		acqe_link;
...
@@ -214,6 +214,7 @@ struct lpfc_queue {
 	struct work_struct	spwork;

 	uint64_t isr_timestamp;
+	uint16_t hdwq;
 	uint8_t qe_valid;
 	struct lpfc_queue *assoc_qp;
 	union sli4_qe qe[1];	/* array to index entries (must be last) */
@@ -538,6 +539,22 @@ struct lpfc_sli4_hdw_queue {
 	struct lpfc_queue *nvme_wq;	/* Fast-path NVME work queue */
 	uint16_t fcp_cq_map;
 	uint16_t nvme_cq_map;
+
+	/* Keep track of IO buffers for this hardware queue */
+	spinlock_t io_buf_list_get_lock;  /* Common buf alloc list lock */
+	struct list_head lpfc_io_buf_list_get;
+	spinlock_t io_buf_list_put_lock;  /* Common buf free list lock */
+	struct list_head lpfc_io_buf_list_put;
+	spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
+	struct list_head lpfc_abts_scsi_buf_list;
+	spinlock_t abts_nvme_buf_list_lock; /* list of aborted NVME IOs */
+	struct list_head lpfc_abts_nvme_buf_list;
+	uint32_t total_io_bufs;
+	uint32_t get_io_bufs;
+	uint32_t put_io_bufs;
+	uint32_t empty_io_bufs;
+	uint32_t abts_scsi_io_bufs;
+	uint32_t abts_nvme_io_bufs;
 };

 struct lpfc_sli4_hba {
@@ -662,19 +679,20 @@ struct lpfc_sli4_hba {
 	uint16_t rpi_hdrs_in_use;	/* must post rpi hdrs if set. */
 	uint16_t next_xri;	/* last_xri - max_cfg_param.xri_base = used */
 	uint16_t next_rpi;
-	uint16_t common_xri_max;
-	uint16_t common_xri_cnt;
-	uint16_t common_xri_start;
+	uint16_t io_xri_max;
+	uint16_t io_xri_cnt;
+	uint16_t io_xri_start;
 	uint16_t els_xri_cnt;
 	uint16_t nvmet_xri_cnt;
 	uint16_t nvmet_io_wait_cnt;
 	uint16_t nvmet_io_wait_total;
 	struct list_head lpfc_els_sgl_list;
 	struct list_head lpfc_abts_els_sgl_list;
+	spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
+	struct list_head lpfc_abts_scsi_buf_list;
 	struct list_head lpfc_nvmet_sgl_list;
+	spinlock_t abts_nvmet_buf_list_lock; /* list of aborted NVMET IOs */
 	struct list_head lpfc_abts_nvmet_ctx_list;
-	struct list_head lpfc_abts_scsi_buf_list;
-	struct list_head lpfc_abts_nvme_buf_list;
 	struct list_head lpfc_nvmet_io_wait_list;
 	struct lpfc_nvmet_ctx_info *nvmet_ctx_info;
 	struct lpfc_sglq **lpfc_sglq_active_list;
@@ -703,8 +721,6 @@ struct lpfc_sli4_hba {
 #define LPFC_SLI4_PPNAME_NON	0
 #define LPFC_SLI4_PPNAME_GET	1
 	struct lpfc_iov iov;
-	spinlock_t abts_nvme_buf_list_lock; /* list of aborted SCSI IOs */
-	spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
 	spinlock_t sgl_list_lock; /* list of aborted els IOs */
 	spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */
 	uint32_t physical_port;
@@ -839,7 +855,7 @@ int lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
 int lpfc_sli4_queue_setup(struct lpfc_hba *);
 void lpfc_sli4_queue_unset(struct lpfc_hba *);
 int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
-int lpfc_repost_common_sgl_list(struct lpfc_hba *phba);
+int lpfc_repost_io_sgl_list(struct lpfc_hba *phba);
 uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
 void lpfc_sli4_free_xri(struct lpfc_hba *, int);
 int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
@@ -862,9 +878,9 @@ int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
 void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
-			       struct sli4_wcqe_xri_aborted *);
+			       struct sli4_wcqe_xri_aborted *, int);
 void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
-				struct sli4_wcqe_xri_aborted *axri);
+				struct sli4_wcqe_xri_aborted *axri, int idx);
 void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
 				 struct sli4_wcqe_xri_aborted *axri);
 void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
...