Commit 411de511 authored by James Smart, committed by Martin K. Petersen

scsi: lpfc: Fix RQ empty firmware trap

When the nvme target deferred receive logic waits for exchange resources,
the corresponding receive buffer is not replenished to the hardware.
This can leave the hardware short of asynchronous receive buffer
resources, producing a "2885 Port Status Event: ... error
1=0x52004a01 ..." message.

Correct by replenishing the buffer whenever the deferred logic kicks
in.  Update corresponding debug messages and statistics as well.

[mkp: applied by hand]
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 91455b85
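To make the mechanism of the fix concrete, here is a minimal, self-contained C sketch of the repost-on-defer pattern the patch adopts. Every name in it (struct rq, rq_post(), rq_receive(), defer_command(), defer_done(), RQ_DEPTH) is a hypothetical stand-in invented for illustration, not lpfc API: in the real driver the replacement buffer is posted with lpfc_post_rq_buffer() when lpfc_nvmet_unsol_fcp_buffer() gets -EOVERFLOW, and the held buffer is later freed, not reposted, in the .defer_rcv callback via rqb_free_buffer().

/*
 * Hypothetical model of the repost-on-defer pattern (not lpfc code).
 * The "hardware" can only receive while at least one buffer is posted.
 */
#include <stdio.h>
#include <stdlib.h>

#define RQ_DEPTH 4

struct rq_buf { int id; };

struct rq {
	struct rq_buf *slots[RQ_DEPTH]; /* buffers posted to the "hardware" */
	int count;
};

/* Post one fresh buffer to the receive queue. */
static int rq_post(struct rq *rq, int id)
{
	struct rq_buf *buf;

	if (rq->count == RQ_DEPTH)
		return -1;              /* RQ already full, nothing to do */
	buf = malloc(sizeof(*buf));
	if (!buf)
		return -1;
	buf->id = id;
	rq->slots[rq->count++] = buf;
	return 0;
}

/* The hardware consumes the oldest posted buffer when a command arrives. */
static struct rq_buf *rq_receive(struct rq *rq)
{
	struct rq_buf *buf;
	int i;

	if (!rq->count)
		return NULL;            /* RQ empty: the firmware-trap case */
	buf = rq->slots[0];
	for (i = 1; i < rq->count; i++)
		rq->slots[i - 1] = rq->slots[i];
	rq->count--;
	return buf;
}

static struct rq_buf *held;             /* buffer a deferred command still uses */

/*
 * Old behaviour kept the received buffer until the deferred command
 * completed, leaving the RQ one buffer short the whole time.  The fix
 * sketched here posts a brand-new buffer immediately at defer time.
 */
static void defer_command(struct rq *rq, struct rq_buf *buf, int next_id)
{
	rq_post(rq, next_id);           /* replenish now: RQ depth restored */
	held = buf;                     /* payload is still being consumed  */
}

/* Later, the defer-done callback fires: a replacement is already posted,
 * so the held buffer is simply freed, not reposted. */
static void defer_done(void)
{
	free(held);
	held = NULL;
}

int main(void)
{
	struct rq rq = { .count = 0 };
	struct rq_buf *cmd;
	int i;

	for (i = 0; i < RQ_DEPTH; i++)
		rq_post(&rq, i);

	cmd = rq_receive(&rq);          /* command arrives: depth drops to 3/4 */
	printf("received buf %d, depth %d/%d\n", cmd->id, rq.count, RQ_DEPTH);

	defer_command(&rq, cmd, RQ_DEPTH); /* defer: depth back to 4/4 */
	printf("after defer, depth %d/%d\n", rq.count, RQ_DEPTH);

	defer_done();                   /* deferred command finished */

	while ((cmd = rq_receive(&rq)))
		free(cmd);
	return 0;
}

The design point: receive-queue depth is restored at defer time instead of at command completion, so a burst of deferred commands can no longer drain the hardware RQ and trigger the "2885 Port Status Event" error described above.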
drivers/scsi/lpfc/lpfc_attr.c
@@ -259,6 +259,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 			atomic_read(&tgtp->xmt_abort_rsp),
 			atomic_read(&tgtp->xmt_abort_rsp_error));
 
+	len += snprintf(buf + len, PAGE_SIZE - len,
+			"DELAY: ctx %08x fod %08x wqfull %08x\n",
+			atomic_read(&tgtp->defer_ctx),
+			atomic_read(&tgtp->defer_fod),
+			atomic_read(&tgtp->defer_wqfull));
+
 	/* Calculate outstanding IOs */
 	tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
 	tot += atomic_read(&tgtp->xmt_fcp_release);
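With the new counters wired into lpfc_nvme_info_show(), the nvme_info sysfs attribute gains one extra line. Given the snprintf format above, it would render as, for example (counter values hypothetical):

DELAY: ctx 00000012 fod 00000003 wqfull 00000001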

drivers/scsi/lpfc/lpfc_mem.c
@@ -753,12 +753,16 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
 	drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
 	rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
 	if (rc < 0) {
+		(rqbp->rqb_free_buffer)(phba, rqb_entry);
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"6409 Cannot post to RQ %d: %x %x\n",
+				"6409 Cannot post to HRQ %d: %x %x %x "
+				"DRQ %x %x\n",
 				rqb_entry->hrq->queue_id,
 				rqb_entry->hrq->host_index,
-				rqb_entry->hrq->hba_index);
-		(rqbp->rqb_free_buffer)(phba, rqb_entry);
+				rqb_entry->hrq->hba_index,
+				rqb_entry->hrq->entry_count,
+				rqb_entry->drq->host_index,
+				rqb_entry->drq->hba_index);
 	} else {
 		list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
 		rqbp->buffer_count++;

drivers/scsi/lpfc/lpfc_nvmet.c
@@ -270,8 +270,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
 				"NVMET RCV BUSY: xri x%x sz %d "
 				"from %06x\n",
 				oxid, size, sid);
-		/* defer repost rcv buffer till .defer_rcv callback */
-		ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
 		atomic_inc(&tgtp->rcv_fcp_cmd_out);
 		return;
 	}
@@ -837,6 +835,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
 		list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
 		wq->q_flag |= HBA_NVMET_WQFULL;
 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
+		atomic_inc(&lpfc_nvmep->defer_wqfull);
 		return 0;
 	}
 
@@ -975,11 +974,9 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
 
 	tgtp = phba->targetport->private;
 	atomic_inc(&tgtp->rcv_fcp_cmd_defer);
-	if (ctxp->flag & LPFC_NVMET_DEFER_RCV_REPOST)
-		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
-	else
-		nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
-	ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
+
+	/* Free the nvmebuf since a new buffer already replaced it */
+	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
 }
 
 static struct nvmet_fc_target_template lpfc_tgttemplate = {
@@ -1309,6 +1306,9 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
 		atomic_set(&tgtp->xmt_abort_sol, 0);
 		atomic_set(&tgtp->xmt_abort_rsp, 0);
 		atomic_set(&tgtp->xmt_abort_rsp_error, 0);
+		atomic_set(&tgtp->defer_ctx, 0);
+		atomic_set(&tgtp->defer_fod, 0);
+		atomic_set(&tgtp->defer_wqfull, 0);
 	}
 	return error;
 }
@@ -1810,6 +1810,8 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 	lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
 			 oxid, size, smp_processor_id());
 
+	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+
 	if (!ctx_buf) {
 		/* Queue this NVME IO to process later */
 		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
@@ -1825,10 +1827,11 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 		lpfc_post_rq_buffer(
 			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
 			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
+
+		atomic_inc(&tgtp->defer_ctx);
 		return;
 	}
 
-	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
 	payload = (uint32_t *)(nvmebuf->dbuf.virt);
 	sid = sli4_sid_from_fc_hdr(fc_hdr);
@@ -1892,12 +1895,20 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 
 	/* Processing of FCP command is deferred */
 	if (rc == -EOVERFLOW) {
+		/*
+		 * Post a brand new DMA buffer to RQ and defer
+		 * freeing rcv buffer till .defer_rcv callback
+		 */
+		qno = nvmebuf->idx;
+		lpfc_post_rq_buffer(
+			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
+			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
+
 		lpfc_nvmeio_data(phba,
 				 "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
 				 oxid, size, sid);
-		/* defer reposting rcv buffer till .defer_rcv callback */
-		ctxp->flag |= LPFC_NVMET_DEFER_RCV_REPOST;
 		atomic_inc(&tgtp->rcv_fcp_cmd_out);
+		atomic_inc(&tgtp->defer_fod);
 		return;
 	}
 	ctxp->rqb_buffer = nvmebuf;

drivers/scsi/lpfc/lpfc_nvmet.h
@@ -72,7 +72,6 @@ struct lpfc_nvmet_tgtport {
 	atomic_t xmt_fcp_rsp_aborted;
 	atomic_t xmt_fcp_rsp_drop;
 
-
 	/* Stats counters - lpfc_nvmet_xmt_fcp_abort */
 	atomic_t xmt_fcp_xri_abort_cqe;
 	atomic_t xmt_fcp_abort;
@@ -81,6 +80,11 @@ struct lpfc_nvmet_tgtport {
 	atomic_t xmt_abort_unsol;
 	atomic_t xmt_abort_rsp;
 	atomic_t xmt_abort_rsp_error;
+
+	/* Stats counters - defer IO */
+	atomic_t defer_ctx;
+	atomic_t defer_fod;
+	atomic_t defer_wqfull;
 };
 
 struct lpfc_nvmet_ctx_info {
@@ -131,7 +135,6 @@ struct lpfc_nvmet_rcv_ctx {
 #define LPFC_NVMET_XBUSY	0x4   /* XB bit set on IO cmpl */
 #define LPFC_NVMET_CTX_RLS	0x8   /* ctx free requested */
 #define LPFC_NVMET_ABTS_RCV	0x10  /* ABTS received on exchange */
-#define LPFC_NVMET_DEFER_RCV_REPOST	0x20  /* repost to RQ on defer rcv */
 #define LPFC_NVMET_DEFER_WQFULL	0x40  /* Waiting on a free WQE */
 	struct rqb_dmabuf *rqb_buffer;
 	struct lpfc_nvmet_ctxbuf *ctxbuf;

drivers/scsi/lpfc/lpfc_sli.c
@@ -6535,9 +6535,11 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
 	struct lpfc_rqe hrqe;
 	struct lpfc_rqe drqe;
 	struct lpfc_rqb *rqbp;
+	unsigned long flags;
 	struct rqb_dmabuf *rqb_buffer;
 	LIST_HEAD(rqb_buf_list);
 
+	spin_lock_irqsave(&phba->hbalock, flags);
 	rqbp = hrq->rqbp;
 	for (i = 0; i < count; i++) {
 		/* IF RQ is already full, don't bother */
@@ -6561,6 +6563,15 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
 		drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
 		rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
 		if (rc < 0) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"6421 Cannot post to HRQ %d: %x %x %x "
+					"DRQ %x %x\n",
+					hrq->queue_id,
+					hrq->host_index,
+					hrq->hba_index,
+					hrq->entry_count,
+					drq->host_index,
+					drq->hba_index);
 			rqbp->rqb_free_buffer(phba, rqb_buffer);
 		} else {
 			list_add_tail(&rqb_buffer->hbuf.list,
@@ -6568,6 +6579,7 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
 				      &rqbp->rqb_buffer_list);
 			rqbp->buffer_count++;
 		}
 	}
+	spin_unlock_irqrestore(&phba->hbalock, flags);
 	return 1;
 }