Commit cbc5de1b authored by James Smart's avatar James Smart Committed by Martin K. Petersen

scsi: lpfc: Fix -EOVERFLOW behavior for NVMET and defer_rcv

The driver is all set to handle the defer_rcv API for the nvmet_fc
transport, yet didn't properly recognize the return status when the
defer_rcv occurred. The driver treated it simply as an error and aborted
the io. Several residual issues occurred at that point.

Finish the defer_rcv support: recognize the return status when the io
request is being handled in a deferred style. This stops the rogue
aborts; replenish the async cmd rcv buffer in the deferred receive if
needed.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent cf1a1d3e
...@@ -38,6 +38,7 @@ ...@@ -38,6 +38,7 @@
#include <../drivers/nvme/host/nvme.h> #include <../drivers/nvme/host/nvme.h>
#include <linux/nvme-fc-driver.h> #include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "lpfc_version.h" #include "lpfc_version.h"
#include "lpfc_hw4.h" #include "lpfc_hw4.h"
...@@ -218,6 +219,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) ...@@ -218,6 +219,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
ctxp->entry_cnt = 1; ctxp->entry_cnt = 1;
ctxp->flag = 0; ctxp->flag = 0;
ctxp->ctxbuf = ctx_buf; ctxp->ctxbuf = ctx_buf;
ctxp->rqb_buffer = (void *)nvmebuf;
spin_lock_init(&ctxp->ctxlock); spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
...@@ -253,6 +255,17 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) ...@@ -253,6 +255,17 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
return; return;
} }
/* Processing of FCP command is deferred */
if (rc == -EOVERFLOW) {
lpfc_nvmeio_data(phba,
"NVMET RCV BUSY: xri x%x sz %d "
"from %06x\n",
oxid, size, sid);
/* defer repost rcv buffer till .defer_rcv callback */
ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
atomic_inc(&tgtp->rcv_fcp_cmd_out);
return;
}
atomic_inc(&tgtp->rcv_fcp_cmd_drop); atomic_inc(&tgtp->rcv_fcp_cmd_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
...@@ -921,7 +934,11 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, ...@@ -921,7 +934,11 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
tgtp = phba->targetport->private; tgtp = phba->targetport->private;
atomic_inc(&tgtp->rcv_fcp_cmd_defer); atomic_inc(&tgtp->rcv_fcp_cmd_defer);
if (ctxp->flag & LPFC_NVMET_DEFER_RCV_REPOST)
lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
else
nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
} }
static struct nvmet_fc_target_template lpfc_tgttemplate = { static struct nvmet_fc_target_template lpfc_tgttemplate = {
...@@ -1693,6 +1710,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, ...@@ -1693,6 +1710,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
ctxp->entry_cnt = 1; ctxp->entry_cnt = 1;
ctxp->flag = 0; ctxp->flag = 0;
ctxp->ctxbuf = ctx_buf; ctxp->ctxbuf = ctx_buf;
ctxp->rqb_buffer = (void *)nvmebuf;
spin_lock_init(&ctxp->ctxlock); spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
...@@ -1726,6 +1744,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, ...@@ -1726,6 +1744,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
/* Process FCP command */ /* Process FCP command */
if (rc == 0) { if (rc == 0) {
ctxp->rqb_buffer = NULL;
atomic_inc(&tgtp->rcv_fcp_cmd_out); atomic_inc(&tgtp->rcv_fcp_cmd_out);
lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
return; return;
...@@ -1737,10 +1756,11 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, ...@@ -1737,10 +1756,11 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
"NVMET RCV BUSY: xri x%x sz %d from %06x\n", "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
oxid, size, sid); oxid, size, sid);
/* defer reposting rcv buffer till .defer_rcv callback */ /* defer reposting rcv buffer till .defer_rcv callback */
ctxp->rqb_buffer = nvmebuf; ctxp->flag |= LPFC_NVMET_DEFER_RCV_REPOST;
atomic_inc(&tgtp->rcv_fcp_cmd_out); atomic_inc(&tgtp->rcv_fcp_cmd_out);
return; return;
} }
ctxp->rqb_buffer = nvmebuf;
atomic_inc(&tgtp->rcv_fcp_cmd_drop); atomic_inc(&tgtp->rcv_fcp_cmd_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
......
...@@ -126,6 +126,7 @@ struct lpfc_nvmet_rcv_ctx { ...@@ -126,6 +126,7 @@ struct lpfc_nvmet_rcv_ctx {
#define LPFC_NVMET_XBUSY 0x4 /* XB bit set on IO cmpl */ #define LPFC_NVMET_XBUSY 0x4 /* XB bit set on IO cmpl */
#define LPFC_NVMET_CTX_RLS 0x8 /* ctx free requested */ #define LPFC_NVMET_CTX_RLS 0x8 /* ctx free requested */
#define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */ #define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */
#define LPFC_NVMET_DEFER_RCV_REPOST 0x20 /* repost to RQ on defer rcv */
struct rqb_dmabuf *rqb_buffer; struct rqb_dmabuf *rqb_buffer;
struct lpfc_nvmet_ctxbuf *ctxbuf; struct lpfc_nvmet_ctxbuf *ctxbuf;
......
...@@ -475,28 +475,30 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, ...@@ -475,28 +475,30 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
struct lpfc_rqe *temp_hrqe; struct lpfc_rqe *temp_hrqe;
struct lpfc_rqe *temp_drqe; struct lpfc_rqe *temp_drqe;
struct lpfc_register doorbell; struct lpfc_register doorbell;
int put_index; int hq_put_index;
int dq_put_index;
/* sanity check on queue memory */ /* sanity check on queue memory */
if (unlikely(!hq) || unlikely(!dq)) if (unlikely(!hq) || unlikely(!dq))
return -ENOMEM; return -ENOMEM;
put_index = hq->host_index; hq_put_index = hq->host_index;
temp_hrqe = hq->qe[put_index].rqe; dq_put_index = dq->host_index;
temp_drqe = dq->qe[dq->host_index].rqe; temp_hrqe = hq->qe[hq_put_index].rqe;
temp_drqe = dq->qe[dq_put_index].rqe;
if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ) if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
return -EINVAL; return -EINVAL;
if (put_index != dq->host_index) if (hq_put_index != dq_put_index)
return -EINVAL; return -EINVAL;
/* If the host has not yet processed the next entry then we are done */ /* If the host has not yet processed the next entry then we are done */
if (((put_index + 1) % hq->entry_count) == hq->hba_index) if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
return -EBUSY; return -EBUSY;
lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size); lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size); lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
/* Update the host index to point to the next slot */ /* Update the host index to point to the next slot */
hq->host_index = ((put_index + 1) % hq->entry_count); hq->host_index = ((hq_put_index + 1) % hq->entry_count);
dq->host_index = ((dq->host_index + 1) % dq->entry_count); dq->host_index = ((dq_put_index + 1) % dq->entry_count);
hq->RQ_buf_posted++; hq->RQ_buf_posted++;
/* Ring The Header Receive Queue Doorbell */ /* Ring The Header Receive Queue Doorbell */
...@@ -517,7 +519,7 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, ...@@ -517,7 +519,7 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
} }
writel(doorbell.word0, hq->db_regaddr); writel(doorbell.word0, hq->db_regaddr);
} }
return put_index; return hq_put_index;
} }
/** /**
...@@ -12887,8 +12889,8 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) ...@@ -12887,8 +12889,8 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
lpfc_printf_log(phba, KERN_ERR, LOG_SLI, lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2537 Receive Frame Truncated!!\n"); "2537 Receive Frame Truncated!!\n");
case FC_STATUS_RQ_SUCCESS: case FC_STATUS_RQ_SUCCESS:
lpfc_sli4_rq_release(hrq, drq);
spin_lock_irqsave(&phba->hbalock, iflags); spin_lock_irqsave(&phba->hbalock, iflags);
lpfc_sli4_rq_release(hrq, drq);
dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
if (!dma_buf) { if (!dma_buf) {
hrq->RQ_no_buf_found++; hrq->RQ_no_buf_found++;
...@@ -13290,8 +13292,8 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, ...@@ -13290,8 +13292,8 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
"6126 Receive Frame Truncated!!\n"); "6126 Receive Frame Truncated!!\n");
/* Drop thru */ /* Drop thru */
case FC_STATUS_RQ_SUCCESS: case FC_STATUS_RQ_SUCCESS:
lpfc_sli4_rq_release(hrq, drq);
spin_lock_irqsave(&phba->hbalock, iflags); spin_lock_irqsave(&phba->hbalock, iflags);
lpfc_sli4_rq_release(hrq, drq);
dma_buf = lpfc_sli_rqbuf_get(phba, hrq); dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
if (!dma_buf) { if (!dma_buf) {
hrq->RQ_no_buf_found++; hrq->RQ_no_buf_found++;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment