Commit 6514b25d authored by James Smart, committed by Jens Axboe

lpfc: Refactor Send LS Request support

Currently, the ability to send an NVME LS request is limited to the nvme
(host) side of the driver.  In preparation for both the nvme and nvmet sides
supporting Send LS Request, rework the existing send ls_req and ls_req
completion routines so that there is common code that can be used by
both sides.
Signed-off-by: Paul Ely <paul.ely@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 3a8070c5
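
For context on how the refactor is meant to be consumed, the sketch below shows a hypothetical nvmet-side caller reusing the common routines introduced by this patch. Only __lpfc_nvme_ls_req(), __lpfc_nvme_ls_req_cmp() and the structure types come from the diff below; the example_* wrapper names are illustrative placeholders, not code from this series.

/*
 * Illustrative sketch only: a possible nvmet-side user of the shared
 * LS request helpers added by this patch. The example_* names are
 * hypothetical and do not exist in the driver.
 */
static void
example_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			 struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_vport *vport = cmdwqe->vport;

	/* side-specific statistics/state updates would go here */

	/* shared teardown: calls the transport done() and frees buffers */
	__lpfc_nvme_ls_req_cmp(phba, vport, cmdwqe, wcqe);
}

static int
example_nvmet_send_ls_req(struct lpfc_vport *vport,
			  struct lpfc_nodelist *ndlp,
			  struct nvmefc_ls_req *pnvme_lsreq)
{
	/* shared path: validates the ndlp, builds the BPL, issues GEN_REQ WQE */
	return __lpfc_nvme_ls_req(vport, ndlp, pnvme_lsreq,
				  example_nvmet_ls_req_cmp);
}

Passing the completion routine as a function pointer is what lets each side keep its own statistics accounting while the buffer setup, WQE issue and teardown stay in one place; the host-side lpfc_nvme_ls_req()/lpfc_nvme_ls_req_cmp() wrappers in the diff below follow exactly this pattern.
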
@@ -412,43 +412,43 @@ lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
 	return 1;
 }
 
-static void
-lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
-		       struct lpfc_wcqe_complete *wcqe)
+/**
+ * __lpfc_nvme_ls_req_cmp - Generic completion handler for a NVME
+ *        LS request.
+ * @phba: Pointer to HBA context object
+ * @vport: The local port that issued the LS
+ * @cmdwqe: Pointer to driver command WQE object.
+ * @wcqe: Pointer to driver response CQE object.
+ *
+ * This function is the generic completion handler for NVME LS requests.
+ * The function updates any states and statistics, calls the transport
+ * ls_req done() routine, then tears down the command and buffers used
+ * for the LS request.
+ **/
+void
+__lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
+		       struct lpfc_iocbq *cmdwqe,
+		       struct lpfc_wcqe_complete *wcqe)
 {
-	struct lpfc_vport *vport = cmdwqe->vport;
-	struct lpfc_nvme_lport *lport;
-	uint32_t status;
 	struct nvmefc_ls_req *pnvme_lsreq;
 	struct lpfc_dmabuf *buf_ptr;
 	struct lpfc_nodelist *ndlp;
+	uint32_t status;
 
 	pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
+	ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
 	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
 
-	if (vport->localport) {
-		lport = (struct lpfc_nvme_lport *)vport->localport->private;
-		if (lport) {
-			atomic_inc(&lport->fc4NvmeLsCmpls);
-			if (status) {
-				if (bf_get(lpfc_wcqe_c_xb, wcqe))
-					atomic_inc(&lport->cmpl_ls_xb);
-				atomic_inc(&lport->cmpl_ls_err);
-			}
-		}
-	}
-
-	ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
-			 "6047 nvme cmpl Enter "
-			 "Data %px DID %x Xri: %x status %x reason x%x "
-			 "cmd:x%px lsreg:x%px bmp:x%px ndlp:x%px\n",
+			 "6047 NVMEx LS REQ %px cmpl DID %x Xri: %x "
+			 "status %x reason x%x cmd:x%px lsreg:x%px bmp:x%px "
+			 "ndlp:x%px\n",
			 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
 			 cmdwqe->sli4_xritag, status,
 			 (wcqe->parameter & 0xffff),
 			 cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);
 
-	lpfc_nvmeio_data(phba, "NVME LS CMPL: xri x%x stat x%x parm x%x\n",
+	lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x parm x%x\n",
 			 cmdwqe->sli4_xritag, status, wcqe->parameter);
 
 	if (cmdwqe->context3) {
@@ -461,7 +461,7 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 		pnvme_lsreq->done(pnvme_lsreq, status);
 	else
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
-				 "6046 nvme cmpl without done call back? "
+				 "6046 NVMEx cmpl without done call back? "
 				 "Data %px DID %x Xri: %x status %x\n",
 				 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
 				 cmdwqe->sli4_xritag, status);
@@ -472,6 +472,31 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 	lpfc_sli_release_iocbq(phba, cmdwqe);
 }
 
+static void
+lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+		     struct lpfc_wcqe_complete *wcqe)
+{
+	struct lpfc_vport *vport = cmdwqe->vport;
+	struct lpfc_nvme_lport *lport;
+	uint32_t status;
+
+	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
+
+	if (vport->localport) {
+		lport = (struct lpfc_nvme_lport *)vport->localport->private;
+		if (lport) {
+			atomic_inc(&lport->fc4NvmeLsCmpls);
+			if (status) {
+				if (bf_get(lpfc_wcqe_c_xb, wcqe))
+					atomic_inc(&lport->cmpl_ls_xb);
+				atomic_inc(&lport->cmpl_ls_err);
+			}
+		}
+	}
+
+	__lpfc_nvme_ls_req_cmp(phba, vport, cmdwqe, wcqe);
+}
+
 static int
 lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
 		  struct lpfc_dmabuf *inp,
@@ -573,13 +598,6 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
 
 	/* Issue GEN REQ WQE for NPORT <did> */
-	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-			 "6050 Issue GEN REQ WQE to NPORT x%x "
-			 "Data: x%x x%x wq:x%px lsreq:x%px bmp:x%px "
-			 "xmit:%d 1st:%d\n",
-			 ndlp->nlp_DID, genwqe->iotag,
-			 vport->port_state,
-			 genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
 	genwqe->wqe_cmpl = cmpl;
 	genwqe->iocb_cmpl = NULL;
 	genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
@@ -591,105 +609,108 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
 	rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
 	if (rc) {
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC | LOG_ELS,
 				 "6045 Issue GEN REQ WQE to NPORT x%x "
-				 "Data: x%x x%x\n",
+				 "Data: x%x x%x rc x%x\n",
 				 ndlp->nlp_DID, genwqe->iotag,
-				 vport->port_state);
+				 vport->port_state, rc);
 		lpfc_sli_release_iocbq(phba, genwqe);
 		return 1;
 	}
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_ELS,
+			 "6050 Issue GEN REQ WQE to NPORT x%x "
+			 "Data: oxid: x%x state: x%x wq:x%px lsreq:x%px "
+			 "bmp:x%px xmit:%d 1st:%d\n",
+			 ndlp->nlp_DID, genwqe->sli4_xritag,
+			 vport->port_state,
+			 genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
+
 	return 0;
 }
 
 /**
- * lpfc_nvme_ls_req - Issue an Link Service request
- * @lpfc_pnvme: Pointer to the driver's nvme instance data
- * @lpfc_nvme_lport: Pointer to the driver's local port data
- * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
+ * __lpfc_nvme_ls_req - Generic service routine to issue an NVME LS request
+ * @vport: The local port issuing the LS
+ * @ndlp: The remote port to send the LS to
+ * @pnvme_lsreq: Pointer to LS request structure from the transport
  *
- * Driver registers this routine to handle any link service request
- * from the nvme_fc transport to a remote nvme-aware port.
+ * Routine validates the ndlp, builds buffers and sends a GEN_REQUEST
+ * WQE to perform the LS operation.
  *
  * Return value :
  *   0 - Success
- *   TODO: What are the failure codes.
+ *   non-zero: various error codes, in form of -Exxx
 **/
-static int
-lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
-		 struct nvme_fc_remote_port *pnvme_rport,
-		 struct nvmefc_ls_req *pnvme_lsreq)
+int
+__lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		   struct nvmefc_ls_req *pnvme_lsreq,
+		   void (*gen_req_cmp)(struct lpfc_hba *phba,
+				       struct lpfc_iocbq *cmdwqe,
+				       struct lpfc_wcqe_complete *wcqe))
 {
-	int ret = 0;
-	struct lpfc_nvme_lport *lport;
-	struct lpfc_nvme_rport *rport;
-	struct lpfc_vport *vport;
-	struct lpfc_nodelist *ndlp;
-	struct ulp_bde64 *bpl;
 	struct lpfc_dmabuf *bmp;
+	struct ulp_bde64 *bpl;
+	int ret;
 	uint16_t ntype, nstate;
 
-	/* there are two dma buf in the request, actually there is one and
-	 * the second one is just the start address + cmd size.
-	 * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
-	 * in a lpfc_dmabuf struct. When freeing we just free the wrapper
-	 * because the nvem layer owns the data bufs.
-	 * We do not have to break these packets open, we don't care what is in
-	 * them. And we do not have to look at the resonse data, we only care
-	 * that we got a response. All of the caring is going to happen in the
-	 * nvme-fc layer.
-	 */
-
-	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
-	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
-	if (unlikely(!lport) || unlikely(!rport))
-		return -EINVAL;
-
-	vport = lport->vport;
-
-	if (vport->load_flag & FC_UNLOADING)
-		return -ENODEV;
-
-	/* Need the ndlp.  It is stored in the driver's rport. */
-	ndlp = rport->ndlp;
 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
-				 "6051 Remoteport x%px, rport has invalid ndlp. "
-				 "Failing LS Req\n", pnvme_rport);
+		lpfc_printf_vlog(vport, KERN_ERR,
+				 LOG_NVME_DISC | LOG_NODE | LOG_NVME_IOERR,
+				 "6051 NVMEx LS REQ: Bad NDLP x%px, Failing "
+				 "LS Req\n",
+				 ndlp);
 		return -ENODEV;
 	}
 
-	/* The remote node has to be a mapped nvme target or an
-	 * unmapped nvme initiator or it's an error.
-	 */
 	ntype = ndlp->nlp_type;
 	nstate = ndlp->nlp_state;
 	if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
 	    (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
-				 "6088 DID x%06x not ready for "
-				 "IO. State x%x, Type x%x\n",
-				 pnvme_rport->port_id,
-				 ndlp->nlp_state, ndlp->nlp_type);
+		lpfc_printf_vlog(vport, KERN_ERR,
+				 LOG_NVME_DISC | LOG_NODE | LOG_NVME_IOERR,
+				 "6088 NVMEx LS REQ: Fail DID x%06x not "
+				 "ready for IO. Type x%x, State x%x\n",
+				 ndlp->nlp_DID, ntype, nstate);
 		return -ENODEV;
 	}
-	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+
+	/*
+	 * there are two dma buf in the request, actually there is one and
+	 * the second one is just the start address + cmd size.
+	 * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
+	 * in a lpfc_dmabuf struct. When freeing we just free the wrapper
+	 * because the nvem layer owns the data bufs.
+	 * We do not have to break these packets open, we don't care what is
+	 * in them. And we do not have to look at the resonse data, we only
+	 * care that we got a response. All of the caring is going to happen
+	 * in the nvme-fc layer.
+	 */
+	bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
 	if (!bmp) {
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
-				 "6044 Could not find node for DID %x\n",
-				 pnvme_rport->port_id);
-		return 2;
+		lpfc_printf_vlog(vport, KERN_ERR,
+				 LOG_NVME_DISC | LOG_NVME_IOERR,
+				 "6044 NVMEx LS REQ: Could not alloc LS buf "
+				 "for DID %x\n",
+				 ndlp->nlp_DID);
+		return -ENOMEM;
 	}
-	INIT_LIST_HEAD(&bmp->list);
+
 	bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
 	if (!bmp->virt) {
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
-				 "6042 Could not find node for DID %x\n",
-				 pnvme_rport->port_id);
+		lpfc_printf_vlog(vport, KERN_ERR,
+				 LOG_NVME_DISC | LOG_NVME_IOERR,
+				 "6042 NVMEx LS REQ: Could not alloc mbuf "
+				 "for DID %x\n",
+				 ndlp->nlp_DID);
 		kfree(bmp);
-		return 3;
+		return -ENOMEM;
 	}
+
+	INIT_LIST_HEAD(&bmp->list);
+
 	bpl = (struct ulp_bde64 *)bmp->virt;
 	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
 	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
@@ -704,37 +725,69 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
 	bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
 	bpl->tus.w = le32_to_cpu(bpl->tus.w);
 
-	/* Expand print to include key fields. */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
-			 "6149 Issue LS Req to DID 0x%06x lport x%px, "
-			 "rport x%px lsreq x%px rqstlen:%d rsplen:%d "
-			 "%pad %pad\n",
-			 ndlp->nlp_DID, pnvme_lport, pnvme_rport,
-			 pnvme_lsreq, pnvme_lsreq->rqstlen,
-			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
-			 &pnvme_lsreq->rspdma);
-
-	atomic_inc(&lport->fc4NvmeLsRequests);
+			"6149 NVMEx LS REQ: Issue to DID 0x%06x lsreq x%px, "
+			"rqstlen:%d rsplen:%d %pad %pad\n",
+			ndlp->nlp_DID, pnvme_lsreq, pnvme_lsreq->rqstlen,
+			pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
+			&pnvme_lsreq->rspdma);
 
-	/* Hardcode the wait to 30 seconds.  Connections are failing otherwise.
-	 * This code allows it all to work.
-	 */
 	ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
-				pnvme_lsreq, lpfc_nvme_cmpl_gen_req,
-				ndlp, 2, 30, 0);
+				pnvme_lsreq, gen_req_cmp, ndlp, 2,
+				LPFC_NVME_LS_TIMEOUT, 0);
 	if (ret != WQE_SUCCESS) {
-		atomic_inc(&lport->xmt_ls_err);
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
-				 "6052 EXIT. issue ls wqe failed lport x%px, "
-				 "rport x%px lsreq x%px Status %x DID %x\n",
-				 pnvme_lport, pnvme_rport, pnvme_lsreq,
-				 ret, ndlp->nlp_DID);
+		lpfc_printf_vlog(vport, KERN_ERR,
+				 LOG_NVME_DISC | LOG_NVME_IOERR,
+				 "6052 NVMEx REQ: EXIT. issue ls wqe failed "
+				 "lsreq x%px Status %x DID %x\n",
+				 pnvme_lsreq, ret, ndlp->nlp_DID);
 		lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
 		kfree(bmp);
-		return ret;
+		return -EIO;
 	}
 
-	/* Stub in routine and return 0 for now. */
+	return 0;
+}
+
+/**
+ * lpfc_nvme_ls_req - Issue an NVME Link Service request
+ * @lpfc_nvme_lport: Transport localport that LS is to be issued from.
+ * @lpfc_nvme_rport: Transport remoteport that LS is to be sent to.
+ * @pnvme_lsreq - the transport nvme_ls_req structure for the LS
+ *
+ * Driver registers this routine to handle any link service request
+ * from the nvme_fc transport to a remote nvme-aware port.
+ *
+ * Return value :
+ *   0 - Success
+ *   non-zero: various error codes, in form of -Exxx
+ **/
+static int
+lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
+		 struct nvme_fc_remote_port *pnvme_rport,
+		 struct nvmefc_ls_req *pnvme_lsreq)
+{
+	struct lpfc_nvme_lport *lport;
+	struct lpfc_nvme_rport *rport;
+	struct lpfc_vport *vport;
+	int ret;
+
+	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
+	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
+	if (unlikely(!lport) || unlikely(!rport))
+		return -EINVAL;
+
+	vport = lport->vport;
+	if (vport->load_flag & FC_UNLOADING)
+		return -ENODEV;
+
+	atomic_inc(&lport->fc4NvmeLsRequests);
+
+	ret = __lpfc_nvme_ls_req(vport, rport->ndlp, pnvme_lsreq,
+				 lpfc_nvme_ls_req_cmp);
+	if (ret)
+		atomic_inc(&lport->xmt_ls_err);
+
 	return ret;
 }
...
@@ -79,6 +79,12 @@ struct lpfc_nvme_fcpreq_priv {
 	struct lpfc_io_buf *nvme_buf;
 };
 
+/*
+ * set NVME LS request timeouts to 30s. It is larger than the 2*R_A_TOV
+ * set by the spec, which appears to have issues with some devices.
+ */
+#define LPFC_NVME_LS_TIMEOUT		30
+
 #define LPFC_NVMET_DEFAULT_SEGS		(64 + 1)	/* 256K IOs */
 #define LPFC_NVMET_RQE_MIN_POST		128
@@ -224,6 +230,13 @@ struct lpfc_async_xchg_ctx {
 
 /* routines found in lpfc_nvme.c */
+int __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		struct nvmefc_ls_req *pnvme_lsreq,
+		void (*gen_req_cmp)(struct lpfc_hba *phba,
+				struct lpfc_iocbq *cmdwqe,
+				struct lpfc_wcqe_complete *wcqe));
+void __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
+		struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe);
 
 /* routines found in lpfc_nvmet.c */
 int lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
...