Commit d51cf5bd authored by James Smart, committed by Martin K. Petersen

scsi: lpfc: Fix field overload in lpfc_iocbq data structure

The lpfc_iocbq data structure has void * context pointers that are overloaded
to carry as many as 8 different data types, which the driver resolves by
casting.  This patch removes the void * pointers and declares the specific
types the driver needs.  It also expands context_un to cover the more
seldom-used pointer types, saving structure bytes, and groups the u8 fields
together so they pack into the 8 bytes needed.  This work makes the lpfc_iocbq
data structure more strongly typed and keeps it from being allocated from the
512-byte slab.

[mkp: rolled in zeroday fix]

Link: https://lore.kernel.org/r/20220412222008.126521-21-jsmart2021@gmail.com
Reported-by: kernel test robot <lkp@intel.com>
Co-developed-by: Justin Tee <justin.tee@broadcom.com>
Signed-off-by: Justin Tee <justin.tee@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 1045592f
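
To make the refactoring pattern described above concrete, here is a minimal,
self-contained C sketch. The simplified structs and helper names below are
illustrative only, not the driver's actual definitions (for instance, the real
lpfc_iocbq keeps ndlp outside the union and carries many more members):

struct lpfc_dmabuf;	/* opaque here; real definitions live in the driver */
struct lpfc_nodelist;
struct bsg_job_data;

/* Before: opaque context pointers, resolved only by casting at each use. */
struct old_iocbq {
	void *context1;		/* ndlp, io_buf, dd_data, ... depending on path */
	void *context2;		/* usually the command DMA buffer */
	void *context3;		/* usually the BPL DMA buffer */
};

static struct lpfc_dmabuf *old_cmd_buf(struct old_iocbq *q)
{
	return (struct lpfc_dmabuf *)q->context2;	/* type known by convention only */
}

/* After: the common pointers are strongly typed; seldom-used ones share a union. */
struct new_iocbq {
	struct lpfc_dmabuf *cmd_dmabuf;
	struct lpfc_dmabuf *rsp_dmabuf;
	struct lpfc_dmabuf *bpl_dmabuf;
	struct lpfc_nodelist *ndlp;
	union {
		struct bsg_job_data *dd_data;
		/* ... other seldom-used pointer types ... */
	} context_un;
};

static struct lpfc_dmabuf *new_cmd_buf(struct new_iocbq *q)
{
	return q->cmd_dmabuf;	/* no cast; misuse is caught at compile time */
}

The diff below applies this substitution across the bsg, ELS discovery, NVMe,
and SCSI paths: each context1/context2/context3 access becomes a typed ndlp,
io_buf, cmd_dmabuf, rsp_dmabuf, bpl_dmabuf, or context_un.* access.
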
......@@ -310,7 +310,7 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
int rc = 0;
u32 ulp_status, ulp_word4, total_data_placed;
dd_data = cmdiocbq->context1;
dd_data = cmdiocbq->context_un.dd_data;
/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
......@@ -328,10 +328,10 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
spin_unlock_irqrestore(&phba->hbalock, flags);
iocb = &dd_data->context_un.iocb;
ndlp = iocb->cmdiocbq->context_un.ndlp;
ndlp = iocb->cmdiocbq->ndlp;
rmp = iocb->rmp;
cmp = cmdiocbq->context2;
bmp = cmdiocbq->context3;
cmp = cmdiocbq->cmd_dmabuf;
bmp = cmdiocbq->bpl_dmabuf;
ulp_status = get_job_ulpstatus(phba, rspiocbq);
ulp_word4 = get_job_word4(phba, rspiocbq);
total_data_placed = get_job_data_placed(phba, rspiocbq);
......@@ -470,14 +470,12 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
cmdiocbq->num_bdes = num_entry;
cmdiocbq->vport = phba->pport;
cmdiocbq->context2 = cmp;
cmdiocbq->context3 = bmp;
cmdiocbq->cmd_dmabuf = cmp;
cmdiocbq->bpl_dmabuf = bmp;
cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
cmdiocbq->cmd_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
cmdiocbq->context1 = dd_data;
cmdiocbq->context2 = cmp;
cmdiocbq->context3 = bmp;
cmdiocbq->context_un.dd_data = dd_data;
dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
......@@ -495,8 +493,8 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
readl(phba->HCregaddr); /* flush */
}
cmdiocbq->context_un.ndlp = lpfc_nlp_get(ndlp);
if (!cmdiocbq->context_un.ndlp) {
cmdiocbq->ndlp = lpfc_nlp_get(ndlp);
if (!cmdiocbq->ndlp) {
rc = -ENODEV;
goto free_rmp;
}
......@@ -573,9 +571,9 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
int rc = 0;
u32 ulp_status, ulp_word4, total_data_placed;
dd_data = cmdiocbq->context1;
dd_data = cmdiocbq->context_un.dd_data;
ndlp = dd_data->context_un.iocb.ndlp;
cmdiocbq->context1 = ndlp;
cmdiocbq->ndlp = ndlp;
/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
......@@ -595,7 +593,7 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
ulp_status = get_job_ulpstatus(phba, rspiocbq);
ulp_word4 = get_job_word4(phba, rspiocbq);
total_data_placed = get_job_data_placed(phba, rspiocbq);
pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
pcmd = cmdiocbq->cmd_dmabuf;
prsp = (struct lpfc_dmabuf *)pcmd->list.next;
/* Copy the completed job data or determine the job status if job is
......@@ -711,8 +709,8 @@ lpfc_bsg_rport_els(struct bsg_job *job)
/* Transfer the request payload to allocated command dma buffer */
sg_copy_to_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt,
((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
job->request_payload.payload_len);
cmdiocbq->cmd_dmabuf->virt,
cmdsize);
rpi = ndlp->nlp_rpi;
......@@ -722,8 +720,8 @@ lpfc_bsg_rport_els(struct bsg_job *job)
else
cmdiocbq->iocb.ulpContext = rpi;
cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
cmdiocbq->context1 = dd_data;
cmdiocbq->context_un.ndlp = ndlp;
cmdiocbq->context_un.dd_data = dd_data;
cmdiocbq->ndlp = ndlp;
cmdiocbq->cmd_cmpl = lpfc_bsg_rport_els_cmp;
dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
......@@ -742,8 +740,8 @@ lpfc_bsg_rport_els(struct bsg_job *job)
readl(phba->HCregaddr); /* flush */
}
cmdiocbq->context1 = lpfc_nlp_get(ndlp);
if (!cmdiocbq->context1) {
cmdiocbq->ndlp = lpfc_nlp_get(ndlp);
if (!cmdiocbq->ndlp) {
rc = -EIO;
goto linkdown_err;
}
......@@ -917,8 +915,8 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct ulp_bde64 *bde;
dma_addr_t dma_addr;
int i;
struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
struct lpfc_dmabuf *bdeBuf1 = piocbq->cmd_dmabuf;
struct lpfc_dmabuf *bdeBuf2 = piocbq->bpl_dmabuf;
struct lpfc_sli_ct_request *ct_req;
struct bsg_job *job = NULL;
struct fc_bsg_reply *bsg_reply;
......@@ -985,9 +983,8 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
list_for_each_entry(iocbq, &head, list) {
size = 0;
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
bdeBuf1 = iocbq->context2;
bdeBuf2 = iocbq->context3;
bdeBuf1 = iocbq->cmd_dmabuf;
bdeBuf2 = iocbq->bpl_dmabuf;
}
if (phba->sli_rev == LPFC_SLI_REV4)
bde_count = iocbq->wcqe_cmpl.word3;
......@@ -1384,7 +1381,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
int rc = 0;
u32 ulp_status, ulp_word4;
dd_data = cmdiocbq->context1;
dd_data = cmdiocbq->context_un.dd_data;
/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
......@@ -1401,8 +1398,8 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
spin_unlock_irqrestore(&phba->hbalock, flags);
ndlp = dd_data->context_un.iocb.ndlp;
cmp = cmdiocbq->context2;
bmp = cmdiocbq->context3;
cmp = cmdiocbq->cmd_dmabuf;
bmp = cmdiocbq->bpl_dmabuf;
ulp_status = get_job_ulpstatus(phba, rspiocbq);
ulp_word4 = get_job_word4(phba, rspiocbq);
......@@ -1529,10 +1526,10 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
ctiocb->cmd_flag |= LPFC_IO_LIBDFC;
ctiocb->vport = phba->pport;
ctiocb->context1 = dd_data;
ctiocb->context2 = cmp;
ctiocb->context3 = bmp;
ctiocb->context_un.ndlp = ndlp;
ctiocb->context_un.dd_data = dd_data;
ctiocb->cmd_dmabuf = cmp;
ctiocb->bpl_dmabuf = bmp;
ctiocb->ndlp = ndlp;
ctiocb->cmd_cmpl = lpfc_issue_ct_rsp_cmp;
dd_data->type = TYPE_IOCB;
......@@ -2671,7 +2668,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
ctreq->CommandResponse.bits.Size = 0;
cmdiocbq->context3 = dmabuf;
cmdiocbq->bpl_dmabuf = dmabuf;
cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
cmdiocbq->vport = phba->pport;
cmdiocbq->cmd_cmpl = NULL;
......@@ -3231,7 +3228,7 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
cmdiocbq->cmd_flag |= LPFC_IO_LOOPBACK;
cmdiocbq->vport = phba->pport;
cmdiocbq->cmd_cmpl = NULL;
cmdiocbq->context3 = txbmp;
cmdiocbq->bpl_dmabuf = txbmp;
if (phba->sli_rev < LPFC_SLI_REV4) {
lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp, 0, txxri,
......@@ -3384,7 +3381,7 @@ lpfc_bsg_get_dfc_rev(struct bsg_job *job)
* This is completion handler function for mailbox commands issued from
* lpfc_bsg_issue_mbox function. This function is called by the
* mailbox event handler function with no lock held. This function
* will wake up thread waiting on the wait queue pointed by context1
* will wake up thread waiting on the wait queue pointed by dd_data
* of the mailbox.
**/
static void
......@@ -5034,9 +5031,9 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
unsigned int rsp_size;
int rc = 0;
dd_data = cmdiocbq->context1;
cmp = cmdiocbq->context2;
bmp = cmdiocbq->context3;
dd_data = cmdiocbq->context_un.dd_data;
cmp = cmdiocbq->cmd_dmabuf;
bmp = cmdiocbq->bpl_dmabuf;
menlo = &dd_data->context_un.menlo;
rmp = menlo->rmp;
rsp = &rspiocbq->iocb;
......@@ -5233,9 +5230,9 @@ lpfc_menlo_cmd(struct bsg_job *job)
/* We want the firmware to timeout before we do */
cmd->ulpTimeout = MENLO_TIMEOUT - 5;
cmdiocbq->cmd_cmpl = lpfc_bsg_menlo_cmd_cmp;
cmdiocbq->context1 = dd_data;
cmdiocbq->context2 = cmp;
cmdiocbq->context3 = bmp;
cmdiocbq->context_un.dd_data = dd_data;
cmdiocbq->cmd_dmabuf = cmp;
cmdiocbq->bpl_dmabuf = bmp;
if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
cmd->ulpPU = MENLO_PU; /* 3 */
......
......@@ -5156,7 +5156,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
if (pring->ringno == LPFC_ELS_RING) {
switch (ulp_command) {
case CMD_GEN_REQUEST64_CR:
if (iocb->context_un.ndlp == ndlp)
if (iocb->ndlp == ndlp)
return 1;
fallthrough;
case CMD_ELS_REQUEST64_CR:
......@@ -5164,7 +5164,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
return 1;
fallthrough;
case CMD_XMIT_ELS_RSP64_CX:
if (iocb->context1 == (uint8_t *) ndlp)
if (iocb->ndlp == ndlp)
return 1;
}
} else if (pring->ringno == LPFC_FCP_RING) {
......@@ -6099,7 +6099,7 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
*/
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
if (iocb->context1 != ndlp)
if (iocb->ndlp != ndlp)
continue;
ulp_command = get_job_cmnd(phba, iocb);
......@@ -6113,7 +6113,7 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
/* Next check the txcmplq */
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
if (iocb->context1 != ndlp)
if (iocb->ndlp != ndlp)
continue;
ulp_command = get_job_cmnd(phba, iocb);
......
......@@ -4323,9 +4323,10 @@ lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"6074 Current allocated XRI sgl count:%d, "
"maximum XRI count:%d\n",
"maximum XRI count:%d els_xri_cnt:%d\n\n",
phba->sli4_hba.io_xri_cnt,
phba->sli4_hba.io_xri_max);
phba->sli4_hba.io_xri_max,
els_xri_cnt);
cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
......@@ -4464,12 +4465,11 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
}
pwqeq->sli4_lxritag = lxri;
pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
pwqeq->context1 = lpfc_ncmd;
/* Initialize local short-hand pointers. */
lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
lpfc_ncmd->cur_iocbq.io_buf = lpfc_ncmd;
spin_lock_init(&lpfc_ncmd->buf_lock);
/* add the nvme buffer to a post list */
......@@ -4478,7 +4478,9 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
}
lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
"6114 Allocate %d out of %d requested new NVME "
"buffers\n", bcnt, num_to_alloc);
"buffers of size x%zu bytes\n", bcnt, num_to_alloc,
sizeof(*lpfc_ncmd));
/* post the list of nvme buffer sgls to port if available */
if (!list_empty(&post_nblist))
......
......@@ -173,9 +173,9 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
void *ptr = NULL;
u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
pcmd = cmdiocb->cmd_dmabuf;
/* For lpfc_els_abort, context2 could be zero'ed to delay
/* For lpfc_els_abort, cmd_dmabuf could be zero'ed to delay
* freeing associated memory till after ABTS completes.
*/
if (pcmd) {
......@@ -343,7 +343,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
u32 remote_did;
memset(&stat, 0, sizeof (struct ls_rjt));
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
pcmd = cmdiocb->cmd_dmabuf;
lp = (uint32_t *) pcmd->virt;
sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
if (wwn_to_u64(sp->portName.u.wwn) == 0) {
......@@ -716,7 +716,7 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint32_t *lp;
uint32_t cmd;
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
pcmd = cmdiocb->cmd_dmabuf;
lp = (uint32_t *) pcmd->virt;
cmd = *lp++;
......@@ -924,7 +924,7 @@ lpfc_rcv_prli_support_check(struct lpfc_vport *vport,
uint32_t *payload;
uint32_t cmd;
payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
payload = cmdiocb->cmd_dmabuf->virt;
cmd = *payload;
if (vport->phba->nvmet_support) {
/* Must be a NVME PRLI */
......@@ -961,9 +961,9 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct fc_rport *rport = ndlp->rport;
u32 roles;
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
pcmd = cmdiocb->cmd_dmabuf;
lp = (uint32_t *)pcmd->virt;
npr = (PRLI *)((uint8_t *)lp + sizeof(uint32_t));
if ((npr->prliType == PRLI_FCP_TYPE) ||
(npr->prliType == PRLI_NVME_TYPE)) {
......@@ -1224,7 +1224,7 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb = arg;
struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
uint32_t *lp = (uint32_t *) pcmd->virt;
struct serv_parm *sp = (struct serv_parm *) (lp + 1);
struct ls_rjt stat;
......@@ -1345,7 +1345,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
u32 did;
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
rspiocb = cmdiocb->rsp_iocb;
ulp_status = get_job_ulpstatus(phba, rspiocb);
......@@ -1357,7 +1357,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
if (ulp_status)
goto out;
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
pcmd = cmdiocb->cmd_dmabuf;
prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
if (!prsp)
......@@ -1703,7 +1703,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
u32 ulp_status;
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
rspiocb = cmdiocb->rsp_iocb;
ulp_status = get_job_ulpstatus(phba, rspiocb);
......@@ -2158,7 +2158,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
u32 ulp_status;
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
rspiocb = cmdiocb->rsp_iocb;
ulp_status = get_job_ulpstatus(phba, rspiocb);
......@@ -2778,7 +2778,7 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
u32 ulp_status;
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
rspiocb = cmdiocb->rsp_iocb;
ulp_status = get_job_ulpstatus(phba, rspiocb);
......@@ -2797,7 +2797,7 @@ lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
u32 ulp_status;
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
rspiocb = cmdiocb->rsp_iocb;
ulp_status = get_job_ulpstatus(phba, rspiocb);
......@@ -2833,7 +2833,7 @@ lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
u32 ulp_status;
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
rspiocb = cmdiocb->rsp_iocb;
ulp_status = get_job_ulpstatus(phba, rspiocb);
......
......@@ -319,8 +319,10 @@ __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp;
uint32_t status;
pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
pnvme_lsreq = cmdwqe->context_un.nvme_lsreq;
ndlp = cmdwqe->ndlp;
buf_ptr = cmdwqe->bpl_dmabuf;
status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
......@@ -330,16 +332,16 @@ __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
cmdwqe->sli4_xritag, status,
(wcqe->parameter & 0xffff),
cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);
cmdwqe, pnvme_lsreq, cmdwqe->bpl_dmabuf,
ndlp);
lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x parm x%x\n",
cmdwqe->sli4_xritag, status, wcqe->parameter);
if (cmdwqe->context3) {
buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
if (buf_ptr) {
lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
kfree(buf_ptr);
cmdwqe->context3 = NULL;
cmdwqe->bpl_dmabuf = NULL;
}
if (pnvme_lsreq->done)
pnvme_lsreq->done(pnvme_lsreq, status);
......@@ -351,7 +353,7 @@ __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
cmdwqe->sli4_xritag, status);
if (ndlp) {
lpfc_nlp_put(ndlp);
cmdwqe->context1 = NULL;
cmdwqe->ndlp = NULL;
}
lpfc_sli_release_iocbq(phba, cmdwqe);
}
......@@ -407,19 +409,19 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
/* Initialize only 64 bytes */
memset(wqe, 0, sizeof(union lpfc_wqe));
genwqe->context3 = (uint8_t *)bmp;
genwqe->bpl_dmabuf = bmp;
genwqe->cmd_flag |= LPFC_IO_NVME_LS;
/* Save for completion so we can release these resources */
genwqe->context1 = lpfc_nlp_get(ndlp);
if (!genwqe->context1) {
genwqe->ndlp = lpfc_nlp_get(ndlp);
if (!genwqe->ndlp) {
dev_warn(&phba->pcidev->dev,
"Warning: Failed node ref, not sending LS_REQ\n");
lpfc_sli_release_iocbq(phba, genwqe);
return 1;
}
genwqe->context2 = (uint8_t *)pnvme_lsreq;
genwqe->context_un.nvme_lsreq = pnvme_lsreq;
/* Fill in payload, bp points to frame payload */
if (!tmo)
......@@ -730,7 +732,7 @@ __lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_lock_irq(&phba->hbalock);
spin_lock(&pring->ring_lock);
list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
if (wqe->context2 == pnvme_lsreq) {
if (wqe->context_un.nvme_lsreq == pnvme_lsreq) {
wqe->cmd_flag |= LPFC_DRIVER_ABORTED;
foundit = true;
break;
......@@ -929,8 +931,7 @@ static void
lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
struct lpfc_iocbq *pwqeOut)
{
struct lpfc_io_buf *lpfc_ncmd =
(struct lpfc_io_buf *)pwqeIn->context1;
struct lpfc_io_buf *lpfc_ncmd = pwqeIn->io_buf;
struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl;
struct lpfc_vport *vport = pwqeIn->vport;
struct nvmefc_fcp_req *nCmd;
......@@ -2717,7 +2718,7 @@ lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
struct lpfc_wcqe_complete wcqe;
struct lpfc_wcqe_complete *wcqep = &wcqe;
lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1;
lpfc_ncmd = pwqeIn->io_buf;
if (!lpfc_ncmd) {
lpfc_sli_release_iocbq(phba, pwqeIn);
return;
......
......@@ -295,7 +295,7 @@ void
__lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_iocbq *rspwqe)
{
struct lpfc_async_xchg_ctx *axchg = cmdwqe->context2;
struct lpfc_async_xchg_ctx *axchg = cmdwqe->context_un.axchg;
struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp;
uint32_t status, result;
......@@ -317,9 +317,9 @@ __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
"6038 NVMEx LS rsp cmpl: %d %d oxid x%x\n",
status, result, axchg->oxid);
lpfc_nlp_put(cmdwqe->context1);
cmdwqe->context2 = NULL;
cmdwqe->context3 = NULL;
lpfc_nlp_put(cmdwqe->ndlp);
cmdwqe->context_un.axchg = NULL;
cmdwqe->bpl_dmabuf = NULL;
lpfc_sli_release_iocbq(phba, cmdwqe);
ls_rsp->done(ls_rsp);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
......@@ -728,7 +728,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
int id;
#endif
ctxp = cmdwqe->context2;
ctxp = cmdwqe->context_un.axchg;
ctxp->flag &= ~LPFC_NVME_IO_INP;
rsp = &ctxp->hdlrctx.fcp_req;
......@@ -903,7 +903,7 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
/* Save numBdes for bpl2sgl */
nvmewqeq->num_bdes = 1;
nvmewqeq->hba_wqidx = 0;
nvmewqeq->context3 = &dmabuf;
nvmewqeq->bpl_dmabuf = &dmabuf;
dmabuf.virt = &bpl;
bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
......@@ -917,7 +917,7 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
*/
nvmewqeq->cmd_cmpl = xmt_ls_rsp_cmp;
nvmewqeq->context2 = axchg;
nvmewqeq->context_un.axchg = axchg;
lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n",
axchg->oxid, nvmewqeq->hba_wqidx, ls_rsp->rsplen);
......@@ -925,7 +925,7 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq);
/* clear to be sure there's no reference */
nvmewqeq->context3 = NULL;
nvmewqeq->bpl_dmabuf = NULL;
if (rc == WQE_SUCCESS) {
/*
......@@ -942,7 +942,7 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
rc = -ENXIO;
lpfc_nlp_put(nvmewqeq->context1);
lpfc_nlp_put(nvmewqeq->ndlp);
out_free_buf:
/* Give back resources */
......@@ -1075,7 +1075,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
}
nvmewqeq->cmd_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
nvmewqeq->context2 = ctxp;
nvmewqeq->context_un.axchg = ctxp;
nvmewqeq->cmd_flag |= LPFC_IO_NVMET;
ctxp->wqeq->hba_wqidx = rsp->hwqid;
......@@ -1119,8 +1119,8 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
ctxp->oxid, rc);
ctxp->wqeq->hba_wqidx = 0;
nvmewqeq->context2 = NULL;
nvmewqeq->context3 = NULL;
nvmewqeq->context_un.axchg = NULL;
nvmewqeq->bpl_dmabuf = NULL;
rc = -EBUSY;
aerr:
return rc;
......@@ -1590,7 +1590,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
/* Initialize WQE */
memset(wqe, 0, sizeof(union lpfc_wqe));
ctx_buf->iocbq->context1 = NULL;
ctx_buf->iocbq->cmd_dmabuf = NULL;
spin_lock(&phba->sli4_hba.sgl_list_lock);
ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
spin_unlock(&phba->sli4_hba.sgl_list_lock);
......@@ -2025,7 +2025,7 @@ lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
&wq->wqfull_list, list) {
if (ctxp) {
/* Checking for a specific IO to flush */
if (nvmewqeq->context2 == ctxp) {
if (nvmewqeq->context_un.axchg == ctxp) {
list_del(&nvmewqeq->list);
spin_unlock_irqrestore(&pring->ring_lock,
iflags);
......@@ -2071,7 +2071,7 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
list);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
ctxp = (struct lpfc_async_xchg_ctx *)nvmewqeq->context2;
ctxp = nvmewqeq->context_un.axchg;
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
spin_lock_irqsave(&pring->ring_lock, iflags);
if (rc == -EBUSY) {
......@@ -2617,10 +2617,10 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
ctxp->wqeq = nvmewqe;
/* prevent preparing wqe with NULL ndlp reference */
nvmewqe->context1 = lpfc_nlp_get(ndlp);
if (nvmewqe->context1 == NULL)
nvmewqe->ndlp = lpfc_nlp_get(ndlp);
if (!nvmewqe->ndlp)
goto nvme_wqe_free_wqeq_exit;
nvmewqe->context2 = ctxp;
nvmewqe->context_un.axchg = ctxp;
wqe = &nvmewqe->wqe;
memset(wqe, 0, sizeof(union lpfc_wqe));
......@@ -2692,8 +2692,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
return nvmewqe;
nvme_wqe_free_wqeq_exit:
nvmewqe->context2 = NULL;
nvmewqe->context3 = NULL;
nvmewqe->context_un.axchg = NULL;
nvmewqe->ndlp = NULL;
nvmewqe->bpl_dmabuf = NULL;
lpfc_sli_release_iocbq(phba, nvmewqe);
return NULL;
}
......@@ -2995,7 +2996,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
nvmewqe->retry = 1;
nvmewqe->vport = phba->pport;
nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
nvmewqe->context1 = ndlp;
nvmewqe->ndlp = ndlp;
for_each_sg(rsp->sg, sgel, nsegs, i) {
physaddr = sg_dma_address(sgel);
......@@ -3053,7 +3054,7 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
bool released = false;
struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
ctxp = cmdwqe->context2;
ctxp = cmdwqe->context_un.axchg;
result = wcqe->parameter;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
......@@ -3084,8 +3085,8 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);
cmdwqe->context2 = NULL;
cmdwqe->context3 = NULL;
cmdwqe->rsp_dmabuf = NULL;
cmdwqe->bpl_dmabuf = NULL;
/*
* if transport has released ctx, then can reuse it. Otherwise,
* will be recycled by transport release call.
......@@ -3123,7 +3124,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
bool released = false;
struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
ctxp = cmdwqe->context2;
ctxp = cmdwqe->context_un.axchg;
result = wcqe->parameter;
if (!ctxp) {
......@@ -3169,8 +3170,8 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);
cmdwqe->context2 = NULL;
cmdwqe->context3 = NULL;
cmdwqe->rsp_dmabuf = NULL;
cmdwqe->bpl_dmabuf = NULL;
/*
* if transport has released ctx, then can reuse it. Otherwise,
* will be recycled by transport release call.
......@@ -3203,7 +3204,7 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
uint32_t result;
struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
ctxp = cmdwqe->context2;
ctxp = cmdwqe->context_un.axchg;
result = wcqe->parameter;
if (phba->nvmet_support) {
......@@ -3234,8 +3235,8 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
}
cmdwqe->context2 = NULL;
cmdwqe->context3 = NULL;
cmdwqe->rsp_dmabuf = NULL;
cmdwqe->bpl_dmabuf = NULL;
lpfc_sli_release_iocbq(phba, cmdwqe);
kfree(ctxp);
}
......@@ -3322,9 +3323,9 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
OTHER_COMMAND);
abts_wqeq->vport = phba->pport;
abts_wqeq->context1 = ndlp;
abts_wqeq->context2 = ctxp;
abts_wqeq->context3 = NULL;
abts_wqeq->ndlp = ndlp;
abts_wqeq->context_un.axchg = ctxp;
abts_wqeq->bpl_dmabuf = NULL;
abts_wqeq->num_bdes = 0;
/* hba_wqidx should already be setup from command we are aborting */
abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
......@@ -3477,7 +3478,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
abts_wqeq->cmd_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
abts_wqeq->cmd_flag |= LPFC_IO_NVME;
abts_wqeq->context2 = ctxp;
abts_wqeq->context_un.axchg = ctxp;
abts_wqeq->vport = phba->pport;
if (!ctxp->hdwq)
ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
......@@ -3630,8 +3631,8 @@ lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
out:
if (tgtp)
atomic_inc(&tgtp->xmt_abort_rsp_error);
abts_wqeq->context2 = NULL;
abts_wqeq->context3 = NULL;
abts_wqeq->rsp_dmabuf = NULL;
abts_wqeq->bpl_dmabuf = NULL;
lpfc_sli_release_iocbq(phba, abts_wqeq);
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6056 Failed to Issue ABTS. Status x%x\n", rc);
......
......@@ -433,7 +433,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
iocb->ulpClass = CLASS3;
psb->status = IOSTAT_SUCCESS;
/* Put it back into the SCSI buffer list */
psb->cur_iocbq.context1 = psb;
psb->cur_iocbq.io_buf = psb;
spin_lock_init(&psb->buf_lock);
lpfc_release_scsi_buf_s3(phba, psb);
......@@ -4082,8 +4082,7 @@ static void
lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
struct lpfc_iocbq *pwqeOut)
{
struct lpfc_io_buf *lpfc_cmd =
(struct lpfc_io_buf *)pwqeIn->context1;
struct lpfc_io_buf *lpfc_cmd = pwqeIn->io_buf;
struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl;
struct lpfc_vport *vport = pwqeIn->vport;
struct lpfc_rport_data *rdata;
......@@ -4421,7 +4420,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct lpfc_iocbq *pIocbOut)
{
struct lpfc_io_buf *lpfc_cmd =
(struct lpfc_io_buf *) pIocbIn->context1;
(struct lpfc_io_buf *) pIocbIn->io_buf;
struct lpfc_vport *vport = pIocbIn->vport;
struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
struct lpfc_nodelist *pnode = rdata->pnode;
......@@ -4744,7 +4743,7 @@ static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport,
piocbq->iocb.ulpFCP2Rcvy = 0;
piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
piocbq->context1 = lpfc_cmd;
piocbq->io_buf = lpfc_cmd;
if (!piocbq->cmd_cmpl)
piocbq->cmd_cmpl = lpfc_scsi_cmd_iocb_cmpl;
piocbq->iocb.ulpTimeout = tmo;
......@@ -4856,8 +4855,7 @@ static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
pwqeq->vport = vport;
pwqeq->vport = vport;
pwqeq->context1 = lpfc_cmd;
pwqeq->io_buf = lpfc_cmd;
pwqeq->hba_wqidx = lpfc_cmd->hdwq_no;
pwqeq->cmd_cmpl = lpfc_fcp_io_cmd_wqe_cmpl;
......@@ -5098,8 +5096,7 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocbq,
struct lpfc_iocbq *rspiocbq)
{
struct lpfc_io_buf *lpfc_cmd =
(struct lpfc_io_buf *) cmdiocbq->context1;
struct lpfc_io_buf *lpfc_cmd = cmdiocbq->io_buf;
if (lpfc_cmd)
lpfc_release_scsi_buf(phba, lpfc_cmd);
return;
......@@ -5916,7 +5913,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
goto out_unlock_ring;
}
BUG_ON(iocb->context1 != lpfc_cmd);
WARN_ON(iocb->io_buf != lpfc_cmd);
/* abort issued in recovery is still in progress */
if (iocb->cmd_flag & LPFC_DRIVER_ABORTED) {
......
......@@ -77,11 +77,15 @@ struct lpfc_iocbq {
u32 unsol_rcv_len; /* Receive len in usol path */
uint8_t num_bdes;
uint8_t abort_bls; /* ABTS by initiator or responder */
u8 abort_rctl; /* ACC or RJT flag */
uint8_t priority; /* OAS priority */
uint8_t retry; /* retry counter for IOCB cmd - if needed */
/* Pack the u8's together and make them module-4. */
u8 num_bdes; /* Number of BDEs */
u8 abort_bls; /* ABTS by initiator or responder */
u8 abort_rctl; /* ACC or RJT flag */
u8 priority; /* OAS priority */
u8 retry; /* retry counter for IOCB cmd - if needed */
u8 rsvd1; /* Pad for u32 */
u8 rsvd2; /* Pad for u32 */
u8 rsvd3; /* Pad for u32 */
u32 cmd_flag;
#define LPFC_IO_LIBDFC 1 /* libdfc iocb */
......@@ -116,18 +120,22 @@ struct lpfc_iocbq {
uint32_t drvrTimeout; /* driver timeout in seconds */
struct lpfc_vport *vport;/* virtual port pointer */
void *context1; /* caller context information */
void *context2; /* caller context information */
void *context3; /* caller context information */
struct lpfc_dmabuf *cmd_dmabuf;
struct lpfc_dmabuf *rsp_dmabuf;
struct lpfc_dmabuf *bpl_dmabuf;
uint32_t event_tag; /* LA Event tag */
union {
wait_queue_head_t *wait_queue;
struct lpfc_iocbq *rsp_iocb;
struct lpfcMboxq *mbox;
struct lpfc_nodelist *ndlp;
struct lpfc_node_rrq *rrq;
struct nvmefc_ls_req *nvme_lsreq;
struct lpfc_async_xchg_ctx *axchg;
struct bsg_job_data *dd_data;
} context_un;
struct lpfc_io_buf *io_buf;
struct lpfc_iocbq *rsp_iocb;
struct lpfc_nodelist *ndlp;
union lpfc_vmid_tag vmid_tag;
void (*fabric_cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
struct lpfc_iocbq *rsp);
......