Commit 78e9e350 authored by Justin Tee, committed by Martin K. Petersen

scsi: lpfc: Match lock ordering of lpfc_cmd->buf_lock and hbalock for abort paths

The SCSI version of the abort handler routine, lpfc_abort_handler(), takes
the lpfc_cmd->buf_lock and then phba->hbalock.

Apply the same lock ordering to the NVMe abort path, lpfc_nvme_fcp_abort(),
so that the two abort paths use consistent lock ordering logic.
Signed-off-by: Justin Tee <justin.tee@broadcom.com>
Link: https://lore.kernel.org/r/20230417191558.83100-4-justintee8345@gmail.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 97f97582
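
For reference, a minimal sketch of the lock ordering this patch establishes in the NVMe abort path: the per-IO buf_lock is taken first with interrupts disabled, the adapter-wide hbalock is nested inside it, and both are released in reverse order. The struct, function, and flag names below (struct hba, struct io_buf, abort_io, and the HBA_IOQ_FLUSH value) are illustrative placeholders rather than the actual lpfc definitions.

#include <linux/spinlock.h>

/* Placeholder types standing in for lpfc_io_buf and lpfc_hba. */
struct io_buf {
	spinlock_t buf_lock;		/* stands in for lpfc_io_buf::buf_lock */
};

struct hba {
	spinlock_t hbalock;		/* stands in for lpfc_hba::hbalock */
	unsigned long hba_flag;
};

#define HBA_IOQ_FLUSH	0x8000		/* placeholder bit value, for illustration only */

/* Sketch of the abort path's lock order; both locks are assumed to have
 * been initialized with spin_lock_init() by the owning driver code.
 */
static void abort_io(struct hba *phba, struct io_buf *buf)
{
	unsigned long flags;

	/* Outer lock first: the per-IO buf_lock, disabling local IRQs. */
	spin_lock_irqsave(&buf->buf_lock, flags);

	/* Inner lock second: the adapter-wide hbalock. */
	spin_lock(&phba->hbalock);

	if (phba->hba_flag & HBA_IOQ_FLUSH) {
		/* Bail out, dropping locks in reverse order of acquisition. */
		spin_unlock(&phba->hbalock);
		spin_unlock_irqrestore(&buf->buf_lock, flags);
		return;
	}

	/* ... issue the abort while both locks are held ... */

	spin_unlock(&phba->hbalock);
	spin_unlock_irqrestore(&buf->buf_lock, flags);
}
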
@@ -1893,38 +1893,38 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
 			 pnvme_rport->port_id,
 			 pnvme_fcreq);
 
-	/* If the hba is getting reset, this flag is set.  It is
-	 * cleared when the reset is complete and rings reestablished.
-	 */
-	spin_lock_irqsave(&phba->hbalock, flags);
-	/* driver queued commands are in process of being flushed */
-	if (phba->hba_flag & HBA_IOQ_FLUSH) {
-		spin_unlock_irqrestore(&phba->hbalock, flags);
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
-				 "6139 Driver in reset cleanup - flushing "
-				 "NVME Req now.  hba_flag x%x\n",
-				 phba->hba_flag);
-		return;
-	}
-
 	lpfc_nbuf = freqpriv->nvme_buf;
 	if (!lpfc_nbuf) {
-		spin_unlock_irqrestore(&phba->hbalock, flags);
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
 				 "6140 NVME IO req has no matching lpfc nvme "
 				 "io buffer.  Skipping abort req.\n");
 		return;
 	} else if (!lpfc_nbuf->nvmeCmd) {
-		spin_unlock_irqrestore(&phba->hbalock, flags);
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
 				 "6141 lpfc NVME IO req has no nvme_fcreq "
 				 "io buffer.  Skipping abort req.\n");
 		return;
 	}
-	nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
 
 	/* Guard against IO completion being called at same time */
-	spin_lock(&lpfc_nbuf->buf_lock);
+	spin_lock_irqsave(&lpfc_nbuf->buf_lock, flags);
+
+	/* If the hba is getting reset, this flag is set.  It is
+	 * cleared when the reset is complete and rings reestablished.
+	 */
+	spin_lock(&phba->hbalock);
+	/* driver queued commands are in process of being flushed */
+	if (phba->hba_flag & HBA_IOQ_FLUSH) {
+		spin_unlock(&phba->hbalock);
+		spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+				 "6139 Driver in reset cleanup - flushing "
+				 "NVME Req now.  hba_flag x%x\n",
+				 phba->hba_flag);
+		return;
+	}
+
+	nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
 
 	/*
 	 * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
@@ -1971,8 +1971,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
 	ret_val = lpfc_sli4_issue_abort_iotag(phba, nvmereq_wqe,
 					      lpfc_nvme_abort_fcreq_cmpl);
 
-	spin_unlock(&lpfc_nbuf->buf_lock);
-	spin_unlock_irqrestore(&phba->hbalock, flags);
+	spin_unlock(&phba->hbalock);
+	spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags);
 
 	/* Make sure HBA is alive */
 	lpfc_issue_hb_tmo(phba);
@@ -1998,8 +1998,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
 	return;
 
 out_unlock:
-	spin_unlock(&lpfc_nbuf->buf_lock);
-	spin_unlock_irqrestore(&phba->hbalock, flags);
+	spin_unlock(&phba->hbalock);
+	spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags);
 	return;
 }
 