Commit 8c258641 authored by James Smart, committed by Martin K. Petersen

scsi: lpfc: NVME Target: Merge into FC discovery

NVME Target: Merge into FC discovery

Adds NVME PRLI handling and Nameserver registrations for NVME
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 2d7dbc4c
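
For orientation before the diff: besides the nvmet_support branching in the discovery state machine, the patch carries a vendor-specific "suppress response" handshake in the PLOGI service parameters (the serv_parm union and LPFC_VV_* defines in the lpfc_hw.h hunk, set in lpfc_issue_els_plogi/lpfc_els_rsp_acc and checked in lpfc_rcv_plogi/lpfc_cmpl_plogi_plogi_issue). Below is a minimal, self-contained sketch of that handshake, not part of the patch; the struct layout, helper names, and htonl/ntohl stand-ins are simplified assumptions, not the driver's own code.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>          /* htonl/ntohl stand in for cpu_to_be32/be32_to_cpu */

#define VV_EMLX_ID      0x454d4c58  /* ASCII "EMLX", as in LPFC_VV_EMLX_ID */
#define VV_SUPPRESS_RSP 1           /* as in LPFC_VV_SUPPRESS_RSP */

struct demo_serv_parm {                 /* simplified stand-in for struct serv_parm */
	uint8_t valid_vendor_ver_level; /* the real driver uses a common-service-parameter bit */
	union {
		uint8_t vendorVersion[16];
		struct {
			uint32_t vid;   /* big-endian on the wire */
			uint32_t flags;
		} vv;
	} un;
};

/* Sender side: mirrors the logic added to lpfc_issue_els_plogi(). */
static void build_sparm(struct demo_serv_parm *sp, int fw_supports_suppress_rsp)
{
	sp->valid_vendor_ver_level = 0;
	memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
	if (fw_supports_suppress_rsp) {
		sp->valid_vendor_ver_level = 1;
		sp->un.vv.vid = htonl(VV_EMLX_ID);
		sp->un.vv.flags = htonl(VV_SUPPRESS_RSP);
	}
}

/* Receiver side: mirrors the checks added to lpfc_rcv_plogi(). */
static int peer_wants_suppress_rsp(const struct demo_serv_parm *sp)
{
	uint32_t vid, flags;

	if (!sp->valid_vendor_ver_level)
		return 0;
	vid = ntohl(sp->un.vv.vid);
	flags = ntohl(sp->un.vv.flags);
	return (vid == VV_EMLX_ID) && (flags & VV_SUPPRESS_RSP);
}

int main(void)
{
	struct demo_serv_parm sp;

	build_sparm(&sp, 1);
	printf("suppress response negotiated: %d\n", peer_wants_suppress_rsp(&sp));
	return 0;
}
```

The vendor ID check is what gates the feature: the flag is honored only when both sides identify as Emulex-compatible endpoints, which is how the diff avoids misinterpreting another vendor's use of the same words.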
@@ -1433,7 +1433,13 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
 	if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
 	     (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) &&
 	    (context == FC_TYPE_NVME)) {
-		lpfc_nvme_update_localport(vport);
+		if ((vport == phba->pport) && phba->nvmet_support) {
+			CtReq->un.rff.fbits = (FC4_FEATURE_TARGET |
+					       FC4_FEATURE_NVME_DISC);
+			/* todo: update targetport attributes */
+		} else {
+			lpfc_nvme_update_localport(vport);
+		}
 		CtReq->un.rff.type_code = context;
 	} else if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
...
@@ -2001,11 +2001,21 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
 	sp->cmn.fcphHigh = FC_PH3;
 	sp->cmn.valid_vendor_ver_level = 0;
+	memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
 			      "Issue PLOGI: did:x%x",
 			      did, 0, 0);
+	/* If our firmware supports this feature, convey that
+	 * information to the target using the vendor specific field.
+	 */
+	if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
+		sp->cmn.valid_vendor_ver_level = 1;
+		sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
+		sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
+	}
 	phba->fc_stat.elsXmitPLOGI++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
 	ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
@@ -2207,7 +2217,13 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		    !phba->nvmet_support)
 			bf_set(prli_fba, npr_nvme, 1);
-		bf_set(prli_init, npr_nvme, 1);
+		if (phba->nvmet_support) {
+			bf_set(prli_tgt, npr_nvme, 1);
+			bf_set(prli_disc, npr_nvme, 1);
+		} else {
+			bf_set(prli_init, npr_nvme, 1);
+		}
 		npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
 		npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
 		elsiocb->iocb_flag |= LPFC_PRLI_NVME_REQ;
@@ -2619,8 +2635,11 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		phba->pport->fc_myDID = 0;
 		if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
-		    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
-			lpfc_nvme_update_localport(phba->pport);
+		    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+			if (!phba->nvmet_support)
+				lpfc_nvme_update_localport(phba->pport);
+			/* todo: tgt: update targetport attributes */
+		}
 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 		if (mbox) {
@@ -4074,10 +4093,25 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
 			       sizeof(struct lpfc_name));
 			memcpy(&sp->nodeName, &vport->fc_sparam.nodeName,
 			       sizeof(struct lpfc_name));
-		} else
+		} else {
 			memcpy(pcmd, &vport->fc_sparam,
 			       sizeof(struct serv_parm));
+			sp->cmn.valid_vendor_ver_level = 0;
+			memset(sp->un.vendorVersion, 0,
+			       sizeof(sp->un.vendorVersion));
+			/* If our firmware supports this feature, convey that
+			 * info to the target using the vendor specific field.
+			 */
+			if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
+				sp->cmn.valid_vendor_ver_level = 1;
+				sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
+				sp->un.vv.flags =
+					cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
+			}
+		}
 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
 			"Issue ACC FLOGI/PLOGI: did:x%x flg:x%x",
 			ndlp->nlp_DID, ndlp->nlp_flag, 0);
@@ -4397,7 +4431,22 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
 		bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
 		bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */
 		bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED);
-		bf_set(prli_init, npr_nvme, 1);
+		if (phba->nvmet_support) {
+			bf_set(prli_tgt, npr_nvme, 1);
+			bf_set(prli_disc, npr_nvme, 1);
+			if (phba->cfg_nvme_enable_fb) {
+				bf_set(prli_fba, npr_nvme, 1);
+				/* TBD. Target mode needs to post buffers
+				 * that support the configured first burst
+				 * byte size.
+				 */
+				bf_set(prli_fb_sz, npr_nvme,
+				       phba->cfg_nvmet_fb_size);
+			}
+		} else {
+			bf_set(prli_init, npr_nvme, 1);
+		}
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
 				 "6015 NVME issue PRLI ACC word1 x%08x "
@@ -5815,6 +5864,8 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
 		    (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
 		    !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
 			continue;
+		if (vport->phba->nvmet_support)
+			continue;
 		lpfc_disc_state_machine(vport, ndlp, NULL,
 					NLP_EVT_DEVICE_RECOVERY);
 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
...
@@ -910,8 +910,11 @@ lpfc_linkdown(struct lpfc_hba *phba)
 			vports[i]->fc_myDID = 0;
 			if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
-			    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
-				lpfc_nvme_update_localport(vports[i]);
+			    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+				if (!phba->nvmet_support)
+					lpfc_nvme_update_localport(vports[i]);
+				/* todo: tgt: update targetport attributes */
+			}
 		}
 	}
 	lpfc_destroy_vport_work_array(phba, vports);
@@ -3583,8 +3586,11 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 		vport->fc_myDID = 0;
 		if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
-		    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
-			lpfc_nvme_update_localport(vport);
+		    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+			if (!phba->nvmet_support)
+				lpfc_nvme_update_localport(vport);
+			/* todo: update targetport attributes */
+		}
 		goto out;
 	}
@@ -4175,6 +4181,11 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 				 */
 				vport->phba->nport_event_cnt++;
 				lpfc_nvme_register_port(vport, ndlp);
+			} else {
+				/* Just take an NDLP ref count since the
+				 * target does not register rports.
+				 */
+				lpfc_nlp_get(ndlp);
 			}
 		}
 	}
@@ -5096,6 +5107,8 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
 			return NULL;
 		lpfc_nlp_init(vport, ndlp, did);
 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		if (vport->phba->nvmet_support)
+			return ndlp;
 		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
 		spin_unlock_irq(shost->host_lock);
@@ -5104,6 +5117,8 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
 		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
 		if (!ndlp)
 			return NULL;
+		if (vport->phba->nvmet_support)
+			return ndlp;
 		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
 		spin_unlock_irq(shost->host_lock);
@@ -5123,6 +5138,8 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
 			 * delay timeout is not needed.
 			 */
 			lpfc_cancel_retry_delay_tmo(vport, ndlp);
+			if (vport->phba->nvmet_support)
+				return ndlp;
 			spin_lock_irq(shost->host_lock);
 			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
 			spin_unlock_irq(shost->host_lock);
@@ -5138,6 +5155,8 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
 		    ndlp->nlp_flag & NLP_RCV_PLOGI)
 			return NULL;
 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		if (vport->phba->nvmet_support)
+			return ndlp;
 		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
 		spin_unlock_irq(shost->host_lock);
...
@@ -515,7 +515,15 @@ struct serv_parm { /* Structure is in Big Endian format */
 	struct class_parms cls2;
 	struct class_parms cls3;
 	struct class_parms cls4;
-	uint8_t vendorVersion[16];
+	union {
+		uint8_t vendorVersion[16];
+		struct {
+			uint32_t vid;
+#define LPFC_VV_EMLX_ID		0x454d4c58	/* EMLX */
+			uint32_t flags;
+#define LPFC_VV_SUPPRESS_RSP	1
+		} vv;
+	} un;
 };

 /*
...
@@ -288,6 +288,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	uint32_t ed_tov;
 	LPFC_MBOXQ_t *mbox;
 	struct ls_rjt stat;
+	uint32_t vid, flag;
 	int rc;

 	memset(&stat, 0, sizeof (struct ls_rjt));
@@ -423,6 +424,15 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		lpfc_can_disctmo(vport);
 	}

+	ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP;
+	if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) &&
+	    sp->cmn.valid_vendor_ver_level) {
+		vid = be32_to_cpu(sp->un.vv.vid);
+		flag = be32_to_cpu(sp->un.vv.flags);
+		if ((vid == LPFC_VV_EMLX_ID) && (flag & LPFC_VV_SUPPRESS_RSP))
+			ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
+	}
+
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mbox)
 		goto out;
@@ -744,6 +754,14 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		}
 		if (npr->Retry)
 			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
+
+		/* If this driver is in nvme target mode, set the ndlp's fc4
+		 * type to NVME provided the PRLI response claims NVME FC4
+		 * type. Target mode does not issue gft_id so doesn't get
+		 * the fc4 type set until now.
+		 */
+		if ((phba->nvmet_support) && (npr->prliType == PRLI_NVME_TYPE))
+			ndlp->nlp_fc4_type |= NLP_FC4_NVME;
 	}
 	if (rport) {
 		/* We need to update the rport role values */
@@ -1041,6 +1059,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
 	struct lpfc_iocbq *cmdiocb, *rspiocb;
 	struct lpfc_dmabuf *pcmd, *prsp, *mp;
 	uint32_t *lp;
+	uint32_t vid, flag;
 	IOCB_t *irsp;
 	struct serv_parm *sp;
 	uint32_t ed_tov;
@@ -1109,6 +1128,16 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
 			ed_tov = (phba->fc_edtov + 999999) / 1000000;
 		}

+		ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP;
+		if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) &&
+		    sp->cmn.valid_vendor_ver_level) {
+			vid = be32_to_cpu(sp->un.vv.vid);
+			flag = be32_to_cpu(sp->un.vv.flags);
+			if ((vid == LPFC_VV_EMLX_ID) &&
+			    (flag & LPFC_VV_SUPPRESS_RSP))
+				ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
+		}
+
 		/*
 		 * Use the larger EDTOV
 		 * RATOV = 2 * EDTOV for pt-to-pt
@@ -1504,9 +1533,37 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
 		  uint32_t evt)
 {
 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+	struct ls_rjt stat;

-	/* Initiator mode. */
-	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+	if (vport->phba->nvmet_support) {
+		/* NVME Target mode. Handle and respond to the PRLI and
+		 * transition to UNMAPPED provided the RPI has completed
+		 * registration.
+		 */
+		if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
+			lpfc_rcv_prli(vport, ndlp, cmdiocb);
+			lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+		} else {
+			/* RPI registration has not completed. Reject the PRLI
+			 * to prevent an illegal state transition when the
+			 * rpi registration does complete.
+			 */
+			lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC,
+					 "6115 NVMET ndlp rpi %d state "
+					 "unknown, state x%x flags x%08x\n",
+					 ndlp->nlp_rpi, ndlp->nlp_state,
+					 ndlp->nlp_flag);
+			memset(&stat, 0, sizeof(struct ls_rjt));
+			stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+			stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
+			lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
+					    ndlp, NULL);
+		}
+	} else {
+		/* Initiator mode. */
+		lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+	}
 	return ndlp->nlp_state;
 }
@@ -1668,7 +1725,12 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
 		lpfc_issue_els_prli(vport, ndlp, 0);
 	} else {
-		/* Only Fabric ports should transition */
+		if ((vport->fc_flag & FC_PT2PT) && phba->nvmet_support)
+			phba->targetport->port_id = vport->fc_myDID;
+
+		/* Only Fabric ports should transition. NVME target
+		 * must complete PRLI.
+		 */
 		if (ndlp->nlp_type & NLP_FABRIC) {
 			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -1714,6 +1776,13 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
 	spin_lock_irq(shost->host_lock);
+
+	/* If we are a target we won't immediately transition into PRLI,
+	 * so if REG_LOGIN already completed we don't need to ignore it.
+	 */
+	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) ||
+	    !vport->phba->nvmet_support)
+		ndlp->nlp_flag |= NLP_IGNR_REG_CMPL;
 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
 	spin_unlock_irq(shost->host_lock);
 	lpfc_disc_set_adisc(vport, ndlp);
...