Commit 84318a9f authored by Quinn Tran's avatar Quinn Tran Committed by Martin K. Petersen

scsi: qla2xxx: edif: Add send, receive, and accept for auth_els

Some FC adapters from Marvell offer the ability to encrypt data in flight
(EDIF). This feature requires an application to act as an authenticator.

Add the ability for authentication application to send and retrieve
messages as part of the authentication process via existing
FC_BSG_HST_ELS_NOLOGIN BSG interface.

To send a message, application is expected to format the data in the AUTH
ELS format. Refer to FC-SP2 for details.

If a message was received, the application is required to reply with either
an LS_ACC or LS_RJT to complete the exchange using the same interface.
Otherwise, the remote device will treat it as a timeout.

Link: https://lore.kernel.org/r/20210624052606.21613-4-njavali@marvell.com
Reviewed-by: default avatarHannes Reinecke <hare@suse.de>
Reviewed-by: default avatarHimanshu Madhani <himanshu.madhani@oracle.com>
Co-developed-by: default avatarLarry Wisneski <Larry.Wisneski@marvell.com>
Signed-off-by: default avatarLarry Wisneski <Larry.Wisneski@marvell.com>
Co-developed-by: default avatarDuane Grigsby <duane.grigsby@marvell.com>
Signed-off-by: default avatarDuane Grigsby <duane.grigsby@marvell.com>
Co-developed-by: default avatarRick Hicksted Jr <rhicksted@marvell.com>
Signed-off-by: default avatarRick Hicksted Jr <rhicksted@marvell.com>
Signed-off-by: default avatarQuinn Tran <qutran@marvell.com>
Signed-off-by: default avatarNilesh Javali <njavali@marvell.com>
Signed-off-by: default avatarMartin K. Petersen <martin.petersen@oracle.com>
parent 7878f22a
......@@ -3107,6 +3107,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
qla2x00_wait_for_sess_deletion(vha);
qla_nvme_delete(vha);
qla_enode_stop(vha);
vha->flags.delete_progress = 1;
qlt_remove_target(ha, vha);
......
......@@ -27,6 +27,10 @@ void qla2x00_bsg_job_done(srb_t *sp, int res)
sp->free(sp);
ql_dbg(ql_dbg_user, sp->vha, 0x7009,
"%s: sp hdl %x, result=%x bsg ptr %p\n",
__func__, sp->handle, res, bsg_job);
bsg_reply->result = res;
bsg_job_done(bsg_job, bsg_reply->result,
bsg_reply->reply_payload_rcv_len);
......@@ -53,11 +57,19 @@ void qla2x00_bsg_sp_free(srb_t *sp)
bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
} else {
dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
if (sp->remap.remapped) {
dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
sp->remap.rsp.dma);
dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
sp->remap.req.dma);
} else {
dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
}
}
if (sp->type == SRB_CT_CMD ||
......@@ -266,6 +278,7 @@ qla2x00_process_els(struct bsg_job *bsg_job)
int req_sg_cnt, rsp_sg_cnt;
int rval = (DID_ERROR << 16);
uint16_t nextlid = 0;
uint32_t els_cmd = 0;
if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
rport = fc_bsg_to_rport(bsg_job);
......@@ -279,6 +292,9 @@ qla2x00_process_els(struct bsg_job *bsg_job)
vha = shost_priv(host);
ha = vha->hw;
type = "FC_BSG_HST_ELS_NOLOGIN";
els_cmd = bsg_request->rqst_data.h_els.command_code;
if (els_cmd == ELS_AUTH_ELS)
return qla_edif_process_els(vha, bsg_job);
}
if (!vha->flags.online) {
......@@ -2948,27 +2964,26 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
if (((sp->type == SRB_CT_CMD) ||
(sp->type == SRB_ELS_CMD_HST) ||
(sp->type == SRB_FXIOCB_BCMD))
&& (sp->u.bsg_job == bsg_job)) {
req->outstanding_cmds[cnt] = NULL;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (ha->isp_ops->abort_command(sp)) {
ql_log(ql_log_warn, vha, 0x7089,
"mbx abort_command "
"failed.\n");
bsg_reply->result = -EIO;
} else {
ql_dbg(ql_dbg_user, vha, 0x708a,
"mbx abort_command "
"success.\n");
bsg_reply->result = 0;
}
spin_lock_irqsave(&ha->hardware_lock, flags);
goto done;
if (sp &&
(sp->type == SRB_CT_CMD ||
sp->type == SRB_ELS_CMD_HST ||
sp->type == SRB_ELS_CMD_HST_NOLOGIN ||
sp->type == SRB_FXIOCB_BCMD) &&
sp->u.bsg_job == bsg_job) {
req->outstanding_cmds[cnt] = NULL;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (ha->isp_ops->abort_command(sp)) {
ql_log(ql_log_warn, vha, 0x7089,
"mbx abort_command failed.\n");
bsg_reply->result = -EIO;
} else {
ql_dbg(ql_dbg_user, vha, 0x708a,
"mbx abort_command success.\n");
bsg_reply->result = 0;
}
spin_lock_irqsave(&ha->hardware_lock, flags);
goto done;
}
}
}
......
......@@ -341,6 +341,13 @@ struct name_list_extended {
u32 size;
u8 sent;
};
/*
 * Preallocated, DMA-coherent LS_RJT payload used to reject inbound ELS
 * traffic (e.g. AUTH ELS) when it cannot be handled.
 */
struct els_reject {
struct fc_els_ls_rjt *c;	/* CPU pointer to the LS_RJT frame */
dma_addr_t cdma;	/* DMA address of @c */
u16 size;	/* allocated size of @c in bytes */
};
/*
* Timeout timer counts in seconds
*/
......@@ -618,6 +625,21 @@ struct srb_iocb {
#define SRB_PRLI_CMD 21
#define SRB_CTRL_VP 22
#define SRB_PRLO_CMD 23
#define SRB_SA_UPDATE 25
#define SRB_ELS_CMD_HST_NOLOGIN 26
#define SRB_SA_REPLACE 27
/*
 * Argument bundle describing an ELS pass-through IOCB; consumed by
 * qla_els_pt_iocb() to populate a struct els_entry_24xx.
 */
struct qla_els_pt_arg {
u8 els_opcode;	/* ELS command code to send */
u8 vp_idx;	/* virtual port index */
__le16 nport_handle;	/* firmware N_Port handle (little-endian) */
u16 control_flags;	/* IOCB control flags (CPU order; converted on use) */
__le32 rx_xchg_address;	/* receive exchange address (little-endian) */
port_id_t did;	/* destination port id */
u32 tx_len, tx_byte_count, rx_len, rx_byte_count;	/* DSD and payload sizes */
dma_addr_t tx_addr, rx_addr;	/* DMA addresses of the tx/rx buffers */
};
enum {
TYPE_SRB,
......@@ -631,6 +653,13 @@ struct iocb_resource {
u16 iocb_cnt;
};
/*
 * Per-srb state for BSG-originated commands that need extra arguments
 * beyond the bsg_job itself (currently only ELS pass-through).
 */
struct bsg_cmd {
struct bsg_job *bsg_job;	/* owning BSG request */
union {
struct qla_els_pt_arg els_arg;	/* args for SRB_ELS_CMD_HST_NOLOGIN */
} u;
};
typedef struct srb {
/*
* Do not move cmd_type field, it needs to
......@@ -663,7 +692,21 @@ typedef struct srb {
struct srb_iocb iocb_cmd;
struct bsg_job *bsg_job;
struct srb_cmd scmd;
struct bsg_cmd bsg_cmd;
} u;
struct {
bool remapped;
struct {
dma_addr_t dma;
void *buf;
uint len;
} req;
struct {
dma_addr_t dma;
void *buf;
uint len;
} rsp;
} remap;
/*
* Report completion status @res and call sp_put(@sp). @res is
* an NVMe status code, a SCSI result (e.g. DID_OK << 16) or a
......@@ -4640,8 +4683,12 @@ struct qla_hw_data {
struct qla_hw_data_stat stat;
pci_error_state_t pci_error_state;
u64 prev_cmd_cnt;
struct dma_pool *purex_dma_pool;
struct els_reject elsrej;
};
#define RX_ELS_SIZE (roundup(sizeof(struct enode) + ELS_MAX_PAYLOAD, SMP_CACHE_BYTES))
struct active_regions {
uint8_t global;
struct {
......@@ -5113,6 +5160,7 @@ enum nexus_wait_type {
WAIT_LUN,
};
#define QLA_SKIP_HANDLE QLA_TGT_SKIP_HANDLE
/* Refer to SNIA SFF 8247 */
struct sff_8247_a0 {
u8 txid; /* transceiver id */
......
This diff is collapsed.
......@@ -30,4 +30,38 @@ struct edif_dbell {
struct completion dbell;
};
#define MAX_PAYLOAD 1024
#define PUR_GET 1
/* Bookkeeping info carried by each enode entry. */
struct dinfo {
int nodecnt;	/* reference/usage count for this node */
int lstate;	/* node list state — presumably free vs. in-use; TODO confirm */
};
/* Metadata describing a received PUREX (unsolicited ELS) frame. */
struct pur_ninfo {
unsigned int pur_pend:1;	/* set while the event is pending delivery */
port_id_t pur_sid;	/* source port id of the frame */
port_id_t pur_did;	/* destination port id of the frame */
uint8_t vp_idx;	/* virtual port index the frame arrived on */
short pur_bytes_rcvd;	/* number of payload bytes received */
unsigned short pur_nphdl;	/* firmware N_Port handle */
unsigned int pur_rx_xchg_address;	/* receive exchange address for the reply */
};
/* A queued PUREX event: frame metadata plus the received payload. */
struct purexevent {
struct pur_ninfo pur_info;	/* frame metadata (sid/did/xchg addr, ...) */
unsigned char *msgp;	/* received ELS payload buffer */
u32 msgp_len;	/* length of @msgp in bytes */
};
#define N_UNDEF 0
#define N_PUREX 1
/*
 * Event node queued on a per-vha list; currently only carries PUREX
 * events (ntype == N_PUREX) for later retrieval by the application.
 */
struct enode {
struct list_head list;	/* linkage on the vha event list */
struct dinfo dinfo;	/* refcount / list-state bookkeeping */
uint32_t ntype;	/* event type: N_UNDEF or N_PUREX */
union {
struct purexevent purexinfo;	/* valid when ntype == N_PUREX */
} u;
};
#endif /* __QLA_EDIF_H */
......@@ -130,6 +130,8 @@ void qla24xx_free_purex_item(struct purex_item *item);
extern bool qla24xx_risc_firmware_invalid(uint32_t *);
void qla_init_iocb_limit(scsi_qla_host_t *);
int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsgjob);
const char *sc_to_str(uint16_t cmd);
/*
* Global Data in qla_os.c source file.
......@@ -280,7 +282,8 @@ extern int qla2x00_vp_abort_isp(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_iocb.c source file.
*/
void qla_els_pt_iocb(struct scsi_qla_host *vha,
struct els_entry_24xx *pkt, struct qla_els_pt_arg *a);
extern uint16_t qla2x00_calc_iocbs_32(uint16_t);
extern uint16_t qla2x00_calc_iocbs_64(uint16_t);
extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
......@@ -950,6 +953,7 @@ extern void qla_nvme_abort_process_comp_status
/* nvme.c */
void qla_nvme_unregister_remote_port(struct fc_port *fcport);
fc_port_t *qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id);
void qla_edb_stop(scsi_qla_host_t *vha);
int32_t qla_edif_app_mgmt(struct bsg_job *bsg_job);
void qla_enode_init(scsi_qla_host_t *vha);
......
......@@ -3053,6 +3053,43 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
return rval;
}
/*
 * qla_els_pt_iocb - Populate an ELS pass-through IOCB from @a.
 * @vha:      host the IOCB is issued on (supplies the source port id)
 * @els_iocb: request-queue IOCB slot to fill in
 * @a:        pre-built arguments (opcode, handles, DMA buffers, lengths)
 *
 * Fills every field of the 24xx ELS IOCB; multi-byte fields are converted
 * to little-endian for the firmware. It is assumed that the qpair lock is
 * held by the caller.
 */
void qla_els_pt_iocb(struct scsi_qla_host *vha,
struct els_entry_24xx *els_iocb,
struct qla_els_pt_arg *a)
{
els_iocb->entry_type = ELS_IOCB_TYPE;
els_iocb->entry_count = 1;
els_iocb->sys_define = 0;
els_iocb->entry_status = 0;
/* Caller may overwrite with a real handle (see qla2x00_start_sp). */
els_iocb->handle = QLA_SKIP_HANDLE;
els_iocb->nport_handle = a->nport_handle;
els_iocb->rx_xchg_address = a->rx_xchg_address;
els_iocb->tx_dsd_count = cpu_to_le16(1);
els_iocb->vp_index = a->vp_idx;
els_iocb->sof_type = EST_SOFI3;
els_iocb->rx_dsd_count = cpu_to_le16(0);
els_iocb->opcode = a->els_opcode;

/* DID is laid out al_pa/area/domain in bytes 0..2. */
els_iocb->d_id[0] = a->did.b.al_pa;
els_iocb->d_id[1] = a->did.b.area;
els_iocb->d_id[2] = a->did.b.domain;
/* For SID the byte order is different than DID */
els_iocb->s_id[1] = vha->d_id.b.al_pa;
els_iocb->s_id[2] = vha->d_id.b.area;
els_iocb->s_id[0] = vha->d_id.b.domain;

els_iocb->control_flags = cpu_to_le16(a->control_flags);

/* Transmit descriptor: single DSD covering the request payload. */
els_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count);
els_iocb->tx_len = cpu_to_le32(a->tx_len);
put_unaligned_le64(a->tx_addr, &els_iocb->tx_address);

/* Receive descriptor for the response payload. */
els_iocb->rx_byte_count = cpu_to_le32(a->rx_byte_count);
els_iocb->rx_len = cpu_to_le32(a->rx_len);
put_unaligned_le64(a->rx_addr, &els_iocb->rx_address);
}
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
......@@ -3751,6 +3788,10 @@ qla2x00_start_sp(srb_t *sp)
case SRB_ELS_CMD_HST:
qla24xx_els_iocb(sp, pkt);
break;
case SRB_ELS_CMD_HST_NOLOGIN:
qla_els_pt_iocb(sp->vha, pkt, &sp->u.bsg_cmd.u.els_arg);
((struct els_entry_24xx *)pkt)->handle = sp->handle;
break;
case SRB_CT_CMD:
IS_FWI2_CAPABLE(ha) ?
qla24xx_ct_iocb(sp, pkt) :
......
......@@ -1971,7 +1971,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
}
static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req,
struct sts_entry_24xx *pkt, int iocb_type)
{
struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt;
......@@ -1982,18 +1982,58 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
struct fc_bsg_reply *bsg_reply;
uint16_t comp_status;
uint32_t fw_status[3];
int res;
int res, logit = 1;
struct srb_iocb *els;
uint n;
scsi_qla_host_t *vha;
struct els_sts_entry_24xx *e = (struct els_sts_entry_24xx *)pkt;
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
sp = qla2x00_get_sp_from_handle(v, func, req, pkt);
if (!sp)
return;
bsg_job = sp->u.bsg_job;
vha = sp->vha;
type = NULL;
comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
fw_status[1] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
fw_status[2] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);
switch (sp->type) {
case SRB_ELS_CMD_RPT:
case SRB_ELS_CMD_HST:
type = "rpt hst";
break;
case SRB_ELS_CMD_HST_NOLOGIN:
type = "els";
{
struct els_entry_24xx *els = (void *)pkt;
struct qla_bsg_auth_els_request *p =
(struct qla_bsg_auth_els_request *)bsg_job->request;
ql_dbg(ql_dbg_user, vha, 0x700f,
"%s %s. portid=%02x%02x%02x status %x xchg %x bsg ptr %p\n",
__func__, sc_to_str(p->e.sub_cmd),
e->d_id[2], e->d_id[1], e->d_id[0],
comp_status, p->e.extra_rx_xchg_address, bsg_job);
if (!(le16_to_cpu(els->control_flags) & ECF_PAYLOAD_DESCR_MASK)) {
if (sp->remap.remapped) {
n = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt,
sp->remap.rsp.buf,
sp->remap.rsp.len);
ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x700e,
"%s: SG copied %x of %x\n",
__func__, n, sp->remap.rsp.len);
} else {
ql_dbg(ql_dbg_user, vha, 0x700f,
"%s: NOT REMAPPED (error)...!!!\n",
__func__);
}
}
}
break;
case SRB_CT_CMD:
type = "ct pass-through";
......@@ -2023,10 +2063,6 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
return;
}
comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
fw_status[1] = le32_to_cpu(ese->error_subcode_1);
fw_status[2] = le32_to_cpu(ese->error_subcode_2);
if (iocb_type == ELS_IOCB_TYPE) {
els = &sp->u.iocb_cmd;
els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]);
......@@ -2040,15 +2076,52 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
res = DID_OK << 16;
els->u.els_plogi.len = cpu_to_le16(le32_to_cpu(
ese->total_byte_count));
if (sp->remap.remapped &&
((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_ACC) {
ql_dbg(ql_dbg_user, vha, 0x503f,
"%s IOCB Done LS_ACC %02x%02x%02x -> %02x%02x%02x",
__func__, e->s_id[0], e->s_id[2], e->s_id[1],
e->d_id[2], e->d_id[1], e->d_id[0]);
logit = 0;
}
} else if (comp_status == CS_PORT_LOGGED_OUT) {
els->u.els_plogi.len = 0;
res = DID_IMM_RETRY << 16;
} else {
els->u.els_plogi.len = 0;
res = DID_ERROR << 16;
}
if (logit) {
if (sp->remap.remapped &&
((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) {
ql_dbg(ql_dbg_user, vha, 0x503f,
"%s IOCB Done LS_RJT hdl=%x comp_status=0x%x\n",
type, sp->handle, comp_status);
ql_dbg(ql_dbg_user, vha, 0x503f,
"subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
fw_status[1], fw_status[2],
le32_to_cpu(((struct els_sts_entry_24xx *)
pkt)->total_byte_count),
e->s_id[0], e->s_id[2], e->s_id[1],
e->d_id[2], e->d_id[1], e->d_id[0]);
} else {
ql_log(ql_log_info, vha, 0x503f,
"%s IOCB Done hdl=%x comp_status=0x%x\n",
type, sp->handle, comp_status);
ql_log(ql_log_info, vha, 0x503f,
"subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
fw_status[1], fw_status[2],
le32_to_cpu(((struct els_sts_entry_24xx *)
pkt)->total_byte_count),
e->s_id[0], e->s_id[2], e->s_id[1],
e->d_id[2], e->d_id[1], e->d_id[0]);
}
}
}
ql_dbg(ql_dbg_disc, vha, 0x503f,
"ELS IOCB Done -%s hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
type, sp->handle, comp_status, fw_status[1], fw_status[2],
le32_to_cpu(ese->total_byte_count));
goto els_ct_done;
}
......
......@@ -3460,6 +3460,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
probe_failed:
qla_enode_stop(base_vha);
if (base_vha->gnl.l) {
dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
base_vha->gnl.l, base_vha->gnl.ldma);
......@@ -3762,6 +3763,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
base_vha->gnl.l = NULL;
qla_enode_stop(base_vha);
vfree(base_vha->scan.l);
......@@ -4264,8 +4266,36 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
goto fail_flt_buffer;
}
/* allocate the purex dma pool */
ha->purex_dma_pool = dma_pool_create(name, &ha->pdev->dev,
MAX_PAYLOAD, 8, 0);
if (!ha->purex_dma_pool) {
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
"Unable to allocate purex_dma_pool.\n");
goto fail_flt;
}
ha->elsrej.size = sizeof(struct fc_els_ls_rjt) + 16;
ha->elsrej.c = dma_alloc_coherent(&ha->pdev->dev,
ha->elsrej.size, &ha->elsrej.cdma, GFP_KERNEL);
if (!ha->elsrej.c) {
ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff,
"Alloc failed for els reject cmd.\n");
goto fail_elsrej;
}
ha->elsrej.c->er_cmd = ELS_LS_RJT;
ha->elsrej.c->er_reason = ELS_RJT_BUSY;
ha->elsrej.c->er_explan = ELS_EXPL_UNAB_DATA;
return 0;
fail_elsrej:
dma_pool_destroy(ha->purex_dma_pool);
fail_flt:
dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
ha->flt, ha->flt_dma);
fail_flt_buffer:
dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
ha->sfp_data, ha->sfp_data_dma);
......@@ -4776,6 +4806,16 @@ qla2x00_mem_free(struct qla_hw_data *ha)
if (ha->init_cb)
dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
ha->init_cb, ha->init_cb_dma);
dma_pool_destroy(ha->purex_dma_pool);
ha->purex_dma_pool = NULL;
if (ha->elsrej.c) {
dma_free_coherent(&ha->pdev->dev, ha->elsrej.size,
ha->elsrej.c, ha->elsrej.cdma);
ha->elsrej.c = NULL;
}
ha->init_cb = NULL;
ha->init_cb_dma = 0;
......@@ -4837,6 +4877,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
spin_lock_init(&vha->cmd_list_lock);
init_waitqueue_head(&vha->fcport_waitQ);
init_waitqueue_head(&vha->vref_waitq);
qla_enode_init(vha);
vha->gnl.size = sizeof(struct get_name_list_extended) *
(ha->max_loop_id + 1);
......
......@@ -184,8 +184,7 @@ static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
return QLA_SUCCESS;
}
static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *vha,
be_id_t d_id)
{
struct scsi_qla_host *host;
......@@ -299,7 +298,7 @@ static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
goto abort;
}
host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
host = qla_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
if (host != NULL) {
ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
"Requeuing unknown ATIO_TYPE7 %p\n", u);
......@@ -348,7 +347,7 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
switch (atio->u.raw.entry_type) {
case ATIO_TYPE7:
{
struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
struct scsi_qla_host *host = qla_find_host_by_d_id(vha,
atio->u.isp24.fcp_hdr.d_id);
if (unlikely(NULL == host)) {
ql_dbg(ql_dbg_tgt, vha, 0xe03e,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment