Commit f83adb61 authored by Quinn Tran, committed by Christoph Hellwig

qla2xxx: T10-Dif: add T10-PI support

Add T10-DIF support for target mode to the qla2xxx driver.
The driver looks for the firmware attribute that indicates
support for this feature.  When the feature is present, the
capabilities are reported to the TCM layer.

Add the CTIO CRC2 IOCB used to build T10-DIF commands.
Add support routines to process the good and error cases.
Signed-off-by: Quinn Tran <quinn.tran@qlogic.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Signed-off-by: Giridhar Malavali <giridhar.malavali@qlogic.com>
Signed-off-by: Saurav Kashyap <saurav.kashyap@qlogic.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 5921cda6
......@@ -66,7 +66,7 @@
* | | | 0xd030-0xd0ff |
* | | | 0xd101-0xd1fe |
* | | | 0xd213-0xd2fe |
* | Target Mode | 0xe070 | 0xe021 |
* | Target Mode | 0xe078 | |
* | Target Mode Management | 0xf072 | 0xf002-0xf003 |
* | | | 0xf046-0xf049 |
* | Target Mode Task Management | 0x1000b | |
......
......@@ -1629,10 +1629,20 @@ typedef struct {
#define PO_MODE_DIF_PASS 2
#define PO_MODE_DIF_REPLACE 3
#define PO_MODE_DIF_TCP_CKSUM 6
#define PO_ENABLE_DIF_BUNDLING BIT_8
#define PO_ENABLE_INCR_GUARD_SEED BIT_3
#define PO_DISABLE_INCR_REF_TAG BIT_5
#define PO_DISABLE_GUARD_CHECK BIT_4
#define PO_DISABLE_INCR_REF_TAG BIT_5
#define PO_DIS_HEADER_MODE BIT_7
#define PO_ENABLE_DIF_BUNDLING BIT_8
#define PO_DIS_FRAME_MODE BIT_9
#define PO_DIS_VALD_APP_ESC BIT_10 /* Dis validation for escape tag/ffffh */
#define PO_DIS_VALD_APP_REF_ESC BIT_11
#define PO_DIS_APP_TAG_REPL BIT_12 /* disable APP Tag replacement */
#define PO_DIS_REF_TAG_REPL BIT_13
#define PO_DIS_APP_TAG_VALD BIT_14 /* disable APP Tag validation */
#define PO_DIS_REF_TAG_VALD BIT_15
/*
* ISP queue - 64-Bit addressing, continuation crc entry structure definition.
*/
......
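For illustration only (not part of this patch), the protection-option values above might be combined along the following lines for a DIF pass-through transfer; the local variable name is hypothetical:

	/* Hypothetical example: DIF pass-through mode with validation of
	 * the 0xffff application-tag escape values disabled. */
	uint16_t fw_prot_opts = PO_MODE_DIF_PASS |
				PO_DIS_VALD_APP_ESC |
				PO_DIS_VALD_APP_REF_ESC;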
......@@ -220,6 +220,13 @@ extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *,
uint32_t *, uint16_t, struct qla_tgt_cmd *);
extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
uint32_t *, uint16_t, struct qla_tgt_cmd *);
extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
uint32_t *, uint16_t, struct qla_tgt_cmd *);
/*
* Global Function Prototypes in qla_mbx.c source file.
......
......@@ -5,6 +5,7 @@
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_target.h"
/**
* qla24xx_calc_iocbs() - Determine number of Command Type 3 and
* Continuation Type 1 IOCBs to allocate.
......@@ -128,12 +129,20 @@ qla2x00_clear_loop_id(fc_port_t *fcport) {
}
static inline void
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp)
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp,
struct qla_tgt_cmd *tc)
{
struct dsd_dma *dsd_ptr, *tdsd_ptr;
struct crc_context *ctx;
ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);
if (sp)
ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);
else if (tc)
ctx = (struct crc_context *)tc->ctx;
else {
BUG();
return;
}
/* clean up allocated prev pool */
list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
......
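A minimal usage sketch (not taken from this patch): a target-mode completion path could release the DSD pool through the new third argument by passing a NULL srb and the qla_tgt_cmd instead, mirroring the initiator-side caller in qla_os.c shown further below:

	/* Hypothetical target-mode cleanup, assuming "cmd" is the qla_tgt_cmd
	 * whose crc_context DSD lists were allocated by the sglist walkers. */
	if (cmd->ctx_dsd_alloced)
		qla2x00_clean_dsd_pool(ha, NULL, cmd);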
......@@ -936,9 +936,9 @@ qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
return 1;
}
static int
int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
uint32_t *dsd, uint16_t tot_dsds)
uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
void *next_dsd;
uint8_t avail_dsds = 0;
......@@ -948,21 +948,35 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
uint32_t *cur_dsd = dsd;
uint16_t used_dsds = tot_dsds;
uint32_t prot_int;
uint32_t prot_int; /* protection interval */
uint32_t partial;
struct qla2_sgx sgx;
dma_addr_t sle_dma;
uint32_t sle_dma_len, tot_prot_dma_len = 0;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
prot_int = cmd->device->sector_size;
struct scsi_cmnd *cmd;
struct scsi_qla_host *vha;
memset(&sgx, 0, sizeof(struct qla2_sgx));
sgx.tot_bytes = scsi_bufflen(cmd);
sgx.cur_sg = scsi_sglist(cmd);
sgx.sp = sp;
sg_prot = scsi_prot_sglist(cmd);
if (sp) {
vha = sp->fcport->vha;
cmd = GET_CMD_SP(sp);
prot_int = cmd->device->sector_size;
sgx.tot_bytes = scsi_bufflen(cmd);
sgx.cur_sg = scsi_sglist(cmd);
sgx.sp = sp;
sg_prot = scsi_prot_sglist(cmd);
} else if (tc) {
vha = tc->vha;
prot_int = tc->blk_sz;
sgx.tot_bytes = tc->bufflen;
sgx.cur_sg = tc->sg;
sg_prot = tc->prot_sg;
} else {
BUG();
return 1;
}
while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
......@@ -995,10 +1009,18 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
return 1;
}
list_add_tail(&dsd_ptr->list,
&((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
if (sp) {
list_add_tail(&dsd_ptr->list,
&((struct crc_context *)
sp->u.scmd.ctx)->dsd_list);
sp->flags |= SRB_CRC_CTX_DSD_VALID;
} else {
list_add_tail(&dsd_ptr->list,
&(tc->ctx->dsd_list));
tc->ctx_dsd_alloced = 1;
}
sp->flags |= SRB_CRC_CTX_DSD_VALID;
/* add new list to cmd iocb or last list */
*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
......@@ -1033,21 +1055,35 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
return 0;
}
static int
int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
uint16_t tot_dsds)
uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
void *next_dsd;
uint8_t avail_dsds = 0;
uint32_t dsd_list_len;
struct dsd_dma *dsd_ptr;
struct scatterlist *sg;
struct scatterlist *sg, *sgl;
uint32_t *cur_dsd = dsd;
int i;
uint16_t used_dsds = tot_dsds;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct scsi_cmnd *cmd;
struct scsi_qla_host *vha;
if (sp) {
cmd = GET_CMD_SP(sp);
sgl = scsi_sglist(cmd);
vha = sp->fcport->vha;
} else if (tc) {
sgl = tc->sg;
vha = tc->vha;
} else {
BUG();
return 1;
}
scsi_for_each_sg(cmd, sg, tot_dsds, i) {
for_each_sg(sgl, sg, tot_dsds, i) {
dma_addr_t sle_dma;
/* Allocate additional continuation packets? */
......@@ -1076,10 +1112,17 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
return 1;
}
list_add_tail(&dsd_ptr->list,
&((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
if (sp) {
list_add_tail(&dsd_ptr->list,
&((struct crc_context *)
sp->u.scmd.ctx)->dsd_list);
sp->flags |= SRB_CRC_CTX_DSD_VALID;
sp->flags |= SRB_CRC_CTX_DSD_VALID;
} else {
list_add_tail(&dsd_ptr->list,
&(tc->ctx->dsd_list));
tc->ctx_dsd_alloced = 1;
}
/* add new list to cmd iocb or last list */
*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
......@@ -1102,23 +1145,37 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
return 0;
}
static int
int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
uint32_t *dsd,
uint16_t tot_dsds)
uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
void *next_dsd;
uint8_t avail_dsds = 0;
uint32_t dsd_list_len;
struct dsd_dma *dsd_ptr;
struct scatterlist *sg;
struct scatterlist *sg, *sgl;
int i;
struct scsi_cmnd *cmd;
uint32_t *cur_dsd = dsd;
uint16_t used_dsds = tot_dsds;
uint16_t used_dsds = tot_dsds;
struct scsi_qla_host *vha;
if (sp) {
cmd = GET_CMD_SP(sp);
sgl = scsi_prot_sglist(cmd);
vha = sp->fcport->vha;
} else if (tc) {
vha = tc->vha;
sgl = tc->prot_sg;
} else {
BUG();
return 1;
}
cmd = GET_CMD_SP(sp);
scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
ql_dbg(ql_dbg_tgt, vha, 0xe021,
"%s: enter\n", __func__);
for_each_sg(sgl, sg, tot_dsds, i) {
dma_addr_t sle_dma;
/* Allocate additional continuation packets? */
......@@ -1147,10 +1204,17 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
return 1;
}
list_add_tail(&dsd_ptr->list,
&((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
if (sp) {
list_add_tail(&dsd_ptr->list,
&((struct crc_context *)
sp->u.scmd.ctx)->dsd_list);
sp->flags |= SRB_CRC_CTX_DSD_VALID;
sp->flags |= SRB_CRC_CTX_DSD_VALID;
} else {
list_add_tail(&dsd_ptr->list,
&(tc->ctx->dsd_list));
tc->ctx_dsd_alloced = 1;
}
/* add new list to cmd iocb or last list */
*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
......@@ -1386,10 +1450,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
if (!bundling && tot_prot_dsds) {
if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
cur_dsd, tot_dsds))
cur_dsd, tot_dsds, NULL))
goto crc_queuing_error;
} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
(tot_dsds - tot_prot_dsds)))
(tot_dsds - tot_prot_dsds), NULL))
goto crc_queuing_error;
if (bundling && tot_prot_dsds) {
......@@ -1398,7 +1462,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
__constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
tot_prot_dsds))
tot_prot_dsds, NULL))
goto crc_queuing_error;
}
return QLA_SUCCESS;
......
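For contrast with the initiator-mode call sites above, which pass NULL for the new qla_tgt_cmd argument, a hedged sketch of how the target-mode CTIO CRC2 builder (part of the collapsed qla_target.c diff) is expected to invoke the now-exported walkers; the surrounding variables and the error label are illustrative, not copied from the patch:

	/* Hypothetical target-mode usage: sp is NULL and the qla_tgt_cmd
	 * supplies the data/protection scatterlists and block size instead. */
	if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
	    cmd->sg_cnt, cmd))
		goto crc_queuing_error;

	if (cmd->prot_sg_cnt &&
	    qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
	    cmd->prot_sg_cnt, cmd))
		goto crc_queuing_error;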
......@@ -2474,12 +2474,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
if (pkt->entry_status != 0) {
qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
(void)qlt_24xx_process_response_error(vha, pkt);
if (qlt_24xx_process_response_error(vha, pkt))
goto process_err;
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
wmb();
continue;
}
process_err:
switch (pkt->entry_type) {
case STATUS_TYPE:
......@@ -2496,10 +2498,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
qla24xx_logio_entry(vha, rsp->req,
(struct logio_entry_24xx *)pkt);
break;
case CT_IOCB_TYPE:
case CT_IOCB_TYPE:
qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
break;
case ELS_IOCB_TYPE:
case ELS_IOCB_TYPE:
qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
break;
case ABTS_RECV_24XX:
......@@ -2508,6 +2510,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
case ABTS_RESP_24XX:
case CTIO_TYPE7:
case NOTIFY_ACK_TYPE:
case CTIO_CRC2:
qlt_response_pkt_all_vps(vha, (response_t *)pkt);
break;
case MARKER_TYPE:
......
......@@ -616,7 +616,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
/* List assured to be having elements */
qla2x00_clean_dsd_pool(ha, sp);
qla2x00_clean_dsd_pool(ha, sp, NULL);
sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
}
......
This diff (qla_target.c) is collapsed and not shown here.
......@@ -293,6 +293,7 @@ struct ctio_to_2xxx {
#define CTIO_ABORTED 0x02
#define CTIO_INVALID_RX_ID 0x08
#define CTIO_TIMEOUT 0x0B
#define CTIO_DIF_ERROR 0x0C /* DIF error detected */
#define CTIO_LIP_RESET 0x0E
#define CTIO_TARGET_RESET 0x17
#define CTIO_PORT_UNAVAILABLE 0x28
......@@ -498,11 +499,12 @@ struct ctio7_from_24xx {
#define CTIO7_FLAGS_DONT_RET_CTIO BIT_8
#define CTIO7_FLAGS_STATUS_MODE_0 0
#define CTIO7_FLAGS_STATUS_MODE_1 BIT_6
#define CTIO7_FLAGS_STATUS_MODE_2 BIT_7
#define CTIO7_FLAGS_EXPLICIT_CONFORM BIT_5
#define CTIO7_FLAGS_CONFIRM_SATISF BIT_4
#define CTIO7_FLAGS_DSD_PTR BIT_2
#define CTIO7_FLAGS_DATA_IN BIT_1
#define CTIO7_FLAGS_DATA_OUT BIT_0
#define CTIO7_FLAGS_DATA_IN BIT_1 /* data to initiator */
#define CTIO7_FLAGS_DATA_OUT BIT_0 /* data from initiator */
#define ELS_PLOGI 0x3
#define ELS_FLOGI 0x4
......@@ -513,6 +515,68 @@ struct ctio7_from_24xx {
#define ELS_PDISC 0x50
#define ELS_ADISC 0x52
/*
*CTIO Type CRC_2 IOCB
*/
struct ctio_crc2_to_fw {
uint8_t entry_type; /* Entry type. */
#define CTIO_CRC2 0x7A
uint8_t entry_count; /* Entry count. */
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
uint16_t nport_handle; /* N_PORT handle. */
uint16_t timeout; /* Command timeout. */
uint16_t dseg_count; /* Data segment count. */
uint8_t vp_index;
uint8_t add_flags; /* additional flags */
#define CTIO_CRC2_AF_DIF_DSD_ENA BIT_3
uint8_t initiator_id[3]; /* initiator ID */
uint8_t reserved1;
uint32_t exchange_addr; /* rcv exchange address */
uint16_t reserved2;
uint16_t flags; /* refer to CTIO7 flags values */
uint32_t residual;
uint16_t ox_id;
uint16_t scsi_status;
uint32_t relative_offset;
uint32_t reserved5;
uint32_t transfer_length; /* total fc transfer length */
uint32_t reserved6;
uint32_t crc_context_address[2];/* Data segment address. */
uint16_t crc_context_len; /* Data segment length. */
uint16_t reserved_1; /* MUST be set to 0. */
} __packed;
/* CTIO Type CRC_x Status IOCB */
struct ctio_crc_from_fw {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
uint16_t status;
uint16_t timeout; /* Command timeout. */
uint16_t dseg_count; /* Data segment count. */
uint32_t reserved1;
uint16_t state_flags;
#define CTIO_CRC_SF_DIF_CHOPPED BIT_4
uint32_t exchange_address; /* rcv exchange address */
uint16_t reserved2;
uint16_t flags;
uint32_t resid_xfer_length;
uint16_t ox_id;
uint8_t reserved3[12];
uint16_t runt_guard; /* reported runt blk guard */
uint8_t actual_dif[8];
uint8_t expected_dif[8];
} __packed;
/*
* ISP queue - ABTS received/response entries structure definition for 24xx.
*/
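As an illustration only, a completion handler might recognize a firmware-detected DIF failure reported through struct ctio_crc_from_fw roughly as follows; the debug message code and the surrounding variables (pkt, vha) are hypothetical:

	/* Hypothetical check in CTIO completion handling: CTIO_DIF_ERROR means
	 * the firmware saw a guard/app/ref tag mismatch, and the actual vs.
	 * expected DIF tuples are carried in the status IOCB. */
	struct ctio_crc_from_fw *crc = (struct ctio_crc_from_fw *)pkt;

	if ((le16_to_cpu(crc->status) & 0xffff) == CTIO_DIF_ERROR) {
		ql_dbg(ql_dbg_tgt, vha, 0xffff,
		    "DIF error: runt guard 0x%x, actual %8phN, expected %8phN\n",
		    le16_to_cpu(crc->runt_guard),
		    crc->actual_dif, crc->expected_dif);
	}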
......@@ -641,6 +705,7 @@ struct qla_tgt_func_tmpl {
int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
unsigned char *, uint32_t, int, int, int);
void (*handle_data)(struct qla_tgt_cmd *);
void (*handle_dif_err)(struct qla_tgt_cmd *);
int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t,
uint32_t);
void (*free_cmd)(struct qla_tgt_cmd *);
......@@ -829,9 +894,9 @@ struct qla_tgt_sess {
};
struct qla_tgt_cmd {
struct se_cmd se_cmd;
struct qla_tgt_sess *sess;
int state;
struct se_cmd se_cmd;
struct work_struct free_work;
struct work_struct work;
/* Sense buffer that will be mapped into outgoing status */
......@@ -843,6 +908,7 @@ struct qla_tgt_cmd {
unsigned int free_sg:1;
unsigned int aborted:1; /* Needed in case of SRR */
unsigned int write_data_transferred:1;
unsigned int ctx_dsd_alloced:1;
struct scatterlist *sg; /* cmd data buffer SG vector */
int sg_cnt; /* SG segments count */
......@@ -857,6 +923,12 @@ struct qla_tgt_cmd {
struct scsi_qla_host *vha;
struct atio_from_isp atio;
/* t10dif */
struct scatterlist *prot_sg;
uint32_t prot_sg_cnt;
uint32_t blk_sz;
struct crc_context *ctx;
};
struct qla_tgt_sess_work_param {
......@@ -901,6 +973,10 @@ struct qla_tgt_prm {
int sense_buffer_len;
int residual;
int add_status_pkt;
/* dif */
struct scatterlist *prot_sg;
uint16_t prot_seg_cnt;
uint16_t tot_dsds;
};
struct qla_tgt_srr_imm {
......@@ -976,6 +1052,8 @@ extern void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *,
extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
extern int qlt_rdy_to_xfer_dif(struct qla_tgt_cmd *);
extern int qlt_xmit_response_dif(struct qla_tgt_cmd *, int, uint8_t);
extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
......
......@@ -472,6 +472,11 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
cmd->sg_cnt = se_cmd->t_data_nents;
cmd->sg = se_cmd->t_data_sg;
cmd->prot_sg_cnt = se_cmd->t_prot_nents;
cmd->prot_sg = se_cmd->t_prot_sg;
cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size;
se_cmd->pi_err = 0;
/*
* qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
* the SGL mappings into PCIe memory for incoming FCP WRITE data.
......@@ -567,8 +572,13 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
return;
}
transport_generic_request_failure(&cmd->se_cmd,
TCM_CHECK_CONDITION_ABORT_CMD);
if (cmd->se_cmd.pi_err)
transport_generic_request_failure(&cmd->se_cmd,
cmd->se_cmd.pi_err);
else
transport_generic_request_failure(&cmd->se_cmd,
TCM_CHECK_CONDITION_ABORT_CMD);
return;
}
......@@ -584,6 +594,27 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
queue_work(tcm_qla2xxx_free_wq, &cmd->work);
}
static void tcm_qla2xxx_handle_dif_work(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
/* Take an extra kref to prevent the cmd from being freed too early.
 * We need to wait for the SCSI status/check condition generated by
 * transport_generic_request_failure() to finish being sent.
 */
kref_get(&cmd->se_cmd.cmd_kref);
transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err);
}
/*
* Called from qla_target.c:qlt_do_ctio_completion()
*/
static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd)
{
INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work);
queue_work(tcm_qla2xxx_free_wq, &cmd->work);
}
/*
* Called from qla_target.c:qlt_issue_task_mgmt()
*/
......@@ -610,6 +641,11 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
cmd->sg = se_cmd->t_data_sg;
cmd->offset = 0;
cmd->prot_sg_cnt = se_cmd->t_prot_nents;
cmd->prot_sg = se_cmd->t_prot_sg;
cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size;
se_cmd->pi_err = 0;
/*
* Now queue completed DATA_IN the qla2xxx LLD and response ring
*/
......@@ -1600,6 +1636,7 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
.handle_cmd = tcm_qla2xxx_handle_cmd,
.handle_data = tcm_qla2xxx_handle_data,
.handle_dif_err = tcm_qla2xxx_handle_dif_err,
.handle_tmr = tcm_qla2xxx_handle_tmr,
.free_cmd = tcm_qla2xxx_free_cmd,
.free_mcmd = tcm_qla2xxx_free_mcmd,
......