Commit 8cb2049c authored by Arun Easi's avatar Arun Easi Committed by James Bottomley

[SCSI] qla2xxx: T10 DIF - Handle uninitialized sectors.

Driver needs to update protection bytes for uninitialized sectors as they are
not DMA-d.
Signed-off-by: default avatarArun Easi <arun.easi@qlogic.com>
Reviewed-by: default avatarAndrew Vasquez <andrew.vasquez@qlogic.com>
Signed-off-by: default avatarChad Dupuis <chad.dupuis@qlogic.com>
Signed-off-by: default avatarJames Bottomley <JBottomley@Parallels.com>
parent 01350d05
...@@ -1788,11 +1788,14 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) ...@@ -1788,11 +1788,14 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
if (ha->fw_attributes & BIT_4) { if (ha->fw_attributes & BIT_4) {
int prot = 0;
vha->flags.difdix_supported = 1; vha->flags.difdix_supported = 1;
ql_dbg(ql_dbg_user, vha, 0x7082, ql_dbg(ql_dbg_user, vha, 0x7082,
"Registered for DIF/DIX type 1 and 3 protection.\n"); "Registered for DIF/DIX type 1 and 3 protection.\n");
if (ql2xenabledif == 1)
prot = SHOST_DIX_TYPE0_PROTECTION;
scsi_host_set_prot(vha->host, scsi_host_set_prot(vha->host,
SHOST_DIF_TYPE1_PROTECTION prot | SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE2_PROTECTION
| SHOST_DIF_TYPE3_PROTECTION | SHOST_DIF_TYPE3_PROTECTION
| SHOST_DIX_TYPE1_PROTECTION | SHOST_DIX_TYPE1_PROTECTION
......
...@@ -537,6 +537,11 @@ struct sts_entry_24xx { ...@@ -537,6 +537,11 @@ struct sts_entry_24xx {
/* /*
* If DIF Error is set in comp_status, these additional fields are * If DIF Error is set in comp_status, these additional fields are
* defined: * defined:
*
* !!! NOTE: Firmware sends expected/actual DIF data in big endian
* format; but all of the "data" field gets swab32-d in the beginning
* of qla2x00_status_entry().
*
* &data[10] : uint8_t report_runt_bg[2]; - computed guard * &data[10] : uint8_t report_runt_bg[2]; - computed guard
* &data[12] : uint8_t actual_dif[8]; - DIF Data received * &data[12] : uint8_t actual_dif[8]; - DIF Data received
* &data[20] : uint8_t expected_dif[8]; - DIF Data computed * &data[20] : uint8_t expected_dif[8]; - DIF Data computed
......
...@@ -102,3 +102,24 @@ qla2x00_set_fcport_state(fc_port_t *fcport, int state) ...@@ -102,3 +102,24 @@ qla2x00_set_fcport_state(fc_port_t *fcport, int state)
fcport->d_id.b.al_pa); fcport->d_id.b.al_pa);
} }
} }
/*
 * Decide whether the HBA should perform T10-DIF error checking for the
 * given SCSI protection operation.
 *
 * @op: protection op from scsi_get_prot_op() (SCSI_PROT_* value).
 *
 * Returns 1 when HBA-side checking is wanted, 0 otherwise.  The
 * ql2xenablehba_err_chk module parameter selects the policy level:
 * level >= 1 enables checking for READ_STRIP/WRITE_INSERT ops,
 * level >= 2 additionally enables it for READ_PASS/WRITE_PASS ops;
 * READ_INSERT/WRITE_STRIP ops are always checked.
 */
static inline int
qla2x00_hba_err_chk_enabled(unsigned char op)
{
	/* HBA generates/verifies the tags itself: always check. */
	if (op == SCSI_PROT_READ_INSERT || op == SCSI_PROT_WRITE_STRIP)
		return 1;

	/* Checking is opt-in via the module parameter for these ops. */
	if (op == SCSI_PROT_READ_STRIP || op == SCSI_PROT_WRITE_INSERT)
		return ql2xenablehba_err_chk >= 1;

	if (op == SCSI_PROT_READ_PASS || op == SCSI_PROT_WRITE_PASS)
		return ql2xenablehba_err_chk >= 2;

	return 0;
}
...@@ -717,12 +717,17 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt, ...@@ -717,12 +717,17 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
unsigned char op = scsi_get_prot_op(cmd); unsigned char op = scsi_get_prot_op(cmd);
switch (scsi_get_prot_type(cmd)) { switch (scsi_get_prot_type(cmd)) {
/* For TYPE 0 protection: no checking */
case SCSI_PROT_DIF_TYPE0: case SCSI_PROT_DIF_TYPE0:
pkt->ref_tag_mask[0] = 0x00; /*
pkt->ref_tag_mask[1] = 0x00; * No check for ql2xenablehba_err_chk, as it would be an
pkt->ref_tag_mask[2] = 0x00; * I/O error if hba tag generation is not done.
pkt->ref_tag_mask[3] = 0x00; */
pkt->ref_tag = cpu_to_le32((uint32_t)
(0xffffffff & scsi_get_lba(cmd)));
pkt->ref_tag_mask[0] = 0xff;
pkt->ref_tag_mask[1] = 0xff;
pkt->ref_tag_mask[2] = 0xff;
pkt->ref_tag_mask[3] = 0xff;
break; break;
/* /*
...@@ -730,7 +735,7 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt, ...@@ -730,7 +735,7 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
* match LBA in CDB + N * match LBA in CDB + N
*/ */
case SCSI_PROT_DIF_TYPE2: case SCSI_PROT_DIF_TYPE2:
if (!ql2xenablehba_err_chk) if (!qla2x00_hba_err_chk_enabled(op))
break; break;
if (scsi_prot_sg_count(cmd)) { if (scsi_prot_sg_count(cmd)) {
...@@ -763,7 +768,7 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt, ...@@ -763,7 +768,7 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
* 16 bit app tag. * 16 bit app tag.
*/ */
case SCSI_PROT_DIF_TYPE1: case SCSI_PROT_DIF_TYPE1:
if (!ql2xenablehba_err_chk) if (!qla2x00_hba_err_chk_enabled(op))
break; break;
if (protcnt && (op == SCSI_PROT_WRITE_STRIP || if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
...@@ -798,7 +803,161 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt, ...@@ -798,7 +803,161 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
scsi_get_prot_type(cmd), cmd); scsi_get_prot_type(cmd), cmd);
} }
/*
 * Walk state for carving a DMA-mapped scatter/gather list into
 * fixed-size chunks (one protection interval at a time); consumed by
 * qla24xx_get_one_block_sg().
 */
struct qla2_sgx {
	dma_addr_t dma_addr; /* OUT */
	uint32_t dma_len; /* OUT */

	uint32_t tot_bytes; /* IN */
	struct scatterlist *cur_sg; /* IN */

	/* for book keeping, bzero on initial invocation */
	uint32_t bytes_consumed; /* bytes already taken from cur_sg */
	uint32_t num_bytes; /* total bytes handed out so far */
	uint32_t tot_partial; /* bytes carried into the current block */

	/* for debugging */
	uint32_t num_sg; /* s/g entries fully consumed */
	srb_t *sp;
};
/*
 * Hand out the next DMA piece of the data s/g list, never crossing a
 * blk_sz (protection interval) boundary.
 *
 * @blk_sz:  protection interval size in bytes.
 * @sgx:     walk state; dma_addr/dma_len are filled in on success.
 * @partial: set to 0 when this piece completes a full block, 1 when the
 *           current s/g entry ran out before the block boundary.
 *
 * Returns 1 while there is data left to hand out, 0 once the whole
 * buffer (sgx->tot_bytes) has been consumed.
 */
static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
	uint32_t *partial)
{
	struct scatterlist *cur;
	uint32_t carried, seg_len, remaining;

	/* Entire buffer already handed out? */
	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	cur = sgx->cur_sg;
	carried = sgx->tot_partial;
	seg_len = sg_dma_len(cur);
	remaining = seg_len - sgx->bytes_consumed;

	sgx->dma_addr = sg_dma_address(cur) + sgx->bytes_consumed;

	if (carried + remaining >= blk_sz) {
		/* This piece reaches the block boundary. */
		sgx->dma_len = blk_sz - carried;
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		/* Segment exhausted before the block boundary. */
		sgx->dma_len = remaining;
		sgx->tot_partial += remaining;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	/* Current s/g entry fully used up: advance to the next one. */
	if (sgx->bytes_consumed == seg_len) {
		sgx->num_sg++;
		sgx->cur_sg = sg_next(cur);
		sgx->bytes_consumed = 0;
	}

	return 1;
}
/*
 * Build the firmware DSD (data segment descriptor) list for a DIF command
 * whose protection data is NOT bundled: each sector_size chunk of the data
 * s/g list is emitted as its own DSD, immediately followed by a DSD for
 * the matching 8-byte protection tuple from the protection s/g list.
 *
 * @ha:       adapter data.
 * @sp:       the request; sp->cmd supplies the data/protection s/g lists.
 * @dsd:      where to start writing DSD entries (cmd IOCB or cont. list).
 * @tot_dsds: total DSD budget used to size continuation lists.
 *
 * Returns 0 on success, 1 on allocation failure.  DSD lists already
 * linked onto the crc_context's dsd_list are released later by
 * sp_free_dma() (see the inline comment below).
 */
static int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	uint32_t prot_int;
	uint32_t partial;
	struct qla2_sgx sgx;
	dma_addr_t sle_dma;
	uint32_t sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd = sp->cmd;

	prot_int = cmd->device->sector_size;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	sgx.tot_bytes = scsi_bufflen(sp->cmd);
	sgx.cur_sg = scsi_sglist(sp->cmd);
	sgx.sp = sp;

	sg_prot = scsi_prot_sglist(sp->cmd);

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			/*
			 * The length word must be little-endian like the
			 * address words above (was stored host-endian,
			 * which is wrong on big-endian hosts).
			 */
			*cur_dsd++ = cpu_to_le32(dsd_list_len);
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
static int static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
uint16_t tot_dsds) uint16_t tot_dsds)
...@@ -981,7 +1140,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, ...@@ -981,7 +1140,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
struct scsi_cmnd *cmd; struct scsi_cmnd *cmd;
struct scatterlist *cur_seg; struct scatterlist *cur_seg;
int sgc; int sgc;
uint32_t total_bytes; uint32_t total_bytes = 0;
uint32_t data_bytes; uint32_t data_bytes;
uint32_t dif_bytes; uint32_t dif_bytes;
uint8_t bundling = 1; uint8_t bundling = 1;
...@@ -1023,8 +1182,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, ...@@ -1023,8 +1182,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
__constant_cpu_to_le16(CF_READ_DATA); __constant_cpu_to_le16(CF_READ_DATA);
} }
tot_prot_dsds = scsi_prot_sg_count(cmd); if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) ||
if (!tot_prot_dsds) (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) ||
(scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) ||
(scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT))
bundling = 0; bundling = 0;
/* Allocate CRC context from global pool */ /* Allocate CRC context from global pool */
...@@ -1107,15 +1268,28 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, ...@@ -1107,15 +1268,28 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
/* Compute dif len and adjust data len to include protection */ /* Compute dif len and adjust data len to include protection */
total_bytes = data_bytes;
dif_bytes = 0; dif_bytes = 0;
blk_size = cmd->device->sector_size; blk_size = cmd->device->sector_size;
if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { dif_bytes = (data_bytes / blk_size) * 8;
dif_bytes = (data_bytes / blk_size) * 8;
total_bytes += dif_bytes; switch (scsi_get_prot_op(sp->cmd)) {
case SCSI_PROT_READ_INSERT:
case SCSI_PROT_WRITE_STRIP:
total_bytes = data_bytes;
data_bytes += dif_bytes;
break;
case SCSI_PROT_READ_STRIP:
case SCSI_PROT_WRITE_INSERT:
case SCSI_PROT_READ_PASS:
case SCSI_PROT_WRITE_PASS:
total_bytes = data_bytes + dif_bytes;
break;
default:
BUG();
} }
if (!ql2xenablehba_err_chk) if (!qla2x00_hba_err_chk_enabled(scsi_get_prot_op(cmd)))
fw_prot_opts |= 0x10; /* Disable Guard tag checking */ fw_prot_opts |= 0x10; /* Disable Guard tag checking */
if (!bundling) { if (!bundling) {
...@@ -1151,7 +1325,12 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, ...@@ -1151,7 +1325,12 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
cmd_pkt->control_flags |= cmd_pkt->control_flags |=
__constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE); __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
if (!bundling && tot_prot_dsds) {
if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
cur_dsd, tot_dsds))
goto crc_queuing_error;
} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
(tot_dsds - tot_prot_dsds))) (tot_dsds - tot_prot_dsds)))
goto crc_queuing_error; goto crc_queuing_error;
...@@ -1414,6 +1593,22 @@ qla24xx_dif_start_scsi(srb_t *sp) ...@@ -1414,6 +1593,22 @@ qla24xx_dif_start_scsi(srb_t *sp)
goto queuing_error; goto queuing_error;
else else
sp->flags |= SRB_DMA_VALID; sp->flags |= SRB_DMA_VALID;
if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
(scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
struct qla2_sgx sgx;
uint32_t partial;
memset(&sgx, 0, sizeof(struct qla2_sgx));
sgx.tot_bytes = scsi_bufflen(cmd);
sgx.cur_sg = scsi_sglist(cmd);
sgx.sp = sp;
nseg = 0;
while (qla24xx_get_one_block_sg(
cmd->device->sector_size, &sgx, &partial))
nseg++;
}
} else } else
nseg = 0; nseg = 0;
...@@ -1428,6 +1623,11 @@ qla24xx_dif_start_scsi(srb_t *sp) ...@@ -1428,6 +1623,11 @@ qla24xx_dif_start_scsi(srb_t *sp)
goto queuing_error; goto queuing_error;
else else
sp->flags |= SRB_CRC_PROT_DMA_VALID; sp->flags |= SRB_CRC_PROT_DMA_VALID;
if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
(scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
}
} else { } else {
nseg = 0; nseg = 0;
} }
...@@ -1454,6 +1654,7 @@ qla24xx_dif_start_scsi(srb_t *sp) ...@@ -1454,6 +1654,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
/* Build header part of command packet (excluding the OPCODE). */ /* Build header part of command packet (excluding the OPCODE). */
req->current_outstanding_cmd = handle; req->current_outstanding_cmd = handle;
req->outstanding_cmds[handle] = sp; req->outstanding_cmds[handle] = sp;
sp->handle = handle;
sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
req->cnt -= req_cnt; req->cnt -= req_cnt;
......
...@@ -1435,25 +1435,27 @@ struct scsi_dif_tuple { ...@@ -1435,25 +1435,27 @@ struct scsi_dif_tuple {
* ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
* to indicate to the kernel that the HBA detected error. * to indicate to the kernel that the HBA detected error.
*/ */
static inline void static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{ {
struct scsi_qla_host *vha = sp->fcport->vha; struct scsi_qla_host *vha = sp->fcport->vha;
struct scsi_cmnd *cmd = sp->cmd; struct scsi_cmnd *cmd = sp->cmd;
struct scsi_dif_tuple *ep = uint8_t *ap = &sts24->data[12];
(struct scsi_dif_tuple *)&sts24->data[20]; uint8_t *ep = &sts24->data[20];
struct scsi_dif_tuple *ap =
(struct scsi_dif_tuple *)&sts24->data[12];
uint32_t e_ref_tag, a_ref_tag; uint32_t e_ref_tag, a_ref_tag;
uint16_t e_app_tag, a_app_tag; uint16_t e_app_tag, a_app_tag;
uint16_t e_guard, a_guard; uint16_t e_guard, a_guard;
e_ref_tag = be32_to_cpu(ep->ref_tag); /*
a_ref_tag = be32_to_cpu(ap->ref_tag); * swab32 of the "data" field in the beginning of qla2x00_status_entry()
e_app_tag = be16_to_cpu(ep->app_tag); * would make guard field appear at offset 2
a_app_tag = be16_to_cpu(ap->app_tag); */
e_guard = be16_to_cpu(ep->guard); a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
a_guard = be16_to_cpu(ap->guard); a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
ql_dbg(ql_dbg_io, vha, 0x3023, ql_dbg(ql_dbg_io, vha, 0x3023,
"iocb(s) %p Returned STATUS.\n", sts24); "iocb(s) %p Returned STATUS.\n", sts24);
...@@ -1465,6 +1467,63 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) ...@@ -1465,6 +1467,63 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
a_app_tag, e_app_tag, a_guard, e_guard); a_app_tag, e_app_tag, a_guard, e_guard);
/*
* Ignore sector if:
* For type 3: ref & app tag is all 'f's
* For type 0,1,2: app tag is all 'f's
*/
if ((a_app_tag == 0xffff) &&
((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
(a_ref_tag == 0xffffffff))) {
uint32_t blocks_done, resid;
sector_t lba_s = scsi_get_lba(cmd);
/* 2TB boundary case covered automatically with this */
blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
resid = scsi_bufflen(cmd) - (blocks_done *
cmd->device->sector_size);
scsi_set_resid(cmd, resid);
cmd->result = DID_OK << 16;
/* Update protection tag */
if (scsi_prot_sg_count(cmd)) {
uint32_t i, j = 0, k = 0, num_ent;
struct scatterlist *sg;
struct sd_dif_tuple *spt;
/* Patch the corresponding protection tags */
scsi_for_each_prot_sg(cmd, sg,
scsi_prot_sg_count(cmd), i) {
num_ent = sg_dma_len(sg) / 8;
if (k + num_ent < blocks_done) {
k += num_ent;
continue;
}
j = blocks_done - k - 1;
k = blocks_done;
break;
}
if (k != blocks_done) {
qla_printk(KERN_WARNING, sp->fcport->vha->hw,
"unexpected tag values tag:lba=%x:%lx)\n",
e_ref_tag, lba_s);
return 1;
}
spt = page_address(sg_page(sg)) + sg->offset;
spt += j;
spt->app_tag = 0xffff;
if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
spt->ref_tag = 0xffffffff;
}
return 0;
}
/* check guard */ /* check guard */
if (e_guard != a_guard) { if (e_guard != a_guard) {
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
...@@ -1472,7 +1531,7 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) ...@@ -1472,7 +1531,7 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
set_driver_byte(cmd, DRIVER_SENSE); set_driver_byte(cmd, DRIVER_SENSE);
set_host_byte(cmd, DID_ABORT); set_host_byte(cmd, DID_ABORT);
cmd->result |= SAM_STAT_CHECK_CONDITION << 1; cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
return; return 1;
} }
/* check appl tag */ /* check appl tag */
...@@ -1482,7 +1541,7 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) ...@@ -1482,7 +1541,7 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
set_driver_byte(cmd, DRIVER_SENSE); set_driver_byte(cmd, DRIVER_SENSE);
set_host_byte(cmd, DID_ABORT); set_host_byte(cmd, DID_ABORT);
cmd->result |= SAM_STAT_CHECK_CONDITION << 1; cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
return; return 1;
} }
/* check ref tag */ /* check ref tag */
...@@ -1492,8 +1551,9 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) ...@@ -1492,8 +1551,9 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
set_driver_byte(cmd, DRIVER_SENSE); set_driver_byte(cmd, DRIVER_SENSE);
set_host_byte(cmd, DID_ABORT); set_host_byte(cmd, DID_ABORT);
cmd->result |= SAM_STAT_CHECK_CONDITION << 1; cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
return; return 1;
} }
return 1;
} }
/** /**
...@@ -1767,7 +1827,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) ...@@ -1767,7 +1827,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
break; break;
case CS_DIF_ERROR: case CS_DIF_ERROR:
qla2x00_handle_dif_error(sp, sts24); logit = qla2x00_handle_dif_error(sp, sts24);
break; break;
default: default:
cp->result = DID_ERROR << 16; cp->result = DID_ERROR << 16;
......
...@@ -106,17 +106,21 @@ MODULE_PARM_DESC(ql2xmaxqdepth, ...@@ -106,17 +106,21 @@ MODULE_PARM_DESC(ql2xmaxqdepth,
"Maximum queue depth to report for target devices."); "Maximum queue depth to report for target devices.");
/* Do not change the value of this after module load */ /* Do not change the value of this after module load */
int ql2xenabledif = 1; int ql2xenabledif = 0;
module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR); module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenabledif, MODULE_PARM_DESC(ql2xenabledif,
" Enable T10-CRC-DIF " " Enable T10-CRC-DIF "
" Default is 0 - No DIF Support. 1 - Enable it"); " Default is 0 - No DIF Support. 1 - Enable it"
", 2 - Enable DIF for all types, except Type 0.");
int ql2xenablehba_err_chk; int ql2xenablehba_err_chk = 2;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR); module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk, MODULE_PARM_DESC(ql2xenablehba_err_chk,
" Enable T10-CRC-DIF Error isolation by HBA" " Enable T10-CRC-DIF Error isolation by HBA:\n"
" Default is 0 - Error isolation disabled, 1 - Enable it"); " Default is 1.\n"
" 0 -- Error isolation disabled\n"
" 1 -- Error isolation enabled only for DIX Type 0\n"
" 2 -- Error isolation enabled for all Types\n");
int ql2xiidmaenable=1; int ql2xiidmaenable=1;
module_param(ql2xiidmaenable, int, S_IRUGO); module_param(ql2xiidmaenable, int, S_IRUGO);
...@@ -2380,11 +2384,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -2380,11 +2384,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
if (ha->fw_attributes & BIT_4) { if (ha->fw_attributes & BIT_4) {
int prot = 0;
base_vha->flags.difdix_supported = 1; base_vha->flags.difdix_supported = 1;
ql_dbg(ql_dbg_init, base_vha, 0x00f1, ql_dbg(ql_dbg_init, base_vha, 0x00f1,
"Registering for DIF/DIX type 1 and 3 protection.\n"); "Registering for DIF/DIX type 1 and 3 protection.\n");
if (ql2xenabledif == 1)
prot = SHOST_DIX_TYPE0_PROTECTION;
scsi_host_set_prot(host, scsi_host_set_prot(host,
SHOST_DIF_TYPE1_PROTECTION prot | SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE2_PROTECTION
| SHOST_DIF_TYPE3_PROTECTION | SHOST_DIF_TYPE3_PROTECTION
| SHOST_DIX_TYPE1_PROTECTION | SHOST_DIX_TYPE1_PROTECTION
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment