Commit 51162264 authored by Linus Torvalds

Merge branch 'scsi-target-for-v4.10' of git://git.kernel.org/pub/scm/linux/kernel/git/bvanassche/linux

Pull SCSI target fixes from Bart Van Assche:

 - two small fixes for the ibmvscsis driver

 - ten patches with bug fixes for the target mode of the qla2xxx driver

 - four patches that keep the "sparse" and "smatch" static analyzer
   tools from reporting false positives for the qla2xxx code base

* 'scsi-target-for-v4.10' of git://git.kernel.org/pub/scm/linux/kernel/git/bvanassche/linux:
  qla2xxx: Disable out-of-order processing by default in firmware
  qla2xxx: Fix erroneous invalid handle message
  qla2xxx: Reduce exess wait during chip reset
  qla2xxx: Terminate exchange if corrupted
  qla2xxx: Fix crash due to null pointer access
  qla2xxx: Collect additional information to debug fw dump
  qla2xxx: Reset reserved field in firmware options to 0
  qla2xxx: Set tcm_qla2xxx version to automatically track qla2xxx version
  qla2xxx: Include ATIO queue in firmware dump when in target mode
  qla2xxx: Fix wrong IOCB type assumption
  qla2xxx: Avoid that building with W=1 triggers complaints about set-but-not-used variables
  qla2xxx: Move two arrays from header files to .c files
  qla2xxx: Declare an array with file scope static
  qla2xxx: Fix indentation
  ibmvscsis: Fix sleeping in interrupt context
  ibmvscsis: Fix max transfer length
parents e3737b91 300af14b
@@ -46,6 +46,7 @@
 #define INITIAL_SRP_LIMIT 800
 #define DEFAULT_MAX_SECTORS 256
+#define MAX_TXU 1024 * 1024
 static uint max_vdma_size = MAX_H_COPY_RDMA;
@@ -1391,7 +1392,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
 }
 info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token,
-GFP_KERNEL);
+GFP_ATOMIC);
 if (!info) {
 dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
 iue->target);
@@ -1443,7 +1444,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
 info->mad_version = cpu_to_be32(MAD_VERSION_1);
 info->os_type = cpu_to_be32(LINUX);
 memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu));
-info->port_max_txu[0] = cpu_to_be32(128 * PAGE_SIZE);
+info->port_max_txu[0] = cpu_to_be32(MAX_TXU);
 dma_wmb();
 rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn,
@@ -1509,7 +1510,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
 }
 cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token,
-GFP_KERNEL);
+GFP_ATOMIC);
 if (!cap) {
 dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
 iue->target);
...
@@ -761,7 +761,6 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
 struct device, kobj)));
 int type;
-int rval = 0;
 port_id_t did;
 type = simple_strtol(buf, NULL, 10);
@@ -775,7 +774,7 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
 ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);
-rval = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
+qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
 return count;
 }
...
@@ -1556,7 +1556,8 @@ typedef struct {
 struct atio {
 uint8_t entry_type; /* Entry type. */
 uint8_t entry_count; /* Entry count. */
-uint8_t data[58];
+__le16 attr_n_length;
+uint8_t data[56];
 uint32_t signature;
 #define ATIO_PROCESSED 0xDEADDEAD /* Signature */
 };
...
@@ -1191,7 +1191,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
 /* Wait for soft-reset to complete. */
 RD_REG_DWORD(&reg->ctrl_status);
-for (cnt = 0; cnt < 6000000; cnt++) {
+for (cnt = 0; cnt < 60; cnt++) {
 barrier();
 if ((RD_REG_DWORD(&reg->ctrl_status) &
 CSRX_ISP_SOFT_RESET) == 0)
@@ -1234,7 +1234,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
 RD_REG_DWORD(&reg->hccr);
 RD_REG_WORD(&reg->mailbox0);
-for (cnt = 6000000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
 rval == QLA_SUCCESS; cnt--) {
 barrier();
 if (cnt)
...
@@ -2492,6 +2492,10 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
 if (pkt->entry_status & RF_BUSY)
 res = DID_BUS_BUSY << 16;
+if (pkt->entry_type == NOTIFY_ACK_TYPE &&
+pkt->handle == QLA_TGT_SKIP_HANDLE)
+return;
 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
 if (sp) {
 sp->done(ha, sp, res);
...
@@ -10,7 +10,7 @@
 #include <linux/delay.h>
 #include <linux/gfp.h>
-struct rom_cmd {
+static struct rom_cmd {
 uint16_t cmd;
 } rom_cmds[] = {
 { MBC_LOAD_RAM },
@@ -101,12 +101,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 return QLA_FUNCTION_TIMEOUT;
 }
 /* if PCI error, then avoid mbx processing.*/
 if (test_bit(PCI_ERR, &base_vha->dpc_flags)) {
 ql_log(ql_log_warn, vha, 0x1191,
 "PCI error, exiting.\n");
 return QLA_FUNCTION_TIMEOUT;
 }
 reg = ha->iobase;
 io_lock_on = base_vha->flags.init_done;
@@ -323,20 +323,33 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 }
 } else {
-uint16_t mb0;
-uint32_t ictrl;
+uint16_t mb[8];
+uint32_t ictrl, host_status, hccr;
 uint16_t w;
 if (IS_FWI2_CAPABLE(ha)) {
-mb0 = RD_REG_WORD(&reg->isp24.mailbox0);
+mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
+mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
+mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
+mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
+mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
 ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
+host_status = RD_REG_DWORD(&reg->isp24.host_status);
+hccr = RD_REG_DWORD(&reg->isp24.hccr);
+ql_log(ql_log_warn, vha, 0x1119,
+"MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
+"mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
+command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
+mb[7], host_status, hccr);
 } else {
-mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0);
+mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
 ictrl = RD_REG_WORD(&reg->isp.ictrl);
+ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
+"MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
+"mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
 }
-ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
-"MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
-"mb[0]=0x%x\n", command, ictrl, jiffies, mb0);
 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
 /* Capture FW dump only, if PCI device active */
@@ -684,7 +697,6 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
 mbx_cmd_t mc;
 mbx_cmd_t *mcp = &mc;
 struct qla_hw_data *ha = vha->hw;
-int configured_count;
 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
 "Entered %s.\n", __func__);
@@ -707,7 +719,6 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
 /*EMPTY*/
 ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
 } else {
-configured_count = mcp->mb[11];
 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
 "Done %s.\n", __func__);
 }
...
@@ -42,6 +42,11 @@ static int qla82xx_crb_table_initialized;
 (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
 QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
+const int MD_MIU_TEST_AGT_RDDATA[] = {
+0x410000A8, 0x410000AC,
+0x410000B8, 0x410000BC
+};
 static void qla82xx_crb_addr_transform_setup(void)
 {
 qla82xx_crb_addr_transform(XDMA);
...
@@ -1176,8 +1176,7 @@ struct qla82xx_md_entry_queue {
 #define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
 #define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
-static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
-0x410000B8, 0x410000BC };
+extern const int MD_MIU_TEST_AGT_RDDATA[4];
 #define CRB_NIU_XG_PAUSE_CTL_P0 0x1
 #define CRB_NIU_XG_PAUSE_CTL_P1 0x8
...
@@ -15,6 +15,23 @@
 #define TIMEOUT_100_MS 100
+static const uint32_t qla8044_reg_tbl[] = {
+QLA8044_PEG_HALT_STATUS1,
+QLA8044_PEG_HALT_STATUS2,
+QLA8044_PEG_ALIVE_COUNTER,
+QLA8044_CRB_DRV_ACTIVE,
+QLA8044_CRB_DEV_STATE,
+QLA8044_CRB_DRV_STATE,
+QLA8044_CRB_DRV_SCRATCH,
+QLA8044_CRB_DEV_PART_INFO1,
+QLA8044_CRB_IDC_VER_MAJOR,
+QLA8044_FW_VER_MAJOR,
+QLA8044_FW_VER_MINOR,
+QLA8044_FW_VER_SUB,
+QLA8044_CMDPEG_STATE,
+QLA8044_ASIC_TEMP,
+};
 /* 8044 Flash Read/Write functions */
 uint32_t
 qla8044_rd_reg(struct qla_hw_data *ha, ulong addr)
...
@@ -535,23 +535,6 @@ enum qla_regs {
 #define CRB_CMDPEG_CHECK_RETRY_COUNT 60
 #define CRB_CMDPEG_CHECK_DELAY 500
-static const uint32_t qla8044_reg_tbl[] = {
-QLA8044_PEG_HALT_STATUS1,
-QLA8044_PEG_HALT_STATUS2,
-QLA8044_PEG_ALIVE_COUNTER,
-QLA8044_CRB_DRV_ACTIVE,
-QLA8044_CRB_DEV_STATE,
-QLA8044_CRB_DRV_STATE,
-QLA8044_CRB_DRV_SCRATCH,
-QLA8044_CRB_DEV_PART_INFO1,
-QLA8044_CRB_IDC_VER_MAJOR,
-QLA8044_FW_VER_MAJOR,
-QLA8044_FW_VER_MINOR,
-QLA8044_FW_VER_SUB,
-QLA8044_CMDPEG_STATE,
-QLA8044_ASIC_TEMP,
-};
 /* MiniDump Structures */
 /* Driver_code is for driver to write some info about the entry
...
@@ -3662,7 +3662,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
 sizeof(struct ct6_dsd), 0,
 SLAB_HWCACHE_ALIGN, NULL);
 if (!ctx_cachep)
-goto fail_free_gid_list;
+goto fail_free_srb_mempool;
 }
 ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
 ctx_cachep);
@@ -3815,7 +3815,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
 ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
 GFP_KERNEL);
 if (!ha->loop_id_map)
-goto fail_async_pd;
+goto fail_loop_id_map;
 else {
 qla2x00_set_reserved_loop_ids(ha);
 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
@@ -3824,6 +3824,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
 return 0;
+fail_loop_id_map:
+dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
 fail_async_pd:
 dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
 fail_ex_init_cb:
@@ -3851,6 +3853,10 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
 ha->ms_iocb = NULL;
 ha->ms_iocb_dma = 0;
+if (ha->sns_cmd)
+dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
+ha->sns_cmd, ha->sns_cmd_dma);
 fail_dma_pool:
 if (IS_QLA82XX(ha) || ql2xenabledif) {
 dma_pool_destroy(ha->fcp_cmnd_dma_pool);
@@ -3868,10 +3874,12 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
 kfree(ha->nvram);
 ha->nvram = NULL;
 fail_free_ctx_mempool:
-mempool_destroy(ha->ctx_mempool);
+if (ha->ctx_mempool)
+mempool_destroy(ha->ctx_mempool);
 ha->ctx_mempool = NULL;
 fail_free_srb_mempool:
-mempool_destroy(ha->srb_mempool);
+if (ha->srb_mempool)
+mempool_destroy(ha->srb_mempool);
 ha->srb_mempool = NULL;
 fail_free_gid_list:
 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
...
@@ -668,11 +668,9 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
 {
 struct qla_hw_data *ha = vha->hw;
 struct qla_tgt_sess *sess = NULL;
-uint32_t unpacked_lun, lun = 0;
 uint16_t loop_id;
 int res = 0;
 struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
-struct atio_from_isp *a = (struct atio_from_isp *)iocb;
 unsigned long flags;
 loop_id = le16_to_cpu(n->u.isp24.nport_handle);
@@ -725,11 +723,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
 "loop_id %d)\n", vha->host_no, sess, sess->port_name,
 mcmd, loop_id);
-lun = a->u.isp24.fcp_cmnd.lun;
-unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
-return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
-iocb, QLA24XX_MGMT_SEND_NACK);
+return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
 }
 /* ha->tgt.sess_lock supposed to be held on entry */
@@ -3067,7 +3061,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
 pkt->entry_type = NOTIFY_ACK_TYPE;
 pkt->entry_count = 1;
-pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+pkt->handle = QLA_TGT_SKIP_HANDLE;
 nack = (struct nack_to_isp *)pkt;
 nack->ox_id = ntfy->ox_id;
@@ -3110,6 +3104,9 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
 #if 0 /* Todo */
 if (rc == -ENOMEM)
 qlt_alloc_qfull_cmd(vha, imm, 0, 0);
+#else
+if (rc) {
+}
 #endif
 goto done;
 }
@@ -6457,12 +6454,29 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
 if (!vha->flags.online)
 return;
-while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
+while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
+fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
 cnt = pkt->u.raw.entry_count;
-qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt,
-ha_locked);
+if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
+/*
+ * This packet is corrupted. The header + payload
+ * can not be trusted. There is no point in passing
+ * it further up.
+ */
+ql_log(ql_log_warn, vha, 0xffff,
+"corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
+pkt->u.isp24.fcp_hdr.s_id,
+be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
+le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);
+adjust_corrupted_atio(pkt);
+qlt_send_term_exchange(vha, NULL, pkt, ha_locked, 0);
+} else {
+qlt_24xx_atio_pkt_all_vps(vha,
+(struct atio_from_isp *)pkt, ha_locked);
+}
 for (i = 0; i < cnt; i++) {
 ha->tgt.atio_ring_index++;
@@ -6545,6 +6559,13 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
 /* Disable Full Login after LIP */
 nv->host_p &= cpu_to_le32(~BIT_10);
+/*
+ * clear BIT 15 explicitly as we have seen at least
+ * a couple of instances where this was set and this
+ * was causing the firmware to not be initialized.
+ */
+nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
 /* Enable target PRLI control */
 nv->firmware_options_2 |= cpu_to_le32(BIT_14);
 } else {
@@ -6560,9 +6581,6 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
 return;
 }
-/* out-of-order frames reassembly */
-nv->firmware_options_3 |= BIT_6|BIT_9;
 if (ha->tgt.enable_class_2) {
 if (vha->flags.init_done)
 fc_host_supported_classes(vha->host) =
@@ -6629,11 +6647,17 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
 /* Disable ini mode, if requested */
 if (!qla_ini_mode_enabled(vha))
 nv->firmware_options_1 |= cpu_to_le32(BIT_5);
 /* Disable Full Login after LIP */
 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
 /* Enable initial LIP */
 nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
+/*
+ * clear BIT 15 explicitly as we have seen at
+ * least a couple of instances where this was set
+ * and this was causing the firmware to not be
+ * initialized.
+ */
+nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
 if (ql2xtgt_tape_enable)
 /* Enable FC tape support */
 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
@@ -6658,9 +6682,6 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
 return;
 }
-/* out-of-order frames reassembly */
-nv->firmware_options_3 |= BIT_6|BIT_9;
 if (ha->tgt.enable_class_2) {
 if (vha->flags.init_done)
 fc_host_supported_classes(vha->host) =
...
@@ -427,13 +427,33 @@ struct atio_from_isp {
 struct {
 uint8_t entry_type; /* Entry type. */
 uint8_t entry_count; /* Entry count. */
-uint8_t data[58];
+__le16 attr_n_length;
+#define FCP_CMD_LENGTH_MASK 0x0fff
+#define FCP_CMD_LENGTH_MIN 0x38
+uint8_t data[56];
 uint32_t signature;
 #define ATIO_PROCESSED 0xDEADDEAD /* Signature */
 } raw;
 } u;
 } __packed;
+static inline int fcpcmd_is_corrupted(struct atio *atio)
+{
+if (atio->entry_type == ATIO_TYPE7 &&
+(le16_to_cpu(atio->attr_n_length & FCP_CMD_LENGTH_MASK) <
+FCP_CMD_LENGTH_MIN))
+return 1;
+else
+return 0;
+}
+/* adjust corrupted atio so we won't trip over the same entry again. */
+static inline void adjust_corrupted_atio(struct atio_from_isp *atio)
+{
+atio->u.raw.attr_n_length = cpu_to_le16(FCP_CMD_LENGTH_MIN);
+atio->u.isp24.fcp_cmnd.add_cdb_len = 0;
+}
 #define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
 /*
...
@@ -433,6 +433,18 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
 count++;
 }
 }
+} else if (QLA_TGT_MODE_ENABLED() &&
+ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
+struct qla_hw_data *ha = vha->hw;
+struct atio *atr = ha->tgt.atio_ring;
+if (atr || !buf) {
+length = ha->tgt.atio_q_length;
+qla27xx_insert16(0, buf, len);
+qla27xx_insert16(length, buf, len);
+qla27xx_insertbuf(atr, length * sizeof(*atr), buf, len);
+count++;
+}
 } else {
 ql_dbg(ql_dbg_misc, vha, 0xd026,
 "%s: unknown queue %x\n", __func__, ent->t263.queue_type);
@@ -676,6 +688,18 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
 count++;
 }
 }
+} else if (QLA_TGT_MODE_ENABLED() &&
+ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
+struct qla_hw_data *ha = vha->hw;
+struct atio *atr = ha->tgt.atio_ring_ptr;
+if (atr || !buf) {
+qla27xx_insert16(0, buf, len);
+qla27xx_insert16(1, buf, len);
+qla27xx_insert32(ha->tgt.atio_q_in ?
+readl(ha->tgt.atio_q_in) : 0, buf, len);
+count++;
+}
 } else {
 ql_dbg(ql_dbg_misc, vha, 0xd02f,
 "%s: unknown queue %x\n", __func__, ent->t274.queue_type);
...
@@ -1800,7 +1800,7 @@ static ssize_t tcm_qla2xxx_wwn_version_show(struct config_item *item,
 {
 return sprintf(page,
 "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
-UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname,
 utsname()->machine);
 }
@@ -1906,7 +1906,7 @@ static int tcm_qla2xxx_register_configfs(void)
 int ret;
 pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
-UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname,
 utsname()->machine);
 ret = target_register_template(&tcm_qla2xxx_ops);
...
 #include <target/target_core_base.h>
 #include <linux/btree.h>
-#define TCM_QLA2XXX_VERSION "v0.1"
 /* length of ASCII WWPNs including pad */
 #define TCM_QLA2XXX_NAMELEN 32
 /*
...