Commit e8e9941b authored by Linus Torvalds

Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
 "This is actually just a small set of mainly bug fixes for the original
  merge window code plus a few trivial updates and qedi boot from SAN
  support feature patch"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  scsi: libfc: pass an error pointer to fc_disc_error()
  scsi: hisi_sas: make several const arrays static
  scsi: qla2xxx: Off by one in qlt_ctio_to_cmd()
  scsi: sg: fix SG_DXFER_FROM_DEV transfers
  scsi: virtio_scsi: always read VPD pages for multiqueue too
  scsi: qedf: fix spelling mistake: "offlading" -> "offloading"
  scsi: qedi: fix another spelling mistake: "alloction" -> "allocation"
  scsi: isci: fix typo in function names
  scsi: cxlflash: return -EFAULT if copy_from_user() fails
  scsi: qedi: Add support for Boot from SAN over iSCSI offload
parents cb0fbbf2 6f37e210
@@ -3401,9 +3401,10 @@ static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
 		if (is_write) {
 			req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
-			rc = copy_from_user(kbuf, ubuf, ulen);
-			if (unlikely(rc))
+			if (copy_from_user(kbuf, ubuf, ulen)) {
+				rc = -EFAULT;
 				goto out;
+			}
 		}
 	}
@@ -3431,8 +3432,10 @@ static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
 		goto out;
 	}
-	if (ulen && !is_write)
-		rc = copy_to_user(ubuf, kbuf, ulen);
+	if (ulen && !is_write) {
+		if (copy_to_user(ubuf, kbuf, ulen))
+			rc = -EFAULT;
+	}
 out:
 	kfree(buf);
 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
......
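The cxlflash change above follows the usual kernel convention for user copies: copy_from_user() and copy_to_user() return the number of bytes that could not be copied (0 on success), not an errno, so a nonzero return has to be translated into -EFAULT rather than propagated as rc. A minimal kernel-context sketch of the idiom, using a hypothetical helper rather than the driver's actual function:

/* Hypothetical helper showing the copy_from_user() error idiom. */
#include <linux/uaccess.h>
#include <linux/errno.h>

static int example_copy_in(void *kbuf, const void __user *ubuf, unsigned long ulen)
{
        /* Nonzero means "bytes left uncopied", so report a fault instead. */
        if (copy_from_user(kbuf, ubuf, ulen))
                return -EFAULT;

        return 0;
}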
@@ -1693,7 +1693,7 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
 static int parse_trans_tx_err_code_v2_hw(u32 err_msk)
 {
-	const u8 trans_tx_err_code_prio[] = {
+	static const u8 trans_tx_err_code_prio[] = {
 		TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS,
 		TRANS_TX_ERR_PHY_NOT_ENABLE,
 		TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION,
@@ -1738,7 +1738,7 @@ static int parse_trans_tx_err_code_v2_hw(u32 err_msk)
 static int parse_trans_rx_err_code_v2_hw(u32 err_msk)
 {
-	const u8 trans_rx_err_code_prio[] = {
+	static const u8 trans_rx_err_code_prio[] = {
 		TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR,
 		TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR,
 		TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM,
@@ -1784,7 +1784,7 @@ static int parse_trans_rx_err_code_v2_hw(u32 err_msk)
 static int parse_dma_tx_err_code_v2_hw(u32 err_msk)
 {
-	const u8 dma_tx_err_code_prio[] = {
+	static const u8 dma_tx_err_code_prio[] = {
 		DMA_TX_UNEXP_XFER_ERR,
 		DMA_TX_UNEXP_RETRANS_ERR,
 		DMA_TX_XFER_LEN_OVERFLOW,
@@ -1810,7 +1810,7 @@ static int parse_dma_tx_err_code_v2_hw(u32 err_msk)
 static int parse_sipc_rx_err_code_v2_hw(u32 err_msk)
 {
-	const u8 sipc_rx_err_code_prio[] = {
+	static const u8 sipc_rx_err_code_prio[] = {
 		SIPC_RX_FIS_STATUS_ERR_BIT_VLD,
 		SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR,
 		SIPC_RX_FIS_STATUS_BSY_BIT_ERR,
@@ -1836,7 +1836,7 @@ static int parse_sipc_rx_err_code_v2_hw(u32 err_msk)
 static int parse_dma_rx_err_code_v2_hw(u32 err_msk)
 {
-	const u8 dma_rx_err_code_prio[] = {
+	static const u8 dma_rx_err_code_prio[] = {
 		DMA_RX_UNKNOWN_FRM_ERR,
 		DMA_RX_DATA_LEN_OVERFLOW,
 		DMA_RX_DATA_LEN_UNDERFLOW,
......
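The hisi_sas hunks above only add the static qualifier: a function-local const array initialized with a brace list is typically rebuilt on the stack on every call, while static const places the table in read-only data once. A small standalone sketch of the pattern, with generic values rather than the driver's error-code tables:

#include <stddef.h>

static int parse_err_code_example(unsigned int err_msk)
{
        /* Emitted once in .rodata; never copied onto the stack per call. */
        static const unsigned char err_code_prio[] = { 3, 1, 7, 4 };
        size_t i;

        for (i = 0; i < sizeof(err_code_prio); i++)
                if (err_msk & (1U << err_code_prio[i]))
                        return err_code_prio[i];

        return -1;
}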
@@ -213,7 +213,7 @@ static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
  * @task_context:
  *
  */
-static void scu_ssp_reqeust_construct_task_context(
+static void scu_ssp_request_construct_task_context(
 	struct isci_request *ireq,
 	struct scu_task_context *task_context)
 {
@@ -425,7 +425,7 @@ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
 	u8 prot_type = scsi_get_prot_type(scmd);
 	u8 prot_op = scsi_get_prot_op(scmd);
-	scu_ssp_reqeust_construct_task_context(ireq, task_context);
+	scu_ssp_request_construct_task_context(ireq, task_context);
 	task_context->ssp_command_iu_length =
 		sizeof(struct ssp_cmd_iu) / sizeof(u32);
@@ -472,7 +472,7 @@ static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
 {
 	struct scu_task_context *task_context = ireq->tc;
-	scu_ssp_reqeust_construct_task_context(ireq, task_context);
+	scu_ssp_request_construct_task_context(ireq, task_context);
 	task_context->control_frame = 1;
 	task_context->priority = SCU_TASK_PRIORITY_HIGH;
@@ -495,7 +495,7 @@ static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
  * the command buffer is complete. none Revisit task context construction to
  * determine what is common for SSP/SMP/STP task context structures.
  */
-static void scu_sata_reqeust_construct_task_context(
+static void scu_sata_request_construct_task_context(
 	struct isci_request *ireq,
 	struct scu_task_context *task_context)
 {
@@ -562,7 +562,7 @@ static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
 {
 	struct scu_task_context *task_context = ireq->tc;
-	scu_sata_reqeust_construct_task_context(ireq, task_context);
+	scu_sata_request_construct_task_context(ireq, task_context);
 	task_context->control_frame = 0;
 	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
@@ -613,7 +613,7 @@ static void sci_stp_optimized_request_construct(struct isci_request *ireq,
 	struct scu_task_context *task_context = ireq->tc;
 	/* Build the STP task context structure */
-	scu_sata_reqeust_construct_task_context(ireq, task_context);
+	scu_sata_request_construct_task_context(ireq, task_context);
 	/* Copy over the SGL elements */
 	sci_request_build_sgl(ireq);
@@ -1401,7 +1401,7 @@ static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
  * @data_buffer: The buffer of data to be copied.
  * @length: The length of the data transfer.
  *
- * Copy the data from the buffer for the length specified to the IO reqeust SGL
+ * Copy the data from the buffer for the length specified to the IO request SGL
  * specified data region. enum sci_status
  */
 static enum sci_status
......
@@ -573,7 +573,7 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
 		event = DISC_EV_FAILED;
 	}
 	if (error)
-		fc_disc_error(disc, fp);
+		fc_disc_error(disc, ERR_PTR(error));
 	else if (event != DISC_EV_NONE)
 		fc_disc_done(disc, event);
 	fc_frame_free(fp);
......
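fc_disc_error() expects an ERR_PTR()-encoded value rather than a frame pointer, so the libfc fix wraps the errno with ERR_PTR() instead of handing it the unrelated frame. A short reminder of the err.h convention, with a hypothetical consumer standing in for the libfc code:

#include <linux/err.h>
#include <linux/printk.h>

/* Hypothetical consumer: a negative errno travels inside a pointer. */
static void example_handle_frame(void *fp)
{
        if (IS_ERR(fp)) {
                pr_debug("exchange failed: %ld\n", PTR_ERR(fp));
                return;
        }

        /* normal frame processing would continue here */
}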
@@ -1227,7 +1227,7 @@ static void qedf_rport_event_handler(struct fc_lport *lport,
 		if (rdata->spp_type != FC_TYPE_FCP) {
 			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
-				  "Not offlading since since spp type isn't FCP\n");
+				  "Not offloading since spp type isn't FCP\n");
 			break;
 		}
 		if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
......
@@ -23,11 +23,17 @@
 #include <linux/qed/qed_iscsi_if.h>
 #include <linux/qed/qed_ll2_if.h>
 #include "qedi_version.h"
+#include "qedi_nvm_iscsi_cfg.h"
 
 #define QEDI_MODULE_NAME "qedi"
 
 struct qedi_endpoint;
 
+#ifndef GET_FIELD2
+#define GET_FIELD2(value, name) \
+	(((value) & (name ## _MASK)) >> (name ## _OFFSET))
+#endif
+
 /*
  * PCI function probe defines
  */
@@ -66,6 +72,11 @@ struct qedi_endpoint;
 #define QEDI_HW_DMA_BOUNDARY	0xfff
 #define QEDI_PATH_HANDLE	0xFE0000000UL
 
+enum qedi_nvm_tgts {
+	QEDI_NVM_TGT_PRI,
+	QEDI_NVM_TGT_SEC,
+};
+
 struct qedi_uio_ctrl {
 	/* meta data */
 	u32 uio_hsi_version;
@@ -283,6 +294,8 @@ struct qedi_ctx {
 	void *bdq_pbl_list;
 	dma_addr_t bdq_pbl_list_dma;
 	u8 bdq_pbl_list_num_entries;
+	struct nvm_iscsi_cfg *iscsi_cfg;
+	dma_addr_t nvm_buf_dma;
 	void __iomem *bdq_primary_prod;
 	void __iomem *bdq_secondary_prod;
 	u16 bdq_prod_idx;
@@ -337,6 +350,10 @@ struct qedi_ctx {
 	bool use_fast_sge;
 
 	atomic_t num_offloads;
+#define SYSFS_FLAG_FW_SEL_BOOT	2
+#define IPV6_LEN	41
+#define IPV4_LEN	17
+	struct iscsi_boot_kset *boot_kset;
 };
 
 struct qedi_work {
......
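The new GET_FIELD2() macro in qedi.h pairs with the *_MASK/*_OFFSET constants defined in the NVM iSCSI layout shown below: it masks a packed word and shifts the field down to bit 0. An illustrative use follows; the mask/offset values are the initiator VLAN field from the new header, while the helper function itself is hypothetical:

#define GET_FIELD2(value, name) \
        (((value) & (name ## _MASK)) >> (name ## _OFFSET))

#define NVM_ISCSI_CFG_INITIATOR_VLAN_MASK       0x0000FFFF
#define NVM_ISCSI_CFG_INITIATOR_VLAN_OFFSET     0

/* Extract the initiator VLAN id from the packed generic_cont0 word. */
static unsigned int example_vlan_id(unsigned int generic_cont0)
{
        return GET_FIELD2(generic_cont0, NVM_ISCSI_CFG_INITIATOR_VLAN);
}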
@@ -1411,7 +1411,7 @@ static void qedi_tmf_work(struct work_struct *work)
 	list_work = kzalloc(sizeof(*list_work), GFP_ATOMIC);
 	if (!list_work) {
-		QEDI_ERR(&qedi->dbg_ctx, "Memory alloction failed\n");
+		QEDI_ERR(&qedi->dbg_ctx, "Memory allocation failed\n");
 		goto abort_ret;
 	}
......
/*
* QLogic iSCSI Offload Driver
* Copyright (c) 2016 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef NVM_ISCSI_CFG_H
#define NVM_ISCSI_CFG_H
#define NUM_OF_ISCSI_TARGET_PER_PF 4    /* Defined as per the
                                         * ISCSI IBFT constraint
                                         */
#define NUM_OF_ISCSI_PF_SUPPORTED 4     /* One PF per Port -
                                         * assuming 4 port card
                                         */

#define NVM_ISCSI_CFG_DHCP_NAME_MAX_LEN 256
union nvm_iscsi_dhcp_vendor_id {
        u32 value[NVM_ISCSI_CFG_DHCP_NAME_MAX_LEN / 4];
        u8 byte[NVM_ISCSI_CFG_DHCP_NAME_MAX_LEN];
};

#define NVM_ISCSI_IPV4_ADDR_BYTE_LEN 4
union nvm_iscsi_ipv4_addr {
        u32 addr;
        u8 byte[NVM_ISCSI_IPV4_ADDR_BYTE_LEN];
};

#define NVM_ISCSI_IPV6_ADDR_BYTE_LEN 16
union nvm_iscsi_ipv6_addr {
        u32 addr[4];
        u8 byte[NVM_ISCSI_IPV6_ADDR_BYTE_LEN];
};

struct nvm_iscsi_initiator_ipv4 {
        union nvm_iscsi_ipv4_addr addr;                         /* 0x0 */
        union nvm_iscsi_ipv4_addr subnet_mask;                  /* 0x4 */
        union nvm_iscsi_ipv4_addr gateway;                      /* 0x8 */
        union nvm_iscsi_ipv4_addr primary_dns;                  /* 0xC */
        union nvm_iscsi_ipv4_addr secondary_dns;                /* 0x10 */
        union nvm_iscsi_ipv4_addr dhcp_addr;                    /* 0x14 */
        union nvm_iscsi_ipv4_addr isns_server;                  /* 0x18 */
        union nvm_iscsi_ipv4_addr slp_server;                   /* 0x1C */
        union nvm_iscsi_ipv4_addr primay_radius_server;         /* 0x20 */
        union nvm_iscsi_ipv4_addr secondary_radius_server;      /* 0x24 */
        union nvm_iscsi_ipv4_addr rsvd[4];                      /* 0x28 */
};
struct nvm_iscsi_initiator_ipv6 {
        union nvm_iscsi_ipv6_addr addr;                         /* 0x0 */
        union nvm_iscsi_ipv6_addr subnet_mask;                  /* 0x10 */
        union nvm_iscsi_ipv6_addr gateway;                      /* 0x20 */
        union nvm_iscsi_ipv6_addr primary_dns;                  /* 0x30 */
        union nvm_iscsi_ipv6_addr secondary_dns;                /* 0x40 */
        union nvm_iscsi_ipv6_addr dhcp_addr;                    /* 0x50 */
        union nvm_iscsi_ipv6_addr isns_server;                  /* 0x60 */
        union nvm_iscsi_ipv6_addr slp_server;                   /* 0x70 */
        union nvm_iscsi_ipv6_addr primay_radius_server;         /* 0x80 */
        union nvm_iscsi_ipv6_addr secondary_radius_server;      /* 0x90 */
        union nvm_iscsi_ipv6_addr rsvd[3];                      /* 0xA0 */
        u32 config;                                             /* 0xD0 */
#define NVM_ISCSI_CFG_INITIATOR_IPV6_SUBNET_MASK_PREFIX_MASK   0x000000FF
#define NVM_ISCSI_CFG_INITIATOR_IPV6_SUBNET_MASK_PREFIX_OFFSET 0
        u32 rsvd_1[3];
};

#define NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN 256
union nvm_iscsi_name {
        u32 value[NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN / 4];
        u8 byte[NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN];
};

#define NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN 256
union nvm_iscsi_chap_name {
        u32 value[NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN / 4];
        u8 byte[NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN];
};

#define NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN 16       /* md5 need per RFC1996
                                                 * is 16 octets
                                                 */
union nvm_iscsi_chap_password {
        u32 value[NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN / 4];
        u8 byte[NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN];
};

union nvm_iscsi_lun {
        u8 byte[8];
        u32 value[2];
};
struct nvm_iscsi_generic {
        u32 ctrl_flags;                                         /* 0x0 */
#define NVM_ISCSI_CFG_GEN_CHAP_ENABLED                  BIT(0)
#define NVM_ISCSI_CFG_GEN_DHCP_TCPIP_CONFIG_ENABLED     BIT(1)
#define NVM_ISCSI_CFG_GEN_DHCP_ISCSI_CONFIG_ENABLED     BIT(2)
#define NVM_ISCSI_CFG_GEN_IPV6_ENABLED                  BIT(3)
#define NVM_ISCSI_CFG_GEN_IPV4_FALLBACK_ENABLED         BIT(4)
#define NVM_ISCSI_CFG_GEN_ISNS_WORLD_LOGIN              BIT(5)
#define NVM_ISCSI_CFG_GEN_ISNS_SELECTIVE_LOGIN          BIT(6)
#define NVM_ISCSI_CFG_GEN_ADDR_REDIRECT_ENABLED         BIT(7)
#define NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED           BIT(8)
        u32 timeout;                                            /* 0x4 */
#define NVM_ISCSI_CFG_GEN_DHCP_REQUEST_TIMEOUT_MASK     0x0000FFFF
#define NVM_ISCSI_CFG_GEN_DHCP_REQUEST_TIMEOUT_OFFSET   0
#define NVM_ISCSI_CFG_GEN_PORT_LOGIN_TIMEOUT_MASK       0xFFFF0000
#define NVM_ISCSI_CFG_GEN_PORT_LOGIN_TIMEOUT_OFFSET     16
        union nvm_iscsi_dhcp_vendor_id dhcp_vendor_id;          /* 0x8 */
        u32 rsvd[62];                                           /* 0x108 */
};

struct nvm_iscsi_initiator {
        struct nvm_iscsi_initiator_ipv4 ipv4;                   /* 0x0 */
        struct nvm_iscsi_initiator_ipv6 ipv6;                   /* 0x38 */
        union nvm_iscsi_name initiator_name;                    /* 0x118 */
        union nvm_iscsi_chap_name chap_name;                    /* 0x218 */
        union nvm_iscsi_chap_password chap_password;            /* 0x318 */
        u32 generic_cont0;                                      /* 0x398 */
#define NVM_ISCSI_CFG_INITIATOR_VLAN_MASK               0x0000FFFF
#define NVM_ISCSI_CFG_INITIATOR_VLAN_OFFSET             0
#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_MASK         0x00030000
#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_OFFSET       16
#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_4            1
#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_6            2
#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_4_AND_6      3
        u32 ctrl_flags;
#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_PRIORITY_V6  BIT(0)
#define NVM_ISCSI_CFG_INITIATOR_VLAN_ENABLED            BIT(1)
        u32 rsvd[116];                                          /* 0x32C */
};
struct nvm_iscsi_target {
        u32 ctrl_flags;                                         /* 0x0 */
#define NVM_ISCSI_CFG_TARGET_ENABLED            BIT(0)
#define NVM_ISCSI_CFG_BOOT_TIME_LOGIN_STATUS    BIT(1)
        u32 generic_cont0;                                      /* 0x4 */
#define NVM_ISCSI_CFG_TARGET_TCP_PORT_MASK      0x0000FFFF
#define NVM_ISCSI_CFG_TARGET_TCP_PORT_OFFSET    0
        u32 ip_ver;
#define NVM_ISCSI_CFG_IPv4      4
#define NVM_ISCSI_CFG_IPv6      6
        u32 rsvd_1[7];                                          /* 0x24 */
        union nvm_iscsi_ipv4_addr ipv4_addr;                    /* 0x28 */
        union nvm_iscsi_ipv6_addr ipv6_addr;                    /* 0x2C */
        union nvm_iscsi_lun lun;                                /* 0x3C */
        union nvm_iscsi_name target_name;                       /* 0x44 */
        union nvm_iscsi_chap_name chap_name;                    /* 0x144 */
        union nvm_iscsi_chap_password chap_password;            /* 0x244 */
        u32 rsvd_2[107];                                        /* 0x2C4 */
};

struct nvm_iscsi_block {
        u32 id;                                                 /* 0x0 */
#define NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_MASK     0x0000000F
#define NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_OFFSET   0
#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK        0x00000FF0
#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET      4
#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_IS_NOT_EMPTY        BIT(0)
#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_PF_MAPPED           BIT(1)
        u32 rsvd_1[5];                                          /* 0x4 */
        struct nvm_iscsi_generic generic;                       /* 0x18 */
        struct nvm_iscsi_initiator initiator;                   /* 0x218 */
        struct nvm_iscsi_target target[NUM_OF_ISCSI_TARGET_PER_PF];
                                                                /* 0x718 */
        u32 rsvd_2[58];                                         /* 0x1718 */
        /* total size - 0x1800 - 6K block */
};
struct nvm_iscsi_cfg {
        u32 id;                                                 /* 0x0 */
#define NVM_ISCSI_CFG_BLK_VERSION_MINOR_MASK    0x000000FF
#define NVM_ISCSI_CFG_BLK_VERSION_MAJOR_MASK    0x0000FF00
#define NVM_ISCSI_CFG_BLK_SIGNATURE_MASK        0xFFFF0000
#define NVM_ISCSI_CFG_BLK_SIGNATURE             0x49430000      /* IC - Iscsi
                                                                 * Config
                                                                 */
#define NVM_ISCSI_CFG_BLK_VERSION_MAJOR 0
#define NVM_ISCSI_CFG_BLK_VERSION_MINOR 10
#define NVM_ISCSI_CFG_BLK_VERSION ((NVM_ISCSI_CFG_BLK_VERSION_MAJOR << 8) | \
                                   NVM_ISCSI_CFG_BLK_VERSION_MINOR)
        struct nvm_iscsi_block block[NUM_OF_ISCSI_PF_SUPPORTED]; /* 0x4 */
};
#endif
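A minimal sketch (not the qedi implementation) of how a consumer of this layout might validate the buffer and locate the block mapped to its PCI function, using only the constants above plus the GET_FIELD2() helper added to qedi.h; the function name and loop shape are illustrative assumptions:

static struct nvm_iscsi_block *example_find_block(struct nvm_iscsi_cfg *cfg,
                                                  u32 pf_id)
{
        u32 i, flags;

        /* Reject buffers that do not carry the "IC" signature. */
        if ((cfg->id & NVM_ISCSI_CFG_BLK_SIGNATURE_MASK) !=
            NVM_ISCSI_CFG_BLK_SIGNATURE)
                return NULL;

        for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++) {
                flags = GET_FIELD2(cfg->block[i].id,
                                   NVM_ISCSI_CFG_BLK_CTRL_FLAG);
                if (!(flags & NVM_ISCSI_CFG_BLK_CTRL_FLAG_PF_MAPPED))
                        continue;

                if (GET_FIELD2(cfg->block[i].id,
                               NVM_ISCSI_CFG_BLK_MAPPED_PF_ID) == pf_id)
                        return &cfg->block[i];
        }

        return NULL;
}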
@@ -3727,7 +3727,7 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
 	h &= QLA_CMD_HANDLE_MASK;
 	if (h != QLA_TGT_NULL_HANDLE) {
-		if (unlikely(h > req->num_outstanding_cmds)) {
+		if (unlikely(h >= req->num_outstanding_cmds)) {
 			ql_dbg(ql_dbg_tgt, vha, 0xe052,
 			    "qla_target(%d): Wrong handle %x received\n",
 			    vha->vp_idx, handle);
......
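The qla2xxx fix is a classic off-by-one: the outstanding-command table has num_outstanding_cmds slots, so valid handles run from 0 to num_outstanding_cmds - 1 and the reject test must use >=; the old > check still admitted the out-of-bounds index equal to the array size. A generic illustration, not the qla2xxx structures:

/* An array with nslots entries has valid indices 0 .. nslots - 1. */
static void *example_lookup(void **table, unsigned int nslots, unsigned int h)
{
        if (h >= nslots)        /* ">" would still allow table[nslots] */
                return NULL;

        return table[h];
}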
@@ -758,8 +758,11 @@ static bool sg_is_valid_dxfer(sg_io_hdr_t *hp)
 		if (hp->dxferp || hp->dxfer_len > 0)
 			return false;
 		return true;
-	case SG_DXFER_TO_DEV:
 	case SG_DXFER_FROM_DEV:
+		if (hp->dxfer_len < 0)
+			return false;
+		return true;
+	case SG_DXFER_TO_DEV:
 	case SG_DXFER_TO_FROM_DEV:
 		if (!hp->dxferp || hp->dxfer_len == 0)
 			return false;
......
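The sg fix gives SG_DXFER_FROM_DEV its own case so that read-direction requests are no longer forced through the TO_DEV/TO_FROM_DEV check, which demands a non-NULL dxferp and a non-zero length. For context, a plausible user-space request of the kind this path validates: a standard INQUIRY issued through the sg v3 SG_IO interface. The example is illustrative, not taken from the patch, and assumes len <= 255:

#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

static int example_inquiry(int sg_fd, unsigned char *buf, int len)
{
        unsigned char cdb[6] = { 0x12, 0, 0, 0, (unsigned char)len, 0 };
        unsigned char sense[32];
        struct sg_io_hdr hdr;

        memset(&hdr, 0, sizeof(hdr));
        hdr.interface_id = 'S';
        hdr.dxfer_direction = SG_DXFER_FROM_DEV;        /* device -> host */
        hdr.cmdp = cdb;
        hdr.cmd_len = sizeof(cdb);
        hdr.dxferp = buf;
        hdr.dxfer_len = len;
        hdr.sbp = sense;
        hdr.mx_sb_len = sizeof(sense);
        hdr.timeout = 5000;                             /* milliseconds */

        return ioctl(sg_fd, SG_IO, &hdr);
}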
@@ -837,6 +837,7 @@ static struct scsi_host_template virtscsi_host_template_multi = {
 	.eh_abort_handler = virtscsi_abort,
 	.eh_device_reset_handler = virtscsi_device_reset,
 	.eh_timed_out = virtscsi_eh_timed_out,
+	.slave_alloc = virtscsi_device_alloc,
 	.can_queue = 1024,
 	.dma_boundary = UINT_MAX,
......