Commit d52c89f1 authored by Michal Kalderon's avatar Michal Kalderon Committed by David S. Miller

qed*: Utilize FW 8.37.2.0

This FW contains several fixes and features.

RDMA
- Several modifications and fixes for Memory Windows
- Drop vlan and tcp timestamp from mss calculation in driver for
  this FW
- Fix SQ completion flow when local ack timeout is infinite
- Modifications in t10dif support

ETH
- Fix aRFS for tunneled traffic without inner IP.
- Fix chip configuration which may fail under heavy traffic conditions.
- Support receiving any-VNI in VXLAN and GENEVE RX classification.

iSCSI / FCoE
- Fix iSCSI recovery flow
- Drop vlan and tcp timestamp from mss calc for fw 8.37.2.0

Misc
- Fix several registers (split registers) that won't read correctly with
  ethtool -d
Signed-off-by: default avatarAriel Elior <Ariel.Elior@cavium.com>
Signed-off-by: default avatarManish Rangankar <manish.rangankar@cavium.com>
Signed-off-by: default avatarMichal Kalderon <Michal.Kalderon@cavium.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 95358a95
......@@ -116,6 +116,7 @@ enum rdma_cqe_requester_status_enum {
RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR,
RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR,
RDMA_CQE_REQ_STS_XRC_VOILATION_ERR,
RDMA_CQE_REQ_STS_SIG_ERR,
MAX_RDMA_CQE_REQUESTER_STATUS_ENUM
};
......@@ -152,12 +153,12 @@ struct rdma_rq_sge {
struct regpair addr;
__le32 length;
__le32 flags;
#define RDMA_RQ_SGE_L_KEY_MASK 0x3FFFFFF
#define RDMA_RQ_SGE_L_KEY_SHIFT 0
#define RDMA_RQ_SGE_L_KEY_LO_MASK 0x3FFFFFF
#define RDMA_RQ_SGE_L_KEY_LO_SHIFT 0
#define RDMA_RQ_SGE_NUM_SGES_MASK 0x7
#define RDMA_RQ_SGE_NUM_SGES_SHIFT 26
#define RDMA_RQ_SGE_RESERVED0_MASK 0x7
#define RDMA_RQ_SGE_RESERVED0_SHIFT 29
#define RDMA_RQ_SGE_L_KEY_HI_MASK 0x7
#define RDMA_RQ_SGE_L_KEY_HI_SHIFT 29
};
struct rdma_srq_sge {
......@@ -241,18 +242,39 @@ enum rdma_dif_io_direction_flg {
MAX_RDMA_DIF_IO_DIRECTION_FLG
};
/* RDMA DIF Runt Result Structure */
struct rdma_dif_runt_result {
__le16 guard_tag;
__le16 reserved[3];
/* T10 DIF (protection information) parameters carried in RDMA WQEs.
 * Replaces the old per-WQE dif_* fields; embedded in bind/FMR WQEs.
 * NOTE(review): per-field semantics inferred from names — confirm
 * against the FW 8.37.2.0 HSI definitions.
 */
struct rdma_dif_params {
__le32 base_ref_tag;	/* starting reference tag value */
__le16 app_tag;		/* expected application tag */
__le16 app_tag_mask;	/* bits of app_tag to compare */
__le16 runt_crc_value;	/* CRC of a runt (partial) block; valid if RUNT_VALID_FLG */
__le16 flags;		/* bitfield — accessed via the MASK/SHIFT pairs below */
#define RDMA_DIF_PARAMS_IO_DIRECTION_FLG_MASK 0x1
#define RDMA_DIF_PARAMS_IO_DIRECTION_FLG_SHIFT 0
#define RDMA_DIF_PARAMS_BLOCK_SIZE_MASK 0x1
#define RDMA_DIF_PARAMS_BLOCK_SIZE_SHIFT 1
#define RDMA_DIF_PARAMS_RUNT_VALID_FLG_MASK 0x1
#define RDMA_DIF_PARAMS_RUNT_VALID_FLG_SHIFT 2
#define RDMA_DIF_PARAMS_VALIDATE_CRC_GUARD_MASK 0x1
#define RDMA_DIF_PARAMS_VALIDATE_CRC_GUARD_SHIFT 3
#define RDMA_DIF_PARAMS_VALIDATE_REF_TAG_MASK 0x1
#define RDMA_DIF_PARAMS_VALIDATE_REF_TAG_SHIFT 4
#define RDMA_DIF_PARAMS_VALIDATE_APP_TAG_MASK 0x1
#define RDMA_DIF_PARAMS_VALIDATE_APP_TAG_SHIFT 5
#define RDMA_DIF_PARAMS_CRC_SEED_MASK 0x1
#define RDMA_DIF_PARAMS_CRC_SEED_SHIFT 6
#define RDMA_DIF_PARAMS_RX_REF_TAG_CONST_MASK 0x1
#define RDMA_DIF_PARAMS_RX_REF_TAG_CONST_SHIFT 7
#define RDMA_DIF_PARAMS_BLOCK_GUARD_TYPE_MASK 0x1
#define RDMA_DIF_PARAMS_BLOCK_GUARD_TYPE_SHIFT 8
#define RDMA_DIF_PARAMS_APP_ESCAPE_MASK 0x1
#define RDMA_DIF_PARAMS_APP_ESCAPE_SHIFT 9
#define RDMA_DIF_PARAMS_REF_ESCAPE_MASK 0x1
#define RDMA_DIF_PARAMS_REF_ESCAPE_SHIFT 10
#define RDMA_DIF_PARAMS_RESERVED4_MASK 0x1F
#define RDMA_DIF_PARAMS_RESERVED4_SHIFT 11
__le32 reserved5;	/* pad to a multiple of 8 bytes */
};
/* Memory window type enumeration */
enum rdma_mw_type {
RDMA_MW_TYPE_1,		/* type 1 memory window */
RDMA_MW_TYPE_2A,	/* type 2A memory window */
MAX_RDMA_MW_TYPE	/* sentinel — number of valid MW types */
};
struct rdma_sq_atomic_wqe {
__le32 reserved1;
......@@ -334,17 +356,17 @@ struct rdma_sq_bind_wqe {
#define RDMA_SQ_BIND_WQE_SE_FLG_SHIFT 3
#define RDMA_SQ_BIND_WQE_INLINE_FLG_MASK 0x1
#define RDMA_SQ_BIND_WQE_INLINE_FLG_SHIFT 4
#define RDMA_SQ_BIND_WQE_RESERVED0_MASK 0x7
#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT 5
#define RDMA_SQ_BIND_WQE_DIF_ON_HOST_FLG_MASK 0x1
#define RDMA_SQ_BIND_WQE_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_BIND_WQE_RESERVED0_MASK 0x3
#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT 6
u8 wqe_size;
u8 prev_wqe_size;
u8 bind_ctrl;
#define RDMA_SQ_BIND_WQE_ZERO_BASED_MASK 0x1
#define RDMA_SQ_BIND_WQE_ZERO_BASED_SHIFT 0
#define RDMA_SQ_BIND_WQE_MW_TYPE_MASK 0x1
#define RDMA_SQ_BIND_WQE_MW_TYPE_SHIFT 1
#define RDMA_SQ_BIND_WQE_RESERVED1_MASK 0x3F
#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT 2
#define RDMA_SQ_BIND_WQE_RESERVED1_MASK 0x7F
#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT 1
u8 access_ctrl;
#define RDMA_SQ_BIND_WQE_REMOTE_READ_MASK 0x1
#define RDMA_SQ_BIND_WQE_REMOTE_READ_SHIFT 0
......@@ -363,6 +385,7 @@ struct rdma_sq_bind_wqe {
__le32 length_lo;
__le32 parent_l_key;
__le32 reserved4;
struct rdma_dif_params dif_params;
};
/* First element (16 bytes) of bind wqe */
......@@ -392,10 +415,8 @@ struct rdma_sq_bind_wqe_2nd {
u8 bind_ctrl;
#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_MASK 0x1
#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_SHIFT 0
#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_MASK 0x1
#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_SHIFT 1
#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK 0x3F
#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT 2
#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK 0x7F
#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT 1
u8 access_ctrl;
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_MASK 0x1
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_SHIFT 0
......@@ -416,6 +437,11 @@ struct rdma_sq_bind_wqe_2nd {
__le32 reserved4;
};
/* Third element (16 bytes) of bind wqe.
 * Carries only the T10 DIF parameters for the bound memory window.
 */
struct rdma_sq_bind_wqe_3rd {
struct rdma_dif_params dif_params;
};
/* Structure with only the SQ WQE common
* fields. Size is of one SQ element (16B)
*/
......@@ -486,30 +512,6 @@ struct rdma_sq_fmr_wqe {
u8 length_hi;
__le32 length_lo;
struct regpair pbl_addr;
__le32 dif_base_ref_tag;
__le16 dif_app_tag;
__le16 dif_app_tag_mask;
__le16 dif_runt_crc_value;
__le16 dif_flags;
#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK 0x1
#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT 0
#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK 0x1
#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT 1
#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK 0x1
#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT 2
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK 0x1
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK 0x1
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT 4
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK 0x1
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT 5
#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK 0x1
#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6
#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_MASK 0x1
#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_SHIFT 7
#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0xFF
#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 8
__le32 reserved5;
};
/* First element (16 bytes) of fmr wqe */
......@@ -566,33 +568,6 @@ struct rdma_sq_fmr_wqe_2nd {
struct regpair pbl_addr;
};
/* Third element (16 bytes) of fmr wqe */
struct rdma_sq_fmr_wqe_3rd {
__le32 dif_base_ref_tag;
__le16 dif_app_tag;
__le16 dif_app_tag_mask;
__le16 dif_runt_crc_value;
__le16 dif_flags;
#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK 0x1
#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT 0
#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK 0x1
#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT 1
#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK 0x1
#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT 2
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK 0x1
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK 0x1
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT 4
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK 0x1
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT 5
#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK 0x1
#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6
#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_MASK 0x1
#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_SHIFT 7
#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0xFF
#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 8
__le32 reserved5;
};
struct rdma_sq_local_inv_wqe {
struct regpair reserved;
......@@ -637,8 +612,8 @@ struct rdma_sq_rdma_wqe {
#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK 0x1
#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT 6
#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x1
#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 7
#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK 0x1
#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT 7
u8 wqe_size;
u8 prev_wqe_size;
struct regpair remote_va;
......@@ -646,13 +621,9 @@ struct rdma_sq_rdma_wqe {
u8 dif_flags;
#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_MASK 0x1
#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_SHIFT 0
#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_MASK 0x1
#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_SHIFT 1
#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_MASK 0x1
#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_SHIFT 2
#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK 0x1F
#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT 3
u8 reserved2[3];
#define RDMA_SQ_RDMA_WQE_RESERVED2_MASK 0x7F
#define RDMA_SQ_RDMA_WQE_RESERVED2_SHIFT 1
u8 reserved3[3];
};
/* First element (16 bytes) of rdma wqe */
......
......@@ -3276,7 +3276,7 @@ int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
wr->num_sge);
SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO,
wr->sg_list[i].lkey);
RQ_SGE_SET(rqe, wr->sg_list[i].addr,
......@@ -3295,7 +3295,7 @@ int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
/* First one must include the number
* of SGE in the list
*/
SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, 0);
SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
RQ_SGE_SET(rqe, 0, 0, flags);
......
......@@ -183,16 +183,9 @@ enum platform_ids {
MAX_PLATFORM_IDS
};
struct chip_platform_defs {
u8 num_ports;
u8 num_pfs;
u8 num_vfs;
};
/* Chip constant definitions */
struct chip_defs {
const char *name;
struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
};
/* Platform constant definitions */
......@@ -317,6 +310,11 @@ struct phy_defs {
u32 tbus_data_hi_addr;
};
/* Split type definitions — per-split-type constants used by the GRC
 * register dump (one entry per enum init_split_types value).
 */
struct split_type_defs {
const char *name;	/* split name emitted in the dump header */
};
/******************************** Constants **********************************/
#define MAX_LCIDS 320
......@@ -469,21 +467,9 @@ static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
/* Chip constant definitions array */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
{ "bb",
{{MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB},
{0, 0, 0},
{0, 0, 0},
{0, 0, 0} } },
{ "ah",
{{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2},
{0, 0, 0},
{0, 0, 0},
{0, 0, 0} } },
{ "reserved",
{{0, 0, 0},
{0, 0, 0},
{0, 0, 0},
{0, 0, 0} } }
{"bb"},
{"ah"},
{"reserved"},
};
/* Storm constant definitions array */
......@@ -1588,7 +1574,7 @@ static struct grc_param_defs s_grc_param_defs[] = {
{{0, 0, 0}, 0, 1, false, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_BMB */
{{0, 0, 0}, 0, 1, false, false, 0, 1},
{{0, 0, 0}, 0, 1, false, false, 0, 0},
/* DBG_GRC_PARAM_DUMP_NIG */
{{1, 1, 1}, 0, 1, false, false, 0, 1},
......@@ -1745,6 +1731,23 @@ static struct phy_defs s_phy_defs[] = {
PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
};
/* Split name per enum init_split_types value; indexed by split type.
 * SPLIT_TYPE_PORT_PF deliberately reuses the "port" name: PORT_PF
 * sections are reported under a per-port header (see the PORT_PF
 * handling in qed_grc_dump_split_data).
 */
static struct split_type_defs s_split_type_defs[] = {
/* SPLIT_TYPE_NONE */
{"eng"},
/* SPLIT_TYPE_PORT */
{"port"},
/* SPLIT_TYPE_PF */
{"pf"},
/* SPLIT_TYPE_PORT_PF */
{"port"},
/* SPLIT_TYPE_VF */
{"vf"}
};
/**************************** Private Functions ******************************/
/* Reads and returns a single dword from the specified unaligned buffer */
......@@ -1781,28 +1784,68 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u8 num_pfs = 0, max_pfs_per_port = 0;
if (dev_data->initialized)
return DBG_STATUS_OK;
/* Set chip */
if (QED_IS_K2(p_hwfn->cdev)) {
dev_data->chip_id = CHIP_K2;
dev_data->mode_enable[MODE_K2] = 1;
dev_data->num_vfs = MAX_NUM_VFS_K2;
num_pfs = MAX_NUM_PFS_K2;
max_pfs_per_port = MAX_NUM_PFS_K2 / 2;
} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
dev_data->chip_id = CHIP_BB;
dev_data->mode_enable[MODE_BB] = 1;
dev_data->num_vfs = MAX_NUM_VFS_BB;
num_pfs = MAX_NUM_PFS_BB;
max_pfs_per_port = MAX_NUM_PFS_BB;
} else {
return DBG_STATUS_UNKNOWN_CHIP;
}
/* Set platofrm */
dev_data->platform_id = PLATFORM_ASIC;
dev_data->mode_enable[MODE_ASIC] = 1;
/* Set port mode */
switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
case 0:
dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1;
break;
case 1:
dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1;
break;
case 2:
dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1;
break;
}
/* Set 100G mode */
if (dev_data->chip_id == CHIP_BB &&
qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB) == 2)
dev_data->mode_enable[MODE_100G] = 1;
/* Set number of ports */
if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] ||
dev_data->mode_enable[MODE_100G])
dev_data->num_ports = 1;
else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2])
dev_data->num_ports = 2;
else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4])
dev_data->num_ports = 4;
/* Set number of PFs per port */
dev_data->num_pfs_per_port = min_t(u32,
num_pfs / dev_data->num_ports,
max_pfs_per_port);
/* Initializes the GRC parameters */
qed_dbg_grc_init_params(p_hwfn);
dev_data->use_dmae = true;
dev_data->num_regs_read = 0;
dev_data->initialized = 1;
return DBG_STATUS_OK;
......@@ -1821,7 +1864,7 @@ static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn,
/* Reads the FW info structure for the specified Storm from the chip,
* and writes it to the specified fw_info pointer.
*/
static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u8 storm_id, struct fw_info *fw_info)
{
......@@ -1945,27 +1988,14 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf, bool dump)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
struct fw_info fw_info = { {0}, {0} };
u32 offset = 0;
if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
/* Read FW image/version from PRAM in a non-reset SEMI */
bool found = false;
u8 storm_id;
for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found;
storm_id++) {
struct storm_defs *storm = &s_storm_defs[storm_id];
/* Read FW version/image */
if (dev_data->block_in_reset[storm->block_id])
continue;
/* Read FW info for the current Storm */
qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
/* Read FW info from chip */
qed_read_fw_info(p_hwfn, p_ptt, &fw_info);
/* Create FW version/image strings */
if (snprintf(fw_ver_str, sizeof(fw_ver_str),
......@@ -1982,9 +2012,6 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
strcpy(fw_img_str, "unknown");
break;
}
found = true;
}
}
/* Dump FW version, image and timestamp */
......@@ -2413,19 +2440,20 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
/* Dumps GRC registers section header. Returns the dumped size in dwords.
* The following parameters are dumped:
* - count: no. of dumped entries
* - split: split type
* - id: split ID (dumped only if split_id >= 0)
* - split_type: split type
* - split_id: split ID (dumped only if split_id != SPLIT_TYPE_NONE)
* - param_name: user parameter value (dumped only if param_name != NULL
* and param_val != NULL).
*/
static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
bool dump,
u32 num_reg_entries,
const char *split_type,
int split_id,
enum init_split_types split_type,
u8 split_id,
const char *param_name, const char *param_val)
{
u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
u8 num_params = 2 +
(split_type != SPLIT_TYPE_NONE ? 1 : 0) + (param_name ? 1 : 0);
u32 offset = 0;
offset += qed_dump_section_hdr(dump_buf + offset,
......@@ -2433,8 +2461,9 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
offset += qed_dump_num_param(dump_buf + offset,
dump, "count", num_reg_entries);
offset += qed_dump_str_param(dump_buf + offset,
dump, "split", split_type);
if (split_id >= 0)
dump, "split",
s_split_type_defs[split_type].name);
if (split_type != SPLIT_TYPE_NONE)
offset += qed_dump_num_param(dump_buf + offset,
dump, "id", split_id);
if (param_name && param_val)
......@@ -2463,9 +2492,12 @@ void qed_read_regs(struct qed_hwfn *p_hwfn,
static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf,
bool dump, u32 addr, u32 len, bool wide_bus)
bool dump, u32 addr, u32 len, bool wide_bus,
enum init_split_types split_type,
u8 split_id)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0;
if (!dump)
return len;
......@@ -2481,8 +2513,27 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
dev_data->num_regs_read = 0;
}
switch (split_type) {
case SPLIT_TYPE_PORT:
port_id = split_id;
break;
case SPLIT_TYPE_PF:
pf_id = split_id;
break;
case SPLIT_TYPE_PORT_PF:
port_id = split_id / dev_data->num_pfs_per_port;
pf_id = port_id + dev_data->num_ports *
(split_id % dev_data->num_pfs_per_port);
break;
case SPLIT_TYPE_VF:
vf_id = split_id;
break;
default:
break;
}
/* Try reading using DMAE */
if (dev_data->use_dmae &&
if (dev_data->use_dmae && split_type == SPLIT_TYPE_NONE &&
(len >= s_platform_defs[dev_data->platform_id].dmae_thresh ||
wide_bus)) {
if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
......@@ -2494,7 +2545,37 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
"Failed reading from chip using DMAE, using GRC instead\n");
}
/* Read registers */
/* If not read using DMAE, read using GRC */
/* Set pretend */
if (split_type != dev_data->pretend.split_type || split_id !=
dev_data->pretend.split_id) {
switch (split_type) {
case SPLIT_TYPE_PORT:
qed_port_pretend(p_hwfn, p_ptt, port_id);
break;
case SPLIT_TYPE_PF:
fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
qed_fid_pretend(p_hwfn, p_ptt, fid);
break;
case SPLIT_TYPE_PORT_PF:
fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid);
break;
case SPLIT_TYPE_VF:
fid = BIT(PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) |
(vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT);
qed_fid_pretend(p_hwfn, p_ptt, fid);
break;
default:
break;
}
dev_data->pretend.split_type = (u8)split_type;
dev_data->pretend.split_id = split_id;
}
/* Read registers using GRC */
qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
return len;
......@@ -2518,7 +2599,8 @@ static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *dump_buf,
bool dump, u32 addr, u32 len, bool wide_bus)
bool dump, u32 addr, u32 len, bool wide_bus,
enum init_split_types split_type, u8 split_id)
{
u32 offset = 0;
......@@ -2526,7 +2608,8 @@ static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
offset += qed_grc_dump_addr_range(p_hwfn,
p_ptt,
dump_buf + offset,
dump, addr, len, wide_bus);
dump, addr, len, wide_bus,
split_type, split_id);
return offset;
}
......@@ -2559,7 +2642,8 @@ static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
offset += qed_grc_dump_addr_range(p_hwfn,
p_ptt,
dump_buf + offset,
dump, addr, curr_len, false);
dump, addr, curr_len, false,
SPLIT_TYPE_NONE, 0);
reg_offset += curr_len;
addr += curr_len;
......@@ -2581,6 +2665,8 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
struct dbg_array input_regs_arr,
u32 *dump_buf,
bool dump,
enum init_split_types split_type,
u8 split_id,
bool block_enable[MAX_BLOCK_ID],
u32 *num_dumped_reg_entries)
{
......@@ -2628,7 +2714,8 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
dump,
addr,
len,
wide_bus);
wide_bus,
split_type, split_id);
(*num_dumped_reg_entries)++;
}
}
......@@ -2643,19 +2730,28 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
bool dump,
bool block_enable[MAX_BLOCK_ID],
const char *split_type_name,
u32 split_id,
enum init_split_types split_type,
u8 split_id,
const char *param_name,
const char *param_val)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
enum init_split_types hdr_split_type = split_type;
u32 num_dumped_reg_entries, offset;
u8 hdr_split_id = split_id;
/* In PORT_PF split type, print a port split header */
if (split_type == SPLIT_TYPE_PORT_PF) {
hdr_split_type = SPLIT_TYPE_PORT;
hdr_split_id = split_id / dev_data->num_pfs_per_port;
}
/* Calculate register dump header size (and skip it for now) */
offset = qed_grc_dump_regs_hdr(dump_buf,
false,
0,
split_type_name,
split_id, param_name, param_val);
hdr_split_type,
hdr_split_id, param_name, param_val);
/* Dump registers */
offset += qed_grc_dump_regs_entries(p_hwfn,
......@@ -2663,6 +2759,8 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
input_regs_arr,
dump_buf + offset,
dump,
split_type,
split_id,
block_enable,
&num_dumped_reg_entries);
......@@ -2671,8 +2769,8 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
qed_grc_dump_regs_hdr(dump_buf,
dump,
num_dumped_reg_entries,
split_type_name,
split_id, param_name, param_val);
hdr_split_type,
hdr_split_id, param_name, param_val);
return num_dumped_reg_entries > 0 ? offset : 0;
}
......@@ -2688,26 +2786,21 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
const char *param_name, const char *param_val)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
struct chip_platform_defs *chip_platform;
u32 offset = 0, input_offset = 0;
struct chip_defs *chip;
u8 port_id, pf_id, vf_id;
u16 fid;
chip = &s_chip_defs[dev_data->chip_id];
chip_platform = &chip->per_platform[dev_data->platform_id];
while (input_offset <
s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
const struct dbg_dump_split_hdr *split_hdr;
struct dbg_array curr_input_regs_arr;
enum init_split_types split_type;
u16 split_count = 0;
u32 split_data_size;
u8 split_type_id;
u8 split_id;
split_hdr =
(const struct dbg_dump_split_hdr *)
&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
split_type_id =
split_type =
GET_FIELD(split_hdr->hdr,
DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
split_data_size =
......@@ -2717,99 +2810,44 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
curr_input_regs_arr.size_in_dwords = split_data_size;
switch (split_type_id) {
switch (split_type) {
case SPLIT_TYPE_NONE:
offset += qed_grc_dump_split_data(p_hwfn,
p_ptt,
curr_input_regs_arr,
dump_buf + offset,
dump,
block_enable,
"eng",
(u32)(-1),
param_name,
param_val);
split_count = 1;
break;
case SPLIT_TYPE_PORT:
for (port_id = 0; port_id < chip_platform->num_ports;
port_id++) {
if (dump)
qed_port_pretend(p_hwfn, p_ptt,
port_id);
offset +=
qed_grc_dump_split_data(p_hwfn, p_ptt,
curr_input_regs_arr,
dump_buf + offset,
dump, block_enable,
"port", port_id,
param_name,
param_val);
}
split_count = dev_data->num_ports;
break;
case SPLIT_TYPE_PF:
case SPLIT_TYPE_PORT_PF:
for (pf_id = 0; pf_id < chip_platform->num_pfs;
pf_id++) {
u8 pfid_shift =
PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
if (dump) {
fid = pf_id << pfid_shift;
qed_fid_pretend(p_hwfn, p_ptt, fid);
}
offset +=
qed_grc_dump_split_data(p_hwfn,
p_ptt,
curr_input_regs_arr,
dump_buf + offset,
dump,
block_enable,
"pf",
pf_id,
param_name,
param_val);
}
split_count = dev_data->num_ports *
dev_data->num_pfs_per_port;
break;
case SPLIT_TYPE_VF:
for (vf_id = 0; vf_id < chip_platform->num_vfs;
vf_id++) {
u8 vfvalid_shift =
PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT;
u8 vfid_shift =
PXP_PRETEND_CONCRETE_FID_VFID_SHIFT;
if (dump) {
fid = BIT(vfvalid_shift) |
(vf_id << vfid_shift);
qed_fid_pretend(p_hwfn, p_ptt, fid);
split_count = dev_data->num_vfs;
break;
default:
return 0;
}
offset +=
qed_grc_dump_split_data(p_hwfn, p_ptt,
for (split_id = 0; split_id < split_count; split_id++)
offset += qed_grc_dump_split_data(p_hwfn, p_ptt,
curr_input_regs_arr,
dump_buf + offset,
dump, block_enable,
"vf", vf_id,
split_type,
split_id,
param_name,
param_val);
}
break;
default:
break;
}
input_offset += split_data_size;
}
/* Pretend to original PF */
/* Cancel pretends (pretend to original PF) */
if (dump) {
fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
qed_fid_pretend(p_hwfn, p_ptt, fid);
dev_data->pretend.split_type = SPLIT_TYPE_NONE;
dev_data->pretend.split_id = 0;
}
return offset;
......@@ -2825,7 +2863,8 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
/* Calculate header size */
offset += qed_grc_dump_regs_hdr(dump_buf,
false, 0, "eng", -1, NULL, NULL);
false, 0,
SPLIT_TYPE_NONE, 0, NULL, NULL);
/* Write reset registers */
for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
......@@ -2838,14 +2877,15 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
dump,
BYTES_TO_DWORDS
(s_reset_regs_defs[i].addr), 1,
false);
false, SPLIT_TYPE_NONE, 0);
num_regs++;
}
/* Write header */
if (dump)
qed_grc_dump_regs_hdr(dump_buf,
true, num_regs, "eng", -1, NULL, NULL);
true, num_regs, SPLIT_TYPE_NONE,
0, NULL, NULL);
return offset;
}
......@@ -2864,7 +2904,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
/* Calculate header size */
offset += qed_grc_dump_regs_hdr(dump_buf,
false, 0, "eng", -1, NULL, NULL);
false, 0, SPLIT_TYPE_NONE,
0, NULL, NULL);
/* Write parity registers */
for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
......@@ -2899,7 +2940,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
addr,
1, false);
1, false,
SPLIT_TYPE_NONE, 0);
addr = GET_FIELD(reg_data->data,
DBG_ATTN_REG_STS_ADDRESS);
offset += qed_grc_dump_reg_entry(p_hwfn,
......@@ -2907,7 +2949,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
addr,
1, false);
1, false,
SPLIT_TYPE_NONE, 0);
num_reg_entries += 2;
}
}
......@@ -2929,7 +2972,7 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
dump,
addr,
1,
false);
false, SPLIT_TYPE_NONE, 0);
num_reg_entries++;
}
......@@ -2937,7 +2980,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
if (dump)
qed_grc_dump_regs_hdr(dump_buf,
true,
num_reg_entries, "eng", -1, NULL, NULL);
num_reg_entries, SPLIT_TYPE_NONE,
0, NULL, NULL);
return offset;
}
......@@ -2950,7 +2994,8 @@ static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
u32 offset = 0, addr;
offset += qed_grc_dump_regs_hdr(dump_buf,
dump, 2, "eng", -1, NULL, NULL);
dump, 2, SPLIT_TYPE_NONE, 0,
NULL, NULL);
/* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be
* skipped).
......@@ -3096,7 +3141,8 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
offset += qed_grc_dump_addr_range(p_hwfn,
p_ptt,
dump_buf + offset,
dump, addr, len, wide_bus);
dump, addr, len, wide_bus,
SPLIT_TYPE_NONE, 0);
return offset;
}
......@@ -3235,12 +3281,12 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
const struct dbg_dump_split_hdr *split_hdr;
struct dbg_array curr_input_mems_arr;
enum init_split_types split_type;
u32 split_data_size;
u8 split_type_id;
split_hdr = (const struct dbg_dump_split_hdr *)
&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
split_type_id =
split_type =
GET_FIELD(split_hdr->hdr,
DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
split_data_size =
......@@ -3250,20 +3296,15 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
curr_input_mems_arr.size_in_dwords = split_data_size;
switch (split_type_id) {
case SPLIT_TYPE_NONE:
if (split_type == SPLIT_TYPE_NONE)
offset += qed_grc_dump_mem_entries(p_hwfn,
p_ptt,
curr_input_mems_arr,
dump_buf + offset,
dump);
break;
default:
else
DP_NOTICE(p_hwfn,
"Dumping split memories is currently not supported\n");
break;
}
input_offset += split_data_size;
}
......@@ -3623,7 +3664,8 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
dump,
addr,
num_dwords_to_read,
false);
false,
SPLIT_TYPE_NONE, 0);
total_dwords -= num_dwords_to_read;
rss_addr++;
}
......@@ -3682,7 +3724,7 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
dump,
addr,
len,
false);
false, SPLIT_TYPE_NONE, 0);
}
return offset;
......@@ -3731,7 +3773,8 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
/* Dump required non-MCP registers */
offset += qed_grc_dump_regs_hdr(dump_buf + offset,
dump, 1, "eng", -1, "block", "MCP");
dump, 1, SPLIT_TYPE_NONE, 0,
"block", "MCP");
addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
offset += qed_grc_dump_reg_entry(p_hwfn,
p_ptt,
......@@ -3739,7 +3782,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
dump,
addr,
1,
false);
false, SPLIT_TYPE_NONE, 0);
/* Release MCP */
if (halted && qed_mcp_resume(p_hwfn, p_ptt))
......@@ -3923,7 +3966,8 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
dump,
addr,
len,
true);
true, SPLIT_TYPE_NONE,
0);
}
/* Disable block's client and debug output */
......@@ -3949,28 +3993,15 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
bool parities_masked = false;
u8 i, port_mode = 0;
u32 offset = 0;
u8 i;
*num_dumped_dwords = 0;
if (dump) {
/* Find port mode */
switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
case 0:
port_mode = 1;
break;
case 1:
port_mode = 2;
break;
case 2:
port_mode = 4;
break;
}
dev_data->num_regs_read = 0;
/* Update reset state */
if (dump)
qed_update_blocks_reset_state(p_hwfn, p_ptt);
}
/* Dump global params */
offset += qed_dump_common_global_params(p_hwfn,
......@@ -3989,7 +4020,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
qed_grc_get_param(p_hwfn,
DBG_GRC_PARAM_NUM_LTIDS));
offset += qed_dump_num_param(dump_buf + offset,
dump, "num-ports", port_mode);
dump, "num-ports", dev_data->num_ports);
/* Dump reset registers (dumped before taking blocks out of reset ) */
if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
......@@ -4093,10 +4124,10 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
offset += qed_grc_dump_phy(p_hwfn,
p_ptt, dump_buf + offset, dump);
/* Dump static debug data */
/* Dump static debug data (only if not during debug bus recording) */
if (qed_grc_is_included(p_hwfn,
DBG_GRC_PARAM_DUMP_STATIC) &&
dev_data->bus.state == DBG_BUS_STATE_IDLE)
(!dump || dev_data->bus.state == DBG_BUS_STATE_IDLE))
offset += qed_grc_dump_static_debug(p_hwfn,
p_ptt,
dump_buf + offset, dump);
......@@ -4250,7 +4281,8 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
addr,
reg->size, wide_bus);
reg->size, wide_bus,
SPLIT_TYPE_NONE, 0);
}
}
......@@ -4373,7 +4405,8 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
next_reg_offset,
dump, addr,
reg->entry_size,
wide_bus);
wide_bus,
SPLIT_TYPE_NONE, 0);
}
/* Call rule condition function.
......@@ -4723,7 +4756,8 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
BYTES_TO_DWORDS(trace_data_grc_addr),
trace_data_size_dwords, false);
trace_data_size_dwords, false,
SPLIT_TYPE_NONE, 0);
/* Resume MCP (only if halt succeeded) */
if (halted && qed_mcp_resume(p_hwfn, p_ptt))
......@@ -4829,7 +4863,8 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
true,
addr,
len,
true);
true, SPLIT_TYPE_NONE,
0);
fifo_has_data = qed_rd(p_hwfn, p_ptt,
GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
}
......@@ -4898,7 +4933,8 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
true,
addr,
len,
true);
true, SPLIT_TYPE_NONE,
0);
fifo_has_data = qed_rd(p_hwfn, p_ptt,
IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
}
......@@ -4956,7 +4992,7 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
true,
addr,
override_window_dwords,
true);
true, SPLIT_TYPE_NONE, 0);
qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
override_window_dwords);
out:
......@@ -4998,7 +5034,7 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
continue;
/* Read FW info for the current Storm */
qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
asserts = &fw_info.fw_asserts_section;
......@@ -5036,7 +5072,7 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump, addr,
asserts->list_element_dword_size,
false);
false, SPLIT_TYPE_NONE, 0);
}
/* Dump last section */
......@@ -5063,6 +5099,28 @@ enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
return DBG_STATUS_OK;
}
/* Read the FW info from the internal RAM of the first Storm that is not
 * held in reset.
 *
 * Returns true if the info was read from some Storm, false if every Storm
 * is currently in reset.
 */
bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, struct fw_info *fw_info)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u8 i;

	for (i = 0; i < MAX_DBG_STORMS; i++) {
		struct storm_defs *storm = &s_storm_defs[i];

		/* A Storm whose block is in reset cannot serve the read */
		if (!dev_data->block_in_reset[storm->block_id]) {
			qed_read_storm_fw_info(p_hwfn, p_ptt, i, fw_info);
			return true;
		}
	}

	return false;
}
/* Assign default GRC param values */
void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
{
......
......@@ -2792,7 +2792,7 @@ static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn,
{
u32 port_mode;
port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0);
port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB);
if (port_mode < 3) {
p_hwfn->cdev->num_ports_in_engine = 1;
......
......@@ -1095,14 +1095,16 @@ enum personality_type {
struct pf_start_tunnel_config {
u8 set_vxlan_udp_port_flg;
u8 set_geneve_udp_port_flg;
u8 set_no_inner_l2_vxlan_udp_port_flg;
u8 tunnel_clss_vxlan;
u8 tunnel_clss_l2geneve;
u8 tunnel_clss_ipgeneve;
u8 tunnel_clss_l2gre;
u8 tunnel_clss_ipgre;
u8 reserved;
__le16 vxlan_udp_port;
__le16 geneve_udp_port;
__le16 no_inner_l2_vxlan_udp_port;
__le16 reserved[3];
};
/* Ramrod data for PF start ramrod */
......@@ -1145,14 +1147,17 @@ struct pf_update_tunnel_config {
u8 update_rx_def_non_ucast_clss;
u8 set_vxlan_udp_port_flg;
u8 set_geneve_udp_port_flg;
u8 set_no_inner_l2_vxlan_udp_port_flg;
u8 tunnel_clss_vxlan;
u8 tunnel_clss_l2geneve;
u8 tunnel_clss_ipgeneve;
u8 tunnel_clss_l2gre;
u8 tunnel_clss_ipgre;
u8 reserved;
__le16 vxlan_udp_port;
__le16 geneve_udp_port;
__le16 reserved;
__le16 no_inner_l2_vxlan_udp_port;
__le16 reserved1[3];
};
/* Data for port update ramrod */
......@@ -2535,7 +2540,14 @@ struct idle_chk_data {
u16 reserved2;
};
/* Debug Tools data (per HW function) */
struct pretend_params {
u8 split_type;
u8 reserved;
u16 split_id;
};
/* Debug Tools data (per HW function)
*/
struct dbg_tools_data {
struct dbg_grc_data grc;
struct dbg_bus_data bus;
......@@ -2544,8 +2556,13 @@ struct dbg_tools_data {
u8 block_in_reset[88];
u8 chip_id;
u8 platform_id;
u8 num_ports;
u8 num_pfs_per_port;
u8 num_vfs;
u8 initialized;
u8 use_dmae;
u8 reserved;
struct pretend_params pretend;
u32 num_regs_read;
};
......@@ -2974,6 +2991,24 @@ enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr);
void qed_read_regs(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len);
/**
* @brief qed_read_fw_info - Reads FW info from the chip.
*
* The FW info contains FW-related information, such as the FW version,
* FW image type (e.g. main/L2B), FW timestamp, etc.
* The FW info is read from the internal RAM of the first Storm that is not in
* reset.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param fw_info - Out: a pointer to write the FW info into.
*
* @return true if the FW info was read successfully from one of the Storms,
* or false if all Storms are in reset.
*/
bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct fw_info *fw_info);
/**
* @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
* default value.
......@@ -4110,6 +4145,21 @@ void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
*/
void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
#define NUM_STORMS 6
/**
* @brief qed_set_rdma_error_level - Sets the RDMA assert level.
* If the severity of an error is above
* this level, the FW will assert.
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers
* @param assert_level - An array of assert levels for each storm.
*
*/
void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u8 assert_level[NUM_STORMS]);
/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
......@@ -4340,27 +4390,67 @@ void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
(IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
/* Xstorm error level for assert */
#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
(IRO[47].base + ((pf_id) * IRO[47].m1))
#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[47].size)
/* Ystorm error level for assert */
#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
(IRO[48].base + ((pf_id) * IRO[48].m1))
#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size)
/* Pstorm error level for assert */
#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
(IRO[49].base + ((pf_id) * IRO[49].m1))
#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size)
/* Tstorm error level for assert */
#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
(IRO[50].base + ((pf_id) * IRO[50].m1))
#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size)
/* Mstorm error level for assert */
#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
(IRO[51].base + ((pf_id) * IRO[51].m1))
#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size)
/* Ustorm error level for assert */
#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
(IRO[52].base + ((pf_id) * IRO[52].m1))
#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size)
/* Xstorm iWARP rxmit stats */
#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \
(IRO[47].base + ((pf_id) * IRO[47].m1))
#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[47].size)
(IRO[53].base + ((pf_id) * IRO[53].m1))
#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[53].size)
/* Tstorm RoCE Event Statistics */
#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \
(IRO[48].base + ((roce_pf_id) * IRO[48].m1))
#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[48].size)
(IRO[54].base + ((roce_pf_id) * IRO[54].m1))
#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[54].size)
/* DCQCN Received Statistics */
#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \
(IRO[49].base + ((roce_pf_id) * IRO[49].m1))
#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[49].size)
(IRO[55].base + ((roce_pf_id) * IRO[55].m1))
#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[55].size)
/* RoCE Error Statistics */
#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \
(IRO[56].base + ((roce_pf_id) * IRO[56].m1))
#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[56].size)
/* DCQCN Sent Statistics */
#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \
(IRO[50].base + ((roce_pf_id) * IRO[50].m1))
#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[50].size)
(IRO[57].base + ((roce_pf_id) * IRO[57].m1))
#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[57].size)
/* RoCE CQEs Statistics */
#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \
(IRO[58].base + ((roce_pf_id) * IRO[58].m1))
#define USTORM_ROCE_CQE_STATS_SIZE (IRO[58].size)
static const struct iro iro_arr[51] = {
static const struct iro iro_arr[59] = {
{0x0, 0x0, 0x0, 0x0, 0x8},
{0x4cb8, 0x88, 0x0, 0x0, 0x88},
{0x6530, 0x20, 0x0, 0x0, 0x20},
......@@ -4408,10 +4498,18 @@ static const struct iro iro_arr[51] = {
{0x10768, 0x20, 0x0, 0x0, 0x20},
{0x2d48, 0x80, 0x0, 0x0, 0x10},
{0x5048, 0x10, 0x0, 0x0, 0x10},
{0xc748, 0x8, 0x0, 0x0, 0x1},
{0xa128, 0x8, 0x0, 0x0, 0x1},
{0x10f00, 0x8, 0x0, 0x0, 0x1},
{0xf030, 0x8, 0x0, 0x0, 0x1},
{0x13028, 0x8, 0x0, 0x0, 0x1},
{0x12c58, 0x8, 0x0, 0x0, 0x1},
{0xc9b8, 0x30, 0x0, 0x0, 0x10},
{0xed90, 0x10, 0x0, 0x0, 0x10},
{0xa3a0, 0x10, 0x0, 0x0, 0x10},
{0xed90, 0x28, 0x0, 0x0, 0x28},
{0xa520, 0x18, 0x0, 0x0, 0x18},
{0xa6a0, 0x8, 0x0, 0x0, 0x8},
{0x13108, 0x8, 0x0, 0x0, 0x8},
{0x13c50, 0x18, 0x0, 0x0, 0x18},
};
/* Runtime array offsets */
......@@ -4797,147 +4895,147 @@ static const struct iro iro_arr[51] = {
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 39769
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 39785
#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 39786
#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39787
#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39786
#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39795
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39794
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40819
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40818
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41331
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41330
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41843
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41842
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42355
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42354
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512
#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42867
#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42866
#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32
#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42899
#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42900
#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42901
#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42902
#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42903
#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42904
#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42905
#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42906
#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42907
#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42908
#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42909
#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42910
#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42911
#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42912
#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42913
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42914
#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42915
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42916
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42917
#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42918
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42919
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42920
#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42921
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42922
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42923
#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42924
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42925
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42926
#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42927
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42928
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42929
#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42930
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42931
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42932
#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42933
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42934
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42935
#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42936
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42937
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42938
#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42939
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42940
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42941
#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42942
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42943
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42944
#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42945
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42946
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42947
#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42948
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42949
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42950
#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42951
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42952
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42953
#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42954
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42955
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42956
#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42957
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42958
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42959
#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42960
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42961
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42962
#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42963
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42964
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42965
#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42966
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42967
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42968
#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42969
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42970
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42971
#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42972
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42973
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42974
#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42975
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42976
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42977
#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42978
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42979
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42980
#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42981
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42982
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42983
#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42984
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42985
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42986
#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42987
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42988
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42989
#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42990
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42991
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42992
#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42993
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42994
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42995
#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42996
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42997
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42998
#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42999
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 43000
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43001
#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43002
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43003
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43004
#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43005
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43006
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43007
#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43008
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43009
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43010
#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43011
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43012
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43013
#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43014
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43015
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43016
#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43017
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43018
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43019
#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43020
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43021
#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43022
#define RUNTIME_ARRAY_SIZE 43023
#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42898
#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42899
#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42900
#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42901
#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42902
#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42903
#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42904
#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42905
#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42906
#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42907
#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42908
#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42909
#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42910
#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42911
#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42912
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42913
#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42914
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42915
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42916
#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42917
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42918
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42919
#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42920
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42921
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42922
#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42923
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42924
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42925
#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42926
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42927
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42928
#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42929
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42930
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42931
#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42932
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42933
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42934
#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42935
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42936
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42937
#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42938
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42939
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42940
#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42941
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42942
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42943
#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42944
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42945
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42946
#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42947
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42948
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42949
#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42950
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42951
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42952
#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42953
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42954
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42955
#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42956
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42957
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42958
#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42959
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42960
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42961
#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42962
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42963
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42964
#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42965
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42966
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42967
#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42968
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42969
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42970
#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42971
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42972
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42973
#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42974
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42975
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42976
#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42977
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42978
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42979
#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42980
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42981
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42982
#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42983
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42984
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42985
#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42986
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42987
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42988
#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42989
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42990
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42991
#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42992
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42993
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42994
#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42995
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42996
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42997
#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42998
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 42999
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43000
#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43001
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43002
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43003
#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43004
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43005
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43006
#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43007
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43008
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43009
#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43010
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43011
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43012
#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43013
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43014
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43015
#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43016
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43017
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43018
#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43019
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43020
#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43021
#define RUNTIME_ARRAY_SIZE 43022
/* Init Callbacks */
#define DMAE_READY_CB 0
......@@ -5694,8 +5792,10 @@ struct eth_vport_rx_mode {
#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4
#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5
#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF
#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6
#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_MASK 0x1
#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_SHIFT 6
#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x1FF
#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 7
};
/* Command for setting tpa parameters */
......@@ -6756,7 +6856,7 @@ struct e4_ystorm_rdma_task_ag_ctx {
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 key;
__le32 mw_cnt;
__le32 mw_cnt_or_qp_id;
u8 ref_cnt_seq;
u8 ctx_upd_seq;
__le16 dif_flags;
......@@ -6812,7 +6912,7 @@ struct e4_mstorm_rdma_task_ag_ctx {
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 key;
__le32 mw_cnt;
__le32 mw_cnt_or_qp_id;
u8 ref_cnt_seq;
u8 ctx_upd_seq;
__le16 dif_flags;
......@@ -7075,8 +7175,7 @@ struct rdma_register_tid_ramrod_data {
struct regpair va;
struct regpair pbl_base;
struct regpair dif_error_addr;
struct regpair dif_runt_addr;
__le32 reserved4[2];
__le32 reserved4[4];
};
/* rdma resize cq output params */
......@@ -7144,8 +7243,7 @@ struct rdma_srq_modify_ramrod_data {
enum rdma_tid_type {
RDMA_TID_REGISTERED_MR,
RDMA_TID_FMR,
RDMA_TID_MW_TYPE1,
RDMA_TID_MW_TYPE2A,
RDMA_TID_MW,
MAX_RDMA_TID_TYPE
};
......@@ -7681,6 +7779,16 @@ struct e4_roce_conn_context {
struct ustorm_roce_conn_st_ctx ustorm_st_context;
};
/* roce cqes statistics */
struct roce_cqe_stats {
__le32 req_cqe_error;
__le32 req_remote_access_errors;
__le32 req_remote_invalid_request;
__le32 resp_cqe_error;
__le32 resp_local_length_error;
__le32 reserved;
};
/* roce create qp requester ramrod data */
struct roce_create_qp_req_ramrod_data {
__le16 flags;
......@@ -7798,8 +7906,8 @@ struct roce_dcqcn_sent_stats {
/* RoCE destroy qp requester output params */
struct roce_destroy_qp_req_output_params {
__le32 num_bound_mw;
__le32 cq_prod;
__le32 reserved;
};
/* RoCE destroy qp requester ramrod data */
......@@ -7809,8 +7917,8 @@ struct roce_destroy_qp_req_ramrod_data {
/* RoCE destroy qp responder output params */
struct roce_destroy_qp_resp_output_params {
__le32 num_invalidated_mw;
__le32 cq_prod;
__le32 reserved;
};
/* RoCE destroy qp responder ramrod data */
......@@ -7818,16 +7926,27 @@ struct roce_destroy_qp_resp_ramrod_data {
struct regpair output_params_addr;
};
/* roce error statistics */
struct roce_error_stats {
__le32 resp_remote_access_errors;
__le32 reserved;
};
/* roce special events statistics */
struct roce_events_stats {
__le16 silent_drops;
__le16 rnr_naks_sent;
__le32 silent_drops;
__le32 rnr_naks_sent;
__le32 retransmit_count;
__le32 icrc_error_count;
__le32 reserved;
__le32 implied_nak_seq_err;
__le32 duplicate_request;
__le32 local_ack_timeout_err;
__le32 out_of_sequence;
__le32 packet_seq_err;
__le32 rnr_nak_retry_err;
};
/* ROCE slow path EQ cmd IDs */
/* roce slow path EQ cmd IDs */
enum roce_event_opcode {
ROCE_EVENT_CREATE_QP = 11,
ROCE_EVENT_MODIFY_QP,
......@@ -7845,6 +7964,9 @@ struct roce_init_func_params {
u8 cnp_dscp;
u8 reserved;
__le32 cnp_send_timeout;
__le16 rl_offset;
u8 rl_count_log;
u8 reserved1[5];
};
/* roce func init ramrod data */
......@@ -8532,7 +8654,7 @@ struct e4_tstorm_roce_resp_conn_ag_ctx {
__le16 rq_prod;
__le16 conn_dpi;
__le16 irq_cons;
__le32 num_invlidated_mw;
__le32 reg9;
__le32 reg10;
};
......
......@@ -360,6 +360,26 @@ void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
*(u32 *)&p_ptt->pxp.pretend);
}
/* Pretend to another port and another function (PF or VF) when accessing
 * the PTT window: builds a PXP pretend command for the given port/fid and
 * writes it to this PTT's pretend entry in the chip.
 */
void qed_port_fid_pretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 port_id, u16 fid)
{
u16 control = 0;
/* Build the pretend command: target port plus the port- and
 * function-pretend enable flags, using a concrete FID.
 */
SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
/* If the VF-valid bit is clear, keep only the PFID field of the fid
 * (fid is interpreted in pxp_pretend concrete-fid layout).
 */
if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID)
/* Cache the pretend settings in the PTT shadow, then commit both
 * 16-bit words to the chip in a single 32-bit register write.
 */
p_ptt->pxp.pretend.control = cpu_to_le16(control);
p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);
REG_WR(p_hwfn,
qed_ptt_config_addr(p_ptt) +
offsetof(struct pxp_ptt_entry, pretend),
*(u32 *)&p_ptt->pxp.pretend);
}
u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
{
u32 concrete_fid = 0;
......
......@@ -244,6 +244,18 @@ void qed_port_pretend(struct qed_hwfn *p_hwfn,
void qed_port_unpretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
* @brief qed_port_fid_pretend - pretend to another port and another function
* when accessing the ptt window
*
* @param p_hwfn
* @param p_ptt
* @param port_id - the port to pretend to
* @param fid - fid field of pxp_pretend structure. Can contain either pf / vf.
*/
void qed_port_fid_pretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 port_id, u16 fid);
/**
* @brief qed_vfid_to_concrete - build a concrete FID for a
* given VF ID
......
......@@ -1245,7 +1245,7 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
bool udp,
bool ipv4, bool ipv6, enum gft_profile_type profile_type)
{
u32 reg_val, cam_line, ram_line_lo, ram_line_hi;
u32 reg_val, cam_line, ram_line_lo, ram_line_hi, search_non_ip_as_gft;
if (!ipv6 && !ipv4)
DP_NOTICE(p_hwfn,
......@@ -1314,6 +1314,9 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
ram_line_lo = 0;
ram_line_hi = 0;
/* Search no IP as GFT */
search_non_ip_as_gft = 0;
/* Tunnel type */
SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
......@@ -1337,8 +1340,13 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
} else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
/* Allow tunneled traffic without inner IP */
search_non_ip_as_gft = 1;
}
qed_wr(p_hwfn,
p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft);
qed_wr(p_hwfn,
p_ptt,
PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
......@@ -1509,3 +1517,43 @@ void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}
/* Map a storm index (0=T, 1=M, 2=U, 3=X, 4=Y, 5=P) to the internal-RAM
 * address of that storm's RDMA assert level for this PF.
 * Returns 0 for an unknown storm index.
 */
static u32 qed_get_rdma_assert_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id)
{
	u8 pf_id = p_hwfn->rel_pf_id;

	if (storm_id == 0)
		return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id);
	if (storm_id == 1)
		return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id);
	if (storm_id == 2)
		return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id);
	if (storm_id == 3)
		return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id);
	if (storm_id == 4)
		return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id);
	if (storm_id == 5)
		return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
		       PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id);

	return 0;
}
/* Program the per-storm RDMA assert levels into each storm's internal RAM.
 * assert_level holds one level per storm, indexed by storm id.
 */
void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      u8 assert_level[NUM_STORMS])
{
	u8 i;

	for (i = 0; i < NUM_STORMS; i++)
		qed_wr(p_hwfn, p_ptt,
		       qed_get_rdma_assert_ram_addr(p_hwfn, i),
		       assert_level[i]);
}
......@@ -1159,7 +1159,6 @@ int qed_iwarp_connect(void *rdma_cxt,
struct qed_iwarp_info *iwarp_info;
struct qed_iwarp_ep *ep;
u8 mpa_data_size = 0;
u8 ts_hdr_size = 0;
u32 cid;
int rc;
......@@ -1218,10 +1217,7 @@ int qed_iwarp_connect(void *rdma_cxt,
iparams->cm_info.private_data,
iparams->cm_info.private_data_len);
if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN)
ts_hdr_size = TIMESTAMP_HEADER_SIZE;
ep->mss = iparams->mss - ts_hdr_size;
ep->mss = iparams->mss;
ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
ep->event_cb = iparams->event_cb;
......@@ -2337,7 +2333,6 @@ qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
u8 local_mac_addr[ETH_ALEN];
struct qed_iwarp_ep *ep;
int tcp_start_offset;
u8 ts_hdr_size = 0;
u8 ll2_syn_handle;
int payload_len;
u32 hdr_size;
......@@ -2415,11 +2410,7 @@ qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info));
if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN)
ts_hdr_size = TIMESTAMP_HEADER_SIZE;
hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60) +
ts_hdr_size;
hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60);
ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size;
ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
......
......@@ -586,6 +586,9 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
!!(accept_filter & QED_ACCEPT_BCAST));
SET_FIELD(state, ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI,
!!(accept_filter & QED_ACCEPT_ANY_VNI));
p_ramrod->rx_mode.state = cpu_to_le16(state);
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"p_ramrod->rx_mode.state = 0x%x\n", state);
......
......@@ -183,6 +183,7 @@ struct qed_filter_accept_flags {
#define QED_ACCEPT_MCAST_MATCHED 0x08
#define QED_ACCEPT_MCAST_UNMATCHED 0x10
#define QED_ACCEPT_BCAST 0x20
#define QED_ACCEPT_ANY_VNI 0x40
};
struct qed_arfs_config_params {
......
......@@ -1508,11 +1508,8 @@ qed_rdma_register_tid(void *rdma_cxt,
case QED_RDMA_TID_FMR:
tid_type = RDMA_TID_FMR;
break;
case QED_RDMA_TID_MW_TYPE1:
tid_type = RDMA_TID_MW_TYPE1;
break;
case QED_RDMA_TID_MW_TYPE2A:
tid_type = RDMA_TID_MW_TYPE2A;
case QED_RDMA_TID_MW:
tid_type = RDMA_TID_MW;
break;
default:
rc = -EINVAL;
......@@ -1544,7 +1541,6 @@ qed_rdma_register_tid(void *rdma_cxt,
RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
params->dif_error_addr);
DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
}
rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
......
......@@ -178,7 +178,7 @@
0x008c80UL
#define MCP_REG_SCRATCH \
0xe20000UL
#define CNIG_REG_NW_PORT_MODE_BB_B0 \
#define CNIG_REG_NW_PORT_MODE_BB \
0x218200UL
#define MISCS_REG_CHIP_NUM \
0x00976cUL
......@@ -1621,6 +1621,7 @@
#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT 1
#define PRS_REG_SEARCH_GFT 0x1f11bcUL
#define PRS_REG_SEARCH_NON_IP_AS_GFT 0x1f11c0UL
#define PRS_REG_CM_HDR_GFT 0x1f11c8UL
#define PRS_REG_GFT_CAM 0x1f1100UL
#define PRS_REG_GFT_PROFILE_MASK_RAM 0x1f1000UL
......
......@@ -681,7 +681,6 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp,
u32 *num_invalidated_mw,
u32 *cq_prod)
{
struct roce_destroy_qp_resp_output_params *p_ramrod_res;
......@@ -692,8 +691,6 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
int rc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
*num_invalidated_mw = 0;
*cq_prod = qp->cq_prod;
if (!qp->resp_offloaded) {
......@@ -742,7 +739,6 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
if (rc)
goto err;
*num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
*cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
qp->cq_prod = *cq_prod;
......@@ -764,8 +760,7 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
}
static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp,
u32 *num_bound_mw)
struct qed_rdma_qp *qp)
{
struct roce_destroy_qp_req_output_params *p_ramrod_res;
struct roce_destroy_qp_req_ramrod_data *p_ramrod;
......@@ -807,7 +802,6 @@ static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
if (rc)
goto err;
*num_bound_mw = le32_to_cpu(p_ramrod_res->num_bound_mw);
/* Free ORQ - only if ramrod succeeded, in case FW is still using it */
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
......@@ -968,8 +962,6 @@ int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
u32 num_invalidated_mw = 0;
u32 num_bound_mw = 0;
u32 cq_prod;
int rc;
......@@ -984,22 +976,14 @@ int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
&num_invalidated_mw,
&cq_prod);
if (rc)
return rc;
/* Send destroy requester ramrod */
rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
&num_bound_mw);
rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
if (rc)
return rc;
if (num_invalidated_mw != num_bound_mw) {
DP_NOTICE(p_hwfn,
"number of invalidate memory windows is different from bounded ones\n");
return -EINVAL;
}
}
return 0;
......@@ -1010,7 +994,6 @@ int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
enum qed_roce_qp_state prev_state,
struct qed_rdma_modify_qp_in_params *params)
{
u32 num_invalidated_mw = 0, num_bound_mw = 0;
int rc = 0;
/* Perform additional operations according to the current state and the
......@@ -1090,7 +1073,6 @@ int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
/* Send destroy responder ramrod */
rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
qp,
&num_invalidated_mw,
&cq_prod);
if (rc)
......@@ -1098,14 +1080,7 @@ int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
qp->cq_prod = cq_prod;
rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
&num_bound_mw);
if (num_invalidated_mw != num_bound_mw) {
DP_NOTICE(p_hwfn,
"number of invalidate memory windows is different from bounded ones\n");
return -EINVAL;
}
rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
} else {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
}
......
......@@ -471,14 +471,8 @@ static u16 qedi_calc_mss(u16 pmtu, u8 is_ipv6, u8 tcp_ts_en, u8 vlan_en)
else
hdrs += IPV4_HDR_LEN;
if (vlan_en)
hdrs += VLAN_LEN;
mss = pmtu - hdrs;
if (tcp_ts_en)
mss -= TCP_OPTION_LEN;
if (!mss)
mss = DEF_MSS;
......
......@@ -109,8 +109,8 @@
#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
#define FW_MAJOR_VERSION 8
#define FW_MINOR_VERSION 33
#define FW_REVISION_VERSION 11
#define FW_MINOR_VERSION 37
#define FW_REVISION_VERSION 2
#define FW_ENGINEERING_VERSION 0
/***********************/
......
......@@ -799,8 +799,8 @@ struct e4_mstorm_iscsi_task_ag_ctx {
#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
......@@ -849,8 +849,8 @@ struct e4_ustorm_iscsi_task_ag_ctx {
#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
u8 flags1;
......
......@@ -65,8 +65,7 @@ enum qed_roce_qp_state {
enum qed_rdma_tid_type {
QED_RDMA_TID_REGISTERED_MR,
QED_RDMA_TID_FMR,
QED_RDMA_TID_MW_TYPE1,
QED_RDMA_TID_MW_TYPE2A
QED_RDMA_TID_MW
};
struct qed_rdma_events {
......@@ -280,7 +279,6 @@ struct qed_rdma_register_tid_in_params {
bool dif_enabled;
u64 dif_error_addr;
u64 dif_runt_addr;
};
struct qed_rdma_create_cq_in_params {
......
......@@ -43,6 +43,7 @@
#define ROCE_MAX_QPS (32 * 1024)
#define ROCE_DCQCN_NP_MAX_QPS (64)
#define ROCE_DCQCN_RP_MAX_QPS (64)
#define ROCE_LKEY_MW_DIF_EN_BIT (28)
/* Affiliated asynchronous events / errors enumeration */
enum roce_async_events_type {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment