Commit 21dd79e8 authored by Tomer Tayar, committed by David S. Miller

qed*: HSI renaming for different types of HW

This patch renames defines and structures in the FW HSI files to allow a
distinction between different types of HW.
Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
Signed-off-by: Chad Dupuis <Chad.Dupuis@cavium.com>
Signed-off-by: Manish Rangankar <Manish.Rangankar@cavium.com>
Signed-off-by: Tomer Tayar <Tomer.Tayar@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a2e7699e
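The renaming pattern in the diff below is mechanical: context structures tied to the current E4 firmware HSI gain an e4_ prefix (for example, status_block becomes status_block_e4), while register and size defines that apply only to particular chips gain a suffix naming those chips (_BB_K2, _K2_E5). A minimal sketch of the convention follows; the struct body uses placeholder fields, not the real HSI layout, and only the two #define values are taken from the diff itself.

/* Sketch of the naming convention only; the struct fields are placeholders,
 * not the actual FW HSI definitions.
 */
#include <stdint.h>

/* Context structures bound to the E4 layout carry an explicit e4 tag,
 * so a future E5 layout can be added alongside without clashing.
 */
struct status_block_e4 {
        uint16_t pi_array[12];  /* placeholder fields */
        uint32_t sb_num;
};

/* Defines valid only on some chips name those chips in a suffix
 * (values taken from the register-address hunks below).
 */
#define CNIG_REG_DBG_SELECT_K2_E5   0x218254UL  /* K2/E5 only */
#define MCP_REG_SCRATCH_SIZE_BB_K2  57344       /* BB/K2 only */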
@@ -264,7 +264,7 @@ static int qedr_register_device(struct qedr_dev *dev)
 static int qedr_alloc_mem_sb(struct qedr_dev *dev,
 struct qed_sb_info *sb_info, u16 sb_id)
 {
-struct status_block *sb_virt;
+struct status_block_e4 *sb_virt;
 dma_addr_t sb_phys;
 int rc;
...
@@ -86,22 +86,22 @@
 /* connection context union */
 union conn_context {
-struct core_conn_context core_ctx;
-struct eth_conn_context eth_ctx;
-struct iscsi_conn_context iscsi_ctx;
-struct fcoe_conn_context fcoe_ctx;
-struct roce_conn_context roce_ctx;
+struct e4_core_conn_context core_ctx;
+struct e4_eth_conn_context eth_ctx;
+struct e4_iscsi_conn_context iscsi_ctx;
+struct e4_fcoe_conn_context fcoe_ctx;
+struct e4_roce_conn_context roce_ctx;
 };
 /* TYPE-0 task context - iSCSI, FCOE */
 union type0_task_context {
-struct iscsi_task_context iscsi_ctx;
-struct fcoe_task_context fcoe_ctx;
+struct e4_iscsi_task_context iscsi_ctx;
+struct e4_fcoe_task_context fcoe_ctx;
 };
 /* TYPE-1 task context - ROCE */
 union type1_task_context {
-struct rdma_task_context roce_ctx;
+struct e4_rdma_task_context roce_ctx;
 };
 struct src_ent {
...
@@ -610,9 +610,9 @@ static struct block_defs block_cnig_defs = {
 "cnig",
 {false, true}, false, 0,
 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
-CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2,
-CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2,
-CNIG_REG_DBG_FORCE_FRAME_K2,
+CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
+CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
+CNIG_REG_DBG_FORCE_FRAME_K2_E5,
 true, false, DBG_RESET_REG_MISCS_PL_HV, 0
 };
@@ -654,11 +654,11 @@ static struct block_defs block_pcie_defs = {
 "pcie",
 {false, true}, false, 0,
 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
-PCIE_REG_DBG_COMMON_SELECT_K2,
-PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2,
-PCIE_REG_DBG_COMMON_SHIFT_K2,
-PCIE_REG_DBG_COMMON_FORCE_VALID_K2,
-PCIE_REG_DBG_COMMON_FORCE_FRAME_K2,
+PCIE_REG_DBG_COMMON_SELECT_K2_E5,
+PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
+PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
+PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
+PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
 false, false, MAX_DBG_RESET_REGS, 0
 };
@@ -760,9 +760,9 @@ static struct block_defs block_pglcs_defs = {
 "pglcs",
 {false, true}, false, 0,
 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
-PGLCS_REG_DBG_SELECT_K2, PGLCS_REG_DBG_DWORD_ENABLE_K2,
-PGLCS_REG_DBG_SHIFT_K2, PGLCS_REG_DBG_FORCE_VALID_K2,
-PGLCS_REG_DBG_FORCE_FRAME_K2,
+PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
+PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
+PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
 true, false, DBG_RESET_REG_MISCS_PL_HV, 2
 };
@@ -1255,9 +1255,9 @@ static struct block_defs block_umac_defs = {
 "umac",
 {false, true}, false, 0,
 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
-UMAC_REG_DBG_SELECT_K2, UMAC_REG_DBG_DWORD_ENABLE_K2,
-UMAC_REG_DBG_SHIFT_K2, UMAC_REG_DBG_FORCE_VALID_K2,
-UMAC_REG_DBG_FORCE_FRAME_K2,
+UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
+UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
+UMAC_REG_DBG_FORCE_FRAME_K2_E5,
 true, false, DBG_RESET_REG_MISCS_PL_HV, 6
 };
@@ -1289,9 +1289,9 @@ static struct block_defs block_wol_defs = {
 "wol",
 {false, true}, false, 0,
 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
-WOL_REG_DBG_SELECT_K2, WOL_REG_DBG_DWORD_ENABLE_K2,
-WOL_REG_DBG_SHIFT_K2, WOL_REG_DBG_FORCE_VALID_K2,
-WOL_REG_DBG_FORCE_FRAME_K2,
+WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
+WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
+WOL_REG_DBG_FORCE_FRAME_K2_E5,
 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
 };
@@ -1299,9 +1299,9 @@ static struct block_defs block_bmbn_defs = {
 "bmbn",
 {false, true}, false, 0,
 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
-BMBN_REG_DBG_SELECT_K2, BMBN_REG_DBG_DWORD_ENABLE_K2,
-BMBN_REG_DBG_SHIFT_K2, BMBN_REG_DBG_FORCE_VALID_K2,
-BMBN_REG_DBG_FORCE_FRAME_K2,
+BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
+BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
+BMBN_REG_DBG_FORCE_FRAME_K2_E5,
 false, false, MAX_DBG_RESET_REGS, 0
 };
@@ -1316,9 +1316,9 @@ static struct block_defs block_nwm_defs = {
 "nwm",
 {false, true}, false, 0,
 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
-NWM_REG_DBG_SELECT_K2, NWM_REG_DBG_DWORD_ENABLE_K2,
-NWM_REG_DBG_SHIFT_K2, NWM_REG_DBG_FORCE_VALID_K2,
-NWM_REG_DBG_FORCE_FRAME_K2,
+NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
+NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
+NWM_REG_DBG_FORCE_FRAME_K2_E5,
 true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
 };
@@ -1326,9 +1326,9 @@ static struct block_defs block_nws_defs = {
 "nws",
 {false, true}, false, 0,
 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
-NWS_REG_DBG_SELECT_K2, NWS_REG_DBG_DWORD_ENABLE_K2,
-NWS_REG_DBG_SHIFT_K2, NWS_REG_DBG_FORCE_VALID_K2,
-NWS_REG_DBG_FORCE_FRAME_K2,
+NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
+NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
+NWS_REG_DBG_FORCE_FRAME_K2_E5,
 true, false, DBG_RESET_REG_MISCS_PL_HV, 12
 };
@@ -1336,9 +1336,9 @@ static struct block_defs block_ms_defs = {
 "ms",
 {false, true}, false, 0,
 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
-MS_REG_DBG_SELECT_K2, MS_REG_DBG_DWORD_ENABLE_K2,
-MS_REG_DBG_SHIFT_K2, MS_REG_DBG_FORCE_VALID_K2,
-MS_REG_DBG_FORCE_FRAME_K2,
+MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
+MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
+MS_REG_DBG_FORCE_FRAME_K2_E5,
 true, false, DBG_RESET_REG_MISCS_PL_HV, 13
 };
@@ -1346,11 +1346,11 @@ static struct block_defs block_phy_pcie_defs = {
 "phy_pcie",
 {false, true}, false, 0,
 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
-PCIE_REG_DBG_COMMON_SELECT_K2,
-PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2,
-PCIE_REG_DBG_COMMON_SHIFT_K2,
-PCIE_REG_DBG_COMMON_FORCE_VALID_K2,
-PCIE_REG_DBG_COMMON_FORCE_FRAME_K2,
+PCIE_REG_DBG_COMMON_SELECT_K2_E5,
+PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
+PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
+PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
+PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
 false, false, MAX_DBG_RESET_REGS, 0
 };
@@ -1659,7 +1659,7 @@ static struct reset_reg_defs s_reset_regs_defs[] = {
 {true, true} },
 /* DBG_RESET_REG_MISCS_PL_HV_2 */
-{ MISCS_REG_RESET_PL_HV_2_K2, 0x0,
+{ MISCS_REG_RESET_PL_HV_2_K2_E5, 0x0,
 {false, true} },
 /* DBG_RESET_REG_MISC_PL_UA */
@@ -1685,25 +1685,25 @@ static struct reset_reg_defs s_reset_regs_defs[] = {
 static struct phy_defs s_phy_defs[] = {
 {"nw_phy", NWS_REG_NWS_CMU_K2,
-PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2,
-PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2,
-PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2,
-PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2},
-{"sgmii_phy", MS_REG_MS_CMU_K2,
-PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2,
-PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2,
-PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2,
-PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2},
-{"pcie_phy0", PHY_PCIE_REG_PHY0_K2,
-PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
-PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
-PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
-PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
-{"pcie_phy1", PHY_PCIE_REG_PHY1_K2,
-PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
-PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
-PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
-PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
+PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
+PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
+PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
+PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
+{"sgmii_phy", MS_REG_MS_CMU_K2_E5,
+PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
+PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
+PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
+PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
+{"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
+PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
+PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
+PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
+PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
+{"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
+PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
+PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
+PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
+PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
 };
 /**************************** Private Functions ******************************/
@@ -1795,7 +1795,7 @@ static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
 * The address is located in the last line of the Storm RAM.
 */
 addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
-DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
+DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2) -
 sizeof(fw_info_location);
 dest = (u32 *)&fw_info_location;
@@ -3637,7 +3637,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
 dump,
 NULL,
 BYTES_TO_DWORDS(MCP_REG_SCRATCH),
-MCP_REG_SCRATCH_SIZE,
+MCP_REG_SCRATCH_SIZE_BB_K2,
 false, 0, false, "MCP", false, 0);
 /* Dump MCP cpu_reg_file */
...
@@ -115,7 +115,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
 struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
 struct fcoe_init_ramrod_params *p_ramrod = NULL;
 struct fcoe_init_func_ramrod_data *p_data;
-struct fcoe_conn_context *p_cxt = NULL;
+struct e4_fcoe_conn_context *p_cxt = NULL;
 struct qed_spq_entry *p_ent = NULL;
 struct qed_sp_init_data init_data;
 struct qed_cxt_info cxt_info;
@@ -167,7 +167,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
 }
 p_cxt = cxt_info.p_cxt;
 SET_FIELD(p_cxt->tstorm_ag_context.flags3,
-TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
+E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
 fcoe_pf_params->dummy_icid = (u16)dummy_cid;
@@ -568,7 +568,7 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
 void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
 {
-struct fcoe_task_context *p_task_ctx = NULL;
+struct e4_fcoe_task_context *p_task_ctx = NULL;
 int rc;
 u32 i;
@@ -580,13 +580,13 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
 if (rc)
 continue;
-memset(p_task_ctx, 0, sizeof(struct fcoe_task_context));
+memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context));
 SET_FIELD(p_task_ctx->timer_context.logical_client_0,
 TIMERS_CONTEXT_VALIDLC0, 1);
 SET_FIELD(p_task_ctx->timer_context.logical_client_1,
 TIMERS_CONTEXT_VALIDLC1, 1);
 SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
-TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
+E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
 }
 }
...
This diff is collapsed.
@@ -64,7 +64,7 @@
 #define QM_WFQ_VP_PQ_VOQ_SHIFT 0
 /* Bit of PF in WFQ VP PQ map */
-#define QM_WFQ_VP_PQ_PF_SHIFT 5
+#define QM_WFQ_VP_PQ_PF_E4_SHIFT 5
 /* 0x9000 = 4*9*1024 */
 #define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
@@ -171,7 +171,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
 if (pf_rl_en) {
 /* Enable RLs for all VOQs */
 STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
-(1 << MAX_NUM_VOQS) - 1);
+(1 << MAX_NUM_VOQS_E4) - 1);
 /* Write RL period */
 STORE_RT_REG(p_hwfn,
 QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
@@ -260,7 +260,7 @@ static void qed_cmdq_lines_rt_init(
 u8 tc, voq, port_id, num_tcs_in_port;
 /* Clear PBF lines for all VOQs */
-for (voq = 0; voq < MAX_NUM_VOQS; voq++)
+for (voq = 0; voq < MAX_NUM_VOQS_E4; voq++)
 STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
 for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
 if (port_params[port_id].active) {
@@ -387,7 +387,7 @@ static void qed_tx_pq_map_rt_init(
 u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
 p_params->max_phys_tcs_per_port);
 bool is_vf_pq = (i >= p_params->num_pf_pqs);
-struct qm_rf_pq_map tx_pq_map;
+struct qm_rf_pq_map_e4 tx_pq_map;
 bool rl_valid = p_params->pq_params[i].rl_valid &&
 (p_params->pq_params[i].vport_id <
@@ -410,7 +410,7 @@ static void qed_tx_pq_map_rt_init(
 first_tx_pq_id,
 (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
 (p_params->pf_id <<
-QM_WFQ_VP_PQ_PF_SHIFT));
+QM_WFQ_VP_PQ_PF_E4_SHIFT));
 }
 if (p_params->pq_params[i].rl_valid && !rl_valid)
@@ -418,15 +418,16 @@ static void qed_tx_pq_map_rt_init(
 "Invalid VPORT ID for rate limiter configuration");
 /* Fill PQ map entry */
 memset(&tx_pq_map, 0, sizeof(tx_pq_map));
-SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
+SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_E4_PQ_VALID, 1);
 SET_FIELD(tx_pq_map.reg,
-QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0);
-SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
-SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
+QM_RF_PQ_MAP_E4_RL_VALID, rl_valid ? 1 : 0);
+SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_E4_VP_PQ_ID,
+first_tx_pq_id);
+SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_E4_RL_ID,
 rl_valid ?
 p_params->pq_params[i].vport_id : 0);
-SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
-SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
+SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_E4_VOQ, voq);
+SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP,
 p_params->pq_params[i].wrr_group);
 /* Write PQ map entry to CAM */
 STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
@@ -902,7 +903,7 @@ void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
 qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
 if (reg_val)
-qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
 PRS_ETH_TUNN_FIC_FORMAT);
 reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
@@ -929,7 +930,7 @@ void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
 qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
 if (reg_val)
-qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
 PRS_ETH_TUNN_FIC_FORMAT);
 reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
@@ -970,7 +971,7 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
 qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
 if (reg_val)
-qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
 PRS_ETH_TUNN_FIC_FORMAT);
 qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
@@ -981,9 +982,9 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
 if (QED_IS_BB_B0(p_hwfn->cdev))
 return;
-qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
+qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
 eth_geneve_enable ? 1 : 0);
-qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
+qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
 ip_geneve_enable ? 1 : 0);
 }
...
@@ -62,7 +62,7 @@ struct qed_sb_sp_info {
 struct qed_sb_info sb_info;
 /* per protocol index data */
-struct qed_pi_info pi_info_arr[PIS_PER_SB];
+struct qed_pi_info pi_info_arr[PIS_PER_SB_E4];
 };
 enum qed_attention_type {
@@ -1313,7 +1313,7 @@ static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
 if (IS_VF(p_hwfn->cdev))
 return;
-sb_offset = igu_sb_id * PIS_PER_SB;
+sb_offset = igu_sb_id * PIS_PER_SB_E4;
 memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
...
@@ -197,7 +197,7 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev);
 #define QED_SB_EVENT_MASK 0x0003
 #define SB_ALIGNED_SIZE(p_hwfn) \
-ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+ALIGNED_TYPE_SIZE(struct status_block_e4, p_hwfn)
 #define QED_SB_INVALID_IDX 0xffff
...
@@ -564,7 +564,7 @@
 #define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
 #define PRS_REG_GRE_PROTOCOL 0x1f0734UL
 #define PRS_REG_VXLAN_PORT 0x1f0738UL
-#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL
+#define PRS_REG_OUTPUT_FORMAT_4_0_BB_K2 0x1f099cUL
 #define NIG_REG_ENC_TYPE_ENABLE 0x501058UL
 #define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE (0x1 << 0)
@@ -583,8 +583,8 @@
 #define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL
 #define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL
 #define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL
-#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN 0x10092cUL
-#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN 0x100930UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5 0x10092cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5 0x100930UL
 #define NIG_REG_NGE_IP_ENABLE 0x508b28UL
 #define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL
@@ -595,15 +595,15 @@
 #define QM_REG_WFQPFWEIGHT 0x2f4e80UL
 #define QM_REG_WFQVPWEIGHT 0x2fa000UL
-#define PGLCS_REG_DBG_SELECT_K2 \
+#define PGLCS_REG_DBG_SELECT_K2_E5 \
 0x001d14UL
-#define PGLCS_REG_DBG_DWORD_ENABLE_K2 \
+#define PGLCS_REG_DBG_DWORD_ENABLE_K2_E5 \
 0x001d18UL
-#define PGLCS_REG_DBG_SHIFT_K2 \
+#define PGLCS_REG_DBG_SHIFT_K2_E5 \
 0x001d1cUL
-#define PGLCS_REG_DBG_FORCE_VALID_K2 \
+#define PGLCS_REG_DBG_FORCE_VALID_K2_E5 \
 0x001d20UL
-#define PGLCS_REG_DBG_FORCE_FRAME_K2 \
+#define PGLCS_REG_DBG_FORCE_FRAME_K2_E5 \
 0x001d24UL
 #define MISC_REG_RESET_PL_PDA_VMAIN_1 \
 0x008070UL
@@ -615,7 +615,7 @@
 0x009050UL
 #define MISCS_REG_RESET_PL_HV \
 0x009060UL
-#define MISCS_REG_RESET_PL_HV_2_K2 \
+#define MISCS_REG_RESET_PL_HV_2_K2_E5 \
 0x009150UL
 #define DMAE_REG_DBG_SELECT \
 0x00c510UL
@@ -647,15 +647,15 @@
 0x0500b0UL
 #define GRC_REG_DBG_FORCE_FRAME \
 0x0500b4UL
-#define UMAC_REG_DBG_SELECT_K2 \
+#define UMAC_REG_DBG_SELECT_K2_E5 \
 0x051094UL
-#define UMAC_REG_DBG_DWORD_ENABLE_K2 \
+#define UMAC_REG_DBG_DWORD_ENABLE_K2_E5 \
 0x051098UL
-#define UMAC_REG_DBG_SHIFT_K2 \
+#define UMAC_REG_DBG_SHIFT_K2_E5 \
 0x05109cUL
-#define UMAC_REG_DBG_FORCE_VALID_K2 \
+#define UMAC_REG_DBG_FORCE_VALID_K2_E5 \
 0x0510a0UL
-#define UMAC_REG_DBG_FORCE_FRAME_K2 \
+#define UMAC_REG_DBG_FORCE_FRAME_K2_E5 \
 0x0510a4UL
 #define MCP2_REG_DBG_SELECT \
 0x052400UL
@@ -717,15 +717,15 @@
 0x1f0ba0UL
 #define PRS_REG_DBG_FORCE_FRAME \
 0x1f0ba4UL
-#define CNIG_REG_DBG_SELECT_K2 \
+#define CNIG_REG_DBG_SELECT_K2_E5 \
 0x218254UL
-#define CNIG_REG_DBG_DWORD_ENABLE_K2 \
+#define CNIG_REG_DBG_DWORD_ENABLE_K2_E5 \
 0x218258UL
-#define CNIG_REG_DBG_SHIFT_K2 \
+#define CNIG_REG_DBG_SHIFT_K2_E5 \
 0x21825cUL
-#define CNIG_REG_DBG_FORCE_VALID_K2 \
+#define CNIG_REG_DBG_FORCE_VALID_K2_E5 \
 0x218260UL
-#define CNIG_REG_DBG_FORCE_FRAME_K2 \
+#define CNIG_REG_DBG_FORCE_FRAME_K2_E5 \
 0x218264UL
 #define PRM_REG_DBG_SELECT \
 0x2306a8UL
@@ -997,35 +997,35 @@
 0x580710UL
 #define CDU_REG_DBG_FORCE_FRAME \
 0x580714UL
-#define WOL_REG_DBG_SELECT_K2 \
+#define WOL_REG_DBG_SELECT_K2_E5 \
 0x600140UL
-#define WOL_REG_DBG_DWORD_ENABLE_K2 \
+#define WOL_REG_DBG_DWORD_ENABLE_K2_E5 \
 0x600144UL
-#define WOL_REG_DBG_SHIFT_K2 \
+#define WOL_REG_DBG_SHIFT_K2_E5 \
 0x600148UL
-#define WOL_REG_DBG_FORCE_VALID_K2 \
+#define WOL_REG_DBG_FORCE_VALID_K2_E5 \
 0x60014cUL
-#define WOL_REG_DBG_FORCE_FRAME_K2 \
+#define WOL_REG_DBG_FORCE_FRAME_K2_E5 \
 0x600150UL
-#define BMBN_REG_DBG_SELECT_K2 \
+#define BMBN_REG_DBG_SELECT_K2_E5 \
 0x610140UL
-#define BMBN_REG_DBG_DWORD_ENABLE_K2 \
+#define BMBN_REG_DBG_DWORD_ENABLE_K2_E5 \
 0x610144UL
-#define BMBN_REG_DBG_SHIFT_K2 \
+#define BMBN_REG_DBG_SHIFT_K2_E5 \
 0x610148UL
-#define BMBN_REG_DBG_FORCE_VALID_K2 \
+#define BMBN_REG_DBG_FORCE_VALID_K2_E5 \
 0x61014cUL
-#define BMBN_REG_DBG_FORCE_FRAME_K2 \
+#define BMBN_REG_DBG_FORCE_FRAME_K2_E5 \
 0x610150UL
-#define NWM_REG_DBG_SELECT_K2 \
+#define NWM_REG_DBG_SELECT_K2_E5 \
 0x8000ecUL
-#define NWM_REG_DBG_DWORD_ENABLE_K2 \
+#define NWM_REG_DBG_DWORD_ENABLE_K2_E5 \
 0x8000f0UL
-#define NWM_REG_DBG_SHIFT_K2 \
+#define NWM_REG_DBG_SHIFT_K2_E5 \
 0x8000f4UL
-#define NWM_REG_DBG_FORCE_VALID_K2 \
+#define NWM_REG_DBG_FORCE_VALID_K2_E5 \
 0x8000f8UL
-#define NWM_REG_DBG_FORCE_FRAME_K2\
+#define NWM_REG_DBG_FORCE_FRAME_K2_E5 \
 0x8000fcUL
 #define PBF_REG_DBG_SELECT \
 0xd80060UL
@@ -1247,35 +1247,35 @@
 0x1901534UL
 #define USEM_REG_DBG_FORCE_FRAME \
 0x1901538UL
-#define NWS_REG_DBG_SELECT_K2 \
+#define NWS_REG_DBG_SELECT_K2_E5 \
 0x700128UL
-#define NWS_REG_DBG_DWORD_ENABLE_K2 \
+#define NWS_REG_DBG_DWORD_ENABLE_K2_E5 \
 0x70012cUL
-#define NWS_REG_DBG_SHIFT_K2 \
+#define NWS_REG_DBG_SHIFT_K2_E5 \
 0x700130UL
-#define NWS_REG_DBG_FORCE_VALID_K2 \
+#define NWS_REG_DBG_FORCE_VALID_K2_E5 \
 0x700134UL
-#define NWS_REG_DBG_FORCE_FRAME_K2 \
+#define NWS_REG_DBG_FORCE_FRAME_K2_E5 \
 0x700138UL
-#define MS_REG_DBG_SELECT_K2 \
+#define MS_REG_DBG_SELECT_K2_E5 \
 0x6a0228UL
-#define MS_REG_DBG_DWORD_ENABLE_K2 \
+#define MS_REG_DBG_DWORD_ENABLE_K2_E5 \
 0x6a022cUL
-#define MS_REG_DBG_SHIFT_K2 \
+#define MS_REG_DBG_SHIFT_K2_E5 \
 0x6a0230UL
-#define MS_REG_DBG_FORCE_VALID_K2 \
+#define MS_REG_DBG_FORCE_VALID_K2_E5 \
 0x6a0234UL
-#define MS_REG_DBG_FORCE_FRAME_K2 \
+#define MS_REG_DBG_FORCE_FRAME_K2_E5 \
 0x6a0238UL
-#define PCIE_REG_DBG_COMMON_SELECT_K2 \
+#define PCIE_REG_DBG_COMMON_SELECT_K2_E5 \
 0x054398UL
-#define PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2 \
+#define PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5 \
 0x05439cUL
-#define PCIE_REG_DBG_COMMON_SHIFT_K2 \
+#define PCIE_REG_DBG_COMMON_SHIFT_K2_E5 \
 0x0543a0UL
-#define PCIE_REG_DBG_COMMON_FORCE_VALID_K2 \
+#define PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5 \
 0x0543a4UL
-#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2 \
+#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5 \
 0x0543a8UL
 #define MISC_REG_RESET_PL_UA \
 0x008050UL
@@ -1415,7 +1415,7 @@
 0x1940000UL
 #define SEM_FAST_REG_INT_RAM \
 0x020000UL
-#define SEM_FAST_REG_INT_RAM_SIZE \
+#define SEM_FAST_REG_INT_RAM_SIZE_BB_K2 \
 20480
 #define GRC_REG_TRACE_FIFO_VALID_DATA \
 0x050064UL
@@ -1451,7 +1451,7 @@
 0x238c30UL
 #define MISCS_REG_BLOCK_256B_EN \
 0x009074UL
-#define MCP_REG_SCRATCH_SIZE \
+#define MCP_REG_SCRATCH_SIZE_BB_K2 \
 57344
 #define MCP_REG_CPU_REG_FILE \
 0xe05200UL
@@ -1485,35 +1485,35 @@
 0x008c14UL
 #define NWS_REG_NWS_CMU_K2 \
 0x720000UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5 \
 0x000680UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5 \
 0x000684UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5 \
 0x0006c0UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5 \
 0x0006c4UL
-#define MS_REG_MS_CMU_K2 \
+#define MS_REG_MS_CMU_K2_E5 \
 0x6a4000UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \
 0x000208UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \
 0x00020cUL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \
 0x000210UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \
 0x000214UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \
 0x000208UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \
 0x00020cUL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \
 0x000210UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \
 0x000214UL
-#define PHY_PCIE_REG_PHY0_K2 \
+#define PHY_PCIE_REG_PHY0_K2_E5 \
 0x620000UL
-#define PHY_PCIE_REG_PHY1_K2 \
+#define PHY_PCIE_REG_PHY1_K2_E5 \
 0x624000UL
 #define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL
 #define PRS_REG_LIGHT_L2_ETHERTYPE_EN 0x1f0968UL
...
@@ -215,7 +215,7 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
 static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
 struct qed_spq *p_spq)
 {
-struct core_conn_context *p_cxt;
+struct e4_core_conn_context *p_cxt;
 struct qed_cxt_info cxt_info;
 u16 physical_q;
 int rc;
@@ -233,11 +233,11 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
 p_cxt = cxt_info.p_cxt;
 SET_FIELD(p_cxt->xstorm_ag_context.flags10,
-XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
 SET_FIELD(p_cxt->xstorm_ag_context.flags1,
-XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
 SET_FIELD(p_cxt->xstorm_ag_context.flags9,
-XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
 /* QM physical queue */
 physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
...
@@ -1621,7 +1621,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
 /* fill in pfdev info */
 pfdev_info->chip_num = p_hwfn->cdev->chip_num;
 pfdev_info->db_size = 0;
-pfdev_info->indices_per_sb = PIS_PER_SB;
+pfdev_info->indices_per_sb = PIS_PER_SB_E4;
 pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
 PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
@@ -3582,11 +3582,11 @@ static int
 qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
 {
-u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
+u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
 int i, cnt;
 /* Read initial consumers & producers */
-for (i = 0; i < MAX_NUM_VOQS; i++) {
+for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
 u32 prod;
 cons[i] = qed_rd(p_hwfn, p_ptt,
@@ -3601,7 +3601,7 @@ qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
 /* Wait for consumers to pass the producers */
 i = 0;
 for (cnt = 0; cnt < 50; cnt++) {
-for (; i < MAX_NUM_VOQS; i++) {
+for (; i < MAX_NUM_VOQS_E4; i++) {
 u32 tmp;
 tmp = qed_rd(p_hwfn, p_ptt,
@@ -3611,7 +3611,7 @@ qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
 break;
 }
-if (i == MAX_NUM_VOQS)
+if (i == MAX_NUM_VOQS_E4)
 break;
 msleep(20);
...
@@ -1147,7 +1147,7 @@ static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
 static int qede_alloc_mem_sb(struct qede_dev *edev,
 struct qed_sb_info *sb_info, u16 sb_id)
 {
-struct status_block *sb_virt;
+struct status_block_e4 *sb_virt;
 dma_addr_t sb_phys;
 int rc;
...
@@ -25,10 +25,10 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
 u32 task_retry_id,
 u8 fcp_cmd_payload[32])
 {
-struct fcoe_task_context *ctx = task_params->context;
+struct e4_fcoe_task_context *ctx = task_params->context;
+struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
 struct ystorm_fcoe_task_st_ctx *y_st_ctx;
 struct tstorm_fcoe_task_st_ctx *t_st_ctx;
-struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
 struct mstorm_fcoe_task_st_ctx *m_st_ctx;
 u32 io_size, val;
 bool slow_sgl;
@@ -112,10 +112,10 @@ int init_initiator_midpath_unsolicited_fcoe_task(
 struct scsi_sgl_task_params *rx_sgl_task_params,
 u8 fw_to_place_fc_header)
 {
-struct fcoe_task_context *ctx = task_params->context;
+struct e4_fcoe_task_context *ctx = task_params->context;
+struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
 struct ystorm_fcoe_task_st_ctx *y_st_ctx;
 struct tstorm_fcoe_task_st_ctx *t_st_ctx;
-struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
 struct mstorm_fcoe_task_st_ctx *m_st_ctx;
 u32 val;
...
@@ -13,7 +13,7 @@
 struct fcoe_task_params {
 /* Output parameter [set/filled by the HSI function] */
-struct fcoe_task_context *context;
+struct e4_fcoe_task_context *context;
 /* Output parameter [set/filled by the HSI function] */
 struct fcoe_wqe *sqe;
...
@@ -129,7 +129,7 @@ struct qedf_ioreq {
 struct delayed_work timeout_work;
 struct completion tm_done;
 struct completion abts_done;
-struct fcoe_task_context *task;
+struct e4_fcoe_task_context *task;
 struct fcoe_task_params *task_params;
 struct scsi_sgl_task_params *sgl_task_params;
 int idx;
@@ -465,7 +465,7 @@ extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
 unsigned int timer_msec);
 extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
 extern void qedf_init_mp_task(struct qedf_ioreq *io_req,
-struct fcoe_task_context *task_ctx, struct fcoe_wqe *wqe);
+struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe);
 extern u16 qedf_get_sqe_idx(struct qedf_rport *fcport);
 extern void qedf_ring_doorbell(struct qedf_rport *fcport);
 extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
...
@@ -19,7 +19,7 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
 struct qedf_ioreq *els_req;
 struct qedf_mp_req *mp_req;
 struct fc_frame_header *fc_hdr;
-struct fcoe_task_context *task;
+struct e4_fcoe_task_context *task;
 int rc = 0;
 uint32_t did, sid;
 uint16_t xid;
...
@@ -579,7 +579,7 @@ static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
 }
 static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
-struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
+struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
 struct fcoe_wqe *sqe)
 {
 enum fcoe_task_type task_type;
@@ -597,7 +597,7 @@ static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
 /* Note init_initiator_rw_fcoe_task memsets the task context */
 io_req->task = task_ctx;
-memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
 memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
 memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
@@ -673,7 +673,7 @@ static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
 }
 void qedf_init_mp_task(struct qedf_ioreq *io_req,
-struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
+struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
 {
 struct qedf_mp_req *mp_req = &(io_req->mp_req);
 struct qedf_rport *fcport = io_req->fcport;
@@ -691,7 +691,7 @@ void qedf_init_mp_task(struct qedf_ioreq *io_req,
 memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
 memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
-memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
 memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
 /* Setup the task from io_req for easy reference */
@@ -844,7 +844,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
 struct Scsi_Host *host = sc_cmd->device->host;
 struct fc_lport *lport = shost_priv(host);
 struct qedf_ctx *qedf = lport_priv(lport);
-struct fcoe_task_context *task_ctx;
+struct e4_fcoe_task_context *task_ctx;
 u16 xid;
 enum fcoe_task_type req_type = 0;
 struct fcoe_wqe *sqe;
@@ -1065,7 +1065,7 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
 struct qedf_ioreq *io_req)
 {
 u16 xid, rval;
-struct fcoe_task_context *task_ctx;
+struct e4_fcoe_task_context *task_ctx;
 struct scsi_cmnd *sc_cmd;
 struct fcoe_cqe_rsp_info *fcp_rsp;
 struct qedf_rport *fcport;
@@ -1722,7 +1722,7 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
 struct qedf_rport *fcport;
 struct qedf_ctx *qedf;
 uint16_t xid;
-struct fcoe_task_context *task;
+struct e4_fcoe_task_context *task;
 int tmo = 0;
 int rc = SUCCESS;
 unsigned long flags;
@@ -1835,7 +1835,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
 uint8_t tm_flags)
 {
 struct qedf_ioreq *io_req;
-struct fcoe_task_context *task;
+struct e4_fcoe_task_context *task;
 struct qedf_ctx *qedf = fcport->qedf;
 struct fc_lport *lport = qedf->lport;
 int rc = 0;
...
@@ -1860,7 +1860,7 @@ static bool qedf_fp_has_work(struct qedf_fastpath *fp)
 struct qedf_ctx *qedf = fp->qedf;
 struct global_queue *que;
 struct qed_sb_info *sb_info = fp->sb_info;
-struct status_block *sb = sb_info->sb_virt;
+struct status_block_e4 *sb = sb_info->sb_virt;
 u16 prod_idx;
 /* Get the pointer to the global CQ this completion is on */
@@ -1887,7 +1887,7 @@ static bool qedf_process_completions(struct qedf_fastpath *fp)
 {
 struct qedf_ctx *qedf = fp->qedf;
 struct qed_sb_info *sb_info = fp->sb_info;
-struct status_block *sb = sb_info->sb_virt;
+struct status_block_e4 *sb = sb_info->sb_virt;
 struct global_queue *que;
 u16 prod_idx;
 struct fcoe_cqe *cqe;
@@ -2352,12 +2352,12 @@ void qedf_fp_io_handler(struct work_struct *work)
 static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
 struct qed_sb_info *sb_info, u16 sb_id)
 {
-struct status_block *sb_virt;
+struct status_block_e4 *sb_virt;
 dma_addr_t sb_phys;
 int ret;
 sb_virt = dma_alloc_coherent(&qedf->pdev->dev,
-sizeof(struct status_block), &sb_phys, GFP_KERNEL);
+sizeof(struct status_block_e4), &sb_phys, GFP_KERNEL);
 if (!sb_virt) {
 QEDF_ERR(&(qedf->dbg_ctx), "Status block allocation failed "
...
@@ -152,7 +152,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused)
 {
 struct qedi_fastpath *fp = NULL;
 struct qed_sb_info *sb_info = NULL;
-struct status_block *sb = NULL;
+struct status_block_e4 *sb = NULL;
 struct global_queue *que = NULL;
 int id;
 u16 prod_idx;
@@ -168,7 +168,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused)
 sb_info = fp->sb_info;
 sb = sb_info->sb_virt;
 prod_idx = (sb->pi_array[QEDI_PROTO_CQ_PROD_IDX] &
-STATUS_BLOCK_PROD_INDEX_MASK);
+STATUS_BLOCK_E4_PROD_INDEX_MASK);
 seq_printf(s, "SB PROD IDX: %d\n", prod_idx);
 que = qedi->global_queues[fp->sb_id];
 seq_printf(s, "DRV CONS IDX: %d\n", que->cq_cons_idx);
...
...@@ -87,7 +87,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi, ...@@ -87,7 +87,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
{ {
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
struct iscsi_session *session = conn->session; struct iscsi_session *session = conn->session;
struct iscsi_task_context *task_ctx; struct e4_iscsi_task_context *task_ctx;
struct iscsi_text_rsp *resp_hdr_ptr; struct iscsi_text_rsp *resp_hdr_ptr;
struct iscsi_text_response_hdr *cqe_text_response; struct iscsi_text_response_hdr *cqe_text_response;
struct qedi_cmd *cmd; struct qedi_cmd *cmd;
...@@ -260,7 +260,7 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi, ...@@ -260,7 +260,7 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
{ {
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
struct iscsi_session *session = conn->session; struct iscsi_session *session = conn->session;
struct iscsi_task_context *task_ctx; struct e4_iscsi_task_context *task_ctx;
struct iscsi_login_rsp *resp_hdr_ptr; struct iscsi_login_rsp *resp_hdr_ptr;
struct iscsi_login_response_hdr *cqe_login_response; struct iscsi_login_response_hdr *cqe_login_response;
struct qedi_cmd *cmd; struct qedi_cmd *cmd;
...@@ -1017,7 +1017,7 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn, ...@@ -1017,7 +1017,7 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params tx_sgl_task_params; struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params; struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params; struct iscsi_task_params task_params;
struct iscsi_task_context *fw_task_ctx; struct e4_iscsi_task_context *fw_task_ctx;
struct qedi_ctx *qedi = qedi_conn->qedi; struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_login_req *login_hdr; struct iscsi_login_req *login_hdr;
struct scsi_sge *resp_sge = NULL; struct scsi_sge *resp_sge = NULL;
...@@ -1037,8 +1037,9 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn, ...@@ -1037,8 +1037,9 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
return -ENOMEM; return -ENOMEM;
fw_task_ctx = fw_task_ctx =
(struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); tid);
memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
qedi_cmd->task_id = tid; qedi_cmd->task_id = tid;
...@@ -1119,7 +1120,7 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn, ...@@ -1119,7 +1120,7 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params tx_sgl_task_params; struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params; struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params; struct iscsi_task_params task_params;
struct iscsi_task_context *fw_task_ctx; struct e4_iscsi_task_context *fw_task_ctx;
struct iscsi_logout *logout_hdr = NULL; struct iscsi_logout *logout_hdr = NULL;
struct qedi_ctx *qedi = qedi_conn->qedi; struct qedi_ctx *qedi = qedi_conn->qedi;
struct qedi_cmd *qedi_cmd; struct qedi_cmd *qedi_cmd;
...@@ -1137,8 +1138,9 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn, ...@@ -1137,8 +1138,9 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
return -ENOMEM; return -ENOMEM;
fw_task_ctx = fw_task_ctx =
(struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); tid);
memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
qedi_cmd->task_id = tid; qedi_cmd->task_id = tid;
...@@ -1467,7 +1469,7 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn, ...@@ -1467,7 +1469,7 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
struct iscsi_tmf_request_hdr tmf_pdu_header; struct iscsi_tmf_request_hdr tmf_pdu_header;
struct iscsi_task_params task_params; struct iscsi_task_params task_params;
struct qedi_ctx *qedi = qedi_conn->qedi; struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_task_context *fw_task_ctx; struct e4_iscsi_task_context *fw_task_ctx;
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
struct iscsi_task *ctask; struct iscsi_task *ctask;
struct iscsi_tm *tmf_hdr; struct iscsi_tm *tmf_hdr;
...@@ -1490,8 +1492,9 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn, ...@@ -1490,8 +1492,9 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
return -ENOMEM; return -ENOMEM;
fw_task_ctx = fw_task_ctx =
(struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); tid);
memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
qedi_cmd->task_id = tid; qedi_cmd->task_id = tid;
...@@ -1605,7 +1608,7 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn, ...@@ -1605,7 +1608,7 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params tx_sgl_task_params; struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params; struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params; struct iscsi_task_params task_params;
struct iscsi_task_context *fw_task_ctx; struct e4_iscsi_task_context *fw_task_ctx;
struct qedi_ctx *qedi = qedi_conn->qedi; struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_text *text_hdr; struct iscsi_text *text_hdr;
struct scsi_sge *req_sge = NULL; struct scsi_sge *req_sge = NULL;
...@@ -1627,8 +1630,9 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn, ...@@ -1627,8 +1630,9 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
return -ENOMEM; return -ENOMEM;
fw_task_ctx = fw_task_ctx =
(struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); tid);
memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
qedi_cmd->task_id = tid; qedi_cmd->task_id = tid;
...@@ -1705,7 +1709,7 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn, ...@@ -1705,7 +1709,7 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params rx_sgl_task_params; struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params; struct iscsi_task_params task_params;
struct qedi_ctx *qedi = qedi_conn->qedi; struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_task_context *fw_task_ctx; struct e4_iscsi_task_context *fw_task_ctx;
struct iscsi_nopout *nopout_hdr; struct iscsi_nopout *nopout_hdr;
struct scsi_sge *resp_sge = NULL; struct scsi_sge *resp_sge = NULL;
struct qedi_cmd *qedi_cmd; struct qedi_cmd *qedi_cmd;
...@@ -1725,8 +1729,9 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn, ...@@ -1725,8 +1729,9 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
return -ENOMEM; return -ENOMEM;
fw_task_ctx = fw_task_ctx =
(struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); tid);
memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
qedi_cmd->task_id = tid; qedi_cmd->task_id = tid;
...@@ -2046,7 +2051,7 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task) ...@@ -2046,7 +2051,7 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
struct iscsi_task_params task_params; struct iscsi_task_params task_params;
struct iscsi_conn_params conn_params; struct iscsi_conn_params conn_params;
struct scsi_initiator_cmd_params cmd_params; struct scsi_initiator_cmd_params cmd_params;
struct iscsi_task_context *fw_task_ctx; struct e4_iscsi_task_context *fw_task_ctx;
struct iscsi_cls_conn *cls_conn; struct iscsi_cls_conn *cls_conn;
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr; struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE; enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE;
...@@ -2069,8 +2074,9 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task) ...@@ -2069,8 +2074,9 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
return -ENOMEM; return -ENOMEM;
fw_task_ctx = fw_task_ctx =
(struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
cmd->task_id = tid; cmd->task_id = tid;
......
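Note: every qedi_fw.c path touched above (TMF, text, NOP-Out, SCSI I/O request) repeats the same preparation step with the renamed context type: fetch the per-task firmware context, cast it to struct e4_iscsi_task_context, and zero it before the PDU-specific fields are filled in. A minimal sketch of that shared step, assuming the qedi driver's qedi_get_task_mem() helper and types; the helper name qedi_prep_fw_task_ctx() is hypothetical and used only for illustration:

static void qedi_prep_fw_task_ctx(struct qedi_ctx *qedi,
				  struct qedi_cmd *qedi_cmd, u16 tid)
{
	struct e4_iscsi_task_context *fw_task_ctx;

	/* Per-task firmware context, cast to the E4-specific type. */
	fw_task_ctx = (struct e4_iscsi_task_context *)
			qedi_get_task_mem(&qedi->tasks, tid);

	/* Zero the whole context before the PDU-specific fields are set. */
	memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));

	qedi_cmd->task_id = tid;
}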
...@@ -203,7 +203,7 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params, ...@@ -203,7 +203,7 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params,
struct data_hdr *pdu_header, struct data_hdr *pdu_header,
enum iscsi_task_type task_type) enum iscsi_task_type task_type)
{ {
struct iscsi_task_context *context; struct e4_iscsi_task_context *context;
u16 index; u16 index;
u32 val; u32 val;
...@@ -222,7 +222,7 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params, ...@@ -222,7 +222,7 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params,
cpu_to_le16(task_params->conn_icid); cpu_to_le16(task_params->conn_icid);
SET_FIELD(context->ustorm_ag_context.flags1, SET_FIELD(context->ustorm_ag_context.flags1,
USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1); E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
context->ustorm_st_context.task_type = task_type; context->ustorm_st_context.task_type = task_type;
context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number; context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
...@@ -252,9 +252,8 @@ void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc, ...@@ -252,9 +252,8 @@ void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc,
static static
void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt, void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
struct ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt, struct e4_ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
u32 remaining_recv_len, u32 remaining_recv_len, u32 expected_data_transfer_len,
u32 expected_data_transfer_len,
u8 num_sges, bool tx_dif_conn_err_en) u8 num_sges, bool tx_dif_conn_err_en)
{ {
u32 val; u32 val;
...@@ -265,12 +264,12 @@ void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt, ...@@ -265,12 +264,12 @@ void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
ustorm_st_cxt->exp_data_transfer_len = val; ustorm_st_cxt->exp_data_transfer_len = val;
SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges); SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges);
SET_FIELD(ustorm_ag_cxt->flags2, SET_FIELD(ustorm_ag_cxt->flags2,
USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN, E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
tx_dif_conn_err_en ? 1 : 0); tx_dif_conn_err_en ? 1 : 0);
} }
static static
void set_rw_exp_data_acked_and_cont_len(struct iscsi_task_context *context, void set_rw_exp_data_acked_and_cont_len(struct e4_iscsi_task_context *context,
struct iscsi_conn_params *conn_params, struct iscsi_conn_params *conn_params,
enum iscsi_task_type task_type, enum iscsi_task_type task_type,
u32 task_size, u32 task_size,
...@@ -469,7 +468,7 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context, ...@@ -469,7 +468,7 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
} }
} }
static void set_local_completion_context(struct iscsi_task_context *context) static void set_local_completion_context(struct e4_iscsi_task_context *context)
{ {
SET_FIELD(context->ystorm_st_context.state.flags, SET_FIELD(context->ystorm_st_context.state.flags,
YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1); YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1);
...@@ -486,7 +485,7 @@ static int init_rw_iscsi_task(struct iscsi_task_params *task_params, ...@@ -486,7 +485,7 @@ static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
struct scsi_dif_task_params *dif_task_params) struct scsi_dif_task_params *dif_task_params)
{ {
u32 exp_data_transfer_len = conn_params->max_burst_length; u32 exp_data_transfer_len = conn_params->max_burst_length;
struct iscsi_task_context *cxt; struct e4_iscsi_task_context *cxt;
bool slow_io = false; bool slow_io = false;
u32 task_size, val; u32 task_size, val;
u8 num_sges = 0; u8 num_sges = 0;
...@@ -600,7 +599,7 @@ int init_initiator_login_request_task(struct iscsi_task_params *task_params, ...@@ -600,7 +599,7 @@ int init_initiator_login_request_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_params, struct scsi_sgl_task_params *tx_params,
struct scsi_sgl_task_params *rx_params) struct scsi_sgl_task_params *rx_params)
{ {
struct iscsi_task_context *cxt; struct e4_iscsi_task_context *cxt;
cxt = task_params->context; cxt = task_params->context;
...@@ -642,7 +641,7 @@ int init_initiator_nop_out_task(struct iscsi_task_params *task_params, ...@@ -642,7 +641,7 @@ int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_sgl_task_params, struct scsi_sgl_task_params *tx_sgl_task_params,
struct scsi_sgl_task_params *rx_sgl_task_params) struct scsi_sgl_task_params *rx_sgl_task_params)
{ {
struct iscsi_task_context *cxt; struct e4_iscsi_task_context *cxt;
cxt = task_params->context; cxt = task_params->context;
...@@ -688,7 +687,7 @@ int init_initiator_logout_request_task(struct iscsi_task_params *task_params, ...@@ -688,7 +687,7 @@ int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_params, struct scsi_sgl_task_params *tx_params,
struct scsi_sgl_task_params *rx_params) struct scsi_sgl_task_params *rx_params)
{ {
struct iscsi_task_context *cxt; struct e4_iscsi_task_context *cxt;
cxt = task_params->context; cxt = task_params->context;
...@@ -743,7 +742,7 @@ int init_initiator_text_request_task(struct iscsi_task_params *task_params, ...@@ -743,7 +742,7 @@ int init_initiator_text_request_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_params, struct scsi_sgl_task_params *tx_params,
struct scsi_sgl_task_params *rx_params) struct scsi_sgl_task_params *rx_params)
{ {
struct iscsi_task_context *cxt; struct e4_iscsi_task_context *cxt;
cxt = task_params->context; cxt = task_params->context;
......
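The renamed aggregation-context flags above (E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN) are only ever consumed through SET_FIELD(), which token-pastes _MASK and _SHIFT onto the field name. Below is a self-contained, compile-and-run sketch of that mask/shift idiom; the macro is a generic stand-in for the driver's SET_FIELD() and the mask/shift values are illustrative, not the real HSI ones:

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; the real defines live in the FW HSI headers. */
#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK	0x1
#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT	0

/* Generic stand-in for SET_FIELD(): clear the field, then OR in the value. */
#define SET_FIELD(value, name, flag)						\
	do {									\
		(value) &= ~((name##_MASK) << (name##_SHIFT));			\
		(value) |= (((flag) & (name##_MASK)) << (name##_SHIFT));	\
	} while (0)

int main(void)
{
	uint8_t flags1 = 0;

	SET_FIELD(flags1, E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
	printf("flags1 = 0x%02x\n", flags1);	/* prints 0x01 */
	return 0;
}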
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
#include "qedi_fw_scsi.h" #include "qedi_fw_scsi.h"
struct iscsi_task_params { struct iscsi_task_params {
struct iscsi_task_context *context; struct e4_iscsi_task_context *context;
struct iscsi_wqe *sqe; struct iscsi_wqe *sqe;
u32 tx_io_size; u32 tx_io_size;
u32 rx_io_size; u32 rx_io_size;
......
...@@ -182,7 +182,7 @@ struct qedi_cmd { ...@@ -182,7 +182,7 @@ struct qedi_cmd {
struct scsi_cmnd *scsi_cmd; struct scsi_cmnd *scsi_cmd;
struct scatterlist *sg; struct scatterlist *sg;
struct qedi_io_bdt io_tbl; struct qedi_io_bdt io_tbl;
struct iscsi_task_context request; struct e4_iscsi_task_context request;
unsigned char *sense_buffer; unsigned char *sense_buffer;
dma_addr_t sense_buffer_dma; dma_addr_t sense_buffer_dma;
u16 task_id; u16 task_id;
......
...@@ -339,12 +339,12 @@ static int qedi_init_uio(struct qedi_ctx *qedi) ...@@ -339,12 +339,12 @@ static int qedi_init_uio(struct qedi_ctx *qedi)
static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi, static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
struct qed_sb_info *sb_info, u16 sb_id) struct qed_sb_info *sb_info, u16 sb_id)
{ {
struct status_block *sb_virt; struct status_block_e4 *sb_virt;
dma_addr_t sb_phys; dma_addr_t sb_phys;
int ret; int ret;
sb_virt = dma_alloc_coherent(&qedi->pdev->dev, sb_virt = dma_alloc_coherent(&qedi->pdev->dev,
sizeof(struct status_block), &sb_phys, sizeof(struct status_block_e4), &sb_phys,
GFP_KERNEL); GFP_KERNEL);
if (!sb_virt) { if (!sb_virt) {
QEDI_ERR(&qedi->dbg_ctx, QEDI_ERR(&qedi->dbg_ctx,
...@@ -961,7 +961,7 @@ static bool qedi_process_completions(struct qedi_fastpath *fp) ...@@ -961,7 +961,7 @@ static bool qedi_process_completions(struct qedi_fastpath *fp)
{ {
struct qedi_ctx *qedi = fp->qedi; struct qedi_ctx *qedi = fp->qedi;
struct qed_sb_info *sb_info = fp->sb_info; struct qed_sb_info *sb_info = fp->sb_info;
struct status_block *sb = sb_info->sb_virt; struct status_block_e4 *sb = sb_info->sb_virt;
struct qedi_percpu_s *p = NULL; struct qedi_percpu_s *p = NULL;
struct global_queue *que; struct global_queue *que;
u16 prod_idx; u16 prod_idx;
...@@ -1015,7 +1015,7 @@ static bool qedi_fp_has_work(struct qedi_fastpath *fp) ...@@ -1015,7 +1015,7 @@ static bool qedi_fp_has_work(struct qedi_fastpath *fp)
struct qedi_ctx *qedi = fp->qedi; struct qedi_ctx *qedi = fp->qedi;
struct global_queue *que; struct global_queue *que;
struct qed_sb_info *sb_info = fp->sb_info; struct qed_sb_info *sb_info = fp->sb_info;
struct status_block *sb = sb_info->sb_virt; struct status_block_e4 *sb = sb_info->sb_virt;
u16 prod_idx; u16 prod_idx;
barrier(); barrier();
......
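The qedi_main.c hunks above size the per-queue status block DMA buffer by the renamed struct status_block_e4. A minimal allocation/teardown sketch built around that sizing, assuming the standard dma_alloc_coherent()/dma_free_coherent() API; the function names are hypothetical and the teardown half is not part of this hunk, shown only for symmetry:

static int qedi_sb_alloc_sketch(struct qedi_ctx *qedi,
				struct qed_sb_info *sb_info)
{
	struct status_block_e4 *sb_virt;
	dma_addr_t sb_phys;

	/* The device-visible buffer is sized by the E4 status block layout. */
	sb_virt = dma_alloc_coherent(&qedi->pdev->dev,
				     sizeof(struct status_block_e4),
				     &sb_phys, GFP_KERNEL);
	if (!sb_virt)
		return -ENOMEM;

	sb_info->sb_virt = sb_virt;
	sb_info->sb_phys = sb_phys;
	return 0;
}

static void qedi_sb_free_sketch(struct qedi_ctx *qedi,
				struct qed_sb_info *sb_info)
{
	if (sb_info->sb_virt)
		dma_free_coherent(&qedi->pdev->dev,
				  sizeof(struct status_block_e4),
				  sb_info->sb_virt, sb_info->sb_phys);
}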
...@@ -156,11 +156,11 @@ ...@@ -156,11 +156,11 @@
#define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2) #define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2)
#define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB) #define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB)
#define MAX_NUM_VOQS (MAX_NUM_VOQS_K2) #define MAX_NUM_VOQS_E4 (MAX_NUM_VOQS_K2)
#define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB) #define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
/* CIDs */ /* CIDs */
#define NUM_OF_CONNECTION_TYPES (8) #define NUM_OF_CONNECTION_TYPES_E4 (8)
#define NUM_OF_LCIDS (320) #define NUM_OF_LCIDS (320)
#define NUM_OF_LTIDS (320) #define NUM_OF_LTIDS (320)
...@@ -401,7 +401,7 @@ ...@@ -401,7 +401,7 @@
#define CAU_FSM_ETH_TX 1 #define CAU_FSM_ETH_TX 1
/* Number of Protocol Indices per Status Block */ /* Number of Protocol Indices per Status Block */
#define PIS_PER_SB 12 #define PIS_PER_SB_E4 12
#define CAU_HC_STOPPED_STATE 3 #define CAU_HC_STOPPED_STATE 3
#define CAU_HC_DISABLE_STATE 4 #define CAU_HC_DISABLE_STATE 4
...@@ -1202,20 +1202,20 @@ struct rdif_task_context { ...@@ -1202,20 +1202,20 @@ struct rdif_task_context {
}; };
/* Status block structure */ /* Status block structure */
struct status_block { struct status_block_e4 {
__le16 pi_array[PIS_PER_SB]; __le16 pi_array[PIS_PER_SB_E4];
__le32 sb_num; __le32 sb_num;
#define STATUS_BLOCK_SB_NUM_MASK 0x1FF #define STATUS_BLOCK_E4_SB_NUM_MASK 0x1FF
#define STATUS_BLOCK_SB_NUM_SHIFT 0 #define STATUS_BLOCK_E4_SB_NUM_SHIFT 0
#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F #define STATUS_BLOCK_E4_ZERO_PAD_MASK 0x7F
#define STATUS_BLOCK_ZERO_PAD_SHIFT 9 #define STATUS_BLOCK_E4_ZERO_PAD_SHIFT 9
#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF #define STATUS_BLOCK_E4_ZERO_PAD2_MASK 0xFFFF
#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16 #define STATUS_BLOCK_E4_ZERO_PAD2_SHIFT 16
__le32 prod_index; __le32 prod_index;
#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF #define STATUS_BLOCK_E4_PROD_INDEX_MASK 0xFFFFFF
#define STATUS_BLOCK_PROD_INDEX_SHIFT 0 #define STATUS_BLOCK_E4_PROD_INDEX_SHIFT 0
#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF #define STATUS_BLOCK_E4_ZERO_PAD3_MASK 0xFF
#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24 #define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT 24
}; };
/* Tdif context */ /* Tdif context */
......
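For reference, the mask/shift pairs above describe how struct status_block_e4 packs its fields: sb_num (9 bits) plus padding in the first __le32, and prod_index (24 bits) plus padding in the second. A small self-contained sketch of extracting those fields, using the mask/shift values from the hunk above and a generic GET_FIELD-style helper (the test words are arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Copied from the status_block_e4 definition above. */
#define STATUS_BLOCK_E4_SB_NUM_MASK		0x1FF
#define STATUS_BLOCK_E4_SB_NUM_SHIFT		0
#define STATUS_BLOCK_E4_PROD_INDEX_MASK		0xFFFFFF
#define STATUS_BLOCK_E4_PROD_INDEX_SHIFT	0

/* Generic field extraction: shift down, then mask. */
#define GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & (name##_MASK))

int main(void)
{
	uint32_t sb_num_word = 0x12340042;	/* arbitrary test value */
	uint32_t prod_word = 0xab001234;	/* arbitrary test value */

	printf("sb_num     = 0x%x\n",
	       GET_FIELD(sb_num_word, STATUS_BLOCK_E4_SB_NUM));	/* 0x42 */
	printf("prod_index = 0x%x\n",
	       GET_FIELD(prod_word, STATUS_BLOCK_E4_PROD_INDEX));	/* 0x1234 */
	return 0;
}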
...@@ -316,7 +316,7 @@ enum qed_int_mode { ...@@ -316,7 +316,7 @@ enum qed_int_mode {
}; };
struct qed_sb_info { struct qed_sb_info {
struct status_block *sb_virt; struct status_block_e4 *sb_virt;
dma_addr_t sb_phys; dma_addr_t sb_phys;
u32 sb_ack; /* Last given ack */ u32 sb_ack; /* Last given ack */
u16 igu_sb_id; u16 igu_sb_id;
...@@ -939,7 +939,7 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) ...@@ -939,7 +939,7 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
u16 rc = 0; u16 rc = 0;
prod = le32_to_cpu(sb_info->sb_virt->prod_index) & prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
STATUS_BLOCK_PROD_INDEX_MASK; STATUS_BLOCK_E4_PROD_INDEX_MASK;
if (sb_info->sb_ack != prod) { if (sb_info->sb_ack != prod) {
sb_info->sb_ack = prod; sb_info->sb_ack = prod;
rc |= QED_SB_IDX; rc |= QED_SB_IDX;
......
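The qed_sb_update_sb_idx() hunk above is where the renamed STATUS_BLOCK_E4_PROD_INDEX_MASK is applied on the fast path: the helper latches the producer index from the E4 status block and reports whether it moved. A hedged sketch of how a caller might consume its return value; the wrapper below is illustrative and not taken from the patch:

static void example_sb_check(struct qed_sb_info *sb_info)
{
	/* qed_sb_update_sb_idx() masks prod_index with
	 * STATUS_BLOCK_E4_PROD_INDEX_MASK, updates sb_ack and returns a
	 * bitmask of what changed.
	 */
	u16 rc = qed_sb_update_sb_idx(sb_info);

	if (rc & QED_SB_IDX) {
		/* Producer advanced: new events are pending; walk the
		 * status block's protocol indices / completion queues here.
		 */
	}
}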