Commit da090917 authored by Tomer Tayar's avatar Tomer Tayar Committed by David S. Miller

qed*: Utilize FW 8.33.1.0

Advance the qed* drivers to use firmware 8.33.1.0:
Modify core driver (qed) to utilize the new FW and initialize the device
with it. This is the lion's share of the patch, and includes changes to FW
interface files, device initialization flows, FW interaction flows, and
debug collection flows.
Modify Ethernet driver (qede) to make use of new FW in fastpath.
Modify RoCE/iWARP driver (qedr) to make use of new FW in fastpath.
Modify FCoE driver (qedf) to make use of new FW in fastpath.
Modify iSCSI driver (qedi) to make use of new FW in fastpath.
Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
Signed-off-by: Yuval Bason <Yuval.Bason@cavium.com>
Signed-off-by: Ram Amrani <Ram.Amrani@cavium.com>
Signed-off-by: Manish Chopra <Manish.Chopra@cavium.com>
Signed-off-by: Chad Dupuis <Chad.Dupuis@cavium.com>
Signed-off-by: Manish Rangankar <Manish.Rangankar@cavium.com>
Signed-off-by: Tomer Tayar <Tomer.Tayar@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 21dd79e8
...@@ -164,6 +164,13 @@ struct rdma_srq_sge { ...@@ -164,6 +164,13 @@ struct rdma_srq_sge {
__le32 l_key; __le32 l_key;
}; };
/* Rdma doorbell data for flags update */
struct rdma_pwm_flags_data {
__le16 icid; /* internal CID */
u8 agg_flags; /* aggregative flags */
u8 reserved;
};
/* Rdma doorbell data for SQ and RQ */ /* Rdma doorbell data for SQ and RQ */
struct rdma_pwm_val16_data { struct rdma_pwm_val16_data {
__le16 icid; __le16 icid;
...@@ -184,8 +191,12 @@ struct rdma_pwm_val32_data { ...@@ -184,8 +191,12 @@ struct rdma_pwm_val32_data {
#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT 0 #define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT 0
#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK 0x1 #define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK 0x1
#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2 #define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2
#define RDMA_PWM_VAL32_DATA_RESERVED_MASK 0x1F #define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_MASK 0x1
#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT 3 #define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_SHIFT 3
#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_MASK 0x1
#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_SHIFT 4
#define RDMA_PWM_VAL32_DATA_RESERVED_MASK 0x7
#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT 5
__le32 value; __le32 value;
}; };
...@@ -492,8 +503,10 @@ struct rdma_sq_fmr_wqe { ...@@ -492,8 +503,10 @@ struct rdma_sq_fmr_wqe {
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT 5 #define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT 5
#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK 0x1 #define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK 0x1
#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6 #define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6
#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0x1FF #define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_MASK 0x1
#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 7 #define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_SHIFT 7
#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0xFF
#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 8
__le32 reserved5; __le32 reserved5;
}; };
...@@ -572,8 +585,10 @@ struct rdma_sq_fmr_wqe_3rd { ...@@ -572,8 +585,10 @@ struct rdma_sq_fmr_wqe_3rd {
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT 5 #define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT 5
#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK 0x1 #define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK 0x1
#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6 #define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6
#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0x1FF #define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_MASK 0x1
#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT 7 #define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_SHIFT 7
#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0xFF
#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT 8
__le32 reserved5; __le32 reserved5;
}; };
...@@ -618,8 +633,10 @@ struct rdma_sq_rdma_wqe { ...@@ -618,8 +633,10 @@ struct rdma_sq_rdma_wqe {
#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT 4 #define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT 4
#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK 0x1 #define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK 0x1
#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5 #define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x3 #define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK 0x1
#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 6 #define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT 6
#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x1
#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 7
u8 wqe_size; u8 wqe_size;
u8 prev_wqe_size; u8 prev_wqe_size;
struct regpair remote_va; struct regpair remote_va;
......
...@@ -85,6 +85,7 @@ config QED ...@@ -85,6 +85,7 @@ config QED
tristate "QLogic QED 25/40/100Gb core driver" tristate "QLogic QED 25/40/100Gb core driver"
depends on PCI depends on PCI
select ZLIB_INFLATE select ZLIB_INFLATE
select CRC8
---help--- ---help---
This enables the support for ... This enables the support for ...
......
...@@ -778,8 +778,8 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev, ...@@ -778,8 +778,8 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
return sw_fid; return sw_fid;
} }
#define PURE_LB_TC 8
#define PKT_LB_TC 9 #define PKT_LB_TC 9
#define MAX_NUM_VOQS_E4 20
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate); int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
......
...@@ -742,7 +742,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count) ...@@ -742,7 +742,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]); p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
qed_cxt_qm_iids(p_hwfn, &qm_iids); qed_cxt_qm_iids(p_hwfn, &qm_iids);
total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, total = qed_qm_pf_mem_size(qm_iids.cids,
qm_iids.vf_cids, qm_iids.tids, qm_iids.vf_cids, qm_iids.tids,
p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_pqs,
p_hwfn->qm_info.num_vf_pqs); p_hwfn->qm_info.num_vf_pqs);
...@@ -1496,20 +1496,24 @@ static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn) ...@@ -1496,20 +1496,24 @@ static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
} }
} }
void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool is_pf_loading)
{ {
struct qed_qm_pf_rt_init_params params;
struct qed_qm_info *qm_info = &p_hwfn->qm_info; struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct qed_qm_pf_rt_init_params params;
struct qed_mcp_link_state *p_link;
struct qed_qm_iids iids; struct qed_qm_iids iids;
memset(&iids, 0, sizeof(iids)); memset(&iids, 0, sizeof(iids));
qed_cxt_qm_iids(p_hwfn, &iids); qed_cxt_qm_iids(p_hwfn, &iids);
p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
memset(&params, 0, sizeof(params)); memset(&params, 0, sizeof(params));
params.port_id = p_hwfn->port_id; params.port_id = p_hwfn->port_id;
params.pf_id = p_hwfn->rel_pf_id; params.pf_id = p_hwfn->rel_pf_id;
params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
params.is_first_pf = p_hwfn->first_on_engine; params.is_pf_loading = is_pf_loading;
params.num_pf_cids = iids.cids; params.num_pf_cids = iids.cids;
params.num_vf_cids = iids.vf_cids; params.num_vf_cids = iids.vf_cids;
params.num_tids = iids.tids; params.num_tids = iids.tids;
...@@ -1520,6 +1524,7 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) ...@@ -1520,6 +1524,7 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
params.num_vports = qm_info->num_vports; params.num_vports = qm_info->num_vports;
params.pf_wfq = qm_info->pf_wfq; params.pf_wfq = qm_info->pf_wfq;
params.pf_rl = qm_info->pf_rl; params.pf_rl = qm_info->pf_rl;
params.link_speed = p_link->speed;
params.pq_params = qm_info->qm_pq_params; params.pq_params = qm_info->qm_pq_params;
params.vport_params = qm_info->qm_vport_params; params.vport_params = qm_info->qm_vport_params;
...@@ -1883,7 +1888,7 @@ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn) ...@@ -1883,7 +1888,7 @@ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{ {
qed_qm_init_pf(p_hwfn, p_ptt); qed_qm_init_pf(p_hwfn, p_ptt, true);
qed_cm_init_pf(p_hwfn); qed_cm_init_pf(p_hwfn);
qed_dq_init_pf(p_hwfn); qed_dq_init_pf(p_hwfn);
qed_cdu_init_pf(p_hwfn); qed_cdu_init_pf(p_hwfn);
......
...@@ -169,8 +169,10 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); ...@@ -169,8 +169,10 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
* *
* @param p_hwfn * @param p_hwfn
* @param p_ptt * @param p_ptt
* @param is_pf_loading
*/ */
void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool is_pf_loading);
/** /**
* @brief Reconfigures QM pf on the fly * @brief Reconfigures QM pf on the fly
......
...@@ -954,9 +954,7 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src, ...@@ -954,9 +954,7 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
struct pf_update_ramrod_data *p_dest) struct pf_update_ramrod_data *p_dest)
{ {
struct protocol_dcb_data *p_dcb_data; struct protocol_dcb_data *p_dcb_data;
bool update_flag = false; u8 update_flag;
p_dest->pf_id = p_src->pf_id;
update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update; update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update;
p_dest->update_fcoe_dcb_data_mode = update_flag; p_dest->update_fcoe_dcb_data_mode = update_flag;
......
This diff is collapsed.
...@@ -784,7 +784,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) ...@@ -784,7 +784,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_init_clear_rt_data(p_hwfn); qed_init_clear_rt_data(p_hwfn);
/* prepare QM portion of runtime array */ /* prepare QM portion of runtime array */
qed_qm_init_pf(p_hwfn, p_ptt); qed_qm_init_pf(p_hwfn, p_ptt, false);
/* activate init tool on runtime array */ /* activate init tool on runtime array */
rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id, rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
...@@ -1527,6 +1527,11 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, ...@@ -1527,6 +1527,11 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
if (rc) if (rc)
return rc; return rc;
/* Sanity check before the PF init sequence that uses DMAE */
rc = qed_dmae_sanity(p_hwfn, p_ptt, "pf_phase");
if (rc)
return rc;
/* PF Init sequence */ /* PF Init sequence */
rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode); rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
if (rc) if (rc)
...@@ -2192,7 +2197,7 @@ qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) ...@@ -2192,7 +2197,7 @@ qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
/* No need for a case for QED_CMDQS_CQS since /* No need for a case for QED_CMDQS_CQS since
* CNQ/CMDQS are the same resource. * CNQ/CMDQS are the same resource.
*/ */
resc_max_val = NUM_OF_CMDQS_CQS; resc_max_val = NUM_OF_GLOBAL_QUEUES;
break; break;
case QED_RDMA_STATS_QUEUE: case QED_RDMA_STATS_QUEUE:
resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
...@@ -2267,7 +2272,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn, ...@@ -2267,7 +2272,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
case QED_RDMA_CNQ_RAM: case QED_RDMA_CNQ_RAM:
case QED_CMDQS_CQS: case QED_CMDQS_CQS:
/* CNQ/CMDQS are the same resource */ /* CNQ/CMDQS are the same resource */
*p_resc_num = NUM_OF_CMDQS_CQS / num_funcs; *p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs;
break; break;
case QED_RDMA_STATS_QUEUE: case QED_RDMA_STATS_QUEUE:
*p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 : *p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
......
This diff is collapsed.
...@@ -807,3 +807,71 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn, ...@@ -807,3 +807,71 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
return rc; return rc;
} }
int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, const char *phase)
{
u32 size = PAGE_SIZE / 2, val;
struct qed_dmae_params params;
int rc = 0;
dma_addr_t p_phys;
void *p_virt;
u32 *p_tmp;
p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
2 * size, &p_phys, GFP_KERNEL);
if (!p_virt) {
DP_NOTICE(p_hwfn,
"DMAE sanity [%s]: failed to allocate memory\n",
phase);
return -ENOMEM;
}
/* Fill the bottom half of the allocated memory with a known pattern */
for (p_tmp = (u32 *)p_virt;
p_tmp < (u32 *)((u8 *)p_virt + size); p_tmp++) {
/* Save the address itself as the value */
val = (u32)(uintptr_t)p_tmp;
*p_tmp = val;
}
/* Zero the top half of the allocated memory */
memset((u8 *)p_virt + size, 0, size);
DP_VERBOSE(p_hwfn,
QED_MSG_SP,
"DMAE sanity [%s]: src_addr={phys 0x%llx, virt %p}, dst_addr={phys 0x%llx, virt %p}, size 0x%x\n",
phase,
(u64)p_phys,
p_virt, (u64)(p_phys + size), (u8 *)p_virt + size, size);
memset(&params, 0, sizeof(params));
rc = qed_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
size / 4 /* size_in_dwords */, &params);
if (rc) {
DP_NOTICE(p_hwfn,
"DMAE sanity [%s]: qed_dmae_host2host() failed. rc = %d.\n",
phase, rc);
goto out;
}
/* Verify that the top half of the allocated memory has the pattern */
for (p_tmp = (u32 *)((u8 *)p_virt + size);
p_tmp < (u32 *)((u8 *)p_virt + (2 * size)); p_tmp++) {
/* The corresponding address in the bottom half */
val = (u32)(uintptr_t)p_tmp - size;
if (*p_tmp != val) {
DP_NOTICE(p_hwfn,
"DMAE sanity [%s]: addr={phys 0x%llx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n",
phase,
(u64)p_phys + ((u8 *)p_tmp - (u8 *)p_virt),
p_tmp, *p_tmp, val);
rc = -EINVAL;
goto out;
}
}
out:
dma_free_coherent(&p_hwfn->cdev->pdev->dev, 2 * size, p_virt, p_phys);
return rc;
}
...@@ -299,4 +299,8 @@ union qed_qm_pq_params { ...@@ -299,4 +299,8 @@ union qed_qm_pq_params {
int qed_init_fw_data(struct qed_dev *cdev, int qed_init_fw_data(struct qed_dev *cdev,
const u8 *fw_data); const u8 *fw_data);
int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, const char *phase);
#endif #endif
...@@ -414,11 +414,23 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn, ...@@ -414,11 +414,23 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
} }
/* init_ops callbacks entry point */ /* init_ops callbacks entry point */
static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn, static int qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_ptt *p_ptt,
struct init_callback_op *p_cmd) struct init_callback_op *p_cmd)
{ {
DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n"); int rc;
switch (p_cmd->callback_id) {
case DMAE_READY_CB:
rc = qed_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
break;
default:
DP_NOTICE(p_hwfn, "Unexpected init op callback ID %d\n",
p_cmd->callback_id);
return -EINVAL;
}
return rc;
} }
static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn, static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
...@@ -519,7 +531,7 @@ int qed_init_run(struct qed_hwfn *p_hwfn, ...@@ -519,7 +531,7 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
break; break;
case INIT_OP_CALLBACK: case INIT_OP_CALLBACK:
qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback); rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
break; break;
} }
......
...@@ -89,7 +89,7 @@ struct qed_iscsi_conn { ...@@ -89,7 +89,7 @@ struct qed_iscsi_conn {
u8 local_mac[6]; u8 local_mac[6];
u8 remote_mac[6]; u8 remote_mac[6];
u16 vlan_id; u16 vlan_id;
u8 tcp_flags; u16 tcp_flags;
u8 ip_version; u8 ip_version;
u32 remote_ip[4]; u32 remote_ip[4];
u32 local_ip[4]; u32 local_ip[4];
...@@ -106,7 +106,6 @@ struct qed_iscsi_conn { ...@@ -106,7 +106,6 @@ struct qed_iscsi_conn {
u32 ss_thresh; u32 ss_thresh;
u16 srtt; u16 srtt;
u16 rtt_var; u16 rtt_var;
u32 ts_time;
u32 ts_recent; u32 ts_recent;
u32 ts_recent_age; u32 ts_recent_age;
u32 total_rt; u32 total_rt;
...@@ -128,7 +127,6 @@ struct qed_iscsi_conn { ...@@ -128,7 +127,6 @@ struct qed_iscsi_conn {
u16 mss; u16 mss;
u8 snd_wnd_scale; u8 snd_wnd_scale;
u8 rcv_wnd_scale; u8 rcv_wnd_scale;
u32 ts_ticks_per_second;
u16 da_timeout_value; u16 da_timeout_value;
u8 ack_frequency; u8 ack_frequency;
...@@ -214,9 +212,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn, ...@@ -214,9 +212,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring; p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring; p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring; p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
p_init->ooo_enable = p_params->ooo_enable;
p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] +
p_params->ll2_ooo_queue_id; p_params->ll2_ooo_queue_id;
p_init->func_params.log_page_size = p_params->log_page_size; p_init->func_params.log_page_size = p_params->log_page_size;
val = p_params->num_tasks; val = p_params->num_tasks;
p_init->func_params.num_tasks = cpu_to_le16(val); p_init->func_params.num_tasks = cpu_to_le16(val);
...@@ -371,7 +369,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, ...@@ -371,7 +369,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id); p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id);
p_tcp->flags = p_conn->tcp_flags; p_tcp->flags = cpu_to_le16(p_conn->tcp_flags);
p_tcp->ip_version = p_conn->ip_version; p_tcp->ip_version = p_conn->ip_version;
for (i = 0; i < 4; i++) { for (i = 0; i < 4; i++) {
dval = p_conn->remote_ip[i]; dval = p_conn->remote_ip[i];
...@@ -436,7 +434,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, ...@@ -436,7 +434,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
p_tcp2->remote_mac_addr_lo = swab16(get_unaligned(p + 2)); p_tcp2->remote_mac_addr_lo = swab16(get_unaligned(p + 2));
p_tcp2->vlan_id = cpu_to_le16(p_conn->vlan_id); p_tcp2->vlan_id = cpu_to_le16(p_conn->vlan_id);
p_tcp2->flags = p_conn->tcp_flags; p_tcp2->flags = cpu_to_le16(p_conn->tcp_flags);
p_tcp2->ip_version = p_conn->ip_version; p_tcp2->ip_version = p_conn->ip_version;
for (i = 0; i < 4; i++) { for (i = 0; i < 4; i++) {
...@@ -458,6 +456,11 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, ...@@ -458,6 +456,11 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
p_tcp2->syn_ip_payload_length = cpu_to_le16(wval); p_tcp2->syn_ip_payload_length = cpu_to_le16(wval);
p_tcp2->syn_phy_addr_lo = DMA_LO_LE(p_conn->syn_phy_addr); p_tcp2->syn_phy_addr_lo = DMA_LO_LE(p_conn->syn_phy_addr);
p_tcp2->syn_phy_addr_hi = DMA_HI_LE(p_conn->syn_phy_addr); p_tcp2->syn_phy_addr_hi = DMA_HI_LE(p_conn->syn_phy_addr);
p_tcp2->cwnd = cpu_to_le32(p_conn->cwnd);
p_tcp2->ka_max_probe_cnt = p_conn->ka_probe_cnt;
p_tcp2->ka_timeout = cpu_to_le32(p_conn->ka_timeout);
p_tcp2->max_rt_time = cpu_to_le32(p_conn->max_rt_time);
p_tcp2->ka_interval = cpu_to_le32(p_conn->ka_interval);
} }
return qed_spq_post(p_hwfn, p_ent, NULL); return qed_spq_post(p_hwfn, p_ent, NULL);
...@@ -692,8 +695,7 @@ static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn, ...@@ -692,8 +695,7 @@ static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
} }
} }
static int qed_iscsi_setup_connection(struct qed_hwfn *p_hwfn, static int qed_iscsi_setup_connection(struct qed_iscsi_conn *p_conn)
struct qed_iscsi_conn *p_conn)
{ {
if (!p_conn->queue_cnts_virt_addr) if (!p_conn->queue_cnts_virt_addr)
goto nomem; goto nomem;
...@@ -844,7 +846,7 @@ static int qed_iscsi_acquire_connection(struct qed_hwfn *p_hwfn, ...@@ -844,7 +846,7 @@ static int qed_iscsi_acquire_connection(struct qed_hwfn *p_hwfn,
rc = qed_iscsi_allocate_connection(p_hwfn, &p_conn); rc = qed_iscsi_allocate_connection(p_hwfn, &p_conn);
if (!rc) if (!rc)
rc = qed_iscsi_setup_connection(p_hwfn, p_conn); rc = qed_iscsi_setup_connection(p_conn);
if (rc) { if (rc) {
spin_lock_bh(&p_hwfn->p_iscsi_info->lock); spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
...@@ -1294,7 +1296,6 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev, ...@@ -1294,7 +1296,6 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev,
con->ss_thresh = conn_info->ss_thresh; con->ss_thresh = conn_info->ss_thresh;
con->srtt = conn_info->srtt; con->srtt = conn_info->srtt;
con->rtt_var = conn_info->rtt_var; con->rtt_var = conn_info->rtt_var;
con->ts_time = conn_info->ts_time;
con->ts_recent = conn_info->ts_recent; con->ts_recent = conn_info->ts_recent;
con->ts_recent_age = conn_info->ts_recent_age; con->ts_recent_age = conn_info->ts_recent_age;
con->total_rt = conn_info->total_rt; con->total_rt = conn_info->total_rt;
...@@ -1316,7 +1317,6 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev, ...@@ -1316,7 +1317,6 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev,
con->mss = conn_info->mss; con->mss = conn_info->mss;
con->snd_wnd_scale = conn_info->snd_wnd_scale; con->snd_wnd_scale = conn_info->snd_wnd_scale;
con->rcv_wnd_scale = conn_info->rcv_wnd_scale; con->rcv_wnd_scale = conn_info->rcv_wnd_scale;
con->ts_ticks_per_second = conn_info->ts_ticks_per_second;
con->da_timeout_value = conn_info->da_timeout_value; con->da_timeout_value = conn_info->da_timeout_value;
con->ack_frequency = conn_info->ack_frequency; con->ack_frequency = conn_info->ack_frequency;
......
...@@ -64,14 +64,21 @@ struct mpa_v2_hdr { ...@@ -64,14 +64,21 @@ struct mpa_v2_hdr {
#define QED_IWARP_INVALID_TCP_CID 0xffffffff #define QED_IWARP_INVALID_TCP_CID 0xffffffff
#define QED_IWARP_RCV_WND_SIZE_DEF (256 * 1024) #define QED_IWARP_RCV_WND_SIZE_DEF (256 * 1024)
#define QED_IWARP_RCV_WND_SIZE_MIN (64 * 1024) #define QED_IWARP_RCV_WND_SIZE_MIN (0xffff)
#define TIMESTAMP_HEADER_SIZE (12) #define TIMESTAMP_HEADER_SIZE (12)
#define QED_IWARP_MAX_FIN_RT_DEFAULT (2)
#define QED_IWARP_TS_EN BIT(0) #define QED_IWARP_TS_EN BIT(0)
#define QED_IWARP_DA_EN BIT(1) #define QED_IWARP_DA_EN BIT(1)
#define QED_IWARP_PARAM_CRC_NEEDED (1) #define QED_IWARP_PARAM_CRC_NEEDED (1)
#define QED_IWARP_PARAM_P2P (1) #define QED_IWARP_PARAM_P2P (1)
#define QED_IWARP_DEF_MAX_RT_TIME (0)
#define QED_IWARP_DEF_CWND_FACTOR (4)
#define QED_IWARP_DEF_KA_MAX_PROBE_CNT (5)
#define QED_IWARP_DEF_KA_TIMEOUT (1200000) /* 20 min */
#define QED_IWARP_DEF_KA_INTERVAL (1000) /* 1 sec */
static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
u8 fw_event_code, u16 echo, u8 fw_event_code, u16 echo,
union event_ring_data *data, union event_ring_data *data,
...@@ -120,11 +127,17 @@ static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid) ...@@ -120,11 +127,17 @@ static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
spin_unlock_bh(&p_hwfn->p_rdma_info->lock); spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
} }
void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn, void
struct iwarp_init_func_params *p_ramrod) qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
struct iwarp_init_func_ramrod_data *p_ramrod)
{ {
p_ramrod->ll2_ooo_q_index = RESC_START(p_hwfn, QED_LL2_QUEUE) + p_ramrod->iwarp.ll2_ooo_q_index =
RESC_START(p_hwfn, QED_LL2_QUEUE) +
p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle; p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
return;
} }
static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid) static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
...@@ -699,6 +712,12 @@ qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) ...@@ -699,6 +712,12 @@ qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
tcp->ttl = 0x40; tcp->ttl = 0x40;
tcp->tos_or_tc = 0; tcp->tos_or_tc = 0;
tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME;
tcp->cwnd = QED_IWARP_DEF_CWND_FACTOR * tcp->mss;
tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT;
tcp->ka_timeout = QED_IWARP_DEF_KA_TIMEOUT;
tcp->ka_interval = QED_IWARP_DEF_KA_INTERVAL;
tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale; tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
tcp->connect_mode = ep->connect_mode; tcp->connect_mode = ep->connect_mode;
...@@ -807,6 +826,7 @@ static int ...@@ -807,6 +826,7 @@ static int
qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{ {
struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod; struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
struct qed_iwarp_info *iwarp_info;
struct qed_sp_init_data init_data; struct qed_sp_init_data init_data;
dma_addr_t async_output_phys; dma_addr_t async_output_phys;
struct qed_spq_entry *p_ent; struct qed_spq_entry *p_ent;
...@@ -874,6 +894,8 @@ qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) ...@@ -874,6 +894,8 @@ qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
p_mpa_ramrod->common.reject = 1; p_mpa_ramrod->common.reject = 1;
} }
iwarp_info = &p_hwfn->p_rdma_info->iwarp;
p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size;
p_mpa_ramrod->mode = ep->mpa_rev; p_mpa_ramrod->mode = ep->mpa_rev;
SET_FIELD(p_mpa_ramrod->rtr_pref, SET_FIELD(p_mpa_ramrod->rtr_pref,
IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type); IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);
...@@ -2745,6 +2767,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, ...@@ -2745,6 +2767,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
/* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */ /* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */
iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) - iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
ilog2(QED_IWARP_RCV_WND_SIZE_MIN); ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale;
iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED; iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED; iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
......
...@@ -95,6 +95,7 @@ struct qed_iwarp_info { ...@@ -95,6 +95,7 @@ struct qed_iwarp_info {
spinlock_t iw_lock; /* for iwarp resources */ spinlock_t iw_lock; /* for iwarp resources */
spinlock_t qp_lock; /* for teardown races */ spinlock_t qp_lock; /* for teardown races */
u32 rcv_wnd_scale; u32 rcv_wnd_scale;
u16 rcv_wnd_size;
u16 max_mtu; u16 max_mtu;
u8 mac_addr[ETH_ALEN]; u8 mac_addr[ETH_ALEN];
u8 crc_needed; u8 crc_needed;
...@@ -187,7 +188,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, ...@@ -187,7 +188,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
struct qed_rdma_start_in_params *params); struct qed_rdma_start_in_params *params);
void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn, void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
struct iwarp_init_func_params *p_ramrod); struct iwarp_init_func_ramrod_data *p_ramrod);
int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
......
...@@ -1969,33 +1969,45 @@ void qed_reset_vport_stats(struct qed_dev *cdev) ...@@ -1969,33 +1969,45 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
_qed_get_vport_stats(cdev, cdev->reset_stats); _qed_get_vport_stats(cdev, cdev->reset_stats);
} }
static void static enum gft_profile_type
qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode)
{
if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE)
return GFT_PROFILE_TYPE_4_TUPLE;
if (mode == QED_FILTER_CONFIG_MODE_IP_DEST)
return GFT_PROFILE_TYPE_IP_DST_PORT;
return GFT_PROFILE_TYPE_L4_DST_PORT;
}
void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_arfs_config_params *p_cfg_params) struct qed_arfs_config_params *p_cfg_params)
{ {
if (p_cfg_params->arfs_enable) { if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
qed_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id, qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
p_cfg_params->tcp, p_cfg_params->udp, p_cfg_params->tcp,
p_cfg_params->ipv4, p_cfg_params->ipv6); p_cfg_params->udp,
DP_VERBOSE(p_hwfn, QED_MSG_SP, p_cfg_params->ipv4,
"tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n", p_cfg_params->ipv6,
qed_arfs_mode_to_hsi(p_cfg_params->mode));
DP_VERBOSE(p_hwfn,
QED_MSG_SP,
"Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s mode=%08x\n",
p_cfg_params->tcp ? "Enable" : "Disable", p_cfg_params->tcp ? "Enable" : "Disable",
p_cfg_params->udp ? "Enable" : "Disable", p_cfg_params->udp ? "Enable" : "Disable",
p_cfg_params->ipv4 ? "Enable" : "Disable", p_cfg_params->ipv4 ? "Enable" : "Disable",
p_cfg_params->ipv6 ? "Enable" : "Disable"); p_cfg_params->ipv6 ? "Enable" : "Disable",
(u32)p_cfg_params->mode);
} else { } else {
qed_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id); DP_VERBOSE(p_hwfn, QED_MSG_SP, "Disabled Filtering\n");
qed_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
} }
DP_VERBOSE(p_hwfn, QED_MSG_SP, "Configured ARFS mode : %s\n",
p_cfg_params->arfs_enable ? "Enable" : "Disable");
} }
static int int
qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
struct qed_spq_comp_cb *p_cb, struct qed_spq_comp_cb *p_cb,
dma_addr_t p_addr, u16 length, u16 qid, struct qed_ntuple_filter_params *p_params)
u8 vport_id, bool b_is_add)
{ {
struct rx_update_gft_filter_data *p_ramrod = NULL; struct rx_update_gft_filter_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL; struct qed_spq_entry *p_ent = NULL;
...@@ -2004,13 +2016,15 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, ...@@ -2004,13 +2016,15 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u8 abs_vport_id = 0; u8 abs_vport_id = 0;
int rc = -EINVAL; int rc = -EINVAL;
rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id); rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc) if (rc)
return rc; return rc;
rc = qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id); if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
rc = qed_fw_l2_queue(p_hwfn, p_params->qid, &abs_rx_q_id);
if (rc) if (rc)
return rc; return rc;
}
/* Get SPQ entry */ /* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data)); memset(&init_data, 0, sizeof(init_data));
...@@ -2032,17 +2046,27 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, ...@@ -2032,17 +2046,27 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
return rc; return rc;
p_ramrod = &p_ent->ramrod.rx_update_gft; p_ramrod = &p_ent->ramrod.rx_update_gft;
DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
p_ramrod->pkt_hdr_length = cpu_to_le16(length); DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr);
p_ramrod->rx_qid_or_action_icid = cpu_to_le16(abs_rx_q_id); p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length);
p_ramrod->vport_id = abs_vport_id;
p_ramrod->filter_type = RFS_FILTER_TYPE; if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER : GFT_DELETE_FILTER; p_ramrod->rx_qid_valid = 1;
p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
}
p_ramrod->flow_id_valid = 0;
p_ramrod->flow_id = 0;
p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id);
p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER
: GFT_DELETE_FILTER;
DP_VERBOSE(p_hwfn, QED_MSG_SP, DP_VERBOSE(p_hwfn, QED_MSG_SP,
"V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n", "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
abs_vport_id, abs_rx_q_id, abs_vport_id, abs_rx_q_id,
b_is_add ? "Adding" : "Removing", (u64)p_addr, length); p_params->b_is_add ? "Adding" : "Removing",
(u64)p_params->addr, p_params->length);
return qed_spq_post(p_hwfn, p_ent, NULL); return qed_spq_post(p_hwfn, p_ent, NULL);
} }
...@@ -2743,7 +2767,8 @@ static int qed_configure_filter(struct qed_dev *cdev, ...@@ -2743,7 +2767,8 @@ static int qed_configure_filter(struct qed_dev *cdev,
} }
} }
static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher) static int qed_configure_arfs_searcher(struct qed_dev *cdev,
enum qed_filter_config_mode mode)
{ {
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_arfs_config_params arfs_config_params; struct qed_arfs_config_params arfs_config_params;
...@@ -2753,8 +2778,7 @@ static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher) ...@@ -2753,8 +2778,7 @@ static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
arfs_config_params.udp = true; arfs_config_params.udp = true;
arfs_config_params.ipv4 = true; arfs_config_params.ipv4 = true;
arfs_config_params.ipv6 = true; arfs_config_params.ipv6 = true;
arfs_config_params.arfs_enable = en_searcher; arfs_config_params.mode = mode;
qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt, qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
&arfs_config_params); &arfs_config_params);
return 0; return 0;
...@@ -2762,8 +2786,8 @@ static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher) ...@@ -2762,8 +2786,8 @@ static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
static void static void
qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn, qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
void *cookie, union event_ring_data *data, void *cookie,
u8 fw_return_code) union event_ring_data *data, u8 fw_return_code)
{ {
struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common; struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
void *dev = p_hwfn->cdev->ops_cookie; void *dev = p_hwfn->cdev->ops_cookie;
...@@ -2771,10 +2795,10 @@ qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn, ...@@ -2771,10 +2795,10 @@ qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
op->arfs_filter_op(dev, cookie, fw_return_code); op->arfs_filter_op(dev, cookie, fw_return_code);
} }
static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie, static int
dma_addr_t mapping, u16 length, qed_ntuple_arfs_filter_config(struct qed_dev *cdev,
u16 vport_id, u16 rx_queue_id, void *cookie,
bool add_filter) struct qed_ntuple_filter_params *params)
{ {
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_spq_comp_cb cb; struct qed_spq_comp_cb cb;
...@@ -2783,9 +2807,19 @@ static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie, ...@@ -2783,9 +2807,19 @@ static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
cb.function = qed_arfs_sp_response_handler; cb.function = qed_arfs_sp_response_handler;
cb.cookie = cookie; cb.cookie = cookie;
rc = qed_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt, if (params->b_is_vf) {
&cb, mapping, length, rx_queue_id, if (!qed_iov_is_valid_vfid(p_hwfn, params->vf_id, false,
vport_id, add_filter); false)) {
DP_INFO(p_hwfn, "vfid 0x%02x is out of bounds\n",
params->vf_id);
return rc;
}
params->vport_id = params->vf_id + 1;
params->qid = QED_RFS_NTUPLE_QID_RSS;
}
rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, params);
if (rc) if (rc)
DP_NOTICE(p_hwfn, DP_NOTICE(p_hwfn,
"Failed to issue a-RFS filter configuration\n"); "Failed to issue a-RFS filter configuration\n");
......
...@@ -190,7 +190,7 @@ struct qed_arfs_config_params { ...@@ -190,7 +190,7 @@ struct qed_arfs_config_params {
bool udp; bool udp;
bool ipv4; bool ipv4;
bool ipv6; bool ipv6;
bool arfs_enable; enum qed_filter_config_mode mode;
}; };
struct qed_sp_vport_update_params { struct qed_sp_vport_update_params {
...@@ -277,6 +277,37 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats); ...@@ -277,6 +277,37 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
void qed_reset_vport_stats(struct qed_dev *cdev); void qed_reset_vport_stats(struct qed_dev *cdev);
/**
* *@brief qed_arfs_mode_configure -
*
**Enable or disable rfs mode. It must accept atleast one of tcp or udp true
**and atleast one of ipv4 or ipv6 true to enable rfs mode.
*
**@param p_hwfn
**@param p_ptt
**@param p_cfg_params - arfs mode configuration parameters.
*
*/
void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_arfs_config_params *p_cfg_params);
/**
* @brief - qed_configure_rfs_ntuple_filter
*
* This ramrod should be used to add or remove arfs hw filter
*
* @params p_hwfn
* @params p_cb - Used for QED_SPQ_MODE_CB,where client would initialize
* it with cookie and callback function address, if not
* using this mode then client must pass NULL.
* @params p_params
*/
int
qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
struct qed_spq_comp_cb *p_cb,
struct qed_ntuple_filter_params *p_params);
#define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8) #define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8)
#define QED_QUEUE_CID_SELF (0xff) #define QED_QUEUE_CID_SELF (0xff)
......
...@@ -406,6 +406,9 @@ static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn, ...@@ -406,6 +406,9 @@ static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi); data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo); data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error; data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id);
data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp);
} }
static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn, static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
...@@ -927,7 +930,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, ...@@ -927,7 +930,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
qed_chain_get_pbl_phys(&p_rx->rcq_chain)); qed_chain_get_pbl_phys(&p_rx->rcq_chain));
p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg; p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en; p_ramrod->inner_vlan_stripping_en =
p_ll2_conn->input.rx_vlan_removal_en;
p_ramrod->queue_id = p_ll2_conn->queue_id; p_ramrod->queue_id = p_ll2_conn->queue_id;
p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0; p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;
...@@ -1299,8 +1303,20 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data) ...@@ -1299,8 +1303,20 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input)); memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));
p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ? switch (data->input.tx_dest) {
CORE_TX_DEST_NW : CORE_TX_DEST_LB; case QED_LL2_TX_DEST_NW:
p_ll2_info->tx_dest = CORE_TX_DEST_NW;
break;
case QED_LL2_TX_DEST_LB:
p_ll2_info->tx_dest = CORE_TX_DEST_LB;
break;
case QED_LL2_TX_DEST_DROP:
p_ll2_info->tx_dest = CORE_TX_DEST_DROP;
break;
default:
return -EINVAL;
}
if (data->input.conn_type == QED_LL2_TYPE_OOO || if (data->input.conn_type == QED_LL2_TYPE_OOO ||
data->input.secondary_queue) data->input.secondary_queue)
p_ll2_info->main_func_queue = false; p_ll2_info->main_func_queue = false;
...@@ -2281,8 +2297,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) ...@@ -2281,8 +2297,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
goto release_terminate; goto release_terminate;
} }
if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI && if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI) {
cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n"); DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
rc = qed_ll2_start_ooo(cdev, params); rc = qed_ll2_start_ooo(cdev, params);
if (rc) { if (rc) {
...@@ -2340,8 +2355,7 @@ static int qed_ll2_stop(struct qed_dev *cdev) ...@@ -2340,8 +2355,7 @@ static int qed_ll2_stop(struct qed_dev *cdev)
qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt); qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
eth_zero_addr(cdev->ll2_mac_address); eth_zero_addr(cdev->ll2_mac_address);
if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI && if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI)
cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
qed_ll2_stop_ooo(cdev); qed_ll2_stop_ooo(cdev);
rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
......
...@@ -2234,7 +2234,7 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len) ...@@ -2234,7 +2234,7 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
DRV_MSG_CODE_NVM_READ_NVRAM, DRV_MSG_CODE_NVM_READ_NVRAM,
addr + offset + addr + offset +
(bytes_to_copy << (bytes_to_copy <<
DRV_MB_PARAM_NVM_LEN_SHIFT), DRV_MB_PARAM_NVM_LEN_OFFSET),
&resp, &resp_param, &resp, &resp_param,
&read_len, &read_len,
(u32 *)(p_buf + offset)); (u32 *)(p_buf + offset));
......
...@@ -553,7 +553,7 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn, ...@@ -553,7 +553,7 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
qed_iwarp_init_fw_ramrod(p_hwfn, qed_iwarp_init_fw_ramrod(p_hwfn,
&p_ent->ramrod.iwarp_init_func.iwarp); &p_ent->ramrod.iwarp_init_func);
p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma; p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
} else { } else {
p_ramrod = &p_ent->ramrod.roce_init_func.rdma; p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
......
...@@ -124,6 +124,8 @@ ...@@ -124,6 +124,8 @@
0x1f0434UL 0x1f0434UL
#define PRS_REG_SEARCH_TAG1 \ #define PRS_REG_SEARCH_TAG1 \
0x1f0444UL 0x1f0444UL
#define PRS_REG_SEARCH_TENANT_ID \
0x1f044cUL
#define PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST \ #define PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST \
0x1f0a0cUL 0x1f0a0cUL
#define PRS_REG_SEARCH_TCP_FIRST_FRAG \ #define PRS_REG_SEARCH_TCP_FIRST_FRAG \
...@@ -200,6 +202,12 @@ ...@@ -200,6 +202,12 @@
0x2e8800UL 0x2e8800UL
#define CCFC_REG_STRONG_ENABLE_VF \ #define CCFC_REG_STRONG_ENABLE_VF \
0x2e070cUL 0x2e070cUL
#define CDU_REG_CCFC_CTX_VALID0 \
0x580400UL
#define CDU_REG_CCFC_CTX_VALID1 \
0x580404UL
#define CDU_REG_TCFC_CTX_VALID0 \
0x580408UL
#define CDU_REG_CID_ADDR_PARAMS \ #define CDU_REG_CID_ADDR_PARAMS \
0x580900UL 0x580900UL
#define DBG_REG_CLIENT_ENABLE \ #define DBG_REG_CLIENT_ENABLE \
...@@ -1277,6 +1285,46 @@ ...@@ -1277,6 +1285,46 @@
0x0543a4UL 0x0543a4UL
#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5 \ #define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5 \
0x0543a8UL 0x0543a8UL
#define PTLD_REG_DBG_SELECT_E5 \
0x5a1600UL
#define PTLD_REG_DBG_DWORD_ENABLE_E5 \
0x5a1604UL
#define PTLD_REG_DBG_SHIFT_E5 \
0x5a1608UL
#define PTLD_REG_DBG_FORCE_VALID_E5 \
0x5a160cUL
#define PTLD_REG_DBG_FORCE_FRAME_E5 \
0x5a1610UL
#define YPLD_REG_DBG_SELECT_E5 \
0x5c1600UL
#define YPLD_REG_DBG_DWORD_ENABLE_E5 \
0x5c1604UL
#define YPLD_REG_DBG_SHIFT_E5 \
0x5c1608UL
#define YPLD_REG_DBG_FORCE_VALID_E5 \
0x5c160cUL
#define YPLD_REG_DBG_FORCE_FRAME_E5 \
0x5c1610UL
#define RGSRC_REG_DBG_SELECT_E5 \
0x320040UL
#define RGSRC_REG_DBG_DWORD_ENABLE_E5 \
0x320044UL
#define RGSRC_REG_DBG_SHIFT_E5 \
0x320048UL
#define RGSRC_REG_DBG_FORCE_VALID_E5 \
0x32004cUL
#define RGSRC_REG_DBG_FORCE_FRAME_E5 \
0x320050UL
#define TGSRC_REG_DBG_SELECT_E5 \
0x322040UL
#define TGSRC_REG_DBG_DWORD_ENABLE_E5 \
0x322044UL
#define TGSRC_REG_DBG_SHIFT_E5 \
0x322048UL
#define TGSRC_REG_DBG_FORCE_VALID_E5 \
0x32204cUL
#define TGSRC_REG_DBG_FORCE_FRAME_E5 \
0x322050UL
#define MISC_REG_RESET_PL_UA \ #define MISC_REG_RESET_PL_UA \
0x008050UL 0x008050UL
#define MISC_REG_RESET_PL_HV \ #define MISC_REG_RESET_PL_HV \
...@@ -1433,6 +1481,8 @@ ...@@ -1433,6 +1481,8 @@
0x340800UL 0x340800UL
#define BRB_REG_BIG_RAM_DATA \ #define BRB_REG_BIG_RAM_DATA \
0x341500UL 0x341500UL
#define BRB_REG_BIG_RAM_DATA_SIZE \
64
#define SEM_FAST_REG_STALL_0_BB_K2 \ #define SEM_FAST_REG_STALL_0_BB_K2 \
0x000488UL 0x000488UL
#define SEM_FAST_REG_STALLED \ #define SEM_FAST_REG_STALLED \
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment