Commit 50bc60cb authored by Michal Kalderon, committed by David S. Miller

qed*: Utilize FW 8.33.11.0

This FW contains several fixes and features

RDMA Features
- SRQ support
- XRC support
- Memory window support
- RDMA low latency queue support
- RDMA bonding support

RDMA bug fixes
- RDMA remote invalidate during retransmit fix
- iWARP MPA connect interop issue with RTR fix
- iWARP Legacy DPM support
- Fix MPA reject flow
- iWARP error handling
- RQ WQE validation checks

MISC
- Fix the endianness of some HSI types
- New restriction: VLAN insertion in core_tx_bd_data can't be set
  for LB (loopback) packets (see the sketch below)
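As an illustration of that restriction (which the LL2/iWARP hunks further down enforce by deleting the old parse-flags conversion), here is a minimal C sketch; the struct and flag are simplified stand-ins for the driver's real LL2 tx interface, not its exact layout:

#include <stdint.h>
#include <string.h>

/* simplified stand-ins for the driver's LL2 tx structures */
enum tx_dest { TX_DEST_NW, TX_DEST_LB };

struct tx_pkt_info {
	uint8_t  num_of_bds;
	uint16_t vlan;		/* tag carried out-of-band in the BD */
	uint8_t  bd_flags;
	enum tx_dest tx_dest;
};

#define BD_VLAN_INSERTION 0x1	/* stand-in for CORE_TX_BD_DATA_VLAN_INSERTION */

static void fill_tx_pkt(struct tx_pkt_info *pkt, uint16_t vlan, enum tx_dest dest)
{
	memset(pkt, 0, sizeof(*pkt));
	pkt->num_of_bds = 1;
	pkt->vlan = vlan;
	pkt->tx_dest = dest;

	/* FW 8.33.11.0: never request VLAN insertion for LB packets;
	 * the FW takes the tag from the vlan field instead. */
	if (dest != TX_DEST_LB && vlan)
		pkt->bd_flags |= BD_VLAN_INSERTION;
}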

ETH
- HW QoS offload support
- Fix VLAN, DCB and SR-IOV flow when a VF sends a packet with an
  inband VLAN tag instead of the default VLAN
- Allow GRE version 1 offloads in RX flow
- Allow VXLAN steering

iSCSI / FCoE
- Fix BD availability checking flow
- Properly support the 256th SGE in iSCSI/FCoE retransmit
- Performance improvement
- Fix handling of iSCSI command arrival with AHS and with immediate data
- Fix IPv6 traffic class configuration

DEBUG
- Update debug utilities
Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
Signed-off-by: Tomer Tayar <Tomer.Tayar@cavium.com>
Signed-off-by: Manish Rangankar <Manish.Rangankar@cavium.com>
Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
Acked-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 18dcbe12
@@ -45,7 +45,7 @@ struct rdma_cqe_responder {
 	__le32 imm_data_or_inv_r_Key;
 	__le32 length;
 	__le32 imm_data_hi;
-	__le16 rq_cons;
+	__le16 rq_cons_or_srq_id;
 	u8 flags;
 #define RDMA_CQE_RESPONDER_TOGGLE_BIT_MASK 0x1
 #define RDMA_CQE_RESPONDER_TOGGLE_BIT_SHIFT 0
@@ -115,6 +115,7 @@ enum rdma_cqe_requester_status_enum {
 	RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR,
 	RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR,
 	RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR,
+	RDMA_CQE_REQ_STS_XRC_VOILATION_ERR,
 	MAX_RDMA_CQE_REQUESTER_STATUS_ENUM
 };
@@ -136,6 +137,7 @@ enum rdma_cqe_type {
 	RDMA_CQE_TYPE_REQUESTER,
 	RDMA_CQE_TYPE_RESPONDER_RQ,
 	RDMA_CQE_TYPE_RESPONDER_SRQ,
+	RDMA_CQE_TYPE_RESPONDER_XRC_SRQ,
 	RDMA_CQE_TYPE_INVALID,
 	MAX_RDMA_CQE_TYPE
 };
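The rename from rq_cons to rq_cons_or_srq_id reflects that the field is overloaded once SRQ support lands: for plain RQ completions it is still the consumer index, while for (XRC) SRQ completions it identifies the SRQ. A small C sketch of that reading, inferred from the rename and the qedr poll path below rather than stated by the header itself:

enum rdma_cqe_type {
	RDMA_CQE_TYPE_REQUESTER,
	RDMA_CQE_TYPE_RESPONDER_RQ,
	RDMA_CQE_TYPE_RESPONDER_SRQ,
	RDMA_CQE_TYPE_RESPONDER_XRC_SRQ,
	RDMA_CQE_TYPE_INVALID,
};

/* one 16-bit field, two meanings, selected by the CQE type */
static const char *resp_field_meaning(enum rdma_cqe_type type)
{
	switch (type) {
	case RDMA_CQE_TYPE_RESPONDER_RQ:
		return "RQ consumer index";
	case RDMA_CQE_TYPE_RESPONDER_SRQ:
	case RDMA_CQE_TYPE_RESPONDER_XRC_SRQ:
		return "id of the SRQ that produced the WQE";
	default:
		return "unused";
	}
}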
@@ -3695,7 +3695,7 @@ static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
 static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
 				 struct rdma_cqe_responder *resp, int *update)
 {
-	if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
+	if (le16_to_cpu(resp->rq_cons_or_srq_id) == qp->rq.wqe_cons) {
 		consume_cqe(cq);
 		*update |= 1;
 	}
@@ -3710,7 +3710,7 @@ static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
 	if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
 		cnt = process_resp_flush(qp, cq, num_entries, wc,
-					 resp->rq_cons);
+					 resp->rq_cons_or_srq_id);
 		try_consume_resp_cqe(cq, qp, resp, update);
 	} else {
 		cnt = process_resp_one(dev, qp, cq, wc, resp);
This diff is collapsed.
@@ -407,6 +407,7 @@ static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
 		       "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);
 	/* init pq params */
+	qm_info->qm_pq_params[pq_idx].port_id = p_hwfn->port_id;
 	qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport +
 						 qm_info->num_vports;
 	qm_info->qm_pq_params[pq_idx].tc_id = tc;
@@ -727,8 +728,9 @@ static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
 		pq = &(qm_info->qm_pq_params[i]);
 		DP_VERBOSE(p_hwfn,
 			   NETIF_MSG_HW,
-			   "pq idx %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
+			   "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
 			   qm_info->start_pq + i,
+			   pq->port_id,
 			   pq->vport_id,
 			   pq->tc_id, pq->wrr_group, pq->rl_valid);
 	}
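The two hunks above add per-PQ port bookkeeping: each physical queue now records its own engine port instead of inheriting one port for the whole PF, which is presumably part of what the HW QoS offload feature builds on. A reduced sketch of the idea, with an illustrative struct rather than the driver's exact init_qm_pq_params layout:

#include <stdint.h>

/* illustrative stand-in for struct init_qm_pq_params */
struct pq_params {
	uint8_t port_id;	/* new: recorded per queue, not per PF */
	uint8_t vport_id;
	uint8_t tc_id;
	uint8_t rl_valid;
};

static void init_pf_pqs(struct pq_params *pqs, int num_pqs,
			uint8_t pf_port, uint8_t num_tcs)
{
	for (int i = 0; i < num_pqs; i++) {
		pqs[i].port_id = pf_port;
		pqs[i].tc_id = (uint8_t)(i % num_tcs);
		pqs[i].rl_valid = 0;	/* no rate limiter by default */
	}
}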
This source diff could not be displayed because it is too large.
@@ -467,12 +467,11 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
 		u16 *p_first_tx_pq_id;
 		ext_voq = qed_get_ext_voq(p_hwfn,
-					  p_params->port_id,
+					  pq_params[i].port_id,
 					  tc_id,
 					  p_params->max_phys_tcs_per_port);
 		is_vf_pq = (i >= p_params->num_pf_pqs);
-		rl_valid = pq_params[i].rl_valid &&
-			   pq_params[i].vport_id < max_qm_global_rls;
+		rl_valid = pq_params[i].rl_valid > 0;
 		/* Update first Tx PQ of VPORT/TC */
 		vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport;
@@ -494,10 +493,11 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
 		}
 		/* Check RL ID */
-		if (pq_params[i].rl_valid && pq_params[i].vport_id >=
-		    max_qm_global_rls)
+		if (rl_valid && pq_params[i].vport_id >= max_qm_global_rls) {
 			DP_NOTICE(p_hwfn,
 				  "Invalid VPORT ID for rate limiter configuration\n");
+			rl_valid = false;
+		}
 		/* Prepare PQ map entry */
 		QM_INIT_TX_PQ_MAP(p_hwfn,
@@ -528,7 +528,7 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
 		pq_info = PQ_INFO_ELEMENT(*p_first_tx_pq_id,
 					  p_params->pf_id,
 					  tc_id,
-					  p_params->port_id,
+					  pq_params[i].port_id,
 					  rl_valid ? 1 : 0,
 					  rl_valid ?
 					  pq_params[i].vport_id : 0);
@@ -603,6 +603,7 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
  * Return -1 on error.
  */
 static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
 			      struct qed_qm_pf_rt_init_params *p_params)
 {
 	u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
@@ -619,7 +620,7 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
 	for (i = 0; i < num_tx_pqs; i++) {
 		ext_voq = qed_get_ext_voq(p_hwfn,
-					  p_params->port_id,
+					  pq_params[i].port_id,
 					  pq_params[i].tc_id,
 					  p_params->max_phys_tcs_per_port);
 		crd_reg_offset =
@@ -1020,7 +1021,8 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
 	*__p_var = (*__p_var & ~BIT(__offset)) | \
 		   ((enable) ? BIT(__offset) : 0); \
 } while (0)
-#define PRS_ETH_TUNN_FIC_FORMAT -188897008
+#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008
+#define PRS_ETH_OUTPUT_FORMAT -46832
 void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
 			     struct qed_ptt *p_ptt, u16 dest_port)
@@ -1046,11 +1048,15 @@ void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
 	shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
 	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
 	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
-	if (reg_val)
-		qed_wr(p_hwfn,
-		       p_ptt,
-		       PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
-		       (u32)PRS_ETH_TUNN_FIC_FORMAT);
+	if (reg_val) {
+		reg_val =
+		    qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+		/* Update output only if tunnel blocks not included. */
+		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
+			qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+			       (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
+	}
 	/* Update NIG register */
 	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
@@ -1077,11 +1083,15 @@ void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
 	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
 	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
 	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
-	if (reg_val)
-		qed_wr(p_hwfn,
-		       p_ptt,
-		       PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
-		       (u32)PRS_ETH_TUNN_FIC_FORMAT);
+	if (reg_val) {
+		reg_val =
+		    qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+		/* Update output only if tunnel blocks not included. */
+		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
+			qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+			       (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
+	}
 	/* Update NIG register */
 	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
@@ -1126,11 +1136,15 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
 	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
 	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_geneve_enable);
 	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
-	if (reg_val)
-		qed_wr(p_hwfn,
-		       p_ptt,
-		       PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
-		       (u32)PRS_ETH_TUNN_FIC_FORMAT);
+	if (reg_val) {
+		reg_val =
+		    qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+		/* Update output only if tunnel blocks not included. */
+		if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
+			qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+			       (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
+	}
 	/* Update NIG register */
 	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
@@ -1152,6 +1166,38 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
 		     ip_geneve_enable ? 1 : 0);
 }
+#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 4
+#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -927094512
+void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
+				struct qed_ptt *p_ptt, bool enable)
+{
+	u32 reg_val, cfg_mask;
+	/* read PRS config register */
+	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);
+	/* set VXLAN_NO_L2_ENABLE mask */
+	cfg_mask = BIT(PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);
+	if (enable) {
+		/* set VXLAN_NO_L2_ENABLE flag */
+		reg_val |= cfg_mask;
+		/* update PRS FIC register */
+		qed_wr(p_hwfn,
+		       p_ptt,
+		       PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+		       (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
+	} else {
+		/* clear VXLAN_NO_L2_ENABLE flag */
+		reg_val &= ~cfg_mask;
+	}
+	/* write PRS config register */
+	qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
+}
 #define T_ETH_PACKET_ACTION_GFT_EVENTID 23
 #define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
 #define T_ETH_PACKET_MATCH_RFS_EVENTID 25
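The new qed_set_vxlan_no_l2_enable above is a plain read-modify-write on one PRS config bit, plus a one-way output-format update on enable. The same shape, reduced to a pure function over the register value (the register access is replaced by a variable for this sketch):

#include <stdint.h>

#define VXLAN_NO_L2_ENABLE_OFFSET 4	/* PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET */

static uint32_t toggle_vxlan_no_l2(uint32_t prs_msg_info, int enable)
{
	uint32_t cfg_mask = 1u << VXLAN_NO_L2_ENABLE_OFFSET;

	if (enable) {
		/* the real helper also rewrites the PRS output format here */
		prs_msg_info |= cfg_mask;
	} else {
		prs_msg_info &= ~cfg_mask;
	}
	return prs_msg_info;
}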
@@ -1268,6 +1314,10 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
 	ram_line_lo = 0;
 	ram_line_hi = 0;
+	/* Tunnel type */
+	SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
+	SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
 	if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
 		SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
 		SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
@@ -1279,9 +1329,14 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
 		SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
 		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
 		SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
-	} else if (profile_type == GFT_PROFILE_TYPE_IP_DST_PORT) {
+	} else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
 		SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
+		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+	} else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
+		SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
+		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
 	} else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
 		SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
 	}
 	qed_wr(p_hwfn,
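The qed_gft_config hunks treat a profile as two 32-bit RAM-line words whose bits select which header fields participate in the match; the new IP_SRC_ADDR profile, for instance, is just SRC_IP plus ETHERTYPE. A toy model (bit positions invented for the sketch; the driver uses the GFT_RAM_LINE_* mask/shift pairs):

#include <stdint.h>

enum { BIT_ETHERTYPE, BIT_SRC_IP, BIT_DST_IP };	/* invented positions */

struct ram_line { uint32_t lo, hi; };

/* GFT_PROFILE_TYPE_IP_SRC_ADDR: match source IP + ethertype */
static struct ram_line ip_src_addr_profile(void)
{
	struct ram_line line = { 0, 0 };

	line.hi |= 1u << BIT_SRC_IP;
	line.lo |= 1u << BIT_ETHERTYPE;
	return line;
}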
@@ -2375,13 +2375,6 @@ qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
 	memset(&tx_pkt, 0, sizeof(tx_pkt));
 	tx_pkt.num_of_bds = 1;
 	tx_pkt.vlan = data->vlan;
-	if (GET_FIELD(data->parse_flags,
-		      PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
-		SET_FIELD(tx_pkt.bd_flags,
-			  CORE_TX_BD_DATA_VLAN_INSERTION, 1);
 	tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2;
 	tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
 	tx_pkt.first_frag = buf->data_phys_addr +
@@ -1974,7 +1974,7 @@ qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode)
 	if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE)
 		return GFT_PROFILE_TYPE_4_TUPLE;
 	if (mode == QED_FILTER_CONFIG_MODE_IP_DEST)
-		return GFT_PROFILE_TYPE_IP_DST_PORT;
+		return GFT_PROFILE_TYPE_IP_DST_ADDR;
 	return GFT_PROFILE_TYPE_L4_DST_PORT;
 }
@@ -591,16 +591,6 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 	}
 }
-static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
-{
-	u8 bd_flags = 0;
-	if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
-		SET_FIELD(bd_flags, CORE_TX_BD_DATA_VLAN_INSERTION, 1);
-	return bd_flags;
-}
 static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
 				  struct qed_ll2_info *p_ll2_conn)
 {
@@ -744,7 +734,6 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
 	struct qed_ooo_buffer *p_buffer;
 	u16 l4_hdr_offset_w;
 	dma_addr_t first_frag;
-	u16 parse_flags;
 	u8 bd_flags;
 	int rc;
@@ -756,8 +745,6 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
 		first_frag = p_buffer->rx_buffer_phys_addr +
 			     p_buffer->placement_offset;
-		parse_flags = p_buffer->parse_flags;
-		bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
 		SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
 		SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);
@@ -110,7 +110,7 @@
 #define FW_MAJOR_VERSION	8
 #define FW_MINOR_VERSION	33
-#define FW_REVISION_VERSION	1
+#define FW_REVISION_VERSION	11
 #define FW_ENGINEERING_VERSION	0
 /***********************/
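The four version defines above compose the FW version in the commit title; a trivial standalone check:

#include <stdio.h>

#define FW_MAJOR_VERSION	8
#define FW_MINOR_VERSION	33
#define FW_REVISION_VERSION	11
#define FW_ENGINEERING_VERSION	0

int main(void)
{
	/* prints 8.33.11.0 */
	printf("%d.%d.%d.%d\n", FW_MAJOR_VERSION, FW_MINOR_VERSION,
	       FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
	return 0;
}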
@@ -105,7 +105,7 @@
 #define ETH_CTL_FRAME_ETH_TYPE_NUM	4
 /* GFS constants */
-#define ETH_GFT_TRASH_CAN_VPORT	0x1FF
+#define ETH_GFT_TRASHCAN_VPORT	0x1FF	/* GFT drop flow vport number */
 /* Destination port mode */
 enum dest_port_mode {
@@ -753,8 +753,8 @@ struct e4_ystorm_iscsi_task_ag_ctx {
 #define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT	5
 #define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK	0x1
 #define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT	6
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK	0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT	7
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_MASK	0x1	/* bit3 */
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_SHIFT	7
 	u8 flags1;
 #define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK	0x3
 #define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT	0
@@ -51,6 +51,8 @@
 #define RDMA_MAX_CQS	(64 * 1024)
 #define RDMA_MAX_TIDS	(128 * 1024 - 1)
 #define RDMA_MAX_PDS	(64 * 1024)
+#define RDMA_MAX_XRC_SRQS	(1024)
+#define RDMA_MAX_SRQS	(32 * 1024)
 #define RDMA_NUM_STATISTIC_COUNTERS	MAX_NUM_VPORTS
 #define RDMA_NUM_STATISTIC_COUNTERS_K2	MAX_NUM_VPORTS_K2
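One sanity note on the new limits: the responder CQE earlier in this diff carries the SRQ id in a __le16 (rq_cons_or_srq_id), so RDMA_MAX_SRQS must stay within 16 bits, which (32 * 1024) does. Expressed as a compile-time check (C11):

#include <assert.h>
#include <stdint.h>

#define RDMA_MAX_SRQS (32 * 1024)

static_assert(RDMA_MAX_SRQS - 1 <= UINT16_MAX,
	      "SRQ id must fit the __le16 rq_cons_or_srq_id field");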
@@ -59,6 +59,9 @@ enum roce_async_events_type {
 	ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
 	ROCE_ASYNC_EVENT_SRQ_EMPTY,
 	ROCE_ASYNC_EVENT_DESTROY_QP_DONE,
+	ROCE_ASYNC_EVENT_XRC_DOMAIN_ERR,
+	ROCE_ASYNC_EVENT_INVALID_XRCETH_ERR,
+	ROCE_ASYNC_EVENT_XRC_SRQ_CATASTROPHIC_ERR,
 	MAX_ROCE_ASYNC_EVENTS_TYPE
 };
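Finally, a hypothetical consumer-side sketch for the three new XRC async events; treating all of them as fatal for the affected XRC SRQ/domain is an assumed policy, not something this header prescribes:

/* reduced copy of the new codes; real values come from the enum above */
enum xrc_async_event {
	XRC_DOMAIN_ERR,
	INVALID_XRCETH_ERR,
	XRC_SRQ_CATASTROPHIC_ERR,
};

static int xrc_event_is_fatal(enum xrc_async_event ev)
{
	switch (ev) {
	case XRC_DOMAIN_ERR:		/* bad XRC domain on the op */
	case INVALID_XRCETH_ERR:	/* malformed XRC extended header */
	case XRC_SRQ_CATASTROPHIC_ERR:	/* SRQ left unusable */
		return 1;
	default:
		return 0;
	}
}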