Commit 2b31f7ae authored by Saeed Mahameed

net/mlx5: TX WQE update

Add new TX WQE fields for ConnectX-5 VLAN insertion support:
type and vlan_tci. When type = MLX5_ETH_WQE_INSERT_VLAN, the
HW will insert the VLAN tag and priority (vlan_tci) into the packet.

These fields and the inline header fields are mutually exclusive, and
are valid only when:
MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_NOT_REQUIRED
and MLX5_CAP_ETH(mdev, wqe_vlan_insert),
both of which are set in ConnectX-5 and later HW generations.
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
parent f32f5bd2
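To illustrate how the new fields are meant to be used, here is a minimal sketch (not part of this patch; the helper name and the simplified inline fallback are assumptions) of a TX path filling the eth segment on a device where HW VLAN insertion is allowed:

#include <linux/if_vlan.h>
#include <linux/mlx5/qp.h>
#include <linux/skbuff.h>

/* Hypothetical helper, not from this patch: pick HW VLAN insertion when
 * the device allows it, otherwise fall back to the legacy inline header.
 */
static void sketch_build_eseg(struct sk_buff *skb,
                              struct mlx5_wqe_eth_seg *eseg,
                              bool hw_vlan_insert)
{
        if (hw_vlan_insert && skb_vlan_tag_present(skb)) {
                /* HW builds the VLAN header (tag + prio) from vlan_tci */
                eseg->insert.type     = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
                eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
                return;
        }

        /* Legacy inline path, simplified to the two-byte start field here;
         * the real driver copies the full header into the WQE body (see
         * the en_tx.c hunk below).
         */
        memcpy(eseg->inline_hdr.start, skb->data, sizeof(eseg->inline_hdr.start));
        eseg->inline_hdr.sz = cpu_to_be16(sizeof(eseg->inline_hdr.start));
}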
@@ -2984,20 +2984,20 @@ static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg,
         if (wr->opcode == IB_WR_LSO) {
                 struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
-                int size_of_inl_hdr_start = sizeof(eseg->inline_hdr_start);
+                int size_of_inl_hdr_start = sizeof(eseg->inline_hdr.start);
                 u64 left, leftlen, copysz;
                 void *pdata = ud_wr->header;

                 left = ud_wr->hlen;
                 eseg->mss = cpu_to_be16(ud_wr->mss);
-                eseg->inline_hdr_sz = cpu_to_be16(left);
+                eseg->inline_hdr.sz = cpu_to_be16(left);

                 /*
                  * check if there is space till the end of queue, if yes,
                  * copy all in one shot, otherwise copy till the end of queue,
                  * rollback and than the copy the left
                  */
-                leftlen = qend - (void *)eseg->inline_hdr_start;
+                leftlen = qend - (void *)eseg->inline_hdr.start;
                 copysz = min_t(u64, leftlen, left);

                 memcpy(seg - size_of_inl_hdr_start, pdata, copysz);
...
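The comment in this hunk describes copying into WQE memory that may wrap at the end of the queue. Purely as an illustration of that pattern (the rest of set_eth_seg() is not shown above, and the names below are made up), the wrap-around copy looks like:

#include <linux/kernel.h>
#include <linux/string.h>

/* Illustration of "copy till the end of queue, then wrap and copy the
 * rest"; not the driver's actual code.
 */
static void *copy_wrapping(void *dst, void *qend, void *qstart,
                           const void *src, u64 len)
{
        u64 first = min_t(u64, (u64)(qend - dst), len);

        memcpy(dst, src, first);               /* up to the end of the queue */
        if (first < len) {                     /* wrap and copy what is left */
                memcpy(qstart, (const u8 *)src + first, len - first);
                return (u8 *)qstart + (len - first);
        }
        return (u8 *)dst + first;
}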
@@ -687,8 +687,8 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
         memset(wqe, 0, sizeof(*wqe));

         /* copy the inline part */
-        memcpy(eseg->inline_hdr_start, xdp->data, MLX5E_XDP_MIN_INLINE);
-        eseg->inline_hdr_sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
+        memcpy(eseg->inline_hdr.start, xdp->data, MLX5E_XDP_MIN_INLINE);
+        eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);

         dseg = (struct mlx5_wqe_data_seg *)cseg + (MLX5E_XDP_TX_DS_COUNT - 1);
...
@@ -284,18 +284,18 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
         wi->num_bytes = num_bytes;

         if (skb_vlan_tag_present(skb)) {
-                mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data,
+                mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data,
                                   &skb_len);
                 ihs += VLAN_HLEN;
         } else {
-                memcpy(eseg->inline_hdr_start, skb_data, ihs);
+                memcpy(eseg->inline_hdr.start, skb_data, ihs);
                 mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
         }

-        eseg->inline_hdr_sz = cpu_to_be16(ihs);
+        eseg->inline_hdr.sz = cpu_to_be16(ihs);

         ds_cnt  = sizeof(*wqe) / MLX5_SEND_WQE_DS;
-        ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start),
+        ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start),
                                MLX5_SEND_WQE_DS);
         dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;
...
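For reference, a worked example of the ds_cnt arithmetic in this hunk; the 16-byte MLX5_SEND_WQE_DS, the 32-byte ctrl + eth base WQE, and the 58-byte inline header are assumptions used only to make the numbers concrete:

#include <linux/kernel.h>

/* ds_cnt counts the 16-byte units occupied before the data segments.
 * With ihs = 58 (Ethernet + VLAN + IPv4 + TCP copied inline):
 *   base       : 32 / 16                  = 2
 *   inline hdr : DIV_ROUND_UP(58 - 2, 16) = 4
 *   ds_cnt                                = 6
 */
static u16 example_ds_cnt(u16 ihs)
{
        u16 ds_cnt = 32 / 16;                  /* ctrl seg + eth seg          */

        ds_cnt += DIV_ROUND_UP(ihs - 2, 16);   /* bytes past inline_hdr.start */
        return ds_cnt;
}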
@@ -577,7 +577,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
         u8         lro_cap[0x1];
         u8         lro_psh_flag[0x1];
         u8         lro_time_stamp[0x1];
-        u8         reserved_at_5[0x3];
+        u8         reserved_at_5[0x2];
+        u8         wqe_vlan_insert[0x1];
         u8         self_lb_en_modifiable[0x1];
         u8         reserved_at_9[0x2];
         u8         max_lso_cap[0x5];
...
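The new wqe_vlan_insert bit is the capability the commit message refers to. A minimal sketch of the corresponding probe-time check (the helper name is an assumption, not part of this patch):

#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>

/* Sketch only: HW VLAN insertion is usable when inline headers are not
 * required and the new wqe_vlan_insert capability bit is set.
 */
static bool sketch_hw_vlan_insert_supported(struct mlx5_core_dev *mdev)
{
        return MLX5_CAP_ETH(mdev, wqe_inline_mode) ==
                       MLX5_CAP_INLINE_MODE_NOT_REQUIRED &&
               MLX5_CAP_ETH(mdev, wqe_vlan_insert);
}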
@@ -221,14 +221,26 @@ enum {
         MLX5_ETH_WQE_L4_CSUM            = 1 << 7,
 };

+enum {
+        MLX5_ETH_WQE_INSERT_VLAN        = 1 << 15,
+};
+
 struct mlx5_wqe_eth_seg {
         u8              rsvd0[4];
         u8              cs_flags;
         u8              rsvd1;
         __be16          mss;
         __be32          rsvd2;
-        __be16          inline_hdr_sz;
-        u8              inline_hdr_start[2];
+        union {
+                struct {
+                        __be16 sz;
+                        u8     start[2];
+                } inline_hdr;
+                struct {
+                        __be16 type;
+                        __be16 vlan_tci;
+                } insert;
+        };
 };

 struct mlx5_wqe_xrc_seg {
...
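One property of the new layout worth noting: the union overlays insert.type/insert.vlan_tci exactly on the former inline_hdr_sz/inline_hdr_start bytes, so the eth segment stays 16 bytes and no existing offsets move. A compile-time illustration (not part of the patch, and assuming static_assert/offsetof are available in the build):

#include <linux/build_bug.h>
#include <linux/mlx5/qp.h>
#include <linux/stddef.h>

/* Illustration only: the new insert fields alias the old inline header
 * bytes, keeping struct mlx5_wqe_eth_seg at one 16-byte data segment.
 */
static_assert(sizeof(struct mlx5_wqe_eth_seg) == 16,
              "eth segment must stay 16 bytes");
static_assert(offsetof(struct mlx5_wqe_eth_seg, insert.type) ==
              offsetof(struct mlx5_wqe_eth_seg, inline_hdr.sz),
              "insert.type aliases inline_hdr.sz");
static_assert(offsetof(struct mlx5_wqe_eth_seg, insert.vlan_tci) ==
              offsetof(struct mlx5_wqe_eth_seg, inline_hdr.start),
              "insert.vlan_tci aliases inline_hdr.start");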