Commit 124d0d8d authored by Tariq Toukan, committed by David S. Miller

net/mlx5e: XDP, Remove un-established assumptions on XDP buffer

Remove the assumption of a non-zero linear length in the XDP xmit
function, which serves both internal XDP_TX operations and
redirected-in requests.

Apply the MLX5E_XDP_MIN_INLINE length check only when an inline
header is actually required, i.e. when the SQ min inline mode is not
MLX5_INLINE_MODE_NONE; a simplified sketch of the new logic follows
below.
Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 20409abe
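
To make the control-flow change easier to follow, here is a minimal
standalone sketch of the new gating and descriptor-count logic. It is
an illustration, not driver code: compute_xmit_params, MIN_INLINE, and
EMPTY_DS are assumed stand-in names and values for the mlx5e symbols
used in the diff below.

#include <stdbool.h>
#include <stdint.h>

#define MIN_INLINE 18u /* assumption: stand-in for MLX5E_XDP_MIN_INLINE */
#define EMPTY_DS    2u /* assumption: stand-in for MLX5E_TX_WQE_EMPTY_DS_COUNT */

/* Hypothetical helper mirroring the new logic in mlx5e_xmit_xdp_frame().
 * Returns false on the drop (stats->err) path.
 */
static bool compute_xmit_params(uint32_t dma_len, uint32_t hw_mtu,
				bool inline_mode_none,
				uint16_t *inline_hdr_sz, bool *linear,
				uint16_t *ds_cnt)
{
	/* The minimum-length check now applies only if inlining is required. */
	bool inline_ok = inline_mode_none || dma_len >= MIN_INLINE;

	if (!inline_ok || hw_mtu < dma_len)
		return false;

	*inline_hdr_sz = inline_mode_none ? 0 : MIN_INLINE;

	/* No assumption of a non-zero linear part: a linear data segment
	 * is emitted only if bytes remain after the inline copy.
	 */
	*linear = (dma_len - *inline_hdr_sz) != 0;
	*ds_cnt = EMPTY_DS + (*linear ? 1 : 0) + (*inline_hdr_sz ? 1 : 0);
	return true;
}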
@@ -477,18 +477,26 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
 	u16 ds_cnt, inline_hdr_sz;
 	u8 num_wqebbs = 1;
 	int num_frags = 0;
+	bool inline_ok;
+	bool linear;
 	u16 pi;
 
 	struct mlx5e_xdpsq_stats *stats = sq->stats;
 
-	if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) {
+	inline_ok = sq->min_inline_mode == MLX5_INLINE_MODE_NONE ||
+		dma_len >= MLX5E_XDP_MIN_INLINE;
+
+	if (unlikely(!inline_ok || sq->hw_mtu < dma_len)) {
 		stats->err++;
 		return false;
 	}
 
-	ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
+	inline_hdr_sz = 0;
 	if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE)
-		ds_cnt++;
+		inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
+
+	linear = !!(dma_len - inline_hdr_sz);
+	ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + linear + !!inline_hdr_sz;
 
 	/* check_result must be 0 if sinfo is passed. */
 	if (!check_result) {
@@ -517,22 +525,23 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
 	eseg = &wqe->eth;
 	dseg = wqe->data;
 
-	inline_hdr_sz = 0;
-
 	/* copy the inline part if required */
-	if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
+	if (inline_hdr_sz) {
 		memcpy(eseg->inline_hdr.start, xdptxd->data, sizeof(eseg->inline_hdr.start));
 		memcpy(dseg, xdptxd->data + sizeof(eseg->inline_hdr.start),
-		       MLX5E_XDP_MIN_INLINE - sizeof(eseg->inline_hdr.start));
-		dma_len  -= MLX5E_XDP_MIN_INLINE;
-		dma_addr += MLX5E_XDP_MIN_INLINE;
-		inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
+		       inline_hdr_sz - sizeof(eseg->inline_hdr.start));
+		dma_len  -= inline_hdr_sz;
+		dma_addr += inline_hdr_sz;
 		dseg++;
 	}
 
 	/* write the dma part */
-	dseg->addr       = cpu_to_be64(dma_addr);
-	dseg->byte_count = cpu_to_be32(dma_len);
+	if (linear) {
+		dseg->addr       = cpu_to_be64(dma_addr);
+		dseg->byte_count = cpu_to_be32(dma_len);
+		dseg->lkey       = sq->mkey_be;
+		dseg++;
+	}
 
 	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
@@ -543,7 +552,6 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
 		memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
 
 		eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
 
-		dseg->lkey = sq->mkey_be;
 		for (i = 0; i < num_frags; i++) {
 			skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
@@ -553,10 +561,10 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
 			addr = page_pool_get_dma_addr(skb_frag_page(frag)) +
 				skb_frag_off(frag);
 
-			dseg++;
 			dseg->addr = cpu_to_be64(addr);
 			dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
 			dseg->lkey = sq->mkey_be;
+			dseg++;
 		}
 
 		cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
...
@@ -1886,7 +1886,6 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
 		struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
 		struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
 		struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
-		struct mlx5_wqe_data_seg *dseg;
 
 		sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) {
 			.num_wqebbs = 1,
@@ -1895,9 +1894,6 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
 		cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
 		eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
-
-		dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
-		dseg->lkey = sq->mkey_be;
 	}
 }
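
The dseg handling in the frag loop also changes direction: the cursor
now advances after each write rather than before it, so the loop no
longer relies on a linear segment having been written first. Below is
a minimal sketch of that append pattern with simplified stand-in types
(data_seg and append_seg are illustrative names, not driver symbols).

#include <stdint.h>

struct data_seg {
	uint64_t addr;
	uint32_t byte_count;
	uint32_t lkey;
};

/* Write one segment at the cursor, then advance it (post-increment). */
static struct data_seg *append_seg(struct data_seg *dseg, uint64_t addr,
				   uint32_t len, uint32_t lkey)
{
	dseg->addr = addr;
	dseg->byte_count = len;
	dseg->lkey = lkey;
	return dseg + 1;
}

With this pattern a zero-length linear part simply skips one append and
the first frag still lands in the correct slot, which is why the
pre-loop dseg++ and the separate dseg->lkey assignments could be
dropped.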