Commit a2e5ba24 authored by Maxim Mikityanskiy, committed by Jakub Kicinski

net/mlx5e: xsk: Split out WQE allocation for legacy XSK RQ

Allocation of XSK frames on legacy RQ can be made more efficient with a
specialized routine that relies on certain assumptions, such as that there
is only one fragment and that allocation units (XSK frames) are not shared
among multiple packets. This reduces the number of branches both in the XSK
code and in the regular RQ code, because with this approach there is only a
single check of whether the RQ is XSK or regular.
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 0b482232
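
For orientation, the sketch below is a standalone toy model of the branch hoisting this commit performs; it is not the driver code, and every name in it (toy_rq, the fill_wqes_* helpers, the fake DMA fields) is invented for illustration. Before the patch, the shared per-fragment loop chose the DMA address source with an XSK check on every fragment; after it, the XSK-vs-regular check is made once per bulk and the XSK loop bakes in the one-fragment-per-WQE assumption.

/* Toy model only: illustrates hoisting the XSK check out of the hot loop. */
#include <stdbool.h>
#include <stdio.h>

struct toy_rq {
        bool is_xsk;            /* stands in for rq->xsk_pool != NULL */
        unsigned long pool_dma; /* fake DMA base for the XSK pool */
        unsigned long page_dma; /* fake DMA base for the page pool */
};

/* Old shape: one shared loop, a branch per fragment to pick the address source. */
static void fill_wqes_old(const struct toy_rq *rq, unsigned long *addrs, int n)
{
        for (int i = 0; i < n; i++)
                addrs[i] = rq->is_xsk ? rq->pool_dma + i : rq->page_dma + i;
}

/* New shape: the check is hoisted out, so each loop body is branch-free. */
static void fill_wqes_regular(const struct toy_rq *rq, unsigned long *addrs, int n)
{
        for (int i = 0; i < n; i++)
                addrs[i] = rq->page_dma + i;
}

static void fill_wqes_xsk(const struct toy_rq *rq, unsigned long *addrs, int n)
{
        /* One fragment per WQE assumed, like the driver's log_num_frags == 0. */
        for (int i = 0; i < n; i++)
                addrs[i] = rq->pool_dma + i;
}

static void fill_wqes_new(const struct toy_rq *rq, unsigned long *addrs, int n)
{
        if (!rq->is_xsk)        /* the single XSK-vs-regular check per bulk */
                fill_wqes_regular(rq, addrs, n);
        else
                fill_wqes_xsk(rq, addrs, n);
}

int main(void)
{
        struct toy_rq rq = { .is_xsk = true, .pool_dma = 0x1000, .page_dma = 0x2000 };
        unsigned long before[4], after[4];

        fill_wqes_old(&rq, before, 4); /* per-fragment branch inside the loop */
        fill_wqes_new(&rq, after, 4);  /* single check, branch-free loop body */
        printf("old: 0x%lx  new: 0x%lx\n", before[0], after[0]);
        return 0;
}

The payoff mirrors the mlx5e_post_rx_wqes hunk below: the branch moves out of the per-fragment loop into a single bulk-level dispatch, and the XSK path drops the per-fragment handling entirely.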
@@ -8,6 +8,32 @@
 /* RX data path */
 
+int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
+{
+        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
+        int i;
+
+        for (i = 0; i < wqe_bulk; i++) {
+                int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
+                struct mlx5e_wqe_frag_info *frag;
+                struct mlx5e_rx_wqe_cyc *wqe;
+                dma_addr_t addr;
+
+                wqe = mlx5_wq_cyc_get_wqe(wq, j);
+                /* Assumes log_num_frags == 0. */
+                frag = &rq->wqe.frags[j];
+
+                frag->au->xsk = xsk_buff_alloc(rq->xsk_pool);
+                if (unlikely(!frag->au->xsk))
+                        return i;
+
+                addr = xsk_buff_xdp_get_frame_dma(frag->au->xsk);
+                wqe->data[0].addr = cpu_to_be64(addr + rq->buff.headroom);
+        }
+
+        return wqe_bulk;
+}
+
 static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, void *data,
                                                u32 cqe_bcnt)
 {
...
@@ -9,6 +9,7 @@
 /* RX data path */
 
+int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk);
 struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
                                                     struct mlx5e_mpw_info *wi,
                                                     u16 cqe_bcnt,
...
@@ -359,7 +359,7 @@ static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
                 * offset) should just use the new one without replenishing again
                 * by themselves.
                 */
-               err = mlx5e_page_alloc(rq, frag->au);
+               err = mlx5e_page_alloc_pool(rq, frag->au);
 
        return err;
 }
@@ -393,8 +393,7 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
                        goto free_frags;
 
                headroom = i == 0 ? rq->buff.headroom : 0;
-               addr = rq->xsk_pool ? xsk_buff_xdp_get_frame_dma(frag->au->xsk) :
-                                     page_pool_get_dma_addr(frag->au->page);
+               addr = page_pool_get_dma_addr(frag->au->page);
                wqe->data[i].addr = cpu_to_be64(addr + frag->offset + headroom);
        }
@@ -826,7 +825,11 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
         */
        wqe_bulk -= (head + wqe_bulk) & rq->wqe.info.wqe_index_mask;
 
-       count = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk);
+       if (!rq->xsk_pool)
+               count = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk);
+       else
+               count = mlx5e_xsk_alloc_rx_wqes(rq, head, wqe_bulk);
+
        mlx5_wq_cyc_push_n(wq, count);
 
        if (unlikely(count != wqe_bulk)) {
                rq->stats->buff_alloc_err++;
...