Commit 4c2af5cc authored by Tariq Toukan, committed by Saeed Mahameed

net/mlx5e: Small enhancements for RX MPWQE allocation and free

The DMA offset of an MPWQE (Multi-Packet WQE) within its memory region
is fixed across all rounds. Calculate it once at creation time,
instead of at runtime. This also makes the wqe argument of the
function obsolete.

In addition, optimize the dma_info iterator calculation.
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 9bafe2ad
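
For illustration only — a minimal, self-contained C sketch of the idea behind the first hunk, using made-up names (wqe_mtt_offset(), PAGE_SHIFT_SIM, NUM_WQES) rather than the driver's API: the per-WQE DMA offset depends only on the WQE index, so it can be computed and written into each WQE once when the RQ is created, instead of on every MPWQE allocation round.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT_SIM 12	/* assume 4 KiB pages, illustration only */
#define NUM_WQES       8	/* illustrative ring size */

/* stand-in for mlx5e_get_wqe_mtt_offset(): MTT entries per WQE * index */
static uint64_t wqe_mtt_offset(unsigned int ix)
{
	const unsigned int mtts_per_wqe = 16;	/* illustrative constant */

	return (uint64_t)mtts_per_wqe * ix;
}

int main(void)
{
	uint64_t wqe_addr[NUM_WQES];
	unsigned int i;

	/* "creation time": the fixed per-WQE DMA offsets are filled in once */
	for (i = 0; i < NUM_WQES; i++)
		wqe_addr[i] = wqe_mtt_offset(i) << PAGE_SHIFT_SIM;

	/* "runtime": every allocation round reuses the precomputed value */
	for (i = 0; i < NUM_WQES; i++)
		printf("wqe %u: dma offset 0x%llx\n",
		       i, (unsigned long long)wqe_addr[i]);

	return 0;
}

The point is only that the offset is a pure function of the WQE index, so recomputing and rewriting it on every allocation round is redundant work.
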
@@ -674,6 +674,12 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 	for (i = 0; i < wq_sz; i++) {
 		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
 
+		if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+			u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, i) << PAGE_SHIFT;
+
+			wqe->data.addr = cpu_to_be64(dma_offset);
+		}
+
 		wqe->data.byte_count = cpu_to_be32(byte_count);
 		wqe->data.lkey = rq->mkey_be;
 	}
@@ -374,18 +374,15 @@ static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
 }
 
 static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
-				    struct mlx5e_rx_wqe *wqe,
 				    u16 ix)
 {
 	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
-	u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, ix) << PAGE_SHIFT;
 	int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
+	struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
 	int err;
 	int i;
 
-	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
-		struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
-
+	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
 		err = mlx5e_page_alloc_mapped(rq, dma_info);
 		if (unlikely(err))
 			goto err_unmap;
@@ -395,14 +392,12 @@ static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
 	memset(wi->skbs_frags, 0, sizeof(*wi->skbs_frags) * MLX5_MPWRQ_PAGES_PER_WQE);
 	wi->consumed_strides = 0;
-	wqe->data.addr = cpu_to_be64(dma_offset);
 
 	return 0;
 
 err_unmap:
 	while (--i >= 0) {
-		struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
-
+		dma_info--;
 		page_ref_sub(dma_info->page, pg_strides);
 		mlx5e_page_release(rq, dma_info, true);
 	}
@@ -413,11 +408,10 @@ static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
 void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
 {
 	int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
+	struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
 	int i;
 
-	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
-		struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
-
+	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
 		page_ref_sub(dma_info->page, pg_strides - wi->skbs_frags[i]);
 		mlx5e_page_release(rq, dma_info, true);
 	}
@@ -447,7 +441,7 @@ int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
 {
 	int err;
 
-	err = mlx5e_alloc_rx_umr_mpwqe(rq, wqe, ix);
+	err = mlx5e_alloc_rx_umr_mpwqe(rq, ix);
 	if (unlikely(err))
 		return err;
 
 	set_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
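
For illustration only — a minimal, self-contained sketch of the dma_info iterator change, with hypothetical types and a made-up fill() helper in place of the mlx5e calls: the pointer advances together with the loop counter instead of re-indexing the array on each pass, and the error path walks the same pointer back over the entries that were already set up.

#include <stdio.h>

#define PAGES_PER_WQE 4		/* illustrative, not MLX5_MPWRQ_PAGES_PER_WQE */

struct dma_info_sim {
	int page;		/* stand-in for the mapped page */
};

/* made-up stand-in for mlx5e_page_alloc_mapped(); fails on the third entry */
static int fill(struct dma_info_sim *di, int i)
{
	if (i == 2)
		return -1;
	di->page = 1000 + i;
	return 0;
}

static int alloc_all(struct dma_info_sim *arr)
{
	struct dma_info_sim *di = &arr[0];
	int i;

	/* advance di alongside i instead of computing &arr[i] every pass */
	for (i = 0; i < PAGES_PER_WQE; i++, di++) {
		if (fill(di, i))
			goto err_unwind;
	}
	return 0;

err_unwind:
	/* walk the pointer back over the entries that were already filled */
	while (--i >= 0) {
		di--;
		printf("releasing entry %d (page %d)\n", i, di->page);
	}
	return -1;
}

int main(void)
{
	struct dma_info_sim arr[PAGES_PER_WQE] = { { 0 } };

	return alloc_all(arr) ? 1 : 0;
}

At the point of failure the pointer still refers to the entry that was not filled, so decrementing it before each release mirrors the while (--i >= 0) unwind in the patched mlx5e_alloc_rx_umr_mpwqe().
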