Commit a064c609 authored by Maxim Mikityanskiy, committed by Jakub Kicinski

net/mlx5e: Introduce wqe_index_mask for legacy RQ

When fragments of different WQEs share the same page, mlx5e_post_rx_wqes
must wait until the old WQE stops using the page; only then can the new
WQE allocate the new page. Essentially, it means that if WQE index i is
still in use, the allocation must stop before `i % bulk`, where bulk is
the number of WQEs that may share the same page.

As bulk is always a power of two, `i % bulk = i & (bulk - 1)`, and the
new wqe_index_mask field will be equal to `bulk - 1`.
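For illustration only (not driver code, hypothetical names): because bulk is a power of two, masking with `bulk - 1` is equivalent to the modulo, which is why a single mask byte is enough. A minimal standalone check of that equivalence:

```c
#include <assert.h>

/* Illustration: for a power-of-two bulk, i % bulk == (i & (bulk - 1)). */
static void check_wqe_index_mask(unsigned int bulk)
{
	unsigned int wqe_index_mask = bulk - 1;	/* what the new field stores */
	unsigned int i;

	for (i = 0; i < 4 * bulk; i++)
		assert(i % bulk == (i & wqe_index_mask));
}
```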

At the same time, wqe_bulk remains for optimization purposes and stores
`max(bulk, 8)`, which allows skipping the allocation until at least
8 WQEs are free.
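As a conceptual sketch only (hypothetical helpers, not the mlx5e implementation): if a WQE index is still in use, the first index that must not be allocated is the start of its page group, i.e. the index with the low `wqe_index_mask` bits cleared, while `wqe_bulk` merely batches the refill.

```c
#include <stdint.h>

/* Hypothetical helper, not mlx5e code: if WQE index `busy` is still in use,
 * allocation on the cyclic ring must stop before the first WQE of busy's
 * page group (index k*N in the commit's terms), because that WQE is the
 * one responsible for allocating the page that `busy` is still using.
 */
static uint16_t alloc_stop_index(uint16_t busy, uint8_t wqe_index_mask)
{
	return busy & ~(uint16_t)wqe_index_mask;
}

/* Hypothetical batching gate mirroring wqe_bulk = max(bulk, 8): skip the
 * refill entirely until at least wqe_bulk slots are free.
 */
static int worth_refilling(uint16_t free_slots, uint8_t wqe_bulk)
{
	return free_slots >= wqe_bulk;
}
```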
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 8cbcafce
@@ -660,6 +660,7 @@ struct mlx5e_rq_frags_info {
 	u8 num_frags;
 	u8 log_num_frags;
 	u8 wqe_bulk;
+	u8 wqe_index_mask;
 };
 
 struct mlx5e_dma_info {
@@ -586,7 +586,14 @@ static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
 		info->arr[0].frag_size = byte_count;
 		info->arr[0].frag_stride = frag_stride;
 		info->num_frags = 1;
-		info->wqe_bulk = PAGE_SIZE / frag_stride;
+
+		/* N WQEs share the same page, N = PAGE_SIZE / frag_stride. The
+		 * first WQE in the page is responsible for allocation of this
+		 * page, this WQE's index is k*N. If WQEs [k*N+1; k*N+N-1] are
+		 * still not completed, the allocation must stop before k*N.
+		 */
+		info->wqe_index_mask = (PAGE_SIZE / frag_stride) - 1;
+
 		goto out;
 	}
 
@@ -635,11 +642,21 @@ static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
 		i++;
 	}
 	info->num_frags = i;
-	/* number of different wqes sharing a page */
-	info->wqe_bulk = 1 + (info->num_frags % 2);
+
+	/* The last fragment of WQE with index 2*N may share the page with the
+	 * first fragment of WQE with index 2*N+1 in certain cases. If WQE 2*N+1
+	 * is not completed yet, WQE 2*N must not be allocated, as it's
+	 * responsible for allocating a new page.
+	 */
+	info->wqe_index_mask = info->num_frags % 2;
+
 out:
-	info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
+	/* Bulking optimization to skip allocation until at least 8 WQEs can be
+	 * allocated in a row. At the same time, never start allocation when
+	 * the page is still used by older WQEs.
+	 */
+	info->wqe_bulk = max_t(u8, info->wqe_index_mask + 1, 8);
+
 	info->log_num_frags = order_base_2(info->num_frags);
 
 	return 0;