Commit 78aedd32 authored by Tariq Toukan, committed by Saeed Mahameed

net/mlx5e: Build SKB with exact frag_size

Build the SKB over the received packet instead of the
whole page. Bringing the SKB's linear data and shared_info
closer together improves locality.
In addition, this opens up the possibility of using
other parts of the page in the downstream page-reuse patch.
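
In effect, the second argument to build_skb() changes from the full
page size to the exact buffer footprint. A condensed before/after view
of the call (assembled from the hunks below, not a verbatim excerpt):

	/* before: skb_shared_info lands at the end of the RX page */
	skb = build_skb(va, RQ_PAGE_SIZE(rq));

	/* after: skb_shared_info lands right behind the received packet */
	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
	skb = build_skb(va, frag_size);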

Fixes: 1bfecfca ("net/mlx5e: Build RX SKB on demand")
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 3d091982
@@ -72,6 +72,8 @@
 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6
 
 #define MLX5_RX_HEADROOM NET_SKB_PAD
+#define MLX5_SKB_FRAG_SZ(len)	(SKB_DATA_ALIGN(len) +	\
+				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 
 #define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
 	(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
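
As a sanity check on the new macro, here is a standalone user-space
sketch of the same arithmetic. SKB_DATA_ALIGN() rounds up to
SMP_CACHE_BYTES; the 64-byte cache line, the 320-byte
skb_shared_info, and the headroom and frame sizes below are assumed,
config-dependent values, not taken from this patch, and ALIGN_UP()
and FRAG_SZ() are local stand-ins:

#include <stdio.h>

#define CACHE_LINE	64	/* assumed SMP_CACHE_BYTES */
#define SHINFO_SZ	320	/* assumed sizeof(struct skb_shared_info) */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define FRAG_SZ(len)	(ALIGN_UP((len), CACHE_LINE) + \
			 ALIGN_UP(SHINFO_SZ, CACHE_LINE))

int main(void)
{
	unsigned int headroom = 64;	/* assumed NET_SKB_PAD */
	unsigned int bcnt = 1514;	/* full-size Ethernet frame */

	/* 1578 rounds up to 1600, plus 320 for shared_info -> 1920 */
	printf("frag_size = %u\n", FRAG_SZ(headroom + bcnt));
	return 0;
}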
@@ -639,11 +639,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 		byte_count = rq->buff.wqe_sz;
 
 		/* calc the required page order */
-		frag_sz = rq->rx_headroom +
-			  byte_count /* packet data */ +
-			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-		frag_sz = SKB_DATA_ALIGN(frag_sz);
-
+		frag_sz = MLX5_SKB_FRAG_SZ(rq->rx_headroom + byte_count);
 		npages = DIV_ROUND_UP(frag_sz, PAGE_SIZE);
 		rq->buff.page_order = order_base_2(npages);
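
The page-order computation right after it is then straightforward; a
minimal sketch with local stand-ins for the kernel's DIV_ROUND_UP()
and order_base_2() helpers (the 4 KB page size and the 1920-byte
frag_sz are assumptions carried over from the sketch above):

#include <stdio.h>

#define PAGE_SIZE		4096	/* assumed page size */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* smallest order such that 2^order >= n, like the kernel helper */
static unsigned int order_base_2(unsigned int n)
{
	unsigned int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned int frag_sz = 1920;	/* from the sketch above */
	unsigned int npages = DIV_ROUND_UP(frag_sz, PAGE_SIZE);

	/* one 4 KB page suffices, so the RQ allocates order-0 pages */
	printf("npages = %u, page_order = %u\n", npages, order_base_2(npages));
	return 0;
}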
@@ -740,6 +740,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 	void *va, *data;
 	u16 rx_headroom = rq->rx_headroom;
 	bool consumed;
+	u32 frag_size;
 
 	di = &rq->dma_info[wqe_counter];
 	va = page_address(di->page);
@@ -764,7 +765,8 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 	if (consumed)
 		return NULL; /* page/packet was consumed by XDP */
 
-	skb = build_skb(va, RQ_PAGE_SIZE(rq));
+	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
+	skb = build_skb(va, frag_size);
 	if (unlikely(!skb)) {
 		rq->stats.buff_alloc_err++;
 		mlx5e_page_release(rq, di, true);
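
build_skb() carves skb_shared_info off the tail of whatever size it is
handed, so passing the exact frag_size instead of RQ_PAGE_SIZE(rq)
moves shared_info from the end of the page to just behind the packet
and leaves the tail of the page untouched, which is what the
downstream page-reuse patch builds on. A rough sketch of the resulting
layout, reusing the assumed values from the sketches above:

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096;	/* assumed page size */
	unsigned int frag_size = 1920;	/* headroom + frame + shared_info */
	unsigned int shinfo_sz = 320;	/* assumed aligned shared_info size */

	/* shared_info sits at the end of the region given to build_skb() */
	printf("shared_info offset: old = %u, new = %u\n",
	       page_size - shinfo_sz, frag_size - shinfo_sz);
	printf("page tail left for reuse: %u bytes\n",
	       page_size - frag_size);
	return 0;
}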