Commit ddc87e7d authored by Maxim Mikityanskiy, committed by Saeed Mahameed

net/mlx5e: Store DMA address inside struct page

Use page_pool_set_dma_addr() to store the DMA address of a page inside
struct page, in order to avoid passing struct mlx5e_dma_info to XDP
handlers. Previously, struct mlx5e_dma_info was used to pass both the
DMA address and the page, and it worked well for the single-fragment
case.
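For illustration only (not part of this patch), here is a minimal sketch of the idea, assuming a page that the driver has already DMA-mapped. The page_pool_set_dma_addr()/page_pool_get_dma_addr() helpers are the ones the patch relies on; the two function names below are made up for the example.

#include <linux/types.h>
#include <net/page_pool.h>

/* Illustrative only: after the driver maps a page, the DMA address is
 * stashed in struct page (the page_pool-managed dma_addr field), so any
 * code that holds just the page can recover the mapping later without a
 * separate struct mlx5e_dma_info wrapper.
 */
static void example_stash_mapping(struct page *page, dma_addr_t addr)
{
        page_pool_set_dma_addr(page, addr);     /* store at map time */
}

static dma_addr_t example_lookup_mapping(struct page *page)
{
        return page_pool_get_dma_addr(page);    /* retrieve at use time */
}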

When XDP multi buffer is in use and a fragmented xdp_frame has to be
transmitted, the driver needs to know the DMA addresses of the fragments;
however, the array of fragments in struct skb_shared_info doesn't contain
them. In order to pass the DMA addresses, the driver stores them in
struct page itself, which is reachable from the array of fragments in
struct skb_shared_info. The existing XDP handlers are modified to remove
the dependency on struct mlx5e_dma_info.
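As a hedged illustration of the multi-buffer case described above (this is not code from the patch, and the helper name walk_frag_dma_addrs is hypothetical), a TX path could recover each fragment's DMA address from its page roughly like this:

#include <linux/skbuff.h>
#include <net/page_pool.h>
#include <net/xdp.h>

/* Hypothetical sketch: walk the fragments of a multi-buffer xdp_frame and
 * derive each fragment's DMA address from its page. The skb_shared_info
 * frag array only stores page, offset and size, so the DMA address has to
 * come from the page itself.
 */
static void walk_frag_dma_addrs(struct xdp_frame *xdpf)
{
        struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
        int i;

        for (i = 0; i < sinfo->nr_frags; i++) {
                skb_frag_t *frag = &sinfo->frags[i];
                dma_addr_t dma = page_pool_get_dma_addr(skb_frag_page(frag)) +
                                 skb_frag_off(frag);
                u32 len = skb_frag_size(frag);

                /* dma and len would be written into a hardware descriptor here. */
                (void)dma;
                (void)len;
        }
}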
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent ea5d49bd
@@ -515,7 +515,7 @@ struct mlx5e_xdp_info {
                 } frame;
                 struct {
                         struct mlx5e_rq *rq;
-                        struct mlx5e_dma_info di;
+                        struct page *page;
                 } page;
         };
 };
@@ -44,10 +44,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget);
 int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
 
 /* RX */
-void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
-void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
-                                struct mlx5e_dma_info *dma_info,
-                                bool recycle);
+void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page);
+void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle);
 INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
 INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
@@ -57,7 +57,7 @@ int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
 
 static inline bool
 mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
-                    struct mlx5e_dma_info *di, struct xdp_buff *xdp)
+                    struct page *page, struct xdp_buff *xdp)
 {
         struct mlx5e_xmit_data xdptxd;
         struct mlx5e_xdp_info xdpi;
@@ -110,13 +110,13 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 
                 xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
 
-                dma_addr = di->addr + (xdpf->data - (void *)xdpf);
+                dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf);
                 dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len,
                                            DMA_TO_DEVICE);
 
                 xdptxd.dma_addr = dma_addr;
                 xdpi.page.rq = rq;
-                xdpi.page.di = *di;
+                xdpi.page.page = page;
         }
 
         return INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
@@ -124,7 +124,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 }
 
 /* returns true if packet was consumed by xdp */
-bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
+bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
                       struct bpf_prog *prog, struct xdp_buff *xdp)
 {
         u32 act;
@@ -135,7 +135,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
         case XDP_PASS:
                 return false;
         case XDP_TX:
-                if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, di, xdp)))
+                if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, page, xdp)))
                         goto xdp_abort;
                 __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
                 return true;
@@ -147,7 +147,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
                 __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
                 __set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
                 if (xdp->rxq->mem.type != MEM_TYPE_XSK_BUFF_POOL)
-                        mlx5e_page_dma_unmap(rq, di);
+                        mlx5e_page_dma_unmap(rq, page);
                 rq->stats->xdp_redirect++;
                 return true;
         default:
@@ -384,7 +384,7 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
                         break;
                 case MLX5E_XDP_XMIT_MODE_PAGE:
                         /* XDP_TX from the regular RQ */
-                        mlx5e_page_release_dynamic(xdpi.page.rq, &xdpi.page.di, recycle);
+                        mlx5e_page_release_dynamic(xdpi.page.rq, xdpi.page.page, recycle);
                         break;
                 case MLX5E_XDP_XMIT_MODE_XSK:
                         /* AF_XDP send */
@@ -47,7 +47,7 @@
 
 struct mlx5e_xsk_param;
 int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
-bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
+bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
                       struct bpf_prog *prog, struct xdp_buff *xdp);
 void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
 bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
@@ -780,7 +780,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
                  * entered, and it's safe to call mlx5e_page_release_dynamic
                  * directly.
                  */
-                mlx5e_page_release_dynamic(rq, dma_info, false);
+                mlx5e_page_release_dynamic(rq, dma_info->page, false);
         }
 
         xdp_rxq_info_unreg(&rq->xdp_rxq);
@@ -222,8 +222,7 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
         return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
 }
 
-static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
-                                      struct mlx5e_dma_info *dma_info)
+static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, struct page *page)
 {
         struct mlx5e_page_cache *cache = &rq->page_cache;
         u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
@@ -234,12 +233,13 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
                 return false;
         }
 
-        if (!dev_page_is_reusable(dma_info->page)) {
+        if (!dev_page_is_reusable(page)) {
                 stats->cache_waive++;
                 return false;
         }
 
-        cache->page_cache[cache->tail] = *dma_info;
+        cache->page_cache[cache->tail].page = page;
+        cache->page_cache[cache->tail].addr = page_pool_get_dma_addr(page);
         cache->tail = tail_next;
         return true;
 }
@@ -287,6 +287,7 @@ static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq,
                 dma_info->page = NULL;
                 return -ENOMEM;
         }
+        page_pool_set_dma_addr(dma_info->page, dma_info->addr);
 
         return 0;
 }
@@ -300,26 +301,27 @@ static inline int mlx5e_page_alloc(struct mlx5e_rq *rq,
         return mlx5e_page_alloc_pool(rq, dma_info);
 }
 
-void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info)
+void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page)
 {
-        dma_unmap_page_attrs(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir,
+        dma_addr_t dma_addr = page_pool_get_dma_addr(page);
+
+        dma_unmap_page_attrs(rq->pdev, dma_addr, PAGE_SIZE, rq->buff.map_dir,
                              DMA_ATTR_SKIP_CPU_SYNC);
+        page_pool_set_dma_addr(page, 0);
 }
 
-void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
-                                struct mlx5e_dma_info *dma_info,
-                                bool recycle)
+void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle)
 {
         if (likely(recycle)) {
-                if (mlx5e_rx_cache_put(rq, dma_info))
+                if (mlx5e_rx_cache_put(rq, page))
                         return;
 
-                mlx5e_page_dma_unmap(rq, dma_info);
-                page_pool_recycle_direct(rq->page_pool, dma_info->page);
+                mlx5e_page_dma_unmap(rq, page);
+                page_pool_recycle_direct(rq->page_pool, page);
         } else {
-                mlx5e_page_dma_unmap(rq, dma_info);
-                page_pool_release_page(rq->page_pool, dma_info->page);
-                put_page(dma_info->page);
+                mlx5e_page_dma_unmap(rq, page);
+                page_pool_release_page(rq->page_pool, page);
+                put_page(page);
         }
 }
@@ -334,7 +336,7 @@ static inline void mlx5e_page_release(struct mlx5e_rq *rq,
                  */
                 xsk_buff_free(dma_info->xsk);
         else
-                mlx5e_page_release_dynamic(rq, dma_info, recycle);
+                mlx5e_page_release_dynamic(rq, dma_info->page, recycle);
 }
 
 static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
@@ -1544,7 +1546,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 
                 net_prefetchw(va); /* xdp_frame data area */
                 mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
-                if (mlx5e_xdp_handle(rq, di, prog, &xdp))
+                if (mlx5e_xdp_handle(rq, di->page, prog, &xdp))
                         return NULL; /* page/packet was consumed by XDP */
 
                 rx_headroom = xdp.data - xdp.data_hard_start;
@@ -1632,7 +1634,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
         di = head_wi->di;
 
         prog = rcu_dereference(rq->xdp_prog);
-        if (prog && mlx5e_xdp_handle(rq, di, prog, &xdp)) {
+        if (prog && mlx5e_xdp_handle(rq, di->page, prog, &xdp)) {
                 if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
                         int i;
@@ -1934,7 +1936,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 
                 net_prefetchw(va); /* xdp_frame data area */
                 mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
-                if (mlx5e_xdp_handle(rq, di, prog, &xdp)) {
+                if (mlx5e_xdp_handle(rq, di->page, prog, &xdp)) {
                         if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
                                 __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
                         return NULL; /* page/packet was consumed by XDP */