Commit a5a0c590 authored by Tariq Toukan, committed by David S. Miller

net/mlx5e: Introduce API for RX mapped pages

Manage the allocation and deallocation of mapped RX pages only
through dedicated API functions.
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7e426671
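
For readers skimming the diff, a minimal user-space sketch of the pattern being introduced follows: pair every "allocate and map" with a single dedicated release helper, and make callers roll back through that helper on partial failure. The names and the malloc()-based stand-ins below are hypothetical illustrations, not driver code; the real helpers are mlx5e_page_alloc_mapped() and mlx5e_page_release() in the diff that follows.

/*
 * Illustrative user-space analogue of the pattern this patch applies: all
 * acquisition and release of a mapped buffer goes through one dedicated
 * helper pair, so every caller shares the same error handling and teardown.
 * Names (buf_alloc_mapped/buf_release, struct buf_info) are hypothetical;
 * malloc() stands in for dev_alloc_page() + dma_map_page() here.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf_info {
	void *ptr;	/* stands in for dma_info->page */
	size_t len;	/* stands in for the mapped length (PAGE_SIZE) */
};

/* Acquire and "map" a buffer; on failure, leave *info untouched. */
static int buf_alloc_mapped(struct buf_info *info, size_t len)
{
	void *ptr = malloc(len);

	if (!ptr)
		return -1;

	memset(ptr, 0, len);	/* stands in for making the mapping usable */
	info->ptr = ptr;
	info->len = len;
	return 0;
}

/* Exact inverse of buf_alloc_mapped(); the only place buffers are freed. */
static void buf_release(struct buf_info *info)
{
	free(info->ptr);
	info->ptr = NULL;
}

int main(void)
{
	struct buf_info bufs[4];
	int i, err;

	/* Mirror of the WQE-allocation loop: acquire each entry through the
	 * helper, and on failure roll back the entries acquired so far. */
	for (i = 0; i < 4; i++) {
		err = buf_alloc_mapped(&bufs[i], 4096);
		if (err)
			goto err_unwind;
	}

	printf("allocated %d mapped buffers\n", i);

	/* Mirror of the free path: release through the same helper. */
	for (i = 0; i < 4; i++)
		buf_release(&bufs[i]);
	return 0;

err_unwind:
	while (--i >= 0)
		buf_release(&bufs[i]);
	return 1;
}
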
@@ -305,26 +305,32 @@ static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
 	mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
 }
 
-static inline int mlx5e_alloc_and_map_page(struct mlx5e_rq *rq,
-					   struct mlx5e_mpw_info *wi,
-					   int i)
+static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
+					  struct mlx5e_dma_info *dma_info)
 {
 	struct page *page = dev_alloc_page();
 
 	if (unlikely(!page))
 		return -ENOMEM;
 
-	wi->umr.dma_info[i].page = page;
-	wi->umr.dma_info[i].addr = dma_map_page(rq->pdev, page, 0, PAGE_SIZE,
-						PCI_DMA_FROMDEVICE);
-	if (unlikely(dma_mapping_error(rq->pdev, wi->umr.dma_info[i].addr))) {
+	dma_info->page = page;
+	dma_info->addr = dma_map_page(rq->pdev, page, 0, PAGE_SIZE,
+				      DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
 		put_page(page);
 		return -ENOMEM;
 	}
-	wi->umr.mtt[i] = cpu_to_be64(wi->umr.dma_info[i].addr | MLX5_EN_WR);
 
 	return 0;
 }
 
+static inline void mlx5e_page_release(struct mlx5e_rq *rq,
+				      struct mlx5e_dma_info *dma_info)
+{
+	dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, DMA_FROM_DEVICE);
+	put_page(dma_info->page);
+}
+
 static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
 				    struct mlx5e_rx_wqe *wqe,
 				    u16 ix)
@@ -336,10 +342,13 @@ static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
 	int i;
 
 	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
-		err = mlx5e_alloc_and_map_page(rq, wi, i);
+		struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
+
+		err = mlx5e_page_alloc_mapped(rq, dma_info);
 		if (unlikely(err))
 			goto err_unmap;
-		page_ref_add(wi->umr.dma_info[i].page, pg_strides);
+		wi->umr.mtt[i] = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
+		page_ref_add(dma_info->page, pg_strides);
 		wi->skbs_frags[i] = 0;
 	}
@@ -350,10 +359,10 @@ static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
 
 err_unmap:
 	while (--i >= 0) {
-		dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
-			       PCI_DMA_FROMDEVICE);
-		page_ref_sub(wi->umr.dma_info[i].page, pg_strides);
-		put_page(wi->umr.dma_info[i].page);
+		struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
+
+		page_ref_sub(dma_info->page, pg_strides);
+		mlx5e_page_release(rq, dma_info);
 	}
 
 	return err;
@@ -365,11 +374,10 @@ void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
 	int i;
 
 	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
-		dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
-			       PCI_DMA_FROMDEVICE);
-		page_ref_sub(wi->umr.dma_info[i].page,
-			     pg_strides - wi->skbs_frags[i]);
-		put_page(wi->umr.dma_info[i].page);
+		struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
+
+		page_ref_sub(dma_info->page, pg_strides - wi->skbs_frags[i]);
+		mlx5e_page_release(rq, dma_info);
 	}
 }