Commit 707f908e authored by Maxim Mikityanskiy, committed by Jakub Kicinski

net/mlx5e: Optimize the page cache reducing its size 2x

The RX page cache stores dma_info structs, which consist of a pointer to
struct page and a DMA address. In fact, the DMA address is extracted
from struct page via page_pool_get_dma_addr when a page is pushed to
the cache. By moving this call to the point where a page is popped from
the cache, we avoid storing the DMA address in the cache entirely,
halving its size without losing any functionality.
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 0b9c86c7
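A minimal, self-contained userspace sketch of the pattern this commit applies, for illustration only (not the driver code): the ring stores bare page pointers, and the DMA address is derived only when a page is popped. The names demo_page, demo_cache, demo_get_dma_addr, cache_put, and cache_get are hypothetical stand-ins for struct page, struct mlx5e_page_cache, page_pool_get_dma_addr, mlx5e_rx_cache_put, and mlx5e_rx_cache_get.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CACHE_SIZE 8	/* power of two, like MLX5E_CACHE_SIZE */

/* Stand-in for struct page; the real page pool keeps the DMA address
 * inside struct page, so the cache need not store it a second time.
 */
struct demo_page {
	uint64_t dma_addr;
};

/* After the commit: the ring holds bare pointers, half the size of a
 * ring of { page, addr } pairs.
 */
struct demo_cache {
	uint32_t head;
	uint32_t tail;
	struct demo_page *ring[CACHE_SIZE];
};

/* Stand-in for page_pool_get_dma_addr(): a plain field read. */
static uint64_t demo_get_dma_addr(struct demo_page *page)
{
	return page->dma_addr;
}

static bool cache_put(struct demo_cache *c, struct demo_page *page)
{
	uint32_t tail_next = (c->tail + 1) & (CACHE_SIZE - 1);

	if (tail_next == c->head)
		return false;		/* ring full */
	c->ring[c->tail] = page;	/* pointer only, no address lookup */
	c->tail = tail_next;
	return true;
}

static bool cache_get(struct demo_cache *c, struct demo_page **page,
		      uint64_t *addr)
{
	if (c->head == c->tail)
		return false;		/* ring empty */
	*page = c->ring[c->head];
	*addr = demo_get_dma_addr(*page); /* derived on pop, not stored */
	c->head = (c->head + 1) & (CACHE_SIZE - 1);
	return true;
}

int main(void)
{
	struct demo_cache c = { 0 };
	struct demo_page p = { .dma_addr = 0x1000 };
	struct demo_page *out;
	uint64_t addr;

	cache_put(&c, &p);
	if (cache_get(&c, &out, &addr))
		printf("popped page, dma addr 0x%llx\n",
		       (unsigned long long)addr);
	return 0;
}

This works because the page pool already tracks the DMA mapping in struct page, so reading the address on pop is as cheap as reading it on push; nothing is lost by deferring the lookup.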
@@ -630,7 +630,7 @@ struct mlx5e_mpw_info {
 struct mlx5e_page_cache {
 	u32 head;
 	u32 tail;
-	struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
+	struct page *page_cache[MLX5E_CACHE_SIZE];
 };
 
 struct mlx5e_rq;
...
@@ -830,13 +830,11 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
 	for (i = rq->page_cache.head; i != rq->page_cache.tail;
 	     i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
-		struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];
-
 		/* With AF_XDP, page_cache is not used, so this loop is not
 		 * entered, and it's safe to call mlx5e_page_release_dynamic
 		 * directly.
 		 */
-		mlx5e_page_release_dynamic(rq, dma_info->page, false);
+		mlx5e_page_release_dynamic(rq, rq->page_cache.page_cache[i], false);
 	}
 
 	xdp_rxq_info_unreg(&rq->xdp_rxq);
...
@@ -245,8 +245,7 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, struct page *page)
 		return false;
 	}
 
-	cache->page_cache[cache->tail].page = page;
-	cache->page_cache[cache->tail].addr = page_pool_get_dma_addr(page);
+	cache->page_cache[cache->tail] = page;
 	cache->tail = tail_next;
 	return true;
 }
@@ -262,12 +261,13 @@ static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
 		return false;
 	}
 
-	if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
+	if (page_ref_count(cache->page_cache[cache->head]) != 1) {
 		stats->cache_busy++;
 		return false;
 	}
 
-	*dma_info = cache->page_cache[cache->head];
+	dma_info->page = cache->page_cache[cache->head];
+	dma_info->addr = page_pool_get_dma_addr(dma_info->page);
 	cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
 	stats->cache_reuse++;
...