Commit acd7628d authored by Eric Dumazet, committed by David S. Miller

mlx4: reduce rx ring page_cache size

We only need to store the page and dma address.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d85f6c14
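
To put a rough number on the saving: below is a minimal userspace sketch, not part of the commit, that models the old and new cache-entry layouts with stand-in types (old_entry/new_entry are hypothetical names) and prints the per-ring buf[] footprint. It assumes a 64-bit build and the usual NAPI_POLL_WEIGHT of 64.

/*
 * Sketch only: stand-in layouts for the old struct mlx4_en_rx_alloc
 * entry and the new {page, dma} entry, sized as on a 64-bit build.
 */
#include <stdio.h>

#define NAPI_POLL_WEIGHT	64
#define MLX4_EN_CACHE_SIZE	(2 * NAPI_POLL_WEIGHT)

struct old_entry {			/* stand-in for struct mlx4_en_rx_alloc */
	void *page;			/* struct page * */
	unsigned long long dma;		/* dma_addr_t */
	unsigned int page_offset;
	unsigned int page_size;
};

struct new_entry {			/* stand-in for the new anonymous struct */
	void *page;
	unsigned long long dma;
};

int main(void)
{
	printf("old buf[]: %zu bytes\n",
	       sizeof(struct old_entry) * MLX4_EN_CACHE_SIZE);	/* 3072 */
	printf("new buf[]: %zu bytes\n",
	       sizeof(struct new_entry) * MLX4_EN_CACHE_SIZE);	/* 2048 */
	return 0;
}

Under those assumptions each entry shrinks from 24 to 16 bytes, so the 128-entry buf[] drops from 3072 to 2048 bytes per RX ring.
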
@@ -250,7 +250,10 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
 		(index << priv->log_rx_info);
 
 	if (ring->page_cache.index > 0) {
-		frags[0] = ring->page_cache.buf[--ring->page_cache.index];
+		ring->page_cache.index--;
+		frags[0].page = ring->page_cache.buf[ring->page_cache.index].page;
+		frags[0].dma = ring->page_cache.buf[ring->page_cache.index].dma;
+		frags[0].page_offset = XDP_PACKET_HEADROOM;
 		rx_desc->data[0].addr = cpu_to_be64(frags[0].dma +
 						    frags[0].page_offset);
 		return 0;
@@ -537,7 +540,9 @@ bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
 	if (cache->index >= MLX4_EN_CACHE_SIZE)
 		return false;
 
-	cache->buf[cache->index++] = *frame;
+	cache->buf[cache->index].page = frame->page;
+	cache->buf[cache->index].dma = frame->dma;
+	cache->index++;
 	return true;
 }
@@ -567,11 +572,9 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
 	int i;
 
 	for (i = 0; i < ring->page_cache.index; i++) {
-		struct mlx4_en_rx_alloc *frame = &ring->page_cache.buf[i];
-
-		dma_unmap_page(priv->ddev, frame->dma, frame->page_size,
-			       priv->dma_dir);
-		put_page(frame->page);
+		dma_unmap_page(priv->ddev, ring->page_cache.buf[i].dma,
+			       PAGE_SIZE, priv->dma_dir);
+		put_page(ring->page_cache.buf[i].page);
 	}
 	ring->page_cache.index = 0;
 	mlx4_en_free_rx_buf(priv, ring);
...
@@ -354,8 +354,6 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
 	struct mlx4_en_rx_alloc frame = {
 		.page = tx_info->page,
 		.dma = tx_info->map0_dma,
-		.page_offset = XDP_PACKET_HEADROOM,
-		.page_size = PAGE_SIZE,
 	};
 
 	if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
...
@@ -268,9 +268,13 @@ struct mlx4_en_rx_alloc {
 };
 
 #define MLX4_EN_CACHE_SIZE (2 * NAPI_POLL_WEIGHT)
+
 struct mlx4_en_page_cache {
 	u32 index;
-	struct mlx4_en_rx_alloc buf[MLX4_EN_CACHE_SIZE];
+	struct {
+		struct page *page;
+		dma_addr_t dma;
+	} buf[MLX4_EN_CACHE_SIZE];
 };
 
 struct mlx4_en_priv;