Commit e4017570 authored by Matteo Croce, committed by David S. Miller

mvneta: recycle buffers

Use the new recycling API for page_pool.
In a drop rate test, the packet rate increased by 10%,
from 296 Kpps to 326 Kpps.
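
In short: the driver still builds the skb on top of a page_pool
buffer, but instead of releasing the page from the pool it tags the
skb so the page is recycled when the skb is freed. A minimal sketch of
the pattern, modelled on the mvneta changes in the diff below (the
helper name is illustrative, not kernel source; error handling and
ring details are omitted):

/* Illustrative helper showing the recycling pattern; kernel context
 * (linux/skbuff.h, net/page_pool.h) assumed.
 */
static struct sk_buff *rx_build_recyclable_skb(struct page_pool *pool,
					       struct xdp_buff *xdp)
{
	struct sk_buff *skb = build_skb(xdp->data_hard_start, PAGE_SIZE);

	if (!skb)
		return NULL;

	/* Previously: page_pool_release_page(), which unmaps the page
	 * so it falls back to the page allocator once the skb is freed.
	 * Now: mark the skb so the stack hands the page back to the
	 * pool instead, keeping its DMA mapping alive.
	 */
	skb_mark_for_recycle(skb, virt_to_page(xdp->data), pool);

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	skb_put(skb, xdp->data_end - xdp->data);
	return skb;
}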

perf top on a stock system shows:

Overhead  Shared Object     Symbol
  23.66%  [kernel]          [k] __pi___inval_dcache_area
  22.85%  [mvneta]          [k] mvneta_rx_swbm
   7.54%  [kernel]          [k] kmem_cache_alloc
   6.49%  [kernel]          [k] eth_type_trans
   3.94%  [kernel]          [k] dev_gro_receive
   3.91%  [kernel]          [k] __netif_receive_skb_core
   3.91%  [kernel]          [k] kmem_cache_free
   3.76%  [kernel]          [k] page_pool_release_page
   3.56%  [kernel]          [k] free_unref_page
   2.40%  [kernel]          [k] build_skb
   1.49%  [kernel]          [k] skb_release_data
   1.45%  [kernel]          [k] __alloc_pages_bulk
   1.30%  [kernel]          [k] page_frag_free

And this is the same output with recycling enabled:

Overhead  Shared Object     Symbol
  26.41%  [kernel]          [k] __pi___inval_dcache_area
  25.00%  [mvneta]          [k] mvneta_rx_swbm
   8.14%  [kernel]          [k] kmem_cache_alloc
   6.84%  [kernel]          [k] eth_type_trans
   4.44%  [kernel]          [k] __netif_receive_skb_core
   4.38%  [kernel]          [k] kmem_cache_free
   4.16%  [kernel]          [k] dev_gro_receive
   3.21%  [kernel]          [k] page_pool_put_page
   2.41%  [kernel]          [k] build_skb
   1.82%  [kernel]          [k] skb_release_data
   1.61%  [kernel]          [k] napi_gro_receive
   1.25%  [kernel]          [k] page_pool_refill_alloc_cache
   1.16%  [kernel]          [k] __netif_receive_skb_list_core

We can see that page_pool_release_page(), free_unref_page() and
__alloc_pages_bulk() no longer appear in the profile when receiving
traffic: instead of being unmapped and handed back to the page
allocator on skb free, the pages are now returned to the pool (note
page_pool_put_page() in the second profile), so the page allocator's
alloc and free paths drop out of the hot path.

The test was done with mausezahn on the TX side, sending 64-byte raw
Ethernet frames.
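
For reference, this is roughly what the consume side does with a
marked skb. The function names follow the companion page_pool patches
in this series as I understand them, and the details are simplified,
so treat this as a hedged sketch rather than the exact kernel source:

/* Hedged sketch of the skb free path with recycling. For a recyclable
 * head page the stack looks up the page_pool pointer stored in the
 * page by skb_mark_for_recycle()/page_pool_store_mem_info() and
 * returns the page to the pool, DMA mapping intact.
 */
static bool skb_pp_recycle(struct sk_buff *skb, void *data)
{
	if (!skb->pp_recycle)	/* bit set by skb_mark_for_recycle() */
		return false;
	return page_pool_return_skb_page(virt_to_head_page(data));
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag) {
		if (skb_pp_recycle(skb, head))
			return;		/* recycled into the pool cache */
		skb_free_frag(head);	/* old path: page allocator free */
	} else {
		kfree(head);
	}
}

This is why free_unref_page() and __alloc_pages_bulk() disappear from
the profile: recycled pages never take a round trip through the page
allocator, and their DMA mappings are reused across packets.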
Signed-off-by: Matteo Croce <mcroce@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 133637fc
drivers/net/ethernet/marvell/mvneta.c

@@ -2320,7 +2320,7 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
 }
 
 static struct sk_buff *
-mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
+mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
 		      struct xdp_buff *xdp, u32 desc_status)
 {
 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
@@ -2331,7 +2331,7 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 
-	page_pool_release_page(rxq->page_pool, virt_to_page(xdp->data));
+	skb_mark_for_recycle(skb, virt_to_page(xdp->data), pool);
 
 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
 	skb_put(skb, xdp->data_end - xdp->data);
@@ -2343,7 +2343,10 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 				skb_frag_page(frag), skb_frag_off(frag),
 				skb_frag_size(frag), PAGE_SIZE);
-		page_pool_release_page(rxq->page_pool, skb_frag_page(frag));
+		/* We don't need to reset pp_recycle here. It's already set, so
+		 * just mark fragments for recycling.
+		 */
+		page_pool_store_mem_info(skb_frag_page(frag), pool);
 	}
 
 	return skb;
@@ -2425,7 +2428,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 		    mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps))
 			goto next;
 
-		skb = mvneta_swbm_build_skb(pp, rxq, &xdp_buf, desc_status);
+		skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status);
 		if (IS_ERR(skb)) {
 			struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);