Commit 57f05bc2 authored by Yunsheng Lin's avatar Yunsheng Lin Committed by Jakub Kicinski

page_pool: keep pp info as long as page pool owns the page

Currently, page->pp is cleared and set every time the page
is recycled, which is unnecessary.

So only set the page->pp when the page is added to the page
pool and only clear it when the page is released from the
page pool.

This is also preparation for supporting allocation of frag pages
in the page pool.
Reviewed-by: default avatarIlias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: default avatarYunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: default avatarJakub Kicinski <kuba@kernel.org>
parent 2a2b6e36
...@@ -2327,7 +2327,7 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool, ...@@ -2327,7 +2327,7 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
if (!skb) if (!skb)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
skb_mark_for_recycle(skb, virt_to_page(xdp->data), pool); skb_mark_for_recycle(skb);
skb_reserve(skb, xdp->data - xdp->data_hard_start); skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb_put(skb, xdp->data_end - xdp->data); skb_put(skb, xdp->data_end - xdp->data);
...@@ -2339,10 +2339,6 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool, ...@@ -2339,10 +2339,6 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
skb_frag_page(frag), skb_frag_off(frag), skb_frag_page(frag), skb_frag_off(frag),
skb_frag_size(frag), PAGE_SIZE); skb_frag_size(frag), PAGE_SIZE);
/* We don't need to reset pp_recycle here. It's already set, so
* just mark fragments for recycling.
*/
page_pool_store_mem_info(skb_frag_page(frag), pool);
} }
return skb; return skb;
......
...@@ -3995,7 +3995,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, ...@@ -3995,7 +3995,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
} }
if (pp) if (pp)
skb_mark_for_recycle(skb, page, pp); skb_mark_for_recycle(skb);
else else
dma_unmap_single_attrs(dev->dev.parent, dma_addr, dma_unmap_single_attrs(dev->dev.parent, dma_addr,
bm_pool->buf_size, DMA_FROM_DEVICE, bm_pool->buf_size, DMA_FROM_DEVICE,
......
...@@ -431,7 +431,7 @@ static void cpsw_rx_handler(void *token, int len, int status) ...@@ -431,7 +431,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
skb->protocol = eth_type_trans(skb, ndev); skb->protocol = eth_type_trans(skb, ndev);
/* mark skb for recycling */ /* mark skb for recycling */
skb_mark_for_recycle(skb, page, pool); skb_mark_for_recycle(skb);
netif_receive_skb(skb); netif_receive_skb(skb);
ndev->stats.rx_bytes += len; ndev->stats.rx_bytes += len;
......
...@@ -375,7 +375,7 @@ static void cpsw_rx_handler(void *token, int len, int status) ...@@ -375,7 +375,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
skb->protocol = eth_type_trans(skb, ndev); skb->protocol = eth_type_trans(skb, ndev);
/* mark skb for recycling */ /* mark skb for recycling */
skb_mark_for_recycle(skb, page, pool); skb_mark_for_recycle(skb);
netif_receive_skb(skb); netif_receive_skb(skb);
ndev->stats.rx_bytes += len; ndev->stats.rx_bytes += len;
......
...@@ -4712,11 +4712,9 @@ static inline u64 skb_get_kcov_handle(struct sk_buff *skb) ...@@ -4712,11 +4712,9 @@ static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
} }
#ifdef CONFIG_PAGE_POOL #ifdef CONFIG_PAGE_POOL
static inline void skb_mark_for_recycle(struct sk_buff *skb, struct page *page, static inline void skb_mark_for_recycle(struct sk_buff *skb)
struct page_pool *pp)
{ {
skb->pp_recycle = 1; skb->pp_recycle = 1;
page_pool_store_mem_info(page, pp);
} }
#endif #endif
......
...@@ -253,11 +253,4 @@ static inline void page_pool_ring_unlock(struct page_pool *pool) ...@@ -253,11 +253,4 @@ static inline void page_pool_ring_unlock(struct page_pool *pool)
spin_unlock_bh(&pool->ring.producer_lock); spin_unlock_bh(&pool->ring.producer_lock);
} }
/* Store mem_info on struct page and use it while recycling skb frags */
static inline
void page_pool_store_mem_info(struct page *page, struct page_pool *pp)
{
page->pp = pp;
}
#endif /* _NET_PAGE_POOL_H */ #endif /* _NET_PAGE_POOL_H */
...@@ -206,6 +206,19 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page) ...@@ -206,6 +206,19 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
return true; return true;
} }
static void page_pool_set_pp_info(struct page_pool *pool,
struct page *page)
{
page->pp = pool;
page->pp_magic |= PP_SIGNATURE;
}
static void page_pool_clear_pp_info(struct page *page)
{
page->pp_magic = 0;
page->pp = NULL;
}
static struct page *__page_pool_alloc_page_order(struct page_pool *pool, static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
gfp_t gfp) gfp_t gfp)
{ {
...@@ -222,7 +235,7 @@ static struct page *__page_pool_alloc_page_order(struct page_pool *pool, ...@@ -222,7 +235,7 @@ static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
return NULL; return NULL;
} }
page->pp_magic |= PP_SIGNATURE; page_pool_set_pp_info(pool, page);
/* Track how many pages are held 'in-flight' */ /* Track how many pages are held 'in-flight' */
pool->pages_state_hold_cnt++; pool->pages_state_hold_cnt++;
...@@ -266,7 +279,8 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool, ...@@ -266,7 +279,8 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
put_page(page); put_page(page);
continue; continue;
} }
page->pp_magic |= PP_SIGNATURE;
page_pool_set_pp_info(pool, page);
pool->alloc.cache[pool->alloc.count++] = page; pool->alloc.cache[pool->alloc.count++] = page;
/* Track how many pages are held 'in-flight' */ /* Track how many pages are held 'in-flight' */
pool->pages_state_hold_cnt++; pool->pages_state_hold_cnt++;
...@@ -345,7 +359,7 @@ void page_pool_release_page(struct page_pool *pool, struct page *page) ...@@ -345,7 +359,7 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
DMA_ATTR_SKIP_CPU_SYNC); DMA_ATTR_SKIP_CPU_SYNC);
page_pool_set_dma_addr(page, 0); page_pool_set_dma_addr(page, 0);
skip_dma_unmap: skip_dma_unmap:
page->pp_magic = 0; page_pool_clear_pp_info(page);
/* This may be the last page returned, releasing the pool, so /* This may be the last page returned, releasing the pool, so
* it is not safe to reference pool afterwards. * it is not safe to reference pool afterwards.
...@@ -644,7 +658,6 @@ bool page_pool_return_skb_page(struct page *page) ...@@ -644,7 +658,6 @@ bool page_pool_return_skb_page(struct page *page)
* The page will be returned to the pool here regardless of the * The page will be returned to the pool here regardless of the
* 'flipped' fragment being in use or not. * 'flipped' fragment being in use or not.
*/ */
page->pp = NULL;
page_pool_put_full_page(pp, page, false); page_pool_put_full_page(pp, page, false);
return true; return true;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment