Commit 03c66a13 authored by Maciej Fijalkowski, committed by Jeff Kirsher

ice: Introduce bulk update for page count

{get,put}_page() are atomic operations that we use for page count
handling. The current refcount logic increments the count when an skb
carrying data from the first half of the page is passed up to the
network stack, while the second half of the page is recycled. This
protects us from losing the page, since the network stack may drop the
page's refcount when it frees the skb.

Performance can be slightly improved by updating the refcount in bulk
rather than one reference at a time. During buffer initialization,
maximize the page's refcount and do not allow it to drop below two.
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 1857ca42
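
The bookkeeping the diff below introduces is small enough to model outside
the kernel. Here is a minimal userspace sketch of the same idea, with a
plain C11 atomic standing in for the page refcount; the rx_buf_model type
and the buf_* helpers are invented for this illustration and are not part
of the driver.

/*
 * Minimal userspace model of the pagecnt_bias idea (illustrative only;
 * the real driver works on the kernel page refcount via page_ref_add(),
 * page_count() and __page_frag_cache_drain()).
 */
#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct rx_buf_model {
	atomic_uint refcount;	/* stands in for the page's refcount  */
	unsigned short bias;	/* references the driver still "owns" */
};

/* Allocation: take USHRT_MAX references up front with one atomic op. */
static void buf_init(struct rx_buf_model *b)
{
	atomic_init(&b->refcount, 1);			/* as after alloc_page() */
	atomic_fetch_add(&b->refcount, USHRT_MAX - 1);
	b->bias = USHRT_MAX;
}

/* Pulling a buffer for use: spend one pre-taken reference, no atomic. */
static void buf_take(struct rx_buf_model *b)
{
	b->bias--;
}

/* Reuse check: anything beyond the driver's own stock plus the buffer
 * just handed out means another holder still references the page.
 * Restock the bias in bulk before it can reach zero. */
static bool buf_can_reuse(struct rx_buf_model *b)
{
	unsigned int count = atomic_load(&b->refcount);

	if (count - b->bias > 1)
		return false;

	if (b->bias == 1) {
		atomic_fetch_add(&b->refcount, USHRT_MAX - 1);
		b->bias = USHRT_MAX;
	}
	return true;
}

/* Teardown: drop whatever references the driver still holds in bulk. */
static void buf_drain(struct rx_buf_model *b)
{
	atomic_fetch_sub(&b->refcount, b->bias);
	b->bias = 0;
}

int main(void)
{
	struct rx_buf_model b;

	buf_init(&b);
	buf_take(&b);				/* hand one half to the "stack" */
	printf("reusable: %d\n", buf_can_reuse(&b));
	buf_drain(&b);
	return 0;
}

The point of the USHRT_MAX stock is that get_page() no longer has to be
called for every reused buffer: the atomic is only touched when the stock
runs out or when the page is finally drained.
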
@@ -283,7 +283,7 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
 			continue;

 		dma_unmap_page(dev, rx_buf->dma, PAGE_SIZE, DMA_FROM_DEVICE);
-		__free_pages(rx_buf->page, 0);
+		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

 		rx_buf->page = NULL;
 		rx_buf->page_offset = 0;
@@ -423,6 +423,8 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
 	bi->dma = dma;
 	bi->page = page;
 	bi->page_offset = 0;
+	page_ref_add(page, USHRT_MAX - 1);
+	bi->pagecnt_bias = USHRT_MAX;

 	return true;
 }
@@ -509,6 +511,7 @@ static bool ice_page_is_reserved(struct page *page)
 static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf,
 				  unsigned int truesize)
 {
+	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
 	struct page *page = rx_buf->page;

 	/* avoid re-using remote pages */
@@ -517,7 +520,7 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf,
 #if (PAGE_SIZE < 8192)
 	/* if we are only owner of page we can reuse it */
-	if (unlikely(page_count(page) != 1))
+	if (unlikely((page_count(page) - pagecnt_bias) > 1))
 		return false;

 	/* flip page offset to other buffer */
@@ -530,10 +533,14 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf,
 		return false;
 #endif /* PAGE_SIZE < 8192) */

-	/* Even if we own the page, we are not allowed to use atomic_set()
-	 * This would break get_page_unless_zero() users.
+	/* If we have drained the page fragment pool we need to update
+	 * the pagecnt_bias and page count so that we fully restock the
+	 * number of references the driver holds.
 	 */
-	get_page(page);
+	if (unlikely(pagecnt_bias == 1)) {
+		page_ref_add(page, USHRT_MAX - 1);
+		rx_buf->pagecnt_bias = USHRT_MAX;
+	}

 	return true;
 }
@@ -576,11 +583,12 @@ ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
 		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

 		/* page is not reserved, we can reuse buffer as-is */
-		if (likely(!ice_page_is_reserved(page)))
+		if (likely(!ice_page_is_reserved(page))) {
+			rx_buf->pagecnt_bias++;
 			return true;
+		}

 		/* this page cannot be reused so discard it */
-		__free_pages(page, 0);
 		return false;
 	}
@@ -650,6 +658,9 @@ ice_get_rx_buf(struct ice_ring *rx_ring, const unsigned int size)
 				      rx_buf->page_offset, size,
 				      DMA_FROM_DEVICE);

+	/* We have pulled a buffer for use, so decrement pagecnt_bias */
+	rx_buf->pagecnt_bias--;
+
 	return rx_buf;
 }
@@ -703,6 +714,7 @@ ice_fetch_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
 		/* we are not reusing the buffer so unmap it */
 		dma_unmap_page(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
 			       DMA_FROM_DEVICE);
+		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
 	}

 	/* clear contents of buffer_info */
@@ -73,6 +73,7 @@ struct ice_rx_buf {
 	dma_addr_t dma;
 	struct page *page;
 	unsigned int page_offset;
+	u16 pagecnt_bias;
 };

 struct ice_q_stats {
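A rough trace of the accounting implied by the hunks above (illustrative,
not taken from the driver): alloc_page() leaves page_count() at 1, and
page_ref_add(page, USHRT_MAX - 1) raises it to USHRT_MAX with
pagecnt_bias = USHRT_MAX, so page_count() - pagecnt_bias starts at 0.
Each buffer pulled in ice_get_rx_buf() decrements the bias (and
ice_add_rx_frag() gives the reference back when the data is copied
instead of handed to the stack), while each reference the stack releases
drops page_count(). A difference greater than 1 in ice_can_reuse_rx_page()
therefore means some other holder still references the page and it cannot
be flipped; when the bias reaches 1, both counters are restocked with a
single page_ref_add() instead of a get_page() per reused buffer.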