Commit 1d032bc7 authored by Maciej Fijalkowski, committed by Jeff Kirsher

ice: Gather the rx buf clean-up logic for better reuse

Pull out the code responsible for page counting and buffer recycling so
that it will be possible to clean up the Rx buffers in cases where we
won't allocate an skb (e.g. XDP).
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 03c66a13
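
Background note: the change builds on the driver's existing half-page recycling scheme. With 4 KB pages, each page backs two 2048-byte Rx buffers, and after a buffer is consumed its offset is XOR-flipped so the other half can be handed back to the ring; on larger pages the offset simply advances. The stand-alone sketch below (hypothetical demo_* names, not driver code) illustrates the offset arithmetic that the new ice_rx_buf_adjust_pg_offset() helper in the patch performs for PAGE_SIZE < 8192.

    #include <stdio.h>

    #define DEMO_RXBUF_2048 2048u          /* stand-in for ICE_RXBUF_2048 */

    struct demo_rx_buf {
            unsigned int page_offset;      /* offset of this buffer within its page */
    };

    /* mirrors the PAGE_SIZE < 8192 branch: flip between the two halves of the page */
    static void demo_adjust_pg_offset(struct demo_rx_buf *buf, unsigned int size)
    {
            buf->page_offset ^= size;
    }

    int main(void)
    {
            struct demo_rx_buf buf = { .page_offset = 0 };

            demo_adjust_pg_offset(&buf, DEMO_RXBUF_2048);
            printf("after first use:  offset = %u\n", buf.page_offset);  /* 2048 */
            demo_adjust_pg_offset(&buf, DEMO_RXBUF_2048);
            printf("after second use: offset = %u\n", buf.page_offset);  /* 0 */
            return 0;
    }
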
@@ -498,19 +498,42 @@ static bool ice_page_is_reserved(struct page *page)
 	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
 }
 
+/**
+ * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
+ * @rx_buf: Rx buffer to adjust
+ * @size: Size of adjustment
+ *
+ * Update the offset within page so that Rx buf will be ready to be reused.
+ * For systems with PAGE_SIZE < 8192 this function will flip the page offset
+ * so the second half of page assigned to Rx buffer will be used, otherwise
+ * the offset is moved by the @size bytes
+ */
+static void
+ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
+{
+#if (PAGE_SIZE < 8192)
+	/* flip page offset to other buffer */
+	rx_buf->page_offset ^= size;
+#else
+	/* move offset up to the next cache line */
+	rx_buf->page_offset += size;
+#endif
+}
+
 /**
  * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
  * @rx_buf: buffer containing the page
- * @truesize: the offset that needs to be applied to page
  *
  * If page is reusable, we have a green light for calling ice_reuse_rx_page,
  * which will assign the current buffer to the buffer that next_to_alloc is
  * pointing to; otherwise, the DMA mapping needs to be destroyed and
  * page freed
  */
-static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf,
-				  unsigned int truesize)
+static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
 {
+#if (PAGE_SIZE >= 8192)
+	unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
+#endif
 	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
 	struct page *page = rx_buf->page;
 
@@ -522,14 +545,8 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf,
 	/* if we are only owner of page we can reuse it */
 	if (unlikely((page_count(page) - pagecnt_bias) > 1))
 		return false;
-
-	/* flip page offset to other buffer */
-	rx_buf->page_offset ^= truesize;
 #else
-	/* move offset up to the next cache line */
-	rx_buf->page_offset += truesize;
-
-	if (rx_buf->page_offset > PAGE_SIZE - ICE_RXBUF_2048)
+	if (rx_buf->page_offset > last_offset)
 		return false;
 #endif /* PAGE_SIZE < 8192) */
 
@@ -556,10 +573,9 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf,
  * less than the skb header size, otherwise it will just attach the page as
  * a frag to the skb.
  *
- * The function will then update the page offset if necessary and return
- * true if the buffer can be reused by the adapter.
+ * The function will then update the page offset
  */
-static bool
+static void
 ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
 		unsigned int size)
 {
@@ -582,14 +598,8 @@ ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
 	if (size <= ICE_RX_HDR_SIZE) {
 		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
-		/* page is not reserved, we can reuse buffer as-is */
-		if (likely(!ice_page_is_reserved(page))) {
-			rx_buf->pagecnt_bias++;
-			return true;
-		}
-
-		/* this page cannot be reused so discard it */
-		return false;
+		rx_buf->pagecnt_bias++;
+		return;
 	}
 
 	/* we need the header to contain the greater of either ETH_HLEN or
@@ -610,8 +620,7 @@ ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
 add_tail_frag:
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 			(unsigned long)va & ~PAGE_MASK, size, truesize);
-
-	return ice_can_reuse_rx_page(rx_buf, truesize);
+	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
 }
 
 /**
@@ -697,6 +706,7 @@ ice_fetch_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
 				       GFP_ATOMIC | __GFP_NOWARN);
 		if (unlikely(!skb)) {
 			rx_ring->rx_stats.alloc_buf_failed++;
+			rx_buf->pagecnt_bias++;
 			return NULL;
 		}
 
@@ -706,8 +716,23 @@ ice_fetch_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
 	}
 
 	/* pull page into skb */
-	if (ice_add_rx_frag(rx_buf, skb, size)) {
-		/* hand second half of page back to the ring */
+	ice_add_rx_frag(rx_buf, skb, size);
+
+	return skb;
+}
+
+/**
+ * ice_put_rx_buf - Clean up used buffer and either recycle or free
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @rx_buf: Rx buffer to pull data from
+ *
+ * This function will clean up the contents of the rx_buf. It will
+ * either recycle the buffer or unmap it and free the associated resources.
+ */
+static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
+{
+	/* hand second half of page back to the ring */
+	if (ice_can_reuse_rx_page(rx_buf)) {
 		ice_reuse_rx_page(rx_ring, rx_buf);
 		rx_ring->rx_stats.page_reuse_count++;
 	} else {
@@ -719,8 +744,6 @@ ice_fetch_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
 
 	/* clear contents of buffer_info */
 	rx_buf->page = NULL;
-
-	return skb;
 }
 
 /**
@@ -1007,6 +1030,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
 		if (!skb)
 			break;
 
+		ice_put_rx_buf(rx_ring, rx_buf);
 		cleaned_count++;
 
 		/* skip if it is NOP desc */
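
After the patch, the per-descriptor clean-up in ice_clean_rx_irq() has a simpler shape: ice_fetch_rx_buf()/ice_add_rx_frag() only build the skb and bump pagecnt_bias or the page offset, while the recycle-or-free decision is concentrated in the new ice_put_rx_buf(). The following is a rough, driver-independent sketch of that split; all demo_* names and fields are invented for illustration and are not part of the ice driver.

    #include <stdbool.h>
    #include <stdio.h>

    struct demo_rx_buf {
            bool page_shared;              /* stand-in for the page_count()/pagecnt_bias test */
    };

    struct demo_rx_stats {
            unsigned int page_reuse_count;
            unsigned int page_free_count;
    };

    static bool demo_can_reuse_rx_page(const struct demo_rx_buf *rx_buf)
    {
            return !rx_buf->page_shared;
    }

    /* analogue of the new ice_put_rx_buf(): recycle or free, separated from skb building */
    static void demo_put_rx_buf(struct demo_rx_buf *rx_buf, struct demo_rx_stats *stats)
    {
            if (demo_can_reuse_rx_page(rx_buf))
                    stats->page_reuse_count++;     /* half-page goes back to the ring */
            else
                    stats->page_free_count++;      /* would unmap the DMA mapping and free the page */
    }

    int main(void)
    {
            struct demo_rx_buf bufs[3] = { { false }, { true }, { false } };
            struct demo_rx_stats stats = { 0, 0 };
            int i;

            /* loop shape after the patch: build the skb (omitted here), then put the buffer */
            for (i = 0; i < 3; i++)
                    demo_put_rx_buf(&bufs[i], &stats);

            printf("reused=%u freed=%u\n", stats.page_reuse_count, stats.page_free_count);
            return 0;
    }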