Commit 1b56cf49 authored by Alexander Duyck, committed by Jeff Kirsher

ixgbe: Update code to better handle incrementing page count

Batch the page count updates instead of doing them one at a time.  This
improves overall performance because the atomic increments are locked
operations on x86 and can cause stalls.  Doing bulk updates consolidates
those stalls, which should help improve overall receive performance.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Acked-by: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent f3213d93
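The diff below replaces the per-frame page_ref_inc() in the Rx path with a per-buffer pagecnt_bias: each received frame is paid for out of the bias, and the page's atomic reference count is only touched when the bias is exhausted (a single page_ref_add() of USHRT_MAX references) or when the buffer is torn down (__page_frag_cache_drain()). What follows is a minimal, illustrative userspace sketch of that bookkeeping, not the driver code; struct rx_buf, init_buf() and can_reuse_buf() are made-up stand-ins, and C11 atomics model the kernel's page refcount helpers.

/*
 * Illustrative userspace model of the pagecnt_bias batching scheme.
 * C11 atomics stand in for the kernel page refcount; the names below
 * are not part of the ixgbe driver.
 */
#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct rx_buf {
        atomic_uint refcount;   /* models page_count(page) */
        unsigned short bias;    /* models rx_buffer->pagecnt_bias */
};

static void init_buf(struct rx_buf *b)
{
        atomic_init(&b->refcount, 1);   /* fresh page: one reference... */
        b->bias = 1;                    /* ...which the driver owns */
}

/*
 * Per-frame fast path: the reference handed to the consumer is paid for
 * out of the local bias, so no atomic operation is needed here except
 * once every USHRT_MAX frames when the bias runs out.
 */
static bool can_reuse_buf(struct rx_buf *b)
{
        unsigned int bias = b->bias--;

        /* Only reuse the buffer if the driver holds every reference. */
        if (atomic_load(&b->refcount) != bias)
                return false;

        if (bias == 1) {
                /* Bias drained: restock USHRT_MAX references in one add. */
                atomic_fetch_add(&b->refcount, USHRT_MAX);
                b->bias = USHRT_MAX;
        }
        return true;
}

int main(void)
{
        struct rx_buf b;

        init_buf(&b);
        for (int i = 0; i < 4; i++) {
                bool reuse = can_reuse_buf(&b);      /* driver side */
                atomic_fetch_sub(&b.refcount, 1);    /* consumer's put_page() */
                printf("frame %d: reuse=%d refcount=%u bias=%u\n",
                       i, reuse, atomic_load(&b.refcount), (unsigned int)b.bias);
        }

        /* Teardown: hand back all driver-held references at once, which is
         * what __page_frag_cache_drain() does for the page in the diff below. */
        atomic_fetch_sub(&b.refcount, b.bias);
        printf("after drain: refcount=%u\n", atomic_load(&b.refcount));
        return 0;
}

The point of the sketch is that the hot path only decrements a plain integer; the locked atomic operation the commit message mentions happens once per USHRT_MAX frames instead of once per frame. The consumer side (put_page() when the stack frees the skb) still decrements the count atomically; the saving is on the driver's allocation and reuse path.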
@@ -195,7 +195,12 @@ struct ixgbe_rx_buffer {
         struct sk_buff *skb;
         dma_addr_t dma;
         struct page *page;
-        unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+        __u32 page_offset;
+#else
+        __u16 page_offset;
+#endif
+        __u16 pagecnt_bias;
 };
 
 struct ixgbe_queue_stats {
...
@@ -1589,6 +1589,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
         bi->dma = dma;
         bi->page = page;
         bi->page_offset = 0;
+        bi->pagecnt_bias = 1;
 
         return true;
 }
@@ -1943,13 +1944,15 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_ring *rx_ring,
         unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
                                    ixgbe_rx_bufsz(rx_ring);
 #endif
+        unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
+
         /* avoid re-using remote pages */
         if (unlikely(ixgbe_page_is_reserved(page)))
                 return false;
 
 #if (PAGE_SIZE < 8192)
         /* if we are only owner of page we can reuse it */
-        if (unlikely(page_count(page) != 1))
+        if (unlikely(page_count(page) != pagecnt_bias))
                 return false;
 
         /* flip page offset to other buffer */
@@ -1962,10 +1965,14 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_ring *rx_ring,
                 return false;
 #endif
 
-        /* Even if we own the page, we are not allowed to use atomic_set()
-         * This would break get_page_unless_zero() users.
+        /* If we have drained the page fragment pool we need to update
+         * the pagecnt_bias and page count so that we fully restock the
+         * number of references the driver holds.
          */
-        page_ref_inc(page);
+        if (unlikely(pagecnt_bias == 1)) {
+                page_ref_add(page, USHRT_MAX);
+                rx_buffer->pagecnt_bias = USHRT_MAX;
+        }
 
         return true;
 }
@@ -2009,7 +2016,6 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
                 return true;
 
         /* this page cannot be reused so discard it */
-        __free_pages(page, ixgbe_rx_pg_order(rx_ring));
         return false;
 }
@@ -2088,7 +2094,8 @@ static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
         if (ixgbe_add_rx_frag(rx_ring, rx_buffer, size, skb)) {
                 /* hand second half of page back to the ring */
                 ixgbe_reuse_rx_page(rx_ring, rx_buffer);
-        } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
+        } else {
+                if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
                         /* the page has been released from the ring */
                         IXGBE_CB(skb)->page_released = true;
                 } else {
@@ -2098,6 +2105,9 @@ static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
                                              DMA_FROM_DEVICE,
                                              IXGBE_RX_DMA_ATTR);
                 }
+                __page_frag_cache_drain(page,
+                                        rx_buffer->pagecnt_bias);
+        }
 
         /* clear contents of buffer_info */
         rx_buffer->page = NULL;
@@ -4914,7 +4924,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
                                      ixgbe_rx_pg_size(rx_ring),
                                      DMA_FROM_DEVICE,
                                      IXGBE_RX_DMA_ATTR);
-                __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));
+                __page_frag_cache_drain(rx_buffer->page,
+                                        rx_buffer->pagecnt_bias);
 
                 rx_buffer->page = NULL;
         }
...