Commit 4f4542bf authored by Alexander Duyck, committed by Jeff Kirsher

ixgbe: Make use of order 1 pages and 3K buffers independent of FCoE

In order to support build_skb with jumbo frames, it will be necessary to use
3K buffers for the Rx path, with 8K pages backing them.  This is needed on
architectures that implement 4K pages, because we can't fit 2K buffers plus
the padding build_skb requires in a 4K page.

On systems that support page sizes larger than 4K, the 3K attribute will
only be applied to FCoE, since we can fall back to using just 2K buffers
and adding the padding.
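
As an aside, the arithmetic behind the buffer sizing can be illustrated with a
small stand-alone sketch.  This is not part of the commit; the headroom, cache
line, and skb_shared_info sizes below are assumed, typical values rather than
the kernel's exact definitions:

	/*
	 * Illustrative sketch only: shows why a 2K receive buffer plus
	 * build_skb overhead cannot fit in half of a 4K page, while a 3K
	 * buffer plus the same overhead fits in half of an order-1 (8K)
	 * page.  All three constants are assumptions for a typical
	 * 64-byte cache line system.
	 */
	#include <stdio.h>

	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
	#define CACHE_LINE	64	/* assumed L1 cache line size */
	#define SKB_PAD		64	/* assumed build_skb headroom */
	#define SHINFO		320	/* assumed skb_shared_info size */

	int main(void)
	{
		unsigned int overhead = SKB_PAD + ALIGN_UP(SHINFO, CACHE_LINE);

		/* Two 2K buffers fill a 4K page: no room for the overhead. */
		printf("2K + overhead = %u bytes vs. 2048-byte half page\n",
		       2048 + overhead);

		/* Two 3K buffers per 8K page leave room in each 4K half. */
		printf("3K + overhead = %u bytes vs. 4096-byte half page\n",
		       3072 + overhead);

		return 0;
	}

With these assumed values, 2048 + 384 = 2432 bytes overflows a 2K half page,
while 3072 + 384 = 3456 bytes fits comfortably in a 4K half of an order-1
page, which is the trade this commit makes on 4K-page architectures.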
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 1b56cf49
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -226,13 +226,14 @@ struct ixgbe_rx_queue_stats {
 #define IXGBE_TS_HDR_LEN 8
 
 enum ixgbe_ring_state_t {
+	__IXGBE_RX_3K_BUFFER,
+	__IXGBE_RX_RSC_ENABLED,
+	__IXGBE_RX_CSUM_UDP_ZERO_ERR,
+	__IXGBE_RX_FCOE,
 	__IXGBE_TX_FDIR_INIT_DONE,
 	__IXGBE_TX_XPS_INIT_DONE,
 	__IXGBE_TX_DETECT_HANG,
 	__IXGBE_HANG_CHECK_ARMED,
-	__IXGBE_RX_RSC_ENABLED,
-	__IXGBE_RX_CSUM_UDP_ZERO_ERR,
-	__IXGBE_RX_FCOE,
 };
 
 struct ixgbe_fwd_adapter {
@@ -344,19 +345,16 @@ struct ixgbe_ring_feature {
  */
 static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
 {
-#ifdef IXGBE_FCOE
-	if (test_bit(__IXGBE_RX_FCOE, &ring->state))
-		return (PAGE_SIZE < 8192) ? IXGBE_RXBUFFER_4K :
-					    IXGBE_RXBUFFER_3K;
-#endif
+	if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
+		return IXGBE_RXBUFFER_3K;
 	return IXGBE_RXBUFFER_2K;
 }
 
 static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
 {
-#ifdef IXGBE_FCOE
-	if (test_bit(__IXGBE_RX_FCOE, &ring->state))
-		return (PAGE_SIZE < 8192) ? 1 : 0;
+#if (PAGE_SIZE < 8192)
+	if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
+		return 1;
 #endif
 	return 0;
 }
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1604,6 +1604,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 	union ixgbe_adv_rx_desc *rx_desc;
 	struct ixgbe_rx_buffer *bi;
 	u16 i = rx_ring->next_to_use;
+	u16 bufsz;
 
 	/* nothing to do */
 	if (!cleaned_count)
@@ -1613,14 +1614,15 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 	bi = &rx_ring->rx_buffer_info[i];
 	i -= rx_ring->count;
 
+	bufsz = ixgbe_rx_bufsz(rx_ring);
+
 	do {
 		if (!ixgbe_alloc_mapped_page(rx_ring, bi))
 			break;
 
 		/* sync the buffer for use by the device */
 		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
-						 bi->page_offset,
-						 ixgbe_rx_bufsz(rx_ring),
+						 bi->page_offset, bufsz,
 						 DMA_FROM_DEVICE);
 
 		/*
@@ -2000,9 +2002,9 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 	struct page *page = rx_buffer->page;
 	unsigned char *va = page_address(page) + rx_buffer->page_offset;
 #if (PAGE_SIZE < 8192)
-	unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
+	unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
 #else
-	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+	unsigned int truesize = SKB_DATA_ALIGN(size);
 #endif
 
 	if (unlikely(skb_is_nonlinear(skb)))
@@ -3866,10 +3868,15 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
 	 */
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		rx_ring = adapter->rx_ring[i];
+
+		clear_ring_rsc_enabled(rx_ring);
+		clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+
 		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
 			set_ring_rsc_enabled(rx_ring);
-		else
-			clear_ring_rsc_enabled(rx_ring);
+
+		if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
+			set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
 	}
 }