Commit f15c5ba5 authored by Emil Tantilov, committed by Jeff Kirsher

ixgbevf: add support for using order 1 pages to receive large frames

Based on commit 8649aaef
("igb: Add support for using order 1 pages to receive large frames")

Add support for using 3K buffers in order 1 pages. We are reserving 1K for
now to have space available for future tailroom and headroom when we enable
build_skb support.
Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
Tested-by: Krishneil Singh <krishneil.k.singh@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent bc04347f
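
For reference, the arithmetic behind the 1K reservation described above, as a small standalone check (illustrative only, not part of the patch; it assumes PAGE_SIZE == 4096):

	#include <assert.h>

	int main(void)
	{
		unsigned int page_size = 4096;            /* assumed PAGE_SIZE */
		unsigned int pg_size = page_size << 1;    /* order-1 page: 8192 bytes */
		unsigned int truesize = pg_size / 2;      /* per-buffer share: 4096 bytes */
		unsigned int bufsz = 3072;                /* IXGBEVF_RXBUFFER_3072 */

		/* two buffers share one order-1 page ... */
		assert(2 * truesize == pg_size);
		/* ... and each keeps 1K spare for future head room and tail room */
		assert(truesize - bufsz == 1024);
		return 0;
	}
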
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -89,17 +89,11 @@ struct ixgbevf_rx_queue_stats {
 };
 
 enum ixgbevf_ring_state_t {
+	__IXGBEVF_RX_3K_BUFFER,
 	__IXGBEVF_TX_DETECT_HANG,
 	__IXGBEVF_HANG_CHECK_ARMED,
 };
 
-#define check_for_tx_hang(ring) \
-	test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
-#define set_check_for_tx_hang(ring) \
-	set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
-#define clear_check_for_tx_hang(ring) \
-	clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
-
 struct ixgbevf_ring {
 	struct ixgbevf_ring *next;
 	struct net_device *netdev;
@@ -156,12 +150,20 @@ struct ixgbevf_ring {
 /* Supported Rx Buffer Sizes */
 #define IXGBEVF_RXBUFFER_256	256	/* Used for packet split */
 #define IXGBEVF_RXBUFFER_2048	2048
+#define IXGBEVF_RXBUFFER_3072	3072
 
 #define IXGBEVF_RX_HDR_SIZE	IXGBEVF_RXBUFFER_256
-#define IXGBEVF_RX_BUFSZ	IXGBEVF_RXBUFFER_2048
 
 #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
 
+#define IXGBEVF_SKB_PAD	(NET_SKB_PAD + NET_IP_ALIGN)
+#if (PAGE_SIZE < 8192)
+#define IXGBEVF_MAX_FRAME_BUILD_SKB \
+	(SKB_WITH_OVERHEAD(IXGBEVF_RXBUFFER_2048) - IXGBEVF_SKB_PAD)
+#else
+#define IXGBEVF_MAX_FRAME_BUILD_SKB	IXGBEVF_RXBUFFER_2048
+#endif
+
 #define IXGBE_TX_FLAGS_CSUM	BIT(0)
 #define IXGBE_TX_FLAGS_VLAN	BIT(1)
 #define IXGBE_TX_FLAGS_TSO	BIT(2)
@@ -170,6 +172,40 @@ struct ixgbevf_ring {
 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0x0000e000
 #define IXGBE_TX_FLAGS_VLAN_SHIFT	16
 
+#define ring_uses_large_buffer(ring) \
+	test_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)
+#define set_ring_uses_large_buffer(ring) \
+	set_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)
+#define clear_ring_uses_large_buffer(ring) \
+	clear_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)
+
+static inline unsigned int ixgbevf_rx_bufsz(struct ixgbevf_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+	if (ring_uses_large_buffer(ring))
+		return IXGBEVF_RXBUFFER_3072;
+#endif
+	return IXGBEVF_RXBUFFER_2048;
+}
+
+static inline unsigned int ixgbevf_rx_pg_order(struct ixgbevf_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+	if (ring_uses_large_buffer(ring))
+		return 1;
+#endif
+	return 0;
+}
+
+#define ixgbevf_rx_pg_size(_ring) (PAGE_SIZE << ixgbevf_rx_pg_order(_ring))
+
+#define check_for_tx_hang(ring) \
+	test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+	set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+	clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+
 struct ixgbevf_ring_container {
 	struct ixgbevf_ring *ring;	/* pointer to linked list of rings */
 	unsigned int total_bytes;	/* total bytes processed this int */
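
Illustrative note (not part of the patch): the helpers above only ever report order 1 when PAGE_SIZE < 8192; on larger pages a 3K buffer already fits in an order-0 page, so the driver keeps 2048-byte buffers and order 0. A minimal standalone sketch of the same selection logic, with the ring state bit reduced to a plain flag:

	#include <stdbool.h>
	#include <stdio.h>

	/* mirrors the intent of ixgbevf_rx_bufsz()/ixgbevf_rx_pg_order() */
	static unsigned int rx_bufsz(unsigned long page_size, bool large_buffer)
	{
		return (page_size < 8192 && large_buffer) ? 3072 : 2048;
	}

	static unsigned int rx_pg_order(unsigned long page_size, bool large_buffer)
	{
		return (page_size < 8192 && large_buffer) ? 1 : 0;
	}

	int main(void)
	{
		/* 4K pages with the 3K flag set: 3072-byte buffers in an 8K page */
		printf("bufsz=%u pg_size=%lu\n",
		       rx_bufsz(4096, true), 4096UL << rx_pg_order(4096, true));
		/* 64K pages: the flag is never set and order 0 is always enough */
		printf("bufsz=%u pg_size=%lu\n",
		       rx_bufsz(65536, false), 65536UL << rx_pg_order(65536, false));
		return 0;
	}
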
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -565,21 +565,22 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
 		return true;
 
 	/* alloc new page for storage */
-	page = dev_alloc_page();
+	page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring));
 	if (unlikely(!page)) {
 		rx_ring->rx_stats.alloc_rx_page_failed++;
 		return false;
 	}
 
 	/* map page for use */
-	dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+				 ixgbevf_rx_pg_size(rx_ring),
 				 DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
 
 	/* if mapping failed free memory back to system since
 	 * there isn't much point in holding memory we can't use
 	 */
 	if (dma_mapping_error(rx_ring->dev, dma)) {
-		__free_page(page);
+		__free_pages(page, ixgbevf_rx_pg_order(rx_ring));
 
 		rx_ring->rx_stats.alloc_rx_page_failed++;
 		return false;
@@ -621,7 +622,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
 		/* sync the buffer for use by the device */
 		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
 						 bi->page_offset,
-						 IXGBEVF_RX_BUFSZ,
+						 ixgbevf_rx_bufsz(rx_ring),
 						 DMA_FROM_DEVICE);
 
 		/* Refresh the desc even if pkt_addr didn't change
@@ -750,13 +751,16 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,
 		return false;
 
 	/* flip page offset to other buffer */
-	rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
+	rx_buffer->page_offset ^= truesize;
 #else
 	/* move offset up to the next cache line */
 	rx_buffer->page_offset += truesize;
 
-	if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
+#define IXGBEVF_LAST_OFFSET \
+	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048)
+
+	if (rx_buffer->page_offset > IXGBEVF_LAST_OFFSET)
 		return false;
 #endif
@@ -797,7 +801,7 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
 	struct page *page = rx_buffer->page;
 	void *va = page_address(page) + rx_buffer->page_offset;
 #if (PAGE_SIZE < 8192)
-	unsigned int truesize = IXGBEVF_RX_BUFSZ;
+	unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
 #else
 	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
 #endif
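
Illustrative note (not part of the patch): on PAGE_SIZE < 8192 systems truesize is always half of the (possibly order-1) page, so buffer reuse simply XOR-flips page_offset between the two halves, 0 <-> 2048 on an order-0 page and 0 <-> 4096 on an order-1 page; the 4096 - 3072 = 1024 bytes of slack per half is the reserved head/tail room. A hypothetical sketch of that flip:

	#include <assert.h>

	/* hypothetical stand-in for the half-page reuse flip above */
	static unsigned int flip_offset(unsigned int page_offset, unsigned int truesize)
	{
		return page_offset ^ truesize;	/* bounce between the two halves */
	}

	int main(void)
	{
		unsigned int truesize = 8192 / 2;	/* order-1 page, two 4K halves */
		unsigned int off = 0;

		off = flip_offset(off, truesize);	/* 0 -> 4096 */
		assert(off == 4096);
		off = flip_offset(off, truesize);	/* 4096 -> 0 */
		assert(off == 0);
		return 0;
	}
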
@@ -888,8 +892,8 @@ static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
 		 * any references we are holding to it
 		 */
 		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
-				     PAGE_SIZE, DMA_FROM_DEVICE,
-				     IXGBEVF_RX_DMA_ATTR);
+				     ixgbevf_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
 		__page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
 	}
@@ -1586,7 +1590,8 @@ static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
 
 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2
 
-static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
+static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter,
+				     struct ixgbevf_ring *ring, int index)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 srrctl;
@@ -1594,7 +1599,10 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
 	srrctl = IXGBE_SRRCTL_DROP_EN;
 
 	srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
-	srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+	if (ring_uses_large_buffer(ring))
+		srrctl |= IXGBEVF_RXBUFFER_3072 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+	else
+		srrctl |= IXGBEVF_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
 
 	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
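
Illustrative note (not part of the patch): SRRCTL expresses the packet buffer size in 1 KB units (IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10 in the ixgbe/ixgbevf headers), so the two branches above program field values of 3 and 2 respectively:

	#include <stdio.h>

	#define IXGBE_SRRCTL_BSIZEPKT_SHIFT	10	/* buffer size in KB units */

	int main(void)
	{
		printf("2048 -> %u\n", 2048U >> IXGBE_SRRCTL_BSIZEPKT_SHIFT);	/* 2 */
		printf("3072 -> %u\n", 3072U >> IXGBE_SRRCTL_BSIZEPKT_SHIFT);	/* 3 */
		return 0;
	}
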
@@ -1766,7 +1774,7 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
 	ring->next_to_use = 0;
 	ring->next_to_alloc = 0;
 
-	ixgbevf_configure_srrctl(adapter, reg_idx);
+	ixgbevf_configure_srrctl(adapter, ring, reg_idx);
 
 	/* allow any size packet since we can handle overflow */
 	rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;
@@ -1778,6 +1786,26 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
 	ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
 }
 
+static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
+				      struct ixgbevf_ring *rx_ring)
+{
+	struct net_device *netdev = adapter->netdev;
+	unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+	/* set build_skb and buffer size flags */
+	clear_ring_uses_large_buffer(rx_ring);
+
+	if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
+		return;
+
+#if (PAGE_SIZE < 8192)
+	if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
+		return;
+
+	set_ring_uses_large_buffer(rx_ring);
+#endif
+}
+
 /**
  * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
  * @adapter: board private structure
@@ -1805,8 +1833,12 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
 	 * the Base and Length of the Rx Descriptor Ring
 	 */
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
+
+		ixgbevf_set_rx_buffer_len(adapter, rx_ring);
+		ixgbevf_configure_rx_ring(adapter, rx_ring);
+	}
 }
 
 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
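
Illustrative note (not part of the patch): ixgbevf_set_rx_buffer_len() switches a ring to 3K buffers only when legacy Rx is disabled and the maximum frame (MTU + Ethernet header + FCS) no longer fits in the 2K build_skb budget. A rough standalone version of that decision, with the skb overhead and padding values assumed for illustration rather than taken from the kernel headers:

	#include <stdbool.h>
	#include <stdio.h>

	/* assumed approximations of SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
	 * and NET_SKB_PAD + NET_IP_ALIGN; the real values are arch-dependent */
	#define ASSUMED_SKB_OVERHEAD	320
	#define ASSUMED_SKB_PAD		64

	static bool wants_large_buffer(unsigned int mtu, bool legacy_rx)
	{
		unsigned int max_frame = mtu + 14 /* ETH_HLEN */ + 4 /* ETH_FCS_LEN */;
		unsigned int build_skb_max = 2048 - ASSUMED_SKB_OVERHEAD - ASSUMED_SKB_PAD;

		if (legacy_rx)
			return false;
		return max_frame > build_skb_max;
	}

	int main(void)
	{
		printf("mtu 1500 -> %d\n", wants_large_buffer(1500, false));	/* 0: 2K is enough */
		printf("mtu 9000 -> %d\n", wants_large_buffer(9000, false));	/* 1: use 3K buffers */
		return 0;
	}
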
@@ -2135,13 +2167,13 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
 		dma_sync_single_range_for_cpu(rx_ring->dev,
 					      rx_buffer->dma,
 					      rx_buffer->page_offset,
-					      IXGBEVF_RX_BUFSZ,
+					      ixgbevf_rx_bufsz(rx_ring),
 					      DMA_FROM_DEVICE);
 
 		/* free resources associated with mapping */
 		dma_unmap_page_attrs(rx_ring->dev,
 				     rx_buffer->dma,
-				     PAGE_SIZE,
+				     ixgbevf_rx_pg_size(rx_ring),
 				     DMA_FROM_DEVICE,
 				     IXGBEVF_RX_DMA_ATTR);