Commit 69d3ca53 authored by Alexander Duyck, committed by David S. Miller

igb: optimize/refactor receive path

While cleaning up the skb_over_panic with small frames I found there was
room for improvement in the ordering of operations within the rx receive
flow.  These changes move the prefetch for the next descriptor to a
point earlier in the rx path.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3e450669
...@@ -3803,6 +3803,7 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, ...@@ -3803,6 +3803,7 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
unsigned int total_bytes = 0, total_packets = 0; unsigned int total_bytes = 0, total_packets = 0;
i = rx_ring->next_to_clean; i = rx_ring->next_to_clean;
buffer_info = &rx_ring->buffer_info[i];
rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
staterr = le32_to_cpu(rx_desc->wb.upper.status_error); staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
...@@ -3810,25 +3811,22 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, ...@@ -3810,25 +3811,22 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
if (*work_done >= budget) if (*work_done >= budget)
break; break;
(*work_done)++; (*work_done)++;
buffer_info = &rx_ring->buffer_info[i];
/* HW will not DMA in data larger than the given buffer, even skb = buffer_info->skb;
* if it parses the (NFS, of course) header to be larger. In prefetch(skb->data - NET_IP_ALIGN);
* that case, it fills the header buffer and spills the rest buffer_info->skb = NULL;
* into the page.
*/ i++;
hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) & if (i == rx_ring->count)
E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; i = 0;
if (hlen > adapter->rx_ps_hdr_size) next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
hlen = adapter->rx_ps_hdr_size; prefetch(next_rxd);
next_buffer = &rx_ring->buffer_info[i];
length = le16_to_cpu(rx_desc->wb.upper.length); length = le16_to_cpu(rx_desc->wb.upper.length);
cleaned = true; cleaned = true;
cleaned_count++; cleaned_count++;
skb = buffer_info->skb;
prefetch(skb->data - NET_IP_ALIGN);
buffer_info->skb = NULL;
if (!adapter->rx_ps_hdr_size) { if (!adapter->rx_ps_hdr_size) {
pci_unmap_single(pdev, buffer_info->dma, pci_unmap_single(pdev, buffer_info->dma,
adapter->rx_buffer_len + adapter->rx_buffer_len +
...@@ -3838,6 +3836,16 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, ...@@ -3838,6 +3836,16 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
goto send_up; goto send_up;
} }
/* HW will not DMA in data larger than the given buffer, even
* if it parses the (NFS, of course) header to be larger. In
* that case, it fills the header buffer and spills the rest
* into the page.
*/
hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
if (hlen > adapter->rx_ps_hdr_size)
hlen = adapter->rx_ps_hdr_size;
if (!skb_shinfo(skb)->nr_frags) { if (!skb_shinfo(skb)->nr_frags) {
pci_unmap_single(pdev, buffer_info->dma, pci_unmap_single(pdev, buffer_info->dma,
adapter->rx_ps_hdr_size + adapter->rx_ps_hdr_size +
...@@ -3867,13 +3875,6 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, ...@@ -3867,13 +3875,6 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
skb->truesize += length; skb->truesize += length;
} }
send_up:
i++;
if (i == rx_ring->count)
i = 0;
next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
prefetch(next_rxd);
next_buffer = &rx_ring->buffer_info[i];
if (!(staterr & E1000_RXD_STAT_EOP)) { if (!(staterr & E1000_RXD_STAT_EOP)) {
buffer_info->skb = next_buffer->skb; buffer_info->skb = next_buffer->skb;
...@@ -3882,7 +3883,7 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, ...@@ -3882,7 +3883,7 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
next_buffer->dma = 0; next_buffer->dma = 0;
goto next_desc; goto next_desc;
} }
send_up:
if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
dev_kfree_skb_irq(skb); dev_kfree_skb_irq(skb);
goto next_desc; goto next_desc;
...@@ -3909,7 +3910,6 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, ...@@ -3909,7 +3910,6 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
/* use prefetched values */ /* use prefetched values */
rx_desc = next_rxd; rx_desc = next_rxd;
buffer_info = next_buffer; buffer_info = next_buffer;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error); staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment