Commit 42073d91 authored by Alexander Duyck, committed by Peter P Waskiewicz Jr

ixgbe: Have the CPU take ownership of the buffers sooner

This patch makes it so that we will always have ownership of the buffers by
the time we get to ixgbe_add_rx_frag. This is necessary as I am planning to
add a copy-break to ixgbe_add_rx_frag and in order for that to function
correctly we need the CPU to have ownership of the buffer.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
parent 09816fbe
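
For context on the copy-break the commit message refers to: a copy-break copies small frames straight into the skb's linear data instead of attaching the page fragment, so the page can be handed back to the ring; that memcpy is only valid once the CPU owns (has synced) the buffer, which is what this patch arranges. The sketch below only illustrates that pattern; the helper name ixgbe_copybreak_sketch and the exact eligibility test are assumptions for illustration, not code from this or any follow-up commit.

/* Illustrative sketch only -- not part of this commit or the driver.
 * Copy a small frame into the skb linear area so the page fragment never
 * has to be attached; assumes the skb was allocated with at least
 * IXGBE_RX_HDR_SIZE bytes of tailroom.
 */
static bool ixgbe_copybreak_sketch(struct ixgbe_ring *rx_ring,
				   struct ixgbe_rx_buffer *rx_buffer,
				   unsigned int size,
				   struct sk_buff *skb)
{
	unsigned char *va = page_address(rx_buffer->page) +
			    rx_buffer->page_offset;

	/* only small frames landing in an empty skb are worth copying */
	if (size > IXGBE_RX_HDR_SIZE || skb_is_nonlinear(skb))
		return false;

	/* only safe once the CPU owns the buffer, i.e. after the
	 * dma_sync_single_range_for_cpu()/dma_unmap_page() this patch
	 * performs before ixgbe_add_rx_frag()
	 */
	memcpy(__skb_put(skb, size), va, size);

	/* caller can now recycle the page; no fragment was attached */
	return true;
}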
@@ -1457,6 +1457,36 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
 	return true;
 }
 
+/**
+ * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being updated
+ *
+ * This function provides a basic DMA sync up for the first fragment of an
+ * skb. The reason for doing this is that the first fragment cannot be
+ * unmapped until we have reached the end of packet descriptor for a buffer
+ * chain.
+ */
+static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
+				struct sk_buff *skb)
+{
+	/* if the page was released unmap it, else just sync our portion */
+	if (unlikely(IXGBE_CB(skb)->page_released)) {
+		dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
+			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+		IXGBE_CB(skb)->page_released = false;
+	} else {
+		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      IXGBE_CB(skb)->dma,
+					      frag->page_offset,
+					      ixgbe_rx_bufsz(rx_ring),
+					      DMA_FROM_DEVICE);
+	}
+	IXGBE_CB(skb)->dma = 0;
+}
+
 /**
  * ixgbe_cleanup_headers - Correct corrupted or empty headers
  * @rx_ring: rx descriptor ring packet is being transacted on
@@ -1484,20 +1514,6 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
 	unsigned char *va;
 	unsigned int pull_len;
 
-	/* if the page was released unmap it, else just sync our portion */
-	if (unlikely(IXGBE_CB(skb)->page_released)) {
-		dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
-			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
-		IXGBE_CB(skb)->page_released = false;
-	} else {
-		dma_sync_single_range_for_cpu(rx_ring->dev,
-					      IXGBE_CB(skb)->dma,
-					      frag->page_offset,
-					      ixgbe_rx_bufsz(rx_ring),
-					      DMA_FROM_DEVICE);
-	}
-	IXGBE_CB(skb)->dma = 0;
-
 	/* verify that the packet does not have any known errors */
 	if (unlikely(ixgbe_test_staterr(rx_desc,
 					IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
@@ -1742,8 +1758,16 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			 * after the writeback. Only unmap it when EOP is
 			 * reached
 			 */
+			if (likely(ixgbe_test_staterr(rx_desc,
+						      IXGBE_RXD_STAT_EOP)))
+				goto dma_sync;
+
 			IXGBE_CB(skb)->dma = rx_buffer->dma;
 		} else {
+			if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
+				ixgbe_dma_sync_frag(rx_ring, skb);
+
+dma_sync:
 			/* we are reusing so sync this buffer for CPU use */
 			dma_sync_single_range_for_cpu(rx_ring->dev,
 						      rx_buffer->dma,
...
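
Taken together, the last hunk leaves the per-descriptor ownership logic in ixgbe_clean_rx_irq with roughly the shape sketched below. This is a condensed paraphrase for illustration only; the helper name ixgbe_cpu_ownership_sketch and the first_buffer flag are assumptions used to flatten the surrounding if/else and goto, not driver code.

/* Illustration only: how each receive buffer becomes CPU-owned after this
 * patch. first_buffer is true for the descriptor that starts a frame (the
 * skb was just allocated), false for continuation buffers.
 */
static void ixgbe_cpu_ownership_sketch(struct ixgbe_ring *rx_ring,
				       union ixgbe_adv_rx_desc *rx_desc,
				       struct ixgbe_rx_buffer *rx_buffer,
				       struct sk_buff *skb,
				       bool first_buffer)
{
	bool eop = ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP);

	if (first_buffer && !eop) {
		/* multi-buffer frame: remember the mapping and let
		 * ixgbe_dma_sync_frag() sync or unmap it once EOP arrives
		 */
		IXGBE_CB(skb)->dma = rx_buffer->dma;
		return;
	}

	if (!first_buffer && eop)
		/* last descriptor: settle the deferred first fragment */
		ixgbe_dma_sync_frag(rx_ring, skb);

	/* in every remaining case the CPU takes ownership of this buffer
	 * here, before ixgbe_add_rx_frag() touches its contents
	 */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma,
				      rx_buffer->page_offset,
				      ixgbe_rx_bufsz(rx_ring),
				      DMA_FROM_DEVICE);
}

The one case where ownership is still deferred is the first buffer of a multi-descriptor frame; settling that deferred fragment at EOP is exactly what the new ixgbe_dma_sync_frag helper exists for.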