Commit afaa9459 authored by Alexander Duyck, committed by Peter P Waskiewicz Jr

ixgbe: Remove code that was initializing Rx page offset

This change reverts an earlier patch that introduced
ixgbe_init_rx_page_offset. The idea behind the function was to provide
some variation in the starting offset of each page in order to reduce
hot spots in the cache. However, it doesn't appear to provide any
significant benefit in the testing I have done. It has, however, been a
source of several bugs, and it blocks us from being able to use 2K
fragments on larger page sizes, so the decision I made was to remove it.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
parent 02644a17
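As background for the cache-set argument made in the commit message and in the kernel-doc of the function removed below, here is a minimal userspace sketch of the effect. It is not driver code: the 64-byte line size, 4K critical stride, and 2K buffer size are illustrative assumptions, with RX_BUFSZ standing in for ixgbe_rx_bufsz().

#include <stdio.h>

/*
 * Toy model of the removed interleaving, with illustrative numbers:
 * 64-byte cache lines and a 4K critical stride, so addresses 4K apart
 * share a cache set.  RX_BUFSZ stands in for ixgbe_rx_bufsz().
 */
#define LINE_SZ		64UL
#define CRIT_STRIDE	4096UL
#define NUM_SETS	(CRIT_STRIDE / LINE_SZ)
#define RX_BUFSZ	2048UL

static unsigned long cache_set(unsigned long addr)
{
	return (addr / LINE_SZ) % NUM_SETS;
}

int main(void)
{
	unsigned long page;
	unsigned int i = 0;

	/* Walk four 4K-aligned pages, as a page allocator would return. */
	for (page = 0; page < 4 * CRIT_STRIDE; page += CRIT_STRIDE, i++) {
		/* New scheme: page_offset is always 0, so every buffer
		 * starts in the same cache set. */
		printf("buf %u fixed offset:       set %2lu\n",
		       i, cache_set(page));
		/* Removed scheme: odd buffers start RX_BUFSZ into the
		 * page and land in a different set. */
		printf("buf %u interleaved offset: set %2lu\n",
		       i, cache_set(page + (i & 1) * RX_BUFSZ));
	}
	return 0;
}

With a fixed offset every buffer maps to set 0, while the interleaved scheme moves every other buffer to set 32; that separation is the benefit the removed code targeted, which the message above reports was not measurable in testing.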
@@ -1167,7 +1167,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
 	}
 
 	bi->dma = dma;
-	bi->page_offset ^= ixgbe_rx_bufsz(rx_ring);
+	bi->page_offset = 0;
 
 	return true;
 }
@@ -4129,27 +4129,6 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
 		hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
 }
 
-/**
- * ixgbe_init_rx_page_offset - initialize page offset values for Rx buffers
- * @rx_ring: ring to setup
- *
- * On many IA platforms the L1 cache has a critical stride of 4K, this
- * results in each receive buffer starting in the same cache set. To help
- * reduce the pressure on this cache set we can interleave the offsets so
- * that only every other buffer will be in the same cache set.
- **/
-static void ixgbe_init_rx_page_offset(struct ixgbe_ring *rx_ring)
-{
-	struct ixgbe_rx_buffer *rx_buffer = rx_ring->rx_buffer_info;
-	u16 i;
-
-	for (i = 0; i < rx_ring->count; i += 2) {
-		rx_buffer[0].page_offset = 0;
-		rx_buffer[1].page_offset = ixgbe_rx_bufsz(rx_ring);
-		rx_buffer = &rx_buffer[2];
-	}
-}
-
 /**
  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
  * @rx_ring: ring to free buffers from
@@ -4195,8 +4174,6 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
 	memset(rx_ring->rx_buffer_info, 0, size);
 
-	ixgbe_init_rx_page_offset(rx_ring);
-
 	/* Zero out the descriptor ring */
 	memset(rx_ring->desc, 0, rx_ring->size);
@@ -4646,8 +4623,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 
-	ixgbe_init_rx_page_offset(rx_ring);
-
 	return 0;
 err:
 	vfree(rx_ring->rx_buffer_info);