Commit b74e3e8c authored by Ben Hutchings

sfc: Update RX buffer address together with length

Adjust rx_buf->page_offset when we eat the RX hash prefix.  Remove
efx_rx_buf_offset(), which is now redundant.
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
parent 5036b7c7
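
The core of the change is that the hash-prefix adjustment now updates the buffer offset and length together. Below is a minimal standalone C sketch of that accounting; struct rx_buf_example and complete_rx_example are hypothetical names used for illustration only, not driver code — only the two-field adjustment mirrors what the diff does in efx_rx_packet().

/* Sketch (assumed names): before this commit, page_offset always pointed at
 * the DMA base and consumers added the hash prefix size through
 * efx_rx_buf_offset(); afterwards the completion path advances page_offset
 * past the prefix once, together with the length.
 */
struct rx_buf_example {
	unsigned int page_offset;	/* pending: DMA base; completed: Ethernet header */
	unsigned int len;		/* pending: DMA length; completed: payload length */
};

static void complete_rx_example(struct rx_buf_example *buf,
				unsigned int received_len,
				unsigned int hash_prefix_size)
{
	/* Eat the RX hash prefix: advance the offset and shrink the length
	 * by the same amount so both fields describe the Ethernet frame.
	 */
	buf->page_offset += hash_prefix_size;
	buf->len = received_len - hash_prefix_size;
}

With the offset already past the prefix, a helper such as efx_rx_buf_va() returns the Ethernet header directly, which is why efx_rx_buf_offset() becomes redundant and is removed in the diff below.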
@@ -208,7 +208,8 @@ struct efx_tx_queue {
  * @dma_addr: DMA base address of the buffer
  * @page: The associated page buffer.
  *	Will be %NULL if the buffer slot is currently free.
- * @page_offset: Offset within page
+ * @page_offset: If pending: offset in @page of DMA base address.
+ *	If completed: offset in @page of Ethernet header.
  * @len: If pending: length for DMA descriptor.
  *	If completed: received length, excluding hash prefix.
  * @flags: Flags for buffer and packet state.
@@ -47,13 +47,6 @@ static unsigned int rx_refill_threshold;
  */
 #define EFX_RXD_HEAD_ROOM 2
 
-/* Offset of ethernet header within page */
-static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
-					     struct efx_rx_buffer *buf)
-{
-	return buf->page_offset + efx->type->rx_buffer_hash_size;
-}
-
 static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
 {
 	return page_address(buf->page) + buf->page_offset;
@@ -356,8 +349,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
 		if (efx->net_dev->features & NETIF_F_RXHASH)
 			skb->rxhash = efx_rx_buf_hash(eh);
 
-		skb_fill_page_desc(skb, 0, page,
-				   efx_rx_buf_offset(efx, rx_buf), rx_buf->len);
+		skb_fill_page_desc(skb, 0, page, rx_buf->page_offset, rx_buf->len);
 
 		skb->len = rx_buf->len;
 		skb->data_len = rx_buf->len;
@@ -399,7 +391,7 @@ static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
 	if (rx_buf->len > hdr_len) {
 		skb->data_len = skb->len - hdr_len;
 		skb_fill_page_desc(skb, 0, rx_buf->page,
-				   efx_rx_buf_offset(efx, rx_buf) + hdr_len,
+				   rx_buf->page_offset + hdr_len,
 				   skb->data_len);
 	} else {
 		__free_pages(rx_buf->page, efx->rx_buffer_order);
@@ -460,10 +452,12 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 	 */
 	prefetch(efx_rx_buf_va(rx_buf));
 
+	rx_buf->page_offset += efx->type->rx_buffer_hash_size;
+	rx_buf->len = len - efx->type->rx_buffer_hash_size;
+
 	/* Pipeline receives so that we give time for packet headers to be
 	 * prefetched into cache.
 	 */
-	rx_buf->len = len - efx->type->rx_buffer_hash_size;
 out:
 	efx_rx_flush_packet(channel);
 	channel->rx_pkt = rx_buf;
@@ -497,7 +491,7 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
 void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
 {
 	struct efx_nic *efx = channel->efx;
-	u8 *eh = efx_rx_buf_va(rx_buf) + efx->type->rx_buffer_hash_size;
+	u8 *eh = efx_rx_buf_va(rx_buf);
 
 	/* If we're in loopback test, then pass the packet directly to the
 	 * loopback layer, and free the rx_buf here