Commit 4c1975d7 authored by Alexander Duyck's avatar Alexander Duyck Committed by Jeff Kirsher

ixgbe: Minor refactor of RSC

This change addresses several issues.

First, I had left the use of the next and prev skb pointers floating around
in the code. They were overdue to be pulled, since I had rewritten the
RSC code in the out-of-tree driver some time ago to address issues brought
up by David Miller in regards to this.

I am also now defaulting to always leaving the first buffer unmapped on any
packet and then unmapping it after we read the EOP descriptor.  This allows
a simplification of the path with less branching.

Instead of counting packets received the code was changed some time ago to
track the number of buffers received.  This leads to inaccurate counting
when you compare numbers of packets received by the hardware versus what is
tracked by the software.  To correct this I am revising things so that the
append_cnt value for RSC accurately tracks the number of frames received.
Signed-off-by: default avatarAlexander Duyck <alexander.h.duyck@intel.com>
Tested-by: default avatarStephen Ko <stephen.s.ko@intel.com>
Signed-off-by: default avatarJeff Kirsher <jeffrey.t.kirsher@intel.com>
parent d9dd966d
...@@ -535,12 +535,16 @@ enum ixbge_state_t { ...@@ -535,12 +535,16 @@ enum ixbge_state_t {
__IXGBE_IN_SFP_INIT, __IXGBE_IN_SFP_INIT,
}; };
struct ixgbe_rsc_cb { struct ixgbe_cb {
union { /* Union defining head/tail partner */
struct sk_buff *head;
struct sk_buff *tail;
};
dma_addr_t dma; dma_addr_t dma;
u16 skb_cnt; u16 append_cnt;
bool delay_unmap; bool delay_unmap;
}; };
#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb) #define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)
enum ixgbe_boards { enum ixgbe_boards {
board_82598, board_82598,
......
...@@ -1207,40 +1207,96 @@ static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc) ...@@ -1207,40 +1207,96 @@ static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
} }
/** /**
* ixgbe_transform_rsc_queue - change rsc queue into a full packet * ixgbe_merge_active_tail - merge active tail into lro skb
* @skb: pointer to the last skb in the rsc queue * @tail: pointer to active tail in frag_list
* *
* This function changes a queue full of hw rsc buffers into a completed * This function merges the length and data of an active tail into the
* packet. It uses the ->prev pointers to find the first packet and then * skb containing the frag_list. It resets the tail's pointer to the head,
* turns it into the frag list owner. * but it leaves the heads pointer to tail intact.
**/ **/
static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb) static inline struct sk_buff *ixgbe_merge_active_tail(struct sk_buff *tail)
{ {
unsigned int frag_list_size = 0; struct sk_buff *head = IXGBE_CB(tail)->head;
unsigned int skb_cnt = 1;
while (skb->prev) { if (!head)
struct sk_buff *prev = skb->prev; return tail;
frag_list_size += skb->len;
skb->prev = NULL; head->len += tail->len;
skb = prev; head->data_len += tail->len;
skb_cnt++; head->truesize += tail->len;
IXGBE_CB(tail)->head = NULL;
return head;
}
/**
* ixgbe_add_active_tail - adds an active tail into the skb frag_list
* @head: pointer to the start of the skb
* @tail: pointer to active tail to add to frag_list
*
* This function adds an active tail to the end of the frag list. This tail
* will still be receiving data so we cannot yet ad it's stats to the main
* skb. That is done via ixgbe_merge_active_tail.
**/
static inline void ixgbe_add_active_tail(struct sk_buff *head,
struct sk_buff *tail)
{
struct sk_buff *old_tail = IXGBE_CB(head)->tail;
if (old_tail) {
ixgbe_merge_active_tail(old_tail);
old_tail->next = tail;
} else {
skb_shinfo(head)->frag_list = tail;
} }
skb_shinfo(skb)->frag_list = skb->next; IXGBE_CB(tail)->head = head;
skb->next = NULL; IXGBE_CB(head)->tail = tail;
skb->len += frag_list_size; }
skb->data_len += frag_list_size;
skb->truesize += frag_list_size; /**
IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt; * ixgbe_close_active_frag_list - cleanup pointers on a frag_list skb
* @head: pointer to head of an active frag list
*
* This function will clear the frag_tail_tracker pointer on an active
* frag_list and returns true if the pointer was actually set
**/
static inline bool ixgbe_close_active_frag_list(struct sk_buff *head)
{
struct sk_buff *tail = IXGBE_CB(head)->tail;
if (!tail)
return false;
return skb; ixgbe_merge_active_tail(tail);
IXGBE_CB(head)->tail = NULL;
return true;
} }
static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc) static void ixgbe_get_rsc_cnt(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{ {
return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) & __le32 rsc_enabled;
IXGBE_RXDADV_RSCCNT_MASK); u32 rsc_cnt;
if (!ring_is_rsc_enabled(rx_ring))
return;
rsc_enabled = rx_desc->wb.lower.lo_dword.data &
cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
/* If this is an RSC frame rsc_cnt should be non-zero */
if (!rsc_enabled)
return;
rsc_cnt = le32_to_cpu(rsc_enabled);
rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
} }
static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
...@@ -1249,7 +1305,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, ...@@ -1249,7 +1305,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
{ {
struct ixgbe_adapter *adapter = q_vector->adapter; struct ixgbe_adapter *adapter = q_vector->adapter;
union ixgbe_adv_rx_desc *rx_desc, *next_rxd; union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; struct ixgbe_rx_buffer *rx_buffer_info;
struct sk_buff *skb; struct sk_buff *skb;
unsigned int total_rx_bytes = 0, total_rx_packets = 0; unsigned int total_rx_bytes = 0, total_rx_packets = 0;
const int current_node = numa_node_id(); const int current_node = numa_node_id();
...@@ -1259,7 +1315,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, ...@@ -1259,7 +1315,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
u32 staterr; u32 staterr;
u16 i; u16 i;
u16 cleaned_count = 0; u16 cleaned_count = 0;
bool pkt_is_rsc = false;
i = rx_ring->next_to_clean; i = rx_ring->next_to_clean;
rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
...@@ -1276,32 +1331,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, ...@@ -1276,32 +1331,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
rx_buffer_info->skb = NULL; rx_buffer_info->skb = NULL;
prefetch(skb->data); prefetch(skb->data);
if (ring_is_rsc_enabled(rx_ring))
pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
/* linear means we are building an skb from multiple pages */ /* linear means we are building an skb from multiple pages */
if (!skb_is_nonlinear(skb)) { if (!skb_is_nonlinear(skb)) {
u16 hlen; u16 hlen;
if (pkt_is_rsc &&
!(staterr & IXGBE_RXD_STAT_EOP) &&
!skb->prev) {
/*
* When HWRSC is enabled, delay unmapping
* of the first packet. It carries the
* header information, HW may still
* access the header after the writeback.
* Only unmap it when EOP is reached
*/
IXGBE_RSC_CB(skb)->delay_unmap = true;
IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
} else {
dma_unmap_single(rx_ring->dev,
rx_buffer_info->dma,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
}
rx_buffer_info->dma = 0;
if (ring_is_ps_enabled(rx_ring)) { if (ring_is_ps_enabled(rx_ring)) {
hlen = ixgbe_get_hlen(rx_desc); hlen = ixgbe_get_hlen(rx_desc);
upper_len = le16_to_cpu(rx_desc->wb.upper.length); upper_len = le16_to_cpu(rx_desc->wb.upper.length);
...@@ -1310,6 +1342,23 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, ...@@ -1310,6 +1342,23 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
} }
skb_put(skb, hlen); skb_put(skb, hlen);
/*
* Delay unmapping of the first packet. It carries the
* header information, HW may still access the header
* after writeback. Only unmap it when EOP is reached
*/
if (!IXGBE_CB(skb)->head) {
IXGBE_CB(skb)->delay_unmap = true;
IXGBE_CB(skb)->dma = rx_buffer_info->dma;
} else {
skb = ixgbe_merge_active_tail(skb);
dma_unmap_single(rx_ring->dev,
rx_buffer_info->dma,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
}
rx_buffer_info->dma = 0;
} else { } else {
/* assume packet split since header is unmapped */ /* assume packet split since header is unmapped */
upper_len = le16_to_cpu(rx_desc->wb.upper.length); upper_len = le16_to_cpu(rx_desc->wb.upper.length);
...@@ -1337,6 +1386,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, ...@@ -1337,6 +1386,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
skb->truesize += PAGE_SIZE / 2; skb->truesize += PAGE_SIZE / 2;
} }
ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb);
i++; i++;
if (i == rx_ring->count) if (i == rx_ring->count)
i = 0; i = 0;
...@@ -1345,55 +1396,50 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, ...@@ -1345,55 +1396,50 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
prefetch(next_rxd); prefetch(next_rxd);
cleaned_count++; cleaned_count++;
if (pkt_is_rsc) { if (!(staterr & IXGBE_RXD_STAT_EOP)) {
u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >> struct ixgbe_rx_buffer *next_buffer;
IXGBE_RXDADV_NEXTP_SHIFT; u32 nextp;
if (IXGBE_CB(skb)->append_cnt) {
nextp = staterr & IXGBE_RXDADV_NEXTP_MASK;
nextp >>= IXGBE_RXDADV_NEXTP_SHIFT;
} else {
nextp = i;
}
next_buffer = &rx_ring->rx_buffer_info[nextp]; next_buffer = &rx_ring->rx_buffer_info[nextp];
} else {
next_buffer = &rx_ring->rx_buffer_info[i];
}
if (!(staterr & IXGBE_RXD_STAT_EOP)) {
if (ring_is_ps_enabled(rx_ring)) { if (ring_is_ps_enabled(rx_ring)) {
rx_buffer_info->skb = next_buffer->skb; rx_buffer_info->skb = next_buffer->skb;
rx_buffer_info->dma = next_buffer->dma; rx_buffer_info->dma = next_buffer->dma;
next_buffer->skb = skb; next_buffer->skb = skb;
next_buffer->dma = 0; next_buffer->dma = 0;
} else { } else {
skb->next = next_buffer->skb; struct sk_buff *next_skb = next_buffer->skb;
skb->next->prev = skb; ixgbe_add_active_tail(skb, next_skb);
IXGBE_CB(next_skb)->head = skb;
} }
rx_ring->rx_stats.non_eop_descs++; rx_ring->rx_stats.non_eop_descs++;
goto next_desc; goto next_desc;
} }
if (skb->prev) { dma_unmap_single(rx_ring->dev,
skb = ixgbe_transform_rsc_queue(skb); IXGBE_CB(skb)->dma,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
IXGBE_CB(skb)->dma = 0;
IXGBE_CB(skb)->delay_unmap = false;
if (ixgbe_close_active_frag_list(skb) &&
!IXGBE_CB(skb)->append_cnt) {
/* if we got here without RSC the packet is invalid */ /* if we got here without RSC the packet is invalid */
if (!pkt_is_rsc) { dev_kfree_skb_any(skb);
__pskb_trim(skb, 0); goto next_desc;
rx_buffer_info->skb = skb;
goto next_desc;
}
} }
if (ring_is_rsc_enabled(rx_ring)) { if (IXGBE_CB(skb)->append_cnt) {
if (IXGBE_RSC_CB(skb)->delay_unmap) { rx_ring->rx_stats.rsc_count +=
dma_unmap_single(rx_ring->dev, IXGBE_CB(skb)->append_cnt;
IXGBE_RSC_CB(skb)->dma,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
IXGBE_RSC_CB(skb)->dma = 0;
IXGBE_RSC_CB(skb)->delay_unmap = false;
}
}
if (pkt_is_rsc) {
if (ring_is_ps_enabled(rx_ring))
rx_ring->rx_stats.rsc_count +=
skb_shinfo(skb)->nr_frags;
else
rx_ring->rx_stats.rsc_count +=
IXGBE_RSC_CB(skb)->skb_cnt;
rx_ring->rx_stats.rsc_flush++; rx_ring->rx_stats.rsc_flush++;
} }
...@@ -3881,19 +3927,18 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) ...@@ -3881,19 +3927,18 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
if (rx_buffer_info->skb) { if (rx_buffer_info->skb) {
struct sk_buff *skb = rx_buffer_info->skb; struct sk_buff *skb = rx_buffer_info->skb;
rx_buffer_info->skb = NULL; rx_buffer_info->skb = NULL;
do { /* We need to clean up RSC frag lists */
struct sk_buff *this = skb; skb = ixgbe_merge_active_tail(skb);
if (IXGBE_RSC_CB(this)->delay_unmap) { ixgbe_close_active_frag_list(skb);
dma_unmap_single(dev, if (IXGBE_CB(skb)->delay_unmap) {
IXGBE_RSC_CB(this)->dma, dma_unmap_single(dev,
rx_ring->rx_buf_len, IXGBE_CB(skb)->dma,
DMA_FROM_DEVICE); rx_ring->rx_buf_len,
IXGBE_RSC_CB(this)->dma = 0; DMA_FROM_DEVICE);
IXGBE_RSC_CB(skb)->delay_unmap = false; IXGBE_CB(skb)->dma = 0;
} IXGBE_CB(skb)->delay_unmap = false;
skb = skb->prev; }
dev_kfree_skb(this); dev_kfree_skb(skb);
} while (skb);
} }
if (!rx_buffer_info->page) if (!rx_buffer_info->page)
continue; continue;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment