Commit 29b82f2a authored by Maciej Fijalkowski's avatar Maciej Fijalkowski Committed by Tony Nguyen

ice: move skb pointer from rx_buf to rx_ring

A similar change has already been made in i40e, as there is no real need
to keep the sk_buff pointer in each rx_buf. Non-EOP frames can simply be
handled through that pointer after moving it up into rx_ring.
Reviewed-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Tested-by: Tony Brelinski <tonyx.brelinski@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent 59c97d1b
...@@ -375,6 +375,11 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring) ...@@ -375,6 +375,11 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
if (!rx_ring->rx_buf) if (!rx_ring->rx_buf)
return; return;
if (rx_ring->skb) {
dev_kfree_skb(rx_ring->skb);
rx_ring->skb = NULL;
}
if (rx_ring->xsk_pool) { if (rx_ring->xsk_pool) {
ice_xsk_clean_rx_ring(rx_ring); ice_xsk_clean_rx_ring(rx_ring);
goto rx_skip_free; goto rx_skip_free;
...@@ -384,10 +389,6 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring) ...@@ -384,10 +389,6 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
for (i = 0; i < rx_ring->count; i++) { for (i = 0; i < rx_ring->count; i++) {
struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
if (rx_buf->skb) {
dev_kfree_skb(rx_buf->skb);
rx_buf->skb = NULL;
}
if (!rx_buf->page) if (!rx_buf->page)
continue; continue;
...@@ -850,7 +851,6 @@ ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf) ...@@ -850,7 +851,6 @@ ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
/** /**
* ice_get_rx_buf - Fetch Rx buffer and synchronize data for use * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
* @rx_ring: Rx descriptor ring to transact packets on * @rx_ring: Rx descriptor ring to transact packets on
* @skb: skb to be used
* @size: size of buffer to add to skb * @size: size of buffer to add to skb
* @rx_buf_pgcnt: rx_buf page refcount * @rx_buf_pgcnt: rx_buf page refcount
* *
...@@ -858,8 +858,8 @@ ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf) ...@@ -858,8 +858,8 @@ ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
* for use by the CPU. * for use by the CPU.
*/ */
static struct ice_rx_buf * static struct ice_rx_buf *
ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb, ice_get_rx_buf(struct ice_ring *rx_ring, const unsigned int size,
const unsigned int size, int *rx_buf_pgcnt) int *rx_buf_pgcnt)
{ {
struct ice_rx_buf *rx_buf; struct ice_rx_buf *rx_buf;
...@@ -871,7 +871,6 @@ ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb, ...@@ -871,7 +871,6 @@ ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
0; 0;
#endif #endif
prefetchw(rx_buf->page); prefetchw(rx_buf->page);
*skb = rx_buf->skb;
if (!size) if (!size)
return rx_buf; return rx_buf;
...@@ -1033,29 +1032,24 @@ ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, ...@@ -1033,29 +1032,24 @@ ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
/* clear contents of buffer_info */ /* clear contents of buffer_info */
rx_buf->page = NULL; rx_buf->page = NULL;
rx_buf->skb = NULL;
} }
/** /**
* ice_is_non_eop - process handling of non-EOP buffers * ice_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed * @rx_ring: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer * @rx_desc: Rx descriptor for current buffer
* @skb: Current socket buffer containing buffer in progress
* *
* If the buffer is an EOP buffer, this function exits returning false, * If the buffer is an EOP buffer, this function exits returning false,
* otherwise return true indicating that this is in fact a non-EOP buffer. * otherwise return true indicating that this is in fact a non-EOP buffer.
*/ */
static bool static bool
ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
struct sk_buff *skb)
{ {
/* if we are the last buffer then there is nothing else to do */ /* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S) #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF))) if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
return false; return false;
/* place skb in next buffer to be received */
rx_ring->rx_buf[rx_ring->next_to_clean].skb = skb;
rx_ring->rx_stats.non_eop_descs++; rx_ring->rx_stats.non_eop_descs++;
return true; return true;
...@@ -1078,6 +1072,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) ...@@ -1078,6 +1072,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0; unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
unsigned int xdp_res, xdp_xmit = 0; unsigned int xdp_res, xdp_xmit = 0;
struct sk_buff *skb = rx_ring->skb;
struct bpf_prog *xdp_prog = NULL; struct bpf_prog *xdp_prog = NULL;
struct xdp_buff xdp; struct xdp_buff xdp;
bool failure; bool failure;
...@@ -1094,7 +1089,6 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) ...@@ -1094,7 +1089,6 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
union ice_32b_rx_flex_desc *rx_desc; union ice_32b_rx_flex_desc *rx_desc;
struct ice_rx_buf *rx_buf; struct ice_rx_buf *rx_buf;
unsigned char *hard_start; unsigned char *hard_start;
struct sk_buff *skb;
unsigned int size; unsigned int size;
u16 stat_err_bits; u16 stat_err_bits;
int rx_buf_pgcnt; int rx_buf_pgcnt;
...@@ -1129,7 +1123,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) ...@@ -1129,7 +1123,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
ICE_RX_FLX_DESC_PKT_LEN_M; ICE_RX_FLX_DESC_PKT_LEN_M;
/* retrieve a buffer from the ring */ /* retrieve a buffer from the ring */
rx_buf = ice_get_rx_buf(rx_ring, &skb, size, &rx_buf_pgcnt); rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt);
if (!size) { if (!size) {
xdp.data = NULL; xdp.data = NULL;
...@@ -1191,7 +1185,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) ...@@ -1191,7 +1185,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
cleaned_count++; cleaned_count++;
/* skip if it is NOP desc */ /* skip if it is NOP desc */
if (ice_is_non_eop(rx_ring, rx_desc, skb)) if (ice_is_non_eop(rx_ring, rx_desc))
continue; continue;
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S); stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
...@@ -1221,6 +1215,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) ...@@ -1221,6 +1215,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
/* send completed skb up the stack */ /* send completed skb up the stack */
ice_receive_skb(rx_ring, skb, vlan_tag); ice_receive_skb(rx_ring, skb, vlan_tag);
skb = NULL;
/* update budget accounting */ /* update budget accounting */
total_rx_pkts++; total_rx_pkts++;
...@@ -1231,6 +1226,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) ...@@ -1231,6 +1226,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
if (xdp_prog) if (xdp_prog)
ice_finalize_xdp_rx(rx_ring, xdp_xmit); ice_finalize_xdp_rx(rx_ring, xdp_xmit);
rx_ring->skb = skb;
ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes); ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
......
...@@ -165,7 +165,6 @@ struct ice_tx_offload_params { ...@@ -165,7 +165,6 @@ struct ice_tx_offload_params {
struct ice_rx_buf { struct ice_rx_buf {
union { union {
struct { struct {
struct sk_buff *skb;
dma_addr_t dma; dma_addr_t dma;
struct page *page; struct page *page;
unsigned int page_offset; unsigned int page_offset;
...@@ -297,6 +296,7 @@ struct ice_ring { ...@@ -297,6 +296,7 @@ struct ice_ring {
struct xsk_buff_pool *xsk_pool; struct xsk_buff_pool *xsk_pool;
/* CL3 - 3rd cacheline starts here */ /* CL3 - 3rd cacheline starts here */
struct xdp_rxq_info xdp_rxq; struct xdp_rxq_info xdp_rxq;
struct sk_buff *skb;
/* CLX - the below items are only accessed infrequently and should be /* CLX - the below items are only accessed infrequently and should be
* in their own cache line if possible * in their own cache line if possible
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment