Commit 1dc1a7e7 authored by Maciej Fijalkowski, committed by Daniel Borkmann

ice: Centralize Rx buffer recycling

Currently, calls to ice_put_rx_buf() are sprinkled throughout
ice_clean_rx_irq(): the first is in the explicit flow director
descriptor handling, the second comes after running the XDP program,
and the last comes after the skb has been taken care of.

The first call site actually existed only to bump ntc, as the Rx
buffer to be recycled is not even passed to the function.

Instead, it is possible to walk through the Rx buffers processed in a
particular NAPI cycle by caching ntc at the beginning of
ice_clean_rx_irq() and doing all of the recycling in one place after
the loop.

To do so, store the XDP verdict inside ice_rx_buf, so that the action
to be taken on each buffer is still known by the time it is recycled.
When no XDP program is attached, simply store ICE_XDP_PASS as the
verdict.
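
In short, the mechanics look like this (a condensed sketch distilled
from the diff below, not a verbatim quote of the driver source): the
per-descriptor loop only records a verdict in rx_buf->act, and a
single walk from the cached ntc to the final ntc then performs all of
the recycling:

	/* per descriptor: run XDP (or not) and just record the verdict */
	ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf);

	/* after the loop: the one remaining ice_put_rx_buf() call site */
	while (cached_ntc != ntc) {
		struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc];

		if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) {
			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
			xdp_xmit |= buf->act;
		} else if (buf->act & ICE_XDP_CONSUMED) {
			buf->pagecnt_bias++;	/* hand the page reference back */
		} else if (buf->act == ICE_XDP_PASS) {
			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
		}

		ice_put_rx_buf(rx_ring, buf);
		if (++cached_ntc >= cnt)
			cached_ntc = 0;
	}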
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Alexander Lobakin <alexandr.lobakin@intel.com>
Link: https://lore.kernel.org/bpf/20230131204506.219292-7-maciej.fijalkowski@intel.com
parent e44f4790
@@ -553,34 +553,39 @@ ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size)
  * @xdp: xdp_buff used as input to the XDP program
  * @xdp_prog: XDP program to run
  * @xdp_ring: ring to be used for XDP_TX action
+ * @rx_buf: Rx buffer to store the XDP action
  *
  * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
  */
-static int
+static void
 ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
-	    struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
+	    struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
+	    struct ice_rx_buf *rx_buf)
 {
-	int err;
+	unsigned int ret = ICE_XDP_PASS;
 	u32 act;
 
+	if (!xdp_prog)
+		goto exit;
+
 	act = bpf_prog_run_xdp(xdp_prog, xdp);
 	switch (act) {
 	case XDP_PASS:
-		return ICE_XDP_PASS;
+		break;
 	case XDP_TX:
 		if (static_branch_unlikely(&ice_xdp_locking_key))
 			spin_lock(&xdp_ring->tx_lock);
-		err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
+		ret = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
 		if (static_branch_unlikely(&ice_xdp_locking_key))
 			spin_unlock(&xdp_ring->tx_lock);
-		if (err == ICE_XDP_CONSUMED)
+		if (ret == ICE_XDP_CONSUMED)
 			goto out_failure;
-		return err;
+		break;
 	case XDP_REDIRECT:
-		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-		if (err)
+		if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))
 			goto out_failure;
-		return ICE_XDP_REDIR;
+		ret = ICE_XDP_REDIR;
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
 		fallthrough;
@@ -589,8 +594,10 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
 		fallthrough;
 	case XDP_DROP:
-		return ICE_XDP_CONSUMED;
+		ret = ICE_XDP_CONSUMED;
 	}
+exit:
+	rx_buf->act = ret;
 }
 
 /**
@@ -855,9 +862,6 @@ ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
 		return;
 
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
 			rx_buf->page_offset, size, truesize);
-
-	/* page is being used so we must update the page offset */
-	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
 }
 
 /**
@@ -970,9 +974,6 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
 	if (metasize)
 		skb_metadata_set(skb, metasize);
 
-	/* buffer is used by skb, update page_offset */
-	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
-
 	return skb;
 }
@@ -1023,14 +1024,13 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
 #endif
 		skb_add_rx_frag(skb, 0, rx_buf->page,
 				rx_buf->page_offset + headlen, size, truesize);
-		/* buffer is used by skb, update page_offset */
-		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
 	} else {
-		/* buffer is unused, reset bias back to rx_buf; data was copied
-		 * onto skb's linear part so there's no need for adjusting
-		 * page offset and we can reuse this buffer as-is
+		/* buffer is unused, change the act that should be taken later
+		 * on; data was copied onto skb's linear part so there's no
+		 * need for adjusting page offset and we can reuse this buffer
+		 * as-is
 		 */
-		rx_buf->pagecnt_bias++;
+		rx_buf->act = ICE_XDP_CONSUMED;
 	}
 
 	return skb;
@@ -1084,11 +1084,12 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 	unsigned int offset = rx_ring->rx_offset;
 	struct xdp_buff *xdp = &rx_ring->xdp;
 	struct ice_tx_ring *xdp_ring = NULL;
-	unsigned int xdp_res, xdp_xmit = 0;
 	struct sk_buff *skb = rx_ring->skb;
 	struct bpf_prog *xdp_prog = NULL;
 	u32 ntc = rx_ring->next_to_clean;
 	u32 cnt = rx_ring->count;
+	u32 cached_ntc = ntc;
+	u32 xdp_xmit = 0;
 	bool failure;
 
 	/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
@@ -1137,7 +1138,6 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 			ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
 			if (++ntc == cnt)
 				ntc = 0;
-			ice_put_rx_buf(rx_ring, NULL);
 			cleaned_count++;
 			continue;
 		}
@@ -1164,25 +1164,15 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 		xdp->frame_sz = ice_rx_frame_truesize(rx_ring, size);
 #endif
 
-		if (!xdp_prog)
-			goto construct_skb;
-
-		xdp_res = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring);
-		if (!xdp_res)
+		ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf);
+		if (rx_buf->act == ICE_XDP_PASS)
 			goto construct_skb;
-
-		if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
-			xdp_xmit |= xdp_res;
-			ice_rx_buf_adjust_pg_offset(rx_buf, xdp->frame_sz);
-		} else {
-			rx_buf->pagecnt_bias++;
-		}
 		total_rx_bytes += size;
 		total_rx_pkts++;
 		cleaned_count++;
 
 		if (++ntc == cnt)
 			ntc = 0;
-		ice_put_rx_buf(rx_ring, rx_buf);
 		continue;
 construct_skb:
 		if (skb) {
@@ -1203,7 +1193,6 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 		if (++ntc == cnt)
 			ntc = 0;
-		ice_put_rx_buf(rx_ring, rx_buf);
 		cleaned_count++;
 
 		/* skip if it is NOP desc */
@@ -1243,6 +1232,22 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 		total_rx_pkts++;
 	}
 
+	while (cached_ntc != ntc) {
+		struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc];
+
+		if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+			xdp_xmit |= buf->act;
+		} else if (buf->act & ICE_XDP_CONSUMED) {
+			buf->pagecnt_bias++;
+		} else if (buf->act == ICE_XDP_PASS) {
+			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+		}
+
+		ice_put_rx_buf(rx_ring, buf);
+		if (++cached_ntc >= cnt)
+			cached_ntc = 0;
+	}
 	rx_ring->next_to_clean = ntc;
 
 	/* return up to cleaned_count buffers to hardware */
 	failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
...
@@ -173,6 +173,7 @@ struct ice_rx_buf {
 	struct page *page;
 	unsigned int page_offset;
 	unsigned int pgcnt;
+	unsigned int act;
 	unsigned int pagecnt_bias;
 };
...