Commit d094c985 authored by Lorenzo Bianconi, committed by Alexei Starovoitov

net: mvneta: simplify mvneta_swbm_add_rx_fragment management

Relying on xdp frags bit, remove skb_shared_info structure
allocated on the stack in mvneta_rx_swbm routine and simplify
mvneta_swbm_add_rx_fragment accessing skb_shared_info in the
xdp_buff structure directly. There is no performance penalty in
this approach since mvneta_swbm_add_rx_fragment is run just
for xdp frags use-case.
Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Link: https://lore.kernel.org/r/45f050c094ccffce49d6bc5112939ed35250ba90.1642758637.git.lorenzo@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 76a67694
...@@ -2060,9 +2060,9 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) ...@@ -2060,9 +2060,9 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
static void static void
mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
struct xdp_buff *xdp, struct skb_shared_info *sinfo, struct xdp_buff *xdp, int sync_len)
int sync_len)
{ {
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
int i; int i;
if (likely(!xdp_buff_has_frags(xdp))) if (likely(!xdp_buff_has_frags(xdp)))
...@@ -2210,7 +2210,6 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, ...@@ -2210,7 +2210,6 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
struct bpf_prog *prog, struct xdp_buff *xdp, struct bpf_prog *prog, struct xdp_buff *xdp,
u32 frame_sz, struct mvneta_stats *stats) u32 frame_sz, struct mvneta_stats *stats)
{ {
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
unsigned int len, data_len, sync; unsigned int len, data_len, sync;
u32 ret, act; u32 ret, act;
...@@ -2231,7 +2230,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, ...@@ -2231,7 +2230,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
err = xdp_do_redirect(pp->dev, xdp, prog); err = xdp_do_redirect(pp->dev, xdp, prog);
if (unlikely(err)) { if (unlikely(err)) {
mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync); mvneta_xdp_put_buff(pp, rxq, xdp, sync);
ret = MVNETA_XDP_DROPPED; ret = MVNETA_XDP_DROPPED;
} else { } else {
ret = MVNETA_XDP_REDIR; ret = MVNETA_XDP_REDIR;
...@@ -2242,7 +2241,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, ...@@ -2242,7 +2241,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
case XDP_TX: case XDP_TX:
ret = mvneta_xdp_xmit_back(pp, xdp); ret = mvneta_xdp_xmit_back(pp, xdp);
if (ret != MVNETA_XDP_TX) if (ret != MVNETA_XDP_TX)
mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync); mvneta_xdp_put_buff(pp, rxq, xdp, sync);
break; break;
default: default:
bpf_warn_invalid_xdp_action(pp->dev, prog, act); bpf_warn_invalid_xdp_action(pp->dev, prog, act);
...@@ -2251,7 +2250,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, ...@@ -2251,7 +2250,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
trace_xdp_exception(pp->dev, prog, act); trace_xdp_exception(pp->dev, prog, act);
fallthrough; fallthrough;
case XDP_DROP: case XDP_DROP:
mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync); mvneta_xdp_put_buff(pp, rxq, xdp, sync);
ret = MVNETA_XDP_DROPPED; ret = MVNETA_XDP_DROPPED;
stats->xdp_drop++; stats->xdp_drop++;
break; break;
...@@ -2303,9 +2302,9 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, ...@@ -2303,9 +2302,9 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
struct mvneta_rx_desc *rx_desc, struct mvneta_rx_desc *rx_desc,
struct mvneta_rx_queue *rxq, struct mvneta_rx_queue *rxq,
struct xdp_buff *xdp, int *size, struct xdp_buff *xdp, int *size,
struct skb_shared_info *xdp_sinfo,
struct page *page) struct page *page)
{ {
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
struct net_device *dev = pp->dev; struct net_device *dev = pp->dev;
enum dma_data_direction dma_dir; enum dma_data_direction dma_dir;
int data_len, len; int data_len, len;
...@@ -2323,8 +2322,11 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, ...@@ -2323,8 +2322,11 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
len, dma_dir); len, dma_dir);
rx_desc->buf_phys_addr = 0; rx_desc->buf_phys_addr = 0;
if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) { if (!xdp_buff_has_frags(xdp))
skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++]; sinfo->nr_frags = 0;
if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags++];
skb_frag_off_set(frag, pp->rx_offset_correction); skb_frag_off_set(frag, pp->rx_offset_correction);
skb_frag_size_set(frag, data_len); skb_frag_size_set(frag, data_len);
...@@ -2335,16 +2337,6 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, ...@@ -2335,16 +2337,6 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
} else { } else {
page_pool_put_full_page(rxq->page_pool, page, true); page_pool_put_full_page(rxq->page_pool, page, true);
} }
/* last fragment */
if (len == *size) {
struct skb_shared_info *sinfo;
sinfo = xdp_get_shared_info_from_buff(xdp);
sinfo->nr_frags = xdp_sinfo->nr_frags;
memcpy(sinfo->frags, xdp_sinfo->frags,
sinfo->nr_frags * sizeof(skb_frag_t));
}
*size -= len; *size -= len;
} }
...@@ -2392,7 +2384,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi, ...@@ -2392,7 +2384,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
{ {
int rx_proc = 0, rx_todo, refill, size = 0; int rx_proc = 0, rx_todo, refill, size = 0;
struct net_device *dev = pp->dev; struct net_device *dev = pp->dev;
struct skb_shared_info sinfo;
struct mvneta_stats ps = {}; struct mvneta_stats ps = {};
struct bpf_prog *xdp_prog; struct bpf_prog *xdp_prog;
u32 desc_status, frame_sz; u32 desc_status, frame_sz;
...@@ -2401,8 +2392,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi, ...@@ -2401,8 +2392,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq); xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq);
xdp_buf.data_hard_start = NULL; xdp_buf.data_hard_start = NULL;
sinfo.nr_frags = 0;
/* Get number of received packets */ /* Get number of received packets */
rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq); rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
...@@ -2444,7 +2433,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi, ...@@ -2444,7 +2433,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
} }
mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf, mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
&size, &sinfo, page); &size, page);
} /* Middle or Last descriptor */ } /* Middle or Last descriptor */
if (!(rx_status & MVNETA_RXD_LAST_DESC)) if (!(rx_status & MVNETA_RXD_LAST_DESC))
...@@ -2452,7 +2441,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi, ...@@ -2452,7 +2441,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
continue; continue;
if (size) { if (size) {
mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1); mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
goto next; goto next;
} }
...@@ -2464,7 +2453,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi, ...@@ -2464,7 +2453,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
if (IS_ERR(skb)) { if (IS_ERR(skb)) {
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1); mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
u64_stats_update_begin(&stats->syncp); u64_stats_update_begin(&stats->syncp);
stats->es.skb_alloc_error++; stats->es.skb_alloc_error++;
...@@ -2481,11 +2470,10 @@ static int mvneta_rx_swbm(struct napi_struct *napi, ...@@ -2481,11 +2470,10 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
napi_gro_receive(napi, skb); napi_gro_receive(napi, skb);
next: next:
xdp_buf.data_hard_start = NULL; xdp_buf.data_hard_start = NULL;
sinfo.nr_frags = 0;
} }
if (xdp_buf.data_hard_start) if (xdp_buf.data_hard_start)
mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1); mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
if (ps.xdp_redirect) if (ps.xdp_redirect)
xdp_do_flush_map(); xdp_do_flush_map();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment