Commit da43f0aa authored by Jakub Kicinski's avatar Jakub Kicinski

Merge branch 'mvneta-access-skb_shared_info-only-on-last-frag'

Lorenzo Bianconi says:

====================
mvneta: access skb_shared_info only on last frag

Build skb_shared_info on mvneta_rx_swbm stack and sync it to xdp_buff
skb_shared_info area only on the last fragment.
Avoid unnecessary xdp_buff initialization in mvneta_rx_swbm routine.
This is a preliminary series to complete xdp multi-buff support in the mvneta driver.
====================

Link: https://lore.kernel.org/r/cover.1605889258.git.lorenzo@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 9a71baf7 039fbc47
...@@ -2033,16 +2033,16 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) ...@@ -2033,16 +2033,16 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
static void static void
mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
struct xdp_buff *xdp, int sync_len, bool napi) struct xdp_buff *xdp, struct skb_shared_info *sinfo,
int sync_len)
{ {
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
int i; int i;
for (i = 0; i < sinfo->nr_frags; i++) for (i = 0; i < sinfo->nr_frags; i++)
page_pool_put_full_page(rxq->page_pool, page_pool_put_full_page(rxq->page_pool,
skb_frag_page(&sinfo->frags[i]), napi); skb_frag_page(&sinfo->frags[i]), true);
page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data), page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
sync_len, napi); sync_len, true);
} }
static int static int
...@@ -2179,6 +2179,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, ...@@ -2179,6 +2179,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
struct bpf_prog *prog, struct xdp_buff *xdp, struct bpf_prog *prog, struct xdp_buff *xdp,
u32 frame_sz, struct mvneta_stats *stats) u32 frame_sz, struct mvneta_stats *stats)
{ {
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
unsigned int len, data_len, sync; unsigned int len, data_len, sync;
u32 ret, act; u32 ret, act;
...@@ -2199,7 +2200,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, ...@@ -2199,7 +2200,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
err = xdp_do_redirect(pp->dev, xdp, prog); err = xdp_do_redirect(pp->dev, xdp, prog);
if (unlikely(err)) { if (unlikely(err)) {
mvneta_xdp_put_buff(pp, rxq, xdp, sync, true); mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
ret = MVNETA_XDP_DROPPED; ret = MVNETA_XDP_DROPPED;
} else { } else {
ret = MVNETA_XDP_REDIR; ret = MVNETA_XDP_REDIR;
...@@ -2210,7 +2211,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, ...@@ -2210,7 +2211,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
case XDP_TX: case XDP_TX:
ret = mvneta_xdp_xmit_back(pp, xdp); ret = mvneta_xdp_xmit_back(pp, xdp);
if (ret != MVNETA_XDP_TX) if (ret != MVNETA_XDP_TX)
mvneta_xdp_put_buff(pp, rxq, xdp, sync, true); mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
break; break;
default: default:
bpf_warn_invalid_xdp_action(act); bpf_warn_invalid_xdp_action(act);
...@@ -2219,7 +2220,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, ...@@ -2219,7 +2220,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
trace_xdp_exception(pp->dev, prog, act); trace_xdp_exception(pp->dev, prog, act);
fallthrough; fallthrough;
case XDP_DROP: case XDP_DROP:
mvneta_xdp_put_buff(pp, rxq, xdp, sync, true); mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
ret = MVNETA_XDP_DROPPED; ret = MVNETA_XDP_DROPPED;
stats->xdp_drop++; stats->xdp_drop++;
break; break;
...@@ -2277,9 +2278,9 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, ...@@ -2277,9 +2278,9 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
struct mvneta_rx_desc *rx_desc, struct mvneta_rx_desc *rx_desc,
struct mvneta_rx_queue *rxq, struct mvneta_rx_queue *rxq,
struct xdp_buff *xdp, int *size, struct xdp_buff *xdp, int *size,
struct skb_shared_info *xdp_sinfo,
struct page *page) struct page *page)
{ {
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
struct net_device *dev = pp->dev; struct net_device *dev = pp->dev;
enum dma_data_direction dma_dir; enum dma_data_direction dma_dir;
int data_len, len; int data_len, len;
...@@ -2297,13 +2298,22 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, ...@@ -2297,13 +2298,22 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
len, dma_dir); len, dma_dir);
rx_desc->buf_phys_addr = 0; rx_desc->buf_phys_addr = 0;
if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) { if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) {
skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags]; skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++];
skb_frag_off_set(frag, pp->rx_offset_correction); skb_frag_off_set(frag, pp->rx_offset_correction);
skb_frag_size_set(frag, data_len); skb_frag_size_set(frag, data_len);
__skb_frag_set_page(frag, page); __skb_frag_set_page(frag, page);
sinfo->nr_frags++;
/* last fragment */
if (len == *size) {
struct skb_shared_info *sinfo;
sinfo = xdp_get_shared_info_from_buff(xdp);
sinfo->nr_frags = xdp_sinfo->nr_frags;
memcpy(sinfo->frags, xdp_sinfo->frags,
sinfo->nr_frags * sizeof(skb_frag_t));
}
} else { } else {
page_pool_put_full_page(rxq->page_pool, page, true); page_pool_put_full_page(rxq->page_pool, page, true);
} }
...@@ -2347,13 +2357,17 @@ static int mvneta_rx_swbm(struct napi_struct *napi, ...@@ -2347,13 +2357,17 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
{ {
int rx_proc = 0, rx_todo, refill, size = 0; int rx_proc = 0, rx_todo, refill, size = 0;
struct net_device *dev = pp->dev; struct net_device *dev = pp->dev;
struct xdp_buff xdp_buf = { struct skb_shared_info sinfo;
.frame_sz = PAGE_SIZE,
.rxq = &rxq->xdp_rxq,
};
struct mvneta_stats ps = {}; struct mvneta_stats ps = {};
struct bpf_prog *xdp_prog; struct bpf_prog *xdp_prog;
u32 desc_status, frame_sz; u32 desc_status, frame_sz;
struct xdp_buff xdp_buf;
xdp_buf.data_hard_start = NULL;
xdp_buf.frame_sz = PAGE_SIZE;
xdp_buf.rxq = &rxq->xdp_rxq;
sinfo.nr_frags = 0;
/* Get number of received packets */ /* Get number of received packets */
rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq); rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
...@@ -2393,11 +2407,11 @@ static int mvneta_rx_swbm(struct napi_struct *napi, ...@@ -2393,11 +2407,11 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
rx_desc->buf_phys_addr = 0; rx_desc->buf_phys_addr = 0;
page_pool_put_full_page(rxq->page_pool, page, page_pool_put_full_page(rxq->page_pool, page,
true); true);
continue; goto next;
} }
mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf, mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
&size, page); &size, &sinfo, page);
} /* Middle or Last descriptor */ } /* Middle or Last descriptor */
if (!(rx_status & MVNETA_RXD_LAST_DESC)) if (!(rx_status & MVNETA_RXD_LAST_DESC))
...@@ -2405,7 +2419,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi, ...@@ -2405,7 +2419,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
continue; continue;
if (size) { if (size) {
mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true); mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
goto next; goto next;
} }
...@@ -2417,7 +2431,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi, ...@@ -2417,7 +2431,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
if (IS_ERR(skb)) { if (IS_ERR(skb)) {
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true); mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
u64_stats_update_begin(&stats->syncp); u64_stats_update_begin(&stats->syncp);
stats->es.skb_alloc_error++; stats->es.skb_alloc_error++;
...@@ -2434,11 +2448,12 @@ static int mvneta_rx_swbm(struct napi_struct *napi, ...@@ -2434,11 +2448,12 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
napi_gro_receive(napi, skb); napi_gro_receive(napi, skb);
next: next:
xdp_buf.data_hard_start = NULL; xdp_buf.data_hard_start = NULL;
sinfo.nr_frags = 0;
} }
rcu_read_unlock(); rcu_read_unlock();
if (xdp_buf.data_hard_start) if (xdp_buf.data_hard_start)
mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true); mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
if (ps.xdp_redirect) if (ps.xdp_redirect)
xdp_do_flush_map(); xdp_do_flush_map();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment