Commit 495de55f authored by Ilias Apalodimas's avatar Ilias Apalodimas Committed by Alexei Starovoitov

net: netsec: Add support for XDP frame size

This driver takes advantage of page_pool PP_FLAG_DMA_SYNC_DEV that
can help reduce the number of cache-lines that need to be flushed
when doing DMA sync for_device. Because xdp_adjust_tail can grow the
area accessible to the CPU (which it can possibly write into), the max
sync length *after* bpf_prog_run_xdp() needs to be taken into account.

For XDP_TX action the driver is smart and does DMA-sync. When growing
tail this is still safe, because page_pool have DMA-mapped the entire
page size.
Signed-off-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Lorenzo Bianconi <lorenzo@kernel.org>
Link: https://lore.kernel.org/bpf/158945336295.97035.15034759661036971024.stgit@firesoul
parent 494f44d5
...@@ -884,23 +884,28 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog, ...@@ -884,23 +884,28 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
struct xdp_buff *xdp) struct xdp_buff *xdp)
{ {
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
unsigned int len = xdp->data_end - xdp->data; unsigned int sync, len = xdp->data_end - xdp->data;
u32 ret = NETSEC_XDP_PASS; u32 ret = NETSEC_XDP_PASS;
struct page *page;
int err; int err;
u32 act; u32 act;
act = bpf_prog_run_xdp(prog, xdp); act = bpf_prog_run_xdp(prog, xdp);
/* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */
sync = xdp->data_end - xdp->data_hard_start - NETSEC_RXBUF_HEADROOM;
sync = max(sync, len);
switch (act) { switch (act) {
case XDP_PASS: case XDP_PASS:
ret = NETSEC_XDP_PASS; ret = NETSEC_XDP_PASS;
break; break;
case XDP_TX: case XDP_TX:
ret = netsec_xdp_xmit_back(priv, xdp); ret = netsec_xdp_xmit_back(priv, xdp);
if (ret != NETSEC_XDP_TX) if (ret != NETSEC_XDP_TX) {
page_pool_put_page(dring->page_pool, page = virt_to_head_page(xdp->data);
virt_to_head_page(xdp->data), len, page_pool_put_page(dring->page_pool, page, sync, true);
true); }
break; break;
case XDP_REDIRECT: case XDP_REDIRECT:
err = xdp_do_redirect(priv->ndev, xdp, prog); err = xdp_do_redirect(priv->ndev, xdp, prog);
...@@ -908,9 +913,8 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog, ...@@ -908,9 +913,8 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
ret = NETSEC_XDP_REDIR; ret = NETSEC_XDP_REDIR;
} else { } else {
ret = NETSEC_XDP_CONSUMED; ret = NETSEC_XDP_CONSUMED;
page_pool_put_page(dring->page_pool, page = virt_to_head_page(xdp->data);
virt_to_head_page(xdp->data), len, page_pool_put_page(dring->page_pool, page, sync, true);
true);
} }
break; break;
default: default:
...@@ -921,8 +925,8 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog, ...@@ -921,8 +925,8 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
/* fall through -- handle aborts by dropping packet */ /* fall through -- handle aborts by dropping packet */
case XDP_DROP: case XDP_DROP:
ret = NETSEC_XDP_CONSUMED; ret = NETSEC_XDP_CONSUMED;
page_pool_put_page(dring->page_pool, page = virt_to_head_page(xdp->data);
virt_to_head_page(xdp->data), len, true); page_pool_put_page(dring->page_pool, page, sync, true);
break; break;
} }
...@@ -936,10 +940,14 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget) ...@@ -936,10 +940,14 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
struct netsec_rx_pkt_info rx_info; struct netsec_rx_pkt_info rx_info;
enum dma_data_direction dma_dir; enum dma_data_direction dma_dir;
struct bpf_prog *xdp_prog; struct bpf_prog *xdp_prog;
struct xdp_buff xdp;
u16 xdp_xmit = 0; u16 xdp_xmit = 0;
u32 xdp_act = 0; u32 xdp_act = 0;
int done = 0; int done = 0;
xdp.rxq = &dring->xdp_rxq;
xdp.frame_sz = PAGE_SIZE;
rcu_read_lock(); rcu_read_lock();
xdp_prog = READ_ONCE(priv->xdp_prog); xdp_prog = READ_ONCE(priv->xdp_prog);
dma_dir = page_pool_get_dma_dir(dring->page_pool); dma_dir = page_pool_get_dma_dir(dring->page_pool);
...@@ -953,7 +961,6 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget) ...@@ -953,7 +961,6 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
struct sk_buff *skb = NULL; struct sk_buff *skb = NULL;
u16 pkt_len, desc_len; u16 pkt_len, desc_len;
dma_addr_t dma_handle; dma_addr_t dma_handle;
struct xdp_buff xdp;
void *buf_addr; void *buf_addr;
if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) { if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
...@@ -1002,7 +1009,6 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget) ...@@ -1002,7 +1009,6 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
xdp.data = desc->addr + NETSEC_RXBUF_HEADROOM; xdp.data = desc->addr + NETSEC_RXBUF_HEADROOM;
xdp_set_data_meta_invalid(&xdp); xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + pkt_len; xdp.data_end = xdp.data + pkt_len;
xdp.rxq = &dring->xdp_rxq;
if (xdp_prog) { if (xdp_prog) {
xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp); xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment