Commit 74a1c059 authored by Ioana Ciornei, committed by David S. Miller

dpaa2-eth: add bulking to XDP_TX

Add driver-level bulking to the XDP_TX action.

An array of frame descriptors is held for each Tx frame queue and is
populated whenever the XDP program returns the XDP_TX action. The frames
are actually enqueued only once the array fills up. At the end of the
NAPI cycle, a flush is performed on the queued frames so that any
remaining FDs are enqueued as well.
Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6f42a293
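
The bulking scheme described above can be summed up in a short, self-contained
sketch (not part of the patch; the names bulk_fd, tx_bulk, hw_enqueue and
BULK_SIZE are illustrative stand-ins, with BULK_SIZE playing the role of
DEV_MAP_BULK_SIZE): frames are staged into a fixed-size per-queue array,
enqueued only when the array fills, and flushed once more at the end of the
poll cycle.

#include <stdio.h>
#include <string.h>

#define BULK_SIZE 16                      /* stand-in for DEV_MAP_BULK_SIZE */

struct bulk_fd { unsigned int len; };     /* stand-in for struct dpaa2_fd */

struct tx_bulk {
	struct bulk_fd fds[BULK_SIZE];
	int num;
};

/* Pretend hardware enqueue: report how many FDs were accepted. */
static int hw_enqueue(const struct bulk_fd *fds, int num)
{
	(void)fds;
	return num;
}

/* Push out whatever has been queued so far and reset the array. */
static void bulk_flush(struct tx_bulk *b)
{
	int enqueued = hw_enqueue(b->fds, b->num);

	printf("flushed %d/%d FDs\n", enqueued, b->num);
	b->num = 0;
}

/* XDP_TX path: stage the FD; enqueue only once the array is full. */
static void bulk_tx(struct tx_bulk *b, const struct bulk_fd *fd)
{
	memcpy(&b->fds[b->num++], fd, sizeof(*fd));
	if (b->num == BULK_SIZE)
		bulk_flush(b);
}

int main(void)
{
	struct tx_bulk b = { .num = 0 };
	struct bulk_fd fd = { .len = 64 };
	int i;

	for (i = 0; i < 40; i++)          /* 40 frames -> two full flushes */
		bulk_tx(&b, &fd);
	if (b.num)                        /* end-of-poll flush of the remainder */
		bulk_flush(&b);
	return 0;
}

In the driver the same pattern is applied per Tx frame queue, with the actual
enqueue going through dpaa2_eth_xdp_flush(), as shown in the diff below.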
@@ -273,13 +273,43 @@ static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
 	return total_enqueued;
 }
 
-static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd,
-		       void *buf_start, u16 queue_id)
+static void xdp_tx_flush(struct dpaa2_eth_priv *priv,
+			 struct dpaa2_eth_channel *ch,
+			 struct dpaa2_eth_fq *fq)
+{
+	struct rtnl_link_stats64 *percpu_stats;
+	struct dpaa2_fd *fds;
+	int enqueued, i;
+
+	percpu_stats = this_cpu_ptr(priv->percpu_stats);
+
+	// enqueue the array of XDP_TX frames
+	enqueued = dpaa2_eth_xdp_flush(priv, fq, &fq->xdp_tx_fds);
+
+	/* update statistics */
+	percpu_stats->tx_packets += enqueued;
+	fds = fq->xdp_tx_fds.fds;
+	for (i = 0; i < enqueued; i++) {
+		percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
+		ch->stats.xdp_tx++;
+	}
+	for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {
+		xdp_release_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
+		percpu_stats->tx_errors++;
+		ch->stats.xdp_tx_err++;
+	}
+	fq->xdp_tx_fds.num = 0;
+}
+
+static void xdp_enqueue(struct dpaa2_eth_priv *priv,
+			struct dpaa2_eth_channel *ch,
+			struct dpaa2_fd *fd,
+			void *buf_start, u16 queue_id)
 {
-	struct dpaa2_eth_fq *fq;
 	struct dpaa2_faead *faead;
+	struct dpaa2_fd *dest_fd;
+	struct dpaa2_eth_fq *fq;
 	u32 ctrl, frc;
-	int i, err;
 
 	/* Mark the egress frame hardware annotation area as valid */
 	frc = dpaa2_fd_get_frc(fd);
@@ -296,13 +326,13 @@ static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd,
 	faead->conf_fqid = 0;
 
 	fq = &priv->fq[queue_id];
-	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
-		err = priv->enqueue(priv, fq, fd, 0, 1, NULL);
-		if (err != -EBUSY)
-			break;
-	}
+	dest_fd = &fq->xdp_tx_fds.fds[fq->xdp_tx_fds.num++];
+	memcpy(dest_fd, fd, sizeof(*dest_fd));
 
-	return err;
+	if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE)
+		return;
+
+	xdp_tx_flush(priv, ch, fq);
 }
 
 static u32 run_xdp(struct dpaa2_eth_priv *priv,
@@ -311,14 +341,11 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
 		   struct dpaa2_fd *fd, void *vaddr)
 {
 	dma_addr_t addr = dpaa2_fd_get_addr(fd);
-	struct rtnl_link_stats64 *percpu_stats;
 	struct bpf_prog *xdp_prog;
 	struct xdp_buff xdp;
 	u32 xdp_act = XDP_PASS;
 	int err;
 
-	percpu_stats = this_cpu_ptr(priv->percpu_stats);
-
 	rcu_read_lock();
 
 	xdp_prog = READ_ONCE(ch->xdp.prog);
@@ -344,16 +371,7 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
 	case XDP_PASS:
 		break;
 	case XDP_TX:
-		err = xdp_enqueue(priv, fd, vaddr, rx_fq->flowid);
-		if (err) {
-			xdp_release_buf(priv, ch, addr);
-			percpu_stats->tx_errors++;
-			ch->stats.xdp_tx_err++;
-		} else {
-			percpu_stats->tx_packets++;
-			percpu_stats->tx_bytes += dpaa2_fd_get_len(fd);
-			ch->stats.xdp_tx++;
-		}
+		xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(xdp_act);
@@ -1175,6 +1193,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
 	int store_cleaned, work_done;
 	struct list_head rx_list;
 	int retries = 0;
+	u16 flowid;
 	int err;
 
 	ch = container_of(napi, struct dpaa2_eth_channel, napi);
@@ -1197,6 +1216,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
 			break;
 		if (fq->type == DPAA2_RX_FQ) {
 			rx_cleaned += store_cleaned;
+			flowid = fq->flowid;
 		} else {
 			txconf_cleaned += store_cleaned;
 			/* We have a single Tx conf FQ on this channel */
@@ -1239,6 +1259,8 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
 
 	if (ch->xdp.res & XDP_REDIRECT)
 		xdp_do_flush_map();
+	else if (rx_cleaned && ch->xdp.res & XDP_TX)
+		xdp_tx_flush(priv, ch, &priv->fq[flowid]);
 
 	return work_done;
 }
...
@@ -334,6 +334,7 @@ struct dpaa2_eth_fq {
 	struct dpaa2_eth_fq_stats stats;
 
 	struct dpaa2_eth_xdp_fds xdp_redirect_fds;
+	struct dpaa2_eth_xdp_fds xdp_tx_fds;
 };
 
 struct dpaa2_eth_ch_xdp {
...