Commit 6aa40b9e authored by Ioana Ciornei; committed by David S. Miller

dpaa2-eth: split the .ndo_xdp_xmit callback into two stages

Instead of having a function that both creates a frame descriptor from
an xdp_frame and enqueues it, split this into two stages.
Add the dpaa2_eth_xdp_create_fd that just transforms an xdp_frame into a
FD while the actual enqueue callback is called directly from the ndo for
each frame.
This is particularly useful in conjunction with bulk enqueue.
Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6ff80447
...@@ -1880,20 +1880,16 @@ static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp) ...@@ -1880,20 +1880,16 @@ static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
return 0; return 0;
} }
static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev, static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
struct xdp_frame *xdpf) struct xdp_frame *xdpf,
struct dpaa2_fd *fd)
{ {
struct dpaa2_eth_priv *priv = netdev_priv(net_dev); struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct device *dev = net_dev->dev.parent; struct device *dev = net_dev->dev.parent;
struct rtnl_link_stats64 *percpu_stats;
struct dpaa2_eth_drv_stats *percpu_extras;
unsigned int needed_headroom; unsigned int needed_headroom;
struct dpaa2_eth_swa *swa; struct dpaa2_eth_swa *swa;
struct dpaa2_eth_fq *fq;
struct dpaa2_fd fd;
void *buffer_start, *aligned_start; void *buffer_start, *aligned_start;
dma_addr_t addr; dma_addr_t addr;
int err, i;
/* We require a minimum headroom to be able to transmit the frame. /* We require a minimum headroom to be able to transmit the frame.
* Otherwise return an error and let the original net_device handle it * Otherwise return an error and let the original net_device handle it
...@@ -1902,11 +1898,8 @@ static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev, ...@@ -1902,11 +1898,8 @@ static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
if (xdpf->headroom < needed_headroom) if (xdpf->headroom < needed_headroom)
return -EINVAL; return -EINVAL;
percpu_stats = this_cpu_ptr(priv->percpu_stats);
percpu_extras = this_cpu_ptr(priv->percpu_extras);
/* Setup the FD fields */ /* Setup the FD fields */
memset(&fd, 0, sizeof(fd)); memset(fd, 0, sizeof(*fd));
/* Align FD address, if possible */ /* Align FD address, if possible */
buffer_start = xdpf->data - needed_headroom; buffer_start = xdpf->data - needed_headroom;
...@@ -1924,32 +1917,14 @@ static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev, ...@@ -1924,32 +1917,14 @@ static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
addr = dma_map_single(dev, buffer_start, addr = dma_map_single(dev, buffer_start,
swa->xdp.dma_size, swa->xdp.dma_size,
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, addr))) { if (unlikely(dma_mapping_error(dev, addr)))
percpu_stats->tx_dropped++;
return -ENOMEM; return -ENOMEM;
}
dpaa2_fd_set_addr(&fd, addr);
dpaa2_fd_set_offset(&fd, xdpf->data - buffer_start);
dpaa2_fd_set_len(&fd, xdpf->len);
dpaa2_fd_set_format(&fd, dpaa2_fd_single);
dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
fq = &priv->fq[smp_processor_id() % dpaa2_eth_queue_count(priv)]; dpaa2_fd_set_addr(fd, addr);
for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { dpaa2_fd_set_offset(fd, xdpf->data - buffer_start);
err = priv->enqueue(priv, fq, &fd, 0, 1, NULL); dpaa2_fd_set_len(fd, xdpf->len);
if (err != -EBUSY) dpaa2_fd_set_format(fd, dpaa2_fd_single);
break; dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
}
percpu_extras->tx_portal_busy += i;
if (unlikely(err < 0)) {
percpu_stats->tx_errors++;
/* let the Rx device handle the cleanup */
return err;
}
percpu_stats->tx_packets++;
percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
return 0; return 0;
} }
...@@ -1957,6 +1932,11 @@ static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev, ...@@ -1957,6 +1932,11 @@ static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n, static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
struct xdp_frame **frames, u32 flags) struct xdp_frame **frames, u32 flags)
{ {
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct dpaa2_eth_drv_stats *percpu_extras;
struct rtnl_link_stats64 *percpu_stats;
struct dpaa2_eth_fq *fq;
struct dpaa2_fd fd;
int drops = 0; int drops = 0;
int i, err; int i, err;
...@@ -1966,14 +1946,38 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n, ...@@ -1966,14 +1946,38 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
if (!netif_running(net_dev)) if (!netif_running(net_dev))
return -ENETDOWN; return -ENETDOWN;
percpu_stats = this_cpu_ptr(priv->percpu_stats);
percpu_extras = this_cpu_ptr(priv->percpu_extras);
for (i = 0; i < n; i++) { for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i]; struct xdp_frame *xdpf = frames[i];
err = dpaa2_eth_xdp_xmit_frame(net_dev, xdpf); /* create the FD from the xdp_frame */
err = dpaa2_eth_xdp_create_fd(net_dev, xdpf, &fd);
if (err) { if (err) {
percpu_stats->tx_dropped++;
xdp_return_frame_rx_napi(xdpf); xdp_return_frame_rx_napi(xdpf);
drops++; drops++;
continue;
}
/* enqueue the newly created FD */
fq = &priv->fq[smp_processor_id() % dpaa2_eth_queue_count(priv)];
for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
err = priv->enqueue(priv, fq, &fd, 0, 1);
if (err != -EBUSY)
break;
}
percpu_extras->tx_portal_busy += i;
if (unlikely(err < 0)) {
percpu_stats->tx_errors++;
xdp_return_frame_rx_napi(xdpf);
continue;
} }
percpu_stats->tx_packets++;
percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
} }
return n - drops; return n - drops;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment