Commit 2131479d authored by Toshiaki Makita, committed by David S. Miller

veth: Account for packet drops in ndo_xdp_xmit

Use the existing atomic drop counter. Since the drop path is really an
exceptional case here, atomic ops should not hurt performance.
XDP packets and bytes are not counted in ndo_xdp_xmit, but will be
accounted for on the rx side by the following commit.
Signed-off-by: Toshiaki Makita <makita.toshiaki@lab.ntt.co.jp>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent acad76a5
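
Note on the approach: the "existing atomic drop counter" referred to above is the per-device atomic64_t that veth keeps in its private struct and later folds into the device statistics. The fragment below is only an illustrative sketch of that pattern, assuming made-up names (demo_priv, demo_count_drops, demo_get_stats64); it is not the drivers/net/veth.c code itself, and only the atomic64_* helpers and the stats-callback shape are real kernel API.

#include <linux/atomic.h>
#include <linux/netdevice.h>

/* Illustrative private state: one lock-free drop counter per device. */
struct demo_priv {
	atomic64_t dropped;
};

/* Exceptional path: count the frames that had to be thrown away.
 * A single atomic64_add() per burst keeps the cost negligible,
 * since the fast path never touches the counter. */
static void demo_count_drops(struct net_device *dev, int drops)
{
	struct demo_priv *priv = netdev_priv(dev);

	if (drops)
		atomic64_add(drops, &priv->dropped);
}

/* Stats side: the counter is read without taking any lock.
 * Which stats field it feeds is an assumption for this sketch. */
static void demo_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct demo_priv *priv = netdev_priv(dev);

	stats->tx_dropped = atomic64_read(&priv->dropped);
}

This also makes the hedge in the commit message concrete: the atomic64_add() only runs on the drop path, so the common case pays nothing for the accounting.
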
@@ -308,16 +308,20 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
 {
 	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
 	struct net_device *rcv;
+	int i, ret, drops = n;
 	unsigned int max_len;
 	struct veth_rq *rq;
-	int i, drops = 0;
 
-	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
-		return -EINVAL;
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
+		ret = -EINVAL;
+		goto drop;
+	}
 
 	rcv = rcu_dereference(priv->peer);
-	if (unlikely(!rcv))
-		return -ENXIO;
+	if (unlikely(!rcv)) {
+		ret = -ENXIO;
+		goto drop;
+	}
 
 	rcv_priv = netdev_priv(rcv);
 	rq = &rcv_priv->rq[veth_select_rxq(rcv)];
@@ -325,9 +329,12 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
 	 * side. This means an XDP program is loaded on the peer and the peer
 	 * device is up.
 	 */
-	if (!rcu_access_pointer(rq->xdp_prog))
-		return -ENXIO;
+	if (!rcu_access_pointer(rq->xdp_prog)) {
+		ret = -ENXIO;
+		goto drop;
+	}
 
+	drops = 0;
 	max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;
 
 	spin_lock(&rq->xdp_ring.producer_lock);
@@ -346,7 +353,14 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
 	if (flags & XDP_XMIT_FLUSH)
 		__veth_xdp_flush(rq);
 
-	return n - drops;
+	if (likely(!drops))
+		return n;
+
+	ret = n - drops;
+drop:
+	atomic64_add(drops, &priv->dropped);
+
+	return ret;
 }
 
 static void veth_xdp_flush(struct net_device *dev)
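
For completeness, a hedged sketch of how a redirect-side caller typically consumes this return value under the ndo_xdp_xmit contract of that era: a negative errno (the goto drop paths taken before the ring is touched) means no frame was queued and the caller frees the whole bundle, while a non-negative return is n - drops, with the dropped frames already freed and, after this patch, counted by the driver. The helper below and its name are illustrative and not part of this commit.

#include <linux/netdevice.h>
#include <net/xdp.h>

/* Illustrative caller: hand a bundle of XDP frames to a device and
 * clean up according to the driver's return value. */
static int demo_xmit_bundle(struct net_device *dev,
			    struct xdp_frame **frames, int n)
{
	int sent, i;

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, n, frames, XDP_XMIT_FLUSH);
	if (sent < 0) {
		/* e.g. -EINVAL/-ENXIO from the error paths above:
		 * nothing was queued, so the caller frees everything. */
		for (i = 0; i < n; i++)
			xdp_return_frame_rx_napi(frames[i]);
		return sent;
	}

	/* n - drops: the driver freed and accounted the dropped frames. */
	return sent;
}
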