Commit 4195e54a authored by Toshiaki Makita, committed by David S. Miller

veth: Account for XDP packet statistics on rx side

On XDP path veth has napi handler so we can collect statistics on
per-queue basis for XDP.

By this change now we can collect XDP_DROP drop count as well as packets
and bytes coming through ndo_xdp_xmit. Packet counters shown by
"ip -s link", sysfs stats or /proc/net/dev are now correct for XDP.
Signed-off-by: Toshiaki Makita <makita.toshiaki@lab.ntt.co.jp>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2131479d
...@@ -37,11 +37,19 @@ ...@@ -37,11 +37,19 @@
#define VETH_XDP_TX BIT(0) #define VETH_XDP_TX BIT(0)
#define VETH_XDP_REDIR BIT(1) #define VETH_XDP_REDIR BIT(1)
/* Per-rx-queue statistics for the XDP/napi path.
 *
 * Counters are updated from the napi handler (see the commit message:
 * XDP_DROP drops plus packets/bytes arriving through ndo_xdp_xmit) and
 * read by the stats aggregation code. syncp implements the u64_stats
 * fetch/retry protocol so the u64 counters can be read consistently,
 * which matters on 32-bit architectures where a u64 load is not atomic.
 */
struct veth_rq_stats {
	u64 xdp_packets;	/* frames seen on the XDP/napi rx path */
	u64 xdp_bytes;		/* bytes seen on the XDP/napi rx path */
	u64 xdp_drops;		/* frames dropped on the XDP rx path (XDP_DROP etc.) */
	struct u64_stats_sync syncp;	/* guards consistent reads of the counters above */
};
struct veth_rq { struct veth_rq {
struct napi_struct xdp_napi; struct napi_struct xdp_napi;
struct net_device *dev; struct net_device *dev;
struct bpf_prog __rcu *xdp_prog; struct bpf_prog __rcu *xdp_prog;
struct xdp_mem_info xdp_mem; struct xdp_mem_info xdp_mem;
struct veth_rq_stats stats;
bool rx_notify_masked; bool rx_notify_masked;
struct ptr_ring xdp_ring; struct ptr_ring xdp_ring;
struct xdp_rxq_info xdp_rxq; struct xdp_rxq_info xdp_rxq;
...@@ -211,12 +219,14 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -211,12 +219,14 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb); skb_tx_timestamp(skb);
if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) { if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
if (!rcv_xdp) {
struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats); struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);
u64_stats_update_begin(&stats->syncp); u64_stats_update_begin(&stats->syncp);
stats->bytes += length; stats->bytes += length;
stats->packets++; stats->packets++;
u64_stats_update_end(&stats->syncp); u64_stats_update_end(&stats->syncp);
}
} else { } else {
drop: drop:
atomic64_inc(&priv->dropped); atomic64_inc(&priv->dropped);
...@@ -230,7 +240,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -230,7 +240,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
static u64 veth_stats_one(struct pcpu_lstats *result, struct net_device *dev) static u64 veth_stats_tx(struct pcpu_lstats *result, struct net_device *dev)
{ {
struct veth_priv *priv = netdev_priv(dev); struct veth_priv *priv = netdev_priv(dev);
int cpu; int cpu;
...@@ -253,23 +263,58 @@ static u64 veth_stats_one(struct pcpu_lstats *result, struct net_device *dev) ...@@ -253,23 +263,58 @@ static u64 veth_stats_one(struct pcpu_lstats *result, struct net_device *dev)
return atomic64_read(&priv->dropped); return atomic64_read(&priv->dropped);
} }
/* Aggregate the per-queue XDP rx counters of @dev into @result.
 *
 * Each queue's counters are sampled with the u64_stats fetch/retry
 * protocol so a consistent snapshot is obtained even while the napi
 * handler is concurrently updating them. @result->syncp is untouched;
 * only the three counter fields are written.
 */
static void veth_stats_rx(struct veth_rq_stats *result, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	u64 total_packets = 0;
	u64 total_bytes = 0;
	u64 total_drops = 0;
	int qidx;

	for (qidx = 0; qidx < dev->num_rx_queues; qidx++) {
		struct veth_rq_stats *stats = &priv->rq[qidx].stats;
		unsigned int start;
		u64 packets, bytes, drops;

		/* Retry until a consistent snapshot of this queue is read. */
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->xdp_packets;
			bytes = stats->xdp_bytes;
			drops = stats->xdp_drops;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		total_packets += packets;
		total_bytes += bytes;
		total_drops += drops;
	}

	result->xdp_packets = total_packets;
	result->xdp_bytes = total_bytes;
	result->xdp_drops = total_drops;
}
static void veth_get_stats64(struct net_device *dev, static void veth_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *tot) struct rtnl_link_stats64 *tot)
{ {
struct veth_priv *priv = netdev_priv(dev); struct veth_priv *priv = netdev_priv(dev);
struct net_device *peer; struct net_device *peer;
struct pcpu_lstats one; struct veth_rq_stats rx;
struct pcpu_lstats tx;
tot->tx_dropped = veth_stats_tx(&tx, dev);
tot->tx_bytes = tx.bytes;
tot->tx_packets = tx.packets;
tot->tx_dropped = veth_stats_one(&one, dev); veth_stats_rx(&rx, dev);
tot->tx_bytes = one.bytes; tot->rx_dropped = rx.xdp_drops;
tot->tx_packets = one.packets; tot->rx_bytes = rx.xdp_bytes;
tot->rx_packets = rx.xdp_packets;
rcu_read_lock(); rcu_read_lock();
peer = rcu_dereference(priv->peer); peer = rcu_dereference(priv->peer);
if (peer) { if (peer) {
tot->rx_dropped = veth_stats_one(&one, peer); tot->rx_dropped += veth_stats_tx(&tx, peer);
tot->rx_bytes = one.bytes; tot->rx_bytes += tx.bytes;
tot->rx_packets = one.packets; tot->rx_packets += tx.packets;
veth_stats_rx(&rx, peer);
tot->tx_bytes += rx.xdp_bytes;
tot->tx_packets += rx.xdp_packets;
} }
rcu_read_unlock(); rcu_read_unlock();
} }
...@@ -609,28 +654,42 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb, ...@@ -609,28 +654,42 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
static int veth_xdp_rcv(struct veth_rq *rq, int budget, unsigned int *xdp_xmit) static int veth_xdp_rcv(struct veth_rq *rq, int budget, unsigned int *xdp_xmit)
{ {
int i, done = 0; int i, done = 0, drops = 0, bytes = 0;
for (i = 0; i < budget; i++) { for (i = 0; i < budget; i++) {
void *ptr = __ptr_ring_consume(&rq->xdp_ring); void *ptr = __ptr_ring_consume(&rq->xdp_ring);
unsigned int xdp_xmit_one = 0;
struct sk_buff *skb; struct sk_buff *skb;
if (!ptr) if (!ptr)
break; break;
if (veth_is_xdp_frame(ptr)) { if (veth_is_xdp_frame(ptr)) {
skb = veth_xdp_rcv_one(rq, veth_ptr_to_xdp(ptr), struct xdp_frame *frame = veth_ptr_to_xdp(ptr);
xdp_xmit);
bytes += frame->len;
skb = veth_xdp_rcv_one(rq, frame, &xdp_xmit_one);
} else { } else {
skb = veth_xdp_rcv_skb(rq, ptr, xdp_xmit); skb = ptr;
bytes += skb->len;
skb = veth_xdp_rcv_skb(rq, skb, &xdp_xmit_one);
} }
*xdp_xmit |= xdp_xmit_one;
if (skb) if (skb)
napi_gro_receive(&rq->xdp_napi, skb); napi_gro_receive(&rq->xdp_napi, skb);
else if (!xdp_xmit_one)
drops++;
done++; done++;
} }
u64_stats_update_begin(&rq->stats.syncp);
rq->stats.xdp_packets += done;
rq->stats.xdp_bytes += bytes;
rq->stats.xdp_drops += drops;
u64_stats_update_end(&rq->stats.syncp);
return done; return done;
} }
...@@ -821,8 +880,10 @@ static int veth_alloc_queues(struct net_device *dev) ...@@ -821,8 +880,10 @@ static int veth_alloc_queues(struct net_device *dev)
if (!priv->rq) if (!priv->rq)
return -ENOMEM; return -ENOMEM;
for (i = 0; i < dev->num_rx_queues; i++) for (i = 0; i < dev->num_rx_queues; i++) {
priv->rq[i].dev = dev; priv->rq[i].dev = dev;
u64_stats_init(&priv->rq[i].stats.syncp);
}
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment