Commit 2681128f authored by Eric Dumazet's avatar Eric Dumazet Committed by David S. Miller

veth: reduce stat overhead

veth stats are a bit bloated. There is no need to account transmit
and receive stats, since they are absolutely symmetric.

Also use a per device atomic64_t for the dropped counter, as it
should never be used in fast path.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4cafe373
...@@ -25,18 +25,15 @@ ...@@ -25,18 +25,15 @@
#define MIN_MTU 68 /* Min L3 MTU */ #define MIN_MTU 68 /* Min L3 MTU */
#define MAX_MTU 65535 /* Max L3 MTU (arbitrary) */ #define MAX_MTU 65535 /* Max L3 MTU (arbitrary) */
/* Per-cpu tx counters. Since a veth pair is symmetric, only the transmit
 * side is accounted; the peer's rx stats are read from here.
 */
struct pcpu_vstats {
	u64			packets;	/* frames successfully forwarded */
	u64			bytes;		/* bytes successfully forwarded */
	struct u64_stats_sync	syncp;		/* 64-bit consistency on 32-bit hosts */
};
struct veth_priv { struct veth_priv {
struct net_device *peer; struct net_device *peer;
struct veth_net_stats __percpu *stats; atomic64_t dropped;
}; };
/* /*
...@@ -107,50 +104,30 @@ static const struct ethtool_ops veth_ethtool_ops = { ...@@ -107,50 +104,30 @@ static const struct ethtool_ops veth_ethtool_ops = {
.get_ethtool_stats = veth_get_ethtool_stats, .get_ethtool_stats = veth_get_ethtool_stats,
}; };
/*
* xmit
*/
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{ {
struct net_device *rcv = NULL; struct veth_priv *priv = netdev_priv(dev);
struct veth_priv *priv, *rcv_priv; struct net_device *rcv = priv->peer;
struct veth_net_stats *stats, *rcv_stats; int length = skb->len;
int length;
priv = netdev_priv(dev);
rcv = priv->peer;
rcv_priv = netdev_priv(rcv);
stats = this_cpu_ptr(priv->stats);
rcv_stats = this_cpu_ptr(rcv_priv->stats);
/* don't change ip_summed == CHECKSUM_PARTIAL, as that /* don't change ip_summed == CHECKSUM_PARTIAL, as that
will cause bad checksum on forwarded packets */ * will cause bad checksum on forwarded packets
*/
if (skb->ip_summed == CHECKSUM_NONE && if (skb->ip_summed == CHECKSUM_NONE &&
rcv->features & NETIF_F_RXCSUM) rcv->features & NETIF_F_RXCSUM)
skb->ip_summed = CHECKSUM_UNNECESSARY; skb->ip_summed = CHECKSUM_UNNECESSARY;
length = skb->len; if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) {
if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS) struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats);
goto rx_drop;
u64_stats_update_begin(&stats->syncp); u64_stats_update_begin(&stats->syncp);
stats->tx_bytes += length; stats->bytes += length;
stats->tx_packets++; stats->packets++;
u64_stats_update_end(&stats->syncp); u64_stats_update_end(&stats->syncp);
} else {
atomic64_inc(&priv->dropped);
}
u64_stats_update_begin(&rcv_stats->syncp);
rcv_stats->rx_bytes += length;
rcv_stats->rx_packets++;
u64_stats_update_end(&rcv_stats->syncp);
return NETDEV_TX_OK;
rx_drop:
u64_stats_update_begin(&rcv_stats->syncp);
rcv_stats->rx_dropped++;
u64_stats_update_end(&rcv_stats->syncp);
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
...@@ -158,32 +135,42 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -158,32 +135,42 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
* general routines * general routines
*/ */
static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev, static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev)
struct rtnl_link_stats64 *tot)
{ {
struct veth_priv *priv = netdev_priv(dev); struct veth_priv *priv = netdev_priv(dev);
int cpu; int cpu;
result->packets = 0;
result->bytes = 0;
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
struct veth_net_stats *stats = per_cpu_ptr(priv->stats, cpu); struct pcpu_vstats *stats = per_cpu_ptr(dev->vstats, cpu);
u64 rx_packets, rx_bytes, rx_dropped; u64 packets, bytes;
u64 tx_packets, tx_bytes;
unsigned int start; unsigned int start;
do { do {
start = u64_stats_fetch_begin_bh(&stats->syncp); start = u64_stats_fetch_begin_bh(&stats->syncp);
rx_packets = stats->rx_packets; packets = stats->packets;
tx_packets = stats->tx_packets; bytes = stats->bytes;
rx_bytes = stats->rx_bytes;
tx_bytes = stats->tx_bytes;
rx_dropped = stats->rx_dropped;
} while (u64_stats_fetch_retry_bh(&stats->syncp, start)); } while (u64_stats_fetch_retry_bh(&stats->syncp, start));
tot->rx_packets += rx_packets; result->packets += packets;
tot->tx_packets += tx_packets; result->bytes += bytes;
tot->rx_bytes += rx_bytes;
tot->tx_bytes += tx_bytes;
tot->rx_dropped += rx_dropped;
} }
return atomic64_read(&priv->dropped);
}
/*
 * veth_get_stats64 - fill rtnl stats for one end of the pair.
 * @dev: device being queried
 * @tot: stats structure to fill
 *
 * Tx numbers come from this device's own counters; rx numbers are the
 * peer's tx counters, exploiting the pair's symmetry so rx is never
 * accounted separately.
 *
 * Returns @tot for the ndo_get_stats64 convention.
 */
static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev,
						  struct rtnl_link_stats64 *tot)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct pcpu_vstats one;

	tot->tx_dropped = veth_stats_one(&one, dev);
	tot->tx_bytes = one.bytes;
	tot->tx_packets = one.packets;

	tot->rx_dropped = veth_stats_one(&one, priv->peer);
	tot->rx_bytes = one.bytes;
	tot->rx_packets = one.packets;

	return tot;
}
...@@ -228,24 +215,16 @@ static int veth_change_mtu(struct net_device *dev, int new_mtu) ...@@ -228,24 +215,16 @@ static int veth_change_mtu(struct net_device *dev, int new_mtu)
static int veth_dev_init(struct net_device *dev) static int veth_dev_init(struct net_device *dev)
{ {
struct veth_net_stats __percpu *stats; dev->vstats = alloc_percpu(struct pcpu_vstats);
struct veth_priv *priv; if (!dev->vstats)
stats = alloc_percpu(struct veth_net_stats);
if (stats == NULL)
return -ENOMEM; return -ENOMEM;
priv = netdev_priv(dev);
priv->stats = stats;
return 0; return 0;
} }
static void veth_dev_free(struct net_device *dev) static void veth_dev_free(struct net_device *dev)
{ {
struct veth_priv *priv; free_percpu(dev->vstats);
priv = netdev_priv(dev);
free_percpu(priv->stats);
free_netdev(dev); free_netdev(dev);
} }
......
...@@ -1284,6 +1284,7 @@ struct net_device { ...@@ -1284,6 +1284,7 @@ struct net_device {
struct pcpu_lstats __percpu *lstats; /* loopback stats */ struct pcpu_lstats __percpu *lstats; /* loopback stats */
struct pcpu_tstats __percpu *tstats; /* tunnel stats */ struct pcpu_tstats __percpu *tstats; /* tunnel stats */
struct pcpu_dstats __percpu *dstats; /* dummy stats */ struct pcpu_dstats __percpu *dstats; /* dummy stats */
struct pcpu_vstats __percpu *vstats; /* veth stats */
}; };
/* GARP */ /* GARP */
struct garp_port __rcu *garp_port; struct garp_port __rcu *garp_port;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.