Commit 79e0c5be authored by Daniel Borkmann, committed by Martin KaFai Lau

net, vrf: Move dstats structure to core

Just move struct pcpu_dstats out of the vrf into the core, and streamline
the field names slightly, so they better align with the {t,l}stats ones.

No functional change otherwise. A conversion of the u64s to u64_stats_t
could be done at a separate point in future. This move is needed as we are
moving the {t,l,d}stats allocation/freeing to the core.
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Nikolay Aleksandrov <razor@blackwall.org>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: David Ahern <dsahern@kernel.org>
Link: https://lore.kernel.org/r/20231114004220.6495-2-daniel@iogearbox.net
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
parent 76df934c
drivers/net/vrf.c
@@ -121,22 +121,12 @@ struct net_vrf {
 	int			ifindex;
 };
 
-struct pcpu_dstats {
-	u64			tx_pkts;
-	u64			tx_bytes;
-	u64			tx_drps;
-	u64			rx_pkts;
-	u64			rx_bytes;
-	u64			rx_drps;
-	struct u64_stats_sync	syncp;
-};
-
 static void vrf_rx_stats(struct net_device *dev, int len)
 {
 	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
 
 	u64_stats_update_begin(&dstats->syncp);
-	dstats->rx_pkts++;
+	dstats->rx_packets++;
 	dstats->rx_bytes += len;
 	u64_stats_update_end(&dstats->syncp);
 }
@@ -161,10 +151,10 @@ static void vrf_get_stats64(struct net_device *dev,
 		do {
 			start = u64_stats_fetch_begin(&dstats->syncp);
 			tbytes = dstats->tx_bytes;
-			tpkts = dstats->tx_pkts;
-			tdrops = dstats->tx_drps;
+			tpkts = dstats->tx_packets;
+			tdrops = dstats->tx_drops;
 			rbytes = dstats->rx_bytes;
-			rpkts = dstats->rx_pkts;
+			rpkts = dstats->rx_packets;
 		} while (u64_stats_fetch_retry(&dstats->syncp, start));
 		stats->tx_bytes += tbytes;
 		stats->tx_packets += tpkts;
@@ -421,7 +411,7 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
 	if (likely(__netif_rx(skb) == NET_RX_SUCCESS))
 		vrf_rx_stats(dev, len);
 	else
-		this_cpu_inc(dev->dstats->rx_drps);
+		this_cpu_inc(dev->dstats->rx_drops);
 
 	return NETDEV_TX_OK;
 }
@@ -616,11 +606,11 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
 		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
 
 		u64_stats_update_begin(&dstats->syncp);
-		dstats->tx_pkts++;
+		dstats->tx_packets++;
 		dstats->tx_bytes += len;
 		u64_stats_update_end(&dstats->syncp);
 	} else {
-		this_cpu_inc(dev->dstats->tx_drps);
+		this_cpu_inc(dev->dstats->tx_drops);
 	}
 
 	return ret;
include/linux/netdevice.h
@@ -2755,6 +2755,16 @@ struct pcpu_sw_netstats {
 	struct u64_stats_sync	syncp;
 } __aligned(4 * sizeof(u64));
 
+struct pcpu_dstats {
+	u64			rx_packets;
+	u64			rx_bytes;
+	u64			rx_drops;
+	u64			tx_packets;
+	u64			tx_bytes;
+	u64			tx_drops;
+	struct u64_stats_sync	syncp;
+} __aligned(8 * sizeof(u64));
+
 struct pcpu_lstats {
 	u64_stats_t packets;
 	u64_stats_t bytes;
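
The commit message notes that a conversion of the u64 counters to u64_stats_t could be done separately later. As a rough, hypothetical sketch (not part of this commit), the core structure and a writer such as vrf_rx_stats() might then look as follows, mirroring how struct pcpu_lstats already uses u64_stats_t:

/* Hypothetical follow-up, not in this commit: pcpu_dstats with u64_stats_t. */
struct pcpu_dstats {
	u64_stats_t		rx_packets;
	u64_stats_t		rx_bytes;
	u64_stats_t		rx_drops;
	u64_stats_t		tx_packets;
	u64_stats_t		tx_bytes;
	u64_stats_t		tx_drops;
	struct u64_stats_sync	syncp;
} __aligned(8 * sizeof(u64));

/* Writers would then bump counters via the u64_stats helpers. */
static void vrf_rx_stats(struct net_device *dev, int len)
{
	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

	u64_stats_update_begin(&dstats->syncp);
	u64_stats_inc(&dstats->rx_packets);
	u64_stats_add(&dstats->rx_bytes, len);
	u64_stats_update_end(&dstats->syncp);
}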