Commit ca5ebbfe authored by David S. Miller

Merge branch 'net-atomic-dev-stats'

Eric Dumazet says:

====================
net: add atomic dev->stats infra

Long-standing KCSAN issues are caused by data races around
some dev->stats changes.

Most performance-critical paths already use per-cpu
variables, or per-queue ones.

It is reasonable (and more correct) to use atomic operations
for the slow paths.

The first patch adds the infrastructure, then three patches
address the most common paths that syzbot exercises.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 68d268d0 c4794d22
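
For readers new to the trick being merged: each counter is overlaid with an atomic view of the same storage, so legacy code keeps reading the plain unsigned long field while racy slow-path writers go through atomic helpers. Below is a minimal userspace sketch of the same idea, using C11 atomics as a stand-in for the kernel's atomic_long_t; all names here (demo_stats, DEMO_STAT, DEMO_STAT_INC) are illustrative, not kernel API.

    /* demo_stats.c -- illustrative only; mirrors the NET_DEV_STAT() union idea. */
    #include <stdatomic.h>
    #include <stdio.h>

    #define DEMO_STAT(FIELD)			\
    	union {					\
    		unsigned long FIELD;		\
    		atomic_ulong __##FIELD;		\
    	}

    struct demo_stats {
    	DEMO_STAT(rx_packets);
    	DEMO_STAT(tx_dropped);
    };

    /* Racy slow paths write through the atomic view of the same storage. */
    #define DEMO_STAT_INC(S, FIELD) \
    	atomic_fetch_add_explicit(&(S)->__##FIELD, 1, memory_order_relaxed)

    int main(void)
    {
    	struct demo_stats s = { .rx_packets = 0, .tx_dropped = 0 };

    	DEMO_STAT_INC(&s, rx_packets);
    	DEMO_STAT_INC(&s, tx_dropped);
    	/* Existing readers still see plain unsigned long fields. */
    	printf("rx_packets=%lu tx_dropped=%lu\n", s.rx_packets, s.tx_dropped);
    	return 0;
    }

Because both union members are word-sized, the two views alias the same memory; the data race KCSAN flags disappears once the slow-path writes are performed atomically.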
@@ -171,31 +171,38 @@ static inline bool dev_xmit_complete(int rc)
  *	(unsigned long) so they can be read and written atomically.
  */
 
+#define NET_DEV_STAT(FIELD)			\
+	union {					\
+		unsigned long FIELD;		\
+		atomic_long_t __##FIELD;	\
+	}
+
 struct net_device_stats {
-	unsigned long	rx_packets;
-	unsigned long	tx_packets;
-	unsigned long	rx_bytes;
-	unsigned long	tx_bytes;
-	unsigned long	rx_errors;
-	unsigned long	tx_errors;
-	unsigned long	rx_dropped;
-	unsigned long	tx_dropped;
-	unsigned long	multicast;
-	unsigned long	collisions;
-	unsigned long	rx_length_errors;
-	unsigned long	rx_over_errors;
-	unsigned long	rx_crc_errors;
-	unsigned long	rx_frame_errors;
-	unsigned long	rx_fifo_errors;
-	unsigned long	rx_missed_errors;
-	unsigned long	tx_aborted_errors;
-	unsigned long	tx_carrier_errors;
-	unsigned long	tx_fifo_errors;
-	unsigned long	tx_heartbeat_errors;
-	unsigned long	tx_window_errors;
-	unsigned long	rx_compressed;
-	unsigned long	tx_compressed;
+	NET_DEV_STAT(rx_packets);
+	NET_DEV_STAT(tx_packets);
+	NET_DEV_STAT(rx_bytes);
+	NET_DEV_STAT(tx_bytes);
+	NET_DEV_STAT(rx_errors);
+	NET_DEV_STAT(tx_errors);
+	NET_DEV_STAT(rx_dropped);
+	NET_DEV_STAT(tx_dropped);
+	NET_DEV_STAT(multicast);
+	NET_DEV_STAT(collisions);
+	NET_DEV_STAT(rx_length_errors);
+	NET_DEV_STAT(rx_over_errors);
+	NET_DEV_STAT(rx_crc_errors);
+	NET_DEV_STAT(rx_frame_errors);
+	NET_DEV_STAT(rx_fifo_errors);
+	NET_DEV_STAT(rx_missed_errors);
+	NET_DEV_STAT(tx_aborted_errors);
+	NET_DEV_STAT(tx_carrier_errors);
+	NET_DEV_STAT(tx_fifo_errors);
+	NET_DEV_STAT(tx_heartbeat_errors);
+	NET_DEV_STAT(tx_window_errors);
+	NET_DEV_STAT(rx_compressed);
+	NET_DEV_STAT(tx_compressed);
 };
+#undef NET_DEV_STAT
 
 /* per-cpu stats, allocated on demand.
  * Try to fit them in a single cache line, for dev_get_stats() sake.
@@ -5171,4 +5178,9 @@ extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
 
 extern struct net_device *blackhole_netdev;
 
+/* Note: Avoid these macros in fast path, prefer per-cpu or per-queue counters. */
+#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
+#define DEV_STATS_ADD(DEV, FIELD, VAL) 		\
+		atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
+
 #endif	/* _LINUX_NETDEVICE_H */
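
To make the intended usage concrete, here is a hedged sketch of a driver slow path converted to the helpers defined above; my_tunnel_xmit and its error handling are invented for illustration, and only the DEV_STATS_* macros come from this patch.

    /* Illustrative only: an invented error path using the new helpers. */
    static netdev_tx_t my_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
    {
    	if (unlikely(skb_cow_head(skb, dev->needed_headroom)))
    		goto tx_drop;

    	/* Fast path omitted: real drivers should use per-cpu or
    	 * per-queue counters here, per the note above.
    	 */
    	return NETDEV_TX_OK;

    tx_drop:
    	/* Was: dev->stats.tx_errors++; dev->stats.tx_dropped++; (racy) */
    	DEV_STATS_INC(dev, tx_errors);
    	DEV_STATS_INC(dev, tx_dropped);
    	kfree_skb(skb);
    	return NETDEV_TX_OK;
    }

Note that the fast path deliberately avoids these macros, matching the comment above.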
@@ -356,9 +356,8 @@ static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
 static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
 				 struct net *net)
 {
-	/* TODO : stats should be SMP safe */
-	dev->stats.rx_packets++;
-	dev->stats.rx_bytes += skb->len;
+	DEV_STATS_INC(dev, rx_packets);
+	DEV_STATS_ADD(dev, rx_bytes, skb->len);
 	__skb_tunnel_rx(skb, dev, net);
 }
 
@@ -10369,24 +10369,16 @@ void netdev_run_todo(void)
 
 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
 			     const struct net_device_stats *netdev_stats)
 {
-#if BITS_PER_LONG == 64
-	BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
-	memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
-	/* zero out counters that only exist in rtnl_link_stats64 */
-	memset((char *)stats64 + sizeof(*netdev_stats), 0,
-	       sizeof(*stats64) - sizeof(*netdev_stats));
-#else
-	size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
-	const unsigned long *src = (const unsigned long *)netdev_stats;
+	size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t);
+	const atomic_long_t *src = (atomic_long_t *)netdev_stats;
 	u64 *dst = (u64 *)stats64;
 
 	BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
 	for (i = 0; i < n; i++)
-		dst[i] = src[i];
+		dst[i] = atomic_long_read(&src[i]);
 	/* zero out counters that only exist in rtnl_link_stats64 */
 	memset((char *)stats64 + n * sizeof(u64), 0,
 	       sizeof(*stats64) - n * sizeof(u64));
-#endif
 }
 EXPORT_SYMBOL(netdev_stats_to_stats64);
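
Side note on why the BITS_PER_LONG == 64 special case could be deleted: the cast above only requires struct net_device_stats to be a flat array of word-sized counters, which the new union preserves on both 32-bit and 64-bit because atomic_long_t and unsigned long have the same size. A hedged sketch of that invariant (not part of the patch; the count of 23 matches the fields listed earlier):

    /* Not in the patch; just spelling out the layout the loop relies on. */
    static_assert(sizeof(atomic_long_t) == sizeof(unsigned long),
    	      "union views must share one word of storage");
    static_assert(sizeof(struct net_device_stats) == 23 * sizeof(unsigned long),
    	      "stats must remain a flat array of word-sized counters");

atomic_long_read() then gives a single, properly annotated load of each word, which is why one loop now serves both word sizes.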
@@ -510,7 +510,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
 
 err_free_skb:
 	kfree_skb(skb);
-	dev->stats.tx_dropped++;
+	DEV_STATS_INC(dev, tx_dropped);
 }
 
 static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -592,7 +592,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
 
 err_free_skb:
 	kfree_skb(skb);
-	dev->stats.tx_dropped++;
+	DEV_STATS_INC(dev, tx_dropped);
 }
 
 static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
@@ -663,7 +663,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
 
 free_skb:
 	kfree_skb(skb);
-	dev->stats.tx_dropped++;
+	DEV_STATS_INC(dev, tx_dropped);
 	return NETDEV_TX_OK;
 }
@@ -717,7 +717,7 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
 
 free_skb:
 	kfree_skb(skb);
-	dev->stats.tx_dropped++;
+	DEV_STATS_INC(dev, tx_dropped);
 	return NETDEV_TX_OK;
 }
@@ -745,7 +745,7 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
 
 free_skb:
 	kfree_skb(skb);
-	dev->stats.tx_dropped++;
+	DEV_STATS_INC(dev, tx_dropped);
 	return NETDEV_TX_OK;
 }
@@ -368,23 +368,23 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
 #ifdef CONFIG_NET_IPGRE_BROADCAST
 	if (ipv4_is_multicast(iph->daddr)) {
-		tunnel->dev->stats.multicast++;
+		DEV_STATS_INC(tunnel->dev, multicast);
 		skb->pkt_type = PACKET_BROADCAST;
 	}
 #endif
 
 	if ((!(tpi->flags&TUNNEL_CSUM) && (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
 	    ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
-		tunnel->dev->stats.rx_crc_errors++;
-		tunnel->dev->stats.rx_errors++;
+		DEV_STATS_INC(tunnel->dev, rx_crc_errors);
+		DEV_STATS_INC(tunnel->dev, rx_errors);
 		goto drop;
 	}
 
 	if (tunnel->parms.i_flags&TUNNEL_SEQ) {
 		if (!(tpi->flags&TUNNEL_SEQ) ||
 		    (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
-			tunnel->dev->stats.rx_fifo_errors++;
-			tunnel->dev->stats.rx_errors++;
+			DEV_STATS_INC(tunnel->dev, rx_fifo_errors);
+			DEV_STATS_INC(tunnel->dev, rx_errors);
 			goto drop;
 		}
 		tunnel->i_seqno = ntohl(tpi->seq) + 1;
@@ -398,8 +398,8 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
 		net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
 				     &iph->saddr, iph->tos);
 		if (err > 1) {
-			++tunnel->dev->stats.rx_frame_errors;
-			++tunnel->dev->stats.rx_errors;
+			DEV_STATS_INC(tunnel->dev, rx_frame_errors);
+			DEV_STATS_INC(tunnel->dev, rx_errors);
 			goto drop;
 		}
 	}
@@ -581,7 +581,7 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 	if (!rt) {
 		rt = ip_route_output_key(tunnel->net, &fl4);
 		if (IS_ERR(rt)) {
-			dev->stats.tx_carrier_errors++;
+			DEV_STATS_INC(dev, tx_carrier_errors);
 			goto tx_error;
 		}
 		if (use_cache)
@@ -590,7 +590,7 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 	}
 
 	if (rt->dst.dev == dev) {
 		ip_rt_put(rt);
-		dev->stats.collisions++;
+		DEV_STATS_INC(dev, collisions);
 		goto tx_error;
 	}
@@ -625,10 +625,10 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 		    df, !net_eq(tunnel->net, dev_net(dev)));
 	return;
 tx_error:
-	dev->stats.tx_errors++;
+	DEV_STATS_INC(dev, tx_errors);
 	goto kfree;
 tx_dropped:
-	dev->stats.tx_dropped++;
+	DEV_STATS_INC(dev, tx_dropped);
 kfree:
 	kfree_skb(skb);
 }
@@ -662,7 +662,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 
 	/* NBMA tunnel */
 	if (!skb_dst(skb)) {
-		dev->stats.tx_fifo_errors++;
+		DEV_STATS_INC(dev, tx_fifo_errors);
 		goto tx_error;
 	}
@@ -749,7 +749,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 		rt = ip_route_output_key(tunnel->net, &fl4);
 
 		if (IS_ERR(rt)) {
-			dev->stats.tx_carrier_errors++;
+			DEV_STATS_INC(dev, tx_carrier_errors);
 			goto tx_error;
 		}
 		if (use_cache)
@@ -762,7 +762,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 
 	if (rt->dst.dev == dev) {
 		ip_rt_put(rt);
-		dev->stats.collisions++;
+		DEV_STATS_INC(dev, collisions);
 		goto tx_error;
 	}
@@ -805,7 +805,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 
 	if (skb_cow_head(skb, dev->needed_headroom)) {
 		ip_rt_put(rt);
-		dev->stats.tx_dropped++;
+		DEV_STATS_INC(dev, tx_dropped);
 		kfree_skb(skb);
 		return;
 	}
@@ -819,7 +819,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 		dst_link_failure(skb);
 #endif
 tx_error:
-	dev->stats.tx_errors++;
+	DEV_STATS_INC(dev, tx_errors);
 	kfree_skb(skb);
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_xmit);
@@ -107,8 +107,8 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
 	dev = tunnel->dev;
 
 	if (err) {
-		dev->stats.rx_errors++;
-		dev->stats.rx_dropped++;
+		DEV_STATS_INC(dev, rx_errors);
+		DEV_STATS_INC(dev, rx_dropped);
 		return 0;
 	}
@@ -183,7 +183,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
 		fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
 		rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
 		if (IS_ERR(rt)) {
-			dev->stats.tx_carrier_errors++;
+			DEV_STATS_INC(dev, tx_carrier_errors);
 			goto tx_error_icmp;
 		}
 		dst = &rt->dst;
@@ -198,14 +198,14 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
 		if (dst->error) {
 			dst_release(dst);
 			dst = NULL;
-			dev->stats.tx_carrier_errors++;
+			DEV_STATS_INC(dev, tx_carrier_errors);
 			goto tx_error_icmp;
 		}
 		skb_dst_set(skb, dst);
 		break;
 #endif
 	default:
-		dev->stats.tx_carrier_errors++;
+		DEV_STATS_INC(dev, tx_carrier_errors);
 		goto tx_error_icmp;
 	}
 	}
@@ -213,7 +213,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
 	dst_hold(dst);
 	dst = xfrm_lookup_route(tunnel->net, dst, fl, NULL, 0);
 	if (IS_ERR(dst)) {
-		dev->stats.tx_carrier_errors++;
+		DEV_STATS_INC(dev, tx_carrier_errors);
 		goto tx_error_icmp;
 	}
@@ -221,7 +221,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
 		goto xmit;
 
 	if (!vti_state_check(dst->xfrm, parms->iph.daddr, parms->iph.saddr)) {
-		dev->stats.tx_carrier_errors++;
+		DEV_STATS_INC(dev, tx_carrier_errors);
 		dst_release(dst);
 		goto tx_error_icmp;
 	}
@@ -230,7 +230,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
 
 	if (tdev == dev) {
 		dst_release(dst);
-		dev->stats.collisions++;
+		DEV_STATS_INC(dev, collisions);
 		goto tx_error;
 	}
@@ -267,7 +267,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
 tx_error_icmp:
 	dst_link_failure(skb);
 tx_error:
-	dev->stats.tx_errors++;
+	DEV_STATS_INC(dev, tx_errors);
 	kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
@@ -304,7 +304,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	return vti_xmit(skb, dev, &fl);
 
 tx_err:
-	dev->stats.tx_errors++;
+	DEV_STATS_INC(dev, tx_errors);
 	kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
@@ -310,7 +310,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb,
 
 tx_error:
 	kfree_skb(skb);
-	dev->stats.tx_errors++;
+	DEV_STATS_INC(dev, tx_errors);
 	return NETDEV_TX_OK;
 }
@@ -506,8 +506,8 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
 		return err;
 	}
 
-	dev->stats.tx_bytes += skb->len;
-	dev->stats.tx_packets++;
+	DEV_STATS_ADD(dev, tx_bytes, skb->len);
+	DEV_STATS_INC(dev, tx_packets);
 	rcu_read_lock();
 
 	/* Pairs with WRITE_ONCE() in vif_add() and vif_delete() */
@@ -1839,8 +1839,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
 	if (vif->flags & VIFF_REGISTER) {
 		WRITE_ONCE(vif->pkt_out, vif->pkt_out + 1);
 		WRITE_ONCE(vif->bytes_out, vif->bytes_out + skb->len);
-		vif_dev->stats.tx_bytes += skb->len;
-		vif_dev->stats.tx_packets++;
+		DEV_STATS_ADD(vif_dev, tx_bytes, skb->len);
+		DEV_STATS_INC(vif_dev, tx_packets);
 		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
 		goto out_free;
 	}
@@ -1898,8 +1898,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
 	if (vif->flags & VIFF_TUNNEL) {
 		ip_encap(net, skb, vif->local, vif->remote);
 		/* FIXME: extra output firewall step used to be here. --RR */
-		vif_dev->stats.tx_packets++;
-		vif_dev->stats.tx_bytes += skb->len;
+		DEV_STATS_INC(vif_dev, tx_packets);
+		DEV_STATS_ADD(vif_dev, tx_bytes, skb->len);
 	}
 
 	IPCB(skb)->flags |= IPSKB_FORWARDED;
@@ -895,7 +895,6 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
 					  struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
-	struct net_device_stats *stats = &t->dev->stats;
 	__be16 payload_protocol;
 	int ret;
@@ -925,8 +924,8 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
 
 tx_err:
 	if (!t->parms.collect_md || !IS_ERR(skb_tunnel_info_txcheck(skb)))
-		stats->tx_errors++;
-	stats->tx_dropped++;
+		DEV_STATS_INC(dev, tx_errors);
+	DEV_STATS_INC(dev, tx_dropped);
 	kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
@@ -937,7 +936,6 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
 	struct ip_tunnel_info *tun_info = NULL;
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct dst_entry *dst = skb_dst(skb);
-	struct net_device_stats *stats;
 	bool truncate = false;
 	int encap_limit = -1;
 	__u8 dsfield = false;
@@ -1086,10 +1084,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
 	return NETDEV_TX_OK;
 
 tx_err:
-	stats = &t->dev->stats;
 	if (!IS_ERR(tun_info))
-		stats->tx_errors++;
-	stats->tx_dropped++;
+		DEV_STATS_INC(dev, tx_errors);
+	DEV_STATS_INC(dev, tx_dropped);
 	kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
@@ -803,8 +803,8 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
 	      (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
 	     ((tpi->flags & TUNNEL_CSUM) &&
 	      !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
-		tunnel->dev->stats.rx_crc_errors++;
-		tunnel->dev->stats.rx_errors++;
+		DEV_STATS_INC(tunnel->dev, rx_crc_errors);
+		DEV_STATS_INC(tunnel->dev, rx_errors);
 		goto drop;
 	}
@@ -812,8 +812,8 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
 		if (!(tpi->flags & TUNNEL_SEQ) ||
 		    (tunnel->i_seqno &&
 		     (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
-			tunnel->dev->stats.rx_fifo_errors++;
-			tunnel->dev->stats.rx_errors++;
+			DEV_STATS_INC(tunnel->dev, rx_fifo_errors);
+			DEV_STATS_INC(tunnel->dev, rx_errors);
 			goto drop;
 		}
 		tunnel->i_seqno = ntohl(tpi->seq) + 1;
@@ -824,8 +824,8 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
 
 	/* Warning: All skb pointers will be invalidated! */
 	if (tunnel->dev->type == ARPHRD_ETHER) {
 		if (!pskb_may_pull(skb, ETH_HLEN)) {
-			tunnel->dev->stats.rx_length_errors++;
-			tunnel->dev->stats.rx_errors++;
+			DEV_STATS_INC(tunnel->dev, rx_length_errors);
+			DEV_STATS_INC(tunnel->dev, rx_errors);
 			goto drop;
 		}
@@ -849,8 +849,8 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
 					     &ipv6h->saddr,
 					     ipv6_get_dsfield(ipv6h));
 		if (err > 1) {
-			++tunnel->dev->stats.rx_frame_errors;
-			++tunnel->dev->stats.rx_errors;
+			DEV_STATS_INC(tunnel->dev, rx_frame_errors);
+			DEV_STATS_INC(tunnel->dev, rx_errors);
 			goto drop;
 		}
 	}
@@ -1071,7 +1071,6 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
 {
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct net *net = t->net;
-	struct net_device_stats *stats = &t->dev->stats;
 	struct ipv6hdr *ipv6h;
 	struct ipv6_tel_txoption opt;
 	struct dst_entry *dst = NULL, *ndst = NULL;
@@ -1166,7 +1165,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
 	tdev = dst->dev;
 
 	if (tdev == dev) {
-		stats->collisions++;
+		DEV_STATS_INC(dev, collisions);
 		net_warn_ratelimited("%s: Local routing loop detected!\n",
 				     t->parms.name);
 		goto tx_err_dst_release;
@@ -1265,7 +1264,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
 	ip6tunnel_xmit(NULL, skb, dev);
 	return 0;
 tx_err_link_failure:
-	stats->tx_carrier_errors++;
+	DEV_STATS_INC(dev, tx_carrier_errors);
 	dst_link_failure(skb);
 tx_err_dst_release:
 	dst_release(dst);
@@ -1408,7 +1407,6 @@ static netdev_tx_t
 ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
-	struct net_device_stats *stats = &t->dev->stats;
 	u8 ipproto;
 	int ret;
@@ -1438,8 +1436,8 @@ ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 
 tx_err:
-	stats->tx_errors++;
-	stats->tx_dropped++;
+	DEV_STATS_INC(dev, tx_errors);
+	DEV_STATS_INC(dev, tx_dropped);
 	kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
@@ -317,7 +317,7 @@ static int vti6_input_proto(struct sk_buff *skb, int nexthdr, __be32 spi,
 
 		ipv6h = ipv6_hdr(skb);
 		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
-			t->dev->stats.rx_dropped++;
+			DEV_STATS_INC(t->dev, rx_dropped);
 			rcu_read_unlock();
 			goto discard;
 		}
@@ -359,8 +359,8 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err)
 	dev = t->dev;
 
 	if (err) {
-		dev->stats.rx_errors++;
-		dev->stats.rx_dropped++;
+		DEV_STATS_INC(dev, rx_errors);
+		DEV_STATS_INC(dev, rx_dropped);
 		return 0;
 	}
@@ -446,7 +446,6 @@ static int
 vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
-	struct net_device_stats *stats = &t->dev->stats;
 	struct dst_entry *dst = skb_dst(skb);
 	struct net_device *tdev;
 	struct xfrm_state *x;
@@ -506,7 +505,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 	tdev = dst->dev;
 
 	if (tdev == dev) {
-		stats->collisions++;
+		DEV_STATS_INC(dev, collisions);
 		net_warn_ratelimited("%s: Local routing loop detected!\n",
 				     t->parms.name);
 		goto tx_err_dst_release;
@@ -544,7 +543,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 	return 0;
 tx_err_link_failure:
-	stats->tx_carrier_errors++;
+	DEV_STATS_INC(dev, tx_carrier_errors);
 	dst_link_failure(skb);
 tx_err_dst_release:
 	dst_release(dst);
@@ -555,7 +554,6 @@ static netdev_tx_t
 vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
-	struct net_device_stats *stats = &t->dev->stats;
 	struct flowi fl;
 	int ret;
@@ -591,8 +589,8 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 
 tx_err:
-	stats->tx_errors++;
-	stats->tx_dropped++;
+	DEV_STATS_INC(dev, tx_errors);
+	DEV_STATS_INC(dev, tx_dropped);
 	kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
@@ -608,8 +608,8 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
 	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
 		goto tx_err;
 
-	dev->stats.tx_bytes += skb->len;
-	dev->stats.tx_packets++;
+	DEV_STATS_ADD(dev, tx_bytes, skb->len);
+	DEV_STATS_INC(dev, tx_packets);
 	rcu_read_lock();
 	ip6mr_cache_report(mrt, skb, READ_ONCE(mrt->mroute_reg_vif_num),
 			   MRT6MSG_WHOLEPKT);
@@ -618,7 +618,7 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
 	return NETDEV_TX_OK;
 
 tx_err:
-	dev->stats.tx_errors++;
+	DEV_STATS_INC(dev, tx_errors);
 	kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
@@ -2044,8 +2044,8 @@ static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
 	if (vif->flags & MIFF_REGISTER) {
 		WRITE_ONCE(vif->pkt_out, vif->pkt_out + 1);
 		WRITE_ONCE(vif->bytes_out, vif->bytes_out + skb->len);
-		vif_dev->stats.tx_bytes += skb->len;
-		vif_dev->stats.tx_packets++;
+		DEV_STATS_ADD(vif_dev, tx_bytes, skb->len);
+		DEV_STATS_INC(vif_dev, tx_packets);
 		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
 		goto out_free;
 	}
@@ -694,7 +694,7 @@ static int ipip6_rcv(struct sk_buff *skb)
 		skb->dev = tunnel->dev;
 
 		if (packet_is_spoofed(skb, iph, tunnel)) {
-			tunnel->dev->stats.rx_errors++;
+			DEV_STATS_INC(tunnel->dev, rx_errors);
 			goto out;
 		}
@@ -714,8 +714,8 @@ static int ipip6_rcv(struct sk_buff *skb)
 			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
 					     &iph->saddr, iph->tos);
 			if (err > 1) {
-				++tunnel->dev->stats.rx_frame_errors;
-				++tunnel->dev->stats.rx_errors;
+				DEV_STATS_INC(tunnel->dev, rx_frame_errors);
+				DEV_STATS_INC(tunnel->dev, rx_errors);
 				goto out;
 			}
 		}
@@ -942,7 +942,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 	if (!rt) {
 		rt = ip_route_output_flow(tunnel->net, &fl4, NULL);
 		if (IS_ERR(rt)) {
-			dev->stats.tx_carrier_errors++;
+			DEV_STATS_INC(dev, tx_carrier_errors);
 			goto tx_error_icmp;
 		}
 		dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst, fl4.saddr);
@@ -950,14 +950,14 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 
 	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
 		ip_rt_put(rt);
-		dev->stats.tx_carrier_errors++;
+		DEV_STATS_INC(dev, tx_carrier_errors);
 		goto tx_error_icmp;
 	}
 
 	tdev = rt->dst.dev;
 
 	if (tdev == dev) {
 		ip_rt_put(rt);
-		dev->stats.collisions++;
+		DEV_STATS_INC(dev, collisions);
 		goto tx_error;
 	}
@@ -970,7 +970,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 		mtu = dst_mtu(&rt->dst) - t_hlen;
 
 		if (mtu < IPV4_MIN_MTU) {
-			dev->stats.collisions++;
+			DEV_STATS_INC(dev, collisions);
 			ip_rt_put(rt);
 			goto tx_error;
 		}
@@ -1009,7 +1009,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
 		if (!new_skb) {
 			ip_rt_put(rt);
-			dev->stats.tx_dropped++;
+			DEV_STATS_INC(dev, tx_dropped);
 			kfree_skb(skb);
 			return NETDEV_TX_OK;
 		}
@@ -1039,7 +1039,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 		dst_link_failure(skb);
 tx_error:
 	kfree_skb(skb);
-	dev->stats.tx_errors++;
+	DEV_STATS_INC(dev, tx_errors);
 	return NETDEV_TX_OK;
 }
@@ -1058,7 +1058,7 @@ static netdev_tx_t sit_tunnel_xmit__(struct sk_buff *skb,
 	return NETDEV_TX_OK;
 
 tx_error:
 	kfree_skb(skb);
-	dev->stats.tx_errors++;
+	DEV_STATS_INC(dev, tx_errors);
 	return NETDEV_TX_OK;
 }
@@ -1087,7 +1087,7 @@ static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
 	return NETDEV_TX_OK;
 
 tx_err:
-	dev->stats.tx_errors++;
+	DEV_STATS_INC(dev, tx_errors);
 	kfree_skb(skb);
 	return NETDEV_TX_OK;
 }