Commit ca5ebbfe authored by David S. Miller

Merge branch 'net-atomic-dev-stats'

Eric Dumazet says:

====================
net: add atomic dev->stats infra

Long-standing KCSAN issues are caused by data races around
some dev->stats updates.

Most performance-critical paths already use per-cpu
or per-queue counters.

It is reasonable (and more correct) to use atomic operations
for the slow paths.

The first patch adds the infrastructure; the next three patches
convert the most common paths that syzbot is playing with.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 68d268d0 c4794d22
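The data race behind those KCSAN reports is the classic lost update on a plain counter. A minimal userspace sketch (pthreads plus C11 atomics, not kernel code; all names are invented for illustration) contrasts the old dev->stats.field++ pattern with an atomic increment:

/* Two threads bump both counters a million times each.
 * The plain "++" is a non-atomic read-modify-write, so increments
 * from the two threads can collide and be lost; the atomic one cannot.
 * Build with: cc -O2 -pthread race_demo.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static unsigned long plain_ctr;		/* like the old dev->stats.rx_packets */
static atomic_ulong  atomic_ctr;	/* like the new __rx_packets view */

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000000; i++) {
		plain_ctr++;				/* racy */
		atomic_fetch_add(&atomic_ctr, 1);	/* race-free */
	}
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, NULL);
	pthread_create(&t2, NULL, worker, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("plain=%lu (often short of 2000000) atomic=%lu (exactly 2000000)\n",
	       plain_ctr, atomic_load(&atomic_ctr));
	return 0;
}

On the fast paths the series leaves the existing per-cpu and per-queue counters alone; the atomic fallback is only for slow paths where contention is rare.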
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -171,31 +171,38 @@ static inline bool dev_xmit_complete(int rc)
  *	(unsigned long) so they can be read and written atomically.
  */
 
+#define NET_DEV_STAT(FIELD)			\
+	union {					\
+		unsigned long FIELD;		\
+		atomic_long_t __##FIELD;	\
+	}
+
 struct net_device_stats {
-	unsigned long	rx_packets;
-	unsigned long	tx_packets;
-	unsigned long	rx_bytes;
-	unsigned long	tx_bytes;
-	unsigned long	rx_errors;
-	unsigned long	tx_errors;
-	unsigned long	rx_dropped;
-	unsigned long	tx_dropped;
-	unsigned long	multicast;
-	unsigned long	collisions;
-	unsigned long	rx_length_errors;
-	unsigned long	rx_over_errors;
-	unsigned long	rx_crc_errors;
-	unsigned long	rx_frame_errors;
-	unsigned long	rx_fifo_errors;
-	unsigned long	rx_missed_errors;
-	unsigned long	tx_aborted_errors;
-	unsigned long	tx_carrier_errors;
-	unsigned long	tx_fifo_errors;
-	unsigned long	tx_heartbeat_errors;
-	unsigned long	tx_window_errors;
-	unsigned long	rx_compressed;
-	unsigned long	tx_compressed;
+	NET_DEV_STAT(rx_packets);
+	NET_DEV_STAT(tx_packets);
+	NET_DEV_STAT(rx_bytes);
+	NET_DEV_STAT(tx_bytes);
+	NET_DEV_STAT(rx_errors);
+	NET_DEV_STAT(tx_errors);
+	NET_DEV_STAT(rx_dropped);
+	NET_DEV_STAT(tx_dropped);
+	NET_DEV_STAT(multicast);
+	NET_DEV_STAT(collisions);
+	NET_DEV_STAT(rx_length_errors);
+	NET_DEV_STAT(rx_over_errors);
+	NET_DEV_STAT(rx_crc_errors);
+	NET_DEV_STAT(rx_frame_errors);
+	NET_DEV_STAT(rx_fifo_errors);
+	NET_DEV_STAT(rx_missed_errors);
+	NET_DEV_STAT(tx_aborted_errors);
+	NET_DEV_STAT(tx_carrier_errors);
+	NET_DEV_STAT(tx_fifo_errors);
+	NET_DEV_STAT(tx_heartbeat_errors);
+	NET_DEV_STAT(tx_window_errors);
+	NET_DEV_STAT(rx_compressed);
+	NET_DEV_STAT(tx_compressed);
 };
+#undef NET_DEV_STAT
 
 /* per-cpu stats, allocated on demand.
  * Try to fit them in a single cache line, for dev_get_stats() sake.
@@ -5171,4 +5178,9 @@ extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
 
 extern struct net_device *blackhole_netdev;
 
+/* Note: Avoid these macros in fast path, prefer per-cpu or per-queue counters. */
+#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
+#define DEV_STATS_ADD(DEV, FIELD, VAL) \
+		atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
+
 #endif	/* _LINUX_NETDEVICE_H */
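A compilable userspace analogue of the scheme above, with C11 stdatomic.h standing in for the kernel's atomic_long_t (the demo struct and macro operands are illustrative, not kernel API):

/* Userspace sketch of the NET_DEV_STAT / DEV_STATS_* scheme. */
#include <stdatomic.h>
#include <stdio.h>

#define NET_DEV_STAT(FIELD)		\
	union {				\
		unsigned long FIELD;	\
		atomic_ulong __##FIELD;	\
	}

struct demo_stats {
	NET_DEV_STAT(tx_dropped);
	NET_DEV_STAT(rx_bytes);
};
#undef NET_DEV_STAT

/* Same size is what keeps the two views interchangeable. */
_Static_assert(sizeof(unsigned long) == sizeof(atomic_ulong),
	       "plain and atomic views must alias exactly");

#define DEV_STATS_INC(S, FIELD)      atomic_fetch_add(&(S)->__##FIELD, 1)
#define DEV_STATS_ADD(S, FIELD, VAL) atomic_fetch_add(&(S)->__##FIELD, (VAL))

int main(void)
{
	struct demo_stats st = { .tx_dropped = 0, .rx_bytes = 0 };

	DEV_STATS_INC(&st, tx_dropped);		/* slow path, race-free */
	DEV_STATS_ADD(&st, rx_bytes, 1500);

	/* Legacy readers keep using the plain member names unchanged. */
	printf("tx_dropped=%lu rx_bytes=%lu\n", st.tx_dropped, st.rx_bytes);
	return 0;
}

Because the union overlays the atomic view on the plain field, sizeof(struct net_device_stats) and every field offset are unchanged, so the many existing readers of dev->stats fields need no conversion; only racy writers move to DEV_STATS_INC()/DEV_STATS_ADD().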
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -356,9 +356,8 @@ static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
 static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
 				 struct net *net)
 {
-	/* TODO : stats should be SMP safe */
-	dev->stats.rx_packets++;
-	dev->stats.rx_bytes += skb->len;
+	DEV_STATS_INC(dev, rx_packets);
+	DEV_STATS_ADD(dev, rx_bytes, skb->len);
 	__skb_tunnel_rx(skb, dev, net);
 }
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -10369,24 +10369,16 @@ void netdev_run_todo(void)
 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
 			     const struct net_device_stats *netdev_stats)
 {
-#if BITS_PER_LONG == 64
-	BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
-	memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
-	/* zero out counters that only exist in rtnl_link_stats64 */
-	memset((char *)stats64 + sizeof(*netdev_stats), 0,
-	       sizeof(*stats64) - sizeof(*netdev_stats));
-#else
-	size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
-	const unsigned long *src = (const unsigned long *)netdev_stats;
+	size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t);
+	const atomic_long_t *src = (atomic_long_t *)netdev_stats;
 	u64 *dst = (u64 *)stats64;
 
 	BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
 	for (i = 0; i < n; i++)
-		dst[i] = src[i];
+		dst[i] = atomic_long_read(&src[i]);
 	/* zero out counters that only exist in rtnl_link_stats64 */
 	memset((char *)stats64 + n * sizeof(u64), 0,
 	       sizeof(*stats64) - n * sizeof(u64));
-#endif
 }
 EXPORT_SYMBOL(netdev_stats_to_stats64);
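With every field now the same size, the struct can be walked as a flat array, which is why the BITS_PER_LONG == 64 memcpy() fast path could go away: on 64-bit, atomic_long_read() is a plain load, so the loop costs the same and one code path serves both word sizes. A standalone sketch of this flatten-and-widen copy (invented struct names; it leans on the same no-padding assumption the kernel code makes):

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

struct small_stats {			/* stand-in for net_device_stats */
	atomic_ulong rx, tx, drops;
};

struct big_stats {			/* stand-in for rtnl_link_stats64 */
	uint64_t rx, tx, drops, extra1, extra2;
};

static void small_to_big(struct big_stats *dst64, const struct small_stats *src)
{
	size_t i, n = sizeof(*src) / sizeof(atomic_ulong);
	const atomic_ulong *s = (const atomic_ulong *)src;
	uint64_t *d = (uint64_t *)dst64;

	for (i = 0; i < n; i++)
		d[i] = atomic_load(&s[i]);	/* tear-free read of each counter */

	/* zero out counters that only exist in the wider struct */
	memset((char *)dst64 + n * sizeof(uint64_t), 0,
	       sizeof(*dst64) - n * sizeof(uint64_t));
}

int main(void)
{
	struct small_stats s = { 1, 2, 3 };
	struct big_stats b;

	small_to_big(&b, &s);
	return (int)b.extra1;	/* 0: the fields beyond n were zero-filled */
}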
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -510,7 +510,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
 err_free_skb:
 	kfree_skb(skb);
-	dev->stats.tx_dropped++;
+	DEV_STATS_INC(dev, tx_dropped);
 }
 
 static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -592,7 +592,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
 err_free_skb:
 	kfree_skb(skb);
-	dev->stats.tx_dropped++;
+	DEV_STATS_INC(dev, tx_dropped);
 }
 
 static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
@@ -663,7 +663,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
 free_skb:
 	kfree_skb(skb);
-	dev->stats.tx_dropped++;
+	DEV_STATS_INC(dev, tx_dropped);
 	return NETDEV_TX_OK;
 }
@@ -717,7 +717,7 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
 free_skb:
 	kfree_skb(skb);
-	dev->stats.tx_dropped++;
+	DEV_STATS_INC(dev, tx_dropped);
 	return NETDEV_TX_OK;
 }
@@ -745,7 +745,7 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
 free_skb:
 	kfree_skb(skb);
-	dev->stats.tx_dropped++;
+	DEV_STATS_INC(dev, tx_dropped);
 	return NETDEV_TX_OK;
 }
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -368,23 +368,23 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
 #ifdef CONFIG_NET_IPGRE_BROADCAST
 	if (ipv4_is_multicast(iph->daddr)) {
-		tunnel->dev->stats.multicast++;
+		DEV_STATS_INC(tunnel->dev, multicast);
 		skb->pkt_type = PACKET_BROADCAST;
 	}
 #endif
 
 	if ((!(tpi->flags&TUNNEL_CSUM) && (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
 	    ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
-		tunnel->dev->stats.rx_crc_errors++;
-		tunnel->dev->stats.rx_errors++;
+		DEV_STATS_INC(tunnel->dev, rx_crc_errors);
+		DEV_STATS_INC(tunnel->dev, rx_errors);
 		goto drop;
 	}
 
 	if (tunnel->parms.i_flags&TUNNEL_SEQ) {
 		if (!(tpi->flags&TUNNEL_SEQ) ||
 		    (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
-			tunnel->dev->stats.rx_fifo_errors++;
-			tunnel->dev->stats.rx_errors++;
+			DEV_STATS_INC(tunnel->dev, rx_fifo_errors);
+			DEV_STATS_INC(tunnel->dev, rx_errors);
 			goto drop;
 		}
 		tunnel->i_seqno = ntohl(tpi->seq) + 1;
@@ -398,8 +398,8 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
 		net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
 				     &iph->saddr, iph->tos);
 		if (err > 1) {
-			++tunnel->dev->stats.rx_frame_errors;
-			++tunnel->dev->stats.rx_errors;
+			DEV_STATS_INC(tunnel->dev, rx_frame_errors);
+			DEV_STATS_INC(tunnel->dev, rx_errors);
 			goto drop;
 		}
 	}
@@ -581,7 +581,7 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 	if (!rt) {
 		rt = ip_route_output_key(tunnel->net, &fl4);
 		if (IS_ERR(rt)) {
-			dev->stats.tx_carrier_errors++;
+			DEV_STATS_INC(dev, tx_carrier_errors);
 			goto tx_error;
 		}
 		if (use_cache)
@@ -590,7 +590,7 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 	}
 
 	if (rt->dst.dev == dev) {
 		ip_rt_put(rt);
-		dev->stats.collisions++;
+		DEV_STATS_INC(dev, collisions);
 		goto tx_error;
 	}
@@ -625,10 +625,10 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 		      df, !net_eq(tunnel->net, dev_net(dev)));
 	return;
 tx_error:
-	dev->stats.tx_errors++;
+	DEV_STATS_INC(dev, tx_errors);
 	goto kfree;
 tx_dropped:
-	dev->stats.tx_dropped++;
+	DEV_STATS_INC(dev, tx_dropped);
 kfree:
 	kfree_skb(skb);
 }
@@ -662,7 +662,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 	/* NBMA tunnel */
 	if (!skb_dst(skb)) {
-		dev->stats.tx_fifo_errors++;
+		DEV_STATS_INC(dev, tx_fifo_errors);
 		goto tx_error;
 	}
@@ -749,7 +749,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 		rt = ip_route_output_key(tunnel->net, &fl4);
 
 		if (IS_ERR(rt)) {
-			dev->stats.tx_carrier_errors++;
+			DEV_STATS_INC(dev, tx_carrier_errors);
 			goto tx_error;
 		}
 		if (use_cache)
@@ -762,7 +762,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 	if (rt->dst.dev == dev) {
 		ip_rt_put(rt);
-		dev->stats.collisions++;
+		DEV_STATS_INC(dev, collisions);
 		goto tx_error;
 	}
@@ -805,7 +805,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 	if (skb_cow_head(skb, dev->needed_headroom)) {
 		ip_rt_put(rt);
-		dev->stats.tx_dropped++;
+		DEV_STATS_INC(dev, tx_dropped);
 		kfree_skb(skb);
 		return;
 	}
@@ -819,7 +819,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 	dst_link_failure(skb);
 #endif
 tx_error:
-	dev->stats.tx_errors++;
+	DEV_STATS_INC(dev, tx_errors);
 	kfree_skb(skb);
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_xmit);
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -107,8 +107,8 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
 	dev = tunnel->dev;
 
 	if (err) {
-		dev->stats.rx_errors++;
-		dev->stats.rx_dropped++;
+		DEV_STATS_INC(dev, rx_errors);
+		DEV_STATS_INC(dev, rx_dropped);
 		return 0;
 	}
@@ -183,7 +183,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
 		fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
 		rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
 		if (IS_ERR(rt)) {
-			dev->stats.tx_carrier_errors++;
+			DEV_STATS_INC(dev, tx_carrier_errors);
 			goto tx_error_icmp;
 		}
 		dst = &rt->dst;
@@ -198,14 +198,14 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
 		if (dst->error) {
 			dst_release(dst);
 			dst = NULL;
-			dev->stats.tx_carrier_errors++;
+			DEV_STATS_INC(dev, tx_carrier_errors);
 			goto tx_error_icmp;
 		}
 		skb_dst_set(skb, dst);
 		break;
 #endif
 	default:
-		dev->stats.tx_carrier_errors++;
+		DEV_STATS_INC(dev, tx_carrier_errors);
 		goto tx_error_icmp;
 	}
 }
@@ -213,7 +213,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
 	dst_hold(dst);
 	dst = xfrm_lookup_route(tunnel->net, dst, fl, NULL, 0);
 	if (IS_ERR(dst)) {
-		dev->stats.tx_carrier_errors++;
+		DEV_STATS_INC(dev, tx_carrier_errors);
 		goto tx_error_icmp;
 	}
@@ -221,7 +221,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
 		goto xmit;
 
 	if (!vti_state_check(dst->xfrm, parms->iph.daddr, parms->iph.saddr)) {
-		dev->stats.tx_carrier_errors++;
+		DEV_STATS_INC(dev, tx_carrier_errors);
 		dst_release(dst);
 		goto tx_error_icmp;
 	}
@@ -230,7 +230,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
 	if (tdev == dev) {
 		dst_release(dst);
-		dev->stats.collisions++;
+		DEV_STATS_INC(dev, collisions);
 		goto tx_error;
 	}
@@ -267,7 +267,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
 tx_error_icmp:
 	dst_link_failure(skb);
 tx_error:
-	dev->stats.tx_errors++;
+	DEV_STATS_INC(dev, tx_errors);
 	kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
@@ -304,7 +304,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	return vti_xmit(skb, dev, &fl);
 
 tx_err:
-	dev->stats.tx_errors++;
+	DEV_STATS_INC(dev, tx_errors);
 	kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -310,7 +310,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb,
 tx_error:
 	kfree_skb(skb);
-	dev->stats.tx_errors++;
+	DEV_STATS_INC(dev, tx_errors);
 	return NETDEV_TX_OK;
 }
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -506,8 +506,8 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
 		return err;
 	}
 
-	dev->stats.tx_bytes += skb->len;
-	dev->stats.tx_packets++;
+	DEV_STATS_ADD(dev, tx_bytes, skb->len);
+	DEV_STATS_INC(dev, tx_packets);
 	rcu_read_lock();
 
 	/* Pairs with WRITE_ONCE() in vif_add() and vif_delete() */
@@ -1839,8 +1839,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
 	if (vif->flags & VIFF_REGISTER) {
 		WRITE_ONCE(vif->pkt_out, vif->pkt_out + 1);
 		WRITE_ONCE(vif->bytes_out, vif->bytes_out + skb->len);
-		vif_dev->stats.tx_bytes += skb->len;
-		vif_dev->stats.tx_packets++;
+		DEV_STATS_ADD(vif_dev, tx_bytes, skb->len);
+		DEV_STATS_INC(vif_dev, tx_packets);
 		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
 		goto out_free;
 	}
@@ -1898,8 +1898,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
 	if (vif->flags & VIFF_TUNNEL) {
 		ip_encap(net, skb, vif->local, vif->remote);
 		/* FIXME: extra output firewall step used to be here. --RR */
-		vif_dev->stats.tx_packets++;
-		vif_dev->stats.tx_bytes += skb->len;
+		DEV_STATS_INC(vif_dev, tx_packets);
+		DEV_STATS_ADD(vif_dev, tx_bytes, skb->len);
 	}
 
 	IPCB(skb)->flags |= IPSKB_FORWARDED;
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -895,7 +895,6 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
 					  struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
-	struct net_device_stats *stats = &t->dev->stats;
 	__be16 payload_protocol;
 	int ret;
@@ -925,8 +924,8 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
 tx_err:
 	if (!t->parms.collect_md || !IS_ERR(skb_tunnel_info_txcheck(skb)))
-		stats->tx_errors++;
-	stats->tx_dropped++;
+		DEV_STATS_INC(dev, tx_errors);
+	DEV_STATS_INC(dev, tx_dropped);
 	kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
@@ -937,7 +936,6 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
 	struct ip_tunnel_info *tun_info = NULL;
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct dst_entry *dst = skb_dst(skb);
-	struct net_device_stats *stats;
 	bool truncate = false;
 	int encap_limit = -1;
 	__u8 dsfield = false;
@@ -1086,10 +1084,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
 	return NETDEV_TX_OK;
 
 tx_err:
-	stats = &t->dev->stats;
 	if (!IS_ERR(tun_info))
-		stats->tx_errors++;
-	stats->tx_dropped++;
+		DEV_STATS_INC(dev, tx_errors);
+	DEV_STATS_INC(dev, tx_dropped);
 	kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -803,8 +803,8 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
 	     (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
 	    ((tpi->flags & TUNNEL_CSUM) &&
 	     !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
-		tunnel->dev->stats.rx_crc_errors++;
-		tunnel->dev->stats.rx_errors++;
+		DEV_STATS_INC(tunnel->dev, rx_crc_errors);
+		DEV_STATS_INC(tunnel->dev, rx_errors);
 		goto drop;
 	}
@@ -812,8 +812,8 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
 		if (!(tpi->flags & TUNNEL_SEQ) ||
 		    (tunnel->i_seqno &&
 		     (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
-			tunnel->dev->stats.rx_fifo_errors++;
-			tunnel->dev->stats.rx_errors++;
+			DEV_STATS_INC(tunnel->dev, rx_fifo_errors);
+			DEV_STATS_INC(tunnel->dev, rx_errors);
 			goto drop;
 		}
 		tunnel->i_seqno = ntohl(tpi->seq) + 1;
@@ -824,8 +824,8 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
 	/* Warning: All skb pointers will be invalidated! */
 	if (tunnel->dev->type == ARPHRD_ETHER) {
 		if (!pskb_may_pull(skb, ETH_HLEN)) {
-			tunnel->dev->stats.rx_length_errors++;
-			tunnel->dev->stats.rx_errors++;
+			DEV_STATS_INC(tunnel->dev, rx_length_errors);
+			DEV_STATS_INC(tunnel->dev, rx_errors);
 			goto drop;
 		}
@@ -849,8 +849,8 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
 					&ipv6h->saddr,
 					ipv6_get_dsfield(ipv6h));
 		if (err > 1) {
-			++tunnel->dev->stats.rx_frame_errors;
-			++tunnel->dev->stats.rx_errors;
+			DEV_STATS_INC(tunnel->dev, rx_frame_errors);
+			DEV_STATS_INC(tunnel->dev, rx_errors);
 			goto drop;
 		}
 	}
@@ -1071,7 +1071,6 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
 {
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct net *net = t->net;
-	struct net_device_stats *stats = &t->dev->stats;
 	struct ipv6hdr *ipv6h;
 	struct ipv6_tel_txoption opt;
 	struct dst_entry *dst = NULL, *ndst = NULL;
@@ -1166,7 +1165,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
 	tdev = dst->dev;
 
 	if (tdev == dev) {
-		stats->collisions++;
+		DEV_STATS_INC(dev, collisions);
 		net_warn_ratelimited("%s: Local routing loop detected!\n",
 				     t->parms.name);
 		goto tx_err_dst_release;
@@ -1265,7 +1264,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
 	ip6tunnel_xmit(NULL, skb, dev);
 	return 0;
 tx_err_link_failure:
-	stats->tx_carrier_errors++;
+	DEV_STATS_INC(dev, tx_carrier_errors);
 	dst_link_failure(skb);
 tx_err_dst_release:
 	dst_release(dst);
@@ -1408,7 +1407,6 @@ static netdev_tx_t
 ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
-	struct net_device_stats *stats = &t->dev->stats;
 	u8 ipproto;
 	int ret;
@@ -1438,8 +1436,8 @@ ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 
 tx_err:
-	stats->tx_errors++;
-	stats->tx_dropped++;
+	DEV_STATS_INC(dev, tx_errors);
+	DEV_STATS_INC(dev, tx_dropped);
 	kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -317,7 +317,7 @@ static int vti6_input_proto(struct sk_buff *skb, int nexthdr, __be32 spi,
 		ipv6h = ipv6_hdr(skb);
 
 		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
-			t->dev->stats.rx_dropped++;
+			DEV_STATS_INC(t->dev, rx_dropped);
 			rcu_read_unlock();
 			goto discard;
 		}
@@ -359,8 +359,8 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err)
 	dev = t->dev;
 
 	if (err) {
-		dev->stats.rx_errors++;
-		dev->stats.rx_dropped++;
+		DEV_STATS_INC(dev, rx_errors);
+		DEV_STATS_INC(dev, rx_dropped);
 		return 0;
 	}
@@ -446,7 +446,6 @@ static int
 vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
-	struct net_device_stats *stats = &t->dev->stats;
 	struct dst_entry *dst = skb_dst(skb);
 	struct net_device *tdev;
 	struct xfrm_state *x;
@@ -506,7 +505,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 	tdev = dst->dev;
 
 	if (tdev == dev) {
-		stats->collisions++;
+		DEV_STATS_INC(dev, collisions);
 		net_warn_ratelimited("%s: Local routing loop detected!\n",
 				     t->parms.name);
 		goto tx_err_dst_release;
@@ -544,7 +543,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 	return 0;
 tx_err_link_failure:
-	stats->tx_carrier_errors++;
+	DEV_STATS_INC(dev, tx_carrier_errors);
 	dst_link_failure(skb);
 tx_err_dst_release:
 	dst_release(dst);
@@ -555,7 +554,6 @@ static netdev_tx_t
 vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
-	struct net_device_stats *stats = &t->dev->stats;
 	struct flowi fl;
 	int ret;
@@ -591,8 +589,8 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 
 tx_err:
-	stats->tx_errors++;
-	stats->tx_dropped++;
+	DEV_STATS_INC(dev, tx_errors);
+	DEV_STATS_INC(dev, tx_dropped);
 	kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -608,8 +608,8 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
 	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
 		goto tx_err;
 
-	dev->stats.tx_bytes += skb->len;
-	dev->stats.tx_packets++;
+	DEV_STATS_ADD(dev, tx_bytes, skb->len);
+	DEV_STATS_INC(dev, tx_packets);
 	rcu_read_lock();
 	ip6mr_cache_report(mrt, skb, READ_ONCE(mrt->mroute_reg_vif_num),
 			   MRT6MSG_WHOLEPKT);
@@ -618,7 +618,7 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
 	return NETDEV_TX_OK;
 
 tx_err:
-	dev->stats.tx_errors++;
+	DEV_STATS_INC(dev, tx_errors);
 	kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
@@ -2044,8 +2044,8 @@ static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
 	if (vif->flags & MIFF_REGISTER) {
 		WRITE_ONCE(vif->pkt_out, vif->pkt_out + 1);
 		WRITE_ONCE(vif->bytes_out, vif->bytes_out + skb->len);
-		vif_dev->stats.tx_bytes += skb->len;
-		vif_dev->stats.tx_packets++;
+		DEV_STATS_ADD(vif_dev, tx_bytes, skb->len);
+		DEV_STATS_INC(vif_dev, tx_packets);
 		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
 		goto out_free;
 	}
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -694,7 +694,7 @@ static int ipip6_rcv(struct sk_buff *skb)
 	skb->dev = tunnel->dev;
 
 	if (packet_is_spoofed(skb, iph, tunnel)) {
-		tunnel->dev->stats.rx_errors++;
+		DEV_STATS_INC(tunnel->dev, rx_errors);
 		goto out;
 	}
@@ -714,8 +714,8 @@ static int ipip6_rcv(struct sk_buff *skb)
 			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
 					     &iph->saddr, iph->tos);
 		if (err > 1) {
-			++tunnel->dev->stats.rx_frame_errors;
-			++tunnel->dev->stats.rx_errors;
+			DEV_STATS_INC(tunnel->dev, rx_frame_errors);
+			DEV_STATS_INC(tunnel->dev, rx_errors);
 			goto out;
 		}
 	}
@@ -942,7 +942,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 	if (!rt) {
 		rt = ip_route_output_flow(tunnel->net, &fl4, NULL);
 		if (IS_ERR(rt)) {
-			dev->stats.tx_carrier_errors++;
+			DEV_STATS_INC(dev, tx_carrier_errors);
 			goto tx_error_icmp;
 		}
 		dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst, fl4.saddr);
@@ -950,14 +950,14 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
 		ip_rt_put(rt);
-		dev->stats.tx_carrier_errors++;
+		DEV_STATS_INC(dev, tx_carrier_errors);
 		goto tx_error_icmp;
 	}
 	tdev = rt->dst.dev;
 
 	if (tdev == dev) {
 		ip_rt_put(rt);
-		dev->stats.collisions++;
+		DEV_STATS_INC(dev, collisions);
 		goto tx_error;
 	}
@@ -970,7 +970,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 		mtu = dst_mtu(&rt->dst) - t_hlen;
 
 		if (mtu < IPV4_MIN_MTU) {
-			dev->stats.collisions++;
+			DEV_STATS_INC(dev, collisions);
 			ip_rt_put(rt);
 			goto tx_error;
 		}
@@ -1009,7 +1009,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
 		if (!new_skb) {
 			ip_rt_put(rt);
-			dev->stats.tx_dropped++;
+			DEV_STATS_INC(dev, tx_dropped);
 			kfree_skb(skb);
 			return NETDEV_TX_OK;
 		}
@@ -1039,7 +1039,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 	dst_link_failure(skb);
 tx_error:
 	kfree_skb(skb);
-	dev->stats.tx_errors++;
+	DEV_STATS_INC(dev, tx_errors);
 	return NETDEV_TX_OK;
 }
@@ -1058,7 +1058,7 @@ static netdev_tx_t sit_tunnel_xmit__(struct sk_buff *skb,
 	return NETDEV_TX_OK;
 tx_error:
 	kfree_skb(skb);
-	dev->stats.tx_errors++;
+	DEV_STATS_INC(dev, tx_errors);
 	return NETDEV_TX_OK;
 }
@@ -1087,7 +1087,7 @@ static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
 	return NETDEV_TX_OK;
 tx_err:
-	dev->stats.tx_errors++;
+	DEV_STATS_INC(dev, tx_errors);
 	kfree_skb(skb);
 	return NETDEV_TX_OK;