Commit caf586e5 authored by Eric Dumazet's avatar Eric Dumazet Committed by David S. Miller

net: add a core netdev->rx_dropped counter

In various situations, a device provides a packet to our stack and we
drop it before it enters the protocol stack:
- softnet backlog full (accounted in /proc/net/softnet_stat)
- bad vlan tag (not accounted)
- unknown/unregistered protocol (not accounted)

We can handle a per-device counter of such dropped frames at core level,
and automatically add it to the device-provided stats (rx_dropped), so
that standard tools can be used (ifconfig, ip link, cat /proc/net/dev)

This is a generalization of commit 8990f468 (net: rx_dropped
accounting), thus reverting it.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a00eac0c
...@@ -64,7 +64,6 @@ struct pcpu_lstats { ...@@ -64,7 +64,6 @@ struct pcpu_lstats {
u64 packets; u64 packets;
u64 bytes; u64 bytes;
struct u64_stats_sync syncp; struct u64_stats_sync syncp;
unsigned long drops;
}; };
/* /*
...@@ -90,8 +89,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb, ...@@ -90,8 +89,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
lb_stats->bytes += len; lb_stats->bytes += len;
lb_stats->packets++; lb_stats->packets++;
u64_stats_update_end(&lb_stats->syncp); u64_stats_update_end(&lb_stats->syncp);
} else }
lb_stats->drops++;
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
...@@ -101,7 +99,6 @@ static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev, ...@@ -101,7 +99,6 @@ static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
{ {
u64 bytes = 0; u64 bytes = 0;
u64 packets = 0; u64 packets = 0;
u64 drops = 0;
int i; int i;
for_each_possible_cpu(i) { for_each_possible_cpu(i) {
...@@ -115,14 +112,11 @@ static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev, ...@@ -115,14 +112,11 @@ static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
tbytes = lb_stats->bytes; tbytes = lb_stats->bytes;
tpackets = lb_stats->packets; tpackets = lb_stats->packets;
} while (u64_stats_fetch_retry(&lb_stats->syncp, start)); } while (u64_stats_fetch_retry(&lb_stats->syncp, start));
drops += lb_stats->drops;
bytes += tbytes; bytes += tbytes;
packets += tpackets; packets += tpackets;
} }
stats->rx_packets = packets; stats->rx_packets = packets;
stats->tx_packets = packets; stats->tx_packets = packets;
stats->rx_dropped = drops;
stats->rx_errors = drops;
stats->rx_bytes = bytes; stats->rx_bytes = bytes;
stats->tx_bytes = bytes; stats->tx_bytes = bytes;
return stats; return stats;
......
...@@ -884,6 +884,9 @@ struct net_device { ...@@ -884,6 +884,9 @@ struct net_device {
int iflink; int iflink;
struct net_device_stats stats; struct net_device_stats stats;
atomic_long_t rx_dropped; /* dropped packets by core network
* Do not use this in drivers.
*/
#ifdef CONFIG_WIRELESS_EXT #ifdef CONFIG_WIRELESS_EXT
/* List of functions to handle Wireless Extensions (instead of ioctl). /* List of functions to handle Wireless Extensions (instead of ioctl).
......
...@@ -25,7 +25,6 @@ struct vlan_priority_tci_mapping { ...@@ -25,7 +25,6 @@ struct vlan_priority_tci_mapping {
* @rx_multicast: number of received multicast packets * @rx_multicast: number of received multicast packets
* @syncp: synchronization point for 64bit counters * @syncp: synchronization point for 64bit counters
* @rx_errors: number of errors * @rx_errors: number of errors
* @rx_dropped: number of dropped packets
*/ */
struct vlan_rx_stats { struct vlan_rx_stats {
u64 rx_packets; u64 rx_packets;
...@@ -33,7 +32,6 @@ struct vlan_rx_stats { ...@@ -33,7 +32,6 @@ struct vlan_rx_stats {
u64 rx_multicast; u64 rx_multicast;
struct u64_stats_sync syncp; struct u64_stats_sync syncp;
unsigned long rx_errors; unsigned long rx_errors;
unsigned long rx_dropped;
}; };
/** /**
......
...@@ -33,6 +33,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, ...@@ -33,6 +33,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
return polling ? netif_receive_skb(skb) : netif_rx(skb); return polling ? netif_receive_skb(skb) : netif_rx(skb);
drop: drop:
atomic_long_inc(&skb->dev->rx_dropped);
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
return NET_RX_DROP; return NET_RX_DROP;
} }
...@@ -123,6 +124,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp, ...@@ -123,6 +124,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
return dev_gro_receive(napi, skb); return dev_gro_receive(napi, skb);
drop: drop:
atomic_long_inc(&skb->dev->rx_dropped);
return GRO_DROP; return GRO_DROP;
} }
......
...@@ -225,16 +225,15 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, ...@@ -225,16 +225,15 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
} }
} }
if (unlikely(netif_rx(skb) == NET_RX_DROP)) { netif_rx(skb);
if (rx_stats)
rx_stats->rx_dropped++;
}
rcu_read_unlock(); rcu_read_unlock();
return NET_RX_SUCCESS; return NET_RX_SUCCESS;
err_unlock: err_unlock:
rcu_read_unlock(); rcu_read_unlock();
err_free: err_free:
atomic_long_inc(&dev->rx_dropped);
kfree_skb(skb); kfree_skb(skb);
return NET_RX_DROP; return NET_RX_DROP;
} }
...@@ -846,15 +845,13 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st ...@@ -846,15 +845,13 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st
accum.rx_packets += rxpackets; accum.rx_packets += rxpackets;
accum.rx_bytes += rxbytes; accum.rx_bytes += rxbytes;
accum.rx_multicast += rxmulticast; accum.rx_multicast += rxmulticast;
/* rx_errors, rx_dropped are ulong, not protected by syncp */ /* rx_errors is ulong, not protected by syncp */
accum.rx_errors += p->rx_errors; accum.rx_errors += p->rx_errors;
accum.rx_dropped += p->rx_dropped;
} }
stats->rx_packets = accum.rx_packets; stats->rx_packets = accum.rx_packets;
stats->rx_bytes = accum.rx_bytes; stats->rx_bytes = accum.rx_bytes;
stats->rx_errors = accum.rx_errors; stats->rx_errors = accum.rx_errors;
stats->multicast = accum.rx_multicast; stats->multicast = accum.rx_multicast;
stats->rx_dropped = accum.rx_dropped;
} }
return stats; return stats;
} }
......
...@@ -1483,8 +1483,9 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) ...@@ -1483,8 +1483,9 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
skb_orphan(skb); skb_orphan(skb);
nf_reset(skb); nf_reset(skb);
if (!(dev->flags & IFF_UP) || if (unlikely(!(dev->flags & IFF_UP) ||
(skb->len > (dev->mtu + dev->hard_header_len))) { (skb->len > (dev->mtu + dev->hard_header_len)))) {
atomic_long_inc(&dev->rx_dropped);
kfree_skb(skb); kfree_skb(skb);
return NET_RX_DROP; return NET_RX_DROP;
} }
...@@ -2548,6 +2549,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu, ...@@ -2548,6 +2549,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
local_irq_restore(flags); local_irq_restore(flags);
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb); kfree_skb(skb);
return NET_RX_DROP; return NET_RX_DROP;
} }
...@@ -2995,6 +2997,7 @@ static int __netif_receive_skb(struct sk_buff *skb) ...@@ -2995,6 +2997,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
if (pt_prev) { if (pt_prev) {
ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
} else { } else {
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb); kfree_skb(skb);
/* Jamal, now you will not able to escape explaining /* Jamal, now you will not able to escape explaining
* me how you were going to use this. :-) * me how you were going to use this. :-)
...@@ -5429,14 +5432,14 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, ...@@ -5429,14 +5432,14 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
if (ops->ndo_get_stats64) { if (ops->ndo_get_stats64) {
memset(storage, 0, sizeof(*storage)); memset(storage, 0, sizeof(*storage));
return ops->ndo_get_stats64(dev, storage); ops->ndo_get_stats64(dev, storage);
} } else if (ops->ndo_get_stats) {
if (ops->ndo_get_stats) {
netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
return storage; } else {
netdev_stats_to_stats64(storage, &dev->stats);
dev_txq_stats_fold(dev, storage);
} }
netdev_stats_to_stats64(storage, &dev->stats); storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
dev_txq_stats_fold(dev, storage);
return storage; return storage;
} }
EXPORT_SYMBOL(dev_get_stats); EXPORT_SYMBOL(dev_get_stats);
......
...@@ -679,8 +679,7 @@ static int ipgre_rcv(struct sk_buff *skb) ...@@ -679,8 +679,7 @@ static int ipgre_rcv(struct sk_buff *skb)
skb_reset_network_header(skb); skb_reset_network_header(skb);
ipgre_ecn_decapsulate(iph, skb); ipgre_ecn_decapsulate(iph, skb);
if (netif_rx(skb) == NET_RX_DROP) netif_rx(skb);
tunnel->dev->stats.rx_dropped++;
rcu_read_unlock(); rcu_read_unlock();
return 0; return 0;
......
...@@ -414,8 +414,7 @@ static int ipip_rcv(struct sk_buff *skb) ...@@ -414,8 +414,7 @@ static int ipip_rcv(struct sk_buff *skb)
ipip_ecn_decapsulate(iph, skb); ipip_ecn_decapsulate(iph, skb);
if (netif_rx(skb) == NET_RX_DROP) netif_rx(skb);
tunnel->dev->stats.rx_dropped++;
rcu_read_unlock(); rcu_read_unlock();
return 0; return 0;
......
...@@ -768,8 +768,7 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, ...@@ -768,8 +768,7 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
dscp_ecn_decapsulate(t, ipv6h, skb); dscp_ecn_decapsulate(t, ipv6h, skb);
if (netif_rx(skb) == NET_RX_DROP) netif_rx(skb);
t->dev->stats.rx_dropped++;
rcu_read_unlock(); rcu_read_unlock();
return 0; return 0;
......
...@@ -666,8 +666,7 @@ static int pim6_rcv(struct sk_buff *skb) ...@@ -666,8 +666,7 @@ static int pim6_rcv(struct sk_buff *skb)
skb_tunnel_rx(skb, reg_dev); skb_tunnel_rx(skb, reg_dev);
if (netif_rx(skb) == NET_RX_DROP) netif_rx(skb);
reg_dev->stats.rx_dropped++;
dev_put(reg_dev); dev_put(reg_dev);
return 0; return 0;
......
...@@ -600,8 +600,7 @@ static int ipip6_rcv(struct sk_buff *skb) ...@@ -600,8 +600,7 @@ static int ipip6_rcv(struct sk_buff *skb)
ipip6_ecn_decapsulate(iph, skb); ipip6_ecn_decapsulate(iph, skb);
if (netif_rx(skb) == NET_RX_DROP) netif_rx(skb);
tunnel->dev->stats.rx_dropped++;
rcu_read_unlock(); rcu_read_unlock();
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment