Commit 6cce09f8 authored by Eric Dumazet, committed by David S. Miller

tcp: Add SNMP counters for backlog and min_ttl drops

Commit 6b03a53a (tcp: use limited socket backlog) added the possibility
of dropping frames when backlog queue is full.

Commit d218d111 (tcp: Generalized TTL Security Mechanism) added the
possibility of dropping frames when TTL is under a given limit.

This patch adds new SNMP MIB entries, named TCPBacklogDrop and
TCPMinTTLDrop, published in /proc/net/netstat in TcpExt: line

netstat -s | egrep "TCPBacklogDrop|TCPMinTTLDrop"
    TCPBacklogDrop: 0
    TCPMinTTLDrop: 0
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 40456353
...@@ -225,6 +225,8 @@ enum ...@@ -225,6 +225,8 @@ enum
LINUX_MIB_SACKSHIFTED, LINUX_MIB_SACKSHIFTED,
LINUX_MIB_SACKMERGED, LINUX_MIB_SACKMERGED,
LINUX_MIB_SACKSHIFTFALLBACK, LINUX_MIB_SACKSHIFTFALLBACK,
LINUX_MIB_TCPBACKLOGDROP,
LINUX_MIB_TCPMINTTLDROP, /* RFC 5082 */
__LINUX_MIB_MAX __LINUX_MIB_MAX
}; };
......
...@@ -249,6 +249,8 @@ static const struct snmp_mib snmp4_net_list[] = { ...@@ -249,6 +249,8 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPSackShifted", LINUX_MIB_SACKSHIFTED), SNMP_MIB_ITEM("TCPSackShifted", LINUX_MIB_SACKSHIFTED),
SNMP_MIB_ITEM("TCPSackMerged", LINUX_MIB_SACKMERGED), SNMP_MIB_ITEM("TCPSackMerged", LINUX_MIB_SACKMERGED),
SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK), SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK),
SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP),
SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP),
SNMP_MIB_SENTINEL SNMP_MIB_SENTINEL
}; };
......
...@@ -1651,8 +1651,10 @@ int tcp_v4_rcv(struct sk_buff *skb) ...@@ -1651,8 +1651,10 @@ int tcp_v4_rcv(struct sk_buff *skb)
if (!sk) if (!sk)
goto no_tcp_socket; goto no_tcp_socket;
if (iph->ttl < inet_sk(sk)->min_ttl) if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
goto discard_and_relse; goto discard_and_relse;
}
process: process:
if (sk->sk_state == TCP_TIME_WAIT) if (sk->sk_state == TCP_TIME_WAIT)
...@@ -1682,8 +1684,9 @@ int tcp_v4_rcv(struct sk_buff *skb) ...@@ -1682,8 +1684,9 @@ int tcp_v4_rcv(struct sk_buff *skb)
if (!tcp_prequeue(sk, skb)) if (!tcp_prequeue(sk, skb))
ret = tcp_v4_do_rcv(sk, skb); ret = tcp_v4_do_rcv(sk, skb);
} }
} else if (sk_add_backlog(sk, skb)) { } else if (unlikely(sk_add_backlog(sk, skb))) {
bh_unlock_sock(sk); bh_unlock_sock(sk);
NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
goto discard_and_relse; goto discard_and_relse;
} }
bh_unlock_sock(sk); bh_unlock_sock(sk);
......
...@@ -1740,8 +1740,9 @@ static int tcp_v6_rcv(struct sk_buff *skb) ...@@ -1740,8 +1740,9 @@ static int tcp_v6_rcv(struct sk_buff *skb)
if (!tcp_prequeue(sk, skb)) if (!tcp_prequeue(sk, skb))
ret = tcp_v6_do_rcv(sk, skb); ret = tcp_v6_do_rcv(sk, skb);
} }
} else if (sk_add_backlog(sk, skb)) { } else if (unlikely(sk_add_backlog(sk, skb))) {
bh_unlock_sock(sk); bh_unlock_sock(sk);
NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
goto discard_and_relse; goto discard_and_relse;
} }
bh_unlock_sock(sk); bh_unlock_sock(sk);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment