Commit 7a26dc9e authored by Menglong Dong, committed by David S. Miller

net: tcp: add skb drop reasons to tcp_add_backlog()

Pass the address of drop_reason to tcp_add_backlog() so that it can store
the reason for the skb drop when it fails. The following drop reason is
introduced:

SKB_DROP_REASON_SOCKET_BACKLOG
Reviewed-by: Mengen Sun <mengensun@tencent.com>
Reviewed-by: Hao Peng <flyingpeng@tencent.com>
Signed-off-by: Menglong Dong <imagedong@tencent.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: David Ahern <dsahern@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 643b622b
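Taken together, the change threads a drop reason out of tcp_add_backlog() without disturbing its boolean return convention. A condensed sketch of the resulting caller contract (an illustration distilled from the hunks below, not part of the diff itself):

	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;

	/* tcp_add_backlog() returns true when the skb could not be queued;
	 * in that case *reason has been filled in by the callee and the
	 * caller is expected to free the skb with that reason.
	 */
	if (tcp_add_backlog(sk, skb, &drop_reason))
		kfree_skb_reason(skb, drop_reason);

In the real callers this freeing happens via the existing goto discard_and_relse path rather than inline, which is why the return value stays a bool and the reason travels through the new out-parameter.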
include/linux/skbuff.h
@@ -358,6 +358,10 @@ enum skb_drop_reason {
 					 * corresponding to
 					 * LINUX_MIB_TCPMD5FAILURE
 					 */
+	SKB_DROP_REASON_SOCKET_BACKLOG,	/* failed to add skb to socket
+					 * backlog (see
+					 * LINUX_MIB_TCPBACKLOGDROP)
+					 */
 	SKB_DROP_REASON_MAX,
 };
include/net/tcp.h
@@ -1367,7 +1367,8 @@ static inline bool tcp_checksum_complete(struct sk_buff *skb)
 		__skb_checksum_complete(skb);
 }
 
-bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
+bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
+		     enum skb_drop_reason *reason);
 
 #ifdef CONFIG_INET
 void __sk_defer_free_flush(struct sock *sk);
include/trace/events/skb.h
@@ -31,6 +31,7 @@
 	EM(SKB_DROP_REASON_TCP_MD5UNEXPECTED,			\
 	   TCP_MD5UNEXPECTED)					\
 	EM(SKB_DROP_REASON_TCP_MD5FAILURE, TCP_MD5FAILURE)	\
+	EM(SKB_DROP_REASON_SOCKET_BACKLOG, SOCKET_BACKLOG)	\
 	EMe(SKB_DROP_REASON_MAX, MAX)
 
 #undef EM
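For context: EM()/EMe() here follow the usual two-pass trace-event idiom, so the new reason shows up as a readable string in kfree_skb trace output. Roughly (condensed from the same header; the exact scaffolding may differ slightly between kernel versions):

	/* First pass: export each enum value so user-space trace tools
	 * can resolve it.
	 */
	#define EM(a, b)	TRACE_DEFINE_ENUM(a);
	#define EMe(a, b)	TRACE_DEFINE_ENUM(a);

	TRACE_SKB_DROP_REASON

	/* Second pass: build a value -> name table ... */
	#undef EM
	#undef EMe
	#define EM(a, b)	{ a, #b },
	#define EMe(a, b)	{ a, #b }

	/* ... consumed by the kfree_skb tracepoint via
	 *	__print_symbolic(__entry->reason, TRACE_SKB_DROP_REASON)
	 */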
net/ipv4/tcp_ipv4.c
@@ -1811,7 +1811,8 @@ int tcp_v4_early_demux(struct sk_buff *skb)
 	return 0;
 }
 
-bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
+bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
+		     enum skb_drop_reason *reason)
 {
 	u32 limit, tail_gso_size, tail_gso_segs;
 	struct skb_shared_info *shinfo;
@@ -1837,6 +1838,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
 	if (unlikely(tcp_checksum_complete(skb))) {
 		bh_unlock_sock(sk);
 		trace_tcp_bad_csum(skb);
+		*reason = SKB_DROP_REASON_TCP_CSUM;
 		__TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
 		__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
 		return true;
@@ -1925,6 +1927,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
 	if (unlikely(sk_add_backlog(sk, skb, limit))) {
 		bh_unlock_sock(sk);
+		*reason = SKB_DROP_REASON_SOCKET_BACKLOG;
 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
 		return true;
 	}
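For reference, sk_add_backlog() fails by returning a negative errno once the receive and backlog queues are over the given limit, which is what trips the new SOCKET_BACKLOG reason above. Roughly (condensed from include/net/sock.h of this era; details may vary by version):

	static inline __must_check int sk_add_backlog(struct sock *sk,
						      struct sk_buff *skb,
						      unsigned int limit)
	{
		if (sk_rcvqueues_full(sk, limit))
			return -ENOBUFS;	/* backlog full: the drop counted above */

		/* pfmemalloc skbs are reserved for SOCK_MEMALLOC sockets */
		if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
			return -ENOMEM;

		__sk_add_backlog(sk, skb);
		sk->sk_backlog.len += skb->truesize;
		return 0;
	}

The diff continues with the tcp_v4_rcv() caller: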
@@ -2133,7 +2136,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 	if (!sock_owned_by_user(sk)) {
 		ret = tcp_v4_do_rcv(sk, skb);
 	} else {
-		if (tcp_add_backlog(sk, skb))
+		if (tcp_add_backlog(sk, skb, &drop_reason))
 			goto discard_and_relse;
 	}
 	bh_unlock_sock(sk);
net/ipv6/tcp_ipv6.c
@@ -1784,7 +1784,7 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
 	if (!sock_owned_by_user(sk)) {
 		ret = tcp_v6_do_rcv(sk, skb);
 	} else {
-		if (tcp_add_backlog(sk, skb))
+		if (tcp_add_backlog(sk, skb, &drop_reason))
 			goto discard_and_relse;
 	}
 	bh_unlock_sock(sk);
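In both callers the drop_reason set here ultimately reaches kfree_skb_reason(), and through it the kfree_skb tracepoint. The shared tail of tcp_v4_rcv()/tcp_v6_rcv() looks roughly like this (condensed for illustration, not part of this diff):

	discard_it:
		/* Discard frame. */
		kfree_skb_reason(skb, drop_reason);
		return 0;

	discard_and_relse:
		sk_drops_add(sk, skb);
		if (refcounted)
			sock_put(sk);
		goto discard_it;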