Commit 5ea8ea2c authored by Eric Dumazet, committed by David S. Miller

tcp/dccp: drop SYN packets if accept queue is full

Per listen(fd, backlog) rules, there is really no point in accepting a SYN,
sending a SYNACK, and then dropping the following ACK packet if the accept
queue is full, because the application is not draining the accept queue fast
enough.

This behavior fools TCP clients into believing they have established a
flow, while there is nothing at the server side. They might then send about
10 MSS (if using IW10) of data that will be dropped anyway while the server
is under stress.
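
For reference, the check the new code relies on is sk_acceptq_is_full(). A
simplified sketch of that helper as it looked around this commit (based on
include/net/sock.h; shown as a sketch, not verbatim) is:

    static inline bool sk_acceptq_is_full(const struct sock *sk)
    {
            /* sk_ack_backlog: established children waiting to be accept()ed;
             * sk_max_ack_backlog: the backlog value passed to listen().
             */
            return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
    }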
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Acked-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 58effd71
@@ -289,11 +289,6 @@ static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
 	return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);
 }
 
-static inline int inet_csk_reqsk_queue_young(const struct sock *sk)
-{
-	return reqsk_queue_len_young(&inet_csk(sk)->icsk_accept_queue);
-}
-
 static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
 {
 	return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
...
@@ -588,13 +588,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	if (inet_csk_reqsk_queue_is_full(sk))
 		goto drop;
 
-	/*
-	 * Accept backlog is full. If we have already queued enough
-	 * of warm entries in syn queue, drop request. It is better than
-	 * clogging syn queue with openreqs with exponentially increasing
-	 * timeout.
-	 */
-	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
+	if (sk_acceptq_is_full(sk))
 		goto drop;
 
 	req = inet_reqsk_alloc(&dccp_request_sock_ops, sk, true);
...
@@ -325,7 +325,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	if (inet_csk_reqsk_queue_is_full(sk))
 		goto drop;
 
-	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
+	if (sk_acceptq_is_full(sk))
 		goto drop;
 
 	req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk, true);
...
@@ -6298,13 +6298,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 		goto drop;
 	}
 
-
-	/* Accept backlog is full. If we have already queued enough
-	 * of warm entries in syn queue, drop request. It is better than
-	 * clogging syn queue with openreqs with exponentially increasing
-	 * timeout.
-	 */
-	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+	if (sk_acceptq_is_full(sk)) {
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 		goto drop;
 	}
...
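
For context, a minimal user-space listener like the sketch below is what ends
up exercising this path: the backlog argument of listen() sets
sk_max_ack_backlog, so once that many established connections sit unaccepted
in the accept queue, further SYNs are now dropped and the client keeps
retransmitting its SYN instead of pushing data into the void. (Illustrative
sketch only; the port number and backlog value are arbitrary.)

    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_STREAM, 0);
            struct sockaddr_in addr;

            memset(&addr, 0, sizeof(addr));
            addr.sin_family = AF_INET;
            addr.sin_addr.s_addr = htonl(INADDR_ANY);
            addr.sin_port = htons(8080);

            bind(fd, (struct sockaddr *)&addr, sizeof(addr));

            /* backlog = 4: at most ~4 established connections may wait in
             * the accept queue.  If the application stops calling accept(),
             * the queue fills and, with this patch, new SYNs are dropped
             * instead of being answered with a SYNACK.
             */
            listen(fd, 4);

            for (;;) {
                    int c = accept(fd, NULL, NULL);
                    if (c >= 0)
                            close(c);
            }
            return 0;
    }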