Commit a37c2134 authored by Eric Dumazet's avatar Eric Dumazet Committed by David S. Miller

tcp: add exponential backoff in __tcp_send_ack()

Whenever the host is under very high memory pressure,
__tcp_send_ack() skb allocation fails, and we set up
a 200 ms (TCP_DELACK_MAX) timer before retrying.

On hosts with a high number of TCP sockets, we can spend
a considerable amount of CPU cycles in these attempts,
adding high pressure on various spinlocks in the mm layer,
ultimately blocking threads attempting to free space
from making any progress.

This patch adds standard exponential backoff to avoid
adding fuel to the fire.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b6b6d653
...@@ -110,7 +110,7 @@ struct inet_connection_sock { ...@@ -110,7 +110,7 @@ struct inet_connection_sock {
__u8 pending; /* ACK is pending */ __u8 pending; /* ACK is pending */
__u8 quick; /* Scheduled number of quick acks */ __u8 quick; /* Scheduled number of quick acks */
__u8 pingpong; /* The session is interactive */ __u8 pingpong; /* The session is interactive */
/* one byte hole. */ __u8 retry; /* Number of attempts */
__u32 ato; /* Predicted tick of soft clock */ __u32 ato; /* Predicted tick of soft clock */
unsigned long timeout; /* Currently scheduled timeout */ unsigned long timeout; /* Currently scheduled timeout */
__u32 lrcvtime; /* timestamp of last received data packet */ __u32 lrcvtime; /* timestamp of last received data packet */
...@@ -199,6 +199,7 @@ static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what) ...@@ -199,6 +199,7 @@ static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
#endif #endif
} else if (what == ICSK_TIME_DACK) { } else if (what == ICSK_TIME_DACK) {
icsk->icsk_ack.pending = 0; icsk->icsk_ack.pending = 0;
icsk->icsk_ack.retry = 0;
#ifdef INET_CSK_CLEAR_TIMERS #ifdef INET_CSK_CLEAR_TIMERS
sk_stop_timer(sk, &icsk->icsk_delack_timer); sk_stop_timer(sk, &icsk->icsk_delack_timer);
#endif #endif
......
...@@ -3941,10 +3941,15 @@ void __tcp_send_ack(struct sock *sk, u32 rcv_nxt) ...@@ -3941,10 +3941,15 @@ void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
buff = alloc_skb(MAX_TCP_HEADER, buff = alloc_skb(MAX_TCP_HEADER,
sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN)); sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
if (unlikely(!buff)) { if (unlikely(!buff)) {
struct inet_connection_sock *icsk = inet_csk(sk);
unsigned long delay;
delay = TCP_DELACK_MAX << icsk->icsk_ack.retry;
if (delay < TCP_RTO_MAX)
icsk->icsk_ack.retry++;
inet_csk_schedule_ack(sk); inet_csk_schedule_ack(sk);
inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; icsk->icsk_ack.ato = TCP_ATO_MIN;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, delay, TCP_RTO_MAX);
TCP_DELACK_MAX, TCP_RTO_MAX);
return; return;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment