Commit fcdd1cf4 authored by Eric Dumazet's avatar Eric Dumazet Committed by David S. Miller

tcp: avoid possible arithmetic overflows

icsk_rto is a 32bit field, and icsk_backoff can reach 15 by default,
or more if some sysctl (eg tcp_retries2) are changed.

Better use 64bit to perform icsk_rto << icsk_backoff operations

As Joe Perches suggested, add a helper for this.

Yuchung spotted the tcp_v4_err() case.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 35f7aa53
...@@ -242,6 +242,15 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what, ...@@ -242,6 +242,15 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
#endif #endif
} }
static inline unsigned long
inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
unsigned long max_when)
{
u64 when = (u64)icsk->icsk_rto << icsk->icsk_backoff;
return (unsigned long)min_t(u64, when, max_when);
}
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err); struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
struct request_sock *inet_csk_search_req(const struct sock *sk, struct request_sock *inet_csk_search_req(const struct sock *sk,
......
...@@ -3208,9 +3208,10 @@ static void tcp_ack_probe(struct sock *sk) ...@@ -3208,9 +3208,10 @@ static void tcp_ack_probe(struct sock *sk)
* This function is not for random using! * This function is not for random using!
*/ */
} else { } else {
unsigned long when = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), when, TCP_RTO_MAX);
TCP_RTO_MAX);
} }
} }
......
...@@ -430,9 +430,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) ...@@ -430,9 +430,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
break; break;
icsk->icsk_backoff--; icsk->icsk_backoff--;
inet_csk(sk)->icsk_rto = (tp->srtt_us ? __tcp_set_rto(tp) : icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
TCP_TIMEOUT_INIT) << icsk->icsk_backoff; TCP_TIMEOUT_INIT;
tcp_bound_rto(sk); icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
skb = tcp_write_queue_head(sk); skb = tcp_write_queue_head(sk);
BUG_ON(!skb); BUG_ON(!skb);
......
...@@ -3279,6 +3279,7 @@ void tcp_send_probe0(struct sock *sk) ...@@ -3279,6 +3279,7 @@ void tcp_send_probe0(struct sock *sk)
{ {
struct inet_connection_sock *icsk = inet_csk(sk); struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
unsigned long probe_max;
int err; int err;
err = tcp_write_wakeup(sk); err = tcp_write_wakeup(sk);
...@@ -3294,9 +3295,7 @@ void tcp_send_probe0(struct sock *sk) ...@@ -3294,9 +3295,7 @@ void tcp_send_probe0(struct sock *sk)
if (icsk->icsk_backoff < sysctl_tcp_retries2) if (icsk->icsk_backoff < sysctl_tcp_retries2)
icsk->icsk_backoff++; icsk->icsk_backoff++;
icsk->icsk_probes_out++; icsk->icsk_probes_out++;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, probe_max = TCP_RTO_MAX;
min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
TCP_RTO_MAX);
} else { } else {
/* If packet was not sent due to local congestion, /* If packet was not sent due to local congestion,
* do not backoff and do not remember icsk_probes_out. * do not backoff and do not remember icsk_probes_out.
...@@ -3306,11 +3305,11 @@ void tcp_send_probe0(struct sock *sk) ...@@ -3306,11 +3305,11 @@ void tcp_send_probe0(struct sock *sk)
*/ */
if (!icsk->icsk_probes_out) if (!icsk->icsk_probes_out)
icsk->icsk_probes_out = 1; icsk->icsk_probes_out = 1;
probe_max = TCP_RESOURCE_PROBE_INTERVAL;
}
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
min(icsk->icsk_rto << icsk->icsk_backoff, inet_csk_rto_backoff(icsk, probe_max),
TCP_RESOURCE_PROBE_INTERVAL),
TCP_RTO_MAX); TCP_RTO_MAX);
}
} }
int tcp_rtx_synack(struct sock *sk, struct request_sock *req) int tcp_rtx_synack(struct sock *sk, struct request_sock *req)
......
...@@ -180,7 +180,7 @@ static int tcp_write_timeout(struct sock *sk) ...@@ -180,7 +180,7 @@ static int tcp_write_timeout(struct sock *sk)
retry_until = sysctl_tcp_retries2; retry_until = sysctl_tcp_retries2;
if (sock_flag(sk, SOCK_DEAD)) { if (sock_flag(sk, SOCK_DEAD)) {
const int alive = (icsk->icsk_rto < TCP_RTO_MAX); const int alive = icsk->icsk_rto < TCP_RTO_MAX;
retry_until = tcp_orphan_retries(sk, alive); retry_until = tcp_orphan_retries(sk, alive);
do_reset = alive || do_reset = alive ||
...@@ -294,7 +294,7 @@ static void tcp_probe_timer(struct sock *sk) ...@@ -294,7 +294,7 @@ static void tcp_probe_timer(struct sock *sk)
max_probes = sysctl_tcp_retries2; max_probes = sysctl_tcp_retries2;
if (sock_flag(sk, SOCK_DEAD)) { if (sock_flag(sk, SOCK_DEAD)) {
const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX); const int alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;
max_probes = tcp_orphan_retries(sk, alive); max_probes = tcp_orphan_retries(sk, alive);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment