Commit 607065ba authored by Eric Dumazet, committed by David S. Miller

tcp: avoid integer overflows in tcp_rcv_space_adjust()

When using large tcp_rmem[2] values (I did tests with 500 MB),
I noticed overflows while computing rcvwin.

Let's fix this before the following patch.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Acked-by: Wei Wang <weiwan@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 02db5571
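
To make the overflow concrete before the diff: with tcp_rmem[2] around 500 MB, the amount copied to user space per RTT approaches 2^29 bytes, and the old all-int receive-buffer arithmetic can exceed INT_MAX. The following user-space sketch is illustrative only (not kernel code); advmss = 1460 and rcvmem = 2304 are assumed values, and the extra doubling models the existing "2x growth" branch of tcp_rcv_space_adjust().

/* Illustrative user-space sketch, not kernel code: shows the 32-bit
 * receive-buffer arithmetic exceeding INT_MAX once ~500 MB is consumed
 * per RTT. advmss and rcvmem are assumed values.
 */
#include <limits.h>
#include <stdio.h>

int main(void)
{
    int copied = 500 * 1024 * 1024;           /* bytes copied to user in one RTT */
    int advmss = 1460;                        /* assumed advertised MSS */
    int rcvmem = 2304;                        /* assumed truesize of one advmss skb */

    int rcvwin = (copied << 1) + 16 * advmss; /* ~1.05e9, still fits in int */
    rcvwin <<= 1;                             /* 2x-growth branch: ~2.1e9, barely fits */

    /* The old code computed this product in a plain int. */
    long long rcvbuf = (long long)(rcvwin / advmss) * rcvmem;

    printf("rcvwin  = %d\n", rcvwin);         /* 2097198720 */
    printf("rcvbuf  = %lld\n", rcvbuf);       /* 3309550848: does not fit in int */
    printf("INT_MAX = %d\n", INT_MAX);        /* 2147483647 */
    return 0;
}

In the pre-patch code this product is evaluated in a 32-bit rcvwin, so in practice it wraps to a negative rcvbuf, min() keeps the wrapped value, and sk_rcvbuf stops growing; widening rcvwin to u64 (and rcvq_space.space / copied to u32) removes that failure mode.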
include/linux/tcp.h

@@ -344,7 +344,7 @@ struct tcp_sock {
 /* Receiver queue space */
     struct {
-        int space;
+        u32 space;
         u32 seq;
         u64 time;
     } rcvq_space;
net/ipv4/tcp_input.c

@@ -576,8 +576,8 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
 void tcp_rcv_space_adjust(struct sock *sk)
 {
     struct tcp_sock *tp = tcp_sk(sk);
+    u32 copied;
     int time;
-    int copied;

     tcp_mstamp_refresh(tp);
     time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
@@ -600,12 +600,13 @@ void tcp_rcv_space_adjust(struct sock *sk)

     if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
         !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
-        int rcvwin, rcvmem, rcvbuf;
+        int rcvmem, rcvbuf;
+        u64 rcvwin;

         /* minimal window to cope with packet losses, assuming
          * steady state. Add some cushion because of small variations.
          */
-        rcvwin = (copied << 1) + 16 * tp->advmss;
+        rcvwin = ((u64)copied << 1) + 16 * tp->advmss;

         /* If rate increased by 25%,
          *  assume slow start, rcvwin = 3 * copied
@@ -625,8 +626,9 @@ void tcp_rcv_space_adjust(struct sock *sk)
         while (tcp_win_from_space(sk, rcvmem) < tp->advmss)
             rcvmem += 128;

-        rcvbuf = min(rcvwin / tp->advmss * rcvmem,
-                     sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+        do_div(rcvwin, tp->advmss);
+        rcvbuf = min_t(u64, rcvwin * rcvmem,
+                       sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
         if (rcvbuf > sk->sk_rcvbuf) {
             sk->sk_rcvbuf = rcvbuf;
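
As background on the replacement arithmetic: do_div(rcvwin, tp->advmss) divides the u64 rcvwin in place by a 32-bit divisor (the remainder it returns is unused here), avoiding a plain 64-bit '/' that 32-bit architectures would have to route through libgcc helpers, and min_t(u64, ...) clamps the product against tcp_rmem[2] while still in 64 bits, so the int rcvbuf only ever receives an in-range value. A rough user-space analogue, with do_div() stood in by a plain 64-bit division and hypothetical numbers carried over from the sketch above:

/* Rough user-space analogue of the fixed path; do_div() is replaced by a
 * plain 64-bit division, and all values are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t rcvwin = 2097198720ULL;          /* ~2 GB window from the sketch above */
    uint32_t advmss = 1460;
    uint32_t rcvmem = 2304;
    uint64_t tcp_rmem2 = 500 * 1024 * 1024;   /* tcp_rmem[2] set to 500 MB */

    rcvwin /= advmss;                         /* kernel: do_div(rcvwin, tp->advmss) */
    uint64_t want = rcvwin * rcvmem;          /* ~3.3e9, safe in 64 bits */
    int rcvbuf = (int)(want < tcp_rmem2 ? want : tcp_rmem2);  /* kernel: min_t(u64, ...) */

    printf("rcvbuf = %d\n", rcvbuf);          /* 524288000: clamped, no wraparound */
    return 0;
}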