Commit b38a51fe authored by Yuchung Cheng, committed by David S. Miller

tcp: disable RFC6675 loss detection

This patch disables RFC6675 loss detection and makes the sysctl
net.ipv4.tcp_recovery = 1 control a binary choice between RACK
(1) and RFC6675 (0).
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Soheil Hassas Yeganeh <soheil@google.com>
Reviewed-by: Priyaranjan Jha <priyarjha@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 20b654df
@@ -449,7 +449,8 @@ tcp_recovery - INTEGER
 	features.
 
 	RACK: 0x1 enables the RACK loss detection for fast detection of lost
-	      retransmissions and tail drops.
+	      retransmissions and tail drops. It also subsumes and disables
+	      RFC6675 recovery for SACK connections.
 	RACK: 0x2 makes RACK's reordering window static (min_rtt/4).
 	RACK: 0x4 disables RACK's DUPACK threshold heuristic
...
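For readers who want to check how this bitmap is set on a running system, below is a minimal userspace sketch (not part of this patch) that reads net.ipv4.tcp_recovery via procfs and decodes the documented bits. The RECOVERY_* names and the program itself are illustrative stand-ins; only the bit values 0x1/0x2/0x4 come from the documentation hunk above.

/*
 * Illustrative userspace sketch: decode the net.ipv4.tcp_recovery bitmap.
 * Not kernel code; constant names are local stand-ins for the documented bits.
 */
#include <stdio.h>

#define RECOVERY_RACK_LOSS_DETECTION 0x1  /* RACK loss detection (subsumes RFC6675) */
#define RECOVERY_RACK_STATIC_REO_WND 0x2  /* static reordering window (min_rtt/4)   */
#define RECOVERY_RACK_NO_DUPTHRESH   0x4  /* disable RACK's DUPACK threshold        */

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_recovery", "r");
	unsigned int val = 0;

	if (!f) {
		perror("tcp_recovery");
		return 1;
	}
	if (fscanf(f, "%u", &val) != 1) {
		fclose(f);
		fprintf(stderr, "unexpected tcp_recovery format\n");
		return 1;
	}
	fclose(f);

	printf("tcp_recovery = %u\n", val);
	printf("loss detection: %s\n",
	       (val & RECOVERY_RACK_LOSS_DETECTION) ? "RACK (RFC6675 disabled)"
						     : "RFC6675");
	if (val & RECOVERY_RACK_STATIC_REO_WND)
		printf("RACK reordering window is static (min_rtt/4)\n");
	if (val & RECOVERY_RACK_NO_DUPTHRESH)
		printf("RACK DUPACK threshold heuristic disabled\n");
	return 0;
}

With the default sysctl value of 1, this reports RACK loss detection with RFC6675 recovery disabled, matching the binary choice described in the commit message.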
@@ -2035,6 +2035,11 @@ static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
 	return tp->sacked_out + 1;
 }
 
+static bool tcp_is_rack(const struct sock *sk)
+{
+	return sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_LOSS_DETECTION;
+}
+
 /* Linux NewReno/SACK/ECN state machine.
  * --------------------------------------
  *
@@ -2141,7 +2146,7 @@ static bool tcp_time_to_recover(struct sock *sk, int flag)
 		return true;
 
 	/* Not-A-Trick#2 : Classic rule... */
-	if (tcp_dupack_heuristics(tp) > tp->reordering)
+	if (!tcp_is_rack(sk) && tcp_dupack_heuristics(tp) > tp->reordering)
 		return true;
 
 	return false;
@@ -2722,8 +2727,7 @@ static void tcp_rack_identify_loss(struct sock *sk, int *ack_flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	/* Use RACK to detect loss */
-	if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_LOSS_DETECTION) {
+	if (tcp_is_rack(sk)) {
 		u32 prior_retrans = tp->retrans_out;
 
 		tcp_rack_mark_lost(sk);
@@ -2862,7 +2866,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 			fast_rexmit = 1;
 		}
 	}
 
-	if (do_lost)
+	if (!tcp_is_rack(sk) && do_lost)
 		tcp_update_scoreboard(sk, fast_rexmit);
 	*rexmit = REXMIT_LOST;
 }
...
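To make the control flow easier to follow outside the kernel tree, here is a simplified, self-contained model of the choice the patch introduces: when the 0x1 bit is set, the classic RFC6675 DUPACK-threshold rule in tcp_time_to_recover() is skipped and loss marking is left entirely to RACK. The toy_* struct and helpers are hypothetical stand-ins, not kernel code; only the TCP_RACK_LOSS_DETECTION bit and the sacked_out/reordering comparison mirror the diff above.

/*
 * Toy model of the patched decision logic (not kernel code).
 * With bit 0x1 set, RACK alone decides when packets are marked lost;
 * otherwise the classic RFC6675 DUPACK-threshold rule applies.
 */
#include <stdbool.h>
#include <stdio.h>

#define TCP_RACK_LOSS_DETECTION 0x1

struct toy_tcp_sock {
	unsigned int sysctl_tcp_recovery; /* net.ipv4.tcp_recovery bitmap */
	unsigned int sacked_out;          /* SACKed packets (dupack proxy) */
	unsigned int reordering;          /* current reordering estimate   */
};

static bool toy_is_rack(const struct toy_tcp_sock *tp)
{
	return tp->sysctl_tcp_recovery & TCP_RACK_LOSS_DETECTION;
}

/* Mirrors the tcp_time_to_recover() change: the DUPACK-threshold rule
 * only fires when RACK loss detection is not in use. */
static bool toy_time_to_recover(const struct toy_tcp_sock *tp)
{
	if (!toy_is_rack(tp) && tp->sacked_out + 1 > tp->reordering)
		return true;
	return false;
}

int main(void)
{
	struct toy_tcp_sock rfc6675 = { 0x0, 4, 3 };
	struct toy_tcp_sock rack    = { 0x1, 4, 3 };

	printf("RFC6675 mode triggers recovery: %d\n", toy_time_to_recover(&rfc6675));
	printf("RACK mode defers to RACK marking: %d\n", toy_time_to_recover(&rack));
	return 0;
}

Built with a plain C compiler, the first case reports that recovery triggers via the DUPACK threshold, while the second leaves loss detection to RACK's timer-based marking (tcp_rack_mark_lost() in the real code).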