Commit a29cb691 authored by Yuchung Cheng, committed by David S. Miller

net: tcp better handling of reordering then loss cases

This patch aims to improve the situation when reordering and loss are
occurring in the same flight of packets.

Previously the reordering would first induce a spurious recovery, then
a subsequent ACK may undo the cwnd reduction (e.g. based on timestamps).
However, the current loss recovery does not then proceed to invoke
RACK to install a reordering timer. If some packets are also lost, this
may lead to a long RTO-based recovery. An example is
https://groups.google.com/g/bbr-dev/c/OFHADvJbTEI

The solution is, after reverting the recovery, to always invoke RACK
to either arm the RACK timer to fast retransmit after the reordering
window, or restart the recovery if new loss is identified. Hence
it is possible for the sender to go from Recovery to Disorder/Open and
back to Recovery again within a single ACK.
Reported-by: mingkun bian <bianmingkun@gmail.com>
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 43902070
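
Before the diff: the new tcp_force_fast_retransmit() helper asks whether the
highest SACKed sequence sits more than reordering * mss_cache bytes above
snd_una; if so, the hole below it is treated as lost rather than reordered.
Below is a minimal standalone sketch of that test, assuming only the
wrap-safe semantics of the kernel's after() macro; the sample values are
illustrative, and this is not kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "seq1 is after seq2" in 32-bit sequence space,
 * mirroring the kernel's after()/before() macros.
 */
static bool after(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) > 0;
}

int main(void)
{
	uint32_t snd_una = 1000;   /* first unacknowledged byte */
	uint32_t reordering = 3;   /* reordering degree, in segments */
	uint32_t mss_cache = 1460; /* current MSS estimate */

	/* Illustrative: highest SACKed byte is 5 segments above snd_una. */
	uint32_t highest_sack = snd_una + 5 * mss_cache;

	/* The helper's test: SACKed data beyond the reordering window
	 * is evidence of loss, so force fast retransmit.
	 */
	if (after(highest_sack, snd_una + reordering * mss_cache))
		puts("force fast retransmit");
	else
		puts("keep waiting: could still be reordering");
	return 0;
}

With reordering = 3 and mss_cache = 1460 the threshold is snd_una + 4380
bytes, so a SACK for data five segments above snd_una forces fast retransmit.
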
@@ -2816,8 +2816,17 @@ static void tcp_process_loss(struct sock *sk, int flag, int num_dupack,
 		*rexmit = REXMIT_LOST;
 }
 
+static bool tcp_force_fast_retransmit(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	return after(tcp_highest_sack_seq(tp),
+		     tp->snd_una + tp->reordering * tp->mss_cache);
+}
+
 /* Undo during fast recovery after partial ACK. */
-static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una)
+static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una,
+				 bool *do_lost)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2842,7 +2851,9 @@ static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una)
 		tcp_undo_cwnd_reduction(sk, true);
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
 		tcp_try_keep_open(sk);
-		return true;
+	} else {
+		/* Partial ACK arrived. Force fast retransmit. */
+		*do_lost = tcp_force_fast_retransmit(sk);
 	}
 	return false;
 }
@@ -2866,14 +2877,6 @@ static void tcp_identify_packet_loss(struct sock *sk, int *ack_flag)
 	}
 }
 
-static bool tcp_force_fast_retransmit(struct sock *sk)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	return after(tcp_highest_sack_seq(tp),
-		     tp->snd_una + tp->reordering * tp->mss_cache);
-}
-
 /* Process an event, which can update packets-in-flight not trivially.
  * Main goal of this function is to calculate new estimate for left_out,
  * taking into account both packets sitting in receiver's buffer and
@@ -2943,17 +2946,21 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 		if (!(flag & FLAG_SND_UNA_ADVANCED)) {
 			if (tcp_is_reno(tp))
 				tcp_add_reno_sack(sk, num_dupack, ece_ack);
-		} else {
-			if (tcp_try_undo_partial(sk, prior_snd_una))
-				return;
-			/* Partial ACK arrived. Force fast retransmit. */
-			do_lost = tcp_force_fast_retransmit(sk);
-		}
-		if (tcp_try_undo_dsack(sk)) {
-			tcp_try_keep_open(sk);
+		} else if (tcp_try_undo_partial(sk, prior_snd_una, &do_lost))
 			return;
-		}
+
+		if (tcp_try_undo_dsack(sk))
+			tcp_try_keep_open(sk);
+
 		tcp_identify_packet_loss(sk, ack_flag);
+		if (icsk->icsk_ca_state != TCP_CA_Recovery) {
+			if (!tcp_time_to_recover(sk, flag))
+				return;
+			/* Undo reverts the recovery state. If loss is evident,
+			 * starts a new recovery (e.g. reordering then loss);
+			 */
+			tcp_enter_recovery(sk, ece_ack);
+		}
 		break;
 	case TCP_CA_Loss:
 		tcp_process_loss(sk, flag, num_dupack, rexmit);
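
The net effect of the last hunk is that a single ACK can now traverse
Recovery -> Disorder/Open -> Recovery. Below is a condensed, non-kernel
sketch of that control flow, with hypothetical stubs in place of
tcp_try_undo_partial(), RACK loss detection and tcp_time_to_recover();
only the branching mirrors the patch.

#include <stdbool.h>
#include <stdio.h>

enum ca_state { CA_OPEN, CA_RECOVERY };

static enum ca_state state = CA_RECOVERY;

/* Hypothetical stub: a timestamp-based undo proves the recovery was
 * spurious and drops us back to Open; not a full undo, so return false.
 */
static bool try_undo_partial(void)
{
	state = CA_OPEN;
	return false;
}

/* Hypothetical stub: RACK marks fresh losses on this same ACK. */
static bool time_to_recover(void)
{
	return true;
}

/* Mirrors the revised TCP_CA_Recovery branch of tcp_fastretrans_alert(). */
static void on_ack_in_recovery(void)
{
	if (try_undo_partial())
		return; /* full undo: recovery ends here */

	/* New in this patch: after an undo we may no longer be in
	 * Recovery, so re-check for loss instead of waiting for an RTO.
	 */
	if (state != CA_RECOVERY && time_to_recover()) {
		state = CA_RECOVERY;
		puts("undo reverted recovery; new loss seen: re-enter Recovery");
	}
}

int main(void)
{
	on_ack_in_recovery();
	return 0;
}

Previously the function returned right after the undo, so freshly detected
loss had to wait for the RTO; the new icsk_ca_state check re-enters
recovery on the same ACK.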