Commit b340b264 authored by Yuchung Cheng, committed by David S. Miller

tcp: TLP retransmits last if failed to send new packet

When TLP fails to send a new packet because of the receive window
limit, it should fall back to retransmitting the last packet instead.
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Nandita Dukkipati <nanditad@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent fcd16c0a
@@ -2149,7 +2149,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                 tcp_cwnd_validate(sk, is_cwnd_limited);
                 return false;
         }
-        return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk));
+        return !tp->packets_out && tcp_send_head(sk);
 }
 
 bool tcp_schedule_loss_probe(struct sock *sk)
@@ -2226,7 +2226,7 @@ static bool skb_still_in_host_queue(const struct sock *sk,
         return false;
 }
 
-/* When probe timeout (PTO) fires, send a new segment if one exists, else
+/* When probe timeout (PTO) fires, try send a new segment if possible, else
  * retransmit the last segment.
  */
 void tcp_send_loss_probe(struct sock *sk)
@@ -2235,11 +2235,19 @@ void tcp_send_loss_probe(struct sock *sk)
         struct sk_buff *skb;
         int pcount;
         int mss = tcp_current_mss(sk);
-        int err = -1;
 
-        if (tcp_send_head(sk)) {
-                err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
-                goto rearm_timer;
+        skb = tcp_send_head(sk);
+        if (skb) {
+                if (tcp_snd_wnd_test(tp, skb, mss)) {
+                        pcount = tp->packets_out;
+                        tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
+                        if (tp->packets_out > pcount)
+                                goto probe_sent;
+                        goto rearm_timer;
+                }
+                skb = tcp_write_queue_prev(sk, skb);
+        } else {
+                skb = tcp_write_queue_tail(sk);
         }
 
         /* At most one outstanding TLP retransmission. */
@@ -2247,7 +2255,6 @@ void tcp_send_loss_probe(struct sock *sk)
                 goto rearm_timer;
 
         /* Retransmit last segment. */
-        skb = tcp_write_queue_tail(sk);
         if (WARN_ON(!skb))
                 goto rearm_timer;
 
@@ -2262,24 +2269,23 @@ void tcp_send_loss_probe(struct sock *sk)
                 if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss,
                                           GFP_ATOMIC)))
                         goto rearm_timer;
-                skb = tcp_write_queue_tail(sk);
+                skb = tcp_write_queue_next(sk, skb);
         }
 
         if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
                 goto rearm_timer;
 
-        err = __tcp_retransmit_skb(sk, skb);
+        if (__tcp_retransmit_skb(sk, skb))
+                goto rearm_timer;
 
         /* Record snd_nxt for loss detection. */
-        if (likely(!err))
-                tp->tlp_high_seq = tp->snd_nxt;
+        tp->tlp_high_seq = tp->snd_nxt;
 
+probe_sent:
+        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
+        /* Reset s.t. tcp_rearm_rto will restart timer from now */
+        inet_csk(sk)->icsk_pending = 0;
 rearm_timer:
-        if (likely(!err)) {
-                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
-                /* Reset s.t. tcp_rearm_rto will restart timer from now */
-                inet_csk(sk)->icsk_pending = 0;
-        }
         tcp_rearm_rto(sk);
 }
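
For readers who want the gist without walking the diff: below is a minimal, self-contained sketch of the probe-timeout (PTO) decision this patch implements. It is not kernel code; struct conn, send_window_allows(), and loss_probe_action() are hypothetical stand-ins (the kernel checks the real send head with tcp_snd_wnd_test()), and the receive window is modeled in whole segments for simplicity. The point it illustrates is the commit's behavior: when the PTO fires, send a new segment only if the receive window still has room; otherwise fall back to retransmitting the last segment already queued.

/* Hedged sketch of the TLP fallback decision; names are illustrative only. */
#include <stdbool.h>
#include <stdio.h>

struct conn {
        unsigned int packets_out;      /* segments currently in flight */
        unsigned int snd_wnd;          /* receiver-advertised window, in segments */
        bool have_unsent_data;         /* is there a new segment waiting to be sent? */
};

/* Stand-in for tcp_snd_wnd_test(): would one more segment fit in the window? */
static bool send_window_allows(const struct conn *c)
{
        return c->packets_out < c->snd_wnd;
}

/* Decision taken when the probe timeout fires. */
static const char *loss_probe_action(struct conn *c)
{
        if (c->have_unsent_data && send_window_allows(c)) {
                c->packets_out++;      /* the new segment itself acts as the probe */
                return "send new segment";
        }
        /* Receive window is full (or nothing new queued): fall back to
         * retransmitting the last segment, as this patch does. */
        return "retransmit last segment";
}

int main(void)
{
        struct conn window_limited = { .packets_out = 10, .snd_wnd = 10,
                                       .have_unsent_data = true };
        struct conn window_open    = { .packets_out = 4,  .snd_wnd = 10,
                                       .have_unsent_data = true };

        printf("window limited: %s\n", loss_probe_action(&window_limited));
        printf("window open:    %s\n", loss_probe_action(&window_open));
        return 0;
}

Built with any standard C compiler, the first case prints "retransmit last segment" (window full) and the second prints "send new segment", mirroring the fallback the commit message describes.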