Commit e636f8b0 authored by Yuchung Cheng's avatar Yuchung Cheng Committed by David S. Miller

tcp: new helper for RACK to detect loss

Create a new helper tcp_rack_detect_loss to prepare the upcoming
RACK reordering timer patch.
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent db8da6bb
@@ -1863,8 +1863,7 @@ extern int sysctl_tcp_recovery;
 /* Use TCP RACK to detect (some) tail and retransmit losses */
 #define TCP_RACK_LOST_RETRANS 0x1
-extern int tcp_rack_mark_lost(struct sock *sk);
+extern void tcp_rack_mark_lost(struct sock *sk);
 extern void tcp_rack_advance(struct tcp_sock *tp,
 			     const struct skb_mstamp *xmit_time, u8 sacked);
...
@@ -2865,10 +2865,14 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
 	}
 
 	/* Use RACK to detect loss */
-	if (sysctl_tcp_recovery & TCP_RACK_LOST_RETRANS &&
-	    tcp_rack_mark_lost(sk)) {
-		flag |= FLAG_LOST_RETRANS;
-		*ack_flag |= FLAG_LOST_RETRANS;
+	if (sysctl_tcp_recovery & TCP_RACK_LOST_RETRANS) {
+		u32 prior_retrans = tp->retrans_out;
+
+		tcp_rack_mark_lost(sk);
+		if (prior_retrans > tp->retrans_out) {
+			flag |= FLAG_LOST_RETRANS;
+			*ack_flag |= FLAG_LOST_RETRANS;
+		}
 	}
 
 	/* E. Process state. */
...
@@ -32,17 +32,11 @@ static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
  * The current version is only used after recovery starts but can be
  * easily extended to detect the first loss.
  */
-int tcp_rack_mark_lost(struct sock *sk)
+static void tcp_rack_detect_loss(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
-	u32 reo_wnd, prior_retrans = tp->retrans_out;
-
-	if (inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery || !tp->rack.advanced)
-		return 0;
-
-	/* Reset the advanced flag to avoid unnecessary queue scanning */
-	tp->rack.advanced = 0;
+	u32 reo_wnd;
 
 	/* To be more reordering resilient, allow min_rtt/4 settling delay
 	 * (lower-bounded to 1000uS). We use min_rtt instead of the smoothed
@@ -82,7 +76,17 @@ int tcp_rack_mark_lost(struct sock *sk)
 			break;
 		}
 	}
-	return prior_retrans - tp->retrans_out;
+}
+
+void tcp_rack_mark_lost(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery || !tp->rack.advanced)
+		return;
+
+	/* Reset the advanced flag to avoid unnecessary queue scanning */
+	tp->rack.advanced = 0;
+	tcp_rack_detect_loss(sk);
 }
 
 /* Record the most recently (re)sent time among the (s)acked packets */
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment