Commit 57dde7f7 authored by Yuchung Cheng, committed by David S. Miller

tcp: add reordering timer in RACK loss detection

This patch makes RACK install a reordering timer when it suspects
that some packets might be lost, but wants to delay the decision
a little bit to accommodate reordering.

It does not create a new timer but instead repurposes the existing
RTO timer, because both are meant to retransmit packets.
Specifically it arms a timer ICSK_TIME_REO_TIMEOUT when
the RACK timing check fails. The wait time is set to

  RACK.RTT + RACK.reo_wnd - (NOW - Packet.xmit_time) + fudge

This translates to expecting that a packet (Packet) should take
(RACK.RTT + RACK.reo_wnd + fudge) to be delivered after it was sent.

When there are multiple packets that need a timer, we use one timer
with the maximum timeout. Therefore the timer conservatively uses
the maximum window to expire N packets by one timeout, instead of
N timeouts to expire N packets sent at different times.
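
In code terms (mirroring the tcp_rack_detect_loss() changes in the
diff below), the per-packet check and the max aggregation are roughly:

	u32 elapsed = skb_mstamp_us_delta(now, &skb->skb_mstamp);
	s32 remaining = tp->rack.rtt_us + reo_wnd - elapsed;

	if (remaining < 0) {
		/* Deadline already passed: mark the packet lost now */
		tcp_rack_mark_skb_lost(sk, skb);
		continue;
	}
	/* Otherwise remember the longest remaining wait among all suspects */
	*reo_timeout = max_t(u32, *reo_timeout, 1 + remaining);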

The fudge factor is 2 jiffies to ensure that when the timer fires, all
the suspected packets will have exceeded the deadline and be marked lost
by tcp_rack_detect_loss(). It has to be at least 1 jiffy because the
clock may tick between calling icsk_reset_xmit_timer(timeout) and
actually arming the timer. The second jiffy lower-bounds the timeout
to 2 jiffies when reo_wnd is < 1ms.
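
For reference, the arming path in tcp_rack_mark_lost() (see the diff
below) then becomes roughly:

	if (timeout) {
		/* TCP_REO_TIMEOUT_MIN (2000 usec) supplies the fudge described above */
		timeout = usecs_to_jiffies(timeout + TCP_REO_TIMEOUT_MIN);
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
					  timeout, inet_csk(sk)->icsk_rto);
	}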

When the reordering timer fires (tcp_rack_reo_timeout): if we aren't
in Recovery, we enter fast recovery and force a fast retransmit.
This is very similar to early retransmit (RFC 5827), except that RACK
is not constrained to entering recovery only for small outstanding
flights.
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent deed7be7
@@ -144,6 +144,7 @@ struct inet_connection_sock {
 #define ICSK_TIME_PROBE0	3	/* Zero window probe timer */
 #define ICSK_TIME_EARLY_RETRANS 4	/* Early retransmit timer */
 #define ICSK_TIME_LOSS_PROBE	5	/* Tail loss probe timer */
+#define ICSK_TIME_REO_TIMEOUT	6	/* Reordering timer */

 static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
 {
@@ -234,7 +235,8 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
 	}

 	if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 ||
-	    what == ICSK_TIME_EARLY_RETRANS || what == ICSK_TIME_LOSS_PROBE) {
+	    what == ICSK_TIME_EARLY_RETRANS || what == ICSK_TIME_LOSS_PROBE ||
+	    what == ICSK_TIME_REO_TIMEOUT) {
 		icsk->icsk_pending = what;
 		icsk->icsk_timeout = jiffies + when;
 		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
@@ -143,6 +143,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
 #define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							  * for local resources.
							  */
+#define TCP_REO_TIMEOUT_MIN	(2000) /* Min RACK reordering timeout in usec */

 #define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
 #define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes */
@@ -397,6 +398,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb);
 void tcp_enter_loss(struct sock *sk);
+void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag);
 void tcp_clear_retrans(struct tcp_sock *tp);
 void tcp_update_metrics(struct sock *sk);
 void tcp_init_metrics(struct sock *sk);
@@ -541,6 +543,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
 void tcp_retransmit_timer(struct sock *sk);
 void tcp_xmit_retransmit_queue(struct sock *);
 void tcp_simple_retransmit(struct sock *);
+void tcp_enter_recovery(struct sock *sk, bool ece_ack);
 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
 int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
@@ -1867,6 +1870,7 @@ extern void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now);
 extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked,
			     const struct skb_mstamp *xmit_time,
			     const struct skb_mstamp *ack_time);
+extern void tcp_rack_reo_timeout(struct sock *sk);

 /*
  * Save and compile IPv4 options, return a pointer to it
@@ -216,6 +216,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
 	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		r->idiag_timer = 1;
		r->idiag_retrans = icsk->icsk_retransmits;
@@ -2522,8 +2522,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk)
	tcp_ecn_queue_cwr(tp);
 }

-static void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked,
-			       int flag)
+void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag)
 {
	struct tcp_sock *tp = tcp_sk(sk);
	int sndcnt = 0;
@@ -2691,7 +2690,7 @@ void tcp_simple_retransmit(struct sock *sk)
 }
 EXPORT_SYMBOL(tcp_simple_retransmit);

-static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
+void tcp_enter_recovery(struct sock *sk, bool ece_ack)
 {
	struct tcp_sock *tp = tcp_sk(sk);
	int mib_idx;
@@ -3031,6 +3030,7 @@ void tcp_rearm_rto(struct sock *sk)
		u32 rto = inet_csk(sk)->icsk_rto;
		/* Offset the time elapsed after installing regular RTO */
		if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+		    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
		    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
			struct sk_buff *skb = tcp_write_queue_head(sk);
			const u32 rto_time_stamp =
@@ -2230,6 +2230,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
@@ -2960,7 +2960,8 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
		if (tcp_in_cwnd_reduction(sk))
			tp->prr_out += tcp_skb_pcount(skb);

-		if (skb == tcp_write_queue_head(sk))
+		if (skb == tcp_write_queue_head(sk) &&
+		    icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  inet_csk(sk)->icsk_rto,
						  TCP_RTO_MAX);
@@ -32,19 +32,18 @@ static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
  * The current version is only used after recovery starts but can be
  * easily extended to detect the first loss.
  */
-static void tcp_rack_detect_loss(struct sock *sk, const struct skb_mstamp *now)
+static void tcp_rack_detect_loss(struct sock *sk, const struct skb_mstamp *now,
+				 u32 *reo_timeout)
 {
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	u32 reo_wnd;

+	*reo_timeout = 0;
	/* To be more reordering resilient, allow min_rtt/4 settling delay
	 * (lower-bounded to 1000uS). We use min_rtt instead of the smoothed
	 * RTT because reordering is often a path property and less related
	 * to queuing or delayed ACKs.
-	 *
-	 * TODO: measure and adapt to the observed reordering delay, and
-	 * use a timer to retransmit like the delayed early retransmit.
	 */
	reo_wnd = 1000;
	if (tp->rack.reord && tcp_min_rtt(tp) != ~0U)
@@ -66,10 +65,23 @@ static void tcp_rack_detect_loss(struct sock *sk, const struct skb_mstamp *now)
		 * A packet is lost if its elapsed time is beyond
		 * the recent RTT plus the reordering window.
		 */
-		if (skb_mstamp_us_delta(now, &skb->skb_mstamp) >
-		    tp->rack.rtt_us + reo_wnd) {
+		u32 elapsed = skb_mstamp_us_delta(now,
+						  &skb->skb_mstamp);
+		s32 remaining = tp->rack.rtt_us + reo_wnd - elapsed;
+
+		if (remaining < 0) {
			tcp_rack_mark_skb_lost(sk, skb);
+			continue;
		}
+
+		/* Skip ones marked lost but not yet retransmitted */
+		if ((scb->sacked & TCPCB_LOST) &&
+		    !(scb->sacked & TCPCB_SACKED_RETRANS))
+			continue;
+
+		/* Record maximum wait time (+1 to avoid 0) */
+		*reo_timeout = max_t(u32, *reo_timeout, 1 + remaining);
+
	} else if (!(scb->sacked & TCPCB_RETRANS)) {
		/* Original data are sent sequentially so stop early
		 * b/c the rest are all sent after rack_sent
@@ -82,12 +94,19 @@ static void tcp_rack_detect_loss(struct sock *sk, const struct skb_mstamp *now)
 void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now)
 {
	struct tcp_sock *tp = tcp_sk(sk);
+	u32 timeout;

	if (inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery || !tp->rack.advanced)
		return;
+
	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
-	tcp_rack_detect_loss(sk, now);
+	tcp_rack_detect_loss(sk, now, &timeout);
+	if (timeout) {
+		timeout = usecs_to_jiffies(timeout + TCP_REO_TIMEOUT_MIN);
+		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
+					  timeout, inet_csk(sk)->icsk_rto);
+	}
 }

 /* Record the most recently (re)sent time among the (s)acked packets
@@ -123,3 +142,27 @@ void tcp_rack_advance(struct tcp_sock *tp, u8 sacked,
	tp->rack.mstamp = *xmit_time;
	tp->rack.advanced = 1;
 }
+
+/* We have waited long enough to accommodate reordering. Mark the expired
+ * packets lost and retransmit them.
+ */
+void tcp_rack_reo_timeout(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct skb_mstamp now;
+	u32 timeout, prior_inflight;
+
+	skb_mstamp_get(&now);
+	prior_inflight = tcp_packets_in_flight(tp);
+	tcp_rack_detect_loss(sk, &now, &timeout);
+	if (prior_inflight != tcp_packets_in_flight(tp)) {
+		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
+			tcp_enter_recovery(sk, false);
+			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
+				tcp_cwnd_reduction(sk, 1, 0);
+		}
+		tcp_xmit_retransmit_queue(sk);
+	}
+	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
+		tcp_rearm_rto(sk);
+}
@@ -563,6 +563,9 @@ void tcp_write_timer_handler(struct sock *sk)
	event = icsk->icsk_pending;

	switch (event) {
+	case ICSK_TIME_REO_TIMEOUT:
+		tcp_rack_reo_timeout(sk);
+		break;
	case ICSK_TIME_EARLY_RETRANS:
		tcp_resume_early_retransmit(sk);
		break;
@@ -1746,6 +1746,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;