Commit 737ff314 authored by Yuchung Cheng, committed by David S. Miller

tcp: use sequence distance to detect reordering

Replace the packet-based reordering distance measurement with a
sequence-based approach. Previously the stack tracked the number of
"packets" toward the forward ACK (i.e. the highest SACKed sequence)
in the state variable "fackets_out".

Precisely measuring the reordering degree in packet distance offers
little benefit, as the degree constantly changes with factors like path,
load, and congestion window. It is also complicated and prone to arcane
bugs. This patch replaces it with a sequence-based approach that is
much simpler.
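To make the idea concrete, here is a small stand-alone sketch (user-space C, illustrative only; names and values are hypothetical, not the kernel code): the reordering degree is the byte distance between the highest SACKed sequence and the lowest reordered sequence, rounded up to full-MSS packets.

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch of the sequence-based metric: byte distance from a
 * lower never-retransmitted sequence ("low_seq") to the highest SACKed
 * sequence ("fack"), rounded up to whole-MSS packets.
 */
static uint32_t reordering_degree(uint32_t fack, uint32_t low_seq, uint32_t mss)
{
	uint32_t metric = fack - low_seq;	/* distance in bytes */

	return (metric + mss - 1) / mss;	/* ceil(metric / mss) */
}

int main(void)
{
	/* A hole spanning three 1460-byte segments yields degree 3. */
	printf("%u\n", reordering_degree(7300, 2920, 1460));
	return 0;
}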
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Neal Cardwell <ncardwell@google.com>
Reviewed-by: Soheil Hassas Yeganeh <soheil@google.com>
Reviewed-by: Priyaranjan Jha <priyarjha@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 713bafea
include/linux/tcp.h
@@ -293,7 +293,6 @@ struct tcp_sock {
 	u32	pushed_seq;	/* Last pushed seq, required to talk to windows */
 	u32	lost_out;	/* Lost packets */
 	u32	sacked_out;	/* SACK'd packets */
-	u32	fackets_out;	/* FACK'd packets */
 
 	struct hrtimer	pacing_timer;
...

net/ipv4/tcp.c
@@ -2977,7 +2977,6 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 	info->tcpi_lost = tp->lost_out;
 	info->tcpi_retrans = tp->retrans_out;
-	info->tcpi_fackets = tp->fackets_out;
 
 	now = tcp_jiffies32;
 	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
...
net/ipv4/tcp_input.c
@@ -849,39 +849,39 @@ static void tcp_dsack_seen(struct tcp_sock *tp)
 	tp->rack.dsack_seen = 1;
 }
 
-static void tcp_update_reordering(struct sock *sk, const int metric,
-				  const int ts)
+/* It's reordering when higher sequence was delivered (i.e. sacked) before
+ * some lower never-retransmitted sequence ("low_seq"). The maximum reordering
+ * distance is approximated in full-mss packet distance ("reordering").
+ */
+static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq,
+				      const int ts)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	int mib_idx;
+	const u32 mss = tp->mss_cache;
+	u32 fack, metric;
 
-	if (WARN_ON_ONCE(metric < 0))
+	fack = tcp_highest_sack_seq(tp);
+	if (!before(low_seq, fack))
 		return;
 
-	if (metric > tp->reordering) {
-		tp->reordering = min(sock_net(sk)->ipv4.sysctl_tcp_max_reordering, metric);
+	metric = fack - low_seq;
+	if ((metric > tp->reordering * mss) && mss) {
 #if FASTRETRANS_DEBUG > 1
 		pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
 			 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
 			 tp->reordering,
-			 tp->fackets_out,
+			 0,
 			 tp->sacked_out,
 			 tp->undo_marker ? tp->undo_retrans : 0);
 #endif
+		tp->reordering = min_t(u32, (metric + mss - 1) / mss,
+				       sock_net(sk)->ipv4.sysctl_tcp_max_reordering);
 	}
 
 	tp->rack.reord = 1;
 	/* This exciting event is worth to be remembered. 8) */
-	if (ts)
-		mib_idx = LINUX_MIB_TCPTSREORDER;
-	else if (tcp_is_reno(tp))
-		mib_idx = LINUX_MIB_TCPRENOREORDER;
-	else
-		mib_idx = LINUX_MIB_TCPSACKREORDER;
-
-	NET_INC_STATS(sock_net(sk), mib_idx);
+	NET_INC_STATS(sock_net(sk),
+		      ts ? LINUX_MIB_TCPTSREORDER : LINUX_MIB_TCPSACKREORDER);
 }
 
 /* This must be called before lost_out is incremented */
@@ -1097,8 +1097,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
 }
 
 struct tcp_sacktag_state {
-	int	reord;
-	int	fack_count;
+	u32	reord;
 	/* Timestamps for earliest and latest never-retransmitted segment
 	 * that was SACKed. RTO needs the earliest RTT to stay conservative,
 	 * but congestion control should still get an accurate delay signal.

@@ -1174,15 +1173,15 @@ static u8 tcp_sacktag_one(struct sock *sk,
 			  u64 xmit_time)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	int fack_count = state->fack_count;
 
 	/* Account D-SACK for retransmitted packet. */
 	if (dup_sack && (sacked & TCPCB_RETRANS)) {
 		if (tp->undo_marker && tp->undo_retrans > 0 &&
 		    after(end_seq, tp->undo_marker))
 			tp->undo_retrans--;
-		if (sacked & TCPCB_SACKED_ACKED)
-			state->reord = min(fack_count, state->reord);
+		if ((sacked & TCPCB_SACKED_ACKED) &&
+		    before(start_seq, state->reord))
+			state->reord = start_seq;
 	}
 
 	/* Nothing to do; acked frame is about to be dropped (was ACKed). */
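Since state->reord now holds a raw sequence number instead of a packet count, all comparisons go through the kernel's wrap-safe helpers. The definitions below are the standard before()/after() from include/net/tcp.h, recast as a stand-alone demo (the test values are made up):

#include <stdint.h>
#include <stdio.h>

/* Wrap-safe sequence comparison, as defined in include/net/tcp.h:
 * seq1 is "before" seq2 iff the signed 32-bit difference is negative.
 */
static int before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)

int main(void)
{
	/* Correct even across 32-bit sequence wrap-around. */
	printf("%d\n", before(0xfffffff0u, 0x00000010u));	/* 1 */
	printf("%d\n", after(0x00000010u, 0xfffffff0u));	/* 1 */
	return 0;
}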
@@ -1208,9 +1207,10 @@ static u8 tcp_sacktag_one(struct sock *sk,
 				 * which was in hole. It is reordering.
 				 */
 				if (before(start_seq,
-					   tcp_highest_sack_seq(tp)))
-					state->reord = min(fack_count,
-							   state->reord);
+					   tcp_highest_sack_seq(tp)) &&
+				    before(start_seq, state->reord))
+					state->reord = start_seq;
 				if (!after(end_seq, tp->high_seq))
 					state->flag |= FLAG_ORIG_SACK_ACKED;
 				if (state->first_sackt == 0)

@@ -1229,15 +1229,10 @@ static u8 tcp_sacktag_one(struct sock *sk,
 		tp->sacked_out += pcount;
 		tp->delivered += pcount;	/* Out-of-order packets delivered */
 
-		fack_count += pcount;
-
 		/* Lost marker hint past SACKed? Tweak RFC3517 cnt */
 		if (tp->lost_skb_hint &&
 		    before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
 			tp->lost_cnt_hint += pcount;
-
-		if (fack_count > tp->fackets_out)
-			tp->fackets_out = fack_count;
 	}
 
 	/* D-SACK. We can detect redundant retransmission in S|R and plain R

@@ -1484,7 +1479,6 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 	}
 
 out:
-	state->fack_count += pcount;
 	return prev;
 
 noop:

@@ -1563,8 +1557,6 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
 					    tcp_highest_sack_seq(tp)))
 				tcp_advance_highest_sack(sk, skb);
 		}
-
-		state->fack_count += tcp_skb_pcount(skb);
 	}
 	return skb;
 }

@@ -1575,7 +1567,6 @@ static struct sk_buff *tcp_sacktag_bsearch(struct sock *sk,
 {
 	struct rb_node *parent, **p = &sk->tcp_rtx_queue.rb_node;
 	struct sk_buff *skb;
-	int unack_bytes;
 
 	while (*p) {
 		parent = *p;

@@ -1588,12 +1579,6 @@ static struct sk_buff *tcp_sacktag_bsearch(struct sock *sk,
 			p = &parent->rb_right;
 			continue;
 		}
-
-		state->fack_count = 0;
-		unack_bytes = TCP_SKB_CB(skb)->seq - tcp_sk(sk)->snd_una;
-		if (state->mss_now && unack_bytes > 0)
-			state->fack_count = unack_bytes / state->mss_now;
-
 		return skb;
 	}
 	return NULL;

@@ -1651,13 +1636,10 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 	int first_sack_index;
 
 	state->flag = 0;
-	state->reord = tp->packets_out;
+	state->reord = tp->snd_nxt;
 
-	if (!tp->sacked_out) {
-		if (WARN_ON(tp->fackets_out))
-			tp->fackets_out = 0;
+	if (!tp->sacked_out)
 		tcp_highest_sack_reset(sk);
-	}
 
 	found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
 					 num_sacks, prior_snd_una);

@@ -1729,7 +1711,6 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 	}
 
 	state->mss_now = tcp_current_mss(sk);
-	state->fack_count = 0;
 	skb = NULL;
 	i = 0;

@@ -1787,7 +1768,6 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 				skb = tcp_highest_sack(sk);
 				if (!skb)
 					break;
-				state->fack_count = tp->fackets_out;
 				cache++;
 				goto walk;
 			}

@@ -1802,7 +1782,6 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 			skb = tcp_highest_sack(sk);
 			if (!skb)
 				break;
-			state->fack_count = tp->fackets_out;
 		}
 		skb = tcp_sacktag_skip(skb, sk, state, start_seq);

@@ -1822,9 +1801,8 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 	for (j = 0; j < used_sacks; j++)
 		tp->recv_sack_cache[i++] = sp[j];
 
-	if ((state->reord < tp->fackets_out) &&
-	    ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker))
-		tcp_update_reordering(sk, tp->fackets_out - state->reord, 0);
+	if (inet_csk(sk)->icsk_ca_state != TCP_CA_Loss || tp->undo_marker)
+		tcp_check_sack_reordering(sk, state->reord, 0);
 
 	tcp_verify_left_out(tp);
 out:

@@ -1862,8 +1840,13 @@ static bool tcp_limit_reno_sacked(struct tcp_sock *tp)
 static void tcp_check_reno_reordering(struct sock *sk, const int addend)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	if (tcp_limit_reno_sacked(tp))
-		tcp_update_reordering(sk, tp->packets_out + addend, 0);
+
+	if (!tcp_limit_reno_sacked(tp))
+		return;
+
+	tp->reordering = min_t(u32, tp->packets_out + addend,
+			       sock_net(sk)->ipv4.sysctl_tcp_max_reordering);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRENOREORDER);
 }
 
 /* Emulate SACKs for SACKless connection: account for a new dupack. */
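For a SACKless (Reno) connection there is no SACKed sequence to measure against, so the estimate stays in packet units and is simply bounded by everything that could be outstanding. A stand-alone sketch of that clamp (illustrative; max_reordering stands in for the net.ipv4.tcp_max_reordering sysctl):

#include <stdint.h>
#include <stdio.h>

/* Illustrative: without SACK, the reordering degree cannot exceed the
 * packets in flight plus the dupack(s) being accounted ("addend").
 */
static uint32_t reno_reordering(uint32_t packets_out, uint32_t addend,
				uint32_t max_reordering)
{
	uint32_t r = packets_out + addend;

	return r < max_reordering ? r : max_reordering;
}

int main(void)
{
	printf("%u\n", reno_reordering(10, 1, 300));	/* 11 */
	return 0;
}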
@@ -1909,7 +1892,6 @@ void tcp_clear_retrans(struct tcp_sock *tp)
 	tp->lost_out = 0;
 	tp->undo_marker = 0;
 	tp->undo_retrans = -1;
-	tp->fackets_out = 0;
 	tp->sacked_out = 0;
 }

@@ -1959,7 +1941,6 @@ void tcp_enter_loss(struct sock *sk)
 	if (is_reneg) {
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
 		tp->sacked_out = 0;
-		tp->fackets_out = 0;
 	}
 	tcp_clear_all_retrans_hints(tp);

@@ -2026,11 +2007,6 @@ static bool tcp_check_sack_reneging(struct sock *sk, int flag)
 	return false;
 }
 
-static inline int tcp_fackets_out(const struct tcp_sock *tp)
-{
-	return tcp_is_reno(tp) ? tp->sacked_out + 1 : tp->fackets_out;
-}
-
 /* Heurestics to calculate number of duplicate ACKs. There's no dupACKs
  * counter when SACK is enabled (without SACK, sacked_out is used for
  * that purpose).

@@ -2701,15 +2677,15 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
 }
 
 /* Undo during fast recovery after partial ACK. */
-static bool tcp_try_undo_partial(struct sock *sk, const int acked)
+static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	if (tp->undo_marker && tcp_packet_delayed(tp)) {
 		/* Plain luck! Hole if filled with delayed
-		 * packet, rather than with a retransmit.
+		 * packet, rather than with a retransmit. Check reordering.
 		 */
-		tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
+		tcp_check_sack_reordering(sk, prior_snd_una, 1);
 
 		/* We are getting evidence that the reordering degree is higher
 		 * than we realized. If there are no retransmits out then we

@@ -2745,6 +2721,14 @@ static void tcp_rack_identify_loss(struct sock *sk, int *ack_flag)
 	}
 }
 
+static bool tcp_force_fast_retransmit(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	return after(tcp_highest_sack_seq(tp),
+		     tp->snd_una + tp->reordering * tp->mss_cache);
+}
+
 /* Process an event, which can update packets-in-flight not trivially.
  * Main goal of this function is to calculate new estimate for left_out,
 * taking into account both packets sitting in receiver's buffer and
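The new helper replaces the packet-count dupthresh test (tcp_fackets_out(tp) > tp->reordering) with an equivalent test in sequence space: fast retransmit is forced once SACKed data reaches more than reordering * mss bytes beyond snd_una. A stand-alone sketch with made-up numbers:

#include <stdint.h>
#include <stdio.h>

static int after(uint32_t seq2, uint32_t seq1)
{
	return (int32_t)(seq1 - seq2) < 0;	/* seq2 beyond seq1? */
}

/* Illustrative equivalent of tcp_force_fast_retransmit(). */
static int force_fast_retransmit(uint32_t highest_sack_seq, uint32_t snd_una,
				 uint32_t reordering, uint32_t mss)
{
	return after(highest_sack_seq, snd_una + reordering * mss);
}

int main(void)
{
	/* reordering = 3, mss = 1460: the threshold is snd_una + 4380. */
	printf("%d\n", force_fast_retransmit(14381, 10000, 3, 1460)); /* 1 */
	printf("%d\n", force_fast_retransmit(14380, 10000, 3, 1460)); /* 0 */
	return 0;
}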
@@ -2757,19 +2741,17 @@ static void tcp_rack_identify_loss(struct sock *sk, int *ack_flag)
 * It does _not_ decide what to send, it is made in function
 * tcp_xmit_retransmit_queue().
 */
-static void tcp_fastretrans_alert(struct sock *sk, const int acked,
+static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 				  bool is_dupack, int *ack_flag, int *rexmit)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	int fast_rexmit = 0, flag = *ack_flag;
 	bool do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
-				     (tcp_fackets_out(tp) > tp->reordering));
+				     tcp_force_fast_retransmit(sk));
 
 	if (!tp->packets_out && tp->sacked_out)
 		tp->sacked_out = 0;
-	if (!tp->sacked_out && tp->fackets_out)
-		tp->fackets_out = 0;
 
 	/* Now state machine starts.
 	 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */

@@ -2816,11 +2798,11 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
 			if (tcp_is_reno(tp) && is_dupack)
 				tcp_add_reno_sack(sk);
 		} else {
-			if (tcp_try_undo_partial(sk, acked))
+			if (tcp_try_undo_partial(sk, prior_snd_una))
 				return;
 			/* Partial ACK arrived. Force fast retransmit. */
 			do_lost = tcp_is_reno(tp) ||
-				  tcp_fackets_out(tp) > tp->reordering;
+				  tcp_force_fast_retransmit(sk);
 		}
 		if (tcp_try_undo_dsack(sk)) {
 			tcp_try_keep_open(sk);

@@ -3030,15 +3012,15 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
 * is before the ack sequence we can discard it as it's confirmed to have
 * arrived at the other end.
 */
-static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
-			       u32 prior_snd_una, int *acked,
+static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
+			       u32 prior_snd_una,
 			       struct tcp_sacktag_state *sack)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	u64 first_ackt, last_ackt;
 	struct tcp_sock *tp = tcp_sk(sk);
 	u32 prior_sacked = tp->sacked_out;
-	u32 reord = tp->packets_out;
+	u32 reord = tp->snd_nxt; /* lowest acked un-retx un-sacked seq */
 	struct sk_buff *skb, *next;
 	bool fully_acked = true;
 	long sack_rtt_us = -1L;

@@ -3053,6 +3035,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 
 	for (skb = skb_rb_first(&sk->tcp_rtx_queue); skb; skb = next) {
 		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
+		const u32 start_seq = scb->seq;
 		u8 sacked = scb->sacked;
 		u32 acked_pcount;

@@ -3083,7 +3066,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 				first_ackt = last_ackt;
 
 			last_in_flight = TCP_SKB_CB(skb)->tx.in_flight;
-			reord = min(pkts_acked, reord);
+			if (before(start_seq, reord))
+				reord = start_seq;
 			if (!after(scb->end_seq, tp->high_seq))
 				flag |= FLAG_ORIG_SACK_ACKED;
 		}

@@ -3161,15 +3145,12 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 			int delta;
 
 			/* Non-retransmitted hole got filled? That's reordering */
-			if (reord < prior_fackets && reord <= tp->fackets_out)
-				tcp_update_reordering(sk, tp->fackets_out - reord, 0);
+			if (before(reord, prior_fack))
+				tcp_check_sack_reordering(sk, reord, 0);
 
 			delta = prior_sacked - tp->sacked_out;
 			tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta);
 		}
-
-		tp->fackets_out -= min(pkts_acked, tp->fackets_out);
-
 	} else if (skb && rtt_update && sack_rtt_us >= 0 &&
 		   sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp)) {
 		/* Do not re-arm RTO if the sack RTT is measured from data sent

@@ -3210,7 +3191,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 		}
 	}
 #endif
-	*acked = pkts_acked;
 	return flag;
 }
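On the cumulative-ACK path, tcp_clean_rtx_queue() now records the lowest newly-acked sequence that was never retransmitted. If that sequence lies below the highest sequence already SACKed before this ACK, a hole was filled by original (delayed) data rather than by a retransmit, which is reordering. A minimal sketch of the check (illustrative, using the same wrap-safe comparison as above):

#include <stdint.h>
#include <stdio.h>

static int before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

/* Illustrative: a never-retransmitted hole below the prior highest
 * SACKed sequence got cumulatively ACKed, i.e. the network reordered.
 */
static int hole_filled_by_reordering(uint32_t lowest_acked_unretx_seq,
				     uint32_t prior_fack)
{
	return before(lowest_acked_unretx_seq, prior_fack);
}

int main(void)
{
	printf("%d\n", hole_filled_by_reordering(1000, 4000));	/* 1 */
	return 0;
}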
@@ -3519,12 +3499,11 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	u32 ack_seq = TCP_SKB_CB(skb)->seq;
 	u32 ack = TCP_SKB_CB(skb)->ack_seq;
 	bool is_dupack = false;
-	u32 prior_fackets;
 	int prior_packets = tp->packets_out;
 	u32 delivered = tp->delivered;
 	u32 lost = tp->lost;
-	int acked = 0; /* Number of packets newly acked */
 	int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */
+	u32 prior_fack;
 
 	sack_state.first_sackt = 0;
 	sack_state.rate = &rs;

@@ -3556,7 +3535,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 		icsk->icsk_retransmits = 0;
 	}
 
-	prior_fackets = tp->fackets_out;
+	prior_fack = tcp_highest_sack_seq(tp);
 	rs.prior_in_flight = tcp_packets_in_flight(tp);
 
 	/* ts_recent update must be made after we are sure that the packet

@@ -3612,8 +3591,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 		goto no_queue;
 
 	/* See if we can take anything off of the retransmit queue. */
-	flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked,
-				    &sack_state);
+	flag |= tcp_clean_rtx_queue(sk, prior_fack, prior_snd_una, &sack_state);
 
 	tcp_rack_update_reo_wnd(sk, &rs);

@@ -3625,7 +3603,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
 	if (tcp_ack_is_dubious(sk, flag)) {
 		is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
-		tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
+		tcp_fastretrans_alert(sk, prior_snd_una, is_dupack, &flag,
+				      &rexmit);
 	}
 
 	if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))

@@ -3641,7 +3620,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 no_queue:
 	/* If data was DSACKed, see if we can undo a cwnd reduction. */
 	if (flag & FLAG_DSACKING_ACK)
-		tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
+		tcp_fastretrans_alert(sk, prior_snd_una, is_dupack, &flag,
+				      &rexmit);
 	/* If this ack opens up a zero window, clear backoff. It was
 	 * being used to time the probes, and is probably far higher than
 	 * it needs to be for normal retransmission.

@@ -3663,7 +3643,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	if (TCP_SKB_CB(skb)->sacked) {
 		flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
 						&sack_state);
-		tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
+		tcp_fastretrans_alert(sk, prior_snd_una, is_dupack, &flag,
+				      &rexmit);
 		tcp_xmit_recovery(sk, rexmit);
 	}
...
net/ipv4/tcp_minisocks.c
@@ -475,7 +475,6 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
 	newtp->packets_out = 0;
 	newtp->retrans_out = 0;
 	newtp->sacked_out = 0;
-	newtp->fackets_out = 0;
 	newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 
 	newtp->tlp_high_seq = 0;
 	newtp->lsndtime = tcp_jiffies32;
...
net/ipv4/tcp_output.c
@@ -1218,21 +1218,6 @@ static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
 	}
 }
 
-/* When a modification to fackets out becomes necessary, we need to check
- * skb is counted to fackets_out or not.
- */
-static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,
-				   int decr)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	if (!tp->sacked_out || tcp_is_reno(tp))
-		return;
-
-	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
-		tp->fackets_out -= decr;
-}
-
 /* Pcount in the middle of the write queue got changed, we need to do various
 * tweaks to fix counters
 */

@@ -1253,8 +1238,6 @@ static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int de
 	if (tcp_is_reno(tp) && decr > 0)
 		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
 
-	tcp_adjust_fackets_out(sk, skb, decr);
-
 	if (tp->lost_skb_hint &&
 	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
 	    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
...