Commit 2e605294 authored by Ilpo Järvinen, committed by David S. Miller

[TCP]: Also handle snd_una changes in tcp_cwnd_down

tcp_cwnd_down must check for snd_una advancement as well, since it should
remain conservative both in collapse cases and when the receiver is trying
to lie (though that wouldn't be very successful/useful anyway).

Note:
- Also separated is_dupack and do_lost in tcp_fastretrans_alert
	* Much cleaner look-and-feel now
	* This time it really fixes recovery entry on a cumulative ACK
	  that carries many new SACK blocks (I claimed the previous patch
	  fixed this, but it didn't). TCP will now call
	  tcp_update_scoreboard regardless of is_dupack when in recovery,
	  as long as fackets_out is large enough.
- Introduced FLAG_SND_UNA_ADVANCED
	* Some prior_snd_una arguments become unnecessary as a result
- Added the FLAG_ANY_PROGRESS helper to avoid long FLAG...|FLAG...
  constructs (see the stand-alone sketch after these notes)
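
Illustration only, not part of the patch: a stand-alone sketch of how the
new flags classify an ACK. is_dupack()/do_lost() are hypothetical helpers
standing in for the local variables computed in tcp_fastretrans_alert in
the diff below; FLAG_SND_UNA_ADVANCED's value comes from that diff, while
the other bit values are placeholders (only their distinctness matters).

/* Stand-alone sketch of the dupack/progress classification (not kernel code). */
#include <stdio.h>

#define FLAG_DATA		0x01	/* Incoming frame contained data (placeholder value). */
#define FLAG_WIN_UPDATE		0x02	/* Incoming ACK was a window update (placeholder).    */
#define FLAG_DATA_ACKED		0x04	/* This ACK acknowledged new data (placeholder).      */
#define FLAG_SYN_ACKED		0x10	/* This ACK acknowledged SYN (placeholder).           */
#define FLAG_DATA_SACKED	0x20	/* New SACK (placeholder).                            */
#define FLAG_SND_UNA_ADVANCED	0x400	/* Snd_una was changed (value from the diff below).   */

#define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
#define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
#define FLAG_FORWARD_PROGRESS	(FLAG_ACKED|FLAG_DATA_SACKED)
#define FLAG_ANY_PROGRESS	(FLAG_FORWARD_PROGRESS|FLAG_SND_UNA_ADVANCED)

/* A dupack neither advances snd_una nor carries anything that makes it
 * "not a dup" (data, window update, newly ACKed data/SYN); prior_snd_una
 * no longer needs to be passed around for this test.
 */
static int is_dupack(int flag)
{
	return !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
}

/* do_lost additionally fires when enough SACK blocks arrived, so the
 * scoreboard is still updated on a cumulative ACK carrying many new SACKs.
 */
static int do_lost(int flag, unsigned int fackets_out, unsigned int reordering)
{
	return is_dupack(flag) ||
	       ((flag & FLAG_DATA_SACKED) && fackets_out > reordering);
}

int main(void)
{
	/* Cumulative ACK (snd_una advanced) with many new SACK blocks:
	 * not a dupack, yet the scoreboard update still runs.
	 * Prints: is_dupack=0 do_lost=1
	 */
	int flag = FLAG_SND_UNA_ADVANCED | FLAG_DATA_ACKED | FLAG_DATA_SACKED;

	printf("is_dupack=%d do_lost=%d\n",
	       is_dupack(flag), do_lost(flag, 10, 3));
	return 0;
}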
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3a97aeb5
@@ -102,11 +102,13 @@ int sysctl_tcp_abc __read_mostly;
 #define FLAG_DATA_LOST 0x80 /* SACK detected data lossage. */
 #define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/
 #define FLAG_ONLY_ORIG_SACKED 0x200 /* SACKs only non-rexmit sent before RTO */
+#define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
 
 #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
 #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
 #define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE)
 #define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED)
+#define FLAG_ANY_PROGRESS (FLAG_FORWARD_PROGRESS|FLAG_SND_UNA_ADVANCED)
 
 #define IsReno(tp) ((tp)->rx_opt.sack_ok == 0)
 #define IsFack(tp) ((tp)->rx_opt.sack_ok & 2)
@@ -1856,7 +1858,7 @@ static void tcp_cwnd_down(struct sock *sk, int flag)
 	struct tcp_sock *tp = tcp_sk(sk);
 	int decr = tp->snd_cwnd_cnt + 1;
 
-	if ((flag&FLAG_FORWARD_PROGRESS) ||
+	if ((flag&FLAG_ANY_PROGRESS) ||
 	    (IsReno(tp) && !(flag&FLAG_NOT_DUP))) {
 		tp->snd_cwnd_cnt = decr&1;
 		decr >>= 1;
@@ -2107,15 +2109,13 @@ static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
  * tcp_xmit_retransmit_queue().
  */
 static void
-tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
-		      int prior_packets, int flag)
+tcp_fastretrans_alert(struct sock *sk, int prior_packets, int flag)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
-	int is_dupack = (tp->snd_una == prior_snd_una &&
-			 (!(flag&FLAG_NOT_DUP) ||
-			  ((flag&FLAG_DATA_SACKED) &&
-			   (tp->fackets_out > tp->reordering))));
+	int is_dupack = !(flag&(FLAG_SND_UNA_ADVANCED|FLAG_NOT_DUP));
+	int do_lost = is_dupack || ((flag&FLAG_DATA_SACKED) &&
+				    (tp->fackets_out > tp->reordering));
 
 	/* Some technical things:
 	 * 1. Reno does not count dupacks (sacked_out) automatically. */
@@ -2192,14 +2192,14 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 	/* F. Process state. */
 	switch (icsk->icsk_ca_state) {
 	case TCP_CA_Recovery:
-		if (prior_snd_una == tp->snd_una) {
+		if (!(flag & FLAG_SND_UNA_ADVANCED)) {
 			if (IsReno(tp) && is_dupack)
 				tcp_add_reno_sack(sk);
 		} else {
 			int acked = prior_packets - tp->packets_out;
 			if (IsReno(tp))
 				tcp_remove_reno_sacks(sk, acked);
-			is_dupack = tcp_try_undo_partial(sk, acked);
+			do_lost = tcp_try_undo_partial(sk, acked);
 		}
 		break;
 	case TCP_CA_Loss:
@@ -2215,7 +2215,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 		/* Loss is undone; fall through to processing in Open state. */
 	default:
 		if (IsReno(tp)) {
-			if (tp->snd_una != prior_snd_una)
+			if (flag & FLAG_SND_UNA_ADVANCED)
 				tcp_reset_reno_sack(tp);
 			if (is_dupack)
 				tcp_add_reno_sack(sk);
@@ -2264,7 +2264,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 		tcp_set_ca_state(sk, TCP_CA_Recovery);
 	}
 
-	if (is_dupack || tcp_head_timedout(sk))
+	if (do_lost || tcp_head_timedout(sk))
 		tcp_update_scoreboard(sk);
 	tcp_cwnd_down(sk, flag);
 	tcp_xmit_retransmit_queue(sk);
@@ -2684,7 +2684,7 @@ static void tcp_undo_spur_to_response(struct sock *sk, int flag)
  * to prove that the RTO is indeed spurious. It transfers the control
  * from F-RTO to the conventional RTO recovery
  */
-static int tcp_process_frto(struct sock *sk, u32 prior_snd_una, int flag)
+static int tcp_process_frto(struct sock *sk, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2704,8 +2704,7 @@ static int tcp_process_frto(struct sock *sk, u32 prior_snd_una, int flag)
 	 * ACK isn't duplicate nor advances window, e.g., opposite dir
 	 * data, winupdate
 	 */
-	if ((tp->snd_una == prior_snd_una) && (flag&FLAG_NOT_DUP) &&
-	    !(flag&FLAG_FORWARD_PROGRESS))
+	if (!(flag&FLAG_ANY_PROGRESS) && (flag&FLAG_NOT_DUP))
 		return 1;
 
 	if (!(flag&FLAG_DATA_ACKED)) {
@@ -2785,6 +2784,9 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	if (before(ack, prior_snd_una))
 		goto old_ack;
 
+	if (after(ack, prior_snd_una))
+		flag |= FLAG_SND_UNA_ADVANCED;
+
 	if (sysctl_tcp_abc) {
 		if (icsk->icsk_ca_state < TCP_CA_CWR)
 			tp->bytes_acked += ack - prior_snd_una;
@@ -2837,14 +2839,14 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	flag |= tcp_clean_rtx_queue(sk, &seq_rtt);
 
 	if (tp->frto_counter)
-		frto_cwnd = tcp_process_frto(sk, prior_snd_una, flag);
+		frto_cwnd = tcp_process_frto(sk, flag);
 
 	if (tcp_ack_is_dubious(sk, flag)) {
 		/* Advance CWND, if state allows this. */
 		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
 		    tcp_may_raise_cwnd(sk, flag))
 			tcp_cong_avoid(sk, ack, prior_in_flight, 0);
-		tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
+		tcp_fastretrans_alert(sk, prior_packets, flag);
 	} else {
 		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
 			tcp_cong_avoid(sk, ack, prior_in_flight, 1);