Commit 8dc242ad authored by Eric Dumazet, committed by David S. Miller

tcp: refine tcp_pacing_delay() for very low pacing rates

With the addition of the horizon feature to sch_fq, we noticed some
suboptimal behavior of extremely low pacing rate TCP flows, especially
when TCP is not aware of a drop happening in lower stacks.

Back in commit 3f80e08f ("tcp: add tcp_reset_xmit_timer() helper"),
tcp_pacing_delay() was added to estimate an extra delay to add on top of
the standard RTO timers.
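
For intuition, here is a minimal userspace C sketch of the quantity this
helper computes: the gap between "now" and the earliest departure time
(EDT) of the next packet, converted to timer ticks so it can be added on
top of the RTO. HZ, nsecs_to_ticks() and pacing_delay_ticks() are
illustrative stand-ins, not the kernel API; the kernel uses
nsecs_to_jiffies().

#include <stdint.h>
#include <stdio.h>

#define HZ 1000UL                      /* assumed tick rate: 1 tick = 1 ms */
#define NSEC_PER_SEC 1000000000ULL

/* Illustrative stand-in for the kernel's nsecs_to_jiffies(). */
static unsigned long nsecs_to_ticks(uint64_t ns)
{
        return (unsigned long)(ns * HZ / NSEC_PER_SEC);
}

/* Delay (in ticks) before the flow may send again; 0 if it may send now. */
static unsigned long pacing_delay_ticks(int64_t next_send_ns, int64_t now_ns)
{
        int64_t delay = next_send_ns - now_ns;

        return delay > 0 ? nsecs_to_ticks((uint64_t)delay) : 0;
}

int main(void)
{
        int64_t now = 1000 * (int64_t)NSEC_PER_SEC;
        /* At 1200-byte packets and 1 kbit/s pacing, sends are ~9.6 s apart. */
        int64_t next_send = now + 9600000000LL;

        printf("extra RTO delay: %lu ticks\n",
               pacing_delay_ticks(next_send, now));
        return 0;
}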

This patch removes the skb argument from this helper and from
tcp_reset_xmit_timer(), because it makes more sense to simply
consider the time at which the next packet is allowed to be sent,
instead of the send time of whatever packet has already been sent.

This avoids arming the RTO timer too soon and removes
spurious horizon drops.
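
To see why this matters at very low pacing rates, here is a standalone C
sketch contrasting the two computations. The struct fields mimic the kernel
names (tcp_wstamp_ns, tcp_clock_cache, skb->tstamp), but the struct and the
values are hypothetical, modeling a ~1 kbit/s flow; this is not kernel code.
The already-sent packet's EDT is typically in the past, so the old helper
contributed no extra delay and the RTO could fire long before the next
packet was even allowed out.

#include <stdint.h>
#include <stdio.h>

struct flow {
        int64_t tcp_clock_cache;  /* cached "now", nanoseconds */
        int64_t tcp_wstamp_ns;    /* EDT of the next packet to send */
        int64_t last_skb_tstamp;  /* EDT stamped on the last sent packet */
};

static int64_t old_extra_delay(const struct flow *f)
{
        /* Old helper looked at the already-sent packet's timestamp. */
        int64_t d = f->last_skb_tstamp - f->tcp_clock_cache;

        return d > 0 ? d : 0;
}

static int64_t new_extra_delay(const struct flow *f)
{
        /* New helper looks at when the *next* packet may be sent. */
        int64_t d = f->tcp_wstamp_ns - f->tcp_clock_cache;

        return d > 0 ? d : 0;
}

int main(void)
{
        /* ~1 kbit/s pacing: ~9.6 s between 1200-byte packets. The last
         * packet's EDT is already in the past; the next one is far out. */
        struct flow f = {
                .tcp_clock_cache = 10000000000LL,              /* t = 10 s   */
                .last_skb_tstamp = 9900000000LL,               /* t = 9.9 s  */
                .tcp_wstamp_ns   = 10000000000LL + 9600000000LL,
        };

        printf("old extra delay: %.1f s (RTO armed too soon)\n",
               old_extra_delay(&f) / 1e9);
        printf("new extra delay: %.1f s (RTO pushed past next EDT)\n",
               new_extra_delay(&f) / 1e9);
        return 0;
}

With the old computation the retransmit timer could fire roughly a full
inter-packet interval before the flow was allowed to transmit again; a
retransmit queued then receives an EDT far in the future, which is the
spurious-horizon-drop scenario this patch addresses.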
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b94c280d
include/net/tcp.h
@@ -1289,26 +1289,22 @@ static inline bool tcp_needs_internal_pacing(const struct sock *sk)
 	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
 }
 
-/* Return in jiffies the delay before one skb is sent.
- * If @skb is NULL, we look at EDT for next packet being sent on the socket.
+/* Estimates in how many jiffies next packet for this flow can be sent.
+ * Scheduling a retransmit timer too early would be silly.
  */
-static inline unsigned long tcp_pacing_delay(const struct sock *sk,
-					     const struct sk_buff *skb)
+static inline unsigned long tcp_pacing_delay(const struct sock *sk)
 {
-	s64 pacing_delay = skb ? skb->tstamp : tcp_sk(sk)->tcp_wstamp_ns;
+	s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;
 
-	pacing_delay -= tcp_sk(sk)->tcp_clock_cache;
-	return pacing_delay > 0 ? nsecs_to_jiffies(pacing_delay) : 0;
+	return delay > 0 ? nsecs_to_jiffies(delay) : 0;
 }
 
 static inline void tcp_reset_xmit_timer(struct sock *sk,
 					const int what,
 					unsigned long when,
-					const unsigned long max_when,
-					const struct sk_buff *skb)
+					const unsigned long max_when)
 {
-	inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk, skb),
+	inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
 				  max_when);
 }
@@ -1336,8 +1332,7 @@ static inline void tcp_check_probe_timer(struct sock *sk)
 {
 	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
 		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
-				     tcp_probe0_base(sk), TCP_RTO_MAX,
-				     NULL);
+				     tcp_probe0_base(sk), TCP_RTO_MAX);
 }
 
 static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
net/ipv4/tcp_input.c
@@ -3014,7 +3014,7 @@ void tcp_rearm_rto(struct sock *sk)
 			rto = usecs_to_jiffies(max_t(int, delta_us, 1));
 		}
 		tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
-				     TCP_RTO_MAX, tcp_rtx_queue_head(sk));
+				     TCP_RTO_MAX);
 	}
 }
@@ -3291,7 +3291,7 @@ static void tcp_ack_probe(struct sock *sk)
 		unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
 
 		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
-				     when, TCP_RTO_MAX, NULL);
+				     when, TCP_RTO_MAX);
 	}
 }
net/ipv4/tcp_output.c
@@ -2593,8 +2593,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
 	if (rto_delta_us > 0)
 		timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
 
-	tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
-			     TCP_RTO_MAX, NULL);
+	tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, TCP_RTO_MAX);
 	return true;
 }
@@ -3174,8 +3173,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 		    icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
 			tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 					     inet_csk(sk)->icsk_rto,
-					     TCP_RTO_MAX,
-					     skb);
+					     TCP_RTO_MAX);
 	}
 }
@@ -3907,7 +3905,7 @@ void tcp_send_probe0(struct sock *sk)
 	 */
 		timeout = TCP_RESOURCE_PROBE_INTERVAL;
 	}
-	tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX, NULL);
+	tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX);
 }
 
 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)