Commit 6b5a5c0d authored by Neal Cardwell's avatar Neal Cardwell Committed by David S. Miller

tcp: do not scale TSO segment size with reordering degree

Since 2005 (c1b4a7e6)
tcp_tso_should_defer has been using tcp_max_burst() as a target limit
for deciding how large to make outgoing TSO packets when not using
sysctl_tcp_tso_win_divisor. But since 2008
(dd9e0dda) tcp_max_burst() returns the
reordering degree. We should not have tcp_tso_should_defer attempt to
build larger segments just because there is more reordering. This
commit splits the notion of deferral size used in TSO from the notion
of burst size used in cwnd moderation, and returns the TSO deferral
limit to its original value.
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent befc93fe
...@@ -834,6 +834,14 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk) ...@@ -834,6 +834,14 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh); extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst); extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
/* Cap, in MSS-sized units, on the cwnd headroom for which TSO will
 * defer transmission when sysctl_tcp_tso_win_divisor is not in use.
 * Kept deliberately independent of the reordering degree reported by
 * tcp_max_burst(), so increased reordering does not inflate TSO bursts.
 */
static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
	/* Historical limit: defer only while fewer than 3 full frames fit. */
	const __u32 tso_defer_limit = 3;

	return tso_defer_limit;
}
/* Slow start with delack produces 3 packets of burst, so that /* Slow start with delack produces 3 packets of burst, so that
* it is safe "de facto". This will be the default - same as * it is safe "de facto". This will be the default - same as
* the default reordering threshold - but if reordering increases, * the default reordering threshold - but if reordering increases,
......
...@@ -292,7 +292,7 @@ int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight) ...@@ -292,7 +292,7 @@ int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd && left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
left * tp->mss_cache < sk->sk_gso_max_size) left * tp->mss_cache < sk->sk_gso_max_size)
return 1; return 1;
return left <= tcp_max_burst(tp); return left <= tcp_max_tso_deferred_mss(tp);
} }
EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited); EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
......
...@@ -1581,7 +1581,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) ...@@ -1581,7 +1581,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
* frame, so if we have space for more than 3 frames * frame, so if we have space for more than 3 frames
* then send now. * then send now.
*/ */
if (limit > tcp_max_burst(tp) * tp->mss_cache) if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
goto send_now; goto send_now;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment