Commit 72dc5b92 authored by Stephen Hemminger, committed by David S. Miller

[TCP]: Minimum congestion window consolidation.

Many of the TCP congestion control methods just use ssthresh
as the minimum congestion window on decrease.  Rather than
duplicating the code, make that the default when the min_cwnd
hook in the ops structure is not set.

Minor behaviour change to TCP Compound: it probably wants to
use ssthresh as the lower bound rather than ssthresh/2, because
the latter causes the window to undershoot after a loss.
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a4ed2584
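For readers skimming the diff below, the pattern being introduced is an optional hook with a default fallback: a congestion control module may leave min_cwnd unset, and the stack then uses snd_ssthresh as the lower bound when shrinking the window. The following stand-alone C sketch only illustrates that dispatch pattern; the struct and function names are simplified stand-ins, not the kernel's actual types.

/*
 * Illustration of the "optional min_cwnd hook with ssthresh fallback"
 * pattern from this commit.  Types and names are simplified stand-ins
 * for the kernel structures, not the real ones.
 */
#include <stdio.h>

typedef unsigned int u32;

struct sock_model {               /* stand-in for struct tcp_sock state */
	u32 snd_cwnd;
	u32 snd_ssthresh;
};

struct ca_ops_model {             /* stand-in for struct tcp_congestion_ops */
	u32 (*min_cwnd)(const struct sock_model *sk);   /* optional hook */
};

/* Use the module's hook if it is set, otherwise fall back to ssthresh. */
static u32 cwnd_min(const struct ca_ops_model *ops, const struct sock_model *sk)
{
	return ops->min_cwnd ? ops->min_cwnd(sk) : sk->snd_ssthresh;
}

/* Hypothetical module that keeps the old Reno-style ssthresh/2 bound. */
static u32 halving_min_cwnd(const struct sock_model *sk)
{
	return sk->snd_ssthresh / 2;
}

int main(void)
{
	struct sock_model sk = { .snd_cwnd = 20, .snd_ssthresh = 10 };
	struct ca_ops_model defaults = { .min_cwnd = NULL };
	struct ca_ops_model halving  = { .min_cwnd = halving_min_cwnd };

	printf("default lower bound:  %u\n", cwnd_min(&defaults, &sk)); /* 10 */
	printf("override lower bound: %u\n", cwnd_min(&halving, &sk));  /* 5 */
	return 0;
}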
@@ -632,7 +632,7 @@ struct tcp_congestion_ops {
 	/* return slow start threshold (required) */
 	u32 (*ssthresh)(struct sock *sk);
 	/* lower bound for congestion window (optional) */
-	u32 (*min_cwnd)(struct sock *sk);
+	u32 (*min_cwnd)(const struct sock *sk);
 	/* do new cwnd calculation (required) */
 	void (*cong_avoid)(struct sock *sk, u32 ack,
 			   u32 rtt, u32 in_flight, int good_ack);
@@ -667,7 +667,7 @@ extern struct tcp_congestion_ops tcp_init_congestion_ops;
 extern u32 tcp_reno_ssthresh(struct sock *sk);
 extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack,
 				u32 rtt, u32 in_flight, int flag);
-extern u32 tcp_reno_min_cwnd(struct sock *sk);
+extern u32 tcp_reno_min_cwnd(const struct sock *sk);
 extern struct tcp_congestion_ops tcp_reno;
 
 static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)

@@ -198,12 +198,6 @@ static u32 bictcp_undo_cwnd(struct sock *sk)
 	return max(tp->snd_cwnd, ca->last_max_cwnd);
 }
 
-static u32 bictcp_min_cwnd(struct sock *sk)
-{
-	const struct tcp_sock *tp = tcp_sk(sk);
-	return tp->snd_ssthresh;
-}
-
 static void bictcp_state(struct sock *sk, u8 new_state)
 {
 	if (new_state == TCP_CA_Loss)
@@ -231,7 +225,6 @@ static struct tcp_congestion_ops bictcp = {
 	.cong_avoid = bictcp_cong_avoid,
 	.set_state = bictcp_state,
 	.undo_cwnd = bictcp_undo_cwnd,
-	.min_cwnd = bictcp_min_cwnd,
 	.pkts_acked = bictcp_acked,
 	.owner = THIS_MODULE,
 	.name = "bic",

@@ -419,7 +419,6 @@ static struct tcp_congestion_ops tcp_compound = {
 	.init = tcp_compound_init,
 	.ssthresh = tcp_reno_ssthresh,
 	.cong_avoid = tcp_compound_cong_avoid,
-	.min_cwnd = tcp_reno_min_cwnd,
 	.rtt_sample = tcp_compound_rtt_calc,
 	.set_state = tcp_compound_state,
 	.cwnd_event = tcp_compound_cwnd_event,

@@ -38,7 +38,7 @@ int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
 	int ret = 0;
 
 	/* all algorithms must implement ssthresh and cong_avoid ops */
-	if (!ca->ssthresh || !ca->cong_avoid || !ca->min_cwnd) {
+	if (!ca->ssthresh || !ca->cong_avoid) {
 		printk(KERN_ERR "TCP %s does not implement required ops\n",
 		       ca->name);
 		return -EINVAL;
@@ -251,8 +251,8 @@ u32 tcp_reno_ssthresh(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
 
-/* Lower bound on congestion window. */
-u32 tcp_reno_min_cwnd(struct sock *sk)
+/* Lower bound on congestion window with halving. */
+u32 tcp_reno_min_cwnd(const struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	return tp->snd_ssthresh/2;

@@ -325,11 +325,6 @@ static u32 bictcp_undo_cwnd(struct sock *sk)
 	return max(tcp_sk(sk)->snd_cwnd, ca->last_max_cwnd);
 }
 
-static u32 bictcp_min_cwnd(struct sock *sk)
-{
-	return tcp_sk(sk)->snd_ssthresh;
-}
-
 static void bictcp_state(struct sock *sk, u8 new_state)
 {
 	if (new_state == TCP_CA_Loss)
@@ -357,7 +352,6 @@ static struct tcp_congestion_ops cubictcp = {
 	.cong_avoid = bictcp_cong_avoid,
 	.set_state = bictcp_state,
 	.undo_cwnd = bictcp_undo_cwnd,
-	.min_cwnd = bictcp_min_cwnd,
 	.pkts_acked = bictcp_acked,
 	.owner = THIS_MODULE,
 	.name = "cubic",

@@ -246,14 +246,6 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
 	}
 }
 
-/* Lower bound on congestion window. */
-static u32 htcp_min_cwnd(struct sock *sk)
-{
-	const struct tcp_sock *tp = tcp_sk(sk);
-	return tp->snd_ssthresh;
-}
-
 static void htcp_init(struct sock *sk)
 {
 	struct htcp *ca = inet_csk_ca(sk);
@@ -285,7 +277,6 @@ static void htcp_state(struct sock *sk, u8 new_state)
 static struct tcp_congestion_ops htcp = {
 	.init = htcp_init,
 	.ssthresh = htcp_recalc_ssthresh,
-	.min_cwnd = htcp_min_cwnd,
 	.cong_avoid = htcp_cong_avoid,
 	.set_state = htcp_state,
 	.undo_cwnd = htcp_cwnd_undo,

@@ -1689,17 +1689,26 @@ static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
+/* Lower bound on congestion window is slow start threshold
+ * unless congestion avoidance choice decides to overide it.
+ */
+static inline u32 tcp_cwnd_min(const struct sock *sk)
+{
+	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
+
+	return ca_ops->min_cwnd ? ca_ops->min_cwnd(sk) : tcp_sk(sk)->snd_ssthresh;
+}
+
 /* Decrease cwnd each second ack. */
 static void tcp_cwnd_down(struct sock *sk)
 {
-	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	int decr = tp->snd_cwnd_cnt + 1;
 
 	tp->snd_cwnd_cnt = decr&1;
 	decr >>= 1;
 
-	if (decr && tp->snd_cwnd > icsk->icsk_ca_ops->min_cwnd(sk))
+	if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
 		tp->snd_cwnd -= decr;
 
 	tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);

@@ -199,17 +199,10 @@ static u32 tcp_veno_ssthresh(struct sock *sk)
 	return max(tp->snd_cwnd >> 1U, 2U);
 }
 
-static u32 tcp_veno_min_cwnd(struct sock * sk)
-{
-	const struct tcp_sock *tp = tcp_sk(sk);
-	return tp->snd_ssthresh;
-}
-
 static struct tcp_congestion_ops tcp_veno = {
 	.init = tcp_veno_init,
 	.ssthresh = tcp_veno_ssthresh,
 	.cong_avoid = tcp_veno_cong_avoid,
-	.min_cwnd = tcp_veno_min_cwnd,
 	.rtt_sample = tcp_veno_rtt_calc,
 	.set_state = tcp_veno_state,
 	.cwnd_event = tcp_veno_cwnd_event,

@@ -162,12 +162,6 @@ static inline u32 westwood_acked_count(struct sock *sk)
 	return w->cumul_ack;
 }
 
-static inline u32 westwood_bw_rttmin(const struct sock *sk)
-{
-	const struct tcp_sock *tp = tcp_sk(sk);
-	const struct westwood *w = inet_csk_ca(sk);
-	return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
-}
-
 /*
  * TCP Westwood
@@ -175,9 +169,11 @@ static inline u32 westwood_bw_rttmin(const struct sock *sk)
  * in packets we use mss_cache). Rttmin is guaranteed to be >= 2
  * so avoids ever returning 0.
  */
-static u32 tcp_westwood_cwnd_min(struct sock *sk)
+static u32 tcp_westwood_bw_rttmin(const struct sock *sk)
 {
-	return westwood_bw_rttmin(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
+	const struct westwood *w = inet_csk_ca(sk);
+	return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
 }
 
 static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
@@ -191,11 +187,11 @@ static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
 		break;
 
 	case CA_EVENT_COMPLETE_CWR:
-		tp->snd_cwnd = tp->snd_ssthresh = westwood_bw_rttmin(sk);
+		tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
 		break;
 
 	case CA_EVENT_FRTO:
-		tp->snd_ssthresh = westwood_bw_rttmin(sk);
+		tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
 		break;
 
 	case CA_EVENT_SLOW_ACK:
@@ -235,7 +231,7 @@ static struct tcp_congestion_ops tcp_westwood = {
 	.init = tcp_westwood_init,
 	.ssthresh = tcp_reno_ssthresh,
 	.cong_avoid = tcp_reno_cong_avoid,
-	.min_cwnd = tcp_westwood_cwnd_min,
+	.min_cwnd = tcp_westwood_bw_rttmin,
 	.cwnd_event = tcp_westwood_event,
 	.get_info = tcp_westwood_info,
 	.pkts_acked = tcp_westwood_pkts_acked,