Commit 24901551 authored by Eric Dumazet, committed by David S. Miller

tcp: remove in_flight parameter from cong_avoid() methods

Commit e114a710 ("tcp: fix cwnd limited checking to improve
congestion control") obsoleted in_flight parameter from
tcp_is_cwnd_limited() and its callers.

This patch does the removal as promised.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e114a710
...@@ -796,7 +796,7 @@ struct tcp_congestion_ops { ...@@ -796,7 +796,7 @@ struct tcp_congestion_ops {
/* return slow start threshold (required) */ /* return slow start threshold (required) */
u32 (*ssthresh)(struct sock *sk); u32 (*ssthresh)(struct sock *sk);
/* do new cwnd calculation (required) */ /* do new cwnd calculation (required) */
void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked, u32 in_flight); void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
/* call before changing ca_state (optional) */ /* call before changing ca_state (optional) */
void (*set_state)(struct sock *sk, u8 new_state); void (*set_state)(struct sock *sk, u8 new_state);
/* call when cwnd event occurs (optional) */ /* call when cwnd event occurs (optional) */
...@@ -828,7 +828,7 @@ void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w); ...@@ -828,7 +828,7 @@ void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
extern struct tcp_congestion_ops tcp_init_congestion_ops; extern struct tcp_congestion_ops tcp_init_congestion_ops;
u32 tcp_reno_ssthresh(struct sock *sk); u32 tcp_reno_ssthresh(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight); void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno; extern struct tcp_congestion_ops tcp_reno;
static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state) static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
...@@ -986,10 +986,8 @@ static inline u32 tcp_wnd_end(const struct tcp_sock *tp) ...@@ -986,10 +986,8 @@ static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
* risks 100% overshoot. The advantage is that we discourage application to * risks 100% overshoot. The advantage is that we discourage application to
* either send more filler packets or data to artificially blow up the cwnd * either send more filler packets or data to artificially blow up the cwnd
* usage, and allow application-limited process to probe bw more aggressively. * usage, and allow application-limited process to probe bw more aggressively.
*
* TODO: remove in_flight once we can fix all callers, and their callers...
*/ */
static inline bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight) static inline bool tcp_is_cwnd_limited(const struct sock *sk)
{ {
const struct tcp_sock *tp = tcp_sk(sk); const struct tcp_sock *tp = tcp_sk(sk);
......
...@@ -140,13 +140,12 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) ...@@ -140,13 +140,12 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
ca->cnt = 1; ca->cnt = 1;
} }
static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
u32 in_flight)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk); struct bictcp *ca = inet_csk_ca(sk);
if (!tcp_is_cwnd_limited(sk, in_flight)) if (!tcp_is_cwnd_limited(sk))
return; return;
if (tp->snd_cwnd <= tp->snd_ssthresh) if (tp->snd_cwnd <= tp->snd_ssthresh)
......
...@@ -317,11 +317,11 @@ EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai); ...@@ -317,11 +317,11 @@ EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
/* This is Jacobson's slow start and congestion avoidance. /* This is Jacobson's slow start and congestion avoidance.
* SIGCOMM '88, p. 328. * SIGCOMM '88, p. 328.
*/ */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight) void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
if (!tcp_is_cwnd_limited(sk, in_flight)) if (!tcp_is_cwnd_limited(sk))
return; return;
/* In "safe" area, increase. */ /* In "safe" area, increase. */
......
...@@ -304,13 +304,12 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) ...@@ -304,13 +304,12 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
ca->cnt = 1; ca->cnt = 1;
} }
static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
u32 in_flight)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk); struct bictcp *ca = inet_csk_ca(sk);
if (!tcp_is_cwnd_limited(sk, in_flight)) if (!tcp_is_cwnd_limited(sk))
return; return;
if (tp->snd_cwnd <= tp->snd_ssthresh) { if (tp->snd_cwnd <= tp->snd_ssthresh) {
......
...@@ -109,12 +109,12 @@ static void hstcp_init(struct sock *sk) ...@@ -109,12 +109,12 @@ static void hstcp_init(struct sock *sk)
tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128); tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
} }
static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight) static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
struct hstcp *ca = inet_csk_ca(sk); struct hstcp *ca = inet_csk_ca(sk);
if (!tcp_is_cwnd_limited(sk, in_flight)) if (!tcp_is_cwnd_limited(sk))
return; return;
if (tp->snd_cwnd <= tp->snd_ssthresh) if (tp->snd_cwnd <= tp->snd_ssthresh)
......
...@@ -227,12 +227,12 @@ static u32 htcp_recalc_ssthresh(struct sock *sk) ...@@ -227,12 +227,12 @@ static u32 htcp_recalc_ssthresh(struct sock *sk)
return max((tp->snd_cwnd * ca->beta) >> 7, 2U); return max((tp->snd_cwnd * ca->beta) >> 7, 2U);
} }
static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight) static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
struct htcp *ca = inet_csk_ca(sk); struct htcp *ca = inet_csk_ca(sk);
if (!tcp_is_cwnd_limited(sk, in_flight)) if (!tcp_is_cwnd_limited(sk))
return; return;
if (tp->snd_cwnd <= tp->snd_ssthresh) if (tp->snd_cwnd <= tp->snd_ssthresh)
......
...@@ -87,8 +87,7 @@ static inline u32 hybla_fraction(u32 odds) ...@@ -87,8 +87,7 @@ static inline u32 hybla_fraction(u32 odds)
* o Give cwnd a new value based on the model proposed * o Give cwnd a new value based on the model proposed
* o remember increments <1 * o remember increments <1
*/ */
static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked, static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked)
u32 in_flight)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
struct hybla *ca = inet_csk_ca(sk); struct hybla *ca = inet_csk_ca(sk);
...@@ -101,11 +100,11 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked, ...@@ -101,11 +100,11 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked,
ca->minrtt_us = tp->srtt_us; ca->minrtt_us = tp->srtt_us;
} }
if (!tcp_is_cwnd_limited(sk, in_flight)) if (!tcp_is_cwnd_limited(sk))
return; return;
if (!ca->hybla_en) { if (!ca->hybla_en) {
tcp_reno_cong_avoid(sk, ack, acked, in_flight); tcp_reno_cong_avoid(sk, ack, acked);
return; return;
} }
......
...@@ -255,8 +255,7 @@ static void tcp_illinois_state(struct sock *sk, u8 new_state) ...@@ -255,8 +255,7 @@ static void tcp_illinois_state(struct sock *sk, u8 new_state)
/* /*
* Increase window in response to successful acknowledgment. * Increase window in response to successful acknowledgment.
*/ */
static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked, static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)
u32 in_flight)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
struct illinois *ca = inet_csk_ca(sk); struct illinois *ca = inet_csk_ca(sk);
...@@ -265,7 +264,7 @@ static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked, ...@@ -265,7 +264,7 @@ static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked,
update_params(sk); update_params(sk);
/* RFC2861 only increase cwnd if fully utilized */ /* RFC2861 only increase cwnd if fully utilized */
if (!tcp_is_cwnd_limited(sk, in_flight)) if (!tcp_is_cwnd_limited(sk))
return; return;
/* In slow start */ /* In slow start */
......
...@@ -2938,10 +2938,11 @@ static void tcp_synack_rtt_meas(struct sock *sk, const u32 synack_stamp) ...@@ -2938,10 +2938,11 @@ static void tcp_synack_rtt_meas(struct sock *sk, const u32 synack_stamp)
tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt_us, -1L); tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt_us, -1L);
} }
static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight) static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{ {
const struct inet_connection_sock *icsk = inet_csk(sk); const struct inet_connection_sock *icsk = inet_csk(sk);
icsk->icsk_ca_ops->cong_avoid(sk, ack, acked, in_flight);
icsk->icsk_ca_ops->cong_avoid(sk, ack, acked);
tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp; tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
} }
...@@ -3364,7 +3365,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) ...@@ -3364,7 +3365,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
u32 ack_seq = TCP_SKB_CB(skb)->seq; u32 ack_seq = TCP_SKB_CB(skb)->seq;
u32 ack = TCP_SKB_CB(skb)->ack_seq; u32 ack = TCP_SKB_CB(skb)->ack_seq;
bool is_dupack = false; bool is_dupack = false;
u32 prior_in_flight;
u32 prior_fackets; u32 prior_fackets;
int prior_packets = tp->packets_out; int prior_packets = tp->packets_out;
const int prior_unsacked = tp->packets_out - tp->sacked_out; const int prior_unsacked = tp->packets_out - tp->sacked_out;
...@@ -3397,7 +3397,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) ...@@ -3397,7 +3397,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
flag |= FLAG_SND_UNA_ADVANCED; flag |= FLAG_SND_UNA_ADVANCED;
prior_fackets = tp->fackets_out; prior_fackets = tp->fackets_out;
prior_in_flight = tcp_packets_in_flight(tp);
/* ts_recent update must be made after we are sure that the packet /* ts_recent update must be made after we are sure that the packet
* is in window. * is in window.
...@@ -3452,7 +3451,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) ...@@ -3452,7 +3451,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
/* Advance cwnd if state allows */ /* Advance cwnd if state allows */
if (tcp_may_raise_cwnd(sk, flag)) if (tcp_may_raise_cwnd(sk, flag))
tcp_cong_avoid(sk, ack, acked, prior_in_flight); tcp_cong_avoid(sk, ack, acked);
if (tcp_ack_is_dubious(sk, flag)) { if (tcp_ack_is_dubious(sk, flag)) {
is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
......
...@@ -115,13 +115,12 @@ static void tcp_lp_init(struct sock *sk) ...@@ -115,13 +115,12 @@ static void tcp_lp_init(struct sock *sk)
* Will only call newReno CA when away from inference. * Will only call newReno CA when away from inference.
* From TCP-LP's paper, this will be handled in additive increasement. * From TCP-LP's paper, this will be handled in additive increasement.
*/ */
static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 acked, static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
u32 in_flight)
{ {
struct lp *lp = inet_csk_ca(sk); struct lp *lp = inet_csk_ca(sk);
if (!(lp->flag & LP_WITHIN_INF)) if (!(lp->flag & LP_WITHIN_INF))
tcp_reno_cong_avoid(sk, ack, acked, in_flight); tcp_reno_cong_avoid(sk, ack, acked);
} }
/** /**
......
...@@ -1408,7 +1408,7 @@ static void tcp_cwnd_validate(struct sock *sk, u32 unsent_segs) ...@@ -1408,7 +1408,7 @@ static void tcp_cwnd_validate(struct sock *sk, u32 unsent_segs)
tp->lsnd_pending = tp->packets_out + unsent_segs; tp->lsnd_pending = tp->packets_out + unsent_segs;
if (tcp_is_cwnd_limited(sk, 0)) { if (tcp_is_cwnd_limited(sk)) {
/* Network is feed fully. */ /* Network is feed fully. */
tp->snd_cwnd_used = 0; tp->snd_cwnd_used = 0;
tp->snd_cwnd_stamp = tcp_time_stamp; tp->snd_cwnd_stamp = tcp_time_stamp;
......
...@@ -15,12 +15,11 @@ ...@@ -15,12 +15,11 @@
#define TCP_SCALABLE_AI_CNT 50U #define TCP_SCALABLE_AI_CNT 50U
#define TCP_SCALABLE_MD_SCALE 3 #define TCP_SCALABLE_MD_SCALE 3
static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked, static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
u32 in_flight)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
if (!tcp_is_cwnd_limited(sk, in_flight)) if (!tcp_is_cwnd_limited(sk))
return; return;
if (tp->snd_cwnd <= tp->snd_ssthresh) if (tp->snd_cwnd <= tp->snd_ssthresh)
......
...@@ -163,14 +163,13 @@ static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp) ...@@ -163,14 +163,13 @@ static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)
return min(tp->snd_ssthresh, tp->snd_cwnd-1); return min(tp->snd_ssthresh, tp->snd_cwnd-1);
} }
static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked, static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
u32 in_flight)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
struct vegas *vegas = inet_csk_ca(sk); struct vegas *vegas = inet_csk_ca(sk);
if (!vegas->doing_vegas_now) { if (!vegas->doing_vegas_now) {
tcp_reno_cong_avoid(sk, ack, acked, in_flight); tcp_reno_cong_avoid(sk, ack, acked);
return; return;
} }
...@@ -195,7 +194,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked, ...@@ -195,7 +194,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked,
/* We don't have enough RTT samples to do the Vegas /* We don't have enough RTT samples to do the Vegas
* calculation, so we'll behave like Reno. * calculation, so we'll behave like Reno.
*/ */
tcp_reno_cong_avoid(sk, ack, acked, in_flight); tcp_reno_cong_avoid(sk, ack, acked);
} else { } else {
u32 rtt, diff; u32 rtt, diff;
u64 target_cwnd; u64 target_cwnd;
......
...@@ -114,19 +114,18 @@ static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event) ...@@ -114,19 +114,18 @@ static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event)
tcp_veno_init(sk); tcp_veno_init(sk);
} }
static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked, static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
u32 in_flight)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
struct veno *veno = inet_csk_ca(sk); struct veno *veno = inet_csk_ca(sk);
if (!veno->doing_veno_now) { if (!veno->doing_veno_now) {
tcp_reno_cong_avoid(sk, ack, acked, in_flight); tcp_reno_cong_avoid(sk, ack, acked);
return; return;
} }
/* limited by applications */ /* limited by applications */
if (!tcp_is_cwnd_limited(sk, in_flight)) if (!tcp_is_cwnd_limited(sk))
return; return;
/* We do the Veno calculations only if we got enough rtt samples */ /* We do the Veno calculations only if we got enough rtt samples */
...@@ -134,7 +133,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked, ...@@ -134,7 +133,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked,
/* We don't have enough rtt samples to do the Veno /* We don't have enough rtt samples to do the Veno
* calculation, so we'll behave like Reno. * calculation, so we'll behave like Reno.
*/ */
tcp_reno_cong_avoid(sk, ack, acked, in_flight); tcp_reno_cong_avoid(sk, ack, acked);
} else { } else {
u64 target_cwnd; u64 target_cwnd;
u32 rtt; u32 rtt;
......
...@@ -69,13 +69,12 @@ static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us) ...@@ -69,13 +69,12 @@ static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us)
tcp_vegas_pkts_acked(sk, pkts_acked, rtt_us); tcp_vegas_pkts_acked(sk, pkts_acked, rtt_us);
} }
static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked, static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
u32 in_flight)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
struct yeah *yeah = inet_csk_ca(sk); struct yeah *yeah = inet_csk_ca(sk);
if (!tcp_is_cwnd_limited(sk, in_flight)) if (!tcp_is_cwnd_limited(sk))
return; return;
if (tp->snd_cwnd <= tp->snd_ssthresh) if (tp->snd_cwnd <= tp->snd_ssthresh)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment