Commit 5ee2c941 authored by Christoph Paasch's avatar Christoph Paasch Committed by David S. Miller

tcp: Remove unnecessary arg from tcp_enter_cwr and tcp_init_cwnd_reduction

Since Yuchung's 9b44190d (tcp: refactor F-RTO), tcp_enter_cwr is always
called with set_ssthresh = 1. Thus, we can remove this argument from
tcp_enter_cwr. Further, as we remove this one, tcp_init_cwnd_reduction
is then always called with set_ssthresh = true, and so we can get rid of
this argument as well.

Cc: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Christoph Paasch <christoph.paasch@uclouvain.be>
Acked-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5517750f
...@@ -928,7 +928,7 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk) ...@@ -928,7 +928,7 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
/* Use define here intentionally to get WARN_ON location shown at the caller */ /* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out) #define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)
void tcp_enter_cwr(struct sock *sk, const int set_ssthresh); void tcp_enter_cwr(struct sock *sk);
__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst); __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
/* The maximum number of MSS of available cwnd for which TSO defers /* The maximum number of MSS of available cwnd for which TSO defers
......
...@@ -2475,7 +2475,7 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) ...@@ -2475,7 +2475,7 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
* losses and/or application stalls), do not perform any further cwnd * losses and/or application stalls), do not perform any further cwnd
* reductions, but instead slow start up to ssthresh. * reductions, but instead slow start up to ssthresh.
*/ */
static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh) static void tcp_init_cwnd_reduction(struct sock *sk)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
...@@ -2485,7 +2485,6 @@ static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh) ...@@ -2485,7 +2485,6 @@ static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh)
tp->prior_cwnd = tp->snd_cwnd; tp->prior_cwnd = tp->snd_cwnd;
tp->prr_delivered = 0; tp->prr_delivered = 0;
tp->prr_out = 0; tp->prr_out = 0;
if (set_ssthresh)
tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
TCP_ECN_queue_cwr(tp); TCP_ECN_queue_cwr(tp);
} }
...@@ -2528,14 +2527,14 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk) ...@@ -2528,14 +2527,14 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
} }
/* Enter CWR state. Disable cwnd undo since congestion is proven with ECN */ /* Enter CWR state. Disable cwnd undo since congestion is proven with ECN */
void tcp_enter_cwr(struct sock *sk, const int set_ssthresh) void tcp_enter_cwr(struct sock *sk)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
tp->prior_ssthresh = 0; tp->prior_ssthresh = 0;
if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
tp->undo_marker = 0; tp->undo_marker = 0;
tcp_init_cwnd_reduction(sk, set_ssthresh); tcp_init_cwnd_reduction(sk);
tcp_set_ca_state(sk, TCP_CA_CWR); tcp_set_ca_state(sk, TCP_CA_CWR);
} }
} }
...@@ -2564,7 +2563,7 @@ static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked) ...@@ -2564,7 +2563,7 @@ static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked)
tp->retrans_stamp = 0; tp->retrans_stamp = 0;
if (flag & FLAG_ECE) if (flag & FLAG_ECE)
tcp_enter_cwr(sk, 1); tcp_enter_cwr(sk);
if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) { if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
tcp_try_keep_open(sk); tcp_try_keep_open(sk);
...@@ -2670,7 +2669,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack) ...@@ -2670,7 +2669,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
if (!ece_ack) if (!ece_ack)
tp->prior_ssthresh = tcp_current_ssthresh(sk); tp->prior_ssthresh = tcp_current_ssthresh(sk);
tcp_init_cwnd_reduction(sk, true); tcp_init_cwnd_reduction(sk);
} }
tcp_set_ca_state(sk, TCP_CA_Recovery); tcp_set_ca_state(sk, TCP_CA_Recovery);
} }
...@@ -3346,7 +3345,7 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) ...@@ -3346,7 +3345,7 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
tp->tlp_high_seq = 0; tp->tlp_high_seq = 0;
/* Don't reduce cwnd if DSACK arrives for TLP retrans. */ /* Don't reduce cwnd if DSACK arrives for TLP retrans. */
if (!(flag & FLAG_DSACKING_ACK)) { if (!(flag & FLAG_DSACKING_ACK)) {
tcp_init_cwnd_reduction(sk, true); tcp_init_cwnd_reduction(sk);
tcp_set_ca_state(sk, TCP_CA_CWR); tcp_set_ca_state(sk, TCP_CA_CWR);
tcp_end_cwnd_reduction(sk); tcp_end_cwnd_reduction(sk);
tcp_try_keep_open(sk); tcp_try_keep_open(sk);
......
...@@ -979,7 +979,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, ...@@ -979,7 +979,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
if (likely(err <= 0)) if (likely(err <= 0))
return err; return err;
tcp_enter_cwr(sk, 1); tcp_enter_cwr(sk);
return net_xmit_eval(err); return net_xmit_eval(err);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment