Commit 1f6afc81 authored by Eric Dumazet, committed by David S. Miller

tcp: remove one indentation level in tcp_rcv_state_process()

Remove one level of indentation 'introduced' in commit
c3ae62af ("tcp: should drop incoming frames without ACK flag set")

if (true) {
        ...
}

The @acceptable variable is a boolean.

This patch is a pure cleanup.
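
To make the shape of the change concrete, a minimal compilable sketch of
the same pattern follows (toy names such as rcv_state_process and ack_ok
are invented for illustration; this is not the kernel code): the result
of the ACK check is computed once into a function-scope bool, and the
artificial wrapper block around the state switch goes away, removing one
tab of indentation without changing behavior.

/* Illustrative sketch only: toy names, not the kernel implementation. */
#include <stdbool.h>
#include <stdio.h>

enum tcp_state { SYN_RECV, FIN_WAIT1 };

/* Stands in for tcp_ack(sk, skb, flags) > 0. */
static bool ack_ok(int ack_flag)
{
	return ack_flag != 0;
}

static int rcv_state_process(enum tcp_state state, int ack_flag)
{
	bool acceptable;		/* hoisted to function scope */

	acceptable = ack_ok(ack_flag);	/* computed once, up front */

	switch (state) {		/* no "if (true)" wrapper: one tab shallower */
	case SYN_RECV:
		if (!acceptable)
			return 1;
		printf("SYN_RECV: ACK accepted\n");
		break;
	case FIN_WAIT1:
		if (!acceptable)
			return 1;
		printf("FIN_WAIT1: ACK accepted\n");
		break;
	}
	return 0;
}

int main(void)
{
	return rcv_state_process(SYN_RECV, 1);
}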
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 42e52bf9
@@ -5536,6 +5536,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct request_sock *req;
 	int queued = 0;
+	bool acceptable;
 
 	tp->rx_opt.saw_tstamp = 0;
 
@@ -5606,157 +5607,153 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		return 0;
 
 	/* step 5: check the ACK field */
-	if (true) {
-		int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
-						  FLAG_UPDATE_TS_RECENT) > 0;
-
-		switch (sk->sk_state) {
-		case TCP_SYN_RECV:
-			if (acceptable) {
-				/* Once we leave TCP_SYN_RECV, we no longer
-				 * need req so release it.
-				 */
-				if (req) {
-					tcp_synack_rtt_meas(sk, req);
-					tp->total_retrans = req->num_retrans;
-
-					reqsk_fastopen_remove(sk, req, false);
-				} else {
-					/* Make sure socket is routed, for
-					 * correct metrics.
-					 */
-					icsk->icsk_af_ops->rebuild_header(sk);
-					tcp_init_congestion_control(sk);
-
-					tcp_mtup_init(sk);
-					tcp_init_buffer_space(sk);
-					tp->copied_seq = tp->rcv_nxt;
-				}
-				smp_mb();
-				tcp_set_state(sk, TCP_ESTABLISHED);
-				sk->sk_state_change(sk);
-
-				/* Note, that this wakeup is only for marginal
-				 * crossed SYN case. Passively open sockets
-				 * are not waked up, because sk->sk_sleep ==
-				 * NULL and sk->sk_socket == NULL.
-				 */
-				if (sk->sk_socket)
-					sk_wake_async(sk,
-						      SOCK_WAKE_IO, POLL_OUT);
-
-				tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
-				tp->snd_wnd = ntohs(th->window) <<
-					      tp->rx_opt.snd_wscale;
-				tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
-
-				if (tp->rx_opt.tstamp_ok)
-					tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
-
-				if (req) {
-					/* Re-arm the timer because data may
-					 * have been sent out. This is similar
-					 * to the regular data transmission case
-					 * when new data has just been ack'ed.
-					 *
-					 * (TFO) - we could try to be more
-					 * aggressive and retranmitting any data
-					 * sooner based on when they were sent
-					 * out.
-					 */
-					tcp_rearm_rto(sk);
-				} else
-					tcp_init_metrics(sk);
-
-				/* Prevent spurious tcp_cwnd_restart() on
-				 * first data packet.
-				 */
-				tp->lsndtime = tcp_time_stamp;
-
-				tcp_initialize_rcv_mss(sk);
-				tcp_fast_path_on(tp);
-			} else {
-				return 1;
-			}
-			break;
-
-		case TCP_FIN_WAIT1:
-			/* If we enter the TCP_FIN_WAIT1 state and we are a
-			 * Fast Open socket and this is the first acceptable
-			 * ACK we have received, this would have acknowledged
-			 * our SYNACK so stop the SYNACK timer.
-			 */
-			if (req != NULL) {
-				/* Return RST if ack_seq is invalid.
-				 * Note that RFC793 only says to generate a
-				 * DUPACK for it but for TCP Fast Open it seems
-				 * better to treat this case like TCP_SYN_RECV
-				 * above.
-				 */
-				if (!acceptable)
-					return 1;
-				/* We no longer need the request sock. */
-				reqsk_fastopen_remove(sk, req, false);
-				tcp_rearm_rto(sk);
-			}
-			if (tp->snd_una == tp->write_seq) {
-				struct dst_entry *dst;
-
-				tcp_set_state(sk, TCP_FIN_WAIT2);
-				sk->sk_shutdown |= SEND_SHUTDOWN;
-
-				dst = __sk_dst_get(sk);
-				if (dst)
-					dst_confirm(dst);
-
-				if (!sock_flag(sk, SOCK_DEAD))
-					/* Wake up lingering close() */
-					sk->sk_state_change(sk);
-				else {
-					int tmo;
-
-					if (tp->linger2 < 0 ||
-					    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
-					     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
-						tcp_done(sk);
-						NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
-						return 1;
-					}
-
-					tmo = tcp_fin_time(sk);
-					if (tmo > TCP_TIMEWAIT_LEN) {
-						inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
-					} else if (th->fin || sock_owned_by_user(sk)) {
-						/* Bad case. We could lose such FIN otherwise.
-						 * It is not a big problem, but it looks confusing
-						 * and not so rare event. We still can lose it now,
-						 * if it spins in bh_lock_sock(), but it is really
-						 * marginal case.
-						 */
-						inet_csk_reset_keepalive_timer(sk, tmo);
-					} else {
-						tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
-						goto discard;
-					}
-				}
-			}
-			break;
-
-		case TCP_CLOSING:
-			if (tp->snd_una == tp->write_seq) {
-				tcp_time_wait(sk, TCP_TIME_WAIT, 0);
-				goto discard;
-			}
-			break;
-
-		case TCP_LAST_ACK:
-			if (tp->snd_una == tp->write_seq) {
-				tcp_update_metrics(sk);
-				tcp_done(sk);
-				goto discard;
-			}
-			break;
-		}
-	}
+	acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
+				      FLAG_UPDATE_TS_RECENT) > 0;
+
+	switch (sk->sk_state) {
+	case TCP_SYN_RECV:
+		if (acceptable) {
+			/* Once we leave TCP_SYN_RECV, we no longer
+			 * need req so release it.
+			 */
+			if (req) {
+				tcp_synack_rtt_meas(sk, req);
+				tp->total_retrans = req->num_retrans;
+
+				reqsk_fastopen_remove(sk, req, false);
+			} else {
+				/* Make sure socket is routed, for
+				 * correct metrics.
+				 */
+				icsk->icsk_af_ops->rebuild_header(sk);
+				tcp_init_congestion_control(sk);
+
+				tcp_mtup_init(sk);
+				tcp_init_buffer_space(sk);
+				tp->copied_seq = tp->rcv_nxt;
+			}
+			smp_mb();
+			tcp_set_state(sk, TCP_ESTABLISHED);
+			sk->sk_state_change(sk);
+
+			/* Note, that this wakeup is only for marginal
+			 * crossed SYN case. Passively open sockets
+			 * are not waked up, because sk->sk_sleep ==
+			 * NULL and sk->sk_socket == NULL.
+			 */
+			if (sk->sk_socket)
+				sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
+
+			tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
+			tp->snd_wnd = ntohs(th->window) <<
+				      tp->rx_opt.snd_wscale;
+			tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
+
+			if (tp->rx_opt.tstamp_ok)
+				tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
+
+			if (req) {
+				/* Re-arm the timer because data may
+				 * have been sent out. This is similar
+				 * to the regular data transmission case
+				 * when new data has just been ack'ed.
+				 *
+				 * (TFO) - we could try to be more aggressive
+				 * and retransmitting any data sooner based
+				 * on when they are sent out.
+				 */
+				tcp_rearm_rto(sk);
+			} else
+				tcp_init_metrics(sk);
+
+			/* Prevent spurious tcp_cwnd_restart() on
+			 * first data packet.
+			 */
+			tp->lsndtime = tcp_time_stamp;
+
+			tcp_initialize_rcv_mss(sk);
+			tcp_fast_path_on(tp);
+		} else {
+			return 1;
+		}
+		break;
+
+	case TCP_FIN_WAIT1:
+		/* If we enter the TCP_FIN_WAIT1 state and we are a
+		 * Fast Open socket and this is the first acceptable
+		 * ACK we have received, this would have acknowledged
+		 * our SYNACK so stop the SYNACK timer.
+		 */
+		if (req != NULL) {
+			/* Return RST if ack_seq is invalid.
+			 * Note that RFC793 only says to generate a
+			 * DUPACK for it but for TCP Fast Open it seems
+			 * better to treat this case like TCP_SYN_RECV
+			 * above.
+			 */
+			if (!acceptable)
+				return 1;
+			/* We no longer need the request sock. */
+			reqsk_fastopen_remove(sk, req, false);
+			tcp_rearm_rto(sk);
+		}
+		if (tp->snd_una == tp->write_seq) {
+			struct dst_entry *dst;
+
+			tcp_set_state(sk, TCP_FIN_WAIT2);
+			sk->sk_shutdown |= SEND_SHUTDOWN;
+
+			dst = __sk_dst_get(sk);
+			if (dst)
+				dst_confirm(dst);
+
+			if (!sock_flag(sk, SOCK_DEAD)) {
+				/* Wake up lingering close() */
+				sk->sk_state_change(sk);
+			} else {
+				int tmo;
+
+				if (tp->linger2 < 0 ||
+				    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
+				     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
+					tcp_done(sk);
+					NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
+					return 1;
+				}
+
+				tmo = tcp_fin_time(sk);
+				if (tmo > TCP_TIMEWAIT_LEN) {
+					inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
+				} else if (th->fin || sock_owned_by_user(sk)) {
+					/* Bad case. We could lose such FIN otherwise.
+					 * It is not a big problem, but it looks confusing
+					 * and not so rare event. We still can lose it now,
+					 * if it spins in bh_lock_sock(), but it is really
+					 * marginal case.
+					 */
+					inet_csk_reset_keepalive_timer(sk, tmo);
+				} else {
+					tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
+					goto discard;
+				}
+			}
+		}
+		break;
+
+	case TCP_CLOSING:
+		if (tp->snd_una == tp->write_seq) {
+			tcp_time_wait(sk, TCP_TIME_WAIT, 0);
+			goto discard;
+		}
+		break;
+
+	case TCP_LAST_ACK:
+		if (tp->snd_una == tp->write_seq) {
+			tcp_update_metrics(sk);
+			tcp_done(sk);
+			goto discard;
+		}
+		break;
+	}
 
 	/* step 6: check the URG bit */
...