Commit 92df7b51 authored by David S. Miller

[TCP]: tcp_write_xmit() tabbing cleanup

Put the main basic block of work at the top-level of
tabbing, and mark the TCP_CLOSE test with unlikely().
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a762a980
...@@ -842,54 +842,54 @@ static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp) ...@@ -842,54 +842,54 @@ static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
static int tcp_write_xmit(struct sock *sk, int nonagle) static int tcp_write_xmit(struct sock *sk, int nonagle)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
unsigned int mss_now; unsigned int mss_now;
int sent_pkts;
/* If we are closed, the bytes will have to remain here. /* If we are closed, the bytes will have to remain here.
* In time closedown will finish, we empty the write queue and all * In time closedown will finish, we empty the write queue and all
* will be happy. * will be happy.
*/ */
if (sk->sk_state != TCP_CLOSE) { if (unlikely(sk->sk_state == TCP_CLOSE))
struct sk_buff *skb; return 0;
int sent_pkts = 0;
/* Account for SACKS, we may need to fragment due to this.
* It is just like the real MSS changing on us midstream.
* We also handle things correctly when the user adds some
* IP options mid-stream. Silly to do, but cover it.
*/
mss_now = tcp_current_mss(sk, 1);
while ((skb = sk->sk_send_head) &&
tcp_snd_test(sk, skb, mss_now,
tcp_skb_is_last(sk, skb) ? nonagle :
TCP_NAGLE_PUSH)) {
if (skb->len > mss_now) {
if (tcp_fragment(sk, skb, mss_now))
break;
}
TCP_SKB_CB(skb)->when = tcp_time_stamp; /* Account for SACKS, we may need to fragment due to this.
tcp_tso_set_push(skb); * It is just like the real MSS changing on us midstream.
if (tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC))) * We also handle things correctly when the user adds some
* IP options mid-stream. Silly to do, but cover it.
*/
mss_now = tcp_current_mss(sk, 1);
sent_pkts = 0;
while ((skb = sk->sk_send_head) &&
tcp_snd_test(sk, skb, mss_now,
tcp_skb_is_last(sk, skb) ? nonagle :
TCP_NAGLE_PUSH)) {
if (skb->len > mss_now) {
if (tcp_fragment(sk, skb, mss_now))
break; break;
}
/* Advance the send_head. This one is sent out. TCP_SKB_CB(skb)->when = tcp_time_stamp;
* This call will increment packets_out. tcp_tso_set_push(skb);
*/ if (tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC)))
update_send_head(sk, tp, skb); break;
tcp_minshall_update(tp, mss_now, skb); /* Advance the send_head. This one is sent out.
sent_pkts = 1; * This call will increment packets_out.
} */
update_send_head(sk, tp, skb);
if (sent_pkts) { tcp_minshall_update(tp, mss_now, skb);
tcp_cwnd_validate(sk, tp); sent_pkts = 1;
return 0; }
}
return !tp->packets_out && sk->sk_send_head; if (sent_pkts) {
tcp_cwnd_validate(sk, tp);
return 0;
} }
return 0;
return !tp->packets_out && sk->sk_send_head;
} }
/* Push out any pending frames which were held back due to /* Push out any pending frames which were held back due to
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment