Commit 9e412ba7 authored by Ilpo Järvinen, committed by David S. Miller

[TCP]: Sed magic converts func(sk, tp, ...) -> func(sk, ...)

This is a (mostly) automated change using magic:

sed -e '/struct sock \*sk/ N' -e '/struct sock \*sk/ N'
    -e '/struct sock \*sk/ N' -e '/struct sock \*sk/ N'
    -e 's|struct sock \*sk,[\n\t ]*struct tcp_sock \*tp\([^{]*\n{\n\)|
	  struct sock \*sk\1\tstruct tcp_sock *tp = tcp_sk(sk);\n|g'
    -e 's|struct sock \*sk, struct tcp_sock \*tp|
	  struct sock \*sk|g' -e 's|sk, tp\([^-]\)|sk\1|g'

Fixed four unused-variable (tp) warnings that the conversion introduced.

In addition, manually added newlines after local variable declarations and
tweaked function argument positioning.
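
To make the transformation concrete, here is a minimal before/after sketch
(the helper name tcp_example_helper and its body are made up for
illustration; tcp_sk() is the real accessor). Before, callers had to pass
both pointers even though tp is always derivable from sk:

	static void tcp_example_helper(struct sock *sk, struct tcp_sock *tp)
	{
		/* body uses tp directly */
		tp->snd_nxt = tp->write_seq;
	}

After, tp is re-derived locally, and every call site shrinks from
tcp_example_helper(sk, tp) to tcp_example_helper(sk):

	static void tcp_example_helper(struct sock *sk)
	{
		struct tcp_sock *tp = tcp_sk(sk);

		/* body is unchanged */
		tp->snd_nxt = tp->write_seq;
	}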

$ gcc --version
gcc (GCC) 4.1.1 20060525 (Red Hat 4.1.1-1)
...
$ codiff -fV built-in.o.old built-in.o.new
net/ipv4/route.c:
  rt_cache_flush |  +14
 1 function changed, 14 bytes added

net/ipv4/tcp.c:
  tcp_setsockopt |   -5
  tcp_sendpage   |  -25
  tcp_sendmsg    |  -16
 3 functions changed, 46 bytes removed

net/ipv4/tcp_input.c:
  tcp_try_undo_recovery |   +3
  tcp_try_undo_dsack    |   +2
  tcp_mark_head_lost    |  -12
  tcp_ack               |  -15
  tcp_event_data_recv   |  -32
  tcp_rcv_state_process |  -10
  tcp_rcv_established   |   +1
 7 functions changed, 6 bytes added, 69 bytes removed, diff: -63

net/ipv4/tcp_output.c:
  update_send_head          |   -9
  tcp_transmit_skb          |  +19
  tcp_cwnd_validate         |   +1
  tcp_write_wakeup          |  -17
  __tcp_push_pending_frames |  -25
  tcp_push_one              |   -8
  tcp_send_fin              |   -4
 7 functions changed, 20 bytes added, 63 bytes removed, diff: -43

built-in.o.new:
 18 functions changed, 40 bytes added, 178 bytes removed, diff: -138
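
(For context on why the size deltas above are small: tcp_sk() is not a
runtime lookup but a trivial inline cast, roughly as sketched below from
include/linux/tcp.h of this era; struct tcp_sock has the struct sock
embedded at offset zero, so the conversion costs nothing at run time.

	/* sketch of the accessor, not part of this diff */
	static inline struct tcp_sock *tcp_sk(const struct sock *sk)
	{
		return (struct tcp_sock *)sk;
	}

Call sites save passing an extra argument, while callees pay at most a few
bytes to rematerialize tp, which is why most functions shrink slightly and
a few grow.)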
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 38b4da38
@@ -420,9 +420,9 @@ extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
 /* tcp_output.c */
-extern void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
-				      unsigned int cur_mss, int nonagle);
-extern int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp);
+extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
+				      int nonagle);
+extern int tcp_may_send_now(struct sock *sk);
 extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
 extern void tcp_xmit_retransmit_queue(struct sock *);
 extern void tcp_simple_retransmit(struct sock *);
@@ -479,8 +479,10 @@ static inline void tcp_fast_path_on(struct tcp_sock *tp)
 	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
 }
 
-static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp)
+static inline void tcp_fast_path_check(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (skb_queue_empty(&tp->out_of_order_queue) &&
 	    tp->rcv_wnd &&
 	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
@@ -592,9 +594,9 @@ static inline void tcp_dec_pcount_approx(__u32 *count,
 }
 
 static inline void tcp_packets_out_inc(struct sock *sk,
-				       struct tcp_sock *tp,
 				       const struct sk_buff *skb)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	int orig = tp->packets_out;
 
 	tp->packets_out += tcp_skb_pcount(skb);
@@ -778,18 +780,21 @@ static inline void tcp_minshall_update(struct tcp_sock *tp, int mss,
 		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
 }
 
-static inline void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
+static inline void tcp_check_probe_timer(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
+
 	if (!tp->packets_out && !icsk->icsk_pending)
 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
 					  icsk->icsk_rto, TCP_RTO_MAX);
 }
 
-static inline void tcp_push_pending_frames(struct sock *sk,
-					   struct tcp_sock *tp)
+static inline void tcp_push_pending_frames(struct sock *sk)
 {
-	__tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 1), tp->nonagle);
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	__tcp_push_pending_frames(sk, tcp_current_mss(sk, 1), tp->nonagle);
 }
 
 static inline void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
...
@@ -27,9 +27,10 @@ static inline void TCP_ECN_send_synack(struct tcp_sock *tp,
 		TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE;
 }
 
-static inline void TCP_ECN_send_syn(struct sock *sk, struct tcp_sock *tp,
-				    struct sk_buff *skb)
+static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	tp->ecn_flags = 0;
 	if (sysctl_tcp_ecn) {
 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE|TCPCB_FLAG_CWR;
@@ -44,9 +45,11 @@ TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
 		th->ece = 1;
 }
 
-static inline void TCP_ECN_send(struct sock *sk, struct tcp_sock *tp,
-				struct sk_buff *skb, int tcp_header_len)
+static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
+				int tcp_header_len)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (tp->ecn_flags & TCP_ECN_OK) {
 		/* Not-retransmitted data segment: set ECT and inject CWR. */
 		if (skb->len != tcp_header_len &&
...
@@ -460,9 +460,9 @@ static inline int forced_push(struct tcp_sock *tp)
 	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
 }
 
-static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
-			      struct sk_buff *skb)
+static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
 
 	skb->csum = 0;
@@ -486,15 +486,17 @@ static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
 	}
 }
 
-static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
-			    int mss_now, int nonagle)
+static inline void tcp_push(struct sock *sk, int flags, int mss_now,
+			    int nonagle)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (tcp_send_head(sk)) {
 		struct sk_buff *skb = tcp_write_queue_tail(sk);
 		if (!(flags & MSG_MORE) || forced_push(tp))
 			tcp_mark_push(tp, skb);
 		tcp_mark_urg(tp, flags, skb);
-		__tcp_push_pending_frames(sk, tp, mss_now,
+		__tcp_push_pending_frames(sk, mss_now,
 					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
 	}
 }
@@ -540,7 +542,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
 			if (!skb)
 				goto wait_for_memory;
 
-			skb_entail(sk, tp, skb);
+			skb_entail(sk, skb);
 			copy = size_goal;
 		}
@@ -586,7 +588,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
 
 		if (forced_push(tp)) {
 			tcp_mark_push(tp, skb);
-			__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
+			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
 		} else if (skb == tcp_send_head(sk))
 			tcp_push_one(sk, mss_now);
 		continue;
@@ -595,7 +597,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 
 wait_for_memory:
 		if (copied)
-			tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
+			tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
 
 		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
 			goto do_error;
@@ -606,7 +608,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
 
 out:
 	if (copied)
-		tcp_push(sk, tp, flags, mss_now, tp->nonagle);
+		tcp_push(sk, flags, mss_now, tp->nonagle);
 	return copied;
 
 do_error:
@@ -637,8 +639,9 @@ ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
 #define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
 #define TCP_OFF(sk)	(sk->sk_sndmsg_off)
 
-static inline int select_size(struct sock *sk, struct tcp_sock *tp)
+static inline int select_size(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	int tmp = tp->mss_cache;
 
 	if (sk->sk_route_caps & NETIF_F_SG) {
@@ -714,7 +717,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 				if (!sk_stream_memory_free(sk))
 					goto wait_for_sndbuf;
 
-				skb = sk_stream_alloc_pskb(sk, select_size(sk, tp),
+				skb = sk_stream_alloc_pskb(sk, select_size(sk),
 							   0, sk->sk_allocation);
 				if (!skb)
 					goto wait_for_memory;
@@ -725,7 +728,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
 					skb->ip_summed = CHECKSUM_PARTIAL;
 
-				skb_entail(sk, tp, skb);
+				skb_entail(sk, skb);
 				copy = size_goal;
 			}
@@ -830,7 +833,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
 			if (forced_push(tp)) {
 				tcp_mark_push(tp, skb);
-				__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
+				__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
 			} else if (skb == tcp_send_head(sk))
 				tcp_push_one(sk, mss_now);
 			continue;
@@ -839,7 +842,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 
 wait_for_memory:
 		if (copied)
-			tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
+			tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
 
 		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
 			goto do_error;
@@ -851,7 +854,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
 out:
 	if (copied)
-		tcp_push(sk, tp, flags, mss_now, tp->nonagle);
+		tcp_push(sk, flags, mss_now, tp->nonagle);
 	TCP_CHECK_TIMER(sk);
 	release_sock(sk);
 	return copied;
@@ -1389,7 +1392,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 skip_copy:
 		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
 			tp->urg_data = 0;
-			tcp_fast_path_check(sk, tp);
+			tcp_fast_path_check(sk);
 		}
 		if (used + offset < skb->len)
 			continue;
@@ -1830,7 +1833,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 			 * for currently queued segments.
 			 */
 			tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
-			tcp_push_pending_frames(sk, tp);
+			tcp_push_pending_frames(sk);
 		} else {
 			tp->nonagle &= ~TCP_NAGLE_OFF;
 		}
@@ -1854,7 +1857,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 			tp->nonagle &= ~TCP_NAGLE_CORK;
 			if (tp->nonagle&TCP_NAGLE_OFF)
 				tp->nonagle |= TCP_NAGLE_PUSH;
-			tcp_push_pending_frames(sk, tp);
+			tcp_push_pending_frames(sk);
 		}
 		break;
...
@@ -235,9 +235,9 @@ static void tcp_fixup_sndbuf(struct sock *sk)
  */
 
 /* Slow part of check#2. */
-static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp,
-			     const struct sk_buff *skb)
+static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	/* Optimize this! */
 	int truesize = tcp_win_from_space(skb->truesize)/2;
 	int window = tcp_win_from_space(sysctl_tcp_rmem[2])/2;
@@ -252,9 +252,11 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
 	return 0;
 }
 
-static void tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
+static void tcp_grow_window(struct sock *sk,
 			    struct sk_buff *skb)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	/* Check #1 */
 	if (tp->rcv_ssthresh < tp->window_clamp &&
 	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
@@ -267,7 +269,7 @@ static void tcp_grow_window(struct sock *sk,
 		if (tcp_win_from_space(skb->truesize) <= skb->len)
 			incr = 2*tp->advmss;
 		else
-			incr = __tcp_grow_window(sk, tp, skb);
+			incr = __tcp_grow_window(sk, skb);
 
 		if (incr) {
 			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp);
@@ -330,8 +332,9 @@ static void tcp_init_buffer_space(struct sock *sk)
 }
 
 /* 5. Recalculate window clamp after socket hit its memory bounds. */
-static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp)
+static void tcp_clamp_window(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
 
 	icsk->icsk_ack.quick = 0;
@@ -503,8 +506,9 @@ void tcp_rcv_space_adjust(struct sock *sk)
  * each ACK we send, he increments snd_cwnd and transmits more of his
  * queue.  -DaveM
  */
-static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
+static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	u32 now;
@@ -545,7 +549,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 	TCP_ECN_check_ce(tp, skb);
 
 	if (skb->len >= 128)
-		tcp_grow_window(sk, tp, skb);
+		tcp_grow_window(sk, skb);
 }
 
 /* Called to compute a smoothed rtt estimate. The data fed to this
@@ -1541,8 +1545,10 @@ static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
 	return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto);
 }
 
-static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp)
+static inline int tcp_head_timedout(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	return tp->packets_out &&
 	       tcp_skb_timedout(sk, tcp_write_queue_head(sk));
 }
@@ -1640,8 +1646,9 @@ static inline int tcp_head_timedout(struct sock *sk)
 * Main question: may we further continue forward transmission
 * with the same cwnd?
 */
-static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp)
+static int tcp_time_to_recover(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	__u32 packets_out;
 
 	/* Do not perform any recovery during FRTO algorithm */
@@ -1659,7 +1666,7 @@ static int tcp_time_to_recover(struct sock *sk)
 	/* Trick#3 : when we use RFC2988 timer restart, fast
 	 * retransmit can be triggered by timeout of queue head.
 	 */
-	if (tcp_head_timedout(sk, tp))
+	if (tcp_head_timedout(sk))
 		return 1;
 
 	/* Trick#4: It is still not OK... But will it be useful to delay
@@ -1668,7 +1675,7 @@ static int tcp_time_to_recover(struct sock *sk)
 	packets_out = tp->packets_out;
 	if (packets_out <= tp->reordering &&
 	    tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) &&
-	    !tcp_may_send_now(sk, tp)) {
+	    !tcp_may_send_now(sk)) {
 		/* We have nothing to send. This connection is limited
 		 * either by receiver window or by application.
 		 */
@@ -1708,8 +1715,10 @@ static void tcp_add_reno_sack(struct sock *sk)
 
 /* Account for ACK, ACKing some data in Reno Recovery phase. */
 
-static void tcp_remove_reno_sacks(struct sock *sk, struct tcp_sock *tp, int acked)
+static void tcp_remove_reno_sacks(struct sock *sk, int acked)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (acked > 0) {
 		/* One ACK acked hole. The rest eat duplicate ACKs. */
 		if (acked-1 >= tp->sacked_out)
@@ -1728,9 +1737,10 @@ static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
 }
 
 /* Mark head of queue up as lost. */
-static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp,
+static void tcp_mark_head_lost(struct sock *sk,
 			       int packets, u32 high_seq)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 	int cnt;
@@ -1771,15 +1781,17 @@ static void tcp_mark_head_lost(struct sock *sk,
 
 /* Account newly detected lost packet(s) */
 
-static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
+static void tcp_update_scoreboard(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (IsFack(tp)) {
 		int lost = tp->fackets_out - tp->reordering;
 		if (lost <= 0)
 			lost = 1;
-		tcp_mark_head_lost(sk, tp, lost, tp->high_seq);
+		tcp_mark_head_lost(sk, lost, tp->high_seq);
 	} else {
-		tcp_mark_head_lost(sk, tp, 1, tp->high_seq);
+		tcp_mark_head_lost(sk, 1, tp->high_seq);
 	}
 
 	/* New heuristics: it is possible only after we switched
@@ -1787,7 +1799,7 @@ static void tcp_update_scoreboard(struct sock *sk)
 	 * Hence, we can detect timed out packets during fast
 	 * retransmit without falling to slow start.
 	 */
-	if (!IsReno(tp) && tcp_head_timedout(sk, tp)) {
+	if (!IsReno(tp) && tcp_head_timedout(sk)) {
 		struct sk_buff *skb;
 
 		skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
@@ -1867,9 +1879,11 @@ static inline int tcp_packet_delayed(struct tcp_sock *tp)
 /* Undo procedures. */
 
 #if FASTRETRANS_DEBUG > 1
-static void DBGUNDO(struct sock *sk, struct tcp_sock *tp, const char *msg)
+static void DBGUNDO(struct sock *sk, const char *msg)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_sock *inet = inet_sk(sk);
+
 	printk(KERN_DEBUG "Undo %s %u.%u.%u.%u/%u c%u l%u ss%u/%u p%u\n",
 	       msg,
 	       NIPQUAD(inet->daddr), ntohs(inet->dport),
@@ -1915,13 +1929,15 @@ static inline int tcp_may_undo(struct tcp_sock *tp)
 }
 
 /* People celebrate: "We love our President!" */
-static int tcp_try_undo_recovery(struct sock *sk, struct tcp_sock *tp)
+static int tcp_try_undo_recovery(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (tcp_may_undo(tp)) {
 		/* Happy end! We did not retransmit anything
 		 * or our original transmission succeeded.
 		 */
-		DBGUNDO(sk, tp, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
+		DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
 		tcp_undo_cwr(sk, 1);
 		if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
 			NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
@@ -1941,10 +1957,12 @@ static int tcp_try_undo_recovery(struct sock *sk)
 }
 
 /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
-static void tcp_try_undo_dsack(struct sock *sk, struct tcp_sock *tp)
+static void tcp_try_undo_dsack(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (tp->undo_marker && !tp->undo_retrans) {
-		DBGUNDO(sk, tp, "D-SACK");
+		DBGUNDO(sk, "D-SACK");
 		tcp_undo_cwr(sk, 1);
 		tp->undo_marker = 0;
 		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
@@ -1953,9 +1971,9 @@ static void tcp_try_undo_dsack(struct sock *sk)
 }
 
 /* Undo during fast recovery after partial ACK. */
-static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp,
-				int acked)
+static int tcp_try_undo_partial(struct sock *sk, int acked)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	/* Partial ACK arrived. Force Hoe's retransmit. */
 	int failed = IsReno(tp) || tp->fackets_out>tp->reordering;
@@ -1968,7 +1986,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked)
 
 		tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
 
-		DBGUNDO(sk, tp, "Hoe");
+		DBGUNDO(sk, "Hoe");
 		tcp_undo_cwr(sk, 0);
 		NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);
@@ -1982,8 +2000,10 @@ static int tcp_try_undo_partial(struct sock *sk, int acked)
 }
 
 /* Undo during loss recovery after partial ACK. */
-static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp)
+static int tcp_try_undo_loss(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (tcp_may_undo(tp)) {
 		struct sk_buff *skb;
 		tcp_for_write_queue(skb, sk) {
@@ -1994,7 +2014,7 @@ static int tcp_try_undo_loss(struct sock *sk)
 
 		clear_all_retrans_hints(tp);
 
-		DBGUNDO(sk, tp, "partial loss");
+		DBGUNDO(sk, "partial loss");
 		tp->lost_out = 0;
 		tp->left_out = tp->sacked_out;
 		tcp_undo_cwr(sk, 1);
@@ -2016,8 +2036,10 @@ static inline void tcp_complete_cwr(struct sock *sk)
 	tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
 }
 
-static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag)
+static void tcp_try_to_open(struct sock *sk, int flag)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	tp->left_out = tp->sacked_out;
 
 	if (tp->retrans_out == 0)
@@ -2111,7 +2133,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 	    before(tp->snd_una, tp->high_seq) &&
 	    icsk->icsk_ca_state != TCP_CA_Open &&
 	    tp->fackets_out > tp->reordering) {
-		tcp_mark_head_lost(sk, tp, tp->fackets_out-tp->reordering, tp->high_seq);
+		tcp_mark_head_lost(sk, tp->fackets_out-tp->reordering, tp->high_seq);
 		NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
 	}
@@ -2127,7 +2149,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 	switch (icsk->icsk_ca_state) {
 	case TCP_CA_Loss:
 		icsk->icsk_retransmits = 0;
-		if (tcp_try_undo_recovery(sk, tp))
+		if (tcp_try_undo_recovery(sk))
 			return;
 		break;
@@ -2141,7 +2163,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 		break;
 
 	case TCP_CA_Disorder:
-		tcp_try_undo_dsack(sk, tp);
+		tcp_try_undo_dsack(sk);
 		if (!tp->undo_marker ||
 		    /* For SACK case do not Open to allow to undo
 		     * catching for all duplicate ACKs. */
@@ -2154,7 +2176,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 	case TCP_CA_Recovery:
 		if (IsReno(tp))
 			tcp_reset_reno_sack(tp);
-		if (tcp_try_undo_recovery(sk, tp))
+		if (tcp_try_undo_recovery(sk))
 			return;
 		tcp_complete_cwr(sk);
 		break;
@@ -2170,14 +2192,14 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 		} else {
 			int acked = prior_packets - tp->packets_out;
 			if (IsReno(tp))
-				tcp_remove_reno_sacks(sk, tp, acked);
-			is_dupack = tcp_try_undo_partial(sk, tp, acked);
+				tcp_remove_reno_sacks(sk, acked);
+			is_dupack = tcp_try_undo_partial(sk, acked);
 		}
 		break;
 	case TCP_CA_Loss:
 		if (flag&FLAG_DATA_ACKED)
 			icsk->icsk_retransmits = 0;
-		if (!tcp_try_undo_loss(sk, tp)) {
+		if (!tcp_try_undo_loss(sk)) {
 			tcp_moderate_cwnd(tp);
 			tcp_xmit_retransmit_queue(sk);
 			return;
@@ -2194,10 +2216,10 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 	}
 
 	if (icsk->icsk_ca_state == TCP_CA_Disorder)
-		tcp_try_undo_dsack(sk, tp);
+		tcp_try_undo_dsack(sk);
 
-	if (!tcp_time_to_recover(sk, tp)) {
-		tcp_try_to_open(sk, tp, flag);
+	if (!tcp_time_to_recover(sk)) {
+		tcp_try_to_open(sk, flag);
 		return;
 	}
@@ -2236,8 +2258,8 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 		tcp_set_ca_state(sk, TCP_CA_Recovery);
 	}
 
-	if (is_dupack || tcp_head_timedout(sk, tp))
-		tcp_update_scoreboard(sk, tp);
+	if (is_dupack || tcp_head_timedout(sk))
+		tcp_update_scoreboard(sk);
 	tcp_cwnd_down(sk);
 	tcp_xmit_retransmit_queue(sk);
 }
@@ -2313,8 +2335,10 @@ static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
 * RFC2988 recommends to restart timer to now+rto.
 */
-static void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp)
+static void tcp_ack_packets_out(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (!tp->packets_out) {
 		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
 	} else {
@@ -2471,7 +2495,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
 
 	if (acked&FLAG_ACKED) {
 		tcp_ack_update_rtt(sk, acked, seq_rtt);
-		tcp_ack_packets_out(sk, tp);
+		tcp_ack_packets_out(sk);
 		if (rtt_sample && !(acked & FLAG_RETRANS_DATA_ACKED))
 			(*rtt_sample)(sk, tcp_usrtt(&tv));
@@ -2556,9 +2580,10 @@ static inline int tcp_may_update_window(const struct tcp_sock *tp, const u32 ack
 * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2
 * and in FreeBSD. NetBSD's one is even worse.) is wrong.
 */
-static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp,
-				 struct sk_buff *skb, u32 ack, u32 ack_seq)
+static int tcp_ack_update_window(struct sock *sk, struct sk_buff *skb, u32 ack,
+				 u32 ack_seq)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	int flag = 0;
 	u32 nwin = ntohs(tcp_hdr(skb)->window);
@@ -2576,7 +2601,7 @@ static int tcp_ack_update_window(struct sock *sk, struct sk_buff *skb, u32 ack,
 			 * fast path is recovered for sending TCP.
 			 */
 			tp->pred_flags = 0;
-			tcp_fast_path_check(sk, tp);
+			tcp_fast_path_check(sk);
 
 			if (nwin > tp->max_window) {
 				tp->max_window = nwin;
@@ -2762,7 +2787,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 		else
 			NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS);
 
-		flag |= tcp_ack_update_window(sk, tp, skb, ack, ack_seq);
+		flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
 
 		if (TCP_SKB_CB(skb)->sacked)
 			flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
@@ -3426,7 +3451,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 		}
 		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
 		if (skb->len)
-			tcp_event_data_recv(sk, tp, skb);
+			tcp_event_data_recv(sk, skb);
 		if (th->fin)
 			tcp_fin(skb, sk, th);
@@ -3443,7 +3468,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 		if (tp->rx_opt.num_sacks)
 			tcp_sack_remove(tp);
 
-		tcp_fast_path_check(sk, tp);
+		tcp_fast_path_check(sk);
 
 		if (eaten > 0)
 			__kfree_skb(skb);
@@ -3734,7 +3759,7 @@ static int tcp_prune_queue(struct sock *sk)
 	NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED);
 
 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
-		tcp_clamp_window(sk, tp);
+		tcp_clamp_window(sk);
 	else if (tcp_memory_pressure)
 		tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
@@ -3803,8 +3828,10 @@ void tcp_cwnd_application_limited(struct sock *sk)
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
-static int tcp_should_expand_sndbuf(struct sock *sk, struct tcp_sock *tp)
+static int tcp_should_expand_sndbuf(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	/* If the user specified a specific send buffer setting, do
 	 * not modify it.
 	 */
@@ -3836,7 +3863,7 @@ static void tcp_new_space(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	if (tcp_should_expand_sndbuf(sk, tp)) {
+	if (tcp_should_expand_sndbuf(sk)) {
 		int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
 			MAX_TCP_HEADER + 16 + sizeof(struct sk_buff),
 		    demanded = max_t(unsigned int, tp->snd_cwnd,
@@ -3860,9 +3887,9 @@ static void tcp_check_space(struct sock *sk)
 	}
 }
 
-static inline void tcp_data_snd_check(struct sock *sk, struct tcp_sock *tp)
+static inline void tcp_data_snd_check(struct sock *sk)
 {
-	tcp_push_pending_frames(sk, tp);
+	tcp_push_pending_frames(sk);
 	tcp_check_space(sk);
 }
@@ -4196,7 +4223,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			 */
 			tcp_ack(sk, skb, 0);
 			__kfree_skb(skb);
-			tcp_data_snd_check(sk, tp);
+			tcp_data_snd_check(sk);
 			return 0;
 		} else { /* Header too small */
 			TCP_INC_STATS_BH(TCP_MIB_INERRS);
@@ -4267,12 +4294,12 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 				tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
 			}
 
-			tcp_event_data_recv(sk, tp, skb);
+			tcp_event_data_recv(sk, skb);
 
 			if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) {
 				/* Well, only one small jumplet in fast path... */
 				tcp_ack(sk, skb, FLAG_DATA);
-				tcp_data_snd_check(sk, tp);
+				tcp_data_snd_check(sk);
 				if (!inet_csk_ack_scheduled(sk))
 					goto no_ack;
 			}
@@ -4355,7 +4382,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
 	/* step 7: process the segment text */
 	tcp_data_queue(sk, skb);
-	tcp_data_snd_check(sk, tp);
+	tcp_data_snd_check(sk);
 	tcp_ack_snd_check(sk);
 	return 0;
@@ -4672,7 +4699,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		/* Do step6 onward by hand. */
 		tcp_urg(sk, skb, th);
 		__kfree_skb(skb);
-		tcp_data_snd_check(sk, tp);
+		tcp_data_snd_check(sk);
 		return 0;
 	}
@@ -4864,7 +4891,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	/* tcp_data could move socket to TIME-WAIT */
 	if (sk->sk_state != TCP_CLOSE) {
-		tcp_data_snd_check(sk, tp);
+		tcp_data_snd_check(sk);
 		tcp_ack_snd_check(sk);
 	}
...
@@ -62,12 +62,13 @@ int sysctl_tcp_base_mss __read_mostly = 512;
 /* By default, RFC2861 behavior.  */
 int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
 
-static void update_send_head(struct sock *sk, struct tcp_sock *tp,
-			     struct sk_buff *skb)
+static void update_send_head(struct sock *sk, struct sk_buff *skb)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	tcp_advance_send_head(sk, skb);
 	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
-	tcp_packets_out_inc(sk, tp, skb);
+	tcp_packets_out_inc(sk, skb);
 }
 
 /* SND.NXT, if window was not shrunk.
@@ -76,8 +77,10 @@ static void update_send_head(struct sock *sk, struct sk_buff *skb)
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's make this for now:
 */
-static inline __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_sock *tp)
+static inline __u32 tcp_acceptable_seq(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt))
 		return tp->snd_nxt;
 	else
@@ -516,7 +519,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 				    md5 ? &md5_hash_location :
 #endif
 				    NULL);
-		TCP_ECN_send(sk, tp, skb, tcp_header_size);
+		TCP_ECN_send(sk, skb, tcp_header_size);
 	}
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -927,8 +930,9 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
 
 /* Congestion window validation. (RFC2861) */
 
-static void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
+static void tcp_cwnd_validate(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	__u32 packets_out = tp->packets_out;
 
 	if (packets_out >= tp->snd_cwnd) {
@@ -1076,8 +1080,9 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
 	return cwnd_quota;
 }
 
-int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
+int tcp_may_send_now(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb = tcp_send_head(sk);
 
 	return (skb &&
@@ -1144,8 +1149,9 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 *
 * This algorithm is from John Heffner.
 */
-static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
+static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	u32 send_win, cong_win, limit, in_flight;
@@ -1324,7 +1330,7 @@ static int tcp_mtu_probe(struct sock *sk)
 		/* Decrement cwnd here because we are sending
 		 * effectively two packets. */
 		tp->snd_cwnd--;
-		update_send_head(sk, tp, nskb);
+		update_send_head(sk, nskb);
 
 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
 		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
@@ -1387,7 +1393,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 						      nonagle : TCP_NAGLE_PUSH))))
 				break;
 		} else {
-			if (tcp_tso_should_defer(sk, tp, skb))
+			if (tcp_tso_should_defer(sk, skb))
 				break;
 		}
@@ -1416,14 +1422,14 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 		/* Advance the send_head.  This one is sent out.
 		 * This call will increment packets_out.
 		 */
-		update_send_head(sk, tp, skb);
+		update_send_head(sk, skb);
 
 		tcp_minshall_update(tp, mss_now, skb);
 		sent_pkts++;
 	}
 
 	if (likely(sent_pkts)) {
-		tcp_cwnd_validate(sk, tp);
+		tcp_cwnd_validate(sk);
 		return 0;
 	}
 	return !tp->packets_out && tcp_send_head(sk);
@@ -1433,14 +1439,14 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
 * TCP_CORK or attempt at coalescing tiny packets.
 * The socket must be locked by the caller.
 */
-void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
-			       unsigned int cur_mss, int nonagle)
+void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
+			       int nonagle)
 {
 	struct sk_buff *skb = tcp_send_head(sk);
 
 	if (skb) {
 		if (tcp_write_xmit(sk, cur_mss, nonagle))
-			tcp_check_probe_timer(sk, tp);
+			tcp_check_probe_timer(sk);
 	}
 }
@@ -1484,8 +1490,8 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
 
 		if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) {
-			update_send_head(sk, tp, skb);
-			tcp_cwnd_validate(sk, tp);
+			update_send_head(sk, skb);
+			tcp_cwnd_validate(sk);
 			return;
 		}
 	}
@@ -1933,7 +1939,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 		 * segments to send.
 		 */
 
-		if (tcp_may_send_now(sk, tp))
+		if (tcp_may_send_now(sk))
 			return;
 
 		if (tp->forward_skb_hint) {
@@ -2023,7 +2029,7 @@ void tcp_send_fin(struct sock *sk)
 		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
 		tcp_queue_skb(sk, skb);
 	}
-	__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_OFF);
+	__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
 }
 
 /* We get here when a process closes a file descriptor (either due to
@@ -2033,7 +2039,6 @@ void tcp_send_fin(struct sock *sk)
 */
 void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 
 	/* NOTE: No TCP options attached and we never retransmit this. */
@@ -2053,7 +2058,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 	skb_shinfo(skb)->gso_type = 0;
 
 	/* Send it off. */
-	TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp);
+	TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk);
 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
 	if (tcp_transmit_skb(sk, skb, 0, priority))
@@ -2271,7 +2276,7 @@ int tcp_connect(struct sock *sk)
 	skb_reserve(buff, MAX_TCP_HEADER);
 
 	TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
-	TCP_ECN_send_syn(sk, tp, buff);
+	TCP_ECN_send_syn(sk, buff);
 	TCP_SKB_CB(buff)->sacked = 0;
 	skb_shinfo(buff)->gso_segs = 1;
 	skb_shinfo(buff)->gso_size = 0;
@@ -2363,7 +2368,6 @@ void tcp_send_ack(struct sock *sk)
 {
 	/* If we have been reset, we may not send again. */
 	if (sk->sk_state != TCP_CLOSE) {
-		struct tcp_sock *tp = tcp_sk(sk);
 		struct sk_buff *buff;
 
 		/* We are not putting this on the write queue, so
@@ -2389,7 +2393,7 @@ void tcp_send_ack(struct sock *sk)
 		skb_shinfo(buff)->gso_type = 0;
 
 		/* Send it off, this clears delayed acks for us. */
-		TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp);
+		TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk);
 		TCP_SKB_CB(buff)->when = tcp_time_stamp;
 		tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
 	}
@@ -2467,7 +2471,7 @@ int tcp_write_wakeup(struct sock *sk)
 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
 		if (!err) {
-			update_send_head(sk, tp, skb);
+			update_send_head(sk, skb);
 		}
 		return err;
 	} else {
...