Commit 463c84b9 authored by Arnaldo Carvalho de Melo, committed by David S. Miller

[NET]: Introduce inet_connection_sock

This creates struct inet_connection_sock, moving members out of struct
tcp_sock that are shareable with other INET connection oriented
protocols, such as DCCP, which in my private tree already uses most of
these members.

The functions that operate on these members were renamed, using an
inet_csk_ prefix, but are not yet moved to a new file, so as to ease
the review of these changes.
Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 87d11ceb
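
The whole change hinges on a C containment pattern: struct tcp_sock now begins with a struct inet_connection_sock, which itself begins with a struct inet_sock, so a single struct sock pointer is valid at every layer and can be cast to whichever view is needed (inet_sk(), inet_csk(), tcp_sk()). A minimal userspace sketch of that pattern follows; the types are simplified stand-ins with illustrative fields, not the real kernel structs:

    /* Minimal sketch of the containment pattern this commit relies on.
     * Simplified stand-ins for the kernel types; field sets are illustrative. */
    #include <stdio.h>

    struct sock      { int sk_state; };             /* generic socket   */
    struct inet_sock { struct sock sk; int num; };  /* + INET state     */
    struct inet_connection_sock {                   /* + conn-oriented  */
            struct inet_sock icsk_inet;             /* must be first!   */
            int              icsk_rto;
    };
    struct tcp_sock {                               /* + TCP specifics  */
            struct inet_connection_sock inet_conn;  /* must be first!   */
            int                         snd_cwnd;
    };

    /* Because each struct starts with its "parent", one pointer value is
     * valid at every level; this mirrors the kernel's inet_csk() cast. */
    static struct inet_connection_sock *inet_csk(const struct sock *sk)
    {
            return (struct inet_connection_sock *)sk;
    }

    int main(void)
    {
            struct tcp_sock tp = { .inet_conn = { .icsk_rto = 200 }, .snd_cwnd = 10 };
            struct sock *sk = (struct sock *)&tp;

            printf("rto=%d cwnd=%d\n", inet_csk(sk)->icsk_rto, tp.snd_cwnd);
            return 0;
    }
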
@@ -128,7 +128,6 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
 	return (struct inet_request_sock *)sk;
 }
 
-struct inet_bind_bucket;
 struct ipv6_pinfo;
 
 struct inet_sock {
@@ -158,7 +157,6 @@ struct inet_sock {
 	int			mc_index;	/* Multicast device index */
 	__u32			mc_addr;
 	struct ip_mc_socklist	*mc_list;	/* Group array */
-	struct inet_bind_bucket	*bind_hash;
 	/*
 	 * Following members are used to retain the infomation to build
 	 * an ip header on each ip fragmentation while the socket is corked.
...
@@ -333,15 +333,15 @@ static inline struct in6_addr *tcp_v6_rcv_saddr(const struct sock *sk)
 	return sk->sk_family == AF_INET6 ? __tcp_v6_rcv_saddr(sk) : NULL;
 }
 
-static inline int tcp_twsk_ipv6only(const struct sock *sk)
+static inline int inet_twsk_ipv6only(const struct sock *sk)
 {
 	return inet_twsk(sk)->tw_ipv6only;
 }
 
-static inline int tcp_v6_ipv6only(const struct sock *sk)
+static inline int inet_v6_ipv6only(const struct sock *sk)
 {
 	return likely(sk->sk_state != TCP_TIME_WAIT) ?
-		ipv6_only_sock(sk) : tcp_twsk_ipv6only(sk);
+		ipv6_only_sock(sk) : inet_twsk_ipv6only(sk);
 }
 #else
 #define __ipv6_only_sock(sk)	0
@@ -360,7 +360,7 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
 #define __tcp_v6_rcv_saddr(__sk)	NULL
 #define tcp_v6_rcv_saddr(__sk)		NULL
 #define tcp_twsk_ipv6only(__sk)		0
-#define tcp_v6_ipv6only(__sk)		0
+#define inet_v6_ipv6only(__sk)		0
 #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
 
 #define INET6_MATCH(__sk, __saddr, __daddr, __ports, __dif)	\
...
@@ -177,8 +177,8 @@ struct tcp_info
 #include <linux/config.h>
 #include <linux/skbuff.h>
-#include <linux/ip.h>
 #include <net/sock.h>
+#include <net/inet_connection_sock.h>
 #include <net/inet_timewait_sock.h>
 
 /* This defines a selective acknowledgement block. */
@@ -219,8 +219,8 @@ static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
 }
 
 struct tcp_sock {
-	/* inet_sock has to be the first member of tcp_sock */
-	struct inet_sock	inet;
+	/* inet_connection_sock has to be the first member of tcp_sock */
+	struct inet_connection_sock	inet_conn;
 	int	tcp_header_len;	/* Bytes of tcp header to send		*/
 
 	/*
@@ -241,18 +241,6 @@ struct tcp_sock {
 	__u32	snd_sml;	/* Last byte of the most recently transmitted small packet */
 	__u32	rcv_tstamp;	/* timestamp of last received ACK (for keepalives) */
 	__u32	lsndtime;	/* timestamp of last sent data packet (for restart window) */
-	/* Delayed ACK control data */
-	struct {
-		__u8	pending;	/* ACK is pending			*/
-		__u8	quick;		/* Scheduled number of quick acks	*/
-		__u8	pingpong;	/* The session is interactive		*/
-		__u8	blocked;	/* Delayed ACK was blocked by socket lock*/
-		__u32	ato;		/* Predicted tick of soft clock		*/
-		unsigned long timeout;	/* Currently scheduled timeout		*/
-		__u32	lrcvtime;	/* timestamp of last received data packet*/
-		__u16	last_seg_size;	/* Size of last incoming segment	*/
-		__u16	rcv_mss;	/* MSS used for delayed ACK decisions	*/
-	} ack;
 
 	/* Data for direct copy to user */
 	struct {
@@ -271,8 +259,8 @@ struct tcp_sock {
 	__u16	xmit_size_goal;	/* Goal for segmenting output packets	*/
 	__u16	ext_header_len;	/* Network protocol overhead (IP/IPv6 options) */
 	__u8	ca_state;	/* State of fast-retransmit machine 	*/
-	__u8	retransmits;	/* Number of unrecovered RTO timeouts.	*/
+	__u8	keepalive_probes; /* num of allowed keep alive probes	*/
 	__u16	advmss;		/* Advertised MSS			*/
 	__u32	window_clamp;	/* Maximal window to advertise		*/
 	__u32	rcv_ssthresh;	/* Current window clamp			*/
@@ -281,7 +269,7 @@ struct tcp_sock {
 	__u8	reordering;	/* Packet reordering metric.		*/
 	__u8	frto_counter;	/* Number of new acks after RTO		*/
-	__u8	unused;
+	__u8	nonagle;	/* Disable Nagle algorithm?		*/
 	__u8	defer_accept;	/* User waits for some data after accept() */
 
 /* RTT measurement */
@@ -290,19 +278,13 @@ struct tcp_sock {
 	__u32	mdev_max;	/* maximal mdev for the last rtt period	*/
 	__u32	rttvar;		/* smoothed mdev_max			*/
 	__u32	rtt_seq;	/* sequence number to update rttvar	*/
-	__u32	rto;		/* retransmit timeout			*/
 
 	__u32	packets_out;	/* Packets which are "in flight"	*/
 	__u32	left_out;	/* Packets which leaved network		*/
 	__u32	retrans_out;	/* Retransmitted packets out		*/
-	__u8	backoff;	/* backoff				*/
 /*
  *	Options received (usually on last packet, some only on SYN packets).
  */
-	__u8	nonagle;	/* Disable Nagle algorithm?		*/
-	__u8	keepalive_probes; /* num of allowed keep alive probes	*/
-	__u8	probes_out;	/* unanswered 0 window probes		*/
 	struct tcp_options_received rx_opt;
 
 /*
@@ -315,11 +297,6 @@ struct tcp_sock {
 	__u32	snd_cwnd_used;
 	__u32	snd_cwnd_stamp;
 
-	/* Two commonly used timers in both sender and receiver paths. */
-	unsigned long		timeout;
-	struct timer_list	retransmit_timer;	/* Resend (no ack)	*/
-	struct timer_list	delack_timer;		/* Ack delay		*/
-
 	struct sk_buff_head	out_of_order_queue; /* Out of order segments go here */
 
 	struct tcp_func		*af_specific;	/* Operations which are AF_INET{4,6} specific */
@@ -334,7 +311,7 @@ struct tcp_sock {
 	struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
 	struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/
 
-	__u8	syn_retries;	/* num of allowed syn retries */
+	__u8	probes_out;	/* unanswered 0 window probes */
 	__u8	ecn_flags;	/* ECN status bits.			*/
 	__u16	prior_ssthresh; /* ssthresh saved at recovery start	*/
 	__u32	lost_out;	/* Lost packets				*/
@@ -349,14 +326,12 @@ struct tcp_sock {
 	int	undo_retrans;	/* number of undoable retransmissions. */
 	__u32	urg_seq;	/* Seq of received urgent pointer */
 	__u16	urg_data;	/* Saved octet of OOB data and control flags */
-	__u8	pending;	/* Scheduled timer event	*/
 	__u8	urg_mode;	/* In urgent mode		*/
+	/* ONE BYTE HOLE, TRY TO PACK! */
 	__u32	snd_up;		/* Urgent pointer		*/
 
 	__u32	total_retrans;	/* Total retransmits for entire connection */
 
-	struct request_sock_queue accept_queue; /* FIFO of established children */
-
 	unsigned int		keepalive_time;	  /* time before keep alive takes place */
 	unsigned int		keepalive_intvl;  /* time interval between keep alive probes */
 	int			linger2;
...
/*
* NET Generic infrastructure for INET connection oriented protocols.
*
* Definitions for inet_connection_sock
*
* Authors: Many people, see the TCP sources
*
* From code originally in TCP
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _INET_CONNECTION_SOCK_H
#define _INET_CONNECTION_SOCK_H
#include <linux/ip.h>
#include <linux/timer.h>
#include <net/request_sock.h>
struct inet_bind_bucket;
struct inet_hashinfo;
/** inet_connection_sock - INET connection oriented sock
*
* @icsk_accept_queue: FIFO of established children
* @icsk_bind_hash: Bind node
* @icsk_timeout: Timeout
* @icsk_retransmit_timer: Resend (no ack)
* @icsk_rto: Retransmit timeout
* @icsk_retransmits: Number of unrecovered [RTO] timeouts
* @icsk_pending: Scheduled timer event
* @icsk_backoff: Backoff
* @icsk_syn_retries: Number of allowed SYN (or equivalent) retries
* @icsk_ack: Delayed ACK control data
*/
struct inet_connection_sock {
	/* inet_sock has to be the first member! */
	struct inet_sock	  icsk_inet;
	struct request_sock_queue icsk_accept_queue;
	struct inet_bind_bucket	  *icsk_bind_hash;
	unsigned long		  icsk_timeout;
	struct timer_list	  icsk_retransmit_timer;
	struct timer_list	  icsk_delack_timer;
	__u32			  icsk_rto;
	__u8			  icsk_retransmits;
	__u8			  icsk_pending;
	__u8			  icsk_backoff;
	__u8			  icsk_syn_retries;
	struct {
		__u8		  pending;	 /* ACK is pending			   */
		__u8		  quick;	 /* Scheduled number of quick acks	   */
		__u8		  pingpong;	 /* The session is interactive		   */
		__u8		  blocked;	 /* Delayed ACK was blocked by socket lock */
		__u32		  ato;		 /* Predicted tick of soft clock	   */
		unsigned long	  timeout;	 /* Currently scheduled timeout		   */
		__u32		  lrcvtime;	 /* timestamp of last received data packet */
		__u16		  last_seg_size; /* Size of last incoming segment	   */
		__u16		  rcv_mss;	 /* MSS used for delayed ACK decisions	   */
	} icsk_ack;
};

static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
{
	return (struct inet_connection_sock *)sk;
}

extern void inet_csk_init_xmit_timers(struct sock *sk,
				      void (*retransmit_handler)(unsigned long),
				      void (*delack_handler)(unsigned long),
				      void (*keepalive_handler)(unsigned long));
extern void inet_csk_clear_xmit_timers(struct sock *sk);

extern struct request_sock *inet_csk_search_req(const struct sock *sk,
						struct request_sock ***prevp,
						const __u16 rport,
						const __u32 raddr,
						const __u32 laddr);
extern int inet_csk_get_port(struct inet_hashinfo *hashinfo,
			     struct sock *sk, unsigned short snum);

extern struct dst_entry *inet_csk_route_req(struct sock *sk,
					    const struct request_sock *req);

#endif /* _INET_CONNECTION_SOCK_H */
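
icsk_accept_queue and the keepalive-timer helpers above carry a simple invariant, spelled out by the inet_csk_reqsk_queue_added()/_removed() wrappers later in this diff: the listen-side timer (reused as the SYN-ACK retransmit timer) runs only while at least one connection request is pending. A self-contained model of that bookkeeping, with stand-in types in place of the kernel ones:

    /* Model of the accept-queue bookkeeping: the timer is armed on the
     * 0 -> 1 transition and stopped on the 1 -> 0 transition.
     * Stand-in types; not the kernel's request_sock_queue. */
    #include <stdbool.h>
    #include <stdio.h>

    struct request_sock_queue { int qlen; };

    static bool timer_armed;

    static void reset_keepalive_timer(unsigned long timeout)
    {
            timer_armed = true;
            printf("timer armed for %lu jiffies\n", timeout);
    }

    static void delete_keepalive_timer(void)
    {
            timer_armed = false;
            printf("timer stopped\n");
    }

    /* mirrors inet_csk_reqsk_queue_added(): arm timer on first request */
    static void reqsk_queue_added(struct request_sock_queue *q, unsigned long timeout)
    {
            if (q->qlen++ == 0)
                    reset_keepalive_timer(timeout);
    }

    /* mirrors inet_csk_reqsk_queue_removed(): stop timer on last request */
    static void reqsk_queue_removed(struct request_sock_queue *q)
    {
            if (--q->qlen == 0)
                    delete_keepalive_timer();
    }

    int main(void)
    {
            struct request_sock_queue q = { 0 };

            reqsk_queue_added(&q, 3000);    /* first SYN: timer armed  */
            reqsk_queue_added(&q, 3000);    /* second SYN: no-op       */
            reqsk_queue_removed(&q);        /* one accepted            */
            reqsk_queue_removed(&q);        /* last one: timer stopped */
            return 0;
    }
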
@@ -17,7 +17,6 @@
 #include <linux/config.h>
 #include <linux/interrupt.h>
-#include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/list.h>
 #include <linux/slab.h>
@@ -26,6 +25,7 @@
 #include <linux/types.h>
 #include <linux/wait.h>
 
+#include <net/inet_connection_sock.h>
 #include <net/sock.h>
 #include <net/tcp_states.h>
 
@@ -185,9 +185,9 @@ static inline void __inet_inherit_port(struct inet_hashinfo *table,
 	struct inet_bind_bucket *tb;
 
 	spin_lock(&head->lock);
-	tb = inet_sk(sk)->bind_hash;
+	tb = inet_csk(sk)->icsk_bind_hash;
 	sk_add_bind_node(child, &tb->owners);
-	inet_sk(child)->bind_hash = tb;
+	inet_csk(child)->icsk_bind_hash = tb;
 	spin_unlock(&head->lock);
 }
...
@@ -224,17 +224,17 @@ static inline int reqsk_queue_added(struct request_sock_queue *queue)
 	return prev_qlen;
 }
 
-static inline int reqsk_queue_len(struct request_sock_queue *queue)
+static inline int reqsk_queue_len(const struct request_sock_queue *queue)
 {
 	return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
 }
 
-static inline int reqsk_queue_len_young(struct request_sock_queue *queue)
+static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
 {
 	return queue->listen_opt->qlen_young;
 }
 
-static inline int reqsk_queue_is_full(struct request_sock_queue *queue)
+static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
 {
 	return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
 }
...
@@ -493,9 +493,6 @@ extern int sk_wait_data(struct sock *sk, long *timeo);
 
 struct request_sock_ops;
 
-/* Here is the right place to enable sock refcounting debugging */
-//#define SOCK_REFCNT_DEBUG
-
 /* Networking protocol blocks we attach to sockets.
  * socket layer -> transport layer interface
  * transport -> network interface is defined by struct inet_proto
...
@@ -19,10 +19,11 @@
 #define _TCP_H
 
 #define TCP_DEBUG 1
+#define INET_CSK_DEBUG 1
 #define FASTRETRANS_DEBUG 1
 
 /* Cancel timers, when they are not required. */
-#undef TCP_CLEAR_TIMERS
+#undef INET_CSK_CLEAR_TIMERS
 
 #include <linux/config.h>
 #include <linux/list.h>
@@ -205,10 +206,10 @@ extern void tcp_tw_deschedule(struct inet_timewait_sock *tw);
 #define TCPOLEN_SACK_BASE_ALIGNED	4
 #define TCPOLEN_SACK_PERBLOCK		8
 
-#define TCP_TIME_RETRANS	1	/* Retransmit timer */
-#define TCP_TIME_DACK		2	/* Delayed ack timer */
-#define TCP_TIME_PROBE0		3	/* Zero window probe timer */
-#define TCP_TIME_KEEPOPEN	4	/* Keepalive timer */
+#define ICSK_TIME_RETRANS	1	/* Retransmit timer */
+#define ICSK_TIME_DACK		2	/* Delayed ack timer */
+#define ICSK_TIME_PROBE0	3	/* Zero window probe timer */
+#define ICSK_TIME_KEEPOPEN	4	/* Keepalive timer */
 
 /* Flags in tp->nonagle */
 #define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
@@ -257,9 +258,9 @@ extern atomic_t tcp_sockets_allocated;
 extern int tcp_memory_pressure;
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-#define TCP_INET_FAMILY(fam) ((fam) == AF_INET)
+#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
 #else
-#define TCP_INET_FAMILY(fam) 1
+#define AF_INET_FAMILY(fam) 1
 #endif
 
 /*
@@ -372,41 +373,42 @@ extern int tcp_rcv_established(struct sock *sk,
 extern void tcp_rcv_space_adjust(struct sock *sk);
 
-enum tcp_ack_state_t
-{
-	TCP_ACK_SCHED = 1,
-	TCP_ACK_TIMER = 2,
-	TCP_ACK_PUSHED= 4
+enum inet_csk_ack_state_t {
+	ICSK_ACK_SCHED	= 1,
+	ICSK_ACK_TIMER  = 2,
+	ICSK_ACK_PUSHED = 4
 };
 
-static inline void tcp_schedule_ack(struct tcp_sock *tp)
+static inline void inet_csk_schedule_ack(struct sock *sk)
 {
-	tp->ack.pending |= TCP_ACK_SCHED;
+	inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED;
 }
 
-static inline int tcp_ack_scheduled(struct tcp_sock *tp)
+static inline int inet_csk_ack_scheduled(const struct sock *sk)
 {
-	return tp->ack.pending&TCP_ACK_SCHED;
+	return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED;
 }
 
-static __inline__ void tcp_dec_quickack_mode(struct tcp_sock *tp, unsigned int pkts)
+static inline void tcp_dec_quickack_mode(struct sock *sk,
+					 const unsigned int pkts)
 {
-	if (tp->ack.quick) {
-		if (pkts >= tp->ack.quick) {
-			tp->ack.quick = 0;
+	struct inet_connection_sock *icsk = inet_csk(sk);
+
+	if (icsk->icsk_ack.quick) {
+		if (pkts >= icsk->icsk_ack.quick) {
+			icsk->icsk_ack.quick = 0;
 			/* Leaving quickack mode we deflate ATO. */
-			tp->ack.ato = TCP_ATO_MIN;
+			icsk->icsk_ack.ato = TCP_ATO_MIN;
 		} else
-			tp->ack.quick -= pkts;
+			icsk->icsk_ack.quick -= pkts;
 	}
 }
 
-extern void tcp_enter_quickack_mode(struct tcp_sock *tp);
+extern void tcp_enter_quickack_mode(struct sock *sk);
 
-static __inline__ void tcp_delack_init(struct tcp_sock *tp)
+static inline void inet_csk_delack_init(struct sock *sk)
 {
-	memset(&tp->ack, 0, sizeof(tp->ack));
+	memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
 }
 
 static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
@@ -440,7 +442,7 @@ extern void tcp_update_metrics(struct sock *sk);
 
 extern void tcp_close(struct sock *sk,
 		      long timeout);
-extern struct sock * tcp_accept(struct sock *sk, int flags, int *err);
+extern struct sock * inet_csk_accept(struct sock *sk, int flags, int *err);
 extern unsigned int tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait);
 
 extern int tcp_getsockopt(struct sock *sk, int level,
@@ -534,15 +536,18 @@ extern void tcp_cwnd_application_limited(struct sock *sk);
 
 /* tcp_timer.c */
 extern void tcp_init_xmit_timers(struct sock *);
-extern void tcp_clear_xmit_timers(struct sock *);
+static inline void tcp_clear_xmit_timers(struct sock *sk)
+{
+	inet_csk_clear_xmit_timers(sk);
+}
 
-extern void tcp_delete_keepalive_timer(struct sock *);
-extern void tcp_reset_keepalive_timer(struct sock *, unsigned long);
+extern void inet_csk_delete_keepalive_timer(struct sock *sk);
+extern void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
 extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
 extern unsigned int tcp_current_mss(struct sock *sk, int large);
 
-#ifdef TCP_DEBUG
-extern const char tcp_timer_bug_msg[];
+#ifdef INET_CSK_DEBUG
+extern const char inet_csk_timer_bug_msg[];
 #endif
 
 /* tcp_diag.c */
@@ -554,70 +559,58 @@ typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
 extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 			 sk_read_actor_t recv_actor);
 
-static inline void tcp_clear_xmit_timer(struct sock *sk, int what)
+static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	switch (what) {
-	case TCP_TIME_RETRANS:
-	case TCP_TIME_PROBE0:
-		tp->pending = 0;
-#ifdef TCP_CLEAR_TIMERS
-		sk_stop_timer(sk, &tp->retransmit_timer);
-#endif
-		break;
-	case TCP_TIME_DACK:
-		tp->ack.blocked = 0;
-		tp->ack.pending = 0;
-#ifdef TCP_CLEAR_TIMERS
-		sk_stop_timer(sk, &tp->delack_timer);
-#endif
-		break;
-	default:
-#ifdef TCP_DEBUG
-		printk(tcp_timer_bug_msg);
-#endif
-		return;
-	};
+	struct inet_connection_sock *icsk = inet_csk(sk);
 
+	if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
+		icsk->icsk_pending = 0;
+#ifdef INET_CSK_CLEAR_TIMERS
+		sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
+#endif
+	} else if (what == ICSK_TIME_DACK) {
+		icsk->icsk_ack.blocked = icsk->icsk_ack.pending = 0;
+#ifdef INET_CSK_CLEAR_TIMERS
+		sk_stop_timer(sk, &icsk->icsk_delack_timer);
+#endif
+	}
+#ifdef INET_CSK_DEBUG
+	else {
+		pr_debug(inet_csk_timer_bug_msg);
+	}
+#endif
 }
 
 /*
  *	Reset the retransmission timer
  */
-static inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when)
+static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
+					     unsigned long when)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct inet_connection_sock *icsk = inet_csk(sk);
 
 	if (when > TCP_RTO_MAX) {
-#ifdef TCP_DEBUG
-		printk(KERN_DEBUG "reset_xmit_timer sk=%p %d when=0x%lx, caller=%p\n", sk, what, when, current_text_addr());
+#ifdef INET_CSK_DEBUG
+		pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n",
+			 sk, what, when, current_text_addr());
#endif
 		when = TCP_RTO_MAX;
 	}
 
-	switch (what) {
-	case TCP_TIME_RETRANS:
-	case TCP_TIME_PROBE0:
-		tp->pending = what;
-		tp->timeout = jiffies+when;
-		sk_reset_timer(sk, &tp->retransmit_timer, tp->timeout);
-		break;
-
-	case TCP_TIME_DACK:
-		tp->ack.pending |= TCP_ACK_TIMER;
-		tp->ack.timeout = jiffies+when;
-		sk_reset_timer(sk, &tp->delack_timer, tp->ack.timeout);
-		break;
-
-	default:
-#ifdef TCP_DEBUG
-		printk(tcp_timer_bug_msg);
-#endif
-		return;
-	};
+	if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
+		icsk->icsk_pending = what;
+		icsk->icsk_timeout = jiffies + when;
+		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
+	} else if (what == ICSK_TIME_DACK) {
+		icsk->icsk_ack.pending |= ICSK_ACK_TIMER;
+		icsk->icsk_ack.timeout = jiffies + when;
+		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
+	}
+#ifdef INET_CSK_DEBUG
+	else {
+		pr_debug(inet_csk_timer_bug_msg);
+	}
+#endif
 }
 /* Initialize RCV_MSS value.
@@ -637,7 +630,7 @@ static inline void tcp_initialize_rcv_mss(struct sock *sk)
 	hint = min(hint, TCP_MIN_RCVMSS);
 	hint = max(hint, TCP_MIN_MSS);
 
-	tp->ack.rcv_mss = hint;
+	inet_csk(sk)->icsk_ack.rcv_mss = hint;
 }
 
 static __inline__ void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
@@ -772,7 +765,7 @@ static inline void tcp_packets_out_inc(struct sock *sk,
 	tp->packets_out += tcp_skb_pcount(skb);
 	if (!orig)
-		tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
+		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto);
 }
 
 static inline void tcp_packets_out_dec(struct tcp_sock *tp,
@@ -939,8 +932,9 @@ static __inline__ void tcp_minshall_update(struct tcp_sock *tp, int mss,
 
 static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
 {
-	if (!tp->packets_out && !tp->pending)
-		tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, tp->rto);
+	const struct inet_connection_sock *icsk = inet_csk(sk);
+
+	if (!tp->packets_out && !icsk->icsk_pending)
+		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, icsk->icsk_rto);
 }
 
 static __inline__ void tcp_push_pending_frames(struct sock *sk,
@@ -1021,8 +1015,9 @@ static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 			tp->ucopy.memory = 0;
 		} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
 			wake_up_interruptible(sk->sk_sleep);
-			if (!tcp_ack_scheduled(tp))
-				tcp_reset_xmit_timer(sk, TCP_TIME_DACK, (3*TCP_RTO_MIN)/4);
+			if (!inet_csk_ack_scheduled(sk))
+				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+							  (3 * TCP_RTO_MIN) / 4);
 		}
 		return 1;
 	}
@@ -1055,7 +1050,7 @@ static __inline__ void tcp_set_state(struct sock *sk, int state)
 			TCP_INC_STATS(TCP_MIB_ESTABRESETS);
 
 		sk->sk_prot->unhash(sk);
-		if (inet_sk(sk)->bind_hash &&
+		if (inet_csk(sk)->icsk_bind_hash &&
 		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
 			inet_put_port(&tcp_hashinfo, sk);
 		/* fall through */
@@ -1186,51 +1181,55 @@ static inline int tcp_full_space(const struct sock *sk)
 	return tcp_win_from_space(sk->sk_rcvbuf);
 }
 
-static inline void tcp_acceptq_queue(struct sock *sk, struct request_sock *req,
-				     struct sock *child)
+static inline void inet_csk_reqsk_queue_add(struct sock *sk,
+					    struct request_sock *req,
+					    struct sock *child)
 {
-	reqsk_queue_add(&tcp_sk(sk)->accept_queue, req, sk, child);
+	reqsk_queue_add(&inet_csk(sk)->icsk_accept_queue, req, sk, child);
 }
 
-static inline void
-tcp_synq_removed(struct sock *sk, struct request_sock *req)
+static inline void inet_csk_reqsk_queue_removed(struct sock *sk,
+						struct request_sock *req)
 {
-	if (reqsk_queue_removed(&tcp_sk(sk)->accept_queue, req) == 0)
-		tcp_delete_keepalive_timer(sk);
+	if (reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req) == 0)
+		inet_csk_delete_keepalive_timer(sk);
 }
 
-static inline void tcp_synq_added(struct sock *sk)
+static inline void inet_csk_reqsk_queue_added(struct sock *sk,
+					      const unsigned long timeout)
 {
-	if (reqsk_queue_added(&tcp_sk(sk)->accept_queue) == 0)
-		tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
+	if (reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue) == 0)
+		inet_csk_reset_keepalive_timer(sk, timeout);
 }
 
-static inline int tcp_synq_len(struct sock *sk)
+static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
 {
-	return reqsk_queue_len(&tcp_sk(sk)->accept_queue);
+	return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);
 }
 
-static inline int tcp_synq_young(struct sock *sk)
+static inline int inet_csk_reqsk_queue_young(const struct sock *sk)
 {
-	return reqsk_queue_len_young(&tcp_sk(sk)->accept_queue);
+	return reqsk_queue_len_young(&inet_csk(sk)->icsk_accept_queue);
 }
 
-static inline int tcp_synq_is_full(struct sock *sk)
+static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
 {
-	return reqsk_queue_is_full(&tcp_sk(sk)->accept_queue);
+	return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue);
 }
 
-static inline void tcp_synq_unlink(struct tcp_sock *tp, struct request_sock *req,
-				   struct request_sock **prev)
+static inline void inet_csk_reqsk_queue_unlink(struct sock *sk,
+					       struct request_sock *req,
+					       struct request_sock **prev)
 {
-	reqsk_queue_unlink(&tp->accept_queue, req, prev);
+	reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req, prev);
 }
 
-static inline void tcp_synq_drop(struct sock *sk, struct request_sock *req,
-				 struct request_sock **prev)
+static inline void inet_csk_reqsk_queue_drop(struct sock *sk,
+					     struct request_sock *req,
+					     struct request_sock **prev)
 {
-	tcp_synq_unlink(tcp_sk(sk), req, prev);
-	tcp_synq_removed(sk, req);
+	inet_csk_reqsk_queue_unlink(sk, req, prev);
+	inet_csk_reqsk_queue_removed(sk, req);
 	reqsk_free(req);
 }
@@ -1265,12 +1264,13 @@ static inline int keepalive_time_when(const struct tcp_sock *tp)
 	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
 }
 
-static inline int tcp_fin_time(const struct tcp_sock *tp)
+static inline int tcp_fin_time(const struct sock *sk)
 {
-	int fin_timeout = tp->linger2 ? : sysctl_tcp_fin_timeout;
+	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
+	const int rto = inet_csk(sk)->icsk_rto;
 
-	if (fin_timeout < (tp->rto<<2) - (tp->rto>>1))
-		fin_timeout = (tp->rto<<2) - (tp->rto>>1);
+	if (fin_timeout < (rto << 2) - (rto >> 1))
+		fin_timeout = (rto << 2) - (rto >> 1);
 	return fin_timeout;
 }
...
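
Note how inet_csk_reset_xmit_timer() above multiplexes two events onto one timer: ICSK_TIME_RETRANS and ICSK_TIME_PROBE0 share icsk_retransmit_timer, with icsk_pending recording which event currently owns the slot, while delayed ACKs get their own icsk_delack_timer. A self-contained model of that slot sharing, using stand-in types and the ICSK_TIME_* values from the diff:

    /* Model of the timer multiplexing in inet_csk_reset_xmit_timer():
     * RETRANS and PROBE0 share one timer slot; DACK has its own.
     * Stand-in struct, not the kernel's inet_connection_sock. */
    #include <stdio.h>

    #define ICSK_TIME_RETRANS  1
    #define ICSK_TIME_DACK     2
    #define ICSK_TIME_PROBE0   3

    struct icsk_model {
            int           icsk_pending;      /* which event owns the rexmit slot */
            unsigned long icsk_timeout;
            int           icsk_ack_pending;  /* delayed-ACK timer state bits     */
            unsigned long icsk_ack_timeout;
    };

    static void reset_xmit_timer(struct icsk_model *icsk, int what,
                                 unsigned long now, unsigned long when)
    {
            if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
                    icsk->icsk_pending = what;       /* overwrites previous owner */
                    icsk->icsk_timeout = now + when;
            } else if (what == ICSK_TIME_DACK) {
                    icsk->icsk_ack_pending |= 1;     /* models ICSK_ACK_TIMER     */
                    icsk->icsk_ack_timeout = now + when;
            }
    }

    int main(void)
    {
            struct icsk_model icsk = { 0, 0, 0, 0 };

            reset_xmit_timer(&icsk, ICSK_TIME_RETRANS, 1000, 200);
            reset_xmit_timer(&icsk, ICSK_TIME_PROBE0, 1000, 300); /* steals slot */
            printf("pending=%d timeout=%lu\n", icsk.icsk_pending, icsk.icsk_timeout);
            return 0;
    }
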
@@ -88,7 +88,7 @@ static inline void TCP_ECN_check_ce(struct tcp_sock *tp, struct sk_buff *skb)
 		 * it is surely retransmit. It is not in ECN RFC,
 		 * but Linux follows this rule. */
 		else if (INET_ECN_is_not_ect((TCP_SKB_CB(skb)->flags)))
-			tcp_enter_quickack_mode(tp);
+			tcp_enter_quickack_mode((struct sock *)tp);
 	}
 }
...
@@ -19,6 +19,7 @@
 #include <linux/slab.h>
 #include <linux/wait.h>
 
+#include <net/inet_connection_sock.h>
 #include <net/inet_hashtables.h>
 
 /*
@@ -56,10 +57,9 @@ void inet_bind_bucket_destroy(kmem_cache_t *cachep, struct inet_bind_bucket *tb)
 void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
 		    const unsigned short snum)
 {
-	struct inet_sock *inet = inet_sk(sk);
-
-	inet->num = snum;
+	inet_sk(sk)->num = snum;
 	sk_add_bind_node(sk, &tb->owners);
-	inet->bind_hash = tb;
+	inet_csk(sk)->icsk_bind_hash = tb;
 }
 
 EXPORT_SYMBOL(inet_bind_hash);
@@ -69,16 +69,15 @@ EXPORT_SYMBOL(inet_bind_hash);
  */
 static void __inet_put_port(struct inet_hashinfo *hashinfo, struct sock *sk)
 {
-	struct inet_sock *inet = inet_sk(sk);
-	const int bhash = inet_bhashfn(inet->num, hashinfo->bhash_size);
+	const int bhash = inet_bhashfn(inet_sk(sk)->num, hashinfo->bhash_size);
 	struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
 	struct inet_bind_bucket *tb;
 
 	spin_lock(&head->lock);
-	tb = inet->bind_hash;
+	tb = inet_csk(sk)->icsk_bind_hash;
 	__sk_del_bind_node(sk);
-	inet->bind_hash = NULL;
-	inet->num = 0;
+	inet_csk(sk)->icsk_bind_hash = NULL;
+	inet_sk(sk)->num = 0;
 	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
 	spin_unlock(&head->lock);
 }
...
@@ -56,6 +56,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 			   struct inet_hashinfo *hashinfo)
 {
 	const struct inet_sock *inet = inet_sk(sk);
+	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct inet_ehash_bucket *ehead = &hashinfo->ehash[sk->sk_hashent];
 	struct inet_bind_hashbucket *bhead;
 
 	/* Step 1: Put TW into bind hash. Original socket stays there too.
@@ -64,8 +65,8 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 	 */
 	bhead = &hashinfo->bhash[inet_bhashfn(inet->num, hashinfo->bhash_size)];
 	spin_lock(&bhead->lock);
-	tw->tw_tb = inet->bind_hash;
-	BUG_TRAP(inet->bind_hash);
+	tw->tw_tb = icsk->icsk_bind_hash;
+	BUG_TRAP(icsk->icsk_bind_hash);
 	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
 	spin_unlock(&bhead->lock);
...
@@ -180,7 +180,7 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
 	child = tp->af_specific->syn_recv_sock(sk, skb, req, dst);
 	if (child)
-		tcp_acceptq_queue(sk, req, child);
+		inet_csk_reqsk_queue_add(sk, req, child);
 	else
 		reqsk_free(req);
...
@@ -313,7 +313,7 @@ EXPORT_SYMBOL(tcp_enter_memory_pressure);
 static __inline__ unsigned int tcp_listen_poll(struct sock *sk,
 					       poll_table *wait)
 {
-	return !reqsk_queue_empty(&tcp_sk(sk)->accept_queue) ? (POLLIN | POLLRDNORM) : 0;
+	return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ? (POLLIN | POLLRDNORM) : 0;
 }
 
 /*
@@ -458,15 +458,15 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 int tcp_listen_start(struct sock *sk)
 {
 	struct inet_sock *inet = inet_sk(sk);
-	struct tcp_sock *tp = tcp_sk(sk);
-	int rc = reqsk_queue_alloc(&tp->accept_queue, TCP_SYNQ_HSIZE);
+	struct inet_connection_sock *icsk = inet_csk(sk);
+	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, TCP_SYNQ_HSIZE);
 
 	if (rc != 0)
 		return rc;
 
 	sk->sk_max_ack_backlog = 0;
 	sk->sk_ack_backlog = 0;
-	tcp_delack_init(tp);
+	inet_csk_delack_init(sk);
 
 	/* There is race window here: we announce ourselves listening,
 	 * but this transition is still not validated by get_port().
@@ -484,7 +484,7 @@ int tcp_listen_start(struct sock *sk)
 	}
 
 	sk->sk_state = TCP_CLOSE;
-	__reqsk_queue_destroy(&tp->accept_queue);
+	__reqsk_queue_destroy(&icsk->icsk_accept_queue);
 	return -EADDRINUSE;
 }
 
@@ -495,14 +495,14 @@ int tcp_listen_start(struct sock *sk)
 static void tcp_listen_stop (struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct request_sock *acc_req;
 	struct request_sock *req;
 
-	tcp_delete_keepalive_timer(sk);
+	inet_csk_delete_keepalive_timer(sk);
 
 	/* make all the listen_opt local to us */
-	acc_req = reqsk_queue_yank_acceptq(&tp->accept_queue);
+	acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue);
 
 	/* Following specs, it would be better either to send FIN
 	 * (and enter FIN-WAIT-1, it is normal close)
@@ -512,7 +512,7 @@ static void tcp_listen_stop (struct sock *sk)
 	 * To be honest, we are not able to make either
 	 * of the variants now.			--ANK
 	 */
-	reqsk_queue_destroy(&tp->accept_queue);
+	reqsk_queue_destroy(&icsk->icsk_accept_queue);
 
 	while ((req = acc_req) != NULL) {
 		struct sock *child = req->sk;
@@ -1039,20 +1039,21 @@ static void cleanup_rbuf(struct sock *sk, int copied)
 	BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
 #endif
 
-	if (tcp_ack_scheduled(tp)) {
+	if (inet_csk_ack_scheduled(sk)) {
+		const struct inet_connection_sock *icsk = inet_csk(sk);
 		   /* Delayed ACKs frequently hit locked sockets during bulk
 		    * receive. */
-		if (tp->ack.blocked ||
+		if (icsk->icsk_ack.blocked ||
 		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
-		    tp->rcv_nxt - tp->rcv_wup > tp->ack.rcv_mss ||
+		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
 		    /*
 		     * If this read emptied read buffer, we send ACK, if
 		     * connection is not bidirectional, user drained
 		     * receive buffer and there was a small segment
 		     * in queue.
 		     */
-		    (copied > 0 && (tp->ack.pending & TCP_ACK_PUSHED) &&
-		     !tp->ack.pingpong && !atomic_read(&sk->sk_rmem_alloc)))
+		    (copied > 0 && (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
+		     !icsk->icsk_ack.pingpong && !atomic_read(&sk->sk_rmem_alloc)))
 			time_to_ack = 1;
 	}
 
@@ -1569,7 +1570,7 @@ void tcp_destroy_sock(struct sock *sk)
 	BUG_TRAP(sk_unhashed(sk));
 
 	/* If it has not 0 inet_sk(sk)->num, it must be bound */
-	BUG_TRAP(!inet_sk(sk)->num || inet_sk(sk)->bind_hash);
+	BUG_TRAP(!inet_sk(sk)->num || inet_csk(sk)->icsk_bind_hash);
 
 	sk->sk_prot->destroy(sk);
 
@@ -1698,10 +1699,10 @@ void tcp_close(struct sock *sk, long timeout)
 			tcp_send_active_reset(sk, GFP_ATOMIC);
 			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
 		} else {
-			int tmo = tcp_fin_time(tp);
+			const int tmo = tcp_fin_time(sk);
 
 			if (tmo > TCP_TIMEWAIT_LEN) {
-				tcp_reset_keepalive_timer(sk, tcp_fin_time(tp));
+				inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk));
 			} else {
 				atomic_inc(&tcp_orphan_count);
 				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
@@ -1746,6 +1747,7 @@ static inline int tcp_need_reset(int state)
 int tcp_disconnect(struct sock *sk, int flags)
 {
 	struct inet_sock *inet = inet_sk(sk);
+	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	int err = 0;
 	int old_state = sk->sk_state;
@@ -1782,7 +1784,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 	tp->srtt = 0;
 	if ((tp->write_seq += tp->max_window + 2) == 0)
 		tp->write_seq = 1;
-	tp->backoff = 0;
+	icsk->icsk_backoff = 0;
 	tp->snd_cwnd = 2;
 	tp->probes_out = 0;
 	tp->packets_out = 0;
@@ -1790,13 +1792,13 @@ int tcp_disconnect(struct sock *sk, int flags)
 	tp->snd_cwnd_cnt = 0;
 	tcp_set_ca_state(tp, TCP_CA_Open);
 	tcp_clear_retrans(tp);
-	tcp_delack_init(tp);
+	inet_csk_delack_init(sk);
 	sk->sk_send_head = NULL;
 	tp->rx_opt.saw_tstamp = 0;
 	tcp_sack_reset(&tp->rx_opt);
 	__sk_dst_reset(sk);
 
-	BUG_TRAP(!inet->num || inet->bind_hash);
+	BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
 
 	sk->sk_error_report(sk);
 	return err;
@@ -1808,7 +1810,7 @@ int tcp_disconnect(struct sock *sk, int flags)
  */
 static int wait_for_connect(struct sock *sk, long timeo)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct inet_connection_sock *icsk = inet_csk(sk);
 	DEFINE_WAIT(wait);
 	int err;
 
@@ -1830,11 +1832,11 @@ static int wait_for_connect(struct sock *sk, long timeo)
 		prepare_to_wait_exclusive(sk->sk_sleep, &wait,
 					  TASK_INTERRUPTIBLE);
 		release_sock(sk);
-		if (reqsk_queue_empty(&tp->accept_queue))
+		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
 			timeo = schedule_timeout(timeo);
 		lock_sock(sk);
 		err = 0;
-		if (!reqsk_queue_empty(&tp->accept_queue))
+		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
 			break;
 		err = -EINVAL;
 		if (sk->sk_state != TCP_LISTEN)
@@ -1854,9 +1856,9 @@ static int wait_for_connect(struct sock *sk, long timeo)
  *	This will accept the next outstanding connection.
  */
 
-struct sock *tcp_accept(struct sock *sk, int flags, int *err)
+struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct sock *newsk;
 	int error;
 
@@ -1870,7 +1872,7 @@ struct sock *tcp_accept(struct sock *sk, int flags, int *err)
 		goto out_err;
 
 	/* Find already established connection */
-	if (reqsk_queue_empty(&tp->accept_queue)) {
+	if (reqsk_queue_empty(&icsk->icsk_accept_queue)) {
 		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
 
 		/* If this is a non blocking socket don't sleep */
@@ -1883,7 +1885,7 @@ struct sock *tcp_accept(struct sock *sk, int flags, int *err)
 			goto out_err;
 	}
 
-	newsk = reqsk_queue_get_child(&tp->accept_queue, sk);
+	newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
 	BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
 out:
 	release_sock(sk);
@@ -1901,6 +1903,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
 		   int optlen)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	struct inet_connection_sock *icsk = inet_csk(sk);
 	int val;
 	int err = 0;
 
@@ -1999,7 +2002,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
 				elapsed = tp->keepalive_time - elapsed;
 			else
 				elapsed = 0;
-			tcp_reset_keepalive_timer(sk, elapsed);
+			inet_csk_reset_keepalive_timer(sk, elapsed);
 		}
 	}
 		break;
@@ -2019,7 +2022,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
 		if (val < 1 || val > MAX_TCP_SYNCNT)
 			err = -EINVAL;
 		else
-			tp->syn_retries = val;
+			icsk->icsk_syn_retries = val;
 		break;
 
 	case TCP_LINGER2:
@@ -2058,16 +2061,16 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
 
 	case TCP_QUICKACK:
 		if (!val) {
-			tp->ack.pingpong = 1;
+			icsk->icsk_ack.pingpong = 1;
 		} else {
-			tp->ack.pingpong = 0;
+			icsk->icsk_ack.pingpong = 0;
 			if ((1 << sk->sk_state) &
 			    (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
-			    tcp_ack_scheduled(tp)) {
-				tp->ack.pending |= TCP_ACK_PUSHED;
+			    inet_csk_ack_scheduled(sk)) {
+				icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
 				cleanup_rbuf(sk, 1);
 				if (!(val & 1))
-					tp->ack.pingpong = 1;
+					icsk->icsk_ack.pingpong = 1;
 			}
 		}
 		break;
@@ -2084,15 +2087,16 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
 void tcp_get_info(struct sock *sk, struct tcp_info *info)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	const struct inet_connection_sock *icsk = inet_csk(sk);
 	u32 now = tcp_time_stamp;
 
 	memset(info, 0, sizeof(*info));
 
 	info->tcpi_state = sk->sk_state;
 	info->tcpi_ca_state = tp->ca_state;
-	info->tcpi_retransmits = tp->retransmits;
+	info->tcpi_retransmits = icsk->icsk_retransmits;
 	info->tcpi_probes = tp->probes_out;
-	info->tcpi_backoff = tp->backoff;
+	info->tcpi_backoff = icsk->icsk_backoff;
 
 	if (tp->rx_opt.tstamp_ok)
 		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
@@ -2107,10 +2111,10 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 	if (tp->ecn_flags&TCP_ECN_OK)
 		info->tcpi_options |= TCPI_OPT_ECN;
 
-	info->tcpi_rto = jiffies_to_usecs(tp->rto);
-	info->tcpi_ato = jiffies_to_usecs(tp->ack.ato);
+	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
+	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
 	info->tcpi_snd_mss = tp->mss_cache;
-	info->tcpi_rcv_mss = tp->ack.rcv_mss;
+	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
 
 	info->tcpi_unacked = tp->packets_out;
 	info->tcpi_sacked = tp->sacked_out;
@@ -2119,7 +2123,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 	info->tcpi_fackets = tp->fackets_out;
 
 	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
-	info->tcpi_last_data_recv = jiffies_to_msecs(now - tp->ack.lrcvtime);
+	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
 	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
 
 	info->tcpi_pmtu = tp->pmtu_cookie;
@@ -2179,7 +2183,7 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
 		val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
 		break;
 	case TCP_SYNCNT:
-		val = tp->syn_retries ? : sysctl_tcp_syn_retries;
+		val = inet_csk(sk)->icsk_syn_retries ? : sysctl_tcp_syn_retries;
 		break;
 	case TCP_LINGER2:
 		val = tp->linger2;
@@ -2209,7 +2213,7 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
 		return 0;
 	}
 	case TCP_QUICKACK:
-		val = !tp->ack.pingpong;
+		val = !inet_csk(sk)->icsk_ack.pingpong;
 		break;
 
 	case TCP_CONGESTION:
@@ -2340,7 +2344,7 @@ void __init tcp_init(void)
 	tcp_register_congestion_control(&tcp_reno);
 }
 
-EXPORT_SYMBOL(tcp_accept);
+EXPORT_SYMBOL(inet_csk_accept);
 EXPORT_SYMBOL(tcp_close);
 EXPORT_SYMBOL(tcp_destroy_sock);
 EXPORT_SYMBOL(tcp_disconnect);
...
@@ -48,8 +48,9 @@ static struct sock *tcpnl;
 static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
 			int ext, u32 pid, u32 seq, u16 nlmsg_flags)
 {
-	struct inet_sock *inet = inet_sk(sk);
+	const struct inet_sock *inet = inet_sk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
+	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcpdiagmsg *r;
 	struct nlmsghdr  *nlh;
 	struct tcp_info  *info = NULL;
@@ -129,14 +130,14 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
 #define EXPIRES_IN_MS(tmo)  ((tmo-jiffies)*1000+HZ-1)/HZ
 
-	if (tp->pending == TCP_TIME_RETRANS) {
+	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
 		r->tcpdiag_timer = 1;
-		r->tcpdiag_retrans = tp->retransmits;
-		r->tcpdiag_expires = EXPIRES_IN_MS(tp->timeout);
-	} else if (tp->pending == TCP_TIME_PROBE0) {
+		r->tcpdiag_retrans = icsk->icsk_retransmits;
+		r->tcpdiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
+	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
 		r->tcpdiag_timer = 4;
 		r->tcpdiag_retrans = tp->probes_out;
-		r->tcpdiag_expires = EXPIRES_IN_MS(tp->timeout);
+		r->tcpdiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
 	} else if (timer_pending(&sk->sk_timer)) {
 		r->tcpdiag_timer = 2;
 		r->tcpdiag_retrans = tp->probes_out;
@@ -497,7 +498,7 @@ static int tcpdiag_dump_reqs(struct sk_buff *skb, struct sock *sk,
 {
 	struct tcpdiag_entry entry;
 	struct tcpdiagreq *r = NLMSG_DATA(cb->nlh);
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct listen_sock *lopt;
 	struct rtattr *bc = NULL;
 	struct inet_sock *inet = inet_sk(sk);
@@ -513,9 +514,9 @@ static int tcpdiag_dump_reqs(struct sk_buff *skb, struct sock *sk,
 
 	entry.family = sk->sk_family;
 
-	read_lock_bh(&tp->accept_queue.syn_wait_lock);
+	read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 
-	lopt = tp->accept_queue.listen_opt;
+	lopt = icsk->icsk_accept_queue.listen_opt;
 	if (!lopt || !lopt->qlen)
 		goto out;
 
@@ -572,7 +573,7 @@ static int tcpdiag_dump_reqs(struct sk_buff *skb, struct sock *sk,
 	}
 
 out:
-	read_unlock_bh(&tp->accept_queue.syn_wait_lock);
+	read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 
 	return err;
 }
...
@@ -114,20 +114,21 @@ int sysctl_tcp_moderate_rcvbuf = 1;
 /* Adapt the MSS value used to make delayed ack decision to the
  * real world.
  */
-static inline void tcp_measure_rcv_mss(struct tcp_sock *tp,
-				       struct sk_buff *skb)
+static inline void tcp_measure_rcv_mss(struct sock *sk,
+				       const struct sk_buff *skb)
 {
-	unsigned int len, lss;
+	struct inet_connection_sock *icsk = inet_csk(sk);
+	const unsigned int lss = icsk->icsk_ack.last_seg_size;
+	unsigned int len;
 
-	lss = tp->ack.last_seg_size;
-	tp->ack.last_seg_size = 0;
+	icsk->icsk_ack.last_seg_size = 0;
 
 	/* skb->len may jitter because of SACKs, even if peer
 	 * sends good full-sized frames.
 	 */
 	len = skb->len;
-	if (len >= tp->ack.rcv_mss) {
-		tp->ack.rcv_mss = len;
+	if (len >= icsk->icsk_ack.rcv_mss) {
+		icsk->icsk_ack.rcv_mss = len;
 	} else {
 		/* Otherwise, we make more careful check taking into account,
 		 * that SACKs block is variable.
@@ -147,41 +148,44 @@ static inline void tcp_measure_rcv_mss(struct tcp_sock *tp,
 			 * tcp header plus fixed timestamp option length.
 			 * Resulting "len" is MSS free of SACK jitter.
 			 */
-			len -= tp->tcp_header_len;
-			tp->ack.last_seg_size = len;
+			len -= tcp_sk(sk)->tcp_header_len;
+			icsk->icsk_ack.last_seg_size = len;
 			if (len == lss) {
-				tp->ack.rcv_mss = len;
+				icsk->icsk_ack.rcv_mss = len;
 				return;
 			}
 		}
-		tp->ack.pending |= TCP_ACK_PUSHED;
+		icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
 	}
 }
static void tcp_incr_quickack(struct tcp_sock *tp) static void tcp_incr_quickack(struct sock *sk)
{ {
unsigned quickacks = tp->rcv_wnd/(2*tp->ack.rcv_mss); struct inet_connection_sock *icsk = inet_csk(sk);
unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
if (quickacks==0) if (quickacks==0)
quickacks=2; quickacks=2;
if (quickacks > tp->ack.quick) if (quickacks > icsk->icsk_ack.quick)
tp->ack.quick = min(quickacks, TCP_MAX_QUICKACKS); icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
} }
void tcp_enter_quickack_mode(struct tcp_sock *tp) void tcp_enter_quickack_mode(struct sock *sk)
{ {
tcp_incr_quickack(tp); struct inet_connection_sock *icsk = inet_csk(sk);
tp->ack.pingpong = 0; tcp_incr_quickack(sk);
tp->ack.ato = TCP_ATO_MIN; icsk->icsk_ack.pingpong = 0;
icsk->icsk_ack.ato = TCP_ATO_MIN;
} }
/* Send ACKs quickly, if "quick" count is not exhausted /* Send ACKs quickly, if "quick" count is not exhausted
* and the session is not interactive. * and the session is not interactive.
*/ */
static __inline__ int tcp_in_quickack_mode(struct tcp_sock *tp) static inline int tcp_in_quickack_mode(const struct sock *sk)
{ {
return (tp->ack.quick && !tp->ack.pingpong); const struct inet_connection_sock *icsk = inet_csk(sk);
return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
} }
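
As a worked example of the budget tcp_incr_quickack() computes above: it allows roughly one quick ACK per two MSS-sized segments in the current receive window, with a floor of 2 and a ceiling of TCP_MAX_QUICKACKS. A standalone sketch (the ceiling value 16 is assumed from kernels of this era):

#include <stdio.h>

#define TCP_MAX_QUICKACKS 16U	/* assumed ceiling from kernels of this era */

/* Mirror of tcp_incr_quickack()'s arithmetic: roughly one quick ACK
 * per two received MSS-sized segments in the current window. */
static unsigned int quickack_budget(unsigned int rcv_wnd, unsigned int rcv_mss)
{
	unsigned int quickacks = rcv_wnd / (2 * rcv_mss);

	if (quickacks == 0)
		quickacks = 2;
	return quickacks < TCP_MAX_QUICKACKS ? quickacks : TCP_MAX_QUICKACKS;
}

int main(void)
{
	/* 64 KB window / 1460-byte MSS -> 22, clamped to the ceiling. */
	printf("budget=%u\n", quickack_budget(65535, 1460));
	return 0;
}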
/* Buffer size and advertised window tuning. /* Buffer size and advertised window tuning.
...@@ -224,8 +228,8 @@ static void tcp_fixup_sndbuf(struct sock *sk) ...@@ -224,8 +228,8 @@ static void tcp_fixup_sndbuf(struct sock *sk)
*/ */
/* Slow part of check#2. */ /* Slow part of check#2. */
static int __tcp_grow_window(struct sock *sk, struct tcp_sock *tp, static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp,
struct sk_buff *skb) const struct sk_buff *skb)
{ {
/* Optimize this! */ /* Optimize this! */
int truesize = tcp_win_from_space(skb->truesize)/2; int truesize = tcp_win_from_space(skb->truesize)/2;
...@@ -233,7 +237,7 @@ static int __tcp_grow_window(struct sock *sk, struct tcp_sock *tp, ...@@ -233,7 +237,7 @@ static int __tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
while (tp->rcv_ssthresh <= window) { while (tp->rcv_ssthresh <= window) {
if (truesize <= skb->len) if (truesize <= skb->len)
return 2*tp->ack.rcv_mss; return 2 * inet_csk(sk)->icsk_ack.rcv_mss;
truesize >>= 1; truesize >>= 1;
window >>= 1; window >>= 1;
...@@ -260,7 +264,7 @@ static inline void tcp_grow_window(struct sock *sk, struct tcp_sock *tp, ...@@ -260,7 +264,7 @@ static inline void tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
if (incr) { if (incr) {
tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp); tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp);
tp->ack.quick |= 1; inet_csk(sk)->icsk_ack.quick |= 1;
} }
} }
} }
...@@ -325,7 +329,7 @@ static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp) ...@@ -325,7 +329,7 @@ static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp)
unsigned int app_win = tp->rcv_nxt - tp->copied_seq; unsigned int app_win = tp->rcv_nxt - tp->copied_seq;
int ofo_win = 0; int ofo_win = 0;
tp->ack.quick = 0; inet_csk(sk)->icsk_ack.quick = 0;
skb_queue_walk(&tp->out_of_order_queue, skb) { skb_queue_walk(&tp->out_of_order_queue, skb) {
ofo_win += skb->len; ofo_win += skb->len;
...@@ -346,8 +350,8 @@ static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp) ...@@ -346,8 +350,8 @@ static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp)
app_win += ofo_win; app_win += ofo_win;
if (atomic_read(&sk->sk_rmem_alloc) >= 2 * sk->sk_rcvbuf) if (atomic_read(&sk->sk_rmem_alloc) >= 2 * sk->sk_rcvbuf)
app_win >>= 1; app_win >>= 1;
if (app_win > tp->ack.rcv_mss) if (app_win > inet_csk(sk)->icsk_ack.rcv_mss)
app_win -= tp->ack.rcv_mss; app_win -= inet_csk(sk)->icsk_ack.rcv_mss;
app_win = max(app_win, 2U*tp->advmss); app_win = max(app_win, 2U*tp->advmss);
if (!ofo_win) if (!ofo_win)
...@@ -415,11 +419,12 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp) ...@@ -415,11 +419,12 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
tp->rcv_rtt_est.time = tcp_time_stamp; tp->rcv_rtt_est.time = tcp_time_stamp;
} }
static inline void tcp_rcv_rtt_measure_ts(struct tcp_sock *tp, struct sk_buff *skb) static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, const struct sk_buff *skb)
{ {
struct tcp_sock *tp = tcp_sk(sk);
if (tp->rx_opt.rcv_tsecr && if (tp->rx_opt.rcv_tsecr &&
(TCP_SKB_CB(skb)->end_seq - (TCP_SKB_CB(skb)->end_seq -
TCP_SKB_CB(skb)->seq >= tp->ack.rcv_mss)) TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss))
tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0); tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0);
} }
...@@ -492,41 +497,42 @@ void tcp_rcv_space_adjust(struct sock *sk) ...@@ -492,41 +497,42 @@ void tcp_rcv_space_adjust(struct sock *sk)
*/ */
static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb) static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
{ {
struct inet_connection_sock *icsk = inet_csk(sk);
u32 now; u32 now;
tcp_schedule_ack(tp); inet_csk_schedule_ack(sk);
tcp_measure_rcv_mss(tp, skb); tcp_measure_rcv_mss(sk, skb);
tcp_rcv_rtt_measure(tp); tcp_rcv_rtt_measure(tp);
now = tcp_time_stamp; now = tcp_time_stamp;
if (!tp->ack.ato) { if (!icsk->icsk_ack.ato) {
/* The _first_ data packet received, initialize /* The _first_ data packet received, initialize
* delayed ACK engine. * delayed ACK engine.
*/ */
tcp_incr_quickack(tp); tcp_incr_quickack(sk);
tp->ack.ato = TCP_ATO_MIN; icsk->icsk_ack.ato = TCP_ATO_MIN;
} else { } else {
int m = now - tp->ack.lrcvtime; int m = now - icsk->icsk_ack.lrcvtime;
if (m <= TCP_ATO_MIN/2) { if (m <= TCP_ATO_MIN/2) {
/* The fastest case is the first. */ /* The fastest case is the first. */
tp->ack.ato = (tp->ack.ato>>1) + TCP_ATO_MIN/2; icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
} else if (m < tp->ack.ato) { } else if (m < icsk->icsk_ack.ato) {
tp->ack.ato = (tp->ack.ato>>1) + m; icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
if (tp->ack.ato > tp->rto) if (icsk->icsk_ack.ato > icsk->icsk_rto)
tp->ack.ato = tp->rto; icsk->icsk_ack.ato = icsk->icsk_rto;
} else if (m > tp->rto) { } else if (m > icsk->icsk_rto) {
/* Too long gap. Apparently sender failed to /* Too long gap. Apparently sender failed to
* restart window, so that we send ACKs quickly. * restart window, so that we send ACKs quickly.
*/ */
tcp_incr_quickack(tp); tcp_incr_quickack(sk);
sk_stream_mem_reclaim(sk); sk_stream_mem_reclaim(sk);
} }
} }
tp->ack.lrcvtime = now; icsk->icsk_ack.lrcvtime = now;
TCP_ECN_check_ce(tp, skb); TCP_ECN_check_ce(tp, skb);
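
The ato field updated above is the delayed-ACK timeout estimate: an EWMA of the inter-arrival gap m, halved and re-blended on each data packet and never allowed past the retransmit timeout. A userspace sketch of just that arithmetic (constants illustrative, not the kernel's jiffies values):

#include <stdio.h>

#define TCP_ATO_MIN 10	/* illustrative; the kernel works in jiffies */

/* EWMA from tcp_event_data_recv() above: halve the old ato estimate
 * and blend in the new inter-arrival gap m, capped by the RTO. */
static unsigned int ato_update(unsigned int ato, unsigned int m, unsigned int rto)
{
	if (m <= TCP_ATO_MIN / 2) {
		ato = (ato >> 1) + TCP_ATO_MIN / 2;	/* back-to-back data */
	} else if (m < ato) {
		ato = (ato >> 1) + m;
		if (ato > rto)
			ato = rto;
	}
	/* m > rto is handled separately above: re-enter quickack mode. */
	return ato;
}

int main(void)
{
	/* gap of 12 against an estimate of 40: (40 >> 1) + 12 = 32 */
	printf("ato=%u\n", ato_update(40, 12, 200));
	return 0;
}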
...@@ -611,8 +617,9 @@ static void tcp_rtt_estimator(struct tcp_sock *tp, __u32 mrtt, u32 *usrtt) ...@@ -611,8 +617,9 @@ static void tcp_rtt_estimator(struct tcp_sock *tp, __u32 mrtt, u32 *usrtt)
/* Calculate rto without backoff. This is the second half of Van Jacobson's /* Calculate rto without backoff. This is the second half of Van Jacobson's
* routine referred to above. * routine referred to above.
*/ */
static inline void tcp_set_rto(struct tcp_sock *tp) static inline void tcp_set_rto(struct sock *sk)
{ {
const struct tcp_sock *tp = tcp_sk(sk);
/* Old crap is replaced with new one. 8) /* Old crap is replaced with new one. 8)
* *
* More seriously: * More seriously:
...@@ -623,7 +630,7 @@ static inline void tcp_set_rto(struct tcp_sock *tp) ...@@ -623,7 +630,7 @@ static inline void tcp_set_rto(struct tcp_sock *tp)
* is invisible. Actually, Linux-2.4 also generates erratic * is invisible. Actually, Linux-2.4 also generates erratic
* ACKs in some circumstances. * ACKs in some circumstances.
*/ */
tp->rto = (tp->srtt >> 3) + tp->rttvar; inet_csk(sk)->icsk_rto = (tp->srtt >> 3) + tp->rttvar;
/* 2. Fixups made earlier cannot be right. /* 2. Fixups made earlier cannot be right.
* If we do not estimate RTO correctly without them, * If we do not estimate RTO correctly without them,
...@@ -635,10 +642,10 @@ static inline void tcp_set_rto(struct tcp_sock *tp) ...@@ -635,10 +642,10 @@ static inline void tcp_set_rto(struct tcp_sock *tp)
/* NOTE: clamping at TCP_RTO_MIN is not required, current algo /* NOTE: clamping at TCP_RTO_MIN is not required, current algo
* guarantees that rto is higher. * guarantees that rto is higher.
*/ */
static inline void tcp_bound_rto(struct tcp_sock *tp) static inline void tcp_bound_rto(struct sock *sk)
{ {
if (tp->rto > TCP_RTO_MAX) if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
tp->rto = TCP_RTO_MAX; inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
} }
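
For reference, the two helpers above implement the classic Van Jacobson rule: srtt is stored scaled by 8, so the RTO is the unscaled smoothed RTT plus the variance estimate, clamped at TCP_RTO_MAX. A minimal sketch of the computation (units and the cap value are illustrative):

#include <stdio.h>

#define TCP_RTO_MAX 120000	/* illustrative cap, same units as the inputs */

/* tcp_set_rto() + tcp_bound_rto() above: srtt is kept scaled by 8,
 * so (srtt >> 3) recovers the smoothed RTT before adding rttvar. */
static unsigned int rto_calc(unsigned int srtt_x8, unsigned int rttvar)
{
	unsigned int rto = (srtt_x8 >> 3) + rttvar;

	return rto > TCP_RTO_MAX ? TCP_RTO_MAX : rto;
}

int main(void)
{
	/* smoothed RTT 100 (stored as 800) + variance 50 -> RTO 150 */
	printf("rto=%u\n", rto_calc(800, 50));
	return 0;
}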
/* Save metrics learned by this TCP session. /* Save metrics learned by this TCP session.
...@@ -658,7 +665,7 @@ void tcp_update_metrics(struct sock *sk) ...@@ -658,7 +665,7 @@ void tcp_update_metrics(struct sock *sk)
if (dst && (dst->flags&DST_HOST)) { if (dst && (dst->flags&DST_HOST)) {
int m; int m;
if (tp->backoff || !tp->srtt) { if (inet_csk(sk)->icsk_backoff || !tp->srtt) {
/* This session failed to estimate rtt. Why? /* This session failed to estimate rtt. Why?
* Probably, no packets returned in time. * Probably, no packets returned in time.
* Reset our results. * Reset our results.
...@@ -801,9 +808,9 @@ static void tcp_init_metrics(struct sock *sk) ...@@ -801,9 +808,9 @@ static void tcp_init_metrics(struct sock *sk)
tp->mdev = dst_metric(dst, RTAX_RTTVAR); tp->mdev = dst_metric(dst, RTAX_RTTVAR);
tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN); tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
} }
tcp_set_rto(tp); tcp_set_rto(sk);
tcp_bound_rto(tp); tcp_bound_rto(sk);
if (tp->rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp) if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
goto reset; goto reset;
tp->snd_cwnd = tcp_init_cwnd(tp, dst); tp->snd_cwnd = tcp_init_cwnd(tp, dst);
tp->snd_cwnd_stamp = tcp_time_stamp; tp->snd_cwnd_stamp = tcp_time_stamp;
...@@ -817,7 +824,7 @@ static void tcp_init_metrics(struct sock *sk) ...@@ -817,7 +824,7 @@ static void tcp_init_metrics(struct sock *sk)
if (!tp->rx_opt.saw_tstamp && tp->srtt) { if (!tp->rx_opt.saw_tstamp && tp->srtt) {
tp->srtt = 0; tp->srtt = 0;
tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT; tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
tp->rto = TCP_TIMEOUT_INIT; inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
} }
} }
...@@ -1118,7 +1125,7 @@ void tcp_enter_frto(struct sock *sk) ...@@ -1118,7 +1125,7 @@ void tcp_enter_frto(struct sock *sk)
if (tp->ca_state <= TCP_CA_Disorder || if (tp->ca_state <= TCP_CA_Disorder ||
tp->snd_una == tp->high_seq || tp->snd_una == tp->high_seq ||
(tp->ca_state == TCP_CA_Loss && !tp->retransmits)) { (tp->ca_state == TCP_CA_Loss && !inet_csk(sk)->icsk_retransmits)) {
tp->prior_ssthresh = tcp_current_ssthresh(tp); tp->prior_ssthresh = tcp_current_ssthresh(tp);
tp->snd_ssthresh = tp->ca_ops->ssthresh(tp); tp->snd_ssthresh = tp->ca_ops->ssthresh(tp);
tcp_ca_event(tp, CA_EVENT_FRTO); tcp_ca_event(tp, CA_EVENT_FRTO);
...@@ -1214,7 +1221,7 @@ void tcp_enter_loss(struct sock *sk, int how) ...@@ -1214,7 +1221,7 @@ void tcp_enter_loss(struct sock *sk, int how)
/* Reduce ssthresh if it has not yet been made inside this window. */ /* Reduce ssthresh if it has not yet been made inside this window. */
if (tp->ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq || if (tp->ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq ||
(tp->ca_state == TCP_CA_Loss && !tp->retransmits)) { (tp->ca_state == TCP_CA_Loss && !inet_csk(sk)->icsk_retransmits)) {
tp->prior_ssthresh = tcp_current_ssthresh(tp); tp->prior_ssthresh = tcp_current_ssthresh(tp);
tp->snd_ssthresh = tp->ca_ops->ssthresh(tp); tp->snd_ssthresh = tp->ca_ops->ssthresh(tp);
tcp_ca_event(tp, CA_EVENT_LOSS); tcp_ca_event(tp, CA_EVENT_LOSS);
...@@ -1253,7 +1260,7 @@ void tcp_enter_loss(struct sock *sk, int how) ...@@ -1253,7 +1260,7 @@ void tcp_enter_loss(struct sock *sk, int how)
TCP_ECN_queue_cwr(tp); TCP_ECN_queue_cwr(tp);
} }
static int tcp_check_sack_reneging(struct sock *sk, struct tcp_sock *tp) static int tcp_check_sack_reneging(struct sock *sk)
{ {
struct sk_buff *skb; struct sk_buff *skb;
...@@ -1268,9 +1275,10 @@ static int tcp_check_sack_reneging(struct sock *sk, struct tcp_sock *tp) ...@@ -1268,9 +1275,10 @@ static int tcp_check_sack_reneging(struct sock *sk, struct tcp_sock *tp)
NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING); NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);
tcp_enter_loss(sk, 1); tcp_enter_loss(sk, 1);
tp->retransmits++; inet_csk(sk)->icsk_retransmits++;
tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue)); tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue));
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
inet_csk(sk)->icsk_rto);
return 1; return 1;
} }
return 0; return 0;
...@@ -1281,15 +1289,15 @@ static inline int tcp_fackets_out(struct tcp_sock *tp) ...@@ -1281,15 +1289,15 @@ static inline int tcp_fackets_out(struct tcp_sock *tp)
return IsReno(tp) ? tp->sacked_out+1 : tp->fackets_out; return IsReno(tp) ? tp->sacked_out+1 : tp->fackets_out;
} }
static inline int tcp_skb_timedout(struct tcp_sock *tp, struct sk_buff *skb) static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
{ {
return (tcp_time_stamp - TCP_SKB_CB(skb)->when > tp->rto); return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto);
} }
static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp) static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp)
{ {
return tp->packets_out && return tp->packets_out &&
tcp_skb_timedout(tp, skb_peek(&sk->sk_write_queue)); tcp_skb_timedout(sk, skb_peek(&sk->sk_write_queue));
} }
/* Linux NewReno/SACK/FACK/ECN state machine. /* Linux NewReno/SACK/FACK/ECN state machine.
...@@ -1509,7 +1517,7 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp) ...@@ -1509,7 +1517,7 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
struct sk_buff *skb; struct sk_buff *skb;
sk_stream_for_retrans_queue(skb, sk) { sk_stream_for_retrans_queue(skb, sk) {
if (tcp_skb_timedout(tp, skb) && if (tcp_skb_timedout(sk, skb) &&
!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) { !(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
tp->lost_out += tcp_skb_pcount(skb); tp->lost_out += tcp_skb_pcount(skb);
...@@ -1676,7 +1684,7 @@ static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp) ...@@ -1676,7 +1684,7 @@ static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp)
tp->left_out = tp->sacked_out; tp->left_out = tp->sacked_out;
tcp_undo_cwr(tp, 1); tcp_undo_cwr(tp, 1);
NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO); NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
tp->retransmits = 0; inet_csk(sk)->icsk_retransmits = 0;
tp->undo_marker = 0; tp->undo_marker = 0;
if (!IsReno(tp)) if (!IsReno(tp))
tcp_set_ca_state(tp, TCP_CA_Open); tcp_set_ca_state(tp, TCP_CA_Open);
...@@ -1750,7 +1758,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, ...@@ -1750,7 +1758,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
tp->prior_ssthresh = 0; tp->prior_ssthresh = 0;
/* B. In all the states check for reneging SACKs. */ /* B. In all the states check for reneging SACKs. */
if (tp->sacked_out && tcp_check_sack_reneging(sk, tp)) if (tp->sacked_out && tcp_check_sack_reneging(sk))
return; return;
/* C. Process data loss notification, provided it is valid. */ /* C. Process data loss notification, provided it is valid. */
...@@ -1774,7 +1782,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, ...@@ -1774,7 +1782,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
} else if (!before(tp->snd_una, tp->high_seq)) { } else if (!before(tp->snd_una, tp->high_seq)) {
switch (tp->ca_state) { switch (tp->ca_state) {
case TCP_CA_Loss: case TCP_CA_Loss:
tp->retransmits = 0; inet_csk(sk)->icsk_retransmits = 0;
if (tcp_try_undo_recovery(sk, tp)) if (tcp_try_undo_recovery(sk, tp))
return; return;
break; break;
...@@ -1824,7 +1832,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, ...@@ -1824,7 +1832,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
break; break;
case TCP_CA_Loss: case TCP_CA_Loss:
if (flag&FLAG_DATA_ACKED) if (flag&FLAG_DATA_ACKED)
tp->retransmits = 0; inet_csk(sk)->icsk_retransmits = 0;
if (!tcp_try_undo_loss(sk, tp)) { if (!tcp_try_undo_loss(sk, tp)) {
tcp_moderate_cwnd(tp); tcp_moderate_cwnd(tp);
tcp_xmit_retransmit_queue(sk); tcp_xmit_retransmit_queue(sk);
...@@ -1881,10 +1889,8 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, ...@@ -1881,10 +1889,8 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
/* Read draft-ietf-tcplw-high-performance before mucking /* Read draft-ietf-tcplw-high-performance before mucking
* with this code. (Supersedes RFC1323) * with this code. (Supersedes RFC1323)
*/ */
static void tcp_ack_saw_tstamp(struct tcp_sock *tp, u32 *usrtt, int flag) static void tcp_ack_saw_tstamp(struct sock *sk, u32 *usrtt, int flag)
{ {
__u32 seq_rtt;
/* RTTM Rule: A TSecr value received in a segment is used to /* RTTM Rule: A TSecr value received in a segment is used to
* update the averaged RTT measurement only if the segment * update the averaged RTT measurement only if the segment
* acknowledges some new data, i.e., only if it advances the * acknowledges some new data, i.e., only if it advances the
...@@ -1900,14 +1906,15 @@ static void tcp_ack_saw_tstamp(struct tcp_sock *tp, u32 *usrtt, int flag) ...@@ -1900,14 +1906,15 @@ static void tcp_ack_saw_tstamp(struct tcp_sock *tp, u32 *usrtt, int flag)
* answer arrives rto becomes 120 seconds! If at least one of segments * answer arrives rto becomes 120 seconds! If at least one of segments
* in window is lost... Voila. --ANK (010210) * in window is lost... Voila. --ANK (010210)
*/ */
seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr; struct tcp_sock *tp = tcp_sk(sk);
const __u32 seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
tcp_rtt_estimator(tp, seq_rtt, usrtt); tcp_rtt_estimator(tp, seq_rtt, usrtt);
tcp_set_rto(tp); tcp_set_rto(sk);
tp->backoff = 0; inet_csk(sk)->icsk_backoff = 0;
tcp_bound_rto(tp); tcp_bound_rto(sk);
} }
static void tcp_ack_no_tstamp(struct tcp_sock *tp, u32 seq_rtt, u32 *usrtt, int flag) static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, u32 *usrtt, int flag)
{ {
/* We don't have a timestamp. Can only use /* We don't have a timestamp. Can only use
* packets that are not retransmitted to determine * packets that are not retransmitted to determine
...@@ -1921,20 +1928,21 @@ static void tcp_ack_no_tstamp(struct tcp_sock *tp, u32 seq_rtt, u32 *usrtt, int ...@@ -1921,20 +1928,21 @@ static void tcp_ack_no_tstamp(struct tcp_sock *tp, u32 seq_rtt, u32 *usrtt, int
if (flag & FLAG_RETRANS_DATA_ACKED) if (flag & FLAG_RETRANS_DATA_ACKED)
return; return;
tcp_rtt_estimator(tp, seq_rtt, usrtt); tcp_rtt_estimator(tcp_sk(sk), seq_rtt, usrtt);
tcp_set_rto(tp); tcp_set_rto(sk);
tp->backoff = 0; inet_csk(sk)->icsk_backoff = 0;
tcp_bound_rto(tp); tcp_bound_rto(sk);
} }
static inline void tcp_ack_update_rtt(struct tcp_sock *tp, static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
int flag, s32 seq_rtt, u32 *usrtt) const s32 seq_rtt, u32 *usrtt)
{ {
const struct tcp_sock *tp = tcp_sk(sk);
/* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */ /* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */
if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
tcp_ack_saw_tstamp(tp, usrtt, flag); tcp_ack_saw_tstamp(sk, usrtt, flag);
else if (seq_rtt >= 0) else if (seq_rtt >= 0)
tcp_ack_no_tstamp(tp, seq_rtt, usrtt, flag); tcp_ack_no_tstamp(sk, seq_rtt, usrtt, flag);
} }
static inline void tcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt, static inline void tcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt,
...@@ -1951,9 +1959,9 @@ static inline void tcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt, ...@@ -1951,9 +1959,9 @@ static inline void tcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt,
static inline void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp) static inline void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp)
{ {
if (!tp->packets_out) { if (!tp->packets_out) {
tcp_clear_xmit_timer(sk, TCP_TIME_RETRANS); inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
} else { } else {
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto);
} }
} }
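
tcp_ack_packets_out() encodes a simple invariant: the retransmit timer is armed exactly while data is in flight, and every ACK that leaves data outstanding re-arms it a full icsk_rto from now. A toy model of that policy (a deadline of 0 stands in for a stopped timer; this is not the kernel timer API):

#include <stdio.h>

/* Toy model of tcp_ack_packets_out(): the retransmit timer runs
 * exactly while data is in flight; a deadline of 0 means "stopped". */
static unsigned int rexmit_deadline(unsigned int packets_out,
				    unsigned int now, unsigned int rto)
{
	return packets_out ? now + rto : 0;
}

int main(void)
{
	printf("%u\n", rexmit_deadline(3, 1000, 200));	/* armed: 1200 */
	printf("%u\n", rexmit_deadline(0, 1000, 200));	/* cleared: 0 */
	return 0;
}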
...@@ -2090,7 +2098,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt ...@@ -2090,7 +2098,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
} }
if (acked&FLAG_ACKED) { if (acked&FLAG_ACKED) {
tcp_ack_update_rtt(tp, acked, seq_rtt, seq_usrtt); tcp_ack_update_rtt(sk, acked, seq_rtt, seq_usrtt);
tcp_ack_packets_out(sk, tp); tcp_ack_packets_out(sk, tp);
if (tp->ca_ops->pkts_acked) if (tp->ca_ops->pkts_acked)
...@@ -2125,20 +2133,21 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt ...@@ -2125,20 +2133,21 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
static void tcp_ack_probe(struct sock *sk) static void tcp_ack_probe(struct sock *sk)
{ {
struct tcp_sock *tp = tcp_sk(sk); const struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
/* Was it a usable window open? */ /* Was it a usable window open? */
if (!after(TCP_SKB_CB(sk->sk_send_head)->end_seq, if (!after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
tp->snd_una + tp->snd_wnd)) { tp->snd_una + tp->snd_wnd)) {
tp->backoff = 0; icsk->icsk_backoff = 0;
tcp_clear_xmit_timer(sk, TCP_TIME_PROBE0); inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
/* Socket must be waked up by subsequent tcp_data_snd_check(). /* Socket must be waked up by subsequent tcp_data_snd_check().
* This function is not for random use! * This function is not for random use!
*/ */
} else { } else {
tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
min(tp->rto << tp->backoff, TCP_RTO_MAX)); min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX));
} }
} }
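
The else branch above shows the zero-window probe schedule: each unanswered probe increments icsk_backoff, doubling the interval until it saturates at TCP_RTO_MAX. A standalone sketch of the progression (cap value illustrative):

#include <stdio.h>

#define TCP_RTO_MAX 120000	/* illustrative cap */

/* Probe timer schedule from tcp_ack_probe() above: the interval
 * doubles with icsk_backoff until it saturates at TCP_RTO_MAX. */
static unsigned int probe0_timeout(unsigned int rto, unsigned int backoff)
{
	unsigned long t = (unsigned long)rto << backoff;

	return t > TCP_RTO_MAX ? TCP_RTO_MAX : (unsigned int)t;
}

int main(void)
{
	unsigned int b;

	for (b = 0; b < 5; b++)	/* 30000, 60000, 120000, 120000, 120000 */
		printf("backoff=%u timeout=%u\n", b, probe0_timeout(30000, b));
	return 0;
}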
...@@ -2157,8 +2166,8 @@ static inline int tcp_may_raise_cwnd(struct tcp_sock *tp, int flag) ...@@ -2157,8 +2166,8 @@ static inline int tcp_may_raise_cwnd(struct tcp_sock *tp, int flag)
/* Check that window update is acceptable. /* Check that window update is acceptable.
* The function assumes that snd_una<=ack<=snd_next. * The function assumes that snd_una<=ack<=snd_next.
*/ */
static inline int tcp_may_update_window(struct tcp_sock *tp, u32 ack, static inline int tcp_may_update_window(const struct tcp_sock *tp, const u32 ack,
u32 ack_seq, u32 nwin) const u32 ack_seq, const u32 nwin)
{ {
return (after(ack, tp->snd_una) || return (after(ack, tp->snd_una) ||
after(ack_seq, tp->snd_wl1) || after(ack_seq, tp->snd_wl1) ||
...@@ -2500,8 +2509,9 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) ...@@ -2500,8 +2509,9 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
* up to bandwidth of 18Gigabit/sec. 8) ] * up to bandwidth of 18Gigabit/sec. 8) ]
*/ */
static int tcp_disordered_ack(struct tcp_sock *tp, struct sk_buff *skb) static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
{ {
struct tcp_sock *tp = tcp_sk(sk);
struct tcphdr *th = skb->h.th; struct tcphdr *th = skb->h.th;
u32 seq = TCP_SKB_CB(skb)->seq; u32 seq = TCP_SKB_CB(skb)->seq;
u32 ack = TCP_SKB_CB(skb)->ack_seq; u32 ack = TCP_SKB_CB(skb)->ack_seq;
...@@ -2516,14 +2526,15 @@ static int tcp_disordered_ack(struct tcp_sock *tp, struct sk_buff *skb) ...@@ -2516,14 +2526,15 @@ static int tcp_disordered_ack(struct tcp_sock *tp, struct sk_buff *skb)
!tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) && !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) &&
/* 4. ... and sits in replay window. */ /* 4. ... and sits in replay window. */
(s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (tp->rto*1024)/HZ); (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ);
} }
static inline int tcp_paws_discard(struct tcp_sock *tp, struct sk_buff *skb) static inline int tcp_paws_discard(const struct sock *sk, const struct sk_buff *skb)
{ {
const struct tcp_sock *tp = tcp_sk(sk);
return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW && return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW &&
xtime.tv_sec < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS && xtime.tv_sec < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS &&
!tcp_disordered_ack(tp, skb)); !tcp_disordered_ack(sk, skb));
} }
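
The PAWS test above hinges on comparing 32-bit timestamps as signed differences, which makes wraparound harmless. A sketch of just that core comparison (TCP_PAWS_WINDOW taken as 1, as in kernels of this era; the full check also requires ts_recent to be under 24 days old and the segment not to qualify as a disordered ACK):

#include <stdio.h>
#include <stdint.h>

#define TCP_PAWS_WINDOW 1	/* assumed, as in kernels of this era */

/* Core of tcp_paws_discard(): timestamp age is computed as a signed
 * 32-bit difference, so wrapped-but-newer values compare correctly. */
static int paws_reject(uint32_t ts_recent, uint32_t rcv_tsval)
{
	return (int32_t)(ts_recent - rcv_tsval) > TCP_PAWS_WINDOW;
}

int main(void)
{
	printf("%d\n", paws_reject(1000, 990));		/* 10 ticks stale: 1 */
	printf("%d\n", paws_reject(0xfffffff0u, 10));	/* wrapped, newer: 0 */
	return 0;
}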
/* Check segment sequence number for validity. /* Check segment sequence number for validity.
...@@ -2586,7 +2597,7 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th) ...@@ -2586,7 +2597,7 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
tcp_schedule_ack(tp); inet_csk_schedule_ack(sk);
sk->sk_shutdown |= RCV_SHUTDOWN; sk->sk_shutdown |= RCV_SHUTDOWN;
sock_set_flag(sk, SOCK_DONE); sock_set_flag(sk, SOCK_DONE);
...@@ -2596,7 +2607,7 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th) ...@@ -2596,7 +2607,7 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
case TCP_ESTABLISHED: case TCP_ESTABLISHED:
/* Move to CLOSE_WAIT */ /* Move to CLOSE_WAIT */
tcp_set_state(sk, TCP_CLOSE_WAIT); tcp_set_state(sk, TCP_CLOSE_WAIT);
tp->ack.pingpong = 1; inet_csk(sk)->icsk_ack.pingpong = 1;
break; break;
case TCP_CLOSE_WAIT: case TCP_CLOSE_WAIT:
...@@ -2694,7 +2705,7 @@ static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb) ...@@ -2694,7 +2705,7 @@ static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST); NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
tcp_enter_quickack_mode(tp); tcp_enter_quickack_mode(sk);
if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) { if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) {
u32 end_seq = TCP_SKB_CB(skb)->end_seq; u32 end_seq = TCP_SKB_CB(skb)->end_seq;
...@@ -2942,7 +2953,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) ...@@ -2942,7 +2953,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
* gap in queue is filled. * gap in queue is filled.
*/ */
if (skb_queue_empty(&tp->out_of_order_queue)) if (skb_queue_empty(&tp->out_of_order_queue))
tp->ack.pingpong = 0; inet_csk(sk)->icsk_ack.pingpong = 0;
} }
if (tp->rx_opt.num_sacks) if (tp->rx_opt.num_sacks)
...@@ -2963,8 +2974,8 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) ...@@ -2963,8 +2974,8 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
out_of_window: out_of_window:
tcp_enter_quickack_mode(tp); tcp_enter_quickack_mode(sk);
tcp_schedule_ack(tp); inet_csk_schedule_ack(sk);
drop: drop:
__kfree_skb(skb); __kfree_skb(skb);
return; return;
...@@ -2974,7 +2985,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) ...@@ -2974,7 +2985,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp)))
goto out_of_window; goto out_of_window;
tcp_enter_quickack_mode(tp); tcp_enter_quickack_mode(sk);
if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
/* Partial packet, seq < rcv_next < end_seq */ /* Partial packet, seq < rcv_next < end_seq */
...@@ -3003,7 +3014,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) ...@@ -3003,7 +3014,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
/* Disable header prediction. */ /* Disable header prediction. */
tp->pred_flags = 0; tp->pred_flags = 0;
tcp_schedule_ack(tp); inet_csk_schedule_ack(sk);
SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
...@@ -3373,13 +3384,13 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) ...@@ -3373,13 +3384,13 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
/* More than one full frame received... */ /* More than one full frame received... */
if (((tp->rcv_nxt - tp->rcv_wup) > tp->ack.rcv_mss if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss
/* ... and right edge of window advances far enough. /* ... and right edge of window advances far enough.
* (tcp_recvmsg() will send ACK otherwise). Or... * (tcp_recvmsg() will send ACK otherwise). Or...
*/ */
&& __tcp_select_window(sk) >= tp->rcv_wnd) || && __tcp_select_window(sk) >= tp->rcv_wnd) ||
/* We ACK each frame or... */ /* We ACK each frame or... */
tcp_in_quickack_mode(tp) || tcp_in_quickack_mode(sk) ||
/* We have out of order data. */ /* We have out of order data. */
(ofo_possible && (ofo_possible &&
skb_peek(&tp->out_of_order_queue))) { skb_peek(&tp->out_of_order_queue))) {
...@@ -3393,8 +3404,7 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) ...@@ -3393,8 +3404,7 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
static __inline__ void tcp_ack_snd_check(struct sock *sk) static __inline__ void tcp_ack_snd_check(struct sock *sk)
{ {
struct tcp_sock *tp = tcp_sk(sk); if (!inet_csk_ack_scheduled(sk)) {
if (!tcp_ack_scheduled(tp)) {
/* We sent a data segment already. */ /* We sent a data segment already. */
return; return;
} }
...@@ -3648,7 +3658,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, ...@@ -3648,7 +3658,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
tp->rcv_nxt == tp->rcv_wup) tp->rcv_nxt == tp->rcv_wup)
tcp_store_ts_recent(tp); tcp_store_ts_recent(tp);
tcp_rcv_rtt_measure_ts(tp, skb); tcp_rcv_rtt_measure_ts(sk, skb);
/* We know that such packets are checksummed /* We know that such packets are checksummed
* on entry. * on entry.
...@@ -3681,7 +3691,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, ...@@ -3681,7 +3691,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
tp->rcv_nxt == tp->rcv_wup) tp->rcv_nxt == tp->rcv_wup)
tcp_store_ts_recent(tp); tcp_store_ts_recent(tp);
tcp_rcv_rtt_measure_ts(tp, skb); tcp_rcv_rtt_measure_ts(sk, skb);
__skb_pull(skb, tcp_header_len); __skb_pull(skb, tcp_header_len);
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
...@@ -3702,7 +3712,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, ...@@ -3702,7 +3712,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
tp->rcv_nxt == tp->rcv_wup) tp->rcv_nxt == tp->rcv_wup)
tcp_store_ts_recent(tp); tcp_store_ts_recent(tp);
tcp_rcv_rtt_measure_ts(tp, skb); tcp_rcv_rtt_measure_ts(sk, skb);
if ((int)skb->truesize > sk->sk_forward_alloc) if ((int)skb->truesize > sk->sk_forward_alloc)
goto step5; goto step5;
...@@ -3722,7 +3732,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, ...@@ -3722,7 +3732,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
/* Well, only one small jumplet in fast path... */ /* Well, only one small jumplet in fast path... */
tcp_ack(sk, skb, FLAG_DATA); tcp_ack(sk, skb, FLAG_DATA);
tcp_data_snd_check(sk, tp); tcp_data_snd_check(sk, tp);
if (!tcp_ack_scheduled(tp)) if (!inet_csk_ack_scheduled(sk))
goto no_ack; goto no_ack;
} }
...@@ -3744,7 +3754,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, ...@@ -3744,7 +3754,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
* RFC1323: H1. Apply PAWS check first. * RFC1323: H1. Apply PAWS check first.
*/ */
if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
tcp_paws_discard(tp, skb)) { tcp_paws_discard(sk, skb)) {
if (!th->rst) { if (!th->rst) {
NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED); NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
tcp_send_dupack(sk, skb); tcp_send_dupack(sk, skb);
...@@ -3791,7 +3801,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, ...@@ -3791,7 +3801,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
if(th->ack) if(th->ack)
tcp_ack(sk, skb, FLAG_SLOWPATH); tcp_ack(sk, skb, FLAG_SLOWPATH);
tcp_rcv_rtt_measure_ts(tp, skb); tcp_rcv_rtt_measure_ts(sk, skb);
/* Process urgent data. */ /* Process urgent data. */
tcp_urg(sk, skb, th); tcp_urg(sk, skb, th);
...@@ -3933,7 +3943,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, ...@@ -3933,7 +3943,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
tcp_init_buffer_space(sk); tcp_init_buffer_space(sk);
if (sock_flag(sk, SOCK_KEEPOPEN)) if (sock_flag(sk, SOCK_KEEPOPEN))
tcp_reset_keepalive_timer(sk, keepalive_time_when(tp)); inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));
if (!tp->rx_opt.snd_wscale) if (!tp->rx_opt.snd_wscale)
__tcp_fast_path_on(tp, tp->snd_wnd); __tcp_fast_path_on(tp, tp->snd_wnd);
...@@ -3945,7 +3955,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, ...@@ -3945,7 +3955,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
sk_wake_async(sk, 0, POLL_OUT); sk_wake_async(sk, 0, POLL_OUT);
} }
if (sk->sk_write_pending || tp->defer_accept || tp->ack.pingpong) { if (sk->sk_write_pending || tp->defer_accept || inet_csk(sk)->icsk_ack.pingpong) {
/* Save one ACK. Data will be ready after /* Save one ACK. Data will be ready after
* several ticks, if write_pending is set. * several ticks, if write_pending is set.
* *
...@@ -3953,12 +3963,12 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, ...@@ -3953,12 +3963,12 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
* look so _wonderfully_ clever, that I was not able * look so _wonderfully_ clever, that I was not able
* to stand against the temptation 8) --ANK * to stand against the temptation 8) --ANK
*/ */
tcp_schedule_ack(tp); inet_csk_schedule_ack(sk);
tp->ack.lrcvtime = tcp_time_stamp; inet_csk(sk)->icsk_ack.lrcvtime = tcp_time_stamp;
tp->ack.ato = TCP_ATO_MIN; inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
tcp_incr_quickack(tp); tcp_incr_quickack(sk);
tcp_enter_quickack_mode(tp); tcp_enter_quickack_mode(sk);
tcp_reset_xmit_timer(sk, TCP_TIME_DACK, TCP_DELACK_MAX); inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX);
discard: discard:
__kfree_skb(skb); __kfree_skb(skb);
...@@ -4114,7 +4124,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, ...@@ -4114,7 +4124,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
} }
if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
tcp_paws_discard(tp, skb)) { tcp_paws_discard(sk, skb)) {
if (!th->rst) { if (!th->rst) {
NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED); NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
tcp_send_dupack(sk, skb); tcp_send_dupack(sk, skb);
...@@ -4183,7 +4193,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, ...@@ -4183,7 +4193,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
*/ */
if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
!tp->srtt) !tp->srtt)
tcp_ack_saw_tstamp(tp, 0, 0); tcp_ack_saw_tstamp(sk, 0, 0);
if (tp->rx_opt.tstamp_ok) if (tp->rx_opt.tstamp_ok)
tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
...@@ -4230,9 +4240,9 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, ...@@ -4230,9 +4240,9 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
return 1; return 1;
} }
tmo = tcp_fin_time(tp); tmo = tcp_fin_time(sk);
if (tmo > TCP_TIMEWAIT_LEN) { if (tmo > TCP_TIMEWAIT_LEN) {
tcp_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
} else if (th->fin || sock_owned_by_user(sk)) { } else if (th->fin || sock_owned_by_user(sk)) {
/* Bad case. We could lose such FIN otherwise. /* Bad case. We could lose such FIN otherwise.
* It is not a big problem, but it looks confusing * It is not a big problem, but it looks confusing
...@@ -4240,7 +4250,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, ...@@ -4240,7 +4250,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
* if it spins in bh_lock_sock(), but it is really * if it spins in bh_lock_sock(), but it is really
* a marginal case. * a marginal case.
*/ */
tcp_reset_keepalive_timer(sk, tmo); inet_csk_reset_keepalive_timer(sk, tmo);
} else { } else {
tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
goto discard; goto discard;
......
...@@ -104,7 +104,7 @@ struct inet_hashinfo __cacheline_aligned tcp_hashinfo = { ...@@ -104,7 +104,7 @@ struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
*/ */
int sysctl_local_port_range[2] = { 1024, 4999 }; int sysctl_local_port_range[2] = { 1024, 4999 };
static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb) static inline int inet_csk_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb)
{ {
const u32 sk_rcv_saddr = inet_rcv_saddr(sk); const u32 sk_rcv_saddr = inet_rcv_saddr(sk);
struct sock *sk2; struct sock *sk2;
...@@ -113,7 +113,7 @@ static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb ...@@ -113,7 +113,7 @@ static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb
sk_for_each_bound(sk2, node, &tb->owners) { sk_for_each_bound(sk2, node, &tb->owners) {
if (sk != sk2 && if (sk != sk2 &&
!tcp_v6_ipv6only(sk2) && !inet_v6_ipv6only(sk2) &&
(!sk->sk_bound_dev_if || (!sk->sk_bound_dev_if ||
!sk2->sk_bound_dev_if || !sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
...@@ -132,7 +132,8 @@ static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb ...@@ -132,7 +132,8 @@ static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb
/* Obtain a reference to a local port for the given sock, /* Obtain a reference to a local port for the given sock,
* if snum is zero it means select any available local port. * if snum is zero it means select any available local port.
*/ */
static int tcp_v4_get_port(struct sock *sk, unsigned short snum) int inet_csk_get_port(struct inet_hashinfo *hashinfo,
struct sock *sk, unsigned short snum)
{ {
struct inet_bind_hashbucket *head; struct inet_bind_hashbucket *head;
struct hlist_node *node; struct hlist_node *node;
...@@ -146,16 +147,16 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum) ...@@ -146,16 +147,16 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
int remaining = (high - low) + 1; int remaining = (high - low) + 1;
int rover; int rover;
spin_lock(&tcp_hashinfo.portalloc_lock); spin_lock(&hashinfo->portalloc_lock);
if (tcp_hashinfo.port_rover < low) if (hashinfo->port_rover < low)
rover = low; rover = low;
else else
rover = tcp_hashinfo.port_rover; rover = hashinfo->port_rover;
do { do {
rover++; rover++;
if (rover > high) if (rover > high)
rover = low; rover = low;
head = &tcp_hashinfo.bhash[inet_bhashfn(rover, tcp_hashinfo.bhash_size)]; head = &hashinfo->bhash[inet_bhashfn(rover, hashinfo->bhash_size)];
spin_lock(&head->lock); spin_lock(&head->lock);
inet_bind_bucket_for_each(tb, node, &head->chain) inet_bind_bucket_for_each(tb, node, &head->chain)
if (tb->port == rover) if (tb->port == rover)
...@@ -164,8 +165,8 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum) ...@@ -164,8 +165,8 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
next: next:
spin_unlock(&head->lock); spin_unlock(&head->lock);
} while (--remaining > 0); } while (--remaining > 0);
tcp_hashinfo.port_rover = rover; hashinfo->port_rover = rover;
spin_unlock(&tcp_hashinfo.portalloc_lock); spin_unlock(&hashinfo->portalloc_lock);
/* Exhausted local port range during search? It is not /* Exhausted local port range during search? It is not
* possible for us to be holding one of the bind hash * possible for us to be holding one of the bind hash
...@@ -182,7 +183,7 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum) ...@@ -182,7 +183,7 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
*/ */
snum = rover; snum = rover;
} else { } else {
head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)]; head = &hashinfo->bhash[inet_bhashfn(snum, hashinfo->bhash_size)];
spin_lock(&head->lock); spin_lock(&head->lock);
inet_bind_bucket_for_each(tb, node, &head->chain) inet_bind_bucket_for_each(tb, node, &head->chain)
if (tb->port == snum) if (tb->port == snum)
...@@ -199,13 +200,13 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum) ...@@ -199,13 +200,13 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
goto success; goto success;
} else { } else {
ret = 1; ret = 1;
if (tcp_bind_conflict(sk, tb)) if (inet_csk_bind_conflict(sk, tb))
goto fail_unlock; goto fail_unlock;
} }
} }
tb_not_found: tb_not_found:
ret = 1; ret = 1;
if (!tb && (tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, snum)) == NULL) if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep, head, snum)) == NULL)
goto fail_unlock; goto fail_unlock;
if (hlist_empty(&tb->owners)) { if (hlist_empty(&tb->owners)) {
if (sk->sk_reuse && sk->sk_state != TCP_LISTEN) if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
...@@ -216,9 +217,9 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum) ...@@ -216,9 +217,9 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
(!sk->sk_reuse || sk->sk_state == TCP_LISTEN)) (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
tb->fastreuse = 0; tb->fastreuse = 0;
success: success:
if (!inet_sk(sk)->bind_hash) if (!inet_csk(sk)->icsk_bind_hash)
inet_bind_hash(sk, tb, snum); inet_bind_hash(sk, tb, snum);
BUG_TRAP(inet_sk(sk)->bind_hash == tb); BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb);
ret = 0; ret = 0;
fail_unlock: fail_unlock:
...@@ -228,6 +229,11 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum) ...@@ -228,6 +229,11 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
return ret; return ret;
} }
static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
{
return inet_csk_get_port(&tcp_hashinfo, sk, snum);
}
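
The thin wrapper left behind is the payoff of passing the hashinfo in as a parameter: a second connection-oriented protocol can bind against its own tables with the same allocator. A hypothetical sketch of such a reuse (dccp_hashinfo is an assumed name; this patch converts only TCP):

/* Hypothetical sketch only -- "dccp_hashinfo" stands in for a second
 * protocol's bind/establish tables, following the TCP wrapper above. */
extern struct inet_hashinfo dccp_hashinfo;

static int dccp_v4_get_port(struct sock *sk, unsigned short snum)
{
	return inet_csk_get_port(&dccp_hashinfo, sk, snum);
}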
static void tcp_v4_hash(struct sock *sk) static void tcp_v4_hash(struct sock *sk)
{ {
inet_hash(&tcp_hashinfo, sk); inet_hash(&tcp_hashinfo, sk);
...@@ -426,7 +432,7 @@ static inline int tcp_v4_hash_connect(struct sock *sk) ...@@ -426,7 +432,7 @@ static inline int tcp_v4_hash_connect(struct sock *sk)
} }
head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)]; head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
tb = inet_sk(sk)->bind_hash; tb = inet_csk(sk)->icsk_bind_hash;
spin_lock_bh(&head->lock); spin_lock_bh(&head->lock);
if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
__inet_hash(&tcp_hashinfo, sk, 0); __inet_hash(&tcp_hashinfo, sk, 0);
...@@ -557,25 +563,28 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) ...@@ -557,25 +563,28 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
return err; return err;
} }
static __inline__ int tcp_v4_iif(struct sk_buff *skb) static inline int inet_iif(const struct sk_buff *skb)
{ {
return ((struct rtable *)skb->dst)->rt_iif; return ((struct rtable *)skb->dst)->rt_iif;
} }
static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd) static inline u32 inet_synq_hash(const u32 raddr, const u16 rport,
const u32 rnd, const u16 synq_hsize)
{ {
return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1)); return jhash_2words(raddr, (u32)rport, rnd) & (synq_hsize - 1);
} }
static struct request_sock *tcp_v4_search_req(struct tcp_sock *tp, struct request_sock *inet_csk_search_req(const struct sock *sk,
struct request_sock ***prevp, struct request_sock ***prevp,
__u16 rport, const __u16 rport, const __u32 raddr,
__u32 raddr, __u32 laddr) const __u32 laddr)
{ {
struct listen_sock *lopt = tp->accept_queue.listen_opt; const struct inet_connection_sock *icsk = inet_csk(sk);
struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
struct request_sock *req, **prev; struct request_sock *req, **prev;
for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)]; for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
lopt->nr_table_entries)];
(req = *prev) != NULL; (req = *prev) != NULL;
prev = &req->dl_next) { prev = &req->dl_next) {
const struct inet_request_sock *ireq = inet_rsk(req); const struct inet_request_sock *ireq = inet_rsk(req);
...@@ -583,7 +592,7 @@ static struct request_sock *tcp_v4_search_req(struct tcp_sock *tp, ...@@ -583,7 +592,7 @@ static struct request_sock *tcp_v4_search_req(struct tcp_sock *tp,
if (ireq->rmt_port == rport && if (ireq->rmt_port == rport &&
ireq->rmt_addr == raddr && ireq->rmt_addr == raddr &&
ireq->loc_addr == laddr && ireq->loc_addr == laddr &&
TCP_INET_FAMILY(req->rsk_ops->family)) { AF_INET_FAMILY(req->rsk_ops->family)) {
BUG_TRAP(!req->sk); BUG_TRAP(!req->sk);
*prevp = prev; *prevp = prev;
break; break;
...@@ -595,12 +604,13 @@ static struct request_sock *tcp_v4_search_req(struct tcp_sock *tp, ...@@ -595,12 +604,13 @@ static struct request_sock *tcp_v4_search_req(struct tcp_sock *tp,
static void tcp_v4_synq_add(struct sock *sk, struct request_sock *req) static void tcp_v4_synq_add(struct sock *sk, struct request_sock *req)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk);
struct listen_sock *lopt = tp->accept_queue.listen_opt; struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
u32 h = tcp_v4_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd); const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
lopt->hash_rnd, lopt->nr_table_entries);
reqsk_queue_hash_req(&tp->accept_queue, h, req, TCP_TIMEOUT_INIT); reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, TCP_TIMEOUT_INIT);
tcp_synq_added(sk); inet_csk_reqsk_queue_added(sk, TCP_TIMEOUT_INIT);
} }
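
inet_synq_hash() differs from the old tcp_v4_synq_hash() only in taking the table size as an argument instead of hard-coding TCP_SYNQ_HSIZE, so each protocol can size its own SYN backlog; masking with (size - 1) still presumes a power-of-two table. A userspace sketch of the shape of the hash (the mixer stands in for the kernel's jhash_2words() and is not its real algorithm):

#include <stdio.h>
#include <stdint.h>

/* Stand-in mixer: the kernel uses jhash_2words(); any reasonable
 * 32-bit hash illustrates the point. */
static uint32_t mix2(uint32_t a, uint32_t b, uint32_t seed)
{
	uint32_t h = (a * 0x9e3779b1u) ^ b ^ seed;

	h ^= h >> 16;
	return h * 0x85ebca6bu;
}

/* Shape of inet_synq_hash(): table size is now a parameter, and the
 * (size - 1) mask presumes a power-of-two SYN queue. */
static uint32_t synq_hash(uint32_t raddr, uint16_t rport,
			  uint32_t rnd, uint16_t synq_hsize)
{
	return mix2(raddr, rport, rnd) & (uint32_t)(synq_hsize - 1);
}

int main(void)
{
	printf("bucket=%u\n",
	       (unsigned)synq_hash(0xc0a80001u, 40000, 0xdeadbeefu, 512));
	return 0;
}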
...@@ -687,7 +697,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info) ...@@ -687,7 +697,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
} }
sk = inet_lookup(&tcp_hashinfo, iph->daddr, th->dest, iph->saddr, sk = inet_lookup(&tcp_hashinfo, iph->daddr, th->dest, iph->saddr,
th->source, tcp_v4_iif(skb)); th->source, inet_iif(skb));
if (!sk) { if (!sk) {
ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
return; return;
...@@ -747,7 +757,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info) ...@@ -747,7 +757,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
if (sock_owned_by_user(sk)) if (sock_owned_by_user(sk))
goto out; goto out;
req = tcp_v4_search_req(tp, &prev, th->dest, req = inet_csk_search_req(sk, &prev, th->dest,
iph->daddr, iph->saddr); iph->daddr, iph->saddr);
if (!req) if (!req)
goto out; goto out;
...@@ -768,7 +778,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info) ...@@ -768,7 +778,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
* created socket, and POSIX does not want network * created socket, and POSIX does not want network
* errors returned from accept(). * errors returned from accept().
*/ */
tcp_synq_drop(sk, req, prev); inet_csk_reqsk_queue_drop(sk, req, prev);
goto out; goto out;
case TCP_SYN_SENT: case TCP_SYN_SENT:
...@@ -953,8 +963,8 @@ static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req) ...@@ -953,8 +963,8 @@ static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
req->ts_recent); req->ts_recent);
} }
static struct dst_entry* tcp_v4_route_req(struct sock *sk, struct dst_entry* inet_csk_route_req(struct sock *sk,
struct request_sock *req) const struct request_sock *req)
{ {
struct rtable *rt; struct rtable *rt;
const struct inet_request_sock *ireq = inet_rsk(req); const struct inet_request_sock *ireq = inet_rsk(req);
...@@ -966,7 +976,7 @@ static struct dst_entry* tcp_v4_route_req(struct sock *sk, ...@@ -966,7 +976,7 @@ static struct dst_entry* tcp_v4_route_req(struct sock *sk,
ireq->rmt_addr), ireq->rmt_addr),
.saddr = ireq->loc_addr, .saddr = ireq->loc_addr,
.tos = RT_CONN_FLAGS(sk) } }, .tos = RT_CONN_FLAGS(sk) } },
.proto = IPPROTO_TCP, .proto = sk->sk_protocol,
.uli_u = { .ports = .uli_u = { .ports =
{ .sport = inet_sk(sk)->sport, { .sport = inet_sk(sk)->sport,
.dport = ireq->rmt_port } } }; .dport = ireq->rmt_port } } };
...@@ -996,7 +1006,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req, ...@@ -996,7 +1006,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
struct sk_buff * skb; struct sk_buff * skb;
/* First, grab a route. */ /* First, grab a route. */
if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL) if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
goto out; goto out;
skb = tcp_make_synack(sk, dst, req); skb = tcp_make_synack(sk, dst, req);
...@@ -1098,7 +1108,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) ...@@ -1098,7 +1108,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
* limitations, they conserve resources and peer is * limitations, they conserve resources and peer is
* evidently real one. * evidently real one.
*/ */
if (tcp_synq_is_full(sk) && !isn) { if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
#ifdef CONFIG_SYN_COOKIES #ifdef CONFIG_SYN_COOKIES
if (sysctl_tcp_syncookies) { if (sysctl_tcp_syncookies) {
want_cookie = 1; want_cookie = 1;
...@@ -1112,7 +1122,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) ...@@ -1112,7 +1122,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
* clogging syn queue with openreqs with exponentially increasing * clogging syn queue with openreqs with exponentially increasing
* timeout. * timeout.
*/ */
if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1) if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
goto drop; goto drop;
req = reqsk_alloc(&tcp_request_sock_ops); req = reqsk_alloc(&tcp_request_sock_ops);
...@@ -1169,7 +1179,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) ...@@ -1169,7 +1179,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
*/ */
if (tmp_opt.saw_tstamp && if (tmp_opt.saw_tstamp &&
sysctl_tcp_tw_recycle && sysctl_tcp_tw_recycle &&
(dst = tcp_v4_route_req(sk, req)) != NULL && (dst = inet_csk_route_req(sk, req)) != NULL &&
(peer = rt_get_peer((struct rtable *)dst)) != NULL && (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
peer->v4daddr == saddr) { peer->v4daddr == saddr) {
if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL && if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
...@@ -1182,7 +1192,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) ...@@ -1182,7 +1192,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
} }
/* Kill the following clause, if you dislike this way. */ /* Kill the following clause, if you dislike this way. */
else if (!sysctl_tcp_syncookies && else if (!sysctl_tcp_syncookies &&
(sysctl_max_syn_backlog - tcp_synq_len(sk) < (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
(sysctl_max_syn_backlog >> 2)) && (sysctl_max_syn_backlog >> 2)) &&
(!peer || !peer->tcp_ts_stamp) && (!peer || !peer->tcp_ts_stamp) &&
(!dst || !dst_metric(dst, RTAX_RTT))) { (!dst || !dst_metric(dst, RTAX_RTT))) {
...@@ -1240,7 +1250,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, ...@@ -1240,7 +1250,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
if (sk_acceptq_is_full(sk)) if (sk_acceptq_is_full(sk))
goto exit_overflow; goto exit_overflow;
if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL) if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
goto exit; goto exit;
newsk = tcp_create_openreq_child(sk, req, skb); newsk = tcp_create_openreq_child(sk, req, skb);
...@@ -1257,7 +1267,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, ...@@ -1257,7 +1267,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newinet->saddr = ireq->loc_addr; newinet->saddr = ireq->loc_addr;
newinet->opt = ireq->opt; newinet->opt = ireq->opt;
ireq->opt = NULL; ireq->opt = NULL;
newinet->mc_index = tcp_v4_iif(skb); newinet->mc_index = inet_iif(skb);
newinet->mc_ttl = skb->nh.iph->ttl; newinet->mc_ttl = skb->nh.iph->ttl;
newtp->ext_header_len = 0; newtp->ext_header_len = 0;
if (newinet->opt) if (newinet->opt)
...@@ -1285,18 +1295,17 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) ...@@ -1285,18 +1295,17 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{ {
struct tcphdr *th = skb->h.th; struct tcphdr *th = skb->h.th;
struct iphdr *iph = skb->nh.iph; struct iphdr *iph = skb->nh.iph;
struct tcp_sock *tp = tcp_sk(sk);
struct sock *nsk; struct sock *nsk;
struct request_sock **prev; struct request_sock **prev;
/* Find possible connection requests. */ /* Find possible connection requests. */
struct request_sock *req = tcp_v4_search_req(tp, &prev, th->source, struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
iph->saddr, iph->daddr); iph->saddr, iph->daddr);
if (req) if (req)
return tcp_check_req(sk, skb, req, prev); return tcp_check_req(sk, skb, req, prev);
nsk = __inet_lookup_established(&tcp_hashinfo, skb->nh.iph->saddr, nsk = __inet_lookup_established(&tcp_hashinfo, skb->nh.iph->saddr,
th->source, skb->nh.iph->daddr, th->source, skb->nh.iph->daddr,
ntohs(th->dest), tcp_v4_iif(skb)); ntohs(th->dest), inet_iif(skb));
if (nsk) { if (nsk) {
if (nsk->sk_state != TCP_TIME_WAIT) { if (nsk->sk_state != TCP_TIME_WAIT) {
...@@ -1440,7 +1449,7 @@ int tcp_v4_rcv(struct sk_buff *skb) ...@@ -1440,7 +1449,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
sk = __inet_lookup(&tcp_hashinfo, skb->nh.iph->saddr, th->source, sk = __inet_lookup(&tcp_hashinfo, skb->nh.iph->saddr, th->source,
skb->nh.iph->daddr, ntohs(th->dest), skb->nh.iph->daddr, ntohs(th->dest),
tcp_v4_iif(skb)); inet_iif(skb));
if (!sk) if (!sk)
goto no_tcp_socket; goto no_tcp_socket;
...@@ -1507,7 +1516,7 @@ int tcp_v4_rcv(struct sk_buff *skb) ...@@ -1507,7 +1516,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo, struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo,
skb->nh.iph->daddr, skb->nh.iph->daddr,
ntohs(th->dest), ntohs(th->dest),
tcp_v4_iif(skb)); inet_iif(skb));
if (sk2) { if (sk2) {
tcp_tw_deschedule((struct inet_timewait_sock *)sk); tcp_tw_deschedule((struct inet_timewait_sock *)sk);
inet_twsk_put((struct inet_timewait_sock *)sk); inet_twsk_put((struct inet_timewait_sock *)sk);
...@@ -1619,7 +1628,7 @@ static int tcp_v4_init_sock(struct sock *sk) ...@@ -1619,7 +1628,7 @@ static int tcp_v4_init_sock(struct sock *sk)
tcp_init_xmit_timers(sk); tcp_init_xmit_timers(sk);
tcp_prequeue_init(tp); tcp_prequeue_init(tp);
tp->rto = TCP_TIMEOUT_INIT; inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
tp->mdev = TCP_TIMEOUT_INIT; tp->mdev = TCP_TIMEOUT_INIT;
/* So many TCP implementations out there (incorrectly) count the /* So many TCP implementations out there (incorrectly) count the
...@@ -1672,7 +1681,7 @@ int tcp_v4_destroy_sock(struct sock *sk) ...@@ -1672,7 +1681,7 @@ int tcp_v4_destroy_sock(struct sock *sk)
__skb_queue_purge(&tp->ucopy.prequeue); __skb_queue_purge(&tp->ucopy.prequeue);
/* Clean up a referenced TCP bind bucket. */ /* Clean up a referenced TCP bind bucket. */
if (inet_sk(sk)->bind_hash) if (inet_csk(sk)->icsk_bind_hash)
inet_put_port(&tcp_hashinfo, sk); inet_put_port(&tcp_hashinfo, sk);
/* /*
...@@ -1707,7 +1716,7 @@ static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw) ...@@ -1707,7 +1716,7 @@ static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
static void *listening_get_next(struct seq_file *seq, void *cur) static void *listening_get_next(struct seq_file *seq, void *cur)
{ {
struct tcp_sock *tp; struct inet_connection_sock *icsk;
struct hlist_node *node; struct hlist_node *node;
struct sock *sk = cur; struct sock *sk = cur;
struct tcp_iter_state* st = seq->private; struct tcp_iter_state* st = seq->private;
...@@ -1723,7 +1732,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur) ...@@ -1723,7 +1732,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
if (st->state == TCP_SEQ_STATE_OPENREQ) { if (st->state == TCP_SEQ_STATE_OPENREQ) {
struct request_sock *req = cur; struct request_sock *req = cur;
tp = tcp_sk(st->syn_wait_sk); icsk = inet_csk(st->syn_wait_sk);
req = req->dl_next; req = req->dl_next;
while (1) { while (1) {
while (req) { while (req) {
...@@ -1736,17 +1745,17 @@ static void *listening_get_next(struct seq_file *seq, void *cur) ...@@ -1736,17 +1745,17 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
if (++st->sbucket >= TCP_SYNQ_HSIZE) if (++st->sbucket >= TCP_SYNQ_HSIZE)
break; break;
get_req: get_req:
req = tp->accept_queue.listen_opt->syn_table[st->sbucket]; req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
} }
sk = sk_next(st->syn_wait_sk); sk = sk_next(st->syn_wait_sk);
st->state = TCP_SEQ_STATE_LISTENING; st->state = TCP_SEQ_STATE_LISTENING;
read_unlock_bh(&tp->accept_queue.syn_wait_lock); read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
} else { } else {
tp = tcp_sk(sk); icsk = inet_csk(sk);
read_lock_bh(&tp->accept_queue.syn_wait_lock); read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
if (reqsk_queue_len(&tp->accept_queue)) if (reqsk_queue_len(&icsk->icsk_accept_queue))
goto start_req; goto start_req;
read_unlock_bh(&tp->accept_queue.syn_wait_lock); read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
sk = sk_next(sk); sk = sk_next(sk);
} }
get_sk: get_sk:
...@@ -1755,9 +1764,9 @@ static void *listening_get_next(struct seq_file *seq, void *cur) ...@@ -1755,9 +1764,9 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
cur = sk; cur = sk;
goto out; goto out;
} }
tp = tcp_sk(sk); icsk = inet_csk(sk);
read_lock_bh(&tp->accept_queue.syn_wait_lock); read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
if (reqsk_queue_len(&tp->accept_queue)) { if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req: start_req:
st->uid = sock_i_uid(sk); st->uid = sock_i_uid(sk);
st->syn_wait_sk = sk; st->syn_wait_sk = sk;
...@@ -1765,7 +1774,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur) ...@@ -1765,7 +1774,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
st->sbucket = 0; st->sbucket = 0;
goto get_req; goto get_req;
} }
read_unlock_bh(&tp->accept_queue.syn_wait_lock); read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
} }
if (++st->bucket < INET_LHTABLE_SIZE) { if (++st->bucket < INET_LHTABLE_SIZE) {
sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]); sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
...@@ -1951,8 +1960,8 @@ static void tcp_seq_stop(struct seq_file *seq, void *v) ...@@ -1951,8 +1960,8 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
switch (st->state) { switch (st->state) {
case TCP_SEQ_STATE_OPENREQ: case TCP_SEQ_STATE_OPENREQ:
if (v) { if (v) {
struct tcp_sock *tp = tcp_sk(st->syn_wait_sk); struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
read_unlock_bh(&tp->accept_queue.syn_wait_lock); read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
} }
case TCP_SEQ_STATE_LISTENING: case TCP_SEQ_STATE_LISTENING:
if (v != SEQ_START_TOKEN) if (v != SEQ_START_TOKEN)
...@@ -2058,18 +2067,19 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i) ...@@ -2058,18 +2067,19 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
int timer_active; int timer_active;
unsigned long timer_expires; unsigned long timer_expires;
struct tcp_sock *tp = tcp_sk(sp); struct tcp_sock *tp = tcp_sk(sp);
const struct inet_connection_sock *icsk = inet_csk(sp);
struct inet_sock *inet = inet_sk(sp); struct inet_sock *inet = inet_sk(sp);
unsigned int dest = inet->daddr; unsigned int dest = inet->daddr;
unsigned int src = inet->rcv_saddr; unsigned int src = inet->rcv_saddr;
__u16 destp = ntohs(inet->dport); __u16 destp = ntohs(inet->dport);
__u16 srcp = ntohs(inet->sport); __u16 srcp = ntohs(inet->sport);
if (tp->pending == TCP_TIME_RETRANS) { if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
timer_active = 1; timer_active = 1;
timer_expires = tp->timeout; timer_expires = icsk->icsk_timeout;
} else if (tp->pending == TCP_TIME_PROBE0) { } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
timer_active = 4; timer_active = 4;
timer_expires = tp->timeout; timer_expires = icsk->icsk_timeout;
} else if (timer_pending(&sp->sk_timer)) { } else if (timer_pending(&sp->sk_timer)) {
timer_active = 2; timer_active = 2;
timer_expires = sp->sk_timer.expires; timer_expires = sp->sk_timer.expires;
...@@ -2084,12 +2094,14 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i) ...@@ -2084,12 +2094,14 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq, tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
timer_active, timer_active,
jiffies_to_clock_t(timer_expires - jiffies), jiffies_to_clock_t(timer_expires - jiffies),
tp->retransmits, icsk->icsk_retransmits,
sock_i_uid(sp), sock_i_uid(sp),
tp->probes_out, tp->probes_out,
sock_i_ino(sp), sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_refcnt), sp,
tp->rto, tp->ack.ato, (tp->ack.quick << 1) | tp->ack.pingpong, icsk->icsk_rto,
icsk->icsk_ack.ato,
(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
tp->snd_cwnd, tp->snd_cwnd,
tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh); tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
} }
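
The timer_active value formatted above becomes the timer field of each /proc/net/tcp line. For reference, a minimal userspace sketch of the mapping implied by the branches above (code 3 is emitted by the timewait dump path, which lies outside this hunk; this is an illustration, not kernel code):

#include <stdio.h>

/* Timer codes reported in /proc/net/tcp, per the branches in
 * get_tcp4_sock() above. Plain C illustration. */
static const char *tcp_timer_name(int timer_active)
{
	switch (timer_active) {
	case 0: return "none";
	case 1: return "retransmit";        /* icsk_pending == ICSK_TIME_RETRANS */
	case 2: return "keepalive";         /* sk->sk_timer pending */
	case 3: return "TIME_WAIT";         /* timewait dump path */
	case 4: return "zero window probe"; /* icsk_pending == ICSK_TIME_PROBE0 */
	default: return "unknown";
	}
}

int main(void)
{
	for (int t = 0; t <= 4; t++)
		printf("%d: %s\n", t, tcp_timer_name(t));
	return 0;
}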
...@@ -2174,7 +2186,7 @@ struct proto tcp_prot = { ...@@ -2174,7 +2186,7 @@ struct proto tcp_prot = {
.close = tcp_close, .close = tcp_close,
.connect = tcp_v4_connect, .connect = tcp_v4_connect,
.disconnect = tcp_disconnect, .disconnect = tcp_disconnect,
.accept = tcp_accept, .accept = inet_csk_accept,
.ioctl = tcp_ioctl, .ioctl = tcp_ioctl,
.init = tcp_v4_init_sock, .init = tcp_v4_init_sock,
.destroy = tcp_v4_destroy_sock, .destroy = tcp_v4_destroy_sock,
......
...@@ -271,7 +271,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) ...@@ -271,7 +271,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
if (tw != NULL) { if (tw != NULL) {
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
const int rto = (tp->rto << 2) - (tp->rto >> 1); const struct inet_connection_sock *icsk = inet_csk(sk);
const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale; tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
tcptw->tw_rcv_nxt = tp->rcv_nxt; tcptw->tw_rcv_nxt = tp->rcv_nxt;
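
The shift expression above computes the timewait timeout base as 3.5 times the current RTO: (rto << 2) - (rto >> 1) is 4*rto - rto/2. A standalone check of the arithmetic:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* (rto << 2) - (rto >> 1) == 4*rto - rto/2, i.e. 3.5 * rto
	 * (exact when rto is even); the rto value here is arbitrary. */
	unsigned int rto = 200; /* jiffies */
	unsigned int tw_rto = (rto << 2) - (rto >> 1);
	assert(tw_rto == 700);
	printf("rto=%u -> 3.5*rto=%u\n", rto, tw_rto);
	return 0;
}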
...@@ -605,10 +606,11 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, ...@@ -605,10 +606,11 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
struct inet_request_sock *ireq = inet_rsk(req); struct inet_request_sock *ireq = inet_rsk(req);
struct tcp_request_sock *treq = tcp_rsk(req); struct tcp_request_sock *treq = tcp_rsk(req);
struct inet_sock *newinet = inet_sk(newsk); struct inet_sock *newinet = inet_sk(newsk);
struct inet_connection_sock *newicsk = inet_csk(newsk);
struct tcp_sock *newtp; struct tcp_sock *newtp;
newsk->sk_state = TCP_SYN_RECV; newsk->sk_state = TCP_SYN_RECV;
newinet->bind_hash = NULL; newicsk->icsk_bind_hash = NULL;
/* Clone the TCP header template */ /* Clone the TCP header template */
newinet->dport = ireq->rmt_port; newinet->dport = ireq->rmt_port;
...@@ -624,11 +626,11 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, ...@@ -624,11 +626,11 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn); tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn);
newtp->retransmits = 0; newicsk->icsk_retransmits = 0;
newtp->backoff = 0; newicsk->icsk_backoff = 0;
newtp->srtt = 0; newtp->srtt = 0;
newtp->mdev = TCP_TIMEOUT_INIT; newtp->mdev = TCP_TIMEOUT_INIT;
newtp->rto = TCP_TIMEOUT_INIT; newicsk->icsk_rto = TCP_TIMEOUT_INIT;
newtp->packets_out = 0; newtp->packets_out = 0;
newtp->left_out = 0; newtp->left_out = 0;
...@@ -667,10 +669,10 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, ...@@ -667,10 +669,10 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
newtp->rx_opt.num_sacks = 0; newtp->rx_opt.num_sacks = 0;
newtp->urg_data = 0; newtp->urg_data = 0;
/* Deinitialize accept_queue to trap illegal accesses. */ /* Deinitialize accept_queue to trap illegal accesses. */
memset(&newtp->accept_queue, 0, sizeof(newtp->accept_queue)); memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));
if (sock_flag(newsk, SOCK_KEEPOPEN)) if (sock_flag(newsk, SOCK_KEEPOPEN))
tcp_reset_keepalive_timer(newsk, inet_csk_reset_keepalive_timer(newsk,
keepalive_time_when(newtp)); keepalive_time_when(newtp));
newtp->rx_opt.tstamp_ok = ireq->tstamp_ok; newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
...@@ -701,7 +703,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, ...@@ -701,7 +703,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
newtp->tcp_header_len = sizeof(struct tcphdr); newtp->tcp_header_len = sizeof(struct tcphdr);
} }
if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len) if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
newtp->ack.last_seg_size = skb->len-newtp->tcp_header_len; newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
newtp->rx_opt.mss_clamp = req->mss; newtp->rx_opt.mss_clamp = req->mss;
TCP_ECN_openreq_child(newtp, req); TCP_ECN_openreq_child(newtp, req);
if (newtp->ecn_flags&TCP_ECN_OK) if (newtp->ecn_flags&TCP_ECN_OK)
...@@ -881,10 +883,10 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, ...@@ -881,10 +883,10 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
if (child == NULL) if (child == NULL)
goto listen_overflow; goto listen_overflow;
tcp_synq_unlink(tp, req, prev); inet_csk_reqsk_queue_unlink(sk, req, prev);
tcp_synq_removed(sk, req); inet_csk_reqsk_queue_removed(sk, req);
tcp_acceptq_queue(sk, req, child); inet_csk_reqsk_queue_add(sk, req, child);
return child; return child;
listen_overflow: listen_overflow:
...@@ -898,7 +900,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, ...@@ -898,7 +900,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
if (!(flg & TCP_FLAG_RST)) if (!(flg & TCP_FLAG_RST))
req->rsk_ops->send_reset(skb); req->rsk_ops->send_reset(skb);
tcp_synq_drop(sk, req, prev); inet_csk_reqsk_queue_drop(sk, req, prev);
return NULL; return NULL;
} }
......
...@@ -105,8 +105,9 @@ static __u16 tcp_advertise_mss(struct sock *sk) ...@@ -105,8 +105,9 @@ static __u16 tcp_advertise_mss(struct sock *sk)
/* RFC2861. Reset CWND after idle period longer RTO to "restart window". /* RFC2861. Reset CWND after idle period longer RTO to "restart window".
* This is the first part of cwnd validation mechanism. */ * This is the first part of cwnd validation mechanism. */
static void tcp_cwnd_restart(struct tcp_sock *tp, struct dst_entry *dst) static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
{ {
struct tcp_sock *tp = tcp_sk(sk);
s32 delta = tcp_time_stamp - tp->lsndtime; s32 delta = tcp_time_stamp - tp->lsndtime;
u32 restart_cwnd = tcp_init_cwnd(tp, dst); u32 restart_cwnd = tcp_init_cwnd(tp, dst);
u32 cwnd = tp->snd_cwnd; u32 cwnd = tp->snd_cwnd;
...@@ -116,7 +117,7 @@ static void tcp_cwnd_restart(struct tcp_sock *tp, struct dst_entry *dst) ...@@ -116,7 +117,7 @@ static void tcp_cwnd_restart(struct tcp_sock *tp, struct dst_entry *dst)
tp->snd_ssthresh = tcp_current_ssthresh(tp); tp->snd_ssthresh = tcp_current_ssthresh(tp);
restart_cwnd = min(restart_cwnd, cwnd); restart_cwnd = min(restart_cwnd, cwnd);
while ((delta -= tp->rto) > 0 && cwnd > restart_cwnd) while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
cwnd >>= 1; cwnd >>= 1;
tp->snd_cwnd = max(cwnd, restart_cwnd); tp->snd_cwnd = max(cwnd, restart_cwnd);
tp->snd_cwnd_stamp = tcp_time_stamp; tp->snd_cwnd_stamp = tcp_time_stamp;
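
tcp_cwnd_restart() now takes the socket instead of the tcp_sock so it can reach icsk_rto; behaviourally it is unchanged: the window is halved once per full RTO of idle time, never falling below the restart window. A minimal userspace model of that loop (function name invented for illustration):

#include <stdio.h>

/* RFC 2861 restart loop from tcp_cwnd_restart() above: one halving
 * per RTO of idle time, floored at restart_cwnd. Not kernel code. */
static unsigned int cwnd_after_idle(unsigned int cwnd, unsigned int restart_cwnd,
				    int delta, int rto)
{
	while ((delta -= rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	return cwnd > restart_cwnd ? cwnd : restart_cwnd;
}

int main(void)
{
	/* 10 segments in flight, restart window of 2, idle for 5 RTOs. */
	printf("cwnd = %u\n", cwnd_after_idle(10, 2, 5 * 100, 100)); /* -> 2 */
	return 0;
}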
...@@ -126,26 +127,25 @@ static void tcp_cwnd_restart(struct tcp_sock *tp, struct dst_entry *dst) ...@@ -126,26 +127,25 @@ static void tcp_cwnd_restart(struct tcp_sock *tp, struct dst_entry *dst)
static inline void tcp_event_data_sent(struct tcp_sock *tp, static inline void tcp_event_data_sent(struct tcp_sock *tp,
struct sk_buff *skb, struct sock *sk) struct sk_buff *skb, struct sock *sk)
{ {
u32 now = tcp_time_stamp; struct inet_connection_sock *icsk = inet_csk(sk);
const u32 now = tcp_time_stamp;
if (!tp->packets_out && (s32)(now - tp->lsndtime) > tp->rto) if (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto)
tcp_cwnd_restart(tp, __sk_dst_get(sk)); tcp_cwnd_restart(sk, __sk_dst_get(sk));
tp->lsndtime = now; tp->lsndtime = now;
/* If it is a reply for ato after last received /* If it is a reply for ato after last received
* packet, enter pingpong mode. * packet, enter pingpong mode.
*/ */
if ((u32)(now - tp->ack.lrcvtime) < tp->ack.ato) if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
tp->ack.pingpong = 1; icsk->icsk_ack.pingpong = 1;
} }
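
The pingpong test above marks the connection as interactive when data goes out within one ATO of the last received segment. The predicate in isolation, with the jiffies subtraction done in unsigned arithmetic as in the original (illustration only):

#include <stdbool.h>
#include <stdio.h>

/* Pingpong-entry test from tcp_event_data_sent(), as a pure function;
 * 'now' and 'lrcvtime' are jiffies values. */
static bool enters_pingpong(unsigned int now, unsigned int lrcvtime,
			    unsigned int ato)
{
	return (unsigned int)(now - lrcvtime) < ato;
}

int main(void)
{
	printf("%d\n", enters_pingpong(1040, 1000, 50)); /* 40 < 50  -> 1 */
	printf("%d\n", enters_pingpong(1100, 1000, 50)); /* 100 >= 50 -> 0 */
	return 0;
}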
static __inline__ void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) static __inline__ void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{ {
struct tcp_sock *tp = tcp_sk(sk); tcp_dec_quickack_mode(sk, pkts);
inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
tcp_dec_quickack_mode(tp, pkts);
tcp_clear_xmit_timer(sk, TCP_TIME_DACK);
} }
/* Determine a window scaling and initial window to offer. /* Determine a window scaling and initial window to offer.
...@@ -696,7 +696,7 @@ static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp) ...@@ -696,7 +696,7 @@ static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
if (tp->packets_out > tp->snd_cwnd_used) if (tp->packets_out > tp->snd_cwnd_used)
tp->snd_cwnd_used = tp->packets_out; tp->snd_cwnd_used = tp->packets_out;
if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto) if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
tcp_cwnd_application_limited(sk); tcp_cwnd_application_limited(sk);
} }
} }
...@@ -1147,6 +1147,7 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now) ...@@ -1147,6 +1147,7 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
*/ */
u32 __tcp_select_window(struct sock *sk) u32 __tcp_select_window(struct sock *sk)
{ {
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
/* MSS for the peer's data. Previous versions used mss_clamp /* MSS for the peer's data. Previous versions used mss_clamp
* here. I don't know if the value based on our guesses * here. I don't know if the value based on our guesses
...@@ -1154,7 +1155,7 @@ u32 __tcp_select_window(struct sock *sk) ...@@ -1154,7 +1155,7 @@ u32 __tcp_select_window(struct sock *sk)
* but may be worse for the performance because of rcv_mss * but may be worse for the performance because of rcv_mss
* fluctuations. --SAW 1998/11/1 * fluctuations. --SAW 1998/11/1
*/ */
int mss = tp->ack.rcv_mss; int mss = icsk->icsk_ack.rcv_mss;
int free_space = tcp_space(sk); int free_space = tcp_space(sk);
int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk)); int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
int window; int window;
...@@ -1163,7 +1164,7 @@ u32 __tcp_select_window(struct sock *sk) ...@@ -1163,7 +1164,7 @@ u32 __tcp_select_window(struct sock *sk)
mss = full_space; mss = full_space;
if (free_space < full_space/2) { if (free_space < full_space/2) {
tp->ack.quick = 0; icsk->icsk_ack.quick = 0;
if (tcp_memory_pressure) if (tcp_memory_pressure)
tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss); tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss);
...@@ -1491,7 +1492,8 @@ void tcp_xmit_retransmit_queue(struct sock *sk) ...@@ -1491,7 +1492,8 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
if (skb == if (skb ==
skb_peek(&sk->sk_write_queue)) skb_peek(&sk->sk_write_queue))
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
inet_csk(sk)->icsk_rto);
} }
packet_cnt -= tcp_skb_pcount(skb); packet_cnt -= tcp_skb_pcount(skb);
...@@ -1544,7 +1546,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk) ...@@ -1544,7 +1546,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
break; break;
if (skb == skb_peek(&sk->sk_write_queue)) if (skb == skb_peek(&sk->sk_write_queue))
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto);
NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS); NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
} }
...@@ -1780,8 +1782,8 @@ static inline void tcp_connect_init(struct sock *sk) ...@@ -1780,8 +1782,8 @@ static inline void tcp_connect_init(struct sock *sk)
tp->rcv_wup = 0; tp->rcv_wup = 0;
tp->copied_seq = 0; tp->copied_seq = 0;
tp->rto = TCP_TIMEOUT_INIT; inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
tp->retransmits = 0; inet_csk(sk)->icsk_retransmits = 0;
tcp_clear_retrans(tp); tcp_clear_retrans(tp);
} }
...@@ -1824,7 +1826,7 @@ int tcp_connect(struct sock *sk) ...@@ -1824,7 +1826,7 @@ int tcp_connect(struct sock *sk)
TCP_INC_STATS(TCP_MIB_ACTIVEOPENS); TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
/* Timer for repeating the SYN until an answer. */ /* Timer for repeating the SYN until an answer. */
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto);
return 0; return 0;
} }
...@@ -1834,20 +1836,21 @@ int tcp_connect(struct sock *sk) ...@@ -1834,20 +1836,21 @@ int tcp_connect(struct sock *sk)
*/ */
void tcp_send_delayed_ack(struct sock *sk) void tcp_send_delayed_ack(struct sock *sk)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk);
int ato = tp->ack.ato; int ato = icsk->icsk_ack.ato;
unsigned long timeout; unsigned long timeout;
if (ato > TCP_DELACK_MIN) { if (ato > TCP_DELACK_MIN) {
const struct tcp_sock *tp = tcp_sk(sk);
int max_ato = HZ/2; int max_ato = HZ/2;
if (tp->ack.pingpong || (tp->ack.pending&TCP_ACK_PUSHED)) if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
max_ato = TCP_DELACK_MAX; max_ato = TCP_DELACK_MAX;
/* Slow path, intersegment interval is "high". */ /* Slow path, intersegment interval is "high". */
/* If some rtt estimate is known, use it to bound delayed ack. /* If some rtt estimate is known, use it to bound delayed ack.
* Do not use tp->rto here, use results of rtt measurements * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
* directly. * directly.
*/ */
if (tp->srtt) { if (tp->srtt) {
...@@ -1864,21 +1867,22 @@ void tcp_send_delayed_ack(struct sock *sk) ...@@ -1864,21 +1867,22 @@ void tcp_send_delayed_ack(struct sock *sk)
timeout = jiffies + ato; timeout = jiffies + ato;
/* Use new timeout only if there wasn't an older one earlier. */ /* Use new timeout only if there wasn't an older one earlier. */
if (tp->ack.pending&TCP_ACK_TIMER) { if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
/* If delack timer was blocked or is about to expire, /* If delack timer was blocked or is about to expire,
* send ACK now. * send ACK now.
*/ */
if (tp->ack.blocked || time_before_eq(tp->ack.timeout, jiffies+(ato>>2))) { if (icsk->icsk_ack.blocked ||
time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
tcp_send_ack(sk); tcp_send_ack(sk);
return; return;
} }
if (!time_before(timeout, tp->ack.timeout)) if (!time_before(timeout, icsk->icsk_ack.timeout))
timeout = tp->ack.timeout; timeout = icsk->icsk_ack.timeout;
} }
tp->ack.pending |= TCP_ACK_SCHED|TCP_ACK_TIMER; icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
tp->ack.timeout = timeout; icsk->icsk_ack.timeout = timeout;
sk_reset_timer(sk, &tp->delack_timer, timeout); sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
} }
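
The rearming logic above coalesces deadlines: if a delayed-ACK timer is already pending and either was blocked or will fire within ato/4, the ACK is sent immediately; otherwise the earlier of the old and new deadlines is kept. A compact model of the decision (jiffy wraparound handling omitted):

#include <stdbool.h>
#include <stdio.h>

/* Deadline coalescing from tcp_send_delayed_ack(); returns true when
 * the ACK should be sent at once, else stores the timeout to arm. */
static bool delack_decide(unsigned long now, unsigned long ato,
			  bool pending, bool blocked,
			  unsigned long old_timeout, unsigned long *arm)
{
	unsigned long timeout = now + ato;

	if (pending) {
		if (blocked || old_timeout <= now + (ato >> 2))
			return true;           /* about to expire: ACK now */
		if (old_timeout < timeout)
			timeout = old_timeout; /* keep the earlier deadline */
	}
	*arm = timeout;
	return false;
}

int main(void)
{
	unsigned long arm;
	/* Old timer fires within ato/4: send immediately. */
	printf("%d\n", delack_decide(1000, 40, true, false, 1005, &arm));
	/* Old timer fires later than the new deadline: arm the new one. */
	if (!delack_decide(1000, 40, true, false, 1100, &arm))
		printf("arm at %lu\n", arm); /* 1040 */
	return 0;
}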
/* This routine sends an ack and also updates the window. */ /* This routine sends an ack and also updates the window. */
...@@ -1895,9 +1899,9 @@ void tcp_send_ack(struct sock *sk) ...@@ -1895,9 +1899,9 @@ void tcp_send_ack(struct sock *sk)
*/ */
buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
if (buff == NULL) { if (buff == NULL) {
tcp_schedule_ack(tp); inet_csk_schedule_ack(sk);
tp->ack.ato = TCP_ATO_MIN; inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
tcp_reset_xmit_timer(sk, TCP_TIME_DACK, TCP_DELACK_MAX); inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX);
return; return;
} }
...@@ -2011,6 +2015,7 @@ int tcp_write_wakeup(struct sock *sk) ...@@ -2011,6 +2015,7 @@ int tcp_write_wakeup(struct sock *sk)
*/ */
void tcp_send_probe0(struct sock *sk) void tcp_send_probe0(struct sock *sk)
{ {
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
int err; int err;
...@@ -2019,16 +2024,16 @@ void tcp_send_probe0(struct sock *sk) ...@@ -2019,16 +2024,16 @@ void tcp_send_probe0(struct sock *sk)
if (tp->packets_out || !sk->sk_send_head) { if (tp->packets_out || !sk->sk_send_head) {
/* Cancel probe timer, if it is not required. */ /* Cancel probe timer, if it is not required. */
tp->probes_out = 0; tp->probes_out = 0;
tp->backoff = 0; icsk->icsk_backoff = 0;
return; return;
} }
if (err <= 0) { if (err <= 0) {
if (tp->backoff < sysctl_tcp_retries2) if (icsk->icsk_backoff < sysctl_tcp_retries2)
tp->backoff++; icsk->icsk_backoff++;
tp->probes_out++; tp->probes_out++;
tcp_reset_xmit_timer (sk, TCP_TIME_PROBE0, inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
min(tp->rto << tp->backoff, TCP_RTO_MAX)); min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX));
} else { } else {
/* If packet was not sent due to local congestion, /* If packet was not sent due to local congestion,
* do not backoff and do not remember probes_out. * do not backoff and do not remember probes_out.
...@@ -2038,8 +2043,9 @@ void tcp_send_probe0(struct sock *sk) ...@@ -2038,8 +2043,9 @@ void tcp_send_probe0(struct sock *sk)
*/ */
if (!tp->probes_out) if (!tp->probes_out)
tp->probes_out=1; tp->probes_out=1;
tcp_reset_xmit_timer (sk, TCP_TIME_PROBE0, inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
min(tp->rto << tp->backoff, TCP_RESOURCE_PROBE_INTERVAL)); min(icsk->icsk_rto << icsk->icsk_backoff,
TCP_RESOURCE_PROBE_INTERVAL));
} }
} }
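
Both probe-timer rearms above use exponential backoff, icsk_rto << icsk_backoff, clamped either at TCP_RTO_MAX or, when the probe failed only for local reasons, at the shorter TCP_RESOURCE_PROBE_INTERVAL. The clamp arithmetic as a standalone check (constants assumed from 2.6-era defaults):

#include <stdio.h>

#define HZ 1000
#define TCP_RTO_MAX (120 * HZ) /* assumed value: 120 s in jiffies */

static unsigned long probe0_when(unsigned long rto, unsigned int backoff)
{
	unsigned long when = rto << backoff;
	return when < TCP_RTO_MAX ? when : TCP_RTO_MAX;
}

int main(void)
{
	for (unsigned int b = 0; b <= 8; b++)
		printf("backoff=%u -> %lu jiffies\n", b, probe0_when(3 * HZ, b));
	return 0;
}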
......
...@@ -36,9 +36,9 @@ static void tcp_write_timer(unsigned long); ...@@ -36,9 +36,9 @@ static void tcp_write_timer(unsigned long);
static void tcp_delack_timer(unsigned long); static void tcp_delack_timer(unsigned long);
static void tcp_keepalive_timer (unsigned long data); static void tcp_keepalive_timer (unsigned long data);
#ifdef TCP_DEBUG #ifdef INET_CSK_DEBUG
const char tcp_timer_bug_msg[] = KERN_DEBUG "tcpbug: unknown timer value\n"; const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(tcp_timer_bug_msg); EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif #endif
/* /*
...@@ -46,40 +46,45 @@ EXPORT_SYMBOL(tcp_timer_bug_msg); ...@@ -46,40 +46,45 @@ EXPORT_SYMBOL(tcp_timer_bug_msg);
* We may wish to use just one timer maintaining a list of expire jiffies * We may wish to use just one timer maintaining a list of expire jiffies
* to optimize. * to optimize.
*/ */
void inet_csk_init_xmit_timers(struct sock *sk,
void tcp_init_xmit_timers(struct sock *sk) void (*retransmit_handler)(unsigned long),
void (*delack_handler)(unsigned long),
void (*keepalive_handler)(unsigned long))
{ {
struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk);
init_timer(&tp->retransmit_timer); init_timer(&icsk->icsk_retransmit_timer);
tp->retransmit_timer.function=&tcp_write_timer; init_timer(&icsk->icsk_delack_timer);
tp->retransmit_timer.data = (unsigned long) sk; init_timer(&sk->sk_timer);
tp->pending = 0;
init_timer(&tp->delack_timer); icsk->icsk_retransmit_timer.function = retransmit_handler;
tp->delack_timer.function=&tcp_delack_timer; icsk->icsk_delack_timer.function = delack_handler;
tp->delack_timer.data = (unsigned long) sk; sk->sk_timer.function = keepalive_handler;
tp->ack.pending = 0;
init_timer(&sk->sk_timer); icsk->icsk_retransmit_timer.data =
sk->sk_timer.function = &tcp_keepalive_timer; icsk->icsk_delack_timer.data =
sk->sk_timer.data = (unsigned long)sk; sk->sk_timer.data = (unsigned long)sk;
icsk->icsk_pending = icsk->icsk_ack.pending = 0;
} }
void tcp_clear_xmit_timers(struct sock *sk) void inet_csk_clear_xmit_timers(struct sock *sk)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk);
tp->pending = 0; icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;
sk_stop_timer(sk, &tp->retransmit_timer);
tp->ack.pending = 0;
tp->ack.blocked = 0;
sk_stop_timer(sk, &tp->delack_timer);
sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
sk_stop_timer(sk, &icsk->icsk_delack_timer);
sk_stop_timer(sk, &sk->sk_timer); sk_stop_timer(sk, &sk->sk_timer);
} }
void tcp_init_xmit_timers(struct sock *sk)
{
inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
&tcp_keepalive_timer);
}
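
This is the payoff of the refactor: inet_csk_init_xmit_timers() owns the three timer slots and each protocol merely injects its handlers, as the new tcp_init_xmit_timers() wrapper above does. A sketch of what another connection-oriented protocol's caller would look like (DCCP, per the commit message; the dccp_* handler names are placeholders, not part of this patch):

/* Hypothetical mirror of the TCP wrapper above; handler bodies elided. */
static void dccp_write_timer(unsigned long data);
static void dccp_delack_timer(unsigned long data);
static void dccp_keepalive_timer(unsigned long data);

void dccp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
				  &dccp_keepalive_timer);
}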
static void tcp_write_err(struct sock *sk) static void tcp_write_err(struct sock *sk)
{ {
sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT; sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
...@@ -155,15 +160,15 @@ static int tcp_orphan_retries(struct sock *sk, int alive) ...@@ -155,15 +160,15 @@ static int tcp_orphan_retries(struct sock *sk, int alive)
/* A write timeout has occurred. Process the after effects. */ /* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk) static int tcp_write_timeout(struct sock *sk)
{ {
struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk);
int retry_until; int retry_until;
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
if (tp->retransmits) if (icsk->icsk_retransmits)
dst_negative_advice(&sk->sk_dst_cache); dst_negative_advice(&sk->sk_dst_cache);
retry_until = tp->syn_retries ? : sysctl_tcp_syn_retries; retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
} else { } else {
if (tp->retransmits >= sysctl_tcp_retries1) { if (icsk->icsk_retransmits >= sysctl_tcp_retries1) {
/* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu black /* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu black
hole detection. :-( hole detection. :-(
...@@ -189,16 +194,16 @@ static int tcp_write_timeout(struct sock *sk) ...@@ -189,16 +194,16 @@ static int tcp_write_timeout(struct sock *sk)
retry_until = sysctl_tcp_retries2; retry_until = sysctl_tcp_retries2;
if (sock_flag(sk, SOCK_DEAD)) { if (sock_flag(sk, SOCK_DEAD)) {
int alive = (tp->rto < TCP_RTO_MAX); const int alive = (icsk->icsk_rto < TCP_RTO_MAX);
retry_until = tcp_orphan_retries(sk, alive); retry_until = tcp_orphan_retries(sk, alive);
if (tcp_out_of_resources(sk, alive || tp->retransmits < retry_until)) if (tcp_out_of_resources(sk, alive || icsk->icsk_retransmits < retry_until))
return 1; return 1;
} }
} }
if (tp->retransmits >= retry_until) { if (icsk->icsk_retransmits >= retry_until) {
/* Has it gone just too far? */ /* Has it gone just too far? */
tcp_write_err(sk); tcp_write_err(sk);
return 1; return 1;
...@@ -210,26 +215,27 @@ static void tcp_delack_timer(unsigned long data) ...@@ -210,26 +215,27 @@ static void tcp_delack_timer(unsigned long data)
{ {
struct sock *sk = (struct sock*)data; struct sock *sk = (struct sock*)data;
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
bh_lock_sock(sk); bh_lock_sock(sk);
if (sock_owned_by_user(sk)) { if (sock_owned_by_user(sk)) {
/* Try again later. */ /* Try again later. */
tp->ack.blocked = 1; icsk->icsk_ack.blocked = 1;
NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED); NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED);
sk_reset_timer(sk, &tp->delack_timer, jiffies + TCP_DELACK_MIN); sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN);
goto out_unlock; goto out_unlock;
} }
sk_stream_mem_reclaim(sk); sk_stream_mem_reclaim(sk);
if (sk->sk_state == TCP_CLOSE || !(tp->ack.pending & TCP_ACK_TIMER)) if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
goto out; goto out;
if (time_after(tp->ack.timeout, jiffies)) { if (time_after(icsk->icsk_ack.timeout, jiffies)) {
sk_reset_timer(sk, &tp->delack_timer, tp->ack.timeout); sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
goto out; goto out;
} }
tp->ack.pending &= ~TCP_ACK_TIMER; icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
if (!skb_queue_empty(&tp->ucopy.prequeue)) { if (!skb_queue_empty(&tp->ucopy.prequeue)) {
struct sk_buff *skb; struct sk_buff *skb;
...@@ -242,16 +248,16 @@ static void tcp_delack_timer(unsigned long data) ...@@ -242,16 +248,16 @@ static void tcp_delack_timer(unsigned long data)
tp->ucopy.memory = 0; tp->ucopy.memory = 0;
} }
if (tcp_ack_scheduled(tp)) { if (inet_csk_ack_scheduled(sk)) {
if (!tp->ack.pingpong) { if (!icsk->icsk_ack.pingpong) {
/* Delayed ACK missed: inflate ATO. */ /* Delayed ACK missed: inflate ATO. */
tp->ack.ato = min(tp->ack.ato << 1, tp->rto); icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
} else { } else {
/* Delayed ACK missed: leave pingpong mode and /* Delayed ACK missed: leave pingpong mode and
* deflate ATO. * deflate ATO.
*/ */
tp->ack.pingpong = 0; icsk->icsk_ack.pingpong = 0;
tp->ack.ato = TCP_ATO_MIN; icsk->icsk_ack.ato = TCP_ATO_MIN;
} }
tcp_send_ack(sk); tcp_send_ack(sk);
NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS); NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS);
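
A delayed ACK that fires from the timer means the ATO estimate was too short, so it is adjusted above: doubled (capped at the RTO) outside pingpong mode, reset to TCP_ATO_MIN when leaving it. The adjustment as a pure function (TCP_ATO_MIN value assumed):

#include <stdio.h>

#define TCP_ATO_MIN 10 /* assumed; the kernel derives this from HZ */

/* ATO adjustment from tcp_delack_timer(): inflate on a missed delayed
 * ACK, deflate when leaving pingpong mode. Userspace sketch. */
static unsigned int adjust_ato(unsigned int ato, unsigned int rto, int *pingpong)
{
	if (!*pingpong)
		return (ato << 1) < rto ? (ato << 1) : rto; /* min(ato << 1, rto) */
	*pingpong = 0;
	return TCP_ATO_MIN;
}

int main(void)
{
	int pp = 0;
	printf("%u\n", adjust_ato(40, 200, &pp)); /* 80 */
	pp = 1;
	printf("%u\n", adjust_ato(40, 200, &pp)); /* 10, pingpong cleared */
	return 0;
}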
...@@ -294,7 +300,8 @@ static void tcp_probe_timer(struct sock *sk) ...@@ -294,7 +300,8 @@ static void tcp_probe_timer(struct sock *sk)
max_probes = sysctl_tcp_retries2; max_probes = sysctl_tcp_retries2;
if (sock_flag(sk, SOCK_DEAD)) { if (sock_flag(sk, SOCK_DEAD)) {
int alive = ((tp->rto<<tp->backoff) < TCP_RTO_MAX); const struct inet_connection_sock *icsk = inet_csk(sk);
const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX);
max_probes = tcp_orphan_retries(sk, alive); max_probes = tcp_orphan_retries(sk, alive);
...@@ -317,6 +324,7 @@ static void tcp_probe_timer(struct sock *sk) ...@@ -317,6 +324,7 @@ static void tcp_probe_timer(struct sock *sk)
static void tcp_retransmit_timer(struct sock *sk) static void tcp_retransmit_timer(struct sock *sk)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
if (!tp->packets_out) if (!tp->packets_out)
goto out; goto out;
...@@ -351,7 +359,7 @@ static void tcp_retransmit_timer(struct sock *sk) ...@@ -351,7 +359,7 @@ static void tcp_retransmit_timer(struct sock *sk)
if (tcp_write_timeout(sk)) if (tcp_write_timeout(sk))
goto out; goto out;
if (tp->retransmits == 0) { if (icsk->icsk_retransmits == 0) {
if (tp->ca_state == TCP_CA_Disorder || tp->ca_state == TCP_CA_Recovery) { if (tp->ca_state == TCP_CA_Disorder || tp->ca_state == TCP_CA_Recovery) {
if (tp->rx_opt.sack_ok) { if (tp->rx_opt.sack_ok) {
if (tp->ca_state == TCP_CA_Recovery) if (tp->ca_state == TCP_CA_Recovery)
...@@ -381,10 +389,10 @@ static void tcp_retransmit_timer(struct sock *sk) ...@@ -381,10 +389,10 @@ static void tcp_retransmit_timer(struct sock *sk)
/* Retransmission failed because of local congestion, /* Retransmission failed because of local congestion,
* do not backoff. * do not backoff.
*/ */
if (!tp->retransmits) if (!icsk->icsk_retransmits)
tp->retransmits=1; icsk->icsk_retransmits = 1;
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
min(tp->rto, TCP_RESOURCE_PROBE_INTERVAL)); min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL));
goto out; goto out;
} }
...@@ -403,13 +411,13 @@ static void tcp_retransmit_timer(struct sock *sk) ...@@ -403,13 +411,13 @@ static void tcp_retransmit_timer(struct sock *sk)
* implemented ftp to mars will work nicely. We will have to fix * implemented ftp to mars will work nicely. We will have to fix
* the 120 second clamps though! * the 120 second clamps though!
*/ */
tp->backoff++; icsk->icsk_backoff++;
tp->retransmits++; icsk->icsk_retransmits++;
out_reset_timer: out_reset_timer:
tp->rto = min(tp->rto << 1, TCP_RTO_MAX); icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto);
if (tp->retransmits > sysctl_tcp_retries1) if (icsk->icsk_retransmits > sysctl_tcp_retries1)
__sk_dst_reset(sk); __sk_dst_reset(sk);
out:; out:;
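
out_reset_timer above is classic exponential backoff: every expiry doubles icsk_rto up to TCP_RTO_MAX, and after sysctl_tcp_retries1 failures the cached route is dropped via __sk_dst_reset(). The doubling sequence, starting from TCP_TIMEOUT_INIT (3 s in this era), as a standalone loop (constants assumed):

#include <stdio.h>

#define HZ 1000
#define TCP_RTO_MAX (120 * HZ) /* assumed 2.6-era value */

int main(void)
{
	unsigned long rto = 3 * HZ; /* TCP_TIMEOUT_INIT */
	for (int retransmits = 1; retransmits <= 8; retransmits++) {
		rto = (rto << 1) < TCP_RTO_MAX ? (rto << 1) : TCP_RTO_MAX;
		printf("retransmit %d: rto = %lu jiffies\n", retransmits, rto);
	}
	return 0;
}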
...@@ -418,32 +426,32 @@ out:; ...@@ -418,32 +426,32 @@ out:;
static void tcp_write_timer(unsigned long data) static void tcp_write_timer(unsigned long data)
{ {
struct sock *sk = (struct sock*)data; struct sock *sk = (struct sock*)data;
struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk);
int event; int event;
bh_lock_sock(sk); bh_lock_sock(sk);
if (sock_owned_by_user(sk)) { if (sock_owned_by_user(sk)) {
/* Try again later */ /* Try again later */
sk_reset_timer(sk, &tp->retransmit_timer, jiffies + (HZ / 20)); sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + (HZ / 20));
goto out_unlock; goto out_unlock;
} }
if (sk->sk_state == TCP_CLOSE || !tp->pending) if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
goto out; goto out;
if (time_after(tp->timeout, jiffies)) { if (time_after(icsk->icsk_timeout, jiffies)) {
sk_reset_timer(sk, &tp->retransmit_timer, tp->timeout); sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
goto out; goto out;
} }
event = tp->pending; event = icsk->icsk_pending;
tp->pending = 0; icsk->icsk_pending = 0;
switch (event) { switch (event) {
case TCP_TIME_RETRANS: case ICSK_TIME_RETRANS:
tcp_retransmit_timer(sk); tcp_retransmit_timer(sk);
break; break;
case TCP_TIME_PROBE0: case ICSK_TIME_PROBE0:
tcp_probe_timer(sk); tcp_probe_timer(sk);
break; break;
} }
...@@ -463,8 +471,9 @@ static void tcp_write_timer(unsigned long data) ...@@ -463,8 +471,9 @@ static void tcp_write_timer(unsigned long data)
static void tcp_synack_timer(struct sock *sk) static void tcp_synack_timer(struct sock *sk)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
struct listen_sock *lopt = tp->accept_queue.listen_opt; struct inet_connection_sock *icsk = inet_csk(sk);
int max_retries = tp->syn_retries ? : sysctl_tcp_synack_retries; struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
int thresh = max_retries; int thresh = max_retries;
unsigned long now = jiffies; unsigned long now = jiffies;
struct request_sock **reqp, *req; struct request_sock **reqp, *req;
...@@ -526,8 +535,8 @@ static void tcp_synack_timer(struct sock *sk) ...@@ -526,8 +535,8 @@ static void tcp_synack_timer(struct sock *sk)
} }
/* Drop this request */ /* Drop this request */
tcp_synq_unlink(tp, req, reqp); inet_csk_reqsk_queue_unlink(sk, req, reqp);
reqsk_queue_removed(&tp->accept_queue, req); reqsk_queue_removed(&icsk->icsk_accept_queue, req);
reqsk_free(req); reqsk_free(req);
continue; continue;
} }
...@@ -541,15 +550,15 @@ static void tcp_synack_timer(struct sock *sk) ...@@ -541,15 +550,15 @@ static void tcp_synack_timer(struct sock *sk)
lopt->clock_hand = i; lopt->clock_hand = i;
if (lopt->qlen) if (lopt->qlen)
tcp_reset_keepalive_timer(sk, TCP_SYNQ_INTERVAL); inet_csk_reset_keepalive_timer(sk, TCP_SYNQ_INTERVAL);
} }
void tcp_delete_keepalive_timer (struct sock *sk) void inet_csk_delete_keepalive_timer(struct sock *sk)
{ {
sk_stop_timer(sk, &sk->sk_timer); sk_stop_timer(sk, &sk->sk_timer);
} }
void tcp_reset_keepalive_timer (struct sock *sk, unsigned long len) void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{ {
sk_reset_timer(sk, &sk->sk_timer, jiffies + len); sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
} }
...@@ -560,9 +569,9 @@ void tcp_set_keepalive(struct sock *sk, int val) ...@@ -560,9 +569,9 @@ void tcp_set_keepalive(struct sock *sk, int val)
return; return;
if (val && !sock_flag(sk, SOCK_KEEPOPEN)) if (val && !sock_flag(sk, SOCK_KEEPOPEN))
tcp_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk))); inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
else if (!val) else if (!val)
tcp_delete_keepalive_timer(sk); inet_csk_delete_keepalive_timer(sk);
} }
...@@ -576,7 +585,7 @@ static void tcp_keepalive_timer (unsigned long data) ...@@ -576,7 +585,7 @@ static void tcp_keepalive_timer (unsigned long data)
bh_lock_sock(sk); bh_lock_sock(sk);
if (sock_owned_by_user(sk)) { if (sock_owned_by_user(sk)) {
/* Try again later. */ /* Try again later. */
tcp_reset_keepalive_timer (sk, HZ/20); inet_csk_reset_keepalive_timer (sk, HZ/20);
goto out; goto out;
} }
...@@ -587,7 +596,7 @@ static void tcp_keepalive_timer (unsigned long data) ...@@ -587,7 +596,7 @@ static void tcp_keepalive_timer (unsigned long data)
if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) { if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
if (tp->linger2 >= 0) { if (tp->linger2 >= 0) {
int tmo = tcp_fin_time(tp) - TCP_TIMEWAIT_LEN; const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;
if (tmo > 0) { if (tmo > 0) {
tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
...@@ -634,7 +643,7 @@ static void tcp_keepalive_timer (unsigned long data) ...@@ -634,7 +643,7 @@ static void tcp_keepalive_timer (unsigned long data)
sk_stream_mem_reclaim(sk); sk_stream_mem_reclaim(sk);
resched: resched:
tcp_reset_keepalive_timer (sk, elapsed); inet_csk_reset_keepalive_timer (sk, elapsed);
goto out; goto out;
death: death:
...@@ -645,7 +654,7 @@ static void tcp_keepalive_timer (unsigned long data) ...@@ -645,7 +654,7 @@ static void tcp_keepalive_timer (unsigned long data)
sock_put(sk); sock_put(sk);
} }
EXPORT_SYMBOL(tcp_clear_xmit_timers); EXPORT_SYMBOL(inet_csk_clear_xmit_timers);
EXPORT_SYMBOL(tcp_delete_keepalive_timer); EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);
EXPORT_SYMBOL(tcp_init_xmit_timers); EXPORT_SYMBOL(tcp_init_xmit_timers);
EXPORT_SYMBOL(tcp_reset_keepalive_timer); EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
...@@ -1043,7 +1043,7 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) ...@@ -1043,7 +1043,7 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
u32 sk_rcv_saddr = inet_sk(sk)->rcv_saddr; u32 sk_rcv_saddr = inet_sk(sk)->rcv_saddr;
u32 sk2_rcv_saddr = inet_rcv_saddr(sk2); u32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
int sk_ipv6only = ipv6_only_sock(sk); int sk_ipv6only = ipv6_only_sock(sk);
int sk2_ipv6only = tcp_v6_ipv6only(sk2); int sk2_ipv6only = inet_v6_ipv6only(sk2);
int addr_type = ipv6_addr_type(sk_rcv_saddr6); int addr_type = ipv6_addr_type(sk_rcv_saddr6);
int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED; int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
......
...@@ -207,9 +207,9 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum) ...@@ -207,9 +207,9 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
tb->fastreuse = 0; tb->fastreuse = 0;
success: success:
if (!inet_sk(sk)->bind_hash) if (!inet_csk(sk)->icsk_bind_hash)
inet_bind_hash(sk, tb, snum); inet_bind_hash(sk, tb, snum);
BUG_TRAP(inet_sk(sk)->bind_hash == tb); BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb);
ret = 0; ret = 0;
fail_unlock: fail_unlock:
...@@ -381,7 +381,7 @@ EXPORT_SYMBOL_GPL(tcp_v6_lookup); ...@@ -381,7 +381,7 @@ EXPORT_SYMBOL_GPL(tcp_v6_lookup);
* Open request hash tables. * Open request hash tables.
*/ */
static u32 tcp_v6_synq_hash(struct in6_addr *raddr, u16 rport, u32 rnd) static u32 tcp_v6_synq_hash(const struct in6_addr *raddr, const u16 rport, const u32 rnd)
{ {
u32 a, b, c; u32 a, b, c;
...@@ -401,14 +401,15 @@ static u32 tcp_v6_synq_hash(struct in6_addr *raddr, u16 rport, u32 rnd) ...@@ -401,14 +401,15 @@ static u32 tcp_v6_synq_hash(struct in6_addr *raddr, u16 rport, u32 rnd)
return c & (TCP_SYNQ_HSIZE - 1); return c & (TCP_SYNQ_HSIZE - 1);
} }
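
The hash above mixes the peer address and port with a per-queue random seed and reduces the result with & (TCP_SYNQ_HSIZE - 1), which matches a modulo only because TCP_SYNQ_HSIZE is a power of two. A quick check of that masking identity (table size assumed):

#include <stdio.h>

#define TCP_SYNQ_HSIZE 512 /* assumed; must be a power of two for the mask */

int main(void)
{
	/* x & (size - 1) == x % size holds only for power-of-two sizes. */
	unsigned int x = 123456789;
	printf("%u %u\n", x & (TCP_SYNQ_HSIZE - 1), x % TCP_SYNQ_HSIZE);
	return 0;
}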
static struct request_sock *tcp_v6_search_req(struct tcp_sock *tp, static struct request_sock *tcp_v6_search_req(const struct sock *sk,
struct request_sock ***prevp, struct request_sock ***prevp,
__u16 rport, __u16 rport,
struct in6_addr *raddr, struct in6_addr *raddr,
struct in6_addr *laddr, struct in6_addr *laddr,
int iif) int iif)
{ {
struct listen_sock *lopt = tp->accept_queue.listen_opt; const struct inet_connection_sock *icsk = inet_csk(sk);
struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
struct request_sock *req, **prev; struct request_sock *req, **prev;
for (prev = &lopt->syn_table[tcp_v6_synq_hash(raddr, rport, lopt->hash_rnd)]; for (prev = &lopt->syn_table[tcp_v6_synq_hash(raddr, rport, lopt->hash_rnd)];
...@@ -619,7 +620,7 @@ static int tcp_v6_hash_connect(struct sock *sk) ...@@ -619,7 +620,7 @@ static int tcp_v6_hash_connect(struct sock *sk)
} }
head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)]; head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
tb = inet_sk(sk)->bind_hash; tb = inet_csk(sk)->icsk_bind_hash;
spin_lock_bh(&head->lock); spin_lock_bh(&head->lock);
if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
...@@ -925,7 +926,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, ...@@ -925,7 +926,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (sock_owned_by_user(sk)) if (sock_owned_by_user(sk))
goto out; goto out;
req = tcp_v6_search_req(tp, &prev, th->dest, &hdr->daddr, req = tcp_v6_search_req(sk, &prev, th->dest, &hdr->daddr,
&hdr->saddr, tcp_v6_iif(skb)); &hdr->saddr, tcp_v6_iif(skb));
if (!req) if (!req)
goto out; goto out;
...@@ -940,7 +941,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, ...@@ -940,7 +941,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
goto out; goto out;
} }
tcp_synq_drop(sk, req, prev); inet_csk_reqsk_queue_drop(sk, req, prev);
goto out; goto out;
case TCP_SYN_SENT: case TCP_SYN_SENT:
...@@ -1245,11 +1246,10 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) ...@@ -1245,11 +1246,10 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
{ {
struct request_sock *req, **prev; struct request_sock *req, **prev;
struct tcphdr *th = skb->h.th; struct tcphdr *th = skb->h.th;
struct tcp_sock *tp = tcp_sk(sk);
struct sock *nsk; struct sock *nsk;
/* Find possible connection requests. */ /* Find possible connection requests. */
req = tcp_v6_search_req(tp, &prev, th->source, &skb->nh.ipv6h->saddr, req = tcp_v6_search_req(sk, &prev, th->source, &skb->nh.ipv6h->saddr,
&skb->nh.ipv6h->daddr, tcp_v6_iif(skb)); &skb->nh.ipv6h->daddr, tcp_v6_iif(skb));
if (req) if (req)
return tcp_check_req(sk, skb, req, prev); return tcp_check_req(sk, skb, req, prev);
...@@ -1278,12 +1278,12 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) ...@@ -1278,12 +1278,12 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
static void tcp_v6_synq_add(struct sock *sk, struct request_sock *req) static void tcp_v6_synq_add(struct sock *sk, struct request_sock *req)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk);
struct listen_sock *lopt = tp->accept_queue.listen_opt; struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
u32 h = tcp_v6_synq_hash(&tcp6_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd); const u32 h = tcp_v6_synq_hash(&tcp6_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd);
reqsk_queue_hash_req(&tp->accept_queue, h, req, TCP_TIMEOUT_INIT); reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, TCP_TIMEOUT_INIT);
tcp_synq_added(sk); inet_csk_reqsk_queue_added(sk, TCP_TIMEOUT_INIT);
} }
...@@ -1308,13 +1308,13 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) ...@@ -1308,13 +1308,13 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
/* /*
* There are no SYN attacks on IPv6, yet... * There are no SYN attacks on IPv6, yet...
*/ */
if (tcp_synq_is_full(sk) && !isn) { if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
if (net_ratelimit()) if (net_ratelimit())
printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n"); printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
goto drop; goto drop;
} }
if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1) if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
goto drop; goto drop;
req = reqsk_alloc(&tcp6_request_sock_ops); req = reqsk_alloc(&tcp6_request_sock_ops);
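
The two renamed drop tests above gate admission: a full SYN queue without a syncookie-derived ISN drops the request outright, and a full accept backlog with more than one "young" entry (a request whose SYN-ACK has not yet been retransmitted) sheds load early rather than queueing further. The combined predicate, modelled in isolation:

#include <stdbool.h>
#include <stdio.h>

/* Admission checks from tcp_v6_conn_request(); userspace sketch with
 * simplified parameters, not the kernel data structures. */
static bool drop_request(bool synq_full, bool has_isn,
			 bool acceptq_full, int young)
{
	if (synq_full && !has_isn)
		return true; /* possible SYN flood */
	if (acceptq_full && young > 1)
		return true; /* accept backlog full: shed early */
	return false;
}

int main(void)
{
	printf("%d\n", drop_request(true, false, false, 0)); /* 1 */
	printf("%d\n", drop_request(false, false, true, 2)); /* 1 */
	printf("%d\n", drop_request(false, false, true, 1)); /* 0 */
	return 0;
}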
...@@ -2015,7 +2015,7 @@ static int tcp_v6_init_sock(struct sock *sk) ...@@ -2015,7 +2015,7 @@ static int tcp_v6_init_sock(struct sock *sk)
tcp_init_xmit_timers(sk); tcp_init_xmit_timers(sk);
tcp_prequeue_init(tp); tcp_prequeue_init(tp);
tp->rto = TCP_TIMEOUT_INIT; inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
tp->mdev = TCP_TIMEOUT_INIT; tp->mdev = TCP_TIMEOUT_INIT;
/* So many TCP implementations out there (incorrectly) count the /* So many TCP implementations out there (incorrectly) count the
...@@ -2098,18 +2098,20 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) ...@@ -2098,18 +2098,20 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
unsigned long timer_expires; unsigned long timer_expires;
struct inet_sock *inet = inet_sk(sp); struct inet_sock *inet = inet_sk(sp);
struct tcp_sock *tp = tcp_sk(sp); struct tcp_sock *tp = tcp_sk(sp);
const struct inet_connection_sock *icsk = inet_csk(sp);
struct ipv6_pinfo *np = inet6_sk(sp); struct ipv6_pinfo *np = inet6_sk(sp);
dest = &np->daddr; dest = &np->daddr;
src = &np->rcv_saddr; src = &np->rcv_saddr;
destp = ntohs(inet->dport); destp = ntohs(inet->dport);
srcp = ntohs(inet->sport); srcp = ntohs(inet->sport);
if (tp->pending == TCP_TIME_RETRANS) {
if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
timer_active = 1; timer_active = 1;
timer_expires = tp->timeout; timer_expires = icsk->icsk_timeout;
} else if (tp->pending == TCP_TIME_PROBE0) { } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
timer_active = 4; timer_active = 4;
timer_expires = tp->timeout; timer_expires = icsk->icsk_timeout;
} else if (timer_pending(&sp->sk_timer)) { } else if (timer_pending(&sp->sk_timer)) {
timer_active = 2; timer_active = 2;
timer_expires = sp->sk_timer.expires; timer_expires = sp->sk_timer.expires;
...@@ -2130,12 +2132,14 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) ...@@ -2130,12 +2132,14 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
tp->write_seq-tp->snd_una, tp->rcv_nxt-tp->copied_seq, tp->write_seq-tp->snd_una, tp->rcv_nxt-tp->copied_seq,
timer_active, timer_active,
jiffies_to_clock_t(timer_expires - jiffies), jiffies_to_clock_t(timer_expires - jiffies),
tp->retransmits, icsk->icsk_retransmits,
sock_i_uid(sp), sock_i_uid(sp),
tp->probes_out, tp->probes_out,
sock_i_ino(sp), sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_refcnt), sp,
tp->rto, tp->ack.ato, (tp->ack.quick<<1)|tp->ack.pingpong, icsk->icsk_rto,
icsk->icsk_ack.ato,
(icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
); );
} }
...@@ -2227,7 +2231,7 @@ struct proto tcpv6_prot = { ...@@ -2227,7 +2231,7 @@ struct proto tcpv6_prot = {
.close = tcp_close, .close = tcp_close,
.connect = tcp_v6_connect, .connect = tcp_v6_connect,
.disconnect = tcp_disconnect, .disconnect = tcp_disconnect,
.accept = tcp_accept, .accept = inet_csk_accept,
.ioctl = tcp_ioctl, .ioctl = tcp_ioctl,
.init = tcp_v6_init_sock, .init = tcp_v6_init_sock,
.destroy = tcp_v6_destroy_sock, .destroy = tcp_v6_destroy_sock,
......