Commit f07143f5 authored by David S. Miller

Merge nuts.davemloft.net:/disk1/BK/network-2.6

into nuts.davemloft.net:/disk1/BK/net-2.6
parents 11d78290 e093407d
@@ -60,8 +60,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.14"
-#define DRV_MODULE_RELDATE "November 15, 2004"
+#define DRV_MODULE_VERSION "3.15"
+#define DRV_MODULE_RELDATE "January 6, 2005"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -493,7 +493,8 @@ static void tg3_switch_clocks(struct tg3 *tp)
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
u32 frame_val;
-int loops, ret;
+unsigned int loops;
+int ret;
if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
tw32_f(MAC_MI_MODE,
@@ -501,7 +502,7 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
udelay(80);
}
-*val = 0xffffffff;
+*val = 0x0;
frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
MI_COM_PHY_ADDR_MASK);
@@ -512,7 +513,7 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
tw32_f(MAC_MI_COM, frame_val);
loops = PHY_BUSY_LOOPS;
-while (loops-- > 0) {
+while (loops != 0) {
udelay(10);
frame_val = tr32(MAC_MI_COM);
@@ -521,10 +522,11 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
frame_val = tr32(MAC_MI_COM);
break;
}
+loops -= 1;
}
ret = -EBUSY;
-if (loops > 0) {
+if (loops != 0) {
*val = frame_val & MI_COM_DATA_MASK;
ret = 0;
}
@@ -540,7 +542,8 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
u32 frame_val;
-int loops, ret;
+unsigned int loops;
+int ret;
if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
tw32_f(MAC_MI_MODE,
@@ -558,7 +561,7 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
tw32_f(MAC_MI_COM, frame_val);
loops = PHY_BUSY_LOOPS;
-while (loops-- > 0) {
+while (loops != 0) {
udelay(10);
frame_val = tr32(MAC_MI_COM);
if ((frame_val & MI_COM_BUSY) == 0) {
@@ -566,10 +569,11 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
frame_val = tr32(MAC_MI_COM);
break;
}
+loops -= 1;
}
ret = -EBUSY;
-if (loops > 0)
+if (loops != 0)
ret = 0;
if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
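One nuance worth calling out in the tg3 hunks above: with "loops" now unsigned, the old "while (loops-- > 0)" shape would leave the counter at UINT_MAX after a timeout, so the "loops != 0" success test that follows the loop would misfire. Testing first and decrementing explicitly keeps "loops == 0" meaning exactly "attempts exhausted". A minimal userspace sketch of the two shapes (mi_com_busy() is a hypothetical stand-in for polling MAC_MI_COM, not driver code):

    #include <stdio.h>

    /* Hypothetical stand-in for the MAC_MI_COM busy bit: pretend the
     * PHY never becomes ready, forcing the timeout path. */
    static int mi_com_busy(void) { return 1; }

    /* Old shape: post-decrement in the condition. With an unsigned
     * counter, timing out leaves loops == UINT_MAX, so a follow-up
     * "loops != 0" test wrongly reports success. */
    static int poll_postdec(unsigned int loops)
    {
        while (loops-- > 0)
            if (!mi_com_busy())
                break;
        return (loops != 0) ? 0 : -1; /* returns 0 (bogus) on timeout */
    }

    /* Shape the patch adopts: test first, decrement explicitly, so
     * loops == 0 after the loop means exactly "attempts exhausted". */
    static int poll_explicit(unsigned int loops)
    {
        while (loops != 0) {
            if (!mi_com_busy())
                break;
            loops -= 1;
        }
        return (loops != 0) ? 0 : -1; /* -EBUSY stand-in */
    }

    int main(void)
    {
        printf("post-decrement: %d\n", poll_postdec(8));  /* 0, wrong */
        printf("explicit:       %d\n", poll_explicit(8)); /* -1, right */
        return 0;
    }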
......
@@ -268,8 +268,7 @@ struct udp6_sock {
};
struct tcp6_sock {
-struct inet_sock inet;
-struct tcp_opt tcp;
+struct tcp_sock tcp;
struct ipv6_pinfo inet6;
};
......
@@ -214,7 +214,9 @@ enum tcp_congestion_algo {
TCP_BIC,
};
-struct tcp_opt {
+struct tcp_sock {
+/* inet_sock has to be the first member of tcp_sock */
+struct inet_sock inet;
int tcp_header_len; /* Bytes of tcp header to send */
/*
@@ -438,15 +440,9 @@ struct tcp_opt {
} bictcp;
};
-/* WARNING: don't change the layout of the members in tcp_sock! */
-struct tcp_sock {
-struct inet_sock inet;
-struct tcp_opt tcp;
-};
-static inline struct tcp_opt * tcp_sk(const struct sock *__sk)
+static inline struct tcp_sock *tcp_sk(const struct sock *sk)
{
-return &((struct tcp_sock *)__sk)->tcp;
+return (struct tcp_sock *)sk;
}
#endif
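The tcp_sk() rewrite above is the crux of the whole series: once struct inet_sock is the first member of struct tcp_sock (and struct sock is the first member of inet_sock), a struct sock pointer and a struct tcp_sock pointer refer to the same address, so the accessor collapses from member arithmetic on a wrapper struct to a plain cast; the earlier tcp6_sock hunk stacks ipv6_pinfo after the embedded tcp_sock the same way. A freestanding sketch with abridged stand-in types (fields illustrative, not the real layouts):

    #include <assert.h>
    #include <stddef.h>

    struct sock      { int sk_state; };
    struct inet_sock { struct sock sk; };        /* sock first */
    struct tcp_sock  { struct inet_sock inet;    /* inet_sock first */
                       int tcp_header_len; };

    /* Mirrors the new accessor: valid because C places the first
     * member at offset zero, so the cast cannot change the address. */
    static inline struct tcp_sock *tcp_sk(const struct sock *sk)
    {
        return (struct tcp_sock *)sk;
    }

    int main(void)
    {
        struct tcp_sock tp = { .tcp_header_len = 20 };
        struct sock *sk = &tp.inet.sk;

        assert(offsetof(struct tcp_sock, inet) == 0);
        assert(tcp_sk(sk) == &tp);
        return tcp_sk(sk)->tcp_header_len == 20 ? 0 : 1;
    }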
......
@@ -9,8 +9,7 @@
#define TCP_ECN_QUEUE_CWR 2
#define TCP_ECN_DEMAND_CWR 4
-static __inline__ void
-TCP_ECN_queue_cwr(struct tcp_opt *tp)
+static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp)
{
if (tp->ecn_flags&TCP_ECN_OK)
tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
@@ -19,16 +18,16 @@ TCP_ECN_queue_cwr(struct tcp_opt *tp)
/* Output functions */
-static __inline__ void
-TCP_ECN_send_synack(struct tcp_opt *tp, struct sk_buff *skb)
+static inline void TCP_ECN_send_synack(struct tcp_sock *tp,
+struct sk_buff *skb)
{
TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR;
if (!(tp->ecn_flags&TCP_ECN_OK))
TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE;
}
-static __inline__ void
-TCP_ECN_send_syn(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb)
+static inline void TCP_ECN_send_syn(struct sock *sk, struct tcp_sock *tp,
+struct sk_buff *skb)
{
tp->ecn_flags = 0;
if (sysctl_tcp_ecn && !(sk->sk_route_caps & NETIF_F_TSO)) {
@@ -45,8 +44,8 @@ TCP_ECN_make_synack(struct open_request *req, struct tcphdr *th)
th->ece = 1;
}
-static __inline__ void
-TCP_ECN_send(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb, int tcp_header_len)
+static inline void TCP_ECN_send(struct sock *sk, struct tcp_sock *tp,
+struct sk_buff *skb, int tcp_header_len)
{
if (tp->ecn_flags & TCP_ECN_OK) {
/* Not-retransmitted data segment: set ECT and inject CWR. */
@@ -68,21 +67,18 @@ TCP_ECN_send(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb, int tcp_h
/* Input functions */
-static __inline__ void
-TCP_ECN_accept_cwr(struct tcp_opt *tp, struct sk_buff *skb)
+static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, struct sk_buff *skb)
{
if (skb->h.th->cwr)
tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}
-static __inline__ void
-TCP_ECN_withdraw_cwr(struct tcp_opt *tp)
+static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp)
{
tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}
-static __inline__ void
-TCP_ECN_check_ce(struct tcp_opt *tp, struct sk_buff *skb)
+static inline void TCP_ECN_check_ce(struct tcp_sock *tp, struct sk_buff *skb)
{
if (tp->ecn_flags&TCP_ECN_OK) {
if (INET_ECN_is_ce(TCP_SKB_CB(skb)->flags))
@@ -95,30 +91,27 @@ TCP_ECN_check_ce(struct tcp_opt *tp, struct sk_buff *skb)
}
}
-static __inline__ void
-TCP_ECN_rcv_synack(struct tcp_opt *tp, struct tcphdr *th)
+static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, struct tcphdr *th)
{
if ((tp->ecn_flags&TCP_ECN_OK) && (!th->ece || th->cwr))
tp->ecn_flags &= ~TCP_ECN_OK;
}
-static __inline__ void
-TCP_ECN_rcv_syn(struct tcp_opt *tp, struct tcphdr *th)
+static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, struct tcphdr *th)
{
if ((tp->ecn_flags&TCP_ECN_OK) && (!th->ece || !th->cwr))
tp->ecn_flags &= ~TCP_ECN_OK;
}
-static __inline__ int
-TCP_ECN_rcv_ecn_echo(struct tcp_opt *tp, struct tcphdr *th)
+static inline int TCP_ECN_rcv_ecn_echo(struct tcp_sock *tp, struct tcphdr *th)
{
if (th->ece && !th->syn && (tp->ecn_flags&TCP_ECN_OK))
return 1;
return 0;
}
-static __inline__ void
-TCP_ECN_openreq_child(struct tcp_opt *tp, struct open_request *req)
+static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
+struct open_request *req)
{
tp->ecn_flags = req->ecn_ok ? TCP_ECN_OK : 0;
}
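The negotiation rule these helpers encode is compact enough to check in isolation: after offering ECN in a SYN, the connection keeps TCP_ECN_OK only if the SYN-ACK sets ECE and clears CWR. A self-contained sketch mirroring TCP_ECN_rcv_synack() above (struct hdr is a toy stand-in for struct tcphdr):

    #include <stdio.h>

    #define TCP_ECN_OK 1    /* same flag value as the header above */

    struct hdr { unsigned int ece:1, cwr:1; };   /* toy tcphdr */

    /* Same test as TCP_ECN_rcv_synack(): a SYN-ACK without ECE, or
     * with CWR set, means the peer did not agree to ECN, so fall
     * back to non-ECN operation for this connection. */
    static void rcv_synack(unsigned int *ecn_flags, const struct hdr *th)
    {
        if ((*ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
            *ecn_flags &= ~TCP_ECN_OK;
    }

    int main(void)
    {
        unsigned int flags = TCP_ECN_OK;          /* we offered ECN */
        struct hdr accept = { .ece = 1, .cwr = 0 };
        struct hdr plain  = { .ece = 0, .cwr = 0 };

        rcv_synack(&flags, &accept);
        printf("ECN-capable SYN-ACK: %s\n", flags ? "ECN on" : "ECN off");
        rcv_synack(&flags, &plain);
        printf("plain SYN-ACK:       %s\n", flags ? "ECN on" : "ECN off");
        return 0;
    }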
......
@@ -429,7 +429,7 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
if (err)
break;
if (sk->sk_type == SOCK_STREAM) {
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
if (sk->sk_family == PF_INET ||
(!((1 << sk->sk_state) &
......
@@ -47,7 +47,7 @@ static __u16 const msstab[] = {
*/
__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
int mssind;
const __u16 mss = *mssp;
@@ -98,7 +98,7 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
struct open_request *req,
struct dst_entry *dst)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct sock *child;
child = tp->af_specific->syn_recv_sock(sk, skb, req, dst);
@@ -114,7 +114,7 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
struct ip_options *opt)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
__u32 cookie = ntohl(skb->h.th->ack_seq) - 1;
struct sock *ret = sk;
struct open_request *req;
......
@@ -331,7 +331,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
unsigned int mask;
struct sock *sk = sock->sk;
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
poll_wait(file, sk->sk_sleep, wait);
if (sk->sk_state == TCP_LISTEN)
@@ -414,7 +414,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
int answ;
switch (cmd) {
@@ -462,7 +462,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
int tcp_listen_start(struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct tcp_listen_opt *lopt;
sk->sk_max_ack_backlog = 0;
@@ -515,7 +515,7 @@ int tcp_listen_start(struct sock *sk)
static void tcp_listen_stop (struct sock *sk)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct tcp_listen_opt *lopt = tp->listen_opt;
struct open_request *acc_req = tp->accept_queue;
struct open_request *req;
@@ -579,18 +579,18 @@ static void tcp_listen_stop (struct sock *sk)
BUG_TRAP(!sk->sk_ack_backlog);
}
-static inline void tcp_mark_push(struct tcp_opt *tp, struct sk_buff *skb)
+static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
tp->pushed_seq = tp->write_seq;
}
-static inline int forced_push(struct tcp_opt *tp)
+static inline int forced_push(struct tcp_sock *tp)
{
return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}
-static inline void skb_entail(struct sock *sk, struct tcp_opt *tp,
+static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
struct sk_buff *skb)
{
skb->csum = 0;
@@ -606,7 +606,7 @@ static inline void skb_entail(struct sock *sk, struct tcp_opt *tp,
tp->nonagle &= ~TCP_NAGLE_PUSH;
}
-static inline void tcp_mark_urg(struct tcp_opt *tp, int flags,
+static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
struct sk_buff *skb)
{
if (flags & MSG_OOB) {
@@ -616,7 +616,7 @@ static inline void tcp_mark_urg(struct tcp_opt *tp, int flags,
}
}
-static inline void tcp_push(struct sock *sk, struct tcp_opt *tp, int flags,
+static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
int mss_now, int nonagle)
{
if (sk->sk_send_head) {
@@ -632,7 +632,7 @@ static inline void tcp_push(struct sock *sk, struct tcp_opt *tp, int flags,
static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
size_t psize, int flags)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
int mss_now;
int err;
ssize_t copied;
@@ -761,7 +761,7 @@ ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
#define TCP_PAGE(sk) (sk->sk_sndmsg_page)
#define TCP_OFF(sk) (sk->sk_sndmsg_off)
-static inline int select_size(struct sock *sk, struct tcp_opt *tp)
+static inline int select_size(struct sock *sk, struct tcp_sock *tp)
{
int tmp = tp->mss_cache_std;
@@ -779,7 +779,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t size)
{
struct iovec *iov;
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
int iovlen, flags;
int mss_now;
@@ -1003,7 +1003,7 @@ static int tcp_recv_urg(struct sock *sk, long timeo,
struct msghdr *msg, int len, int flags,
int *addr_len)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
/* No URG data to read. */
if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
@@ -1053,7 +1053,7 @@ static int tcp_recv_urg(struct sock *sk, long timeo,
*/
static void cleanup_rbuf(struct sock *sk, int copied)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
int time_to_ack = 0;
#if TCP_DEBUG
@@ -1108,7 +1108,7 @@ static void cleanup_rbuf(struct sock *sk, int copied)
static void tcp_prequeue_process(struct sock *sk)
{
struct sk_buff *skb;
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
NET_ADD_STATS_USER(LINUX_MIB_TCPPREQUEUED, skb_queue_len(&tp->ucopy.prequeue));
@@ -1155,7 +1155,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
sk_read_actor_t recv_actor)
{
struct sk_buff *skb;
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
u32 seq = tp->copied_seq;
u32 offset;
int copied = 0;
@@ -1214,7 +1214,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len, int nonblock, int flags, int *addr_len)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
int copied = 0;
u32 peek_seq;
u32 *seq;
@@ -1720,7 +1720,7 @@ void tcp_close(struct sock *sk, long timeout)
*/
if (sk->sk_state == TCP_FIN_WAIT2) {
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
if (tp->linger2 < 0) {
tcp_set_state(sk, TCP_CLOSE);
tcp_send_active_reset(sk, GFP_ATOMIC);
@@ -1774,7 +1774,7 @@ static inline int tcp_need_reset(int state)
int tcp_disconnect(struct sock *sk, int flags)
{
struct inet_sock *inet = inet_sk(sk);
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
int err = 0;
int old_state = sk->sk_state;
@@ -1836,7 +1836,7 @@ int tcp_disconnect(struct sock *sk, int flags)
*/
static int wait_for_connect(struct sock *sk, long timeo)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
DEFINE_WAIT(wait);
int err;
@@ -1884,7 +1884,7 @@ static int wait_for_connect(struct sock *sk, long timeo)
struct sock *tcp_accept(struct sock *sk, int flags, int *err)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct open_request *req;
struct sock *newsk;
int error;
@@ -1935,7 +1935,7 @@ struct sock *tcp_accept(struct sock *sk, int flags, int *err)
int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
int optlen)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
int val;
int err = 0;
@@ -2099,7 +2099,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
/* Return information about state of tcp endpoint in API format. */
void tcp_get_info(struct sock *sk, struct tcp_info *info)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
u32 now = tcp_time_stamp;
memset(info, 0, sizeof(*info));
@@ -2158,7 +2158,7 @@ EXPORT_SYMBOL_GPL(tcp_get_info);
int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
int __user *optlen)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
int val, len;
if (level != SOL_TCP)
......
@@ -56,7 +56,7 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
int ext, u32 pid, u32 seq, u16 nlmsg_flags)
{
struct inet_sock *inet = inet_sk(sk);
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct tcpdiagmsg *r;
struct nlmsghdr *nlh;
struct tcp_info *info = NULL;
@@ -512,7 +512,7 @@ static int tcpdiag_dump_reqs(struct sk_buff *skb, struct sock *sk,
{
struct tcpdiag_entry entry;
struct tcpdiagreq *r = NLMSG_DATA(cb->nlh);
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct tcp_listen_opt *lopt;
struct rtattr *bc = NULL;
struct inet_sock *inet = inet_sk(sk);
......
@@ -568,7 +568,7 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
tw = (struct tcp_tw_bucket *)sk2;
if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
/* With PAWS, it is safe from the viewpoint
of data integrity. Even without PAWS it
@@ -744,7 +744,7 @@ static inline int tcp_v4_hash_connect(struct sock *sk)
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct inet_sock *inet = inet_sk(sk);
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
struct rtable *rt;
u32 daddr, nexthop;
@@ -867,7 +867,7 @@ static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd)
return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1));
}
-static struct open_request *tcp_v4_search_req(struct tcp_opt *tp,
+static struct open_request *tcp_v4_search_req(struct tcp_sock *tp,
struct open_request ***prevp,
__u16 rport,
__u32 raddr, __u32 laddr)
@@ -893,7 +893,7 @@ static struct open_request *tcp_v4_search_req(struct tcp_opt *tp,
static void tcp_v4_synq_add(struct sock *sk, struct open_request *req)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct tcp_listen_opt *lopt = tp->listen_opt;
u32 h = tcp_v4_synq_hash(req->af.v4_req.rmt_addr, req->rmt_port, lopt->hash_rnd);
@@ -918,7 +918,7 @@ static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
{
struct dst_entry *dst;
struct inet_sock *inet = inet_sk(sk);
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
* send out by Linux are always <576bytes so they should go through
@@ -979,7 +979,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
{
struct iphdr *iph = (struct iphdr *)skb->data;
struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
-struct tcp_opt *tp;
+struct tcp_sock *tp;
struct inet_sock *inet;
int type = skb->h.icmph->type;
int code = skb->h.icmph->code;
@@ -1393,7 +1393,7 @@ struct or_calltable or_ipv4 = {
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
-struct tcp_opt tp;
+struct tcp_sock tp;
struct open_request *req;
__u32 saddr = skb->nh.iph->saddr;
__u32 daddr = skb->nh.iph->daddr;
@@ -1550,7 +1550,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
struct dst_entry *dst)
{
struct inet_sock *newinet;
-struct tcp_opt *newtp;
+struct tcp_sock *newtp;
struct sock *newsk;
if (sk_acceptq_is_full(sk))
@@ -1602,7 +1602,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
struct tcphdr *th = skb->h.th;
struct iphdr *iph = skb->nh.iph;
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct sock *nsk;
struct open_request **prev;
/* Find possible connection requests. */
@@ -1972,7 +1972,7 @@ static void v4_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
int tcp_v4_remember_stamp(struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
struct inet_peer *peer = NULL;
int release_it = 0;
@@ -2040,7 +2040,7 @@ struct tcp_func ipv4_specific = {
*/
static int tcp_v4_init_sock(struct sock *sk)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
skb_queue_head_init(&tp->out_of_order_queue);
tcp_init_xmit_timers(sk);
@@ -2082,7 +2082,7 @@ static int tcp_v4_init_sock(struct sock *sk)
int tcp_v4_destroy_sock(struct sock *sk)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
tcp_clear_xmit_timers(sk);
@@ -2131,7 +2131,7 @@ static inline struct tcp_tw_bucket *tw_next(struct tcp_tw_bucket *tw)
static void *listening_get_next(struct seq_file *seq, void *cur)
{
-struct tcp_opt *tp;
+struct tcp_sock *tp;
struct hlist_node *node;
struct sock *sk = cur;
struct tcp_iter_state* st = seq->private;
@@ -2375,7 +2375,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
switch (st->state) {
case TCP_SEQ_STATE_OPENREQ:
if (v) {
-struct tcp_opt *tp = tcp_sk(st->syn_wait_sk);
+struct tcp_sock *tp = tcp_sk(st->syn_wait_sk);
read_unlock_bh(&tp->syn_wait_lock);
}
case TCP_SEQ_STATE_LISTENING:
@@ -2480,7 +2480,7 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
{
int timer_active;
unsigned long timer_expires;
-struct tcp_opt *tp = tcp_sk(sp);
+struct tcp_sock *tp = tcp_sk(sp);
struct inet_sock *inet = inet_sk(sp);
unsigned int dest = inet->daddr;
unsigned int src = inet->rcv_saddr;
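Aside on tcp_v4_synq_hash() above: it folds the peer address, peer port, and a per-boot random seed into a SYN-queue bucket, and the "& (TCP_SYNQ_HSIZE - 1)" trick works only because the table size is a power of two. A sketch of the shape, with a toy mixer standing in for jhash_2words() from <linux/jhash.h> (constants illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define TCP_SYNQ_HSIZE 512          /* must stay a power of two */

    /* Toy stand-in for jhash_2words(); only the interface matters. */
    static uint32_t mix(uint32_t a, uint32_t b, uint32_t seed)
    {
        uint32_t h = (a ^ seed) * 0x9e3779b9u + b;
        h ^= h >> 16;
        return h * 0x85ebca6bu;
    }

    /* Same shape as tcp_v4_synq_hash(): mask, don't modulo, because
     * TCP_SYNQ_HSIZE - 1 is an all-ones bit pattern. */
    static unsigned int synq_hash(uint32_t raddr, uint16_t rport, uint32_t rnd)
    {
        return mix(raddr, rport, rnd) & (TCP_SYNQ_HSIZE - 1);
    }

    int main(void)
    {
        printf("bucket %u of %u\n",
               synq_hash(0xc0a80001u, 443, 0x12345678u), TCP_SYNQ_HSIZE);
        return 0;
    }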
......
@@ -125,7 +125,7 @@ enum tcp_tw_status
tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
struct tcphdr *th, unsigned len)
{
-struct tcp_opt tp;
+struct tcp_sock tp;
int paws_reject = 0;
tp.saw_tstamp = 0;
@@ -329,7 +329,7 @@ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
struct tcp_tw_bucket *tw = NULL;
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
int recycle_ok = 0;
if (sysctl_tcp_tw_recycle && tp->ts_recent_stamp)
@@ -692,7 +692,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, 0, sk->sk_prot->slab);
if(newsk != NULL) {
-struct tcp_opt *newtp;
+struct tcp_sock *newtp;
struct sk_filter *filter;
memcpy(newsk, sk, sizeof(struct tcp_sock));
@@ -736,7 +736,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
return NULL;
}
-/* Now setup tcp_opt */
+/* Now setup tcp_sock */
newtp = tcp_sk(newsk);
newtp->pred_flags = 0;
newtp->rcv_nxt = req->rcv_isn + 1;
@@ -860,10 +860,10 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
struct open_request **prev)
{
struct tcphdr *th = skb->h.th;
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
int paws_reject = 0;
-struct tcp_opt ttp;
+struct tcp_sock ttp;
struct sock *child;
ttp.saw_tstamp = 0;
......
@@ -51,8 +51,8 @@ int sysctl_tcp_retrans_collapse = 1;
*/
int sysctl_tcp_tso_win_divisor = 8;
-static __inline__
-void update_send_head(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb)
+static inline void update_send_head(struct sock *sk, struct tcp_sock *tp,
+struct sk_buff *skb)
{
sk->sk_send_head = skb->next;
if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
@@ -67,7 +67,7 @@ void update_send_head(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb)
* Anything in between SND.UNA...SND.UNA+SND.WND also can be already
* invalid. OK, let's make this for now:
*/
-static __inline__ __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_opt *tp)
+static inline __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_sock *tp)
{
if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt))
return tp->snd_nxt;
@@ -91,7 +91,7 @@ static __inline__ __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_opt *tp)
*/
static __u16 tcp_advertise_mss(struct sock *sk)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct dst_entry *dst = __sk_dst_get(sk);
int mss = tp->advmss;
@@ -105,7 +105,7 @@ static __u16 tcp_advertise_mss(struct sock *sk)
/* RFC2861. Reset CWND after idle period longer RTO to "restart window".
* This is the first part of cwnd validation mechanism. */
-static void tcp_cwnd_restart(struct tcp_opt *tp, struct dst_entry *dst)
+static void tcp_cwnd_restart(struct tcp_sock *tp, struct dst_entry *dst)
{
s32 delta = tcp_time_stamp - tp->lsndtime;
u32 restart_cwnd = tcp_init_cwnd(tp, dst);
@@ -124,7 +124,8 @@ static void tcp_cwnd_restart(struct tcp_opt *tp, struct dst_entry *dst)
tp->snd_cwnd_used = 0;
}
-static __inline__ void tcp_event_data_sent(struct tcp_opt *tp, struct sk_buff *skb, struct sock *sk)
+static inline void tcp_event_data_sent(struct tcp_sock *tp,
+struct sk_buff *skb, struct sock *sk)
{
u32 now = tcp_time_stamp;
@@ -143,7 +144,7 @@ static __inline__ void tcp_event_data_sent(struct tcp_opt *tp, struct sk_buff *s
static __inline__ void tcp_event_ack_sent(struct sock *sk)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
tcp_dec_quickack_mode(tp);
tcp_clear_xmit_timer(sk, TCP_TIME_DACK);
@@ -208,14 +209,14 @@ void tcp_select_initial_window(int __space, __u32 mss,
(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}
-/* Chose a new window to advertise, update state in tcp_opt for the
+/* Chose a new window to advertise, update state in tcp_sock for the
* socket, and return result with RFC1323 scaling applied. The return
* value can be stuffed directly into th->window for an outgoing
* frame.
*/
static __inline__ u16 tcp_select_window(struct sock *sk)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
u32 cur_win = tcp_receive_window(tp);
u32 new_win = __tcp_select_window(sk);
@@ -267,7 +268,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
if (skb != NULL) {
struct inet_sock *inet = inet_sk(sk);
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
int tcp_header_size = tp->tcp_header_len;
struct tcphdr *th;
@@ -396,7 +397,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
*/
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
/* Advance write_seq and place onto the write_queue. */
tp->write_seq = TCP_SKB_CB(skb)->end_seq;
@@ -413,7 +414,7 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
*/
void tcp_push_one(struct sock *sk, unsigned cur_mss)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb = sk->sk_send_head;
if (tcp_snd_test(tp, skb, cur_mss, TCP_NAGLE_PUSH)) {
@@ -453,7 +454,7 @@ void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_std)
*/
static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *buff;
int nsize;
u16 flags;
@@ -619,7 +620,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct dst_entry *dst = __sk_dst_get(sk);
int mss_now;
@@ -666,7 +667,7 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
unsigned int tcp_current_mss(struct sock *sk, int large)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct dst_entry *dst = __sk_dst_get(sk);
unsigned int do_large, mss_now;
@@ -727,7 +728,7 @@ unsigned int tcp_current_mss(struct sock *sk, int large)
*/
int tcp_write_xmit(struct sock *sk, int nonagle)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
unsigned int mss_now;
/* If we are closed, the bytes will have to remain here.
@@ -831,7 +832,7 @@ int tcp_write_xmit(struct sock *sk, int nonagle)
*/
u32 __tcp_select_window(struct sock *sk)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
/* MSS for the peer's data. Previous verions used mss_clamp
* here. I don't know if the value based on our guesses
* of peer's MSS is better for the performance. It's more correct
@@ -892,7 +893,7 @@ u32 __tcp_select_window(struct sock *sk)
/* Attempt to collapse two adjacent SKB's during retransmission. */
static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int mss_now)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *next_skb = skb->next;
/* The first test we must make is that neither of these two
@@ -970,7 +971,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
*/
void tcp_simple_retransmit(struct sock *sk)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
unsigned int mss = tcp_current_mss(sk, 0);
int lost = 0;
@@ -1016,7 +1017,7 @@ void tcp_simple_retransmit(struct sock *sk)
*/
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
unsigned int cur_mss = tcp_current_mss(sk, 0);
int err;
@@ -1140,7 +1141,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
*/
void tcp_xmit_retransmit_queue(struct sock *sk)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
int packet_cnt = tcp_get_pcount(&tp->lost_out);
@@ -1235,7 +1236,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
*/
void tcp_send_fin(struct sock *sk)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb = skb_peek_tail(&sk->sk_write_queue);
int mss_now;
@@ -1281,7 +1282,7 @@ void tcp_send_fin(struct sock *sk)
*/
void tcp_send_active_reset(struct sock *sk, int priority)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
/* NOTE: No TCP options attached and we never retransmit this. */
@@ -1346,7 +1347,7 @@ int tcp_send_synack(struct sock *sk)
struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
struct open_request *req)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct tcphdr *th;
int tcp_header_size;
struct sk_buff *skb;
@@ -1417,7 +1418,7 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
static inline void tcp_connect_init(struct sock *sk)
{
struct dst_entry *dst = __sk_dst_get(sk);
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
/* We'll fix this up when we get a response from the other end.
* See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
@@ -1466,7 +1467,7 @@ static inline void tcp_connect_init(struct sock *sk)
*/
int tcp_connect(struct sock *sk)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *buff;
tcp_connect_init(sk);
@@ -1510,7 +1511,7 @@ int tcp_connect(struct sock *sk)
*/
void tcp_send_delayed_ack(struct sock *sk)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
int ato = tp->ack.ato;
unsigned long timeout;
@@ -1562,7 +1563,7 @@ void tcp_send_ack(struct sock *sk)
{
/* If we have been reset, we may not send again. */
if (sk->sk_state != TCP_CLOSE) {
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *buff;
/* We are not putting this on the write queue, so
@@ -1605,7 +1606,7 @@ void tcp_send_ack(struct sock *sk)
*/
static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
/* We don't queue it, tcp_transmit_skb() sets ownership. */
@@ -1634,7 +1635,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
int tcp_write_wakeup(struct sock *sk)
{
if (sk->sk_state != TCP_CLOSE) {
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
if ((skb = sk->sk_send_head) != NULL &&
@@ -1688,7 +1689,7 @@ int tcp_write_wakeup(struct sock *sk)
*/
void tcp_send_probe0(struct sock *sk)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
int err;
err = tcp_write_wakeup(sk);
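The tcp_select_window() comment above notes the result "can be stuffed directly into th->window" with RFC1323 scaling applied: the 16-bit header field carries the window right-shifted by the negotiated scale, and the receiver shifts it back. A small worked example (values illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t window = 256 * 1024;   /* bytes we are willing to accept */
        unsigned int rcv_wscale = 7;    /* shift negotiated in the SYN */

        /* Sender side: scale down into the 16-bit field. */
        uint16_t th_window = (uint16_t)(window >> rcv_wscale);

        /* Receiver side: scale back up. */
        printf("field carries %u, peer computes %u bytes\n",
               th_window, (uint32_t)th_window << rcv_wscale);
        return 0;
    }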
......
@@ -48,7 +48,7 @@ const char tcp_timer_bug_msg[] = KERN_DEBUG "tcpbug: unknown timer value\n";
void tcp_init_xmit_timers(struct sock *sk)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
init_timer(&tp->retransmit_timer);
tp->retransmit_timer.function=&tcp_write_timer;
@@ -67,7 +67,7 @@ void tcp_init_xmit_timers(struct sock *sk)
void tcp_clear_xmit_timers(struct sock *sk)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
tp->pending = 0;
sk_stop_timer(sk, &tp->retransmit_timer);
@@ -101,7 +101,7 @@ static void tcp_write_err(struct sock *sk)
*/
static int tcp_out_of_resources(struct sock *sk, int do_reset)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
int orphans = atomic_read(&tcp_orphan_count);
/* If peer does not open window for long time, or did not transmit
@@ -154,7 +154,7 @@ static int tcp_orphan_retries(struct sock *sk, int alive)
/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
int retry_until;
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
@@ -208,7 +208,7 @@ static int tcp_write_timeout(struct sock *sk)
static void tcp_delack_timer(unsigned long data)
{
struct sock *sk = (struct sock*)data;
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
@@ -268,7 +268,7 @@ static void tcp_delack_timer(unsigned long data)
static void tcp_probe_timer(struct sock *sk)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
int max_probes;
if (tcp_get_pcount(&tp->packets_out) || !sk->sk_send_head) {
@@ -316,7 +316,7 @@ static void tcp_probe_timer(struct sock *sk)
static void tcp_retransmit_timer(struct sock *sk)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
if (!tcp_get_pcount(&tp->packets_out))
goto out;
@@ -418,7 +418,7 @@ out:;
static void tcp_write_timer(unsigned long data)
{
struct sock *sk = (struct sock*)data;
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
int event;
bh_lock_sock(sk);
@@ -462,7 +462,7 @@ static void tcp_write_timer(unsigned long data)
static void tcp_synack_timer(struct sock *sk)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct tcp_listen_opt *lopt = tp->listen_opt;
int max_retries = tp->syn_retries ? : sysctl_tcp_synack_retries;
int thresh = max_retries;
@@ -573,7 +573,7 @@ void tcp_set_keepalive(struct sock *sk, int val)
static void tcp_keepalive_timer (unsigned long data)
{
struct sock *sk = (struct sock *) data;
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
__u32 elapsed;
/* Only process if socket is not in use. */
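These handlers all follow the 2.6-era timer convention visible in tcp_init_xmit_timers(): struct timer_list carried an "unsigned long data" cookie alongside the function pointer, and each callback casts the cookie back to its socket. A minimal mock of just that calling convention (not kernel code; assumes unsigned long can hold a pointer, as the kernel does):

    #include <stdio.h>

    struct sock { int sk_state; };   /* abridged stand-in */

    /* Same signature pattern as tcp_write_timer(unsigned long data). */
    static void write_timer(unsigned long data)
    {
        struct sock *sk = (struct sock *)data;  /* cookie -> object */
        printf("timer fired, sk_state=%d\n", sk->sk_state);
    }

    int main(void)
    {
        struct sock sk = { .sk_state = 1 };

        /* The kernel stores this cookie in timer->data at init_timer()
         * time; here we invoke the callback the way the timer core would. */
        write_timer((unsigned long)&sk);
        return 0;
    }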
......
@@ -164,7 +164,7 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname,
ipv6_sock_mc_close(sk);
if (sk->sk_protocol == IPPROTO_TCP) {
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
local_bh_disable();
sock_prot_dec_use(sk->sk_prot);
@@ -281,7 +281,7 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname,
retv = 0;
if (sk->sk_type == SOCK_STREAM) {
if (opt) {
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
if (!((1 << sk->sk_state) &
(TCPF_LISTEN | TCPF_CLOSE))
&& inet_sk(sk)->daddr != LOOPBACK4_IPV6) {
......
@@ -235,7 +235,7 @@ static __inline__ void __tcp_v6_hash(struct sock *sk)
static void tcp_v6_hash(struct sock *sk)
{
if (sk->sk_state != TCP_CLOSE) {
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
if (tp->af_specific == &ipv6_mapped) {
tcp_prot.hash(sk);
@@ -391,7 +391,7 @@ static u32 tcp_v6_synq_hash(struct in6_addr *raddr, u16 rport, u32 rnd)
return c & (TCP_SYNQ_HSIZE - 1);
}
-static struct open_request *tcp_v6_search_req(struct tcp_opt *tp,
+static struct open_request *tcp_v6_search_req(struct tcp_sock *tp,
struct open_request ***prevp,
__u16 rport,
struct in6_addr *raddr,
@@ -466,7 +466,7 @@ static int tcp_v6_check_established(struct sock *sk)
ipv6_addr_equal(&tw->tw_v6_daddr, saddr) &&
ipv6_addr_equal(&tw->tw_v6_rcv_saddr, daddr) &&
sk2->sk_bound_dev_if == sk->sk_bound_dev_if) {
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
if (tw->tw_ts_recent_stamp) {
/* See comment in tcp_ipv4.c */
@@ -551,7 +551,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct in6_addr *saddr = NULL, *final_p = NULL, final;
struct flowi fl;
struct dst_entry *dst;
@@ -741,7 +741,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
struct ipv6_pinfo *np;
struct sock *sk;
int err;
-struct tcp_opt *tp;
+struct tcp_sock *tp;
__u32 seq;
sk = tcp_v6_lookup(&hdr->daddr, th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
@@ -1146,7 +1146,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
{
struct open_request *req, **prev;
struct tcphdr *th = skb->h.th;
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct sock *nsk;
/* Find possible connection requests. */
@@ -1179,7 +1179,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
static void tcp_v6_synq_add(struct sock *sk, struct open_request *req)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
struct tcp_listen_opt *lopt = tp->listen_opt;
u32 h = tcp_v6_synq_hash(&req->af.v6_req.rmt_addr, req->rmt_port, lopt->hash_rnd);
@@ -1202,7 +1202,7 @@ static void tcp_v6_synq_add(struct sock *sk, struct open_request *req)
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct ipv6_pinfo *np = inet6_sk(sk);
-struct tcp_opt tmptp, *tp = tcp_sk(sk);
+struct tcp_sock tmptp, *tp = tcp_sk(sk);
struct open_request *req = NULL;
__u32 isn = TCP_SKB_CB(skb)->when;
@@ -1282,7 +1282,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
struct tcp6_sock *newtcp6sk;
struct inet_sock *newinet;
-struct tcp_opt *newtp;
+struct tcp_sock *newtp;
struct sock *newsk;
struct ipv6_txoptions *opt;
@@ -1297,7 +1297,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
return NULL;
newtcp6sk = (struct tcp6_sock *)newsk;
-newtcp6sk->inet.pinet6 = &newtcp6sk->inet6;
+inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
newinet = inet_sk(newsk);
newnp = inet6_sk(newsk);
@@ -1390,7 +1390,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
~(NETIF_F_IP_CSUM | NETIF_F_TSO);
newtcp6sk = (struct tcp6_sock *)newsk;
-newtcp6sk->inet.pinet6 = &newtcp6sk->inet6;
+inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
newtp = tcp_sk(newsk);
newinet = inet_sk(newsk);
@@ -1497,7 +1497,7 @@ static int tcp_v6_checksum_init(struct sk_buff *skb)
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
struct ipv6_pinfo *np = inet6_sk(sk);
-struct tcp_opt *tp;
+struct tcp_sock *tp;
struct sk_buff *opt_skb = NULL;
/* Imagine: socket is IPv6. IPv4 packet arrives,
@@ -1919,7 +1919,7 @@ static struct tcp_func ipv6_mapped = {
*/
static int tcp_v6_init_sock(struct sock *sk)
{
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
skb_queue_head_init(&tp->out_of_order_queue);
tcp_init_xmit_timers(sk);
@@ -2007,7 +2007,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
int timer_active;
unsigned long timer_expires;
struct inet_sock *inet = inet_sk(sp);
-struct tcp_opt *tp = tcp_sk(sp);
+struct tcp_sock *tp = tcp_sk(sp);
struct ipv6_pinfo *np = inet6_sk(sp);
dest = &np->daddr;
......
@@ -1077,7 +1077,7 @@ static void
svc_tcp_init(struct svc_sock *svsk)
{
struct sock *sk = svsk->sk_sk;
-struct tcp_opt *tp = tcp_sk(sk);
+struct tcp_sock *tp = tcp_sk(sk);
svsk->sk_recvfrom = svc_tcp_recvfrom;
svsk->sk_sendto = svc_tcp_sendto;
......
@@ -1548,8 +1548,7 @@ xprt_bind_socket(struct rpc_xprt *xprt, struct socket *sock)
sk->sk_no_check = UDP_CSUM_NORCV;
xprt_set_connected(xprt);
} else {
-struct tcp_opt *tp = tcp_sk(sk);
-tp->nonagle = 1; /* disable Nagle's algorithm */
+tcp_sk(sk)->nonagle = 1; /* disable Nagle's algorithm */
sk->sk_data_ready = tcp_data_ready;
sk->sk_state_change = tcp_state_change;
xprt_clear_connected(xprt);
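For context, the nonagle flag the xprt hunk sets is the in-kernel counterpart of the TCP_NODELAY socket option; the same effect from user space looks roughly like this sketch:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    /* Disable Nagle's algorithm on a connected TCP socket. */
    static int disable_nagle(int fd)
    {
        int one = 1;
        return setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
    }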
......