Commit 4d54d865 authored by David S. Miller

Merge branch 'listener-sock-const'

Eric Dumazet says:

====================
dccp/tcp: constify listener sock

Another patch bomb to prepare lockless TCP/DCCP LISTEN handling.

SYNACK retransmits are built and sent without listener socket
being locked. Soon, initial SYNACK packets will have same property.

This series makes sure we did nothing wrong with this model,
by adding a const qualifier in all the paths taken from synack building
and transmit, for IPv4/IPv6 and TCP/dccp.

The only potential problem was the rewrite of ecn bits for connections
with DCTCP as congestion module, but this was a very minor one.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 6ea29da1 1b70e977
...@@ -489,7 +489,8 @@ struct flowi; ...@@ -489,7 +489,8 @@ struct flowi;
#ifndef CONFIG_XFRM #ifndef CONFIG_XFRM
static inline struct dst_entry *xfrm_lookup(struct net *net, static inline struct dst_entry *xfrm_lookup(struct net *net,
struct dst_entry *dst_orig, struct dst_entry *dst_orig,
const struct flowi *fl, struct sock *sk, const struct flowi *fl,
const struct sock *sk,
int flags) int flags)
{ {
return dst_orig; return dst_orig;
...@@ -498,7 +499,7 @@ static inline struct dst_entry *xfrm_lookup(struct net *net, ...@@ -498,7 +499,7 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
static inline struct dst_entry *xfrm_lookup_route(struct net *net, static inline struct dst_entry *xfrm_lookup_route(struct net *net,
struct dst_entry *dst_orig, struct dst_entry *dst_orig,
const struct flowi *fl, const struct flowi *fl,
struct sock *sk, const struct sock *sk,
int flags) int flags)
{ {
return dst_orig; return dst_orig;
...@@ -511,11 +512,11 @@ static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst) ...@@ -511,11 +512,11 @@ static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
#else #else
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
const struct flowi *fl, struct sock *sk, const struct flowi *fl, const struct sock *sk,
int flags); int flags);
struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
const struct flowi *fl, struct sock *sk, const struct flowi *fl, const struct sock *sk,
int flags); int flags);
/* skb attached with this dst needs transformation if dst->xfrm is valid */ /* skb attached with this dst needs transformation if dst->xfrm is valid */
......
...@@ -25,7 +25,7 @@ struct sockaddr; ...@@ -25,7 +25,7 @@ struct sockaddr;
int inet6_csk_bind_conflict(const struct sock *sk, int inet6_csk_bind_conflict(const struct sock *sk,
const struct inet_bind_bucket *tb, bool relax); const struct inet_bind_bucket *tb, bool relax);
struct dst_entry *inet6_csk_route_req(struct sock *sk, struct flowi6 *fl6, struct dst_entry *inet6_csk_route_req(const struct sock *sk, struct flowi6 *fl6,
const struct request_sock *req); const struct request_sock *req);
struct request_sock *inet6_csk_search_req(struct sock *sk, struct request_sock *inet6_csk_search_req(struct sock *sk,
......
...@@ -266,7 +266,7 @@ int inet_csk_bind_conflict(const struct sock *sk, ...@@ -266,7 +266,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
const struct inet_bind_bucket *tb, bool relax); const struct inet_bind_bucket *tb, bool relax);
int inet_csk_get_port(struct sock *sk, unsigned short snum); int inet_csk_get_port(struct sock *sk, unsigned short snum);
struct dst_entry *inet_csk_route_req(struct sock *sk, struct flowi4 *fl4, struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4,
const struct request_sock *req); const struct request_sock *req);
struct dst_entry *inet_csk_route_child_sock(struct sock *sk, struct sock *newsk, struct dst_entry *inet_csk_route_child_sock(struct sock *sk, struct sock *newsk,
const struct request_sock *req); const struct request_sock *req);
......
...@@ -100,7 +100,7 @@ int igmp_mc_init(void); ...@@ -100,7 +100,7 @@ int igmp_mc_init(void);
* Functions provided by ip.c * Functions provided by ip.c
*/ */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
__be32 saddr, __be32 daddr, __be32 saddr, __be32 daddr,
struct ip_options_rcu *opt); struct ip_options_rcu *opt);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
...@@ -282,10 +282,12 @@ int ip_decrease_ttl(struct iphdr *iph) ...@@ -282,10 +282,12 @@ int ip_decrease_ttl(struct iphdr *iph)
} }
static inline static inline
int ip_dont_fragment(struct sock *sk, struct dst_entry *dst) int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
{ {
return inet_sk(sk)->pmtudisc == IP_PMTUDISC_DO || u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);
(inet_sk(sk)->pmtudisc == IP_PMTUDISC_WANT &&
return pmtudisc == IP_PMTUDISC_DO ||
(pmtudisc == IP_PMTUDISC_WANT &&
!(dst_metric_locked(dst, RTAX_MTU))); !(dst_metric_locked(dst, RTAX_MTU)));
} }
......
...@@ -812,7 +812,7 @@ int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb); ...@@ -812,7 +812,7 @@ int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
/* /*
* upper-layer output functions * upper-layer output functions
*/ */
int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
struct ipv6_txoptions *opt, int tclass); struct ipv6_txoptions *opt, int tclass);
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr); int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
...@@ -849,7 +849,7 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk) ...@@ -849,7 +849,7 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst, int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
struct flowi6 *fl6); struct flowi6 *fl6);
struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst); const struct in6_addr *final_dst);
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst); const struct in6_addr *final_dst);
......
...@@ -32,7 +32,7 @@ struct request_sock_ops { ...@@ -32,7 +32,7 @@ struct request_sock_ops {
int obj_size; int obj_size;
struct kmem_cache *slab; struct kmem_cache *slab;
char *slab_name; char *slab_name;
int (*rtx_syn_ack)(struct sock *sk, int (*rtx_syn_ack)(const struct sock *sk,
struct request_sock *req); struct request_sock *req);
void (*send_ack)(struct sock *sk, struct sk_buff *skb, void (*send_ack)(struct sock *sk, struct sk_buff *skb,
struct request_sock *req); struct request_sock *req);
...@@ -42,7 +42,7 @@ struct request_sock_ops { ...@@ -42,7 +42,7 @@ struct request_sock_ops {
void (*syn_ack_timeout)(const struct request_sock *req); void (*syn_ack_timeout)(const struct request_sock *req);
}; };
int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req); int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);
/* struct request_sock - mini sock to represent a connection request /* struct request_sock - mini sock to represent a connection request
*/ */
......
...@@ -114,7 +114,7 @@ void rt_cache_flush(struct net *net); ...@@ -114,7 +114,7 @@ void rt_cache_flush(struct net *net);
void rt_flush_dev(struct net_device *dev); void rt_flush_dev(struct net_device *dev);
struct rtable *__ip_route_output_key(struct net *, struct flowi4 *flp); struct rtable *__ip_route_output_key(struct net *, struct flowi4 *flp);
struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp, struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
struct sock *sk); const struct sock *sk);
struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *ipv4_blackhole_route(struct net *net,
struct dst_entry *dst_orig); struct dst_entry *dst_orig);
......
...@@ -461,7 +461,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, ...@@ -461,7 +461,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb); int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int tcp_connect(struct sock *sk); int tcp_connect(struct sock *sk);
struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
struct request_sock *req, struct request_sock *req,
struct tcp_fastopen_cookie *foc); struct tcp_fastopen_cookie *foc);
int tcp_disconnect(struct sock *sk, int flags); int tcp_disconnect(struct sock *sk, int flags);
...@@ -1207,7 +1207,8 @@ static inline int tcp_full_space(const struct sock *sk) ...@@ -1207,7 +1207,8 @@ static inline int tcp_full_space(const struct sock *sk)
} }
extern void tcp_openreq_init_rwin(struct request_sock *req, extern void tcp_openreq_init_rwin(struct request_sock *req,
struct sock *sk, struct dst_entry *dst); const struct sock *sk_listener,
const struct dst_entry *dst);
void tcp_enter_memory_pressure(struct sock *sk); void tcp_enter_memory_pressure(struct sock *sk);
...@@ -1371,16 +1372,16 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, ...@@ -1371,16 +1372,16 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
int family, const u8 *newkey, u8 newkeylen, gfp_t gfp); int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
int family); int family);
struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
const struct sock *addr_sk); const struct sock *addr_sk);
#ifdef CONFIG_TCP_MD5SIG #ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk, struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
const union tcp_md5_addr *addr, const union tcp_md5_addr *addr,
int family); int family);
#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key) #define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
#else #else
static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk, static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
const union tcp_md5_addr *addr, const union tcp_md5_addr *addr,
int family) int family)
{ {
...@@ -1675,7 +1676,7 @@ int tcp4_proc_init(void); ...@@ -1675,7 +1676,7 @@ int tcp4_proc_init(void);
void tcp4_proc_exit(void); void tcp4_proc_exit(void);
#endif #endif
int tcp_rtx_synack(struct sock *sk, struct request_sock *req); int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
int tcp_conn_request(struct request_sock_ops *rsk_ops, int tcp_conn_request(struct request_sock_ops *rsk_ops,
const struct tcp_request_sock_ops *af_ops, const struct tcp_request_sock_ops *af_ops,
struct sock *sk, struct sk_buff *skb); struct sock *sk, struct sk_buff *skb);
...@@ -1683,7 +1684,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, ...@@ -1683,7 +1684,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
/* TCP af-specific functions */ /* TCP af-specific functions */
struct tcp_sock_af_ops { struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG #ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk, struct tcp_md5sig_key *(*md5_lookup) (const struct sock *sk,
const struct sock *addr_sk); const struct sock *addr_sk);
int (*calc_md5_hash)(char *location, int (*calc_md5_hash)(char *location,
const struct tcp_md5sig_key *md5, const struct tcp_md5sig_key *md5,
...@@ -1698,14 +1699,15 @@ struct tcp_sock_af_ops { ...@@ -1698,14 +1699,15 @@ struct tcp_sock_af_ops {
struct tcp_request_sock_ops { struct tcp_request_sock_ops {
u16 mss_clamp; u16 mss_clamp;
#ifdef CONFIG_TCP_MD5SIG #ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *(*req_md5_lookup)(struct sock *sk, struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
const struct sock *addr_sk); const struct sock *addr_sk);
int (*calc_md5_hash) (char *location, int (*calc_md5_hash) (char *location,
const struct tcp_md5sig_key *md5, const struct tcp_md5sig_key *md5,
const struct sock *sk, const struct sock *sk,
const struct sk_buff *skb); const struct sk_buff *skb);
#endif #endif
void (*init_req)(struct request_sock *req, struct sock *sk, void (*init_req)(struct request_sock *req,
const struct sock *sk_listener,
struct sk_buff *skb); struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES #ifdef CONFIG_SYN_COOKIES
__u32 (*cookie_init_seq)(struct sock *sk, const struct sk_buff *skb, __u32 (*cookie_init_seq)(struct sock *sk, const struct sk_buff *skb,
...@@ -1715,7 +1717,7 @@ struct tcp_request_sock_ops { ...@@ -1715,7 +1717,7 @@ struct tcp_request_sock_ops {
const struct request_sock *req, const struct request_sock *req,
bool *strict); bool *strict);
__u32 (*init_seq)(const struct sk_buff *skb); __u32 (*init_seq)(const struct sk_buff *skb);
int (*send_synack)(struct sock *sk, struct dst_entry *dst, int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
struct flowi *fl, struct request_sock *req, struct flowi *fl, struct request_sock *req,
u16 queue_mapping, struct tcp_fastopen_cookie *foc); u16 queue_mapping, struct tcp_fastopen_cookie *foc);
void (*queue_hash_add)(struct sock *sk, struct request_sock *req, void (*queue_hash_add)(struct sock *sk, struct request_sock *req,
......
...@@ -293,7 +293,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized); ...@@ -293,7 +293,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
void dccp_destroy_sock(struct sock *sk); void dccp_destroy_sock(struct sock *sk);
void dccp_close(struct sock *sk, long timeout); void dccp_close(struct sock *sk, long timeout);
struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst, struct sk_buff *dccp_make_response(const struct sock *sk, struct dst_entry *dst,
struct request_sock *req); struct request_sock *req);
int dccp_connect(struct sock *sk); int dccp_connect(struct sock *sk);
......
...@@ -498,7 +498,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk, ...@@ -498,7 +498,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
return &rt->dst; return &rt->dst;
} }
static int dccp_v4_send_response(struct sock *sk, struct request_sock *req) static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req)
{ {
int err = -1; int err = -1;
struct sk_buff *skb; struct sk_buff *skb;
......
...@@ -181,7 +181,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, ...@@ -181,7 +181,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
} }
static int dccp_v6_send_response(struct sock *sk, struct request_sock *req) static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req)
{ {
struct inet_request_sock *ireq = inet_rsk(req); struct inet_request_sock *ireq = inet_rsk(req);
struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk);
......
...@@ -390,7 +390,7 @@ int dccp_retransmit_skb(struct sock *sk) ...@@ -390,7 +390,7 @@ int dccp_retransmit_skb(struct sock *sk)
return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC)); return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC));
} }
struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst, struct sk_buff *dccp_make_response(const struct sock *sk, struct dst_entry *dst,
struct request_sock *req) struct request_sock *req)
{ {
struct dccp_hdr *dh; struct dccp_hdr *dh;
...@@ -398,13 +398,18 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst, ...@@ -398,13 +398,18 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
const u32 dccp_header_size = sizeof(struct dccp_hdr) + const u32 dccp_header_size = sizeof(struct dccp_hdr) +
sizeof(struct dccp_hdr_ext) + sizeof(struct dccp_hdr_ext) +
sizeof(struct dccp_hdr_response); sizeof(struct dccp_hdr_response);
struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, struct sk_buff *skb;
GFP_ATOMIC);
if (skb == NULL) /* sk is marked const to clearly express we dont hold socket lock.
* sock_wmalloc() will atomically change sk->sk_wmem_alloc,
* it is safe to promote sk to non const.
*/
skb = sock_wmalloc((struct sock *)sk, MAX_DCCP_HEADER, 1,
GFP_ATOMIC);
if (!skb)
return NULL; return NULL;
/* Reserve space for headers. */ skb_reserve(skb, MAX_DCCP_HEADER);
skb_reserve(skb, sk->sk_prot->max_header);
skb_dst_set(skb, dst_clone(dst)); skb_dst_set(skb, dst_clone(dst));
......
...@@ -408,7 +408,7 @@ void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len) ...@@ -408,7 +408,7 @@ void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
} }
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer); EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
struct dst_entry *inet_csk_route_req(struct sock *sk, struct dst_entry *inet_csk_route_req(const struct sock *sk,
struct flowi4 *fl4, struct flowi4 *fl4,
const struct request_sock *req) const struct request_sock *req)
{ {
...@@ -563,7 +563,7 @@ static inline void syn_ack_recalc(struct request_sock *req, const int thresh, ...@@ -563,7 +563,7 @@ static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
req->num_timeout >= rskq_defer_accept - 1; req->num_timeout >= rskq_defer_accept - 1;
} }
int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req) int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
{ {
int err = req->rsk_ops->rtx_syn_ack(parent, req); int err = req->rsk_ops->rtx_syn_ack(parent, req);
......
...@@ -137,7 +137,7 @@ static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst) ...@@ -137,7 +137,7 @@ static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
* Add an ip header to a skbuff and send it out. * Add an ip header to a skbuff and send it out.
* *
*/ */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
__be32 saddr, __be32 daddr, struct ip_options_rcu *opt) __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
{ {
struct inet_sock *inet = inet_sk(sk); struct inet_sock *inet = inet_sk(sk);
...@@ -151,15 +151,17 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, ...@@ -151,15 +151,17 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
iph->version = 4; iph->version = 4;
iph->ihl = 5; iph->ihl = 5;
iph->tos = inet->tos; iph->tos = inet->tos;
if (ip_dont_fragment(sk, &rt->dst))
iph->frag_off = htons(IP_DF);
else
iph->frag_off = 0;
iph->ttl = ip_select_ttl(inet, &rt->dst); iph->ttl = ip_select_ttl(inet, &rt->dst);
iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr); iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
iph->saddr = saddr; iph->saddr = saddr;
iph->protocol = sk->sk_protocol; iph->protocol = sk->sk_protocol;
ip_select_ident(sock_net(sk), skb, sk); if (ip_dont_fragment(sk, &rt->dst)) {
iph->frag_off = htons(IP_DF);
iph->id = 0;
} else {
iph->frag_off = 0;
__ip_select_ident(sock_net(sk), iph, 1);
}
if (opt && opt->opt.optlen) { if (opt && opt->opt.optlen) {
iph->ihl += opt->opt.optlen>>2; iph->ihl += opt->opt.optlen>>2;
......
...@@ -2291,7 +2291,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or ...@@ -2291,7 +2291,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
} }
struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4, struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
struct sock *sk) const struct sock *sk)
{ {
struct rtable *rt = __ip_route_output_key(net, flp4); struct rtable *rt = __ip_route_output_key(net, flp4);
......
...@@ -173,6 +173,10 @@ void tcp_assign_congestion_control(struct sock *sk) ...@@ -173,6 +173,10 @@ void tcp_assign_congestion_control(struct sock *sk)
*/ */
if (ca->get_info) if (ca->get_info)
memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
if (ca->flags & TCP_CONG_NEEDS_ECN)
INET_ECN_xmit(sk);
else
INET_ECN_dontxmit(sk);
} }
void tcp_init_congestion_control(struct sock *sk) void tcp_init_congestion_control(struct sock *sk)
...@@ -181,6 +185,10 @@ void tcp_init_congestion_control(struct sock *sk) ...@@ -181,6 +185,10 @@ void tcp_init_congestion_control(struct sock *sk)
if (icsk->icsk_ca_ops->init) if (icsk->icsk_ca_ops->init)
icsk->icsk_ca_ops->init(sk); icsk->icsk_ca_ops->init(sk);
if (tcp_ca_needs_ecn(sk))
INET_ECN_xmit(sk);
else
INET_ECN_dontxmit(sk);
} }
static void tcp_reinit_congestion_control(struct sock *sk, static void tcp_reinit_congestion_control(struct sock *sk,
...@@ -192,8 +200,8 @@ static void tcp_reinit_congestion_control(struct sock *sk, ...@@ -192,8 +200,8 @@ static void tcp_reinit_congestion_control(struct sock *sk,
icsk->icsk_ca_ops = ca; icsk->icsk_ca_ops = ca;
icsk->icsk_ca_setsockopt = 1; icsk->icsk_ca_setsockopt = 1;
if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init) if (sk->sk_state != TCP_CLOSE)
icsk->icsk_ca_ops->init(sk); tcp_init_congestion_control(sk);
} }
/* Manage refcounts on socket close. */ /* Manage refcounts on socket close. */
......
...@@ -818,7 +818,7 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, ...@@ -818,7 +818,7 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
* This still operates on a request_sock only, not on a big * This still operates on a request_sock only, not on a big
* socket. * socket.
*/ */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
struct flowi *fl, struct flowi *fl,
struct request_sock *req, struct request_sock *req,
u16 queue_mapping, u16 queue_mapping,
...@@ -865,7 +865,7 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req) ...@@ -865,7 +865,7 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
*/ */
/* Find the Key structure for an address. */ /* Find the Key structure for an address. */
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk, struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
const union tcp_md5_addr *addr, const union tcp_md5_addr *addr,
int family) int family)
{ {
...@@ -877,7 +877,7 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk, ...@@ -877,7 +877,7 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
/* caller either holds rcu_read_lock() or socket lock */ /* caller either holds rcu_read_lock() or socket lock */
md5sig = rcu_dereference_check(tp->md5sig_info, md5sig = rcu_dereference_check(tp->md5sig_info,
sock_owned_by_user(sk) || sock_owned_by_user(sk) ||
lockdep_is_held(&sk->sk_lock.slock)); lockdep_is_held((spinlock_t *)&sk->sk_lock.slock));
if (!md5sig) if (!md5sig)
return NULL; return NULL;
#if IS_ENABLED(CONFIG_IPV6) #if IS_ENABLED(CONFIG_IPV6)
...@@ -894,7 +894,7 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk, ...@@ -894,7 +894,7 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
} }
EXPORT_SYMBOL(tcp_md5_do_lookup); EXPORT_SYMBOL(tcp_md5_do_lookup);
struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
const struct sock *addr_sk) const struct sock *addr_sk)
{ {
const union tcp_md5_addr *addr; const union tcp_md5_addr *addr;
...@@ -1168,7 +1168,8 @@ static bool tcp_v4_inbound_md5_hash(struct sock *sk, ...@@ -1168,7 +1168,8 @@ static bool tcp_v4_inbound_md5_hash(struct sock *sk,
} }
#endif #endif
static void tcp_v4_init_req(struct request_sock *req, struct sock *sk_listener, static void tcp_v4_init_req(struct request_sock *req,
const struct sock *sk_listener,
struct sk_buff *skb) struct sk_buff *skb)
{ {
struct inet_request_sock *ireq = inet_rsk(req); struct inet_request_sock *ireq = inet_rsk(req);
......
...@@ -362,27 +362,35 @@ void tcp_twsk_destructor(struct sock *sk) ...@@ -362,27 +362,35 @@ void tcp_twsk_destructor(struct sock *sk)
} }
EXPORT_SYMBOL_GPL(tcp_twsk_destructor); EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
/* Warning : This function is called without sk_listener being locked.
* Be sure to read socket fields once, as their value could change under us.
*/
void tcp_openreq_init_rwin(struct request_sock *req, void tcp_openreq_init_rwin(struct request_sock *req,
struct sock *sk, struct dst_entry *dst) const struct sock *sk_listener,
const struct dst_entry *dst)
{ {
struct inet_request_sock *ireq = inet_rsk(req); struct inet_request_sock *ireq = inet_rsk(req);
struct tcp_sock *tp = tcp_sk(sk); const struct tcp_sock *tp = tcp_sk(sk_listener);
__u8 rcv_wscale; u16 user_mss = READ_ONCE(tp->rx_opt.user_mss);
int full_space = tcp_full_space(sk_listener);
int mss = dst_metric_advmss(dst); int mss = dst_metric_advmss(dst);
u32 window_clamp;
__u8 rcv_wscale;
if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) if (user_mss && user_mss < mss)
mss = tp->rx_opt.user_mss; mss = user_mss;
window_clamp = READ_ONCE(tp->window_clamp);
/* Set this up on the first call only */ /* Set this up on the first call only */
req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW); req->window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);
/* limit the window selection if the user enforce a smaller rx buffer */ /* limit the window selection if the user enforce a smaller rx buffer */
if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
(req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0)) (req->window_clamp > full_space || req->window_clamp == 0))
req->window_clamp = tcp_full_space(sk); req->window_clamp = full_space;
/* tcp_full_space because it is guaranteed to be the first packet */ /* tcp_full_space because it is guaranteed to be the first packet */
tcp_select_initial_window(tcp_full_space(sk), tcp_select_initial_window(full_space,
mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
&req->rcv_wnd, &req->rcv_wnd,
&req->window_clamp, &req->window_clamp,
......
...@@ -357,14 +357,10 @@ static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb) ...@@ -357,14 +357,10 @@ static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
} }
static void static void
tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th, tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
struct sock *sk)
{ {
if (inet_rsk(req)->ecn_ok) { if (inet_rsk(req)->ecn_ok)
th->ece = 1; th->ece = 1;
if (tcp_ca_needs_ecn(sk))
INET_ECN_xmit(sk);
}
} }
/* Set up ECN state for a packet on a ESTABLISHED socket that is about to /* Set up ECN state for a packet on a ESTABLISHED socket that is about to
...@@ -612,12 +608,11 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb, ...@@ -612,12 +608,11 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
} }
/* Set up TCP options for SYN-ACKs. */ /* Set up TCP options for SYN-ACKs. */
static unsigned int tcp_synack_options(struct sock *sk, static unsigned int tcp_synack_options(struct request_sock *req,
struct request_sock *req, unsigned int mss, struct sk_buff *skb,
unsigned int mss, struct sk_buff *skb, struct tcp_out_options *opts,
struct tcp_out_options *opts, const struct tcp_md5sig_key *md5,
const struct tcp_md5sig_key *md5, struct tcp_fastopen_cookie *foc)
struct tcp_fastopen_cookie *foc)
{ {
struct inet_request_sock *ireq = inet_rsk(req); struct inet_request_sock *ireq = inet_rsk(req);
unsigned int remaining = MAX_TCP_OPTION_SPACE; unsigned int remaining = MAX_TCP_OPTION_SPACE;
...@@ -2949,20 +2944,25 @@ int tcp_send_synack(struct sock *sk) ...@@ -2949,20 +2944,25 @@ int tcp_send_synack(struct sock *sk)
* Allocate one skb and build a SYNACK packet. * Allocate one skb and build a SYNACK packet.
* @dst is consumed : Caller should not use it again. * @dst is consumed : Caller should not use it again.
*/ */
struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
struct request_sock *req, struct request_sock *req,
struct tcp_fastopen_cookie *foc) struct tcp_fastopen_cookie *foc)
{ {
struct tcp_out_options opts;
struct inet_request_sock *ireq = inet_rsk(req); struct inet_request_sock *ireq = inet_rsk(req);
struct tcp_sock *tp = tcp_sk(sk); const struct tcp_sock *tp = tcp_sk(sk);
struct tcphdr *th;
struct sk_buff *skb;
struct tcp_md5sig_key *md5 = NULL; struct tcp_md5sig_key *md5 = NULL;
struct tcp_out_options opts;
struct sk_buff *skb;
int tcp_header_size; int tcp_header_size;
struct tcphdr *th;
u16 user_mss;
int mss; int mss;
skb = sock_wmalloc(sk, MAX_TCP_HEADER, 1, GFP_ATOMIC); /* sk is a const pointer, because we want to express multiple cpus
* might call us concurrently.
* sock_wmalloc() will change sk->sk_wmem_alloc in an atomic way.
*/
skb = sock_wmalloc((struct sock *)sk, MAX_TCP_HEADER, 1, GFP_ATOMIC);
if (unlikely(!skb)) { if (unlikely(!skb)) {
dst_release(dst); dst_release(dst);
return NULL; return NULL;
...@@ -2973,8 +2973,9 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, ...@@ -2973,8 +2973,9 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
skb_dst_set(skb, dst); skb_dst_set(skb, dst);
mss = dst_metric_advmss(dst); mss = dst_metric_advmss(dst);
if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) user_mss = READ_ONCE(tp->rx_opt.user_mss);
mss = tp->rx_opt.user_mss; if (user_mss && user_mss < mss)
mss = user_mss;
memset(&opts, 0, sizeof(opts)); memset(&opts, 0, sizeof(opts));
#ifdef CONFIG_SYN_COOKIES #ifdef CONFIG_SYN_COOKIES
...@@ -2989,8 +2990,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, ...@@ -2989,8 +2990,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req)); md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
#endif #endif
skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4); skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5, tcp_header_size = tcp_synack_options(req, mss, skb, &opts, md5, foc) +
foc) + sizeof(*th); sizeof(*th);
skb_push(skb, tcp_header_size); skb_push(skb, tcp_header_size);
skb_reset_transport_header(skb); skb_reset_transport_header(skb);
...@@ -2999,7 +3000,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, ...@@ -2999,7 +3000,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
memset(th, 0, sizeof(struct tcphdr)); memset(th, 0, sizeof(struct tcphdr));
th->syn = 1; th->syn = 1;
th->ack = 1; th->ack = 1;
tcp_ecn_make_synack(req, th, sk); tcp_ecn_make_synack(req, th);
th->source = htons(ireq->ir_num); th->source = htons(ireq->ir_num);
th->dest = ireq->ir_rmt_port; th->dest = ireq->ir_rmt_port;
/* Setting of flags are superfluous here for callers (and ECE is /* Setting of flags are superfluous here for callers (and ECE is
...@@ -3014,7 +3015,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, ...@@ -3014,7 +3015,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
th->window = htons(min(req->rcv_wnd, 65535U)); th->window = htons(min(req->rcv_wnd, 65535U));
tcp_options_write((__be32 *)(th + 1), tp, &opts); tcp_options_write((__be32 *)(th + 1), NULL, &opts);
th->doff = (tcp_header_size >> 2); th->doff = (tcp_header_size >> 2);
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_OUTSEGS); TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_OUTSEGS);
...@@ -3501,7 +3502,7 @@ void tcp_send_probe0(struct sock *sk) ...@@ -3501,7 +3502,7 @@ void tcp_send_probe0(struct sock *sk)
TCP_RTO_MAX); TCP_RTO_MAX);
} }
int tcp_rtx_synack(struct sock *sk, struct request_sock *req) int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
{ {
const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific; const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
struct flowi fl; struct flowi fl;
......
...@@ -263,7 +263,7 @@ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, ...@@ -263,7 +263,7 @@ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info) void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
{ {
struct ipv6_pinfo *np = inet6_sk(sk); const struct ipv6_pinfo *np = inet6_sk(sk);
struct sock_exterr_skb *serr; struct sock_exterr_skb *serr;
struct ipv6hdr *iph; struct ipv6hdr *iph;
struct sk_buff *skb; struct sk_buff *skb;
......
...@@ -65,7 +65,7 @@ int inet6_csk_bind_conflict(const struct sock *sk, ...@@ -65,7 +65,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
} }
EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict); EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);
struct dst_entry *inet6_csk_route_req(struct sock *sk, struct dst_entry *inet6_csk_route_req(const struct sock *sk,
struct flowi6 *fl6, struct flowi6 *fl6,
const struct request_sock *req) const struct request_sock *req)
{ {
......
...@@ -150,14 +150,16 @@ int ip6_output(struct sock *sk, struct sk_buff *skb) ...@@ -150,14 +150,16 @@ int ip6_output(struct sock *sk, struct sk_buff *skb)
} }
/* /*
* xmit an sk_buff (used by TCP, SCTP and DCCP) * xmit an sk_buff (used by TCP, SCTP and DCCP)
* Note : socket lock is not held for SYNACK packets, but might be modified
* by calls to skb_set_owner_w() and ipv6_local_error(),
* which are using proper atomic operations or spinlocks.
*/ */
int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
struct ipv6_txoptions *opt, int tclass) struct ipv6_txoptions *opt, int tclass)
{ {
struct net *net = sock_net(sk); struct net *net = sock_net(sk);
struct ipv6_pinfo *np = inet6_sk(sk); const struct ipv6_pinfo *np = inet6_sk(sk);
struct in6_addr *first_hop = &fl6->daddr; struct in6_addr *first_hop = &fl6->daddr;
struct dst_entry *dst = skb_dst(skb); struct dst_entry *dst = skb_dst(skb);
struct ipv6hdr *hdr; struct ipv6hdr *hdr;
...@@ -186,7 +188,10 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, ...@@ -186,7 +188,10 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
} }
consume_skb(skb); consume_skb(skb);
skb = skb2; skb = skb2;
skb_set_owner_w(skb, sk); /* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
* it is safe to call in our context (socket lock not held)
*/
skb_set_owner_w(skb, (struct sock *)sk);
} }
if (opt->opt_flen) if (opt->opt_flen)
ipv6_push_frag_opts(skb, opt, &proto); ipv6_push_frag_opts(skb, opt, &proto);
...@@ -224,13 +229,20 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, ...@@ -224,13 +229,20 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) { if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)), IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_OUT, skb->len); IPSTATS_MIB_OUT, skb->len);
/* hooks should never assume socket lock is held.
* we promote our socket to non const
*/
return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
net, sk, skb, NULL, dst->dev, net, (struct sock *)sk, skb, NULL, dst->dev,
dst_output_okfn); dst_output_okfn);
} }
skb->dev = dst->dev; skb->dev = dst->dev;
ipv6_local_error(sk, EMSGSIZE, fl6, mtu); /* ipv6_local_error() does not require socket lock,
* we promote our socket to non const
*/
ipv6_local_error((struct sock *)sk, EMSGSIZE, fl6, mtu);
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
kfree_skb(skb); kfree_skb(skb);
return -EMSGSIZE; return -EMSGSIZE;
...@@ -883,7 +895,7 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk, ...@@ -883,7 +895,7 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
return dst; return dst;
} }
static int ip6_dst_lookup_tail(struct net *net, struct sock *sk, static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
struct dst_entry **dst, struct flowi6 *fl6) struct dst_entry **dst, struct flowi6 *fl6)
{ {
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
...@@ -1014,7 +1026,7 @@ EXPORT_SYMBOL_GPL(ip6_dst_lookup); ...@@ -1014,7 +1026,7 @@ EXPORT_SYMBOL_GPL(ip6_dst_lookup);
* It returns a valid dst pointer on success, or a pointer encoded * It returns a valid dst pointer on success, or a pointer encoded
* error code. * error code.
*/ */
struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst) const struct in6_addr *final_dst)
{ {
struct dst_entry *dst = NULL; struct dst_entry *dst = NULL;
......
...@@ -434,7 +434,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, ...@@ -434,7 +434,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
} }
static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst, static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
struct flowi *fl, struct flowi *fl,
struct request_sock *req, struct request_sock *req,
u16 queue_mapping, u16 queue_mapping,
...@@ -476,13 +476,13 @@ static void tcp_v6_reqsk_destructor(struct request_sock *req) ...@@ -476,13 +476,13 @@ static void tcp_v6_reqsk_destructor(struct request_sock *req)
} }
#ifdef CONFIG_TCP_MD5SIG #ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
const struct in6_addr *addr) const struct in6_addr *addr)
{ {
return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6); return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
} }
static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk, static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
const struct sock *addr_sk) const struct sock *addr_sk)
{ {
return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr); return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
...@@ -663,22 +663,23 @@ static bool tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb) ...@@ -663,22 +663,23 @@ static bool tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
} }
#endif #endif
static void tcp_v6_init_req(struct request_sock *req, struct sock *sk, static void tcp_v6_init_req(struct request_sock *req,
const struct sock *sk_listener,
struct sk_buff *skb) struct sk_buff *skb)
{ {
struct inet_request_sock *ireq = inet_rsk(req); struct inet_request_sock *ireq = inet_rsk(req);
struct ipv6_pinfo *np = inet6_sk(sk); const struct ipv6_pinfo *np = inet6_sk(sk_listener);
ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr; ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr; ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
/* So that link locals have meaning */ /* So that link locals have meaning */
if (!sk->sk_bound_dev_if && if (!sk_listener->sk_bound_dev_if &&
ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL) ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
ireq->ir_iif = tcp_v6_iif(skb); ireq->ir_iif = tcp_v6_iif(skb);
if (!TCP_SKB_CB(skb)->tcp_tw_isn && if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
(ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) || (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxinfo ||
np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim || np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
np->rxopt.bits.rxohlim || np->repflow)) { np->rxopt.bits.rxohlim || np->repflow)) {
......
...@@ -1208,7 +1208,7 @@ static inline int policy_to_flow_dir(int dir) ...@@ -1208,7 +1208,7 @@ static inline int policy_to_flow_dir(int dir)
} }
} }
static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
const struct flowi *fl) const struct flowi *fl)
{ {
struct xfrm_policy *pol; struct xfrm_policy *pol;
...@@ -2185,7 +2185,7 @@ static struct dst_entry *make_blackhole(struct net *net, u16 family, ...@@ -2185,7 +2185,7 @@ static struct dst_entry *make_blackhole(struct net *net, u16 family,
*/ */
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
const struct flowi *fl, const struct flowi *fl,
struct sock *sk, int flags) const struct sock *sk, int flags)
{ {
struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
struct flow_cache_object *flo; struct flow_cache_object *flo;
...@@ -2333,7 +2333,7 @@ EXPORT_SYMBOL(xfrm_lookup); ...@@ -2333,7 +2333,7 @@ EXPORT_SYMBOL(xfrm_lookup);
*/ */
struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
const struct flowi *fl, const struct flowi *fl,
struct sock *sk, int flags) const struct sock *sk, int flags)
{ {
struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk, struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
flags | XFRM_LOOKUP_QUEUE | flags | XFRM_LOOKUP_QUEUE |
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment