Commit 2679161c authored by Eric Dumazet, committed by Greg Kroah-Hartman

tcp: do not drop syn_recv on all icmp reports

[ Upstream commit 9cf74903 ]

Petr Novopashenniy reported that ICMP redirects on SYN_RECV sockets
were leading to RST.

This is of course incorrect.

A specific list of ICMP messages should be able to drop a SYN_RECV.

For instance, a REDIRECT on SYN_RECV shall be ignored, as we do
not hold a dst per SYN_RECV pseudo request.

Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=111751
Fixes: 079096f1 ("tcp/dccp: install syn_recv requests into ehash table")
Reported-by: Petr Novopashenniy <pety@rusnet.ru>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 3ba9b9f2
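
The commit message above notes that only a specific list of ICMP messages should be able to abort a SYN_RECV request. For the IPv4 path, that condition can be read as a small predicate. The helper below is only an illustrative sketch (the function name is hypothetical and not part of the patch); the type/code checks mirror the tcp_v4_err() hunk in net/ipv4/tcp_ipv4.c:

static bool icmp_aborts_syn_recv(int type, int code)
{
	/* Hypothetical helper, not in the patch: summarizes which ICMPv4
	 * reports may abort a TCP_NEW_SYN_RECV request socket. Redirects
	 * do not match, so they are now ignored instead of leading to RST.
	 */
	return type == ICMP_PARAMETERPROB ||
	       type == ICMP_TIME_EXCEEDED ||
	       (type == ICMP_DEST_UNREACH &&
		(code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH));
}

On the IPv6 side, the same decision is delegated to icmpv6_err_convert(), whose return value already indicates whether the error is fatal.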
include/net/tcp.h

@@ -449,7 +449,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
 void tcp_v4_mtu_reduced(struct sock *sk);
-void tcp_req_err(struct sock *sk, u32 seq);
+void tcp_req_err(struct sock *sk, u32 seq, bool abort);
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 struct sock *tcp_create_openreq_child(const struct sock *sk,
 				      struct request_sock *req,
net/ipv4/tcp_ipv4.c

@@ -312,7 +312,7 @@ static void do_redirect(struct sk_buff *skb, struct sock *sk)
 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
-void tcp_req_err(struct sock *sk, u32 seq)
+void tcp_req_err(struct sock *sk, u32 seq, bool abort)
 {
 	struct request_sock *req = inet_reqsk(sk);
 	struct net *net = sock_net(sk);
@@ -324,7 +324,7 @@ void tcp_req_err(struct sock *sk, u32 seq)
 	if (seq != tcp_rsk(req)->snt_isn) {
 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
-	} else {
+	} else if (abort) {
 		/*
 		 * Still in SYN_RECV, just remove it silently.
 		 * There is no good way to pass the error to the newly
@@ -384,7 +384,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 	}
 	seq = ntohl(th->seq);
 	if (sk->sk_state == TCP_NEW_SYN_RECV)
-		return tcp_req_err(sk, seq);
+		return tcp_req_err(sk, seq,
+				   type == ICMP_PARAMETERPROB ||
+				   type == ICMP_TIME_EXCEEDED ||
+				   (type == ICMP_DEST_UNREACH &&
+				    (code == ICMP_NET_UNREACH ||
+				     code == ICMP_HOST_UNREACH)));
 	bh_lock_sock(sk);
 	/* If too many ICMPs get dropped on busy
net/ipv6/tcp_ipv6.c

@@ -328,6 +328,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	struct tcp_sock *tp;
 	__u32 seq, snd_una;
 	struct sock *sk;
+	bool fatal;
 	int err;
 	sk = __inet6_lookup_established(net, &tcp_hashinfo,
@@ -346,8 +347,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		return;
 	}
 	seq = ntohl(th->seq);
+	fatal = icmpv6_err_convert(type, code, &err);
 	if (sk->sk_state == TCP_NEW_SYN_RECV)
-		return tcp_req_err(sk, seq);
+		return tcp_req_err(sk, seq, fatal);
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
@@ -401,7 +403,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		goto out;
 	}
-	icmpv6_err_convert(type, code, &err);
 	/* Might be for an request_sock */
 	switch (sk->sk_state) {