Commit ebb516af authored by Eric Dumazet, committed by David S. Miller

tcp/dccp: fix race at listener dismantle phase

Under stress, a close() on a listener can trigger the
WARN_ON(sk->sk_ack_backlog) in inet_csk_listen_stop()

We need to test whether the listener is still active before queueing
a child in inet_csk_reqsk_queue_add().

Create a common inet_child_forget() helper, and use it
from inet_csk_reqsk_queue_add() and inet_csk_listen_stop().
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f03f2e15
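
As an illustration of the scenario above, a minimal userspace stress sketch (hypothetical, not the reproducer behind this patch; port, thread count and iteration count are arbitrary): several threads keep completing connections against a listener whose backlog is never drained, and the listener is then close()d while children are still being queued.

/* Hypothetical stress sketch, not part of the patch: close() a listener
 * while connections are still landing in its accept queue.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <pthread.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#define PORT		12345	/* arbitrary test port */
#define NTHREADS	8
#define NCONNECTS	1000

static void *connector(void *arg)
{
	struct sockaddr_in addr;
	int i;

	(void)arg;
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(PORT);
	addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

	for (i = 0; i < NCONNECTS; i++) {
		int fd = socket(AF_INET, SOCK_STREAM, 0);

		if (fd < 0)
			break;
		/* Children queue up on the listener because nothing ever
		 * accept()s them, so close() always races with more
		 * children being added.
		 */
		connect(fd, (struct sockaddr *)&addr, sizeof(addr));
		close(fd);
	}
	return NULL;
}

int main(void)
{
	struct sockaddr_in addr;
	pthread_t th[NTHREADS];
	int lfd, i;

	lfd = socket(AF_INET, SOCK_STREAM, 0);
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(PORT);
	addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	bind(lfd, (struct sockaddr *)&addr, sizeof(addr));
	listen(lfd, 128);

	for (i = 0; i < NTHREADS; i++)
		pthread_create(&th[i], NULL, connector, NULL);

	usleep(10 * 1000);	/* let some children queue up */
	close(lfd);		/* dismantle the listener under load */

	for (i = 0; i < NTHREADS; i++)
		pthread_join(th[i], NULL);
	return 0;
}
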
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -268,13 +268,8 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
 					    struct sock *newsk,
 					    const struct request_sock *req);
 
-static inline void inet_csk_reqsk_queue_add(struct sock *sk,
-					    struct request_sock *req,
-					    struct sock *child)
-{
-	reqsk_queue_add(&inet_csk(sk)->icsk_accept_queue, req, sk, child);
-}
-
+void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
+			      struct sock *child);
 
 void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
 				   unsigned long timeout);
...
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -186,25 +186,6 @@ static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
 	return queue->rskq_accept_head == NULL;
 }
 
-static inline void reqsk_queue_add(struct request_sock_queue *queue,
-				   struct request_sock *req,
-				   struct sock *parent,
-				   struct sock *child)
-{
-	spin_lock(&queue->rskq_lock);
-	req->sk = child;
-	sk_acceptq_added(parent);
-	if (queue->rskq_accept_head == NULL)
-		queue->rskq_accept_head = req;
-	else
-		queue->rskq_accept_tail->dl_next = req;
-	queue->rskq_accept_tail = req;
-	req->dl_next = NULL;
-	spin_unlock(&queue->rskq_lock);
-}
-
 static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
 						      struct sock *parent)
 {
...
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -764,6 +764,53 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
 }
 EXPORT_SYMBOL_GPL(inet_csk_listen_start);
 
+static void inet_child_forget(struct sock *sk, struct request_sock *req,
+			      struct sock *child)
+{
+	sk->sk_prot->disconnect(child, O_NONBLOCK);
+
+	sock_orphan(child);
+
+	percpu_counter_inc(sk->sk_prot->orphan_count);
+
+	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
+		BUG_ON(tcp_sk(child)->fastopen_rsk != req);
+		BUG_ON(sk != req->rsk_listener);
+
+		/* Paranoid, to prevent race condition if
+		 * an inbound pkt destined for child is
+		 * blocked by sock lock in tcp_v4_rcv().
+		 * Also to satisfy an assertion in
+		 * tcp_v4_destroy_sock().
+		 */
+		tcp_sk(child)->fastopen_rsk = NULL;
+	}
+	inet_csk_destroy_sock(child);
+	reqsk_put(req);
+}
+
+void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
+			      struct sock *child)
+{
+	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+
+	spin_lock(&queue->rskq_lock);
+	if (unlikely(sk->sk_state != TCP_LISTEN)) {
+		inet_child_forget(sk, req, child);
+	} else {
+		req->sk = child;
+		req->dl_next = NULL;
+		if (queue->rskq_accept_head == NULL)
+			queue->rskq_accept_head = req;
+		else
+			queue->rskq_accept_tail->dl_next = req;
+		queue->rskq_accept_tail = req;
+		sk_acceptq_added(sk);
+	}
+	spin_unlock(&queue->rskq_lock);
+}
+EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
+
 /*
  * This routine closes sockets which have been at least partially
  * opened, but not yet accepted.
@@ -790,31 +837,11 @@ void inet_csk_listen_stop(struct sock *sk)
 		WARN_ON(sock_owned_by_user(child));
 		sock_hold(child);
 
-		sk->sk_prot->disconnect(child, O_NONBLOCK);
-
-		sock_orphan(child);
-
-		percpu_counter_inc(sk->sk_prot->orphan_count);
-
-		if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
-			BUG_ON(tcp_sk(child)->fastopen_rsk != req);
-			BUG_ON(sk != req->rsk_listener);
-
-			/* Paranoid, to prevent race condition if
-			 * an inbound pkt destined for child is
-			 * blocked by sock lock in tcp_v4_rcv().
-			 * Also to satisfy an assertion in
-			 * tcp_v4_destroy_sock().
-			 */
-			tcp_sk(child)->fastopen_rsk = NULL;
-		}
-		inet_csk_destroy_sock(child);
-
+		inet_child_forget(sk, req, child);
 		bh_unlock_sock(child);
 		local_bh_enable();
 		sock_put(child);
-		reqsk_put(req);
 
 		cond_resched();
 	}
 	if (queue->fastopenq.rskq_rst_head) {
...
@@ -829,7 +856,7 @@ void inet_csk_listen_stop(struct sock *sk)
 		req = next;
 	}
 	}
-	WARN_ON(sk->sk_ack_backlog);
+	WARN_ON_ONCE(sk->sk_ack_backlog);
 }
 EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
...
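
Taken together, the hunks above re-check the listener's state under the accept-queue lock and tear down any child that arrives after the listener has been dismantled. A simplified, self-contained sketch of that pattern (hypothetical types and names, not kernel code):

/* Simplified sketch of the pattern applied by this patch: the listener's
 * liveness is re-checked under the same lock that protects the accept
 * queue, and a late child is torn down instead of being queued.
 * Types and names are hypothetical, not kernel code.
 */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct child {
	struct child *next;
};

struct listener {
	pthread_mutex_t lock;		/* plays the role of rskq_lock */
	bool listening;			/* TCP_LISTEN vs. dismantled */
	struct child *head, *tail;	/* accept queue */
	int backlog;			/* plays the role of sk_ack_backlog */
};

/* counterpart of inet_child_forget(): dispose of a child that will never
 * be accepted, instead of leaking it on a dead listener's queue */
static void child_forget(struct child *c)
{
	free(c);
}

/* counterpart of inet_csk_reqsk_queue_add() after the patch */
static void queue_add(struct listener *l, struct child *c)
{
	pthread_mutex_lock(&l->lock);
	if (!l->listening) {
		child_forget(c);	/* listener already closed: drop it */
	} else {
		c->next = NULL;
		if (!l->head)
			l->head = c;
		else
			l->tail->next = c;
		l->tail = c;
		l->backlog++;
	}
	pthread_mutex_unlock(&l->lock);
}

/* counterpart of inet_csk_listen_stop(): dismantle the listener and
 * forget every child still sitting on the accept queue */
static void listen_stop(struct listener *l)
{
	struct child *c, *next;

	pthread_mutex_lock(&l->lock);
	l->listening = false;
	for (c = l->head; c; c = next) {
		next = c->next;
		child_forget(c);
		l->backlog--;
	}
	l->head = l->tail = NULL;
	pthread_mutex_unlock(&l->lock);
}

int main(void)
{
	struct listener l = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.listening = true,
	};

	queue_add(&l, calloc(1, sizeof(struct child)));	/* queued normally */
	listen_stop(&l);				/* dismantle under "load" */
	queue_add(&l, calloc(1, sizeof(struct child)));	/* late child: forgotten */

	assert(l.backlog == 0);		/* the WARN_ON() this patch addresses */
	return 0;
}

Doing the check under rskq_lock is what closes the window: a child is either queued while the listener is still in TCP_LISTEN and later drained by inet_csk_listen_stop(), or it observes the dismantled state and is forgotten, so sk_ack_backlog is zero when the final WARN_ON_ONCE() runs.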