Commit 9439ce00 authored by Eric Dumazet's avatar Eric Dumazet Committed by David S. Miller

tcp: rename struct tcp_request_sock listener

The listener field in struct tcp_request_sock is a pointer
back to the listener. We now have req->rsk_listener, so TCP
only needs one boolean and not a full pointer.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4e9a578e
...@@ -111,7 +111,7 @@ struct tcp_request_sock_ops; ...@@ -111,7 +111,7 @@ struct tcp_request_sock_ops;
struct tcp_request_sock { struct tcp_request_sock {
struct inet_request_sock req; struct inet_request_sock req;
const struct tcp_request_sock_ops *af_specific; const struct tcp_request_sock_ops *af_specific;
struct sock *listener; /* needed for TFO */ bool tfo_listener;
u32 rcv_isn; u32 rcv_isn;
u32 snt_isn; u32 snt_isn;
u32 snt_synack; /* synack sent time */ u32 snt_synack; /* synack sent time */
......
...@@ -153,24 +153,22 @@ void reqsk_queue_destroy(struct request_sock_queue *queue) ...@@ -153,24 +153,22 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
* case might also exist in tcp_v4_hnd_req() that will trigger this locking * case might also exist in tcp_v4_hnd_req() that will trigger this locking
* order. * order.
* *
* When a TFO req is created, it needs to sock_hold its listener to prevent * This function also sets "treq->tfo_listener" to false.
* the latter data structure from going away. * treq->tfo_listener is used by the listener so it is protected by the
*
* This function also sets "treq->listener" to NULL and unreference listener
* socket. treq->listener is used by the listener so it is protected by the
* fastopenq->lock in this function. * fastopenq->lock in this function.
*/ */
void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req, void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
bool reset) bool reset)
{ {
struct sock *lsk = tcp_rsk(req)->listener; struct sock *lsk = req->rsk_listener;
struct fastopen_queue *fastopenq = struct fastopen_queue *fastopenq;
inet_csk(lsk)->icsk_accept_queue.fastopenq;
fastopenq = inet_csk(lsk)->icsk_accept_queue.fastopenq;
tcp_sk(sk)->fastopen_rsk = NULL; tcp_sk(sk)->fastopen_rsk = NULL;
spin_lock_bh(&fastopenq->lock); spin_lock_bh(&fastopenq->lock);
fastopenq->qlen--; fastopenq->qlen--;
tcp_rsk(req)->listener = NULL; tcp_rsk(req)->tfo_listener = false;
if (req->sk) /* the child socket hasn't been accepted yet */ if (req->sk) /* the child socket hasn't been accepted yet */
goto out; goto out;
...@@ -179,7 +177,6 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req, ...@@ -179,7 +177,6 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
* special RST handling below. * special RST handling below.
*/ */
spin_unlock_bh(&fastopenq->lock); spin_unlock_bh(&fastopenq->lock);
sock_put(lsk);
reqsk_put(req); reqsk_put(req);
return; return;
} }
...@@ -201,5 +198,4 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req, ...@@ -201,5 +198,4 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
fastopenq->qlen++; fastopenq->qlen++;
out: out:
spin_unlock_bh(&fastopenq->lock); spin_unlock_bh(&fastopenq->lock);
sock_put(lsk);
} }
...@@ -325,7 +325,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err) ...@@ -325,7 +325,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
sk_acceptq_removed(sk); sk_acceptq_removed(sk);
if (sk->sk_protocol == IPPROTO_TCP && queue->fastopenq != NULL) { if (sk->sk_protocol == IPPROTO_TCP && queue->fastopenq != NULL) {
spin_lock_bh(&queue->fastopenq->lock); spin_lock_bh(&queue->fastopenq->lock);
if (tcp_rsk(req)->listener) { if (tcp_rsk(req)->tfo_listener) {
/* We are still waiting for the final ACK from 3WHS /* We are still waiting for the final ACK from 3WHS
* so can't free req now. Instead, we set req->sk to * so can't free req now. Instead, we set req->sk to
* NULL to signify that the child socket is taken * NULL to signify that the child socket is taken
...@@ -817,9 +817,9 @@ void inet_csk_listen_stop(struct sock *sk) ...@@ -817,9 +817,9 @@ void inet_csk_listen_stop(struct sock *sk)
percpu_counter_inc(sk->sk_prot->orphan_count); percpu_counter_inc(sk->sk_prot->orphan_count);
if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->listener) { if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
BUG_ON(tcp_sk(child)->fastopen_rsk != req); BUG_ON(tcp_sk(child)->fastopen_rsk != req);
BUG_ON(sk != tcp_rsk(req)->listener); BUG_ON(sk != req->rsk_listener);
/* Paranoid, to prevent race condition if /* Paranoid, to prevent race condition if
* an inbound pkt destined for child is * an inbound pkt destined for child is
...@@ -828,7 +828,6 @@ void inet_csk_listen_stop(struct sock *sk) ...@@ -828,7 +828,6 @@ void inet_csk_listen_stop(struct sock *sk)
* tcp_v4_destroy_sock(). * tcp_v4_destroy_sock().
*/ */
tcp_sk(child)->fastopen_rsk = NULL; tcp_sk(child)->fastopen_rsk = NULL;
sock_put(sk);
} }
inet_csk_destroy_sock(child); inet_csk_destroy_sock(child);
......
...@@ -345,7 +345,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) ...@@ -345,7 +345,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
ireq->tstamp_ok = tcp_opt.saw_tstamp; ireq->tstamp_ok = tcp_opt.saw_tstamp;
req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
treq->snt_synack = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsecr : 0; treq->snt_synack = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsecr : 0;
treq->listener = NULL; treq->tfo_listener = false;
ireq->ireq_family = AF_INET; ireq->ireq_family = AF_INET;
ireq->ir_iif = sk->sk_bound_dev_if; ireq->ir_iif = sk->sk_bound_dev_if;
......
...@@ -155,12 +155,7 @@ static bool tcp_fastopen_create_child(struct sock *sk, ...@@ -155,12 +155,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
tp = tcp_sk(child); tp = tcp_sk(child);
tp->fastopen_rsk = req; tp->fastopen_rsk = req;
/* Do a hold on the listner sk so that if the listener is being tcp_rsk(req)->tfo_listener = true;
* closed, the child that has been accepted can live on and still
* access listen_lock.
*/
sock_hold(sk);
tcp_rsk(req)->listener = sk;
/* RFC1323: The window in SYN & SYN/ACK segments is never /* RFC1323: The window in SYN & SYN/ACK segments is never
* scaled. So correct it appropriately. * scaled. So correct it appropriately.
......
...@@ -6120,7 +6120,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, ...@@ -6120,7 +6120,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
if (err || want_cookie) if (err || want_cookie)
goto drop_and_free; goto drop_and_free;
tcp_rsk(req)->listener = NULL; tcp_rsk(req)->tfo_listener = false;
af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT); af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
} }
......
...@@ -195,7 +195,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) ...@@ -195,7 +195,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
ireq = inet_rsk(req); ireq = inet_rsk(req);
treq = tcp_rsk(req); treq = tcp_rsk(req);
treq->listener = NULL; treq->tfo_listener = false;
ireq->ireq_family = AF_INET6; ireq->ireq_family = AF_INET6;
if (security_inet_conn_request(sk, skb, req)) if (security_inet_conn_request(sk, skb, req))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment