Commit b2827053 authored by Eric Dumazet, committed by David S. Miller

net: convert syn_wait_lock to a spinlock

This is a low hanging fruit, as we'll get rid of syn_wait_lock eventually.

We hold syn_wait_lock for such short sections that it makes no sense to use
a read/write lock. A spin lock is simply faster.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8b929ab1
...@@ -173,11 +173,6 @@ struct fastopen_queue { ...@@ -173,11 +173,6 @@ struct fastopen_queue {
* %syn_wait_lock is necessary only to avoid proc interface having to grab the main * %syn_wait_lock is necessary only to avoid proc interface having to grab the main
* lock sock while browsing the listening hash (otherwise it's deadlock prone). * lock sock while browsing the listening hash (otherwise it's deadlock prone).
* *
* This lock is acquired in read mode only from listening_get_next() seq_file
* op and it's acquired in write mode _only_ from code that is actively
* changing rskq_accept_head. All readers that are holding the master sock lock
* don't need to grab this lock in read mode too as rskq_accept_head. writes
* are always protected from the main sock lock.
*/ */
struct request_sock_queue { struct request_sock_queue {
struct request_sock *rskq_accept_head; struct request_sock *rskq_accept_head;
...@@ -192,7 +187,7 @@ struct request_sock_queue { ...@@ -192,7 +187,7 @@ struct request_sock_queue {
*/ */
/* temporary alignment, our goal is to get rid of this lock */ /* temporary alignment, our goal is to get rid of this lock */
rwlock_t syn_wait_lock ____cacheline_aligned_in_smp; spinlock_t syn_wait_lock ____cacheline_aligned_in_smp;
}; };
int reqsk_queue_alloc(struct request_sock_queue *queue, int reqsk_queue_alloc(struct request_sock_queue *queue,
...@@ -223,14 +218,14 @@ static inline void reqsk_queue_unlink(struct request_sock_queue *queue, ...@@ -223,14 +218,14 @@ static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
struct listen_sock *lopt = queue->listen_opt; struct listen_sock *lopt = queue->listen_opt;
struct request_sock **prev; struct request_sock **prev;
write_lock(&queue->syn_wait_lock); spin_lock(&queue->syn_wait_lock);
prev = &lopt->syn_table[req->rsk_hash]; prev = &lopt->syn_table[req->rsk_hash];
while (*prev != req) while (*prev != req)
prev = &(*prev)->dl_next; prev = &(*prev)->dl_next;
*prev = req->dl_next; *prev = req->dl_next;
write_unlock(&queue->syn_wait_lock); spin_unlock(&queue->syn_wait_lock);
if (del_timer(&req->rsk_timer)) if (del_timer(&req->rsk_timer))
reqsk_put(req); reqsk_put(req);
} }
......
...@@ -58,14 +58,14 @@ int reqsk_queue_alloc(struct request_sock_queue *queue, ...@@ -58,14 +58,14 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
return -ENOMEM; return -ENOMEM;
get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd)); get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
rwlock_init(&queue->syn_wait_lock); spin_lock_init(&queue->syn_wait_lock);
queue->rskq_accept_head = NULL; queue->rskq_accept_head = NULL;
lopt->nr_table_entries = nr_table_entries; lopt->nr_table_entries = nr_table_entries;
lopt->max_qlen_log = ilog2(nr_table_entries); lopt->max_qlen_log = ilog2(nr_table_entries);
write_lock_bh(&queue->syn_wait_lock); spin_lock_bh(&queue->syn_wait_lock);
queue->listen_opt = lopt; queue->listen_opt = lopt;
write_unlock_bh(&queue->syn_wait_lock); spin_unlock_bh(&queue->syn_wait_lock);
return 0; return 0;
} }
...@@ -81,10 +81,10 @@ static inline struct listen_sock *reqsk_queue_yank_listen_sk( ...@@ -81,10 +81,10 @@ static inline struct listen_sock *reqsk_queue_yank_listen_sk(
{ {
struct listen_sock *lopt; struct listen_sock *lopt;
write_lock_bh(&queue->syn_wait_lock); spin_lock_bh(&queue->syn_wait_lock);
lopt = queue->listen_opt; lopt = queue->listen_opt;
queue->listen_opt = NULL; queue->listen_opt = NULL;
write_unlock_bh(&queue->syn_wait_lock); spin_unlock_bh(&queue->syn_wait_lock);
return lopt; return lopt;
} }
...@@ -100,7 +100,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue) ...@@ -100,7 +100,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
for (i = 0; i < lopt->nr_table_entries; i++) { for (i = 0; i < lopt->nr_table_entries; i++) {
struct request_sock *req; struct request_sock *req;
write_lock_bh(&queue->syn_wait_lock); spin_lock_bh(&queue->syn_wait_lock);
while ((req = lopt->syn_table[i]) != NULL) { while ((req = lopt->syn_table[i]) != NULL) {
lopt->syn_table[i] = req->dl_next; lopt->syn_table[i] = req->dl_next;
atomic_inc(&lopt->qlen_dec); atomic_inc(&lopt->qlen_dec);
...@@ -108,7 +108,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue) ...@@ -108,7 +108,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
reqsk_put(req); reqsk_put(req);
reqsk_put(req); reqsk_put(req);
} }
write_unlock_bh(&queue->syn_wait_lock); spin_unlock_bh(&queue->syn_wait_lock);
} }
} }
......
...@@ -495,7 +495,7 @@ struct request_sock *inet_csk_search_req(struct sock *sk, ...@@ -495,7 +495,7 @@ struct request_sock *inet_csk_search_req(struct sock *sk,
u32 hash = inet_synq_hash(raddr, rport, lopt->hash_rnd, u32 hash = inet_synq_hash(raddr, rport, lopt->hash_rnd,
lopt->nr_table_entries); lopt->nr_table_entries);
write_lock(&icsk->icsk_accept_queue.syn_wait_lock); spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) { for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
const struct inet_request_sock *ireq = inet_rsk(req); const struct inet_request_sock *ireq = inet_rsk(req);
...@@ -508,7 +508,7 @@ struct request_sock *inet_csk_search_req(struct sock *sk, ...@@ -508,7 +508,7 @@ struct request_sock *inet_csk_search_req(struct sock *sk,
break; break;
} }
} }
write_unlock(&icsk->icsk_accept_queue.syn_wait_lock); spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
return req; return req;
} }
...@@ -650,10 +650,10 @@ void reqsk_queue_hash_req(struct request_sock_queue *queue, ...@@ -650,10 +650,10 @@ void reqsk_queue_hash_req(struct request_sock_queue *queue,
setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req); setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
req->rsk_hash = hash; req->rsk_hash = hash;
write_lock(&queue->syn_wait_lock); spin_lock(&queue->syn_wait_lock);
req->dl_next = lopt->syn_table[hash]; req->dl_next = lopt->syn_table[hash];
lopt->syn_table[hash] = req; lopt->syn_table[hash] = req;
write_unlock(&queue->syn_wait_lock); spin_unlock(&queue->syn_wait_lock);
mod_timer_pinned(&req->rsk_timer, jiffies + timeout); mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
} }
......
...@@ -728,7 +728,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, ...@@ -728,7 +728,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
entry.family = sk->sk_family; entry.family = sk->sk_family;
read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
lopt = icsk->icsk_accept_queue.listen_opt; lopt = icsk->icsk_accept_queue.listen_opt;
if (!lopt || !listen_sock_qlen(lopt)) if (!lopt || !listen_sock_qlen(lopt))
...@@ -776,7 +776,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, ...@@ -776,7 +776,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
} }
out: out:
read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
return err; return err;
} }
......
...@@ -1909,13 +1909,13 @@ static void *listening_get_next(struct seq_file *seq, void *cur) ...@@ -1909,13 +1909,13 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
} }
sk = sk_nulls_next(st->syn_wait_sk); sk = sk_nulls_next(st->syn_wait_sk);
st->state = TCP_SEQ_STATE_LISTENING; st->state = TCP_SEQ_STATE_LISTENING;
read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
} else { } else {
icsk = inet_csk(sk); icsk = inet_csk(sk);
read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
if (reqsk_queue_len(&icsk->icsk_accept_queue)) if (reqsk_queue_len(&icsk->icsk_accept_queue))
goto start_req; goto start_req;
read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
sk = sk_nulls_next(sk); sk = sk_nulls_next(sk);
} }
get_sk: get_sk:
...@@ -1927,7 +1927,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur) ...@@ -1927,7 +1927,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
goto out; goto out;
} }
icsk = inet_csk(sk); icsk = inet_csk(sk);
read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
if (reqsk_queue_len(&icsk->icsk_accept_queue)) { if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req: start_req:
st->uid = sock_i_uid(sk); st->uid = sock_i_uid(sk);
...@@ -1936,7 +1936,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur) ...@@ -1936,7 +1936,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
st->sbucket = 0; st->sbucket = 0;
goto get_req; goto get_req;
} }
read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
} }
spin_unlock_bh(&ilb->lock); spin_unlock_bh(&ilb->lock);
st->offset = 0; st->offset = 0;
...@@ -2155,7 +2155,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v) ...@@ -2155,7 +2155,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
case TCP_SEQ_STATE_OPENREQ: case TCP_SEQ_STATE_OPENREQ:
if (v) { if (v) {
struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk); struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
} }
case TCP_SEQ_STATE_LISTENING: case TCP_SEQ_STATE_LISTENING:
if (v != SEQ_START_TOKEN) if (v != SEQ_START_TOKEN)
......
...@@ -124,7 +124,7 @@ struct request_sock *inet6_csk_search_req(struct sock *sk, ...@@ -124,7 +124,7 @@ struct request_sock *inet6_csk_search_req(struct sock *sk,
u32 hash = inet6_synq_hash(raddr, rport, lopt->hash_rnd, u32 hash = inet6_synq_hash(raddr, rport, lopt->hash_rnd,
lopt->nr_table_entries); lopt->nr_table_entries);
write_lock(&icsk->icsk_accept_queue.syn_wait_lock); spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) { for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
const struct inet_request_sock *ireq = inet_rsk(req); const struct inet_request_sock *ireq = inet_rsk(req);
...@@ -138,7 +138,7 @@ struct request_sock *inet6_csk_search_req(struct sock *sk, ...@@ -138,7 +138,7 @@ struct request_sock *inet6_csk_search_req(struct sock *sk,
break; break;
} }
} }
write_unlock(&icsk->icsk_accept_queue.syn_wait_lock); spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
return req; return req;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment