Commit aac065c5 authored by Eric Dumazet's avatar Eric Dumazet Committed by David S. Miller

tcp: move qlen/young out of struct listen_sock

qlen_inc & young_inc were protected by listener lock,
while qlen_dec & young_dec were atomic fields.

Everything needs to be atomic for upcoming lockless listener.

Also move qlen/young in request_sock_queue as we'll get rid
of struct listen_sock eventually.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent fff1f300
......@@ -122,14 +122,7 @@ extern int sysctl_max_syn_backlog;
* @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
*/
struct listen_sock {
int qlen_inc; /* protected by listener lock */
int young_inc;/* protected by listener lock */
/* following fields can be updated by timer */
atomic_t qlen_dec; /* qlen = qlen_inc - qlen_dec */
atomic_t young_dec;
u32 max_qlen_log ____cacheline_aligned_in_smp;
u32 max_qlen_log;
u32 synflood_warned;
u32 hash_rnd;
u32 nr_table_entries;
......@@ -179,6 +172,9 @@ struct request_sock_queue {
spinlock_t rskq_lock;
u8 rskq_defer_accept;
atomic_t qlen;
atomic_t young;
struct request_sock *rskq_accept_head;
struct request_sock *rskq_accept_tail;
struct listen_sock *listen_opt;
......@@ -242,41 +238,25 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue
/* Account for @req leaving @queue's SYN backlog.
 *
 * A request whose SYNACK was never retransmitted (num_timeout == 0) still
 * counts as "young", so removing it must shrink the young counter too.
 * Both counters live in request_sock_queue as atomics so that the timer
 * and the (upcoming lockless) listener can update them without holding
 * the listener lock.
 */
static inline void reqsk_queue_removed(struct request_sock_queue *queue,
				       const struct request_sock *req)
{
	if (req->num_timeout == 0)
		atomic_dec(&queue->young);
	atomic_dec(&queue->qlen);
}
/* Account for a freshly queued request: every new request starts young.
 *
 * Counters are atomic (not listener-lock protected ints) so this can be
 * called from a lockless listener; the old listen_sock qlen_inc/young_inc
 * split and the listen_sock_qlen()/listen_sock_young() helpers are gone.
 */
static inline void reqsk_queue_added(struct request_sock_queue *queue)
{
	atomic_inc(&queue->young);
	atomic_inc(&queue->qlen);
}
/* Current number of requests in the SYN backlog of @queue.
 * A single atomic read; no listen_opt indirection or NULL check needed
 * now that the counter lives directly in request_sock_queue.
 */
static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->qlen);
}
/* Number of "young" requests (no SYNACK retransmit yet) in @queue. */
static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->young);
}
static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
......
......@@ -102,7 +102,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
/* make all the listen_opt local to us */
struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);
if (listen_sock_qlen(lopt) != 0) {
if (reqsk_queue_len(queue) != 0) {
unsigned int i;
for (i = 0; i < lopt->nr_table_entries; i++) {
......@@ -116,7 +116,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
* or risk a dead lock.
*/
spin_unlock_bh(&queue->syn_wait_lock);
atomic_inc(&lopt->qlen_dec);
atomic_dec(&queue->qlen);
if (del_timer_sync(&req->rsk_timer))
reqsk_put(req);
reqsk_put(req);
......@@ -126,8 +126,8 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
}
}
if (WARN_ON(listen_sock_qlen(lopt) != 0))
pr_err("qlen %u\n", listen_sock_qlen(lopt));
if (WARN_ON(reqsk_queue_len(queue) != 0))
pr_err("qlen %u\n", reqsk_queue_len(queue));
kvfree(lopt);
}
......
......@@ -640,9 +640,9 @@ static void reqsk_timer_handler(unsigned long data)
* embrions; and abort old ones without pity, if old
* ones are about to clog our table.
*/
qlen = listen_sock_qlen(lopt);
qlen = reqsk_queue_len(queue);
if (qlen >> (lopt->max_qlen_log - 1)) {
int young = listen_sock_young(lopt) << 1;
int young = reqsk_queue_len_young(queue) << 1;
while (thresh > 2) {
if (qlen < young)
......@@ -664,7 +664,7 @@ static void reqsk_timer_handler(unsigned long data)
unsigned long timeo;
if (req->num_timeout++ == 0)
atomic_inc(&lopt->young_dec);
atomic_dec(&queue->young);
timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
mod_timer_pinned(&req->rsk_timer, jiffies + timeo);
return;
......
......@@ -753,7 +753,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
lopt = icsk->icsk_accept_queue.listen_opt;
if (!lopt || !listen_sock_qlen(lopt))
if (!lopt || !reqsk_queue_len(&icsk->icsk_accept_queue))
goto out;
if (bc) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment