Commit 0d5b9311 authored by Eric Dumazet, committed by David S. Miller

inet: frags: better deal with smp races

Multiple cpus might attempt to insert a new fragment in rhashtable,
if for example RPS is buggy, as reported by 배석진 in
https://patchwork.ozlabs.org/patch/994601/

We use rhashtable_lookup_get_insert_key() instead of
rhashtable_insert_fast() to let cpus losing the race
free their own inet_frag_queue and use the one that
was inserted by another cpu.

Fixes: 648700f7 ("inet: frags: use rhashtables for reassembly units")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: 배석진 <soukjin.bae@samsung.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e12c2252
@@ -178,21 +178,22 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
} }
static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf, static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
void *arg) void *arg,
struct inet_frag_queue **prev)
{ {
struct inet_frags *f = nf->f; struct inet_frags *f = nf->f;
struct inet_frag_queue *q; struct inet_frag_queue *q;
int err;
q = inet_frag_alloc(nf, f, arg); q = inet_frag_alloc(nf, f, arg);
if (!q) if (!q) {
*prev = ERR_PTR(-ENOMEM);
return NULL; return NULL;
}
mod_timer(&q->timer, jiffies + nf->timeout); mod_timer(&q->timer, jiffies + nf->timeout);
err = rhashtable_insert_fast(&nf->rhashtable, &q->node, *prev = rhashtable_lookup_get_insert_key(&nf->rhashtable, &q->key,
f->rhash_params); &q->node, f->rhash_params);
if (err < 0) { if (*prev) {
q->flags |= INET_FRAG_COMPLETE; q->flags |= INET_FRAG_COMPLETE;
inet_frag_kill(q); inet_frag_kill(q);
inet_frag_destroy(q); inet_frag_destroy(q);
@@ -204,22 +205,22 @@ static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */ /* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key) struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
{ {
struct inet_frag_queue *fq; struct inet_frag_queue *fq = NULL, *prev;
if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
return NULL; return NULL;
rcu_read_lock(); rcu_read_lock();
fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params); prev = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
if (fq) { if (!prev)
fq = inet_frag_create(nf, key, &prev);
if (prev && !IS_ERR(prev)) {
fq = prev;
if (!refcount_inc_not_zero(&fq->refcnt)) if (!refcount_inc_not_zero(&fq->refcnt))
fq = NULL; fq = NULL;
rcu_read_unlock();
return fq;
} }
rcu_read_unlock(); rcu_read_unlock();
return fq;
return inet_frag_create(nf, key);
} }
EXPORT_SYMBOL(inet_frag_find); EXPORT_SYMBOL(inet_frag_find);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment