Commit 96b33300 authored by Pablo Neira Ayuso

netfilter: nft_set_rbtree: use read spinlock to avoid datapath contention

rbtree GC does not modify the data structure; instead, it collects expired
elements and enqueues a GC transaction. Use a read spinlock instead
to avoid datapath contention while the GC worker is running.

Fixes: f6c383b8 ("netfilter: nf_tables: adapt set backend to use GC transaction API")
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
parent f15f29fd
@@ -622,8 +622,7 @@ static void nft_rbtree_gc(struct work_struct *work)
 	if (!gc)
 		goto done;
 
-	write_lock_bh(&priv->lock);
-	write_seqcount_begin(&priv->count);
+	read_lock_bh(&priv->lock);
 	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
 
 		/* Ruleset has been updated, try later. */
@@ -673,8 +672,7 @@ static void nft_rbtree_gc(struct work_struct *work)
 	gc = nft_trans_gc_catchall(gc, gc_seq);
 
 try_later:
-	write_seqcount_end(&priv->count);
-	write_unlock_bh(&priv->lock);
+	read_unlock_bh(&priv->lock);
 
 	if (gc)
 		nft_trans_gc_queue_async_done(gc);
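Below is a minimal, self-contained sketch (not taken from the patch) of the rwlock pattern the commit adopts: a scan that only reads the rbtree takes the read side of the lock with bottom halves disabled, so it no longer serializes against concurrent readers on the datapath, while paths that actually modify the tree keep taking the write side. The struct and function names (demo_set, demo_scan, demo_erase) are hypothetical and exist only for illustration.

#include <linux/rbtree.h>
#include <linux/spinlock.h>

/* Hypothetical container guarded by an rwlock, for illustration only. */
struct demo_set {
	struct rb_root	root;
	rwlock_t	lock;
};

/* Read-only walk, e.g. collecting expired elements: the read lock
 * suffices and does not exclude other readers.
 */
static void demo_scan(struct demo_set *set)
{
	struct rb_node *node;

	read_lock_bh(&set->lock);
	for (node = rb_first(&set->root); node != NULL; node = rb_next(node)) {
		/* inspect the element and queue it for asynchronous removal */
	}
	read_unlock_bh(&set->lock);
}

/* Actual removal modifies the tree and still needs exclusive access. */
static void demo_erase(struct demo_set *set, struct rb_node *node)
{
	write_lock_bh(&set->lock);
	rb_erase(node, &set->root);
	write_unlock_bh(&set->lock);
}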