Commit 0de22baa authored by Taehee Yoo, committed by Pablo Neira Ayuso

netfilter: nf_tables: use rhashtable_walk_enter instead of rhashtable_walk_init

rhashtable_walk_init() is deprecated and rhashtable_walk_enter() can be
used instead. rhashtable_walk_init() is a wrapper around
rhashtable_walk_enter(), so the logic is actually the same.
But rhashtable_walk_enter() doesn't return an error, hence the error
path code can be removed.
Signed-off-by: Taehee Yoo <ap420073@gmail.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
parent f8b0a3ab
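
For readers unfamiliar with the rhashtable walker API, the pattern this patch converts to looks roughly like the sketch below. It is not part of the patch; struct my_obj, my_ht and walk_all() are hypothetical names used only for illustration. Since rhashtable_walk_enter() has no return value to check, the only error the caller still handles is a non -EAGAIN result from rhashtable_walk_next().

#include <linux/rhashtable.h>

struct my_obj {				/* hypothetical entry type */
	struct rhash_head node;
};

static int walk_all(struct rhashtable *my_ht)
{
	struct rhashtable_iter hti;
	struct my_obj *obj;
	int err = 0;

	rhashtable_walk_enter(my_ht, &hti);	/* cannot fail, unlike rhashtable_walk_init() */
	rhashtable_walk_start(&hti);

	while ((obj = rhashtable_walk_next(&hti))) {
		if (IS_ERR(obj)) {
			/* -EAGAIN only means the table was resized; keep walking */
			if (PTR_ERR(obj) != -EAGAIN) {
				err = PTR_ERR(obj);
				break;
			}
			continue;
		}
		/* ... process obj ... */
	}

	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);

	return err;
}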
@@ -254,20 +254,17 @@ int nf_flow_table_iterate(struct nf_flowtable *flow_table,
 	struct flow_offload_tuple_rhash *tuplehash;
 	struct rhashtable_iter hti;
 	struct flow_offload *flow;
-	int err;
+	int err = 0;
 
-	err = rhashtable_walk_init(&flow_table->rhashtable, &hti, GFP_KERNEL);
-	if (err)
-		return err;
-
+	rhashtable_walk_enter(&flow_table->rhashtable, &hti);
 	rhashtable_walk_start(&hti);
 
 	while ((tuplehash = rhashtable_walk_next(&hti))) {
 		if (IS_ERR(tuplehash)) {
-			err = PTR_ERR(tuplehash);
-			if (err != -EAGAIN)
-				goto out;
-
+			if (PTR_ERR(tuplehash) != -EAGAIN) {
+				err = PTR_ERR(tuplehash);
+				break;
+			}
 			continue;
 		}
 		if (tuplehash->tuple.dir)
@@ -277,7 +274,6 @@ int nf_flow_table_iterate(struct nf_flowtable *flow_table,
 
 		iter(flow, data);
 	}
-out:
 	rhashtable_walk_stop(&hti);
 	rhashtable_walk_exit(&hti);
@@ -290,25 +286,19 @@ static inline bool nf_flow_has_expired(const struct flow_offload *flow)
 	return (__s32)(flow->timeout - (u32)jiffies) <= 0;
 }
 
-static int nf_flow_offload_gc_step(struct nf_flowtable *flow_table)
+static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table)
 {
 	struct flow_offload_tuple_rhash *tuplehash;
 	struct rhashtable_iter hti;
 	struct flow_offload *flow;
-	int err;
-
-	err = rhashtable_walk_init(&flow_table->rhashtable, &hti, GFP_KERNEL);
-	if (err)
-		return 0;
 
+	rhashtable_walk_enter(&flow_table->rhashtable, &hti);
 	rhashtable_walk_start(&hti);
 
 	while ((tuplehash = rhashtable_walk_next(&hti))) {
 		if (IS_ERR(tuplehash)) {
-			err = PTR_ERR(tuplehash);
-			if (err != -EAGAIN)
-				goto out;
-
+			if (PTR_ERR(tuplehash) != -EAGAIN)
+				break;
 			continue;
 		}
 		if (tuplehash->tuple.dir)
@@ -321,11 +311,8 @@ static int nf_flow_offload_gc_step(struct nf_flowtable *flow_table)
 					      FLOW_OFFLOAD_TEARDOWN)))
 			flow_offload_del(flow_table, flow);
 	}
-out:
 	rhashtable_walk_stop(&hti);
 	rhashtable_walk_exit(&hti);
-
-	return 1;
 }
 
 static void nf_flow_offload_work_gc(struct work_struct *work)
@@ -514,7 +501,7 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
 	mutex_unlock(&flowtable_lock);
 	cancel_delayed_work_sync(&flow_table->gc_work);
 	nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
-	WARN_ON(!nf_flow_offload_gc_step(flow_table));
+	nf_flow_offload_gc_step(flow_table);
 	rhashtable_destroy(&flow_table->rhashtable);
 }
 EXPORT_SYMBOL_GPL(nf_flow_table_free);
@@ -244,21 +244,15 @@ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
 	struct nft_rhash_elem *he;
 	struct rhashtable_iter hti;
 	struct nft_set_elem elem;
-	int err;
-
-	err = rhashtable_walk_init(&priv->ht, &hti, GFP_ATOMIC);
-	iter->err = err;
-	if (err)
-		return;
 
+	rhashtable_walk_enter(&priv->ht, &hti);
 	rhashtable_walk_start(&hti);
 
 	while ((he = rhashtable_walk_next(&hti))) {
 		if (IS_ERR(he)) {
-			err = PTR_ERR(he);
-			if (err != -EAGAIN) {
-				iter->err = err;
-				goto out;
+			if (PTR_ERR(he) != -EAGAIN) {
+				iter->err = PTR_ERR(he);
+				break;
 			}
 
 			continue;
@@ -275,13 +269,11 @@ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
 		iter->err = iter->fn(ctx, set, iter, &elem);
 		if (iter->err < 0)
-			goto out;
+			break;
 
 cont:
 		iter->count++;
 	}
-
-out:
 	rhashtable_walk_stop(&hti);
 	rhashtable_walk_exit(&hti);
 }
@@ -293,21 +285,17 @@ static void nft_rhash_gc(struct work_struct *work)
 	struct nft_rhash *priv;
 	struct nft_set_gc_batch *gcb = NULL;
 	struct rhashtable_iter hti;
-	int err;
 
 	priv = container_of(work, struct nft_rhash, gc_work.work);
 	set = nft_set_container_of(priv);
 
-	err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL);
-	if (err)
-		goto schedule;
-
+	rhashtable_walk_enter(&priv->ht, &hti);
 	rhashtable_walk_start(&hti);
 
 	while ((he = rhashtable_walk_next(&hti))) {
 		if (IS_ERR(he)) {
 			if (PTR_ERR(he) != -EAGAIN)
-				goto out;
+				break;
 			continue;
 		}
@@ -326,17 +314,15 @@ static void nft_rhash_gc(struct work_struct *work)
 		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
 		if (gcb == NULL)
-			goto out;
+			break;
 		rhashtable_remove_fast(&priv->ht, &he->node, nft_rhash_params);
 		atomic_dec(&set->nelems);
 		nft_set_gc_batch_add(gcb, he);
 	}
-out:
 	rhashtable_walk_stop(&hti);
 	rhashtable_walk_exit(&hti);
 
 	nft_set_gc_batch_complete(gcb);
-
-schedule:
 	queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
 			   nft_set_gc_interval(set));
 }