Commit a68d5398 authored by David S. Miller

Merge branch 'rhash-raw-walkers-remove-part-1'

Herbert Xu says:

====================
rhashtable: Get rid of raw table walkers part 1

This series starts the process of getting rid of all raw rhashtable
walkers (e.g., using any of the rht_for_each helpers) from the
kernel.

We need to do this before I can fix the resize kmalloc failure issue
by using multi-layered tables.

We should do this anyway because almost all raw table walkers are
already buggy in that they don't handle the multiple bucket tables
that exist while a resize is in progress.
====================

Dave/Tomas, please keep an eye out for any new patches that try
to introduce raw table walkers and nack them.
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 363dc396 ad202074
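
For readers coming to the series cold, here is a minimal sketch of the walk
interface that raw walkers are being converted to. struct my_obj and
dump_all() are hypothetical; the rhashtable_walk_* calls are the API as it
stands after this merge.

#include <linux/kernel.h>
#include <linux/rhashtable.h>

/* Hypothetical entry type; only the rhash_head linkage matters. */
struct my_obj {
	u32 key;
	struct rhash_head node;
};

static void dump_all(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct my_obj *obj;
	int ret;

	rhashtable_walk_enter(ht, &iter);	/* cannot fail: walker is embedded */

	ret = rhashtable_walk_start(&iter);	/* takes the RCU read lock */
	if (ret == -EAGAIN)
		ret = 0;			/* resized while stopped; may see dups */

	while (!ret && (obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* resize hit mid-walk; iterator resyncs */
			break;
		}
		pr_info("key=%u\n", obj->key);
	}

	rhashtable_walk_stop(&iter);		/* drops the RCU read lock */
	rhashtable_walk_exit(&iter);
}

Because the walker is embedded in the iterator (see the rhashtable.h hunks
below), rhashtable_walk_enter() needs no allocation, which is what allows the
obsolete rhashtable_walk_init() wrapper to simply return 0.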
diff --git a/MAINTAINERS b/MAINTAINERS
@@ -9936,6 +9936,7 @@ F:	net/rfkill/
 RHASHTABLE
 M:	Thomas Graf <tgraf@suug.ch>
 M:	Herbert Xu <herbert@gondor.apana.org.au>
+L:	netdev@vger.kernel.org
 S:	Maintained
 F:	lib/rhashtable.c
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
@@ -173,7 +173,7 @@ struct rhashtable_walker {
 struct rhashtable_iter {
 	struct rhashtable *ht;
 	struct rhash_head *p;
-	struct rhashtable_walker *walker;
+	struct rhashtable_walker walker;
 	unsigned int slot;
 	unsigned int skip;
 };
@@ -346,8 +346,8 @@ struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
 			       struct bucket_table *old_tbl);
 int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
 
-int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter,
-			 gfp_t gfp);
+void rhashtable_walk_enter(struct rhashtable *ht,
+			   struct rhashtable_iter *iter);
 void rhashtable_walk_exit(struct rhashtable_iter *iter);
 int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
 void *rhashtable_walk_next(struct rhashtable_iter *iter);
@@ -906,4 +906,12 @@ static inline int rhashtable_replace_fast(
 	return err;
 }
 
+/* Obsolete function, do not use in new code. */
+static inline int rhashtable_walk_init(struct rhashtable *ht,
+				       struct rhashtable_iter *iter, gfp_t gfp)
+{
+	rhashtable_walk_enter(ht, iter);
+	return 0;
+}
+
 #endif /* _LINUX_RHASHTABLE_H */
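
The wrapper above keeps existing callers building while they are converted.
At a call site the change looks like this; both helpers here are hypothetical
illustrations:

/* Before: the walker was allocated separately, so starting could fail. */
static int start_walk_old(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	return rhashtable_walk_init(ht, iter, GFP_KERNEL);	/* may be -ENOMEM */
}

/* After: the walker lives inside rhashtable_iter, so entering a walk
 * needs no allocation and cannot fail. */
static void start_walk_new(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	rhashtable_walk_enter(ht, iter);
}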
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
@@ -489,10 +489,9 @@ struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
 EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
 
 /**
- * rhashtable_walk_init - Initialise an iterator
+ * rhashtable_walk_enter - Initialise an iterator
  * @ht:		Table to walk over
  * @iter:	Hash table Iterator
- * @gfp:	GFP flags for allocations
  *
  * This function prepares a hash table walk.
  *
@@ -507,30 +506,22 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
  * This function may sleep so you must not call it from interrupt
  * context or with spin locks held.
  *
- * You must call rhashtable_walk_exit if this function returns
- * successfully.
+ * You must call rhashtable_walk_exit after this function returns.
  */
-int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter,
-			 gfp_t gfp)
+void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
 {
 	iter->ht = ht;
 	iter->p = NULL;
 	iter->slot = 0;
 	iter->skip = 0;
 
-	iter->walker = kmalloc(sizeof(*iter->walker), gfp);
-	if (!iter->walker)
-		return -ENOMEM;
-
 	spin_lock(&ht->lock);
-	iter->walker->tbl =
+	iter->walker.tbl =
 		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
-	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
+	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
 	spin_unlock(&ht->lock);
-
-	return 0;
 }
-EXPORT_SYMBOL_GPL(rhashtable_walk_init);
+EXPORT_SYMBOL_GPL(rhashtable_walk_enter);
 
 /**
  * rhashtable_walk_exit - Free an iterator
@@ -541,10 +532,9 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_init);
 void rhashtable_walk_exit(struct rhashtable_iter *iter)
 {
 	spin_lock(&iter->ht->lock);
-	if (iter->walker->tbl)
-		list_del(&iter->walker->list);
+	if (iter->walker.tbl)
+		list_del(&iter->walker.list);
 	spin_unlock(&iter->ht->lock);
-	kfree(iter->walker);
 }
 EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
@@ -570,12 +560,12 @@ int rhashtable_walk_start(struct rhashtable_iter *iter)
 	rcu_read_lock();
 
 	spin_lock(&ht->lock);
-	if (iter->walker->tbl)
-		list_del(&iter->walker->list);
+	if (iter->walker.tbl)
+		list_del(&iter->walker.list);
 	spin_unlock(&ht->lock);
 
-	if (!iter->walker->tbl) {
-		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
+	if (!iter->walker.tbl) {
+		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
 		return -EAGAIN;
 	}
@@ -597,7 +587,7 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_start);
  */
 void *rhashtable_walk_next(struct rhashtable_iter *iter)
 {
-	struct bucket_table *tbl = iter->walker->tbl;
+	struct bucket_table *tbl = iter->walker.tbl;
 	struct rhashtable *ht = iter->ht;
 	struct rhash_head *p = iter->p;
@@ -630,8 +620,8 @@ void *rhashtable_walk_next(struct rhashtable_iter *iter)
 	/* Ensure we see any new tables. */
 	smp_rmb();
 
-	iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
-	if (iter->walker->tbl) {
+	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+	if (iter->walker.tbl) {
 		iter->slot = 0;
 		iter->skip = 0;
 		return ERR_PTR(-EAGAIN);
@@ -651,7 +641,7 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter)
 	__releases(RCU)
 {
 	struct rhashtable *ht;
-	struct bucket_table *tbl = iter->walker->tbl;
+	struct bucket_table *tbl = iter->walker.tbl;
 
 	if (!tbl)
 		goto out;
@@ -660,9 +650,9 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter)
 	spin_lock(&ht->lock);
 	if (tbl->rehash < tbl->size)
-		list_add(&iter->walker->list, &tbl->walkers);
+		list_add(&iter->walker.list, &tbl->walkers);
 	else
-		iter->walker->tbl = NULL;
+		iter->walker.tbl = NULL;
 	spin_unlock(&ht->lock);
 
 	iter->p = NULL;
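
For contrast with the changes above, this is the raw-walker pattern the
series removes, sketched with the hypothetical struct my_obj from earlier
(the rht_* accessors are real). It only ever visits the bucket table
published in ht->tbl, so entries that a concurrent resize has already moved
to tbl->future_tbl are silently skipped; that is the walker bug the cover
letter refers to.

static void dump_racy(struct rhashtable *ht)
{
	const struct bucket_table *tbl;
	struct rhash_head *pos;
	struct my_obj *obj;
	unsigned int i;

	rcu_read_lock();
	tbl = rht_dereference_rcu(ht->tbl, ht);
	for (i = 0; i < tbl->size; i++)
		rht_for_each_entry_rcu(obj, pos, tbl, i, node)
			pr_info("key=%u\n", obj->key);	/* misses future_tbl */
	rcu_read_unlock();
}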
diff --git a/net/netlink/diag.c b/net/netlink/diag.c
@@ -63,43 +63,75 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
 static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
 			       int protocol, int s_num)
 {
+	struct rhashtable_iter *hti = (void *)cb->args[2];
 	struct netlink_table *tbl = &nl_table[protocol];
-	struct rhashtable *ht = &tbl->hash;
-	const struct bucket_table *htbl = rht_dereference_rcu(ht->tbl, ht);
 	struct net *net = sock_net(skb->sk);
 	struct netlink_diag_req *req;
 	struct netlink_sock *nlsk;
 	struct sock *sk;
-	int ret = 0, num = 0, i;
+	int num = 2;
+	int ret = 0;
 
 	req = nlmsg_data(cb->nlh);
 
-	for (i = 0; i < htbl->size; i++) {
-		struct rhash_head *pos;
+	if (s_num > 1)
+		goto mc_list;
 
-		rht_for_each_entry_rcu(nlsk, pos, htbl, i, node) {
-			sk = (struct sock *)nlsk;
+	num--;
 
-			if (!net_eq(sock_net(sk), net))
-				continue;
-			if (num < s_num) {
-				num++;
+	if (!hti) {
+		hti = kmalloc(sizeof(*hti), GFP_KERNEL);
+		if (!hti)
+			return -ENOMEM;
+
+		cb->args[2] = (long)hti;
+	}
+
+	if (!s_num)
+		rhashtable_walk_enter(&tbl->hash, hti);
+
+	ret = rhashtable_walk_start(hti);
+	if (ret == -EAGAIN)
+		ret = 0;
+	if (ret)
+		goto stop;
+
+	while ((nlsk = rhashtable_walk_next(hti))) {
+		if (IS_ERR(nlsk)) {
+			ret = PTR_ERR(nlsk);
+			if (ret == -EAGAIN) {
+				ret = 0;
 				continue;
 			}
+			break;
+		}
 
-			if (sk_diag_fill(sk, skb, req,
-					 NETLINK_CB(cb->skb).portid,
-					 cb->nlh->nlmsg_seq,
-					 NLM_F_MULTI,
-					 sock_i_ino(sk)) < 0) {
-				ret = 1;
-				goto done;
-			}
+		sk = (struct sock *)nlsk;
 
-			num++;
-		}
+		if (!net_eq(sock_net(sk), net))
+			continue;
+
+		if (sk_diag_fill(sk, skb, req,
+				 NETLINK_CB(cb->skb).portid,
+				 cb->nlh->nlmsg_seq,
+				 NLM_F_MULTI,
+				 sock_i_ino(sk)) < 0) {
+			ret = 1;
+			break;
+		}
 	}
 
+stop:
+	rhashtable_walk_stop(hti);
+	if (ret)
+		goto done;
+
+	rhashtable_walk_exit(hti);
+	cb->args[2] = 0;
+	num++;
+
+mc_list:
+	read_lock(&nl_table_lock);
 	sk_for_each_bound(sk, &tbl->mc_list) {
 		if (sk_hashed(sk))
 			continue;
@@ -116,13 +148,14 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
 				 NLM_F_MULTI,
 				 sock_i_ino(sk)) < 0) {
 			ret = 1;
-			goto done;
+			break;
 		}
 		num++;
 	}
+	read_unlock(&nl_table_lock);
 
 done:
 	cb->args[0] = num;
-	cb->args[1] = protocol;
 
 	return ret;
 }
@@ -131,20 +164,20 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct netlink_diag_req *req;
 	int s_num = cb->args[0];
+	int err = 0;
 
 	req = nlmsg_data(cb->nlh);
 
-	rcu_read_lock();
-	read_lock(&nl_table_lock);
-
 	if (req->sdiag_protocol == NDIAG_PROTO_ALL) {
 		int i;
 
 		for (i = cb->args[1]; i < MAX_LINKS; i++) {
-			if (__netlink_diag_dump(skb, cb, i, s_num))
+			err = __netlink_diag_dump(skb, cb, i, s_num);
+			if (err)
 				break;
 			s_num = 0;
 		}
+		cb->args[1] = i;
 	} else {
 		if (req->sdiag_protocol >= MAX_LINKS) {
-			read_unlock(&nl_table_lock);
@@ -152,13 +185,22 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
-			rcu_read_unlock();
 			return -ENOENT;
 		}
 
-		__netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num);
+		err = __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num);
 	}
 
-	read_unlock(&nl_table_lock);
-	rcu_read_unlock();
-
-	return skb->len;
+	return err < 0 ? err : skb->len;
 }
 
+static int netlink_diag_dump_done(struct netlink_callback *cb)
+{
+	struct rhashtable_iter *hti = (void *)cb->args[2];
+
+	if (cb->args[0] == 1)
+		rhashtable_walk_exit(hti);
+
+	kfree(hti);
+
+	return 0;
+}
+
 static int netlink_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
@@ -172,6 +214,7 @@ static int netlink_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
 	if (h->nlmsg_flags & NLM_F_DUMP) {
 		struct netlink_dump_control c = {
 			.dump = netlink_diag_dump,
+			.done = netlink_diag_dump_done,
 		};
 		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
 	} else
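
The diag.c conversion above doubles as a template for resumable netlink
dumps: allocate the iterator once, stash it in cb->args[2] so it survives
across dump invocations, and tear it down in the .done callback. A condensed
sketch, with all my_* names hypothetical:

/* assumes <linux/rhashtable.h>, <linux/netlink.h>, <linux/slab.h> */

static struct rhashtable *my_ht;	/* hypothetical table being dumped */

static int my_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rhashtable_iter *hti = (void *)cb->args[2];

	if (!hti) {				/* first invocation */
		hti = kmalloc(sizeof(*hti), GFP_KERNEL);
		if (!hti)
			return -ENOMEM;
		cb->args[2] = (long)hti;
		rhashtable_walk_enter(my_ht, hti);
	}

	/* ... rhashtable_walk_start/next/stop here, filling skb ... */

	return skb->len;	/* non-zero: more data, dump will be called again */
}

static int my_diag_done(struct netlink_callback *cb)
{
	struct rhashtable_iter *hti = (void *)cb->args[2];

	if (hti) {
		rhashtable_walk_exit(hti);
		kfree(hti);
	}
	return 0;
}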