Commit 5af68ef7 authored by NeilBrown, committed by David S. Miller

rhashtable: simplify nested_table_alloc() and rht_bucket_nested_insert()

Now that we don't use the hash value or shift in nested_table_alloc(),
there is room for simplification.
We only need to pass an "is this a leaf" flag to nested_table_alloc(),
and rht_bucket_nested_insert() no longer needs to track as much
information.
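
As a rough standalone sketch of the callee side (simplified types
throughout: "union table", table_alloc(), the 512-slot page and the
"nulls" sentinel are all illustrative stand-ins, not rhashtable code),
the allocator only needs the single bool:

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdlib.h>

  struct node { struct node *next; };
  union table { union table *table; struct node *bucket; };

  static struct node nulls;  /* stand-in for the NULLS end marker */

  /* Leaf tables hold bucket chain heads that must be initialised to
   * the end marker; interior tables hold child pointers, which the
   * zeroing allocation already leaves NULL. */
  static union table *table_alloc(bool leaf)
  {
          union table *ntbl = calloc(512, sizeof(ntbl[0]));

          if (ntbl && leaf)
                  for (size_t i = 0; i < 512; i++)
                          ntbl[i].bucket = &nulls;
          return ntbl;
  }

  int main(void)
  {
          union table *root = table_alloc(false); /* interior: slots stay NULL */
          union table *leaf = table_alloc(true);  /* leaf: slots get the marker */

          free(root);
          free(leaf);
          return 0;
  }

The old code encoded the same fact in whether the accumulated "shifted"
value was non-zero; the bool states the contract directly at each call
site.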

Note there is another minor cleanup in nested_table_alloc() here.
The number of elements in a page of "union nested_table" entries is most naturally

  PAGE_SIZE / sizeof(ntbl[0])

The previous code had

  PAGE_SIZE / sizeof(ntbl[0].bucket)

which happens to be the correct value only because the bucket uses all
the space in the union.
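
Both members of the union are single pointers, so the two divisors
agree. A standalone compile-time check (the union is simplified here,
with the kernel's __rcu annotations dropped) makes that dependency
explicit:

  struct rhash_head;                   /* opaque, used only via pointer */

  union nested_table {
          union nested_table *table;   /* interior level: child tables */
          struct rhash_head *bucket;   /* leaf level: chain heads */
  };

  /* True only because bucket uses all the space in the union; if the
   * union ever grew a larger member, the old divisor would be wrong. */
  _Static_assert(sizeof(union nested_table) ==
                 sizeof(((union nested_table *)0)->bucket),
                 "ntbl[0] and ntbl[0].bucket must have the same size");
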
Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9b4f64a2
@@ -116,7 +116,7 @@ static void bucket_table_free_rcu(struct rcu_head *head)
 
 static union nested_table *nested_table_alloc(struct rhashtable *ht,
 					      union nested_table __rcu **prev,
-					      unsigned int shifted)
+					      bool leaf)
 {
 	union nested_table *ntbl;
 	int i;
@@ -127,8 +127,8 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht,
 
 	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
 
-	if (ntbl && shifted) {
-		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
+	if (ntbl && leaf) {
+		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
 			INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
 	}
 
@@ -155,7 +155,7 @@ static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
 		return NULL;
 
 	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
-				0)) {
+				false)) {
 		kfree(tbl);
 		return NULL;
 	}
@@ -1207,24 +1207,18 @@ struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
 	unsigned int index = hash & ((1 << tbl->nest) - 1);
 	unsigned int size = tbl->size >> tbl->nest;
 	union nested_table *ntbl;
-	unsigned int shifted;
-	unsigned int nhash;
 
 	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
 	hash >>= tbl->nest;
-	nhash = index;
-	shifted = tbl->nest;
 	ntbl = nested_table_alloc(ht, &ntbl[index].table,
-				  size <= (1 << shift) ? shifted : 0);
+				  size <= (1 << shift));
 
 	while (ntbl && size > (1 << shift)) {
 		index = hash & ((1 << shift) - 1);
 		size >>= shift;
 		hash >>= shift;
-		nhash |= index << shifted;
-		shifted += shift;
 		ntbl = nested_table_alloc(ht, &ntbl[index].table,
-					  size <= (1 << shift) ? shifted : 0);
+					  size <= (1 << shift));
 	}
 
 	if (!ntbl)
...
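
Since nested_table_alloc() no longer consumes the hash (see the parent
commit, 9b4f64a2), the insert path's walk keeps only the hash and the
remaining size. A rough standalone sketch of that walk (SHIFT, walk()
and the printouts are illustrative, not the kernel code):

  #include <stdio.h>

  #define SHIFT 9  /* hash bits consumed per level; illustrative value */

  /* Each interior level peels SHIFT bits off the hash to pick a slot;
   * the table is a leaf once the remaining size fits in one level. */
  static void walk(unsigned int hash, unsigned int size)
  {
          while (size > (1u << SHIFT)) {
                  printf("interior slot %u\n", hash & ((1u << SHIFT) - 1));
                  size >>= SHIFT;
                  hash >>= SHIFT;
          }
          printf("leaf bucket %u\n", hash & (size - 1));
  }

  int main(void)
  {
          walk(0x12345u, 1u << 20);  /* two interior levels, then the leaf */
          return 0;
  }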