Commit 143eb9ac authored by David S. Miller

Merge branch 'rhashtable-cleanups'

NeilBrown says:

====================
Two clean-ups for rhashtable.

These two patches make small improvements to
rhashtable, but are otherwise unrelated.

Thanks to Herbert, Miguel, and Paul for the review.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 83b038db f7ad68bf
@@ -366,14 +366,14 @@ ForEachMacros:
   - 'rhl_for_each_entry_rcu'
   - 'rhl_for_each_rcu'
   - 'rht_for_each'
-  - 'rht_for_each_continue'
+  - 'rht_for_each_from'
   - 'rht_for_each_entry'
-  - 'rht_for_each_entry_continue'
+  - 'rht_for_each_entry_from'
   - 'rht_for_each_entry_rcu'
-  - 'rht_for_each_entry_rcu_continue'
+  - 'rht_for_each_entry_rcu_from'
   - 'rht_for_each_entry_safe'
   - 'rht_for_each_rcu'
-  - 'rht_for_each_rcu_continue'
+  - 'rht_for_each_rcu_from'
   - '__rq_for_each_bio'
   - 'rq_for_each_segment'
   - 'scsi_for_each_prot_sg'
...
@@ -63,7 +63,6 @@
 struct bucket_table {
         unsigned int size;
         unsigned int nest;
-        unsigned int rehash;
         u32 hash_rnd;
         unsigned int locks_mask;
         spinlock_t *locks;
@@ -307,13 +306,13 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
 }
 
 /**
- * rht_for_each_continue - continue iterating over hash chain
+ * rht_for_each_from - iterate over hash chain from given head
  * @pos: the &struct rhash_head to use as a loop cursor.
- * @head: the previous &struct rhash_head to continue from
+ * @head: the &struct rhash_head to start from
  * @tbl: the &struct bucket_table
  * @hash: the hash value / bucket index
  */
-#define rht_for_each_continue(pos, head, tbl, hash) \
+#define rht_for_each_from(pos, head, tbl, hash) \
         for (pos = rht_dereference_bucket(head, tbl, hash); \
              !rht_is_a_nulls(pos); \
              pos = rht_dereference_bucket((pos)->next, tbl, hash))
@@ -325,18 +324,18 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * @hash: the hash value / bucket index
  */
 #define rht_for_each(pos, tbl, hash) \
-        rht_for_each_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
+        rht_for_each_from(pos, *rht_bucket(tbl, hash), tbl, hash)
 
 /**
- * rht_for_each_entry_continue - continue iterating over hash chain
+ * rht_for_each_entry_from - iterate over hash chain from given head
  * @tpos: the type * to use as a loop cursor.
  * @pos: the &struct rhash_head to use as a loop cursor.
- * @head: the previous &struct rhash_head to continue from
+ * @head: the &struct rhash_head to start from
  * @tbl: the &struct bucket_table
  * @hash: the hash value / bucket index
  * @member: name of the &struct rhash_head within the hashable struct.
  */
-#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
+#define rht_for_each_entry_from(tpos, pos, head, tbl, hash, member) \
         for (pos = rht_dereference_bucket(head, tbl, hash); \
              (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
              pos = rht_dereference_bucket((pos)->next, tbl, hash))
@@ -350,7 +349,7 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * @member: name of the &struct rhash_head within the hashable struct.
  */
 #define rht_for_each_entry(tpos, pos, tbl, hash, member) \
-        rht_for_each_entry_continue(tpos, pos, *rht_bucket(tbl, hash), \
+        rht_for_each_entry_from(tpos, pos, *rht_bucket(tbl, hash), \
                                     tbl, hash, member)
 
 /**
@@ -375,9 +374,9 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
              rht_dereference_bucket(pos->next, tbl, hash) : NULL)
 
 /**
- * rht_for_each_rcu_continue - continue iterating over rcu hash chain
+ * rht_for_each_rcu_from - iterate over rcu hash chain from given head
  * @pos: the &struct rhash_head to use as a loop cursor.
- * @head: the previous &struct rhash_head to continue from
+ * @head: the &struct rhash_head to start from
  * @tbl: the &struct bucket_table
  * @hash: the hash value / bucket index
  *
@@ -385,7 +384,7 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * the _rcu mutation primitives such as rhashtable_insert() as long as the
  * traversal is guarded by rcu_read_lock().
  */
-#define rht_for_each_rcu_continue(pos, head, tbl, hash) \
+#define rht_for_each_rcu_from(pos, head, tbl, hash) \
         for (({barrier(); }), \
              pos = rht_dereference_bucket_rcu(head, tbl, hash); \
              !rht_is_a_nulls(pos); \
@@ -402,13 +401,13 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * traversal is guarded by rcu_read_lock().
  */
 #define rht_for_each_rcu(pos, tbl, hash) \
-        rht_for_each_rcu_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
+        rht_for_each_rcu_from(pos, *rht_bucket(tbl, hash), tbl, hash)
 
 /**
- * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
+ * rht_for_each_entry_rcu_from - iterated over rcu hash chain from given head
  * @tpos: the type * to use as a loop cursor.
  * @pos: the &struct rhash_head to use as a loop cursor.
- * @head: the previous &struct rhash_head to continue from
+ * @head: the &struct rhash_head to start from
  * @tbl: the &struct bucket_table
  * @hash: the hash value / bucket index
  * @member: name of the &struct rhash_head within the hashable struct.
@@ -417,7 +416,7 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * the _rcu mutation primitives such as rhashtable_insert() as long as the
  * traversal is guarded by rcu_read_lock().
  */
-#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
+#define rht_for_each_entry_rcu_from(tpos, pos, head, tbl, hash, member) \
         for (({barrier(); }), \
              pos = rht_dereference_bucket_rcu(head, tbl, hash); \
              (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
@@ -436,7 +435,7 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * traversal is guarded by rcu_read_lock().
  */
 #define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
-        rht_for_each_entry_rcu_continue(tpos, pos, *rht_bucket(tbl, hash), \
+        rht_for_each_entry_rcu_from(tpos, pos, *rht_bucket(tbl, hash), \
                                         tbl, hash, member)
 
 /**
@@ -492,7 +491,7 @@ static inline struct rhash_head *__rhashtable_lookup(
         hash = rht_key_hashfn(ht, tbl, key, params);
         head = rht_bucket(tbl, hash);
         do {
-                rht_for_each_rcu_continue(he, *head, tbl, hash) {
+                rht_for_each_rcu_from(he, *head, tbl, hash) {
                         if (params.obj_cmpfn ?
                             params.obj_cmpfn(&arg, rht_obj(ht, he)) :
                             rhashtable_compare(&arg, rht_obj(ht, he)))
@@ -626,7 +625,7 @@ static inline void *__rhashtable_insert_fast(
                 if (!pprev)
                         goto out;
 
-                rht_for_each_continue(head, *pprev, tbl, hash) {
+                rht_for_each_from(head, *pprev, tbl, hash) {
                         struct rhlist_head *plist;
                         struct rhlist_head *list;
@@ -776,12 +775,6 @@ static inline int rhltable_insert(
  * @obj: pointer to hash head inside object
  * @params: hash table parameters
  *
- * Locks down the bucket chain in both the old and new table if a resize
- * is in progress to ensure that writers can't remove from the old table
- * and can't insert to the new table during the atomic operation of search
- * and insertion. Searches for duplicates in both the old and new table if
- * a resize is in progress.
- *
  * This lookup function may only be used for fixed key hash table (key_len
  * parameter set). It will BUG() if used inappropriately.
  *
@@ -837,12 +830,6 @@ static inline void *rhashtable_lookup_get_insert_fast(
  * @obj: pointer to hash head inside object
  * @params: hash table parameters
  *
- * Locks down the bucket chain in both the old and new table if a resize
- * is in progress to ensure that writers can't remove from the old table
- * and can't insert to the new table during the atomic operation of search
- * and insertion. Searches for duplicates in both the old and new table if
- * a resize is in progress.
- *
  * Lookups may occur in parallel with hashtable mutations and resizing.
  *
  * Will trigger an automatic deferred table resizing if residency in the
@@ -903,7 +890,7 @@ static inline int __rhashtable_remove_fast_one(
         spin_lock_bh(lock);
 
         pprev = rht_bucket_var(tbl, hash);
-        rht_for_each_continue(he, *pprev, tbl, hash) {
+        rht_for_each_from(he, *pprev, tbl, hash) {
                 struct rhlist_head *list;
 
                 list = container_of(he, struct rhlist_head, rhead);
@@ -1055,7 +1042,7 @@ static inline int __rhashtable_replace_fast(
         spin_lock_bh(lock);
 
         pprev = rht_bucket_var(tbl, hash);
-        rht_for_each_continue(he, *pprev, tbl, hash) {
+        rht_for_each_from(he, *pprev, tbl, hash) {
                 if (he != obj_old) {
                         pprev = &he->next;
                         continue;
...
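For context on the rename above: the head argument of these iterators has always been the bucket position to start from, not a cursor saved from an earlier walk, so the new *_from names describe the behaviour more accurately than *_continue did. A minimal sketch of a caller, closely modelled on the __rhashtable_lookup() hunk above (struct test_obj, its value field and scan_bucket() are illustrative names, not part of this patch; the caller must hold rcu_read_lock()):

#include <linux/rhashtable.h>

struct test_obj {
        int value;
        struct rhash_head node;
};

/* Walk a single bucket chain starting from its head; this is exactly the
 * pattern rht_for_each_rcu_from() expresses, hence the "_from" name.
 */
static struct test_obj *scan_bucket(struct rhashtable *ht,
                                    struct bucket_table *tbl,
                                    unsigned int hash, int value)
{
        struct rhash_head __rcu * const *head = rht_bucket(tbl, hash);
        struct rhash_head *he;

        rht_for_each_rcu_from(he, *head, tbl, hash) {
                struct test_obj *obj = rht_obj(ht, he);

                if (obj->value == value)
                        return obj;
        }
        return NULL;
}

rht_for_each_rcu(pos, tbl, hash) remains the usual entry point; as the hunks above show, it simply expands to rht_for_each_rcu_from() starting at *rht_bucket(tbl, hash).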
@@ -197,6 +197,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
                 return NULL;
         }
 
+        rcu_head_init(&tbl->rcu);
         INIT_LIST_HEAD(&tbl->walkers);
 
         tbl->hash_rnd = get_random_u32();
@@ -280,10 +281,9 @@ static int rhashtable_rehash_chain(struct rhashtable *ht,
         while (!(err = rhashtable_rehash_one(ht, old_hash)))
                 ;
 
-        if (err == -ENOENT) {
-                old_tbl->rehash++;
+        if (err == -ENOENT)
                 err = 0;
-        }
 
         spin_unlock_bh(old_bucket_lock);
 
         return err;
@@ -330,13 +330,16 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
         spin_lock(&ht->lock);
         list_for_each_entry(walker, &old_tbl->walkers, list)
                 walker->tbl = NULL;
-        spin_unlock(&ht->lock);
 
         /* Wait for readers. All new readers will see the new
          * table, and thus no references to the old table will
          * remain.
+         * We do this inside the locked region so that
+         * rhashtable_walk_stop() can use rcu_head_after_call_rcu()
+         * to check if it should not re-link the table.
          */
         call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
+        spin_unlock(&ht->lock);
 
         return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
 }
@@ -487,7 +490,7 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
         elasticity = RHT_ELASTICITY;
         pprev = rht_bucket_var(tbl, hash);
-        rht_for_each_continue(head, *pprev, tbl, hash) {
+        rht_for_each_from(head, *pprev, tbl, hash) {
                 struct rhlist_head *list;
                 struct rhlist_head *plist;
@@ -578,46 +581,22 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
         struct bucket_table *new_tbl;
         struct bucket_table *tbl;
         unsigned int hash;
-        spinlock_t *lock;
         void *data;
 
-        tbl = rcu_dereference(ht->tbl);
-
-        /* All insertions must grab the oldest table containing
-         * the hashed bucket that is yet to be rehashed.
-         */
-        for (;;) {
-                hash = rht_head_hashfn(ht, tbl, obj, ht->p);
-                lock = rht_bucket_lock(tbl, hash);
-                spin_lock_bh(lock);
-
-                if (tbl->rehash <= hash)
-                        break;
-
-                spin_unlock_bh(lock);
-                tbl = rht_dereference_rcu(tbl->future_tbl, ht);
-        }
-
-        data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
-        new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
-        if (PTR_ERR(new_tbl) != -EEXIST)
-                data = ERR_CAST(new_tbl);
+        new_tbl = rcu_dereference(ht->tbl);
 
-        while (!IS_ERR_OR_NULL(new_tbl)) {
+        do {
                 tbl = new_tbl;
                 hash = rht_head_hashfn(ht, tbl, obj, ht->p);
-                spin_lock_nested(rht_bucket_lock(tbl, hash),
-                                 SINGLE_DEPTH_NESTING);
+                spin_lock_bh(rht_bucket_lock(tbl, hash));
 
                 data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
                 new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
                 if (PTR_ERR(new_tbl) != -EEXIST)
                         data = ERR_CAST(new_tbl);
 
-                spin_unlock(rht_bucket_lock(tbl, hash));
-        }
-
-        spin_unlock_bh(lock);
+                spin_unlock_bh(rht_bucket_lock(tbl, hash));
+        } while (!IS_ERR_OR_NULL(new_tbl));
 
         if (PTR_ERR(data) == -EAGAIN)
                 data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
@@ -939,10 +918,11 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter)
         ht = iter->ht;
 
         spin_lock(&ht->lock);
-        if (tbl->rehash < tbl->size)
-                list_add(&iter->walker.list, &tbl->walkers);
-        else
+        if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu))
+                /* This bucket table is being freed, don't re-link it. */
                 iter->walker.tbl = NULL;
+        else
+                list_add(&iter->walker.list, &tbl->walkers);
         spin_unlock(&ht->lock);
 
 out:
...
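The changes to bucket_table_alloc(), rhashtable_rehash_table() and rhashtable_walk_stop() above hinge on the rcu_head_init()/rcu_head_after_call_rcu() pair: the rcu_head is initialised when a bucket table is allocated, call_rcu() is now issued while ht->lock is still held when the old table is retired, and rhashtable_walk_stop() can then test, under the same lock, whether the table has already been handed to call_rcu() before re-linking a walker to it, instead of consulting the removed rehash counter. A stripped-down sketch of that pattern, using made-up names (demo_table, demo_lock, demo_table_*) rather than the rhashtable internals:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Illustrative stand-in for struct bucket_table; only the rcu_head matters. */
struct demo_table {
        struct rcu_head rcu;
};

static DEFINE_SPINLOCK(demo_lock);

static void demo_table_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct demo_table, rcu));
}

static struct demo_table *demo_table_alloc(gfp_t gfp)
{
        struct demo_table *tbl = kzalloc(sizeof(*tbl), gfp);

        if (tbl)
                rcu_head_init(&tbl->rcu);   /* as bucket_table_alloc() now does */
        return tbl;
}

/* Retire a table under the lock, matching how rhashtable_rehash_table()
 * now issues call_rcu() before dropping ht->lock.
 */
static void demo_table_retire(struct demo_table *tbl)
{
        spin_lock(&demo_lock);
        call_rcu(&tbl->rcu, demo_table_free_rcu);
        spin_unlock(&demo_lock);
}

/* Mirrors the new rhashtable_walk_stop() test: under the same lock, a
 * table that has already been passed to call_rcu() must not be reused.
 */
static bool demo_table_still_live(struct demo_table *tbl)
{
        bool live;

        spin_lock(&demo_lock);
        live = !rcu_head_after_call_rcu(&tbl->rcu, demo_table_free_rcu);
        spin_unlock(&demo_lock);
        return live;
}

Keeping call_rcu() inside the locked region is what makes the check meaningful: as the new comment in rhashtable_rehash_table() says, a walker stopping concurrently either finds its walker->tbl already cleared or sees rcu_head_after_call_rcu() return true, so it never re-links a table that is about to be freed.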