Commit 5fa7d3f9 authored by David S. Miller

Merge branch 'rhashtable-bit-locking-m68k'

NeilBrown says:

====================
Fix rhashtable bit-locking for m68k

As reported by Guenter Roeck, the new rhashtable bit-locking
doesn't work on m68k, as it only requires 2-byte alignment, so BIT(1)
in addresses is not always unused.

We currently use BIT(0) to identify a NULLS marker, but that is only
needed in ->next pointers.  The bucket head does not need a NULLS
marker, so the lsb there can be used for locking.

The first 4 patches make some small improvements and re-arrange some
code.  The final patch converts to using only BIT(0) for these two
different special purposes.

I had previously suggested dropping the series until I could fix it.
Given that this was fairly easy, I retract that; I think it best simply
to add these patches to fix the code.
====================
Tested-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c252aa3e ca0b709d
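
As a rough, userspace-only illustration of the trick the series relies on (this is not the kernel API, and the helper names below are made up for this sketch): because hash-chain entries are pointer-aligned, BIT(0) of a bucket-head pointer is always zero, so it can double as a per-bucket spin lock, while an empty bucket simply stores NULL.

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Entries are pointer-aligned, so BIT(0) of a bucket-head pointer is
 * always zero and can be used as a lock bit.  An empty bucket stores
 * plain NULL; no NULLS marker is ever stored in the bucket itself, so
 * a single spare bit is enough.
 */
struct entry {
        struct entry *next;
        int key;
};

#define LOCK_BIT ((uintptr_t)1)

static _Atomic uintptr_t bucket;        /* tagged value: (struct entry *) | lock bit */

static void bucket_lock(void)
{
        for (;;) {
                /* Try to set BIT(0); succeed only if it was clear. */
                uintptr_t old = atomic_load(&bucket) & ~LOCK_BIT;

                if (atomic_compare_exchange_weak(&bucket, &old, old | LOCK_BIT))
                        return;
        }
}

static void bucket_unlock_with(struct entry *new_head)
{
        /* Storing a new head with BIT(0) clear both publishes the chain
         * and releases the lock, which is the idea behind rht_assign_unlock().
         */
        atomic_store(&bucket, (uintptr_t)new_head);
}

static struct entry *bucket_head(void)
{
        /* Strip the lock bit before dereferencing, as rht_ptr() does. */
        return (struct entry *)(atomic_load(&bucket) & ~LOCK_BIT);
}

int main(void)
{
        static struct entry e = { .next = NULL, .key = 42 };

        bucket_lock();
        e.next = bucket_head();                 /* insert at head */
        bucket_unlock_with(&e);

        assert(bucket_head()->key == 42);
        printf("head key = %d\n", bucket_head()->key);
        return 0;
}

The real helpers in the diff below additionally disable bottom halves and integrate with lockdep, which a sketch like this leaves out.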
@@ -35,12 +35,12 @@
  * the least significant bit set but otherwise stores the address of
  * the hash bucket. This allows us to be be sure we've found the end
  * of the right list.
- * The value stored in the hash bucket has BIT(2) used as a lock bit.
+ * The value stored in the hash bucket has BIT(0) used as a lock bit.
  * This bit must be atomically set before any changes are made to
  * the chain. To avoid dereferencing this pointer without clearing
  * the bit first, we use an opaque 'struct rhash_lock_head *' for the
  * pointer stored in the bucket. This struct needs to be defined so
- * that rcu_derefernce() works on it, but it has no content so a
+ * that rcu_dereference() works on it, but it has no content so a
  * cast is needed for it to be useful. This ensures it isn't
  * used by mistake with clearing the lock bit first.
  */
@@ -87,90 +87,23 @@ struct bucket_table {
         struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp;
 };
 
-/*
- * We lock a bucket by setting BIT(1) in the pointer - this is always
- * zero in real pointers and in the nulls marker.
- * bit_spin_locks do not handle contention well, but the whole point
- * of the hashtable design is to achieve minimum per-bucket contention.
- * A nested hash table might not have a bucket pointer. In that case
- * we cannot get a lock. For remove and replace the bucket cannot be
- * interesting and doesn't need locking.
- * For insert we allocate the bucket if this is the last bucket_table,
- * and then take the lock.
- * Sometimes we unlock a bucket by writing a new pointer there. In that
- * case we don't need to unlock, but we do need to reset state such as
- * local_bh. For that we have rht_assign_unlock(). As rcu_assign_pointer()
- * provides the same release semantics that bit_spin_unlock() provides,
- * this is safe.
- */
-
-static inline void rht_lock(struct bucket_table *tbl,
-                            struct rhash_lock_head **bkt)
-{
-        local_bh_disable();
-        bit_spin_lock(1, (unsigned long *)bkt);
-        lock_map_acquire(&tbl->dep_map);
-}
-
-static inline void rht_lock_nested(struct bucket_table *tbl,
-                                   struct rhash_lock_head **bucket,
-                                   unsigned int subclass)
-{
-        local_bh_disable();
-        bit_spin_lock(1, (unsigned long *)bucket);
-        lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
-}
-
-static inline void rht_unlock(struct bucket_table *tbl,
-                              struct rhash_lock_head **bkt)
-{
-        lock_map_release(&tbl->dep_map);
-        bit_spin_unlock(1, (unsigned long *)bkt);
-        local_bh_enable();
-}
-
-static inline void rht_assign_unlock(struct bucket_table *tbl,
-                                     struct rhash_lock_head **bkt,
-                                     struct rhash_head *obj)
-{
-        struct rhash_head **p = (struct rhash_head **)bkt;
-
-        lock_map_release(&tbl->dep_map);
-        rcu_assign_pointer(*p, obj);
-        preempt_enable();
-        __release(bitlock);
-        local_bh_enable();
-}
-
-/*
- * If 'p' is a bucket head and might be locked:
- *    rht_ptr() returns the address without the lock bit.
- *    rht_ptr_locked() returns the address WITH the lock bit.
- */
-static inline struct rhash_head __rcu *rht_ptr(const struct rhash_lock_head *p)
-{
-        return (void *)(((unsigned long)p) & ~BIT(1));
-}
-
-static inline struct rhash_lock_head __rcu *rht_ptr_locked(const
-                                                struct rhash_head *p)
-{
-        return (void *)(((unsigned long)p) | BIT(1));
-}
-
 /*
  * NULLS_MARKER() expects a hash value with the low
  * bits mostly likely to be significant, and it discards
  * the msb.
- * We git it an address, in which the bottom 2 bits are
+ * We give it an address, in which the bottom bit is
  * always 0, and the msb might be significant.
  * So we shift the address down one bit to align with
  * expectations and avoid losing a significant bit.
+ *
+ * We never store the NULLS_MARKER in the hash table
+ * itself as we need the lsb for locking.
+ * Instead we store a NULL
  */
 #define RHT_NULLS_MARKER(ptr) \
         ((void *)NULLS_MARKER(((unsigned long) (ptr)) >> 1))
 #define INIT_RHT_NULLS_HEAD(ptr) \
-        ((ptr) = RHT_NULLS_MARKER(&(ptr)))
+        ((ptr) = NULL)
 
 static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
 {
@@ -372,6 +305,105 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
                                      &tbl->buckets[hash];
 }
 
+/*
+ * We lock a bucket by setting BIT(0) in the pointer - this is always
+ * zero in real pointers. The NULLS mark is never stored in the bucket,
+ * rather we store NULL if the bucket is empty.
+ * bit_spin_locks do not handle contention well, but the whole point
+ * of the hashtable design is to achieve minimum per-bucket contention.
+ * A nested hash table might not have a bucket pointer. In that case
+ * we cannot get a lock. For remove and replace the bucket cannot be
+ * interesting and doesn't need locking.
+ * For insert we allocate the bucket if this is the last bucket_table,
+ * and then take the lock.
+ * Sometimes we unlock a bucket by writing a new pointer there. In that
+ * case we don't need to unlock, but we do need to reset state such as
+ * local_bh. For that we have rht_assign_unlock(). As rcu_assign_pointer()
+ * provides the same release semantics that bit_spin_unlock() provides,
+ * this is safe.
+ * When we write to a bucket without unlocking, we use rht_assign_locked().
+ */
+static inline void rht_lock(struct bucket_table *tbl,
+                            struct rhash_lock_head **bkt)
+{
+        local_bh_disable();
+        bit_spin_lock(0, (unsigned long *)bkt);
+        lock_map_acquire(&tbl->dep_map);
+}
+
+static inline void rht_lock_nested(struct bucket_table *tbl,
+                                   struct rhash_lock_head **bucket,
+                                   unsigned int subclass)
+{
+        local_bh_disable();
+        bit_spin_lock(0, (unsigned long *)bucket);
+        lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
+}
+
+static inline void rht_unlock(struct bucket_table *tbl,
+                              struct rhash_lock_head **bkt)
+{
+        lock_map_release(&tbl->dep_map);
+        bit_spin_unlock(0, (unsigned long *)bkt);
+        local_bh_enable();
+}
+
+/*
+ * Where 'bkt' is a bucket and might be locked:
+ *    rht_ptr() dereferences that pointer and clears the lock bit.
+ *    rht_ptr_exclusive() dereferences in a context where exclusive
+ *            access is guaranteed, such as when destroying the table.
+ */
+static inline struct rhash_head *rht_ptr(
+        struct rhash_lock_head __rcu * const *bkt,
+        struct bucket_table *tbl,
+        unsigned int hash)
+{
+        const struct rhash_lock_head *p =
+                rht_dereference_bucket_rcu(*bkt, tbl, hash);
+
+        if ((((unsigned long)p) & ~BIT(0)) == 0)
+                return RHT_NULLS_MARKER(bkt);
+        return (void *)(((unsigned long)p) & ~BIT(0));
+}
+
+static inline struct rhash_head *rht_ptr_exclusive(
+        struct rhash_lock_head __rcu * const *bkt)
+{
+        const struct rhash_lock_head *p =
+                rcu_dereference_protected(*bkt, 1);
+
+        if (!p)
+                return RHT_NULLS_MARKER(bkt);
+        return (void *)(((unsigned long)p) & ~BIT(0));
+}
+
+static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
+                                     struct rhash_head *obj)
+{
+        struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
+
+        if (rht_is_a_nulls(obj))
+                obj = NULL;
+        rcu_assign_pointer(*p, (void *)((unsigned long)obj | BIT(0)));
+}
+
+static inline void rht_assign_unlock(struct bucket_table *tbl,
+                                     struct rhash_lock_head __rcu **bkt,
+                                     struct rhash_head *obj)
+{
+        struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
+
+        if (rht_is_a_nulls(obj))
+                obj = NULL;
+        lock_map_release(&tbl->dep_map);
+        rcu_assign_pointer(*p, obj);
+        preempt_enable();
+        __release(bitlock);
+        local_bh_enable();
+}
+
 /**
  * rht_for_each_from - iterate over hash chain from given head
  * @pos: the &struct rhash_head to use as a loop cursor.
@@ -380,8 +412,8 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
  * @hash: the hash value / bucket index
  */
 #define rht_for_each_from(pos, head, tbl, hash) \
-        for (pos = rht_dereference_bucket(head, tbl, hash); \
+        for (pos = head; \
              !rht_is_a_nulls(pos); \
              pos = rht_dereference_bucket((pos)->next, tbl, hash))
 
 /**
@@ -391,7 +423,8 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
  * @hash: the hash value / bucket index
  */
 #define rht_for_each(pos, tbl, hash) \
-        rht_for_each_from(pos, rht_ptr(*rht_bucket(tbl, hash)), tbl, hash)
+        rht_for_each_from(pos, rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
+                          tbl, hash)
 
 /**
  * rht_for_each_entry_from - iterate over hash chain from given head
@@ -403,7 +436,7 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
  * @member: name of the &struct rhash_head within the hashable struct.
  */
 #define rht_for_each_entry_from(tpos, pos, head, tbl, hash, member) \
-        for (pos = rht_dereference_bucket(head, tbl, hash); \
+        for (pos = head; \
              (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
              pos = rht_dereference_bucket((pos)->next, tbl, hash))
 
@@ -416,8 +449,9 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
  * @member: name of the &struct rhash_head within the hashable struct.
  */
 #define rht_for_each_entry(tpos, pos, tbl, hash, member) \
-        rht_for_each_entry_from(tpos, pos, rht_ptr(*rht_bucket(tbl, hash)), \
-                                tbl, hash, member)
+        rht_for_each_entry_from(tpos, pos, \
+                                rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
+                                tbl, hash, member)
 
 /**
  * rht_for_each_entry_safe - safely iterate over hash chain of given type
@@ -432,8 +466,7 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
  *      remove the loop cursor from the list.
  */
 #define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
-        for (pos = rht_dereference_bucket(rht_ptr(*rht_bucket(tbl, hash)), \
-                                          tbl, hash), \
+        for (pos = rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
              next = !rht_is_a_nulls(pos) ? \
                        rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
              (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
@@ -454,7 +487,7 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
  */
 #define rht_for_each_rcu_from(pos, head, tbl, hash) \
         for (({barrier(); }), \
-             pos = rht_dereference_bucket_rcu(head, tbl, hash); \
+             pos = head; \
              !rht_is_a_nulls(pos); \
              pos = rcu_dereference_raw(pos->next))
 
@@ -469,10 +502,9 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
  * traversal is guarded by rcu_read_lock().
  */
 #define rht_for_each_rcu(pos, tbl, hash) \
         for (({barrier(); }), \
-             pos = rht_ptr(rht_dereference_bucket_rcu( \
-                           *rht_bucket(tbl, hash), tbl, hash)); \
+             pos = rht_ptr(rht_bucket(tbl, hash), tbl, hash); \
              !rht_is_a_nulls(pos); \
              pos = rcu_dereference_raw(pos->next))
 
 /**
@@ -490,7 +522,7 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
  */
 #define rht_for_each_entry_rcu_from(tpos, pos, head, tbl, hash, member) \
         for (({barrier(); }), \
-             pos = rht_dereference_bucket_rcu(head, tbl, hash); \
+             pos = head; \
              (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
              pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))
 
@@ -508,8 +540,9 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
  */
 #define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
         rht_for_each_entry_rcu_from(tpos, pos, \
-                                    rht_ptr(*rht_bucket(tbl, hash)), \
-                                    tbl, hash, member)
+                                    rht_ptr(rht_bucket(tbl, hash), \
+                                            tbl, hash), \
+                                    tbl, hash, member)
 
 /**
  * rhl_for_each_rcu - iterate over rcu hash table list
@@ -564,8 +597,7 @@ static inline struct rhash_head *__rhashtable_lookup(
         hash = rht_key_hashfn(ht, tbl, key, params);
         bkt = rht_bucket(tbl, hash);
         do {
-                he = rht_ptr(rht_dereference_bucket_rcu(*bkt, tbl, hash));
-                rht_for_each_rcu_from(he, he, tbl, hash) {
+                rht_for_each_rcu_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
                         if (params.obj_cmpfn ?
                             params.obj_cmpfn(&arg, rht_obj(ht, he)) :
                             rhashtable_compare(&arg, rht_obj(ht, he)))
@@ -698,7 +730,7 @@ static inline void *__rhashtable_insert_fast(
                         return rhashtable_insert_slow(ht, key, obj);
         }
 
-        rht_for_each_from(head, rht_ptr(*bkt), tbl, hash) {
+        rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
                 struct rhlist_head *plist;
                 struct rhlist_head *list;
@@ -743,7 +775,7 @@ static inline void *__rhashtable_insert_fast(
                 goto slow_path;
 
         /* Inserting at head of list makes unlocking free. */
-        head = rht_ptr(rht_dereference_bucket(*bkt, tbl, hash));
+        head = rht_ptr(bkt, tbl, hash);
         RCU_INIT_POINTER(obj->next, head);
         if (rhlist) {
@@ -970,7 +1002,7 @@ static inline int __rhashtable_remove_fast_one(
         pprev = NULL;
         rht_lock(tbl, bkt);
 
-        rht_for_each_from(he, rht_ptr(*bkt), tbl, hash) {
+        rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
                 struct rhlist_head *list;
 
                 list = container_of(he, struct rhlist_head, rhead);
@@ -1129,7 +1161,7 @@ static inline int __rhashtable_replace_fast(
         pprev = NULL;
         rht_lock(tbl, bkt);
 
-        rht_for_each_from(he, rht_ptr(*bkt), tbl, hash) {
+        rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
                 if (he != obj_old) {
                         pprev = &he->next;
                         continue;
......
@@ -59,7 +59,7 @@ int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
                 return 1;
         if (unlikely(tbl->nest))
                 return 1;
-        return bit_spin_is_locked(1, (unsigned long *)&tbl->buckets[hash]);
+        return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
 }
 EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
 #else
@@ -223,7 +223,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht,
         struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
         int err = -EAGAIN;
         struct rhash_head *head, *next, *entry;
-        struct rhash_head **pprev = NULL;
+        struct rhash_head __rcu **pprev = NULL;
         unsigned int new_hash;
 
         if (new_tbl->nest)
@@ -231,7 +231,8 @@ static int rhashtable_rehash_one(struct rhashtable *ht,
         err = -ENOENT;
 
-        rht_for_each_from(entry, rht_ptr(*bkt), old_tbl, old_hash) {
+        rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash),
+                          old_tbl, old_hash) {
                 err = 0;
 
                 next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
@@ -248,8 +249,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht,
         rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], SINGLE_DEPTH_NESTING);
 
-        head = rht_ptr(rht_dereference_bucket(new_tbl->buckets[new_hash],
-                                              new_tbl, new_hash));
+        head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);
 
         RCU_INIT_POINTER(entry->next, head);
@@ -259,7 +259,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht,
                 rcu_assign_pointer(*pprev, next);
         else
                 /* Need to preserved the bit lock. */
-                rcu_assign_pointer(*bkt, rht_ptr_locked(next));
+                rht_assign_locked(bkt, next);
 
 out:
         return err;
@@ -486,12 +486,12 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
                 .ht = ht,
                 .key = key,
         };
-        struct rhash_head **pprev = NULL;
+        struct rhash_head __rcu **pprev = NULL;
         struct rhash_head *head;
         int elasticity;
 
         elasticity = RHT_ELASTICITY;
-        rht_for_each_from(head, rht_ptr(*bkt), tbl, hash) {
+        rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
                 struct rhlist_head *list;
                 struct rhlist_head *plist;
@@ -517,7 +517,7 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
                         rcu_assign_pointer(*pprev, obj);
                 else
                         /* Need to preserve the bit lock */
-                        rcu_assign_pointer(*bkt, rht_ptr_locked(obj));
+                        rht_assign_locked(bkt, obj);
 
                 return NULL;
         }
@@ -557,7 +557,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
         if (unlikely(rht_grow_above_100(ht, tbl)))
                 return ERR_PTR(-EAGAIN);
 
-        head = rht_ptr(rht_dereference_bucket(*bkt, tbl, hash));
+        head = rht_ptr(bkt, tbl, hash);
 
         RCU_INIT_POINTER(obj->next, head);
         if (ht->rhlist) {
@@ -570,7 +570,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
         /* bkt is always the head of the list, so it holds
          * the lock, which we need to preserve
          */
-        rcu_assign_pointer(*bkt, rht_ptr_locked(obj));
+        rht_assign_locked(bkt, obj);
 
         atomic_inc(&ht->nelems);
         if (rht_grow_above_75(ht, tbl))
@@ -1139,7 +1139,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
                 struct rhash_head *pos, *next;
 
                 cond_resched();
-                for (pos = rht_ptr(rht_dereference(*rht_bucket(tbl, i), ht)),
+                for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)),
                      next = !rht_is_a_nulls(pos) ?
                                rht_dereference(pos->next, ht) : NULL;
                      !rht_is_a_nulls(pos);
......
@@ -500,7 +500,7 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
                 struct rhash_head *pos, *next;
                 struct test_obj_rhl *p;
 
-                pos = rht_ptr(rht_dereference(tbl->buckets[i], ht));
+                pos = rht_ptr_exclusive(tbl->buckets + i);
                 next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL;
 
                 if (!rht_is_a_nulls(pos)) {
......
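
To make the nulls-marker handling in the header changes above concrete: a chain now ends in an odd "nulls" value rather than NULL, and since an empty bucket stores plain NULL, rht_ptr() substitutes a nulls marker derived from the bucket address so iteration always terminates on rht_is_a_nulls(). The following is a rough userspace sketch of that convention only; the names NULLS_MARKER_OF, IS_A_NULLS and chain_start are invented for illustration and are not the kernel's, and the lock bit is ignored here.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Chains end in an odd "nulls" sentinel rather than NULL, so a lookup
 * can tell which bucket's end it reached.  The bucket itself stores
 * plain NULL when empty; the walk helper converts that to a sentinel
 * derived from the bucket address, roughly what the new rht_ptr() does.
 */
struct node {
        struct node *next;
        int key;
};

/* Odd value: real (aligned) node pointers always have BIT(0) clear. */
#define NULLS_MARKER_OF(addr)   ((struct node *)(((uintptr_t)(addr)) | 1))
#define IS_A_NULLS(p)           (((uintptr_t)(p)) & 1)

static struct node *chain_start(struct node **bucket)
{
        /* An empty bucket stores NULL; hand back a nulls sentinel so the
         * caller's termination test stays uniform.
         */
        return *bucket ? *bucket : NULLS_MARKER_OF(bucket);
}

int main(void)
{
        struct node *bucket = NULL;
        struct node n2 = { .next = NULLS_MARKER_OF(&bucket), .key = 2 };
        struct node n1 = { .next = &n2, .key = 1 };

        bucket = &n1;
        for (struct node *p = chain_start(&bucket); !IS_A_NULLS(p); p = p->next)
                printf("key %d\n", p->key);

        /* An empty bucket walks zero entries. */
        struct node *empty = NULL;
        assert(IS_A_NULLS(chain_start(&empty)));
        return 0;
}

The kernel builds its marker with NULLS_MARKER() on the bucket address shifted right by one bit; the sketch just sets the low bit directly, which yields the same kind of odd, never-dereferenced sentinel.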