Commit bf3f14d6 authored by David S. Miller

rhashtable: Revert nested table changes.

This reverts commits:

6a254780
9dbbfb0a
40137906

It's too risky to put in this late in the release
cycle.  We'll put these changes into the next merge
window instead.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 75224c93
fs/gfs2/glock.c
@@ -1420,32 +1420,26 @@ static struct shrinker glock_shrinker = {
  * @sdp: the filesystem
  * @bucket: the bucket
  *
- * Note that the function can be called multiple times on the same
- * object.  So the user must ensure that the function can cope with
- * that.
  */

 static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
 {
 	struct gfs2_glock *gl;
-	struct rhashtable_iter iter;
-
-	rhashtable_walk_enter(&gl_hash_table, &iter);
-
-	do {
-		gl = ERR_PTR(rhashtable_walk_start(&iter));
-		if (gl)
-			continue;
-
-		while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
+	struct rhash_head *pos;
+	const struct bucket_table *tbl;
+	int i;
+
+	rcu_read_lock();
+	tbl = rht_dereference_rcu(gl_hash_table.tbl, &gl_hash_table);
+	for (i = 0; i < tbl->size; i++) {
+		rht_for_each_entry_rcu(gl, pos, tbl, i, gl_node) {
 			if ((gl->gl_name.ln_sbd == sdp) &&
 			    lockref_get_not_dead(&gl->gl_lockref))
 				examiner(gl);
-
-		rhashtable_walk_stop(&iter);
-	} while (cond_resched(), gl == ERR_PTR(-EAGAIN));
-
-	rhashtable_walk_exit(&iter);
+		}
+	}
+	rcu_read_unlock();
+	cond_resched();
 }

 /**
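For context: this hunk (and the net/tipc/socket.c hunk below) drops the rhashtable walk-iterator API in favour of scanning the bucket table directly. A minimal sketch of the iterator pattern being removed, with a hypothetical table my_ht holding struct my_obj entries (names invented for illustration); the -EAGAIN restart is why the callback can see the same object more than once, which is what the deleted comment above warned about:

#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/sched.h>

struct my_obj {
	u32 key;
	struct rhash_head node;
};

static void my_walk(struct rhashtable *my_ht, void (*cb)(struct my_obj *))
{
	struct rhashtable_iter iter;
	struct my_obj *obj;

	rhashtable_walk_enter(my_ht, &iter);
	do {
		/* walk_start() returns -EAGAIN if the table was resized
		 * since the walk was last stopped; skip straight to the
		 * restart check in that case. */
		obj = ERR_PTR(rhashtable_walk_start(&iter));
		if (obj)
			continue;

		while ((obj = rhashtable_walk_next(&iter)) && !IS_ERR(obj))
			cb(obj);

		rhashtable_walk_stop(&iter);
		/* restart the whole walk if a resize interrupted it */
	} while (cond_resched(), obj == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);
}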
include/linux/rhashtable.h
@@ -61,7 +61,6 @@ struct rhlist_head {
 /**
  * struct bucket_table - Table of hash buckets
  * @size: Number of hash buckets
- * @nest: Number of bits of first-level nested table.
  * @rehash: Current bucket being rehashed
  * @hash_rnd: Random seed to fold into hash
  * @locks_mask: Mask to apply before accessing locks[]
@@ -69,12 +68,10 @@ struct rhlist_head {
  * @walkers: List of active walkers
  * @rcu: RCU structure for freeing the table
  * @future_tbl: Table under construction during rehashing
- * @ntbl: Nested table used when out of memory.
  * @buckets: size * hash buckets
  */
 struct bucket_table {
 	unsigned int		size;
-	unsigned int		nest;
 	unsigned int		rehash;
 	u32			hash_rnd;
 	unsigned int		locks_mask;
@@ -84,7 +81,7 @@ struct bucket_table {
 	struct bucket_table __rcu *future_tbl;

-	struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
+	struct rhash_head __rcu	*buckets[] ____cacheline_aligned_in_smp;
 };

 /**
@@ -377,12 +374,6 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
 			     void *arg);
 void rhashtable_destroy(struct rhashtable *ht);

-struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
-					    unsigned int hash);
-struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
-						   struct bucket_table *tbl,
-						   unsigned int hash);
-
 #define rht_dereference(p, ht) \
 	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
@@ -398,27 +389,6 @@ struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
 #define rht_entry(tpos, pos, member) \
 	({ tpos = container_of(pos, typeof(*tpos), member); 1; })

-static inline struct rhash_head __rcu *const *rht_bucket(
-	const struct bucket_table *tbl, unsigned int hash)
-{
-	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
-				     &tbl->buckets[hash];
-}
-
-static inline struct rhash_head __rcu **rht_bucket_var(
-	struct bucket_table *tbl, unsigned int hash)
-{
-	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
-				     &tbl->buckets[hash];
-}
-
-static inline struct rhash_head __rcu **rht_bucket_insert(
-	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
-{
-	return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
-				     &tbl->buckets[hash];
-}
-
 /**
  * rht_for_each_continue - continue iterating over hash chain
  * @pos: the &struct rhash_head to use as a loop cursor.
@@ -438,7 +408,7 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * @hash: the hash value / bucket index
  */
 #define rht_for_each(pos, tbl, hash) \
-	rht_for_each_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
+	rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)

 /**
  * rht_for_each_entry_continue - continue iterating over hash chain
@@ -463,7 +433,7 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * @member: name of the &struct rhash_head within the hashable struct.
  */
 #define rht_for_each_entry(tpos, pos, tbl, hash, member)		\
-	rht_for_each_entry_continue(tpos, pos, *rht_bucket(tbl, hash),	\
+	rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash],	\
 				    tbl, hash, member)

 /**
@@ -478,13 +448,13 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * This hash chain list-traversal primitive allows for the looped code to
  * remove the loop cursor from the list.
  */
 #define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)	      \
-	for (pos = rht_dereference_bucket(*rht_bucket(tbl, hash), tbl, hash), \
+	for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash),   \
 	     next = !rht_is_a_nulls(pos) ?				      \
 		       rht_dereference_bucket(pos->next, tbl, hash) : NULL;   \
 	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	      \
 	     pos = next,						      \
 	     next = !rht_is_a_nulls(pos) ?				      \
 		       rht_dereference_bucket(pos->next, tbl, hash) : NULL)

 /**
@@ -515,7 +485,7 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * traversal is guarded by rcu_read_lock().
  */
 #define rht_for_each_rcu(pos, tbl, hash)				\
-	rht_for_each_rcu_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
+	rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)

 /**
  * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
@@ -548,8 +518,8 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * the _rcu mutation primitives such as rhashtable_insert() as long as the
  * traversal is guarded by rcu_read_lock().
  */
 #define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)		\
-	rht_for_each_entry_rcu_continue(tpos, pos, *rht_bucket(tbl, hash), \
+	rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
 					tbl, hash, member)

 /**
@@ -595,7 +565,7 @@ static inline struct rhash_head *__rhashtable_lookup(
 		.ht = ht,
 		.key = key,
 	};
-	struct bucket_table *tbl;
+	const struct bucket_table *tbl;
 	struct rhash_head *he;
 	unsigned int hash;
@@ -727,12 +697,8 @@ static inline void *__rhashtable_insert_fast(
 	}

 	elasticity = ht->elasticity;
-	pprev = rht_bucket_insert(ht, tbl, hash);
-	data = ERR_PTR(-ENOMEM);
-	if (!pprev)
-		goto out;
-
-	rht_for_each_continue(head, *pprev, tbl, hash) {
+	pprev = &tbl->buckets[hash];
+	rht_for_each(head, tbl, hash) {
 		struct rhlist_head *plist;
 		struct rhlist_head *list;
@@ -770,7 +736,7 @@ static inline void *__rhashtable_insert_fast(
 	if (unlikely(rht_grow_above_100(ht, tbl)))
 		goto slow_path;

-	head = rht_dereference_bucket(*pprev, tbl, hash);
+	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

 	RCU_INIT_POINTER(obj->next, head);
 	if (rhlist) {
@@ -780,7 +746,7 @@ static inline void *__rhashtable_insert_fast(
 		RCU_INIT_POINTER(list->next, NULL);
 	}

-	rcu_assign_pointer(*pprev, obj);
+	rcu_assign_pointer(tbl->buckets[hash], obj);

 	atomic_inc(&ht->nelems);
 	if (rht_grow_above_75(ht, tbl))
@@ -989,8 +955,8 @@ static inline int __rhashtable_remove_fast_one(

 	spin_lock_bh(lock);

-	pprev = rht_bucket_var(tbl, hash);
-	rht_for_each_continue(he, *pprev, tbl, hash) {
+	pprev = &tbl->buckets[hash];
+	rht_for_each(he, tbl, hash) {
 		struct rhlist_head *list;

 		list = container_of(he, struct rhlist_head, rhead);
@@ -1141,8 +1107,8 @@ static inline int __rhashtable_replace_fast(

 	spin_lock_bh(lock);

-	pprev = rht_bucket_var(tbl, hash);
-	rht_for_each_continue(he, *pprev, tbl, hash) {
+	pprev = &tbl->buckets[hash];
+	rht_for_each(he, tbl, hash) {
 		if (he != obj_old) {
 			pprev = &he->next;
 			continue;
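The macros restored above read the chain head directly from (tbl)->buckets[hash]: without the nested-table fallback, the bucket array is again a single flat allocation, so no rht_bucket() indirection is needed. None of this changes the public rhashtable API. As a reference point, a minimal, hypothetical caller of that API; struct my_obj and my_params are invented for illustration:

#include <linux/rhashtable.h>
#include <linux/slab.h>

struct my_obj {
	u32 key;
	struct rhash_head node;	/* linked into one bucket chain */
};

static const struct rhashtable_params my_params = {
	.key_len     = sizeof(u32),
	.key_offset  = offsetof(struct my_obj, key),
	.head_offset = offsetof(struct my_obj, node),
	.automatic_shrinking = true,
};

static int my_example(void)
{
	struct rhashtable ht;
	struct my_obj *obj, *found;
	int err;

	err = rhashtable_init(&ht, &my_params);
	if (err)
		return err;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj) {
		err = -ENOMEM;
		goto out_destroy;
	}
	obj->key = 42;

	err = rhashtable_insert_fast(&ht, &obj->node, my_params);
	if (err)
		goto out_free;

	/* Lookups walk the bucket chain under rcu_read_lock(). */
	rcu_read_lock();
	found = rhashtable_lookup_fast(&ht, &obj->key, my_params);
	WARN_ON(found != obj);
	rcu_read_unlock();

	err = rhashtable_remove_fast(&ht, &obj->node, my_params);
out_free:
	kfree(obj);	/* real code would defer the free, e.g. kfree_rcu() */
out_destroy:
	rhashtable_destroy(&ht);
	return err;
}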
This diff is collapsed.
net/tipc/net.c
@@ -110,10 +110,6 @@ int tipc_net_start(struct net *net, u32 addr)
 	char addr_string[16];

 	tn->own_addr = addr;
-
-	/* Ensure that the new address is visible before we reinit. */
-	smp_mb();
-
 	tipc_named_reinit(net);
 	tipc_sk_reinit(net);
net/tipc/socket.c
@@ -384,6 +384,8 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
 	INIT_LIST_HEAD(&tsk->publications);
 	msg = &tsk->phdr;
 	tn = net_generic(sock_net(sk), tipc_net_id);
+	tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
+		      NAMED_H_SIZE, 0);

 	/* Finish initializing socket data structures */
 	sock->ops = ops;
@@ -393,13 +395,6 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
 		pr_warn("Socket create failed; port number exhausted\n");
 		return -EINVAL;
 	}
-
-	/* Ensure tsk is visible before we read own_addr. */
-	smp_mb();
-
-	tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
-		      NAMED_H_SIZE, 0);
-
 	msg_set_origport(msg, tsk->portid);
 	setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
 	sk->sk_shutdown = 0;
@@ -2274,27 +2269,24 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
 void tipc_sk_reinit(struct net *net)
 {
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	struct rhashtable_iter iter;
+	const struct bucket_table *tbl;
+	struct rhash_head *pos;
 	struct tipc_sock *tsk;
 	struct tipc_msg *msg;
+	int i;

-	rhashtable_walk_enter(&tn->sk_rht, &iter);
-
-	do {
-		tsk = ERR_PTR(rhashtable_walk_start(&iter));
-		if (tsk)
-			continue;
-
-		while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
+	rcu_read_lock();
+	tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
+	for (i = 0; i < tbl->size; i++) {
+		rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
 			spin_lock_bh(&tsk->sk.sk_lock.slock);
 			msg = &tsk->phdr;
 			msg_set_prevnode(msg, tn->own_addr);
 			msg_set_orignode(msg, tn->own_addr);
 			spin_unlock_bh(&tsk->sk.sk_lock.slock);
 		}
-
-		rhashtable_walk_stop(&iter);
-	} while (tsk == ERR_PTR(-EAGAIN));
+	}
+	rcu_read_unlock();
 }

 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
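The smp_mb() pair removed here and in net/tipc/net.c covered a store-buffering case: tipc_net_start() stores tn->own_addr and then walks the socket table, while tipc_sk_create() publishes the socket and then reads tn->own_addr, so with a full barrier on each side at least one of them is guaranteed to observe the other's store. A minimal, hypothetical sketch of that kind of pairing (invented names, not TIPC code):

#include <linux/compiler.h>
#include <asm/barrier.h>

struct my_sock {
	u32 hdr_addr;			/* address stamped into the header */
};

static u32 own_addr;
static struct my_sock *table[1];	/* stand-in for the socket hash table */

/* Side A: publish the new address, then fix up any already-visible socket. */
static void set_addr_and_reinit(u32 addr)
{
	struct my_sock *sk;

	WRITE_ONCE(own_addr, addr);
	smp_mb();			/* order the store before the table scan */
	sk = READ_ONCE(table[0]);
	if (sk)
		sk->hdr_addr = addr;
}

/* Side B: publish the socket, then stamp it with the current address. */
static void create_sock(struct my_sock *sk)
{
	WRITE_ONCE(table[0], sk);
	smp_mb();			/* pairs with the barrier on side A */
	sk->hdr_addr = READ_ONCE(own_addr);
}

With both barriers, side A cannot miss the new socket while side B also misses the new address. The revert restores the original ordering, where the header is stamped before the socket is inserted into the table, and drops the barriers along with the rest of the series.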