Commit 5f8ddeab authored by Florian Westphal's avatar Florian Westphal Committed by David S. Miller

rhashtable: remove insecure_elasticity

commit 83e7e4ce ("mac80211: Use rhltable instead of rhashtable")
removed the last user that made use of 'insecure_elasticity' parameter,
i.e. the default of 16 is used everywhere.

Replace it with a constant.
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3a9ca1e2
...@@ -49,6 +49,21 @@ ...@@ -49,6 +49,21 @@
/* Base bits plus 1 bit for nulls marker */ /* Base bits plus 1 bit for nulls marker */
#define RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1) #define RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1)
/* Maximum chain length before rehash
*
* The maximum (not average) chain length grows with the size of the hash
* table, at a rate of (log N)/(log log N).
*
* The value of 16 is selected so that even if the hash table grew to
* 2^32 you would not expect the maximum chain length to exceed it
* unless we are under attack (or extremely unlucky).
*
* As this limit is only to detect attacks, we don't need to set it to a
* lower value as you'd need the chain length to vastly exceed 16 to have
* any real effect on the system.
*/
#define RHT_ELASTICITY 16u
struct rhash_head { struct rhash_head {
struct rhash_head __rcu *next; struct rhash_head __rcu *next;
}; };
...@@ -114,7 +129,6 @@ struct rhashtable; ...@@ -114,7 +129,6 @@ struct rhashtable;
* @max_size: Maximum size while expanding * @max_size: Maximum size while expanding
* @min_size: Minimum size while shrinking * @min_size: Minimum size while shrinking
* @nulls_base: Base value to generate nulls marker * @nulls_base: Base value to generate nulls marker
* @insecure_elasticity: Set to true to disable chain length checks
* @automatic_shrinking: Enable automatic shrinking of tables * @automatic_shrinking: Enable automatic shrinking of tables
* @locks_mul: Number of bucket locks to allocate per cpu (default: 128) * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
* @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash) * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
...@@ -130,7 +144,6 @@ struct rhashtable_params { ...@@ -130,7 +144,6 @@ struct rhashtable_params {
unsigned int max_size; unsigned int max_size;
unsigned int min_size; unsigned int min_size;
u32 nulls_base; u32 nulls_base;
bool insecure_elasticity;
bool automatic_shrinking; bool automatic_shrinking;
size_t locks_mul; size_t locks_mul;
rht_hashfn_t hashfn; rht_hashfn_t hashfn;
...@@ -143,7 +156,6 @@ struct rhashtable_params { ...@@ -143,7 +156,6 @@ struct rhashtable_params {
* @tbl: Bucket table * @tbl: Bucket table
* @nelems: Number of elements in table * @nelems: Number of elements in table
* @key_len: Key length for hashfn * @key_len: Key length for hashfn
* @elasticity: Maximum chain length before rehash
* @p: Configuration parameters * @p: Configuration parameters
* @rhlist: True if this is an rhltable * @rhlist: True if this is an rhltable
* @run_work: Deferred worker to expand/shrink asynchronously * @run_work: Deferred worker to expand/shrink asynchronously
...@@ -154,7 +166,6 @@ struct rhashtable { ...@@ -154,7 +166,6 @@ struct rhashtable {
struct bucket_table __rcu *tbl; struct bucket_table __rcu *tbl;
atomic_t nelems; atomic_t nelems;
unsigned int key_len; unsigned int key_len;
unsigned int elasticity;
struct rhashtable_params p; struct rhashtable_params p;
bool rhlist; bool rhlist;
struct work_struct run_work; struct work_struct run_work;
...@@ -726,7 +737,7 @@ static inline void *__rhashtable_insert_fast( ...@@ -726,7 +737,7 @@ static inline void *__rhashtable_insert_fast(
return rhashtable_insert_slow(ht, key, obj); return rhashtable_insert_slow(ht, key, obj);
} }
elasticity = ht->elasticity; elasticity = RHT_ELASTICITY;
pprev = rht_bucket_insert(ht, tbl, hash); pprev = rht_bucket_insert(ht, tbl, hash);
data = ERR_PTR(-ENOMEM); data = ERR_PTR(-ENOMEM);
if (!pprev) if (!pprev)
......
...@@ -535,7 +535,7 @@ static void *rhashtable_lookup_one(struct rhashtable *ht, ...@@ -535,7 +535,7 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
struct rhash_head *head; struct rhash_head *head;
int elasticity; int elasticity;
elasticity = ht->elasticity; elasticity = RHT_ELASTICITY;
pprev = rht_bucket_var(tbl, hash); pprev = rht_bucket_var(tbl, hash);
rht_for_each_continue(head, *pprev, tbl, hash) { rht_for_each_continue(head, *pprev, tbl, hash) {
struct rhlist_head *list; struct rhlist_head *list;
...@@ -972,21 +972,6 @@ int rhashtable_init(struct rhashtable *ht, ...@@ -972,21 +972,6 @@ int rhashtable_init(struct rhashtable *ht,
if (params->nelem_hint) if (params->nelem_hint)
size = rounded_hashtable_size(&ht->p); size = rounded_hashtable_size(&ht->p);
/* The maximum (not average) chain length grows with the
* size of the hash table, at a rate of (log N)/(log log N).
* The value of 16 is selected so that even if the hash
* table grew to 2^32 you would not expect the maximum
* chain length to exceed it unless we are under attack
* (or extremely unlucky).
*
* As this limit is only to detect attacks, we don't need
* to set it to a lower value as you'd need the chain
* length to vastly exceed 16 to have any real effect
* on the system.
*/
if (!params->insecure_elasticity)
ht->elasticity = 16;
if (params->locks_mul) if (params->locks_mul)
ht->p.locks_mul = roundup_pow_of_two(params->locks_mul); ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
else else
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment