Commit 86fe28f7 authored by Alexei Starovoitov, committed by Daniel Borkmann

bpf: Optimize element count in non-preallocated hash map.

The atomic_inc/dec might cause extreme cache line bouncing when multiple cpus
access the same bpf map. Based on the specified max_entries for the hash map,
calculate when percpu_counter becomes faster than atomic_t and use it for such
maps. For example, samples/bpf/map_perf_test uses a hash map with max_entries
1000. On a system with 16 cpus, 'map_perf_test 4' shows 14k events per
second using atomic_t. On a system with 15 cpus it shows 100k events per second
using percpu. map_perf_test is an extreme case where all cpus collide on
atomic_t, which causes extreme cache bouncing. Note that the slow path of
percpu_counter is 5k events per second vs 14k for atomic, so the heuristic is
necessary. See the comment in the code for why the heuristic is based on
num_online_cpus().
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20220902211058.60789-7-alexei.starovoitov@gmail.com
parent 34dd3bad
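
As a concrete reading of the benchmark numbers above: the patch enables percpu_counter only when max_entries / 2 > num_online_cpus() * 32 (see the htab_map_alloc() hunk below), so the same max_entries=1000 map lands on different counters at 16 and 15 cpus. A minimal standalone sketch of that decision; would_use_percpu_counter() is a hypothetical helper written for illustration, not code from the patch:

#include <stdbool.h>
#include <stdio.h>

#define PERCPU_COUNTER_BATCH 32	/* same fixed batch the patch defines */

/* Hypothetical helper mirroring the condition added to htab_map_alloc(). */
static bool would_use_percpu_counter(unsigned int max_entries,
				     unsigned int online_cpus)
{
	return max_entries / 2 > online_cpus * PERCPU_COUNTER_BATCH;
}

int main(void)
{
	/* map_perf_test's hash map: max_entries = 1000 */
	printf("16 cpus -> percpu? %d\n", would_use_percpu_counter(1000, 16)); /* 500 > 512: no, atomic_t */
	printf("15 cpus -> percpu? %d\n", would_use_percpu_counter(1000, 15)); /* 500 > 480: yes, percpu  */
	return 0;
}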
@@ -101,7 +101,12 @@ struct bpf_htab {
 		struct bpf_lru lru;
 	};
 	struct htab_elem *__percpu *extra_elems;
-	atomic_t count;	/* number of elements in this hashtable */
+	/* number of elements in non-preallocated hashtable are kept
+	 * in either pcount or count
+	 */
+	struct percpu_counter pcount;
+	atomic_t count;
+	bool use_percpu_counter;
 	u32 n_buckets;	/* number of hash buckets */
 	u32 elem_size;	/* size of each element in bytes */
 	u32 hashrnd;
@@ -565,6 +570,29 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 
 	htab_init_buckets(htab);
 
+/* compute_batch_value() computes batch value as num_online_cpus() * 2
+ * and __percpu_counter_compare() needs
+ * htab->max_entries - cur_number_of_elems to be more than batch * num_online_cpus()
+ * for percpu_counter to be faster than atomic_t. In practice the average bpf
+ * hash map size is 10k, which means that a system with 64 cpus will fill
+ * hashmap to 20% of 10k before percpu_counter becomes ineffective. Therefore
+ * define our own batch count as 32 then 10k hash map can be filled up to 80%:
+ * 10k - 8k > 32 _batch_ * 64 _cpus_
+ * and __percpu_counter_compare() will still be fast. At that point hash map
+ * collisions will dominate its performance anyway. Assume that hash map filled
+ * to 50+% isn't going to be O(1) and use the following formula to choose
+ * between percpu_counter and atomic_t.
+ */
+#define PERCPU_COUNTER_BATCH 32
+	if (attr->max_entries / 2 > num_online_cpus() * PERCPU_COUNTER_BATCH)
+		htab->use_percpu_counter = true;
+
+	if (htab->use_percpu_counter) {
+		err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL);
+		if (err)
+			goto free_map_locked;
+	}
+
 	if (prealloc) {
 		err = prealloc_init(htab);
 		if (err)
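
Spelling out the arithmetic in the comment above: with the default batch of num_online_cpus() * 2 = 128 on a 64-cpu machine, the rough comparison is only decisive while max_entries - cur_number_of_elems exceeds 128 * 64 = 8192, so a 10k-entry map leaves the fast path at roughly 20% utilization; with the fixed batch of 32 the required headroom drops to 32 * 64 = 2048, keeping the fast path usable until the map is roughly 80% full, by which point hash collisions dominate its performance anyway.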
@@ -891,6 +919,31 @@ static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
 	}
 }
 
+static bool is_map_full(struct bpf_htab *htab)
+{
+	if (htab->use_percpu_counter)
+		return __percpu_counter_compare(&htab->pcount, htab->map.max_entries,
+						PERCPU_COUNTER_BATCH) >= 0;
+	return atomic_read(&htab->count) >= htab->map.max_entries;
+}
+
+static void inc_elem_count(struct bpf_htab *htab)
+{
+	if (htab->use_percpu_counter)
+		percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH);
+	else
+		atomic_inc(&htab->count);
+}
+
+static void dec_elem_count(struct bpf_htab *htab)
+{
+	if (htab->use_percpu_counter)
+		percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH);
+	else
+		atomic_dec(&htab->count);
+}
+
 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 {
 	htab_put_fd_value(htab, l);
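
For context on is_map_full(): __percpu_counter_compare() performs a cheap approximate check first and only falls back to the exact, slower per-cpu sum when the counter sits within batch * num_online_cpus() of the limit. A simplified paraphrase of that behaviour (an illustrative sketch, not the actual lib/percpu_counter.c source; exact_sum stands in for percpu_counter_sum()):

#include <stdint.h>
#include <stdlib.h>	/* llabs() */

/* Illustrative sketch only, modeled on the documented behaviour of
 * __percpu_counter_compare(): the shared counter can lag the true total by at
 * most batch * online_cpus, so a rough read is conclusive whenever it is
 * farther than that from the limit; otherwise every per-cpu delta must be
 * folded in for an exact answer.
 */
static int percpu_compare_sketch(int64_t rough_count, int64_t limit,
				 int32_t batch, unsigned int online_cpus,
				 int64_t (*exact_sum)(void))
{
	if (llabs(rough_count - limit) > (int64_t)batch * online_cpus)
		return rough_count > limit ? 1 : -1;	/* fast, approximate path */

	/* slow path: exact total (percpu_counter_sum() in the kernel) */
	int64_t exact = exact_sum();
	return exact < limit ? -1 : (exact > limit ? 1 : 0);
}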
@@ -899,7 +952,7 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 		check_and_free_fields(htab, l);
 		__pcpu_freelist_push(&htab->freelist, &l->fnode);
 	} else {
-		atomic_dec(&htab->count);
+		dec_elem_count(htab);
 		l->htab = htab;
 		call_rcu(&l->rcu, htab_elem_free_rcu);
 	}
@@ -983,16 +1036,15 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 			l_new = container_of(l, struct htab_elem, fnode);
 		}
 	} else {
-		if (atomic_inc_return(&htab->count) > htab->map.max_entries)
-			if (!old_elem) {
+		if (is_map_full(htab))
+			if (!old_elem)
 				/* when map is full and update() is replacing
 				 * old element, it's ok to allocate, since
 				 * old element will be freed immediately.
 				 * Otherwise return an error
 				 */
-				l_new = ERR_PTR(-E2BIG);
-				goto dec_count;
-			}
+				return ERR_PTR(-E2BIG);
+		inc_elem_count(htab);
 		l_new = bpf_mem_cache_alloc(&htab->ma);
 		if (!l_new) {
 			l_new = ERR_PTR(-ENOMEM);
@@ -1034,7 +1086,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 	l_new->hash = hash;
 	return l_new;
 dec_count:
-	atomic_dec(&htab->count);
+	dec_elem_count(htab);
 	return l_new;
 }
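
The rewritten branch above preserves the documented semantics: when the map is at capacity, an update that replaces an existing element may still allocate, while inserting a brand-new key fails with -E2BIG. A hedged userspace sketch of that behaviour using libbpf (assumes libbpf >= 0.7 for bpf_map_create() and needs CAP_BPF/root; written for illustration, not part of the commit):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <bpf/bpf.h>
#include <linux/bpf.h>

int main(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
	__u32 key;
	__u64 val = 0;
	int fd, err;

	/* tiny non-preallocated hash map: 4 entries max */
	fd = bpf_map_create(BPF_MAP_TYPE_HASH, "e2big_demo",
			    sizeof(key), sizeof(val), 4, &opts);
	if (fd < 0) {
		fprintf(stderr, "map create failed: %s\n", strerror(errno));
		return 1;
	}

	for (key = 0; key < 4; key++)	/* fill the map to max_entries */
		bpf_map_update_elem(fd, &key, &val, BPF_ANY);

	key = 4;			/* brand-new key while full -> rejected */
	err = bpf_map_update_elem(fd, &key, &val, BPF_ANY);
	printf("insert new key: %d (expect failure with E2BIG)\n", err);

	key = 0;			/* existing key -> replace is allowed */
	err = bpf_map_update_elem(fd, &key, &val, BPF_ANY);
	printf("replace existing key: %d (expect 0)\n", err);
	return 0;
}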
@@ -1513,6 +1565,8 @@ static void htab_map_free(struct bpf_map *map)
 	free_percpu(htab->extra_elems);
 	bpf_map_area_free(htab->buckets);
 	bpf_mem_alloc_destroy(&htab->ma);
+	if (htab->use_percpu_counter)
+		percpu_counter_destroy(&htab->pcount);
 	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
 		free_percpu(htab->map_locked[i]);
 	lockdep_unregister_key(&htab->lockdep_key);
...