Commit 824bd0ce authored by Alexei Starovoitov, committed by David S. Miller

bpf: introduce BPF_MAP_TYPE_PERCPU_HASH map

Introduce the BPF_MAP_TYPE_PERCPU_HASH map type, which is used to keep
accurate counters without the BPF_XADD instruction, which turned
out to be too costly for high-performance network monitoring.
In the typical use case the 'key' is a flow tuple or another long-lived
object that sees many events per second.

bpf_map_lookup_elem() returns a pointer to the this_cpu area of the value.
Example:
struct {
  u32 packets;
  u32 bytes;
} *ptr = bpf_map_lookup_elem(&map, &key);
/* ptr points to the this_cpu area of the value, so the following
 * increments will not collide with other cpus
 */
ptr->packets++;
ptr->bytes += skb->len;

bpf_map_update_elem() atomically creates a new element, where all per-cpu
values are zero-initialized and the this_cpu value is populated with the
given 'value'.
Note that the non-per-cpu hash map always allocates a new element
and then deletes the old one after an RCU grace period to keep the update
atomic. The per-cpu hash map updates element values in-place.
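
For context, here is the same pattern as a complete program in the style of
samples/bpf. It is a sketch only: the map name, the protocol-based key and
the section names are illustrative and not part of this patch, and the
verifier requires the NULL check before the lookup result may be
dereferenced.

#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

struct pair {
        __u64 packets;
        __u64 bytes;
};

struct bpf_map_def SEC("maps") stats_map = {
        .type = BPF_MAP_TYPE_PERCPU_HASH,
        .key_size = sizeof(__u32),
        .value_size = sizeof(struct pair),
        .max_entries = 1024,
};

SEC("socket")
int count_packets(struct __sk_buff *skb)
{
        struct pair init = { .packets = 1, .bytes = skb->len };
        __u32 key = skb->protocol;
        struct pair *ptr;

        ptr = bpf_map_lookup_elem(&stats_map, &key);
        if (ptr) {
                /* this_cpu copy of the value: plain increments,
                 * no BPF_XADD needed
                 */
                ptr->packets++;
                ptr->bytes += skb->len;
        } else {
                /* creates the elem: this_cpu value is set to 'init',
                 * the copies on all other cpus start out zero-filled
                 */
                bpf_map_update_elem(&stats_map, &key, &init, BPF_NOEXIST);
        }
        return 0;
}

char _license[] SEC("license") = "GPL";

Since each CPU only ever touches its own copy of the value, the plain
increments are race-free; a reader obtains a total by summing the
per-CPU copies.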
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ba905f5e
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -81,6 +81,7 @@ enum bpf_map_type {
         BPF_MAP_TYPE_ARRAY,
         BPF_MAP_TYPE_PROG_ARRAY,
         BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+        BPF_MAP_TYPE_PERCPU_HASH,
 };
 
 enum bpf_prog_type {
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -31,21 +31,27 @@ struct bpf_htab {
 struct htab_elem {
         struct hlist_node hash_node;
         struct rcu_head rcu;
-        u32 hash;
+        union {
+                u32 hash;
+                u32 key_size;
+        };
         char key[0] __aligned(8);
 };
 
 /* Called from syscall */
 static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 {
+        bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_HASH;
         struct bpf_htab *htab;
         int err, i;
+        u64 cost;
 
         htab = kzalloc(sizeof(*htab), GFP_USER);
         if (!htab)
                 return ERR_PTR(-ENOMEM);
 
         /* mandatory map attributes */
+        htab->map.map_type = attr->map_type;
         htab->map.key_size = attr->key_size;
         htab->map.value_size = attr->value_size;
         htab->map.max_entries = attr->max_entries;
@@ -77,24 +83,34 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
                  */
                 goto free_htab;
 
+        if (percpu && round_up(htab->map.value_size, 8) > PCPU_MIN_UNIT_SIZE)
+                /* make sure the size for pcpu_alloc() is reasonable */
+                goto free_htab;
+
         htab->elem_size = sizeof(struct htab_elem) +
-                          round_up(htab->map.key_size, 8) +
-                          htab->map.value_size;
+                          round_up(htab->map.key_size, 8);
+        if (percpu)
+                htab->elem_size += sizeof(void *);
+        else
+                htab->elem_size += htab->map.value_size;
 
         /* prevent zero size kmalloc and check for u32 overflow */
         if (htab->n_buckets == 0 ||
             htab->n_buckets > U32_MAX / sizeof(struct bucket))
                 goto free_htab;
 
-        if ((u64) htab->n_buckets * sizeof(struct bucket) +
-            (u64) htab->elem_size * htab->map.max_entries >=
-            U32_MAX - PAGE_SIZE)
+        cost = (u64) htab->n_buckets * sizeof(struct bucket) +
+               (u64) htab->elem_size * htab->map.max_entries;
+
+        if (percpu)
+                cost += (u64) round_up(htab->map.value_size, 8) *
+                        num_possible_cpus() * htab->map.max_entries;
+
+        if (cost >= U32_MAX - PAGE_SIZE)
                 /* make sure page count doesn't overflow */
                 goto free_htab;
 
-        htab->map.pages = round_up(htab->n_buckets * sizeof(struct bucket) +
-                                   htab->elem_size * htab->map.max_entries,
-                                   PAGE_SIZE) >> PAGE_SHIFT;
+        htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
         err = -ENOMEM;
         htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct bucket),
@@ -148,7 +164,7 @@ static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash,
 }
 
 /* Called from syscall or from eBPF program */
-static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
+static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
 {
         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
         struct hlist_head *head;
@@ -166,6 +182,13 @@ static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
 
         l = lookup_elem_raw(head, hash, key, key_size);
 
+        return l;
+}
+
+static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
+{
+        struct htab_elem *l = __htab_map_lookup_elem(map, key);
+
         if (l)
                 return l->key + round_up(map->key_size, 8);
@@ -230,65 +253,139 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
         return -ENOENT;
 }
 
+static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
+                                     void __percpu *pptr)
+{
+        *(void __percpu **)(l->key + key_size) = pptr;
+}
+
+static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
+{
+        return *(void __percpu **)(l->key + key_size);
+}
+
+static void htab_percpu_elem_free(struct htab_elem *l)
+{
+        free_percpu(htab_elem_get_ptr(l, l->key_size));
+        kfree(l);
+}
+
+static void htab_percpu_elem_free_rcu(struct rcu_head *head)
+{
+        struct htab_elem *l = container_of(head, struct htab_elem, rcu);
+
+        htab_percpu_elem_free(l);
+}
+
+static void free_htab_elem(struct htab_elem *l, bool percpu, u32 key_size)
+{
+        if (percpu) {
+                l->key_size = key_size;
+                call_rcu(&l->rcu, htab_percpu_elem_free_rcu);
+        } else {
+                kfree_rcu(l, rcu);
+        }
+}
+
+static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
+                                         void *value, u32 key_size, u32 hash,
+                                         bool percpu)
+{
+        u32 size = htab->map.value_size;
+        struct htab_elem *l_new;
+        void __percpu *pptr;
+
+        l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
+        if (!l_new)
+                return NULL;
+
+        memcpy(l_new->key, key, key_size);
+        if (percpu) {
+                /* round up value_size to 8 bytes */
+                size = round_up(size, 8);
+
+                /* alloc_percpu zero-fills */
+                pptr = __alloc_percpu_gfp(size, 8, GFP_ATOMIC | __GFP_NOWARN);
+                if (!pptr) {
+                        kfree(l_new);
+                        return NULL;
+                }
+
+                /* copy true value_size bytes */
+                memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
+                htab_elem_set_ptr(l_new, key_size, pptr);
+        } else {
+                memcpy(l_new->key + round_up(key_size, 8), value, size);
+        }
+
+        l_new->hash = hash;
+        return l_new;
+}
+
+static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
+                       u64 map_flags)
+{
+        if (!l_old && unlikely(atomic_read(&htab->count) >= htab->map.max_entries))
+                /* if elem with this 'key' doesn't exist and we've reached
+                 * max_entries limit, fail insertion of new elem
+                 */
+                return -E2BIG;
+
+        if (l_old && map_flags == BPF_NOEXIST)
+                /* elem already exists */
+                return -EEXIST;
+
+        if (!l_old && map_flags == BPF_EXIST)
+                /* elem doesn't exist, cannot update it */
+                return -ENOENT;
+
+        return 0;
+}
+
 /* Called from syscall or from eBPF program */
 static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
                                 u64 map_flags)
 {
         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-        struct htab_elem *l_new, *l_old;
+        struct htab_elem *l_new = NULL, *l_old;
         struct hlist_head *head;
-        struct bucket *b;
         unsigned long flags;
-        u32 key_size;
+        struct bucket *b;
+        u32 key_size, hash;
         int ret;
 
-        if (map_flags > BPF_EXIST)
+        if (unlikely(map_flags > BPF_EXIST))
                 /* unknown flags */
                 return -EINVAL;
 
         WARN_ON_ONCE(!rcu_read_lock_held());
 
-        /* allocate new element outside of lock */
-        l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
-        if (!l_new)
-                return -ENOMEM;
-
         key_size = map->key_size;
 
-        memcpy(l_new->key, key, key_size);
-        memcpy(l_new->key + round_up(key_size, 8), value, map->value_size);
+        hash = htab_map_hash(key, key_size);
+
+        /* allocate new element outside of the lock, since
+         * we're most likely going to insert it
+         */
+        l_new = alloc_htab_elem(htab, key, value, key_size, hash, false);
+        if (!l_new)
+                return -ENOMEM;
 
-        l_new->hash = htab_map_hash(l_new->key, key_size);
-        b = __select_bucket(htab, l_new->hash);
+        b = __select_bucket(htab, hash);
         head = &b->head;
 
         /* bpf_map_update_elem() can be called in_irq() */
         raw_spin_lock_irqsave(&b->lock, flags);
 
-        l_old = lookup_elem_raw(head, l_new->hash, key, key_size);
+        l_old = lookup_elem_raw(head, hash, key, key_size);
 
-        if (!l_old && unlikely(atomic_read(&htab->count) >= map->max_entries)) {
-                /* if elem with this 'key' doesn't exist and we've reached
-                 * max_entries limit, fail insertion of new elem
-                 */
-                ret = -E2BIG;
+        ret = check_flags(htab, l_old, map_flags);
+        if (ret)
                 goto err;
-        }
 
-        if (l_old && map_flags == BPF_NOEXIST) {
-                /* elem already exists */
-                ret = -EEXIST;
-                goto err;
-        }
-
-        if (!l_old && map_flags == BPF_EXIST) {
-                /* elem doesn't exist, cannot update it */
-                ret = -ENOENT;
-                goto err;
-        }
-
-        /* add new element to the head of the list, so that concurrent
-         * search will find it before old elem
+        /* add new element to the head of the list, so that
+         * concurrent search will find it before old elem
          */
         hlist_add_head_rcu(&l_new->hash_node, head);
         if (l_old) {
@@ -298,7 +395,6 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
                 atomic_inc(&htab->count);
         }
         raw_spin_unlock_irqrestore(&b->lock, flags);
-
         return 0;
 err:
         raw_spin_unlock_irqrestore(&b->lock, flags);
@@ -306,10 +402,64 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
         return ret;
 }
 
+static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
+                                       void *value, u64 map_flags)
+{
+        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+        struct htab_elem *l_new = NULL, *l_old;
+        struct hlist_head *head;
+        unsigned long flags;
+        struct bucket *b;
+        u32 key_size, hash;
+        int ret;
+
+        if (unlikely(map_flags > BPF_EXIST))
+                /* unknown flags */
+                return -EINVAL;
+
+        WARN_ON_ONCE(!rcu_read_lock_held());
+
+        key_size = map->key_size;
+
+        hash = htab_map_hash(key, key_size);
+
+        b = __select_bucket(htab, hash);
+        head = &b->head;
+
+        /* bpf_map_update_elem() can be called in_irq() */
+        raw_spin_lock_irqsave(&b->lock, flags);
+
+        l_old = lookup_elem_raw(head, hash, key, key_size);
+
+        ret = check_flags(htab, l_old, map_flags);
+        if (ret)
+                goto err;
+
+        if (l_old) {
+                /* per-cpu hash map can update value in-place */
+                memcpy(this_cpu_ptr(htab_elem_get_ptr(l_old, key_size)),
+                       value, htab->map.value_size);
+        } else {
+                l_new = alloc_htab_elem(htab, key, value, key_size,
+                                        hash, true);
+                if (!l_new) {
+                        ret = -ENOMEM;
+                        goto err;
+                }
+                hlist_add_head_rcu(&l_new->hash_node, head);
+                atomic_inc(&htab->count);
+        }
+        ret = 0;
+err:
+        raw_spin_unlock_irqrestore(&b->lock, flags);
+        return ret;
+}
+
 /* Called from syscall or from eBPF program */
 static int htab_map_delete_elem(struct bpf_map *map, void *key)
 {
         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+        bool percpu = map->map_type == BPF_MAP_TYPE_PERCPU_HASH;
         struct hlist_head *head;
         struct bucket *b;
         struct htab_elem *l;
@@ -332,7 +482,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
         if (l) {
                 hlist_del_rcu(&l->hash_node);
                 atomic_dec(&htab->count);
-                kfree_rcu(l, rcu);
+                free_htab_elem(l, percpu, key_size);
                 ret = 0;
         }
@@ -352,9 +502,14 @@ static void delete_all_elements(struct bpf_htab *htab)
                 hlist_for_each_entry_safe(l, n, head, hash_node) {
                         hlist_del_rcu(&l->hash_node);
                         atomic_dec(&htab->count);
-                        kfree(l);
+                        if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH) {
+                                l->key_size = htab->map.key_size;
+                                htab_percpu_elem_free(l);
+                        } else {
+                                kfree(l);
+                        }
                 }
         }
 }
 
 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
@@ -391,9 +546,35 @@ static struct bpf_map_type_list htab_type __read_mostly = {
         .type = BPF_MAP_TYPE_HASH,
 };
 
+/* Called from eBPF program */
+static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
+{
+        struct htab_elem *l = __htab_map_lookup_elem(map, key);
+
+        if (l)
+                return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
+        else
+                return NULL;
+}
+
+static const struct bpf_map_ops htab_percpu_ops = {
+        .map_alloc = htab_map_alloc,
+        .map_free = htab_map_free,
+        .map_get_next_key = htab_map_get_next_key,
+        .map_lookup_elem = htab_percpu_map_lookup_elem,
+        .map_update_elem = htab_percpu_map_update_elem,
+        .map_delete_elem = htab_map_delete_elem,
+};
+
+static struct bpf_map_type_list htab_percpu_type __read_mostly = {
+        .ops = &htab_percpu_ops,
+        .type = BPF_MAP_TYPE_PERCPU_HASH,
+};
+
 static int __init register_htab_map(void)
 {
         bpf_register_map_type(&htab_type);
+        bpf_register_map_type(&htab_percpu_type);
         return 0;
 }
 late_initcall(register_htab_map);
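
A reading aid, not part of the patch: the element layouts implied by
alloc_htab_elem() and the htab_elem_set_ptr()/htab_elem_get_ptr() pair are

  regular hash map elem:  | struct htab_elem | key, padded to 8 | value |
  per-cpu hash map elem:  | struct htab_elem | key | void __percpu *pptr |

In the per-cpu case the values themselves live in a separate
__alloc_percpu_gfp() area of round_up(value_size, 8) bytes per possible CPU,
and the element stores only the per-cpu pointer, at l->key + key_size. This
is also why 'hash' and 'key_size' can share a union: once an element has been
unlinked and is waiting out the RCU grace period, its hash is no longer
needed, so free_htab_elem() reuses the slot to pass key_size to the RCU
callback, which needs it to locate the per-cpu pointer to free.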