Commit f3f1c054 authored by Martin KaFai Lau, committed by David S. Miller

bpf: Introduce bpf_map ID

This patch generates a unique ID for each created bpf_map.
The approach is similar to the earlier patch for the bpf_prog ID.

It is worth noting that the bpf_map ID and the bpf_prog ID live in
two independent ID spaces, and both have the same valid range:
[1, INT_MAX).
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Acked-by: Alexei Starovoitov <ast@fb.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent dc4bb0e2
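Both ID spaces are built on the same pattern: a dedicated IDR plus a BH-disabling
spinlock, with cyclic allocation restricted to [1, INT_MAX). Below is a minimal,
self-contained sketch of that pattern using hypothetical names (example_idr,
example_alloc_id); it only illustrates the idr_alloc_cyclic() semantics the patch
relies on and is not the exact kernel code (the real hunks follow).

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(example_idr);            /* one IDR per ID space            */
static DEFINE_SPINLOCK(example_idr_lock);  /* serializes alloc/free of IDs    */

static int example_alloc_id(void *obj, u32 *idp)
{
	int id;

	spin_lock_bh(&example_idr_lock);
	/*
	 * idr_alloc_cyclic() hands out IDs in [1, INT_MAX) and resumes the
	 * search after the last ID it returned, so a freed ID is not
	 * immediately reused.  GFP_ATOMIC because a spinlock is held.
	 */
	id = idr_alloc_cyclic(&example_idr, obj, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		*idp = id;
	spin_unlock_bh(&example_idr_lock);

	return id > 0 ? 0 : id;		/* negative errno (e.g. -ENOSPC) on failure */
}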
include/linux/bpf.h
@@ -46,6 +46,7 @@ struct bpf_map {
 	u32 max_entries;
 	u32 map_flags;
 	u32 pages;
+	u32 id;
 	struct user_struct *user;
 	const struct bpf_map_ops *ops;
 	struct work_struct work;
kernel/bpf/syscall.c
@@ -27,6 +27,8 @@
 
 DEFINE_PER_CPU(int, bpf_prog_active);
 static DEFINE_IDR(prog_idr);
 static DEFINE_SPINLOCK(prog_idr_lock);
+static DEFINE_IDR(map_idr);
+static DEFINE_SPINLOCK(map_idr_lock);
 
 int sysctl_unprivileged_bpf_disabled __read_mostly;
@@ -117,6 +119,29 @@ static void bpf_map_uncharge_memlock(struct bpf_map *map)
 	free_uid(user);
 }
 
+static int bpf_map_alloc_id(struct bpf_map *map)
+{
+	int id;
+
+	spin_lock_bh(&map_idr_lock);
+	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
+	if (id > 0)
+		map->id = id;
+	spin_unlock_bh(&map_idr_lock);
+
+	if (WARN_ON_ONCE(!id))
+		return -ENOSPC;
+
+	return id > 0 ? 0 : id;
+}
+
+static void bpf_map_free_id(struct bpf_map *map)
+{
+	spin_lock_bh(&map_idr_lock);
+	idr_remove(&map_idr, map->id);
+	spin_unlock_bh(&map_idr_lock);
+}
+
 /* called from workqueue */
 static void bpf_map_free_deferred(struct work_struct *work)
 {
@@ -141,6 +166,7 @@ static void bpf_map_put_uref(struct bpf_map *map)
 void bpf_map_put(struct bpf_map *map)
 {
 	if (atomic_dec_and_test(&map->refcnt)) {
+		bpf_map_free_id(map);
 		INIT_WORK(&map->work, bpf_map_free_deferred);
 		schedule_work(&map->work);
 	}
@@ -239,14 +265,20 @@ static int map_create(union bpf_attr *attr)
 	if (err)
 		goto free_map_nouncharge;
 
+	err = bpf_map_alloc_id(map);
+	if (err)
+		goto free_map;
+
 	err = bpf_map_new_fd(map);
 	if (err < 0)
 		/* failed to allocate fd */
-		goto free_map;
+		goto free_id;
 
 	trace_bpf_map_create(map, err);
 	return err;
 
+free_id:
+	bpf_map_free_id(map);
 free_map:
 	bpf_map_uncharge_memlock(map);
 free_map_nouncharge:
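This patch only assigns and stores the ID; nothing exposes it to userspace yet.
Follow-up patches in the same series add BPF_MAP_GET_NEXT_ID and
BPF_MAP_GET_FD_BY_ID commands to the bpf(2) syscall. Assuming a kernel and uapi
headers that include those later patches, the map ID space can be walked from
userspace roughly as follows (a sketch, not part of this commit):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Walks all bpf_map IDs via BPF_MAP_GET_NEXT_ID (requires CAP_SYS_ADMIN
 * and a kernel with the follow-up patches in this series). */
int main(void)
{
	union bpf_attr attr;
	__u32 id = 0;

	for (;;) {
		memset(&attr, 0, sizeof(attr));
		attr.start_id = id;
		if (syscall(__NR_bpf, BPF_MAP_GET_NEXT_ID, &attr, sizeof(attr)))
			break;			/* -ENOENT: no more maps */
		id = attr.next_id;
		printf("bpf_map id %u\n", id);
	}
	return 0;
}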