Commit 92117d84 authored by Alexei Starovoitov's avatar Alexei Starovoitov Committed by David S. Miller

bpf: fix refcnt overflow

On a system with >32Gbyte of physical memory and infinite RLIMIT_MEMLOCK,
the malicious application may overflow 32-bit bpf program refcnt.
It's also possible to overflow map refcnt on 1Tb system.
Impose 32k hard limit which means that the same bpf program or
map cannot be shared by more than 32k processes.

Fixes: 1be7f75d ("bpf: enable non-root eBPF programs")
Reported-by: Jann Horn <jannh@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent bd34cf66
...@@ -171,12 +171,13 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl); ...@@ -171,12 +171,13 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl);
void bpf_register_map_type(struct bpf_map_type_list *tl); void bpf_register_map_type(struct bpf_map_type_list *tl);
struct bpf_prog *bpf_prog_get(u32 ufd); struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog); void bpf_prog_put(struct bpf_prog *prog);
void bpf_prog_put_rcu(struct bpf_prog *prog); void bpf_prog_put_rcu(struct bpf_prog *prog);
struct bpf_map *bpf_map_get_with_uref(u32 ufd); struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f); struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map, bool uref); struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map); void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map); void bpf_map_put(struct bpf_map *map);
int bpf_map_precharge_memlock(u32 pages); int bpf_map_precharge_memlock(u32 pages);
......
...@@ -31,10 +31,10 @@ static void *bpf_any_get(void *raw, enum bpf_type type) ...@@ -31,10 +31,10 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
{ {
switch (type) { switch (type) {
case BPF_TYPE_PROG: case BPF_TYPE_PROG:
atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt); raw = bpf_prog_inc(raw);
break; break;
case BPF_TYPE_MAP: case BPF_TYPE_MAP:
bpf_map_inc(raw, true); raw = bpf_map_inc(raw, true);
break; break;
default: default:
WARN_ON_ONCE(1); WARN_ON_ONCE(1);
...@@ -297,6 +297,7 @@ static void *bpf_obj_do_get(const struct filename *pathname, ...@@ -297,6 +297,7 @@ static void *bpf_obj_do_get(const struct filename *pathname,
goto out; goto out;
raw = bpf_any_get(inode->i_private, *type); raw = bpf_any_get(inode->i_private, *type);
if (!IS_ERR(raw))
touch_atime(&path); touch_atime(&path);
path_put(&path); path_put(&path);
......
...@@ -218,11 +218,18 @@ struct bpf_map *__bpf_map_get(struct fd f) ...@@ -218,11 +218,18 @@ struct bpf_map *__bpf_map_get(struct fd f)
return f.file->private_data; return f.file->private_data;
} }
void bpf_map_inc(struct bpf_map *map, bool uref) /* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768
struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{ {
atomic_inc(&map->refcnt); if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
atomic_dec(&map->refcnt);
return ERR_PTR(-EBUSY);
}
if (uref) if (uref)
atomic_inc(&map->usercnt); atomic_inc(&map->usercnt);
return map;
} }
struct bpf_map *bpf_map_get_with_uref(u32 ufd) struct bpf_map *bpf_map_get_with_uref(u32 ufd)
...@@ -234,7 +241,7 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd) ...@@ -234,7 +241,7 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
if (IS_ERR(map)) if (IS_ERR(map))
return map; return map;
bpf_map_inc(map, true); map = bpf_map_inc(map, true);
fdput(f); fdput(f);
return map; return map;
...@@ -658,6 +665,15 @@ static struct bpf_prog *__bpf_prog_get(struct fd f) ...@@ -658,6 +665,15 @@ static struct bpf_prog *__bpf_prog_get(struct fd f)
return f.file->private_data; return f.file->private_data;
} }
/* Take an extra reference on @prog, enforcing the 32k sharing cap.
 *
 * Returns @prog on success, or ERR_PTR(-EBUSY) once the refcnt would
 * exceed BPF_MAX_REFCNT (32768) — the hard limit this commit imposes so
 * a malicious app cannot overflow the 32-bit refcnt. On failure the
 * speculative increment is undone before returning.
 * NOTE(review): callers must now check IS_ERR() on the result — the old
 * bare atomic_inc() could not fail.
 */
struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) {
/* over the cap: roll back the increment and report busy */
atomic_dec(&prog->aux->refcnt);
return ERR_PTR(-EBUSY);
}
return prog;
}
/* called by sockets/tracing/seccomp before attaching program to an event /* called by sockets/tracing/seccomp before attaching program to an event
* pairs with bpf_prog_put() * pairs with bpf_prog_put()
*/ */
...@@ -670,7 +686,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd) ...@@ -670,7 +686,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd)
if (IS_ERR(prog)) if (IS_ERR(prog))
return prog; return prog;
atomic_inc(&prog->aux->refcnt); prog = bpf_prog_inc(prog);
fdput(f); fdput(f);
return prog; return prog;
......
...@@ -2049,15 +2049,18 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env) ...@@ -2049,15 +2049,18 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
return -E2BIG; return -E2BIG;
} }
/* remember this map */
env->used_maps[env->used_map_cnt++] = map;
/* hold the map. If the program is rejected by verifier, /* hold the map. If the program is rejected by verifier,
* the map will be released by release_maps() or it * the map will be released by release_maps() or it
* will be used by the valid program until it's unloaded * will be used by the valid program until it's unloaded
* and all maps are released in free_bpf_prog_info() * and all maps are released in free_bpf_prog_info()
*/ */
bpf_map_inc(map, false); map = bpf_map_inc(map, false);
if (IS_ERR(map)) {
fdput(f);
return PTR_ERR(map);
}
env->used_maps[env->used_map_cnt++] = map;
fdput(f); fdput(f);
next_insn: next_insn:
insn++; insn++;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment