Commit 1dfcd832 authored by David S. Miller

Merge branch 'bpf-fixes'

Alexei Starovoitov says:

====================
bpf: fix several bugs

First two patches address bugs found by Jann Horn.
Last patch is a minor samples fix spotted during the testing.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents bd34cf66 569cc39d
@@ -171,12 +171,13 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl);
 void bpf_register_map_type(struct bpf_map_type_list *tl);
 struct bpf_prog *bpf_prog_get(u32 ufd);
+struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
 void bpf_prog_put(struct bpf_prog *prog);
 void bpf_prog_put_rcu(struct bpf_prog *prog);
 struct bpf_map *bpf_map_get_with_uref(u32 ufd);
 struct bpf_map *__bpf_map_get(struct fd f);
-void bpf_map_inc(struct bpf_map *map, bool uref);
+struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
 void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 int bpf_map_precharge_memlock(u32 pages);
@@ -31,10 +31,10 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
 {
         switch (type) {
         case BPF_TYPE_PROG:
-                atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt);
+                raw = bpf_prog_inc(raw);
                 break;
         case BPF_TYPE_MAP:
-                bpf_map_inc(raw, true);
+                raw = bpf_map_inc(raw, true);
                 break;
         default:
                 WARN_ON_ONCE(1);
@@ -297,6 +297,7 @@ static void *bpf_obj_do_get(const struct filename *pathname,
                 goto out;
         raw = bpf_any_get(inode->i_private, *type);
-        touch_atime(&path);
+        if (!IS_ERR(raw))
+                touch_atime(&path);
         path_put(&path);
@@ -218,11 +218,18 @@ struct bpf_map *__bpf_map_get(struct fd f)
         return f.file->private_data;
 }
-void bpf_map_inc(struct bpf_map *map, bool uref)
+/* prog's and map's refcnt limit */
+#define BPF_MAX_REFCNT 32768
+struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
 {
-        atomic_inc(&map->refcnt);
+        if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
+                atomic_dec(&map->refcnt);
+                return ERR_PTR(-EBUSY);
+        }
         if (uref)
                 atomic_inc(&map->usercnt);
+        return map;
 }
 struct bpf_map *bpf_map_get_with_uref(u32 ufd)
@@ -234,7 +241,7 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
         if (IS_ERR(map))
                 return map;
-        bpf_map_inc(map, true);
+        map = bpf_map_inc(map, true);
         fdput(f);
         return map;
@@ -658,6 +665,15 @@ static struct bpf_prog *__bpf_prog_get(struct fd f)
         return f.file->private_data;
 }
+struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
+{
+        if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) {
+                atomic_dec(&prog->aux->refcnt);
+                return ERR_PTR(-EBUSY);
+        }
+        return prog;
+}
 /* called by sockets/tracing/seccomp before attaching program to an event
  * pairs with bpf_prog_put()
  */
@@ -670,7 +686,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd)
         if (IS_ERR(prog))
                 return prog;
-        atomic_inc(&prog->aux->refcnt);
+        prog = bpf_prog_inc(prog);
         fdput(f);
         return prog;
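For illustration only, not part of this commit: the hunks above cap both reference counts at BPF_MAX_REFCNT, so a 32-bit count can no longer be wrapped around to zero and cause a premature free of an object that is still in use. Below is a minimal userspace sketch of that take-or-fail pattern; the obj/obj_tryget names are invented, C11 atomics stand in for the kernel's atomic_t, and the failure path returns false where the kernel code returns ERR_PTR(-EBUSY).

/* Illustrative sketch of the capped-refcount pattern, not kernel code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define OBJ_MAX_REFCNT 32768            /* same cap as BPF_MAX_REFCNT above */

struct obj {
        atomic_int refcnt;
};

/* Increment, compare the new value against the cap, and back the
 * increment out on failure (the atomic_inc_return()/atomic_dec() dance
 * in bpf_map_inc()), so the counter can never overflow.
 */
static bool obj_tryget(struct obj *o)
{
        if (atomic_fetch_add(&o->refcnt, 1) + 1 > OBJ_MAX_REFCNT) {
                atomic_fetch_sub(&o->refcnt, 1);
                return false;           /* kernel code: return ERR_PTR(-EBUSY) */
        }
        return true;
}

int main(void)
{
        struct obj o;
        long granted = 0;

        atomic_init(&o.refcnt, 1);      /* one reference held by the owner */

        /* Ask for far more references than the cap allows. */
        for (long i = 0; i < 100000; i++)
                if (obj_tryget(&o))
                        granted++;

        printf("granted %ld references, refcnt is now %d\n",
               granted, atomic_load(&o.refcnt));
        return 0;
}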
@@ -239,16 +239,6 @@ static const char * const reg_type_str[] = {
         [CONST_IMM]     = "imm",
 };
-static const struct {
-        int map_type;
-        int func_id;
-} func_limit[] = {
-        {BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
-        {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
-        {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output},
-        {BPF_MAP_TYPE_STACK_TRACE, BPF_FUNC_get_stackid},
-};
 static void print_verifier_state(struct verifier_env *env)
 {
         enum bpf_reg_type t;
@@ -921,27 +911,52 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
 static int check_map_func_compatibility(struct bpf_map *map, int func_id)
 {
-        bool bool_map, bool_func;
-        int i;
         if (!map)
                 return 0;
-        for (i = 0; i < ARRAY_SIZE(func_limit); i++) {
-                bool_map = (map->map_type == func_limit[i].map_type);
-                bool_func = (func_id == func_limit[i].func_id);
-                /* only when map & func pair match it can continue.
-                 * don't allow any other map type to be passed into
-                 * the special func;
-                 */
-                if (bool_func && bool_map != bool_func) {
-                        verbose("cannot pass map_type %d into func %d\n",
-                                map->map_type, func_id);
-                        return -EINVAL;
-                }
-        }
+        /* We need a two way check, first is from map perspective ... */
+        switch (map->map_type) {
+        case BPF_MAP_TYPE_PROG_ARRAY:
+                if (func_id != BPF_FUNC_tail_call)
+                        goto error;
+                break;
+        case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
+                if (func_id != BPF_FUNC_perf_event_read &&
+                    func_id != BPF_FUNC_perf_event_output)
+                        goto error;
+                break;
+        case BPF_MAP_TYPE_STACK_TRACE:
+                if (func_id != BPF_FUNC_get_stackid)
+                        goto error;
+                break;
+        default:
+                break;
+        }
+        /* ... and second from the function itself. */
+        switch (func_id) {
+        case BPF_FUNC_tail_call:
+                if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
+                        goto error;
+                break;
+        case BPF_FUNC_perf_event_read:
+        case BPF_FUNC_perf_event_output:
+                if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
+                        goto error;
+                break;
+        case BPF_FUNC_get_stackid:
+                if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
+                        goto error;
+                break;
+        default:
+                break;
+        }
         return 0;
+error:
+        verbose("cannot pass map_type %d into func %d\n",
+                map->map_type, func_id);
+        return -EINVAL;
 }
 static int check_call(struct verifier_env *env, int func_id)
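For illustration only, not part of this commit: a toy, self-contained C sketch of the "two way check" described in the comments above. The enums and names are invented. The old func_limit[] loop only rejected a special helper called with the wrong map type; the reverse pairing, a special map type handed to a generic helper, slipped through, which the map-perspective switch now catches.

#include <stdbool.h>
#include <stdio.h>

enum toy_map_type { TOY_MAP_HASH, TOY_MAP_PROG_ARRAY };
enum toy_func_id  { TOY_FUNC_map_lookup, TOY_FUNC_tail_call };

/* One-way check, in the spirit of the removed func_limit[] loop:
 * only "special helper needs its matching map type" is enforced.
 */
static bool compat_one_way(enum toy_map_type map, enum toy_func_id func)
{
        if (func == TOY_FUNC_tail_call && map != TOY_MAP_PROG_ARRAY)
                return false;
        return true;
}

/* Two-way check, in the spirit of the new code: additionally, a special
 * map type may only be used with its dedicated helper.
 */
static bool compat_two_way(enum toy_map_type map, enum toy_func_id func)
{
        switch (map) {                  /* first, from the map's perspective */
        case TOY_MAP_PROG_ARRAY:
                if (func != TOY_FUNC_tail_call)
                        return false;
                break;
        default:
                break;
        }
        switch (func) {                 /* second, from the helper's perspective */
        case TOY_FUNC_tail_call:
                if (map != TOY_MAP_PROG_ARRAY)
                        return false;
                break;
        default:
                break;
        }
        return true;
}

int main(void)
{
        /* A prog-array map passed to a generic lookup helper: the one-way
         * check accepts the pair, the two-way check rejects it.
         */
        printf("one-way: %s\n",
               compat_one_way(TOY_MAP_PROG_ARRAY, TOY_FUNC_map_lookup) ? "ok" : "rejected");
        printf("two-way: %s\n",
               compat_two_way(TOY_MAP_PROG_ARRAY, TOY_FUNC_map_lookup) ? "ok" : "rejected");
        return 0;
}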
@@ -2049,15 +2064,18 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
                                 return -E2BIG;
                         }
-                        /* remember this map */
-                        env->used_maps[env->used_map_cnt++] = map;
                         /* hold the map. If the program is rejected by verifier,
                          * the map will be released by release_maps() or it
                          * will be used by the valid program until it's unloaded
                          * and all maps are released in free_bpf_prog_info()
                          */
-                        bpf_map_inc(map, false);
+                        map = bpf_map_inc(map, false);
+                        if (IS_ERR(map)) {
+                                fdput(f);
+                                return PTR_ERR(map);
+                        }
+                        env->used_maps[env->used_map_cnt++] = map;
                         fdput(f);
 next_insn:
                         insn++;
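For illustration only, not part of this commit: a self-contained sketch of the lifetime rule in the "hold the map" comment above, with invented res/verifier_env_sketch names. One reference is taken per map recorded in used_maps[], and the hunk reorders things so the pointer is stored only after that reference was actually obtained; the release_maps()-style cleanup on rejection then drops exactly what was taken.

#include <stdbool.h>
#include <stdio.h>

#define MAX_USED  8
#define REF_LIMIT 4                     /* stand-in for BPF_MAX_REFCNT */

struct res {
        int refcnt;
};

static bool res_tryget(struct res *r)
{
        if (r->refcnt >= REF_LIMIT)
                return false;
        r->refcnt++;
        return true;
}

static void res_put(struct res *r)
{
        r->refcnt--;
}

struct verifier_env_sketch {
        struct res *used[MAX_USED];
        int used_cnt;
};

/* Take the reference first and record the pointer only on success, the
 * same ordering the hunk above introduces, so the cleanup path never
 * drops a reference that was never taken.
 */
static int env_record(struct verifier_env_sketch *env, struct res *r)
{
        if (env->used_cnt >= MAX_USED)
                return -1;
        if (!res_tryget(r))
                return -1;              /* kernel code: return PTR_ERR(map) */
        env->used[env->used_cnt++] = r;
        return 0;
}

/* Counterpart of release_maps(): runs when the "program" is rejected. */
static void env_release_all(struct verifier_env_sketch *env)
{
        while (env->used_cnt > 0)
                res_put(env->used[--env->used_cnt]);
}

int main(void)
{
        struct res map = { .refcnt = 1 };               /* owner's reference */
        struct verifier_env_sketch env = { .used_cnt = 0 };

        for (int i = 0; i < 6; i++)
                if (env_record(&env, &map) < 0)
                        printf("record %d refused, refcnt stays at %d\n",
                               i, map.refcnt);

        env_release_all(&env);          /* rejection path */
        printf("after release: refcnt back to %d\n", map.refcnt);
        return 0;
}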
@@ -18,7 +18,6 @@ int bpf_prog1(struct pt_regs *ctx)
                 u64 cookie;
         } data;
-        memset(&data, 0, sizeof(data));
         data.pid = bpf_get_current_pid_tgid();
         data.cookie = 0x12345678;