Commit 3af43ba4 authored by Hou Tao's avatar Hou Tao Committed by Daniel Borkmann

bpf: Pass map file to .map_update_batch directly

Currently bpf_map_do_batch() first invokes fdget(batch.map_fd) to get
the target map file, then it invokes generic_map_update_batch() to do
batch update. generic_map_update_batch() will get the target map file
by using fdget(batch.map_fd) again and pass it to bpf_map_update_value().

The problem is that the map file returned by the second fdget() may be NULL
or a totally different file compared with the map file in bpf_map_do_batch(). The
reason is that the first fdget() only guarantees the liveness of the struct
file instead of the file descriptor, and the file descriptor may be released
by a concurrent close() through pick_file().

It doesn't incur any problem as of now, because maps with batch update
support don't use the map file in the .map_fd_get_ptr() ops. But it is better
to fix the potential access of an invalid map file.

Using __bpf_map_get() again in generic_map_update_batch() cannot fix
the problem, because batch.map_fd may be closed and reopened, and the
returned map file may be different from the map file obtained in
bpf_map_do_batch(), so just pass the map file directly to
.map_update_batch() in bpf_map_do_batch().
Signed-off-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20221116075059.1551277-1-houtao@huaweicloud.com
parent 383f1a8d
...@@ -85,7 +85,8 @@ struct bpf_map_ops { ...@@ -85,7 +85,8 @@ struct bpf_map_ops {
int (*map_lookup_and_delete_batch)(struct bpf_map *map, int (*map_lookup_and_delete_batch)(struct bpf_map *map,
const union bpf_attr *attr, const union bpf_attr *attr,
union bpf_attr __user *uattr); union bpf_attr __user *uattr);
int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr, int (*map_update_batch)(struct bpf_map *map, struct file *map_file,
const union bpf_attr *attr,
union bpf_attr __user *uattr); union bpf_attr __user *uattr);
int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr, int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
union bpf_attr __user *uattr); union bpf_attr __user *uattr);
...@@ -1789,7 +1790,7 @@ void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); ...@@ -1789,7 +1790,7 @@ void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int generic_map_lookup_batch(struct bpf_map *map, int generic_map_lookup_batch(struct bpf_map *map,
const union bpf_attr *attr, const union bpf_attr *attr,
union bpf_attr __user *uattr); union bpf_attr __user *uattr);
int generic_map_update_batch(struct bpf_map *map, int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
const union bpf_attr *attr, const union bpf_attr *attr,
union bpf_attr __user *uattr); union bpf_attr __user *uattr);
int generic_map_delete_batch(struct bpf_map *map, int generic_map_delete_batch(struct bpf_map *map,
......
...@@ -175,8 +175,8 @@ static void maybe_wait_bpf_programs(struct bpf_map *map) ...@@ -175,8 +175,8 @@ static void maybe_wait_bpf_programs(struct bpf_map *map)
synchronize_rcu(); synchronize_rcu();
} }
static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key, static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
void *value, __u64 flags) void *key, void *value, __u64 flags)
{ {
int err; int err;
...@@ -190,7 +190,7 @@ static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key, ...@@ -190,7 +190,7 @@ static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
map->map_type == BPF_MAP_TYPE_SOCKMAP) { map->map_type == BPF_MAP_TYPE_SOCKMAP) {
return sock_map_update_elem_sys(map, key, value, flags); return sock_map_update_elem_sys(map, key, value, flags);
} else if (IS_FD_PROG_ARRAY(map)) { } else if (IS_FD_PROG_ARRAY(map)) {
return bpf_fd_array_map_update_elem(map, f.file, key, value, return bpf_fd_array_map_update_elem(map, map_file, key, value,
flags); flags);
} }
...@@ -205,12 +205,12 @@ static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key, ...@@ -205,12 +205,12 @@ static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
flags); flags);
} else if (IS_FD_ARRAY(map)) { } else if (IS_FD_ARRAY(map)) {
rcu_read_lock(); rcu_read_lock();
err = bpf_fd_array_map_update_elem(map, f.file, key, value, err = bpf_fd_array_map_update_elem(map, map_file, key, value,
flags); flags);
rcu_read_unlock(); rcu_read_unlock();
} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
rcu_read_lock(); rcu_read_lock();
err = bpf_fd_htab_map_update_elem(map, f.file, key, value, err = bpf_fd_htab_map_update_elem(map, map_file, key, value,
flags); flags);
rcu_read_unlock(); rcu_read_unlock();
} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
...@@ -1410,7 +1410,7 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr) ...@@ -1410,7 +1410,7 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
goto free_key; goto free_key;
} }
err = bpf_map_update_value(map, f, key, value, attr->flags); err = bpf_map_update_value(map, f.file, key, value, attr->flags);
kvfree(value); kvfree(value);
free_key: free_key:
...@@ -1596,16 +1596,14 @@ int generic_map_delete_batch(struct bpf_map *map, ...@@ -1596,16 +1596,14 @@ int generic_map_delete_batch(struct bpf_map *map,
return err; return err;
} }
int generic_map_update_batch(struct bpf_map *map, int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
const union bpf_attr *attr, const union bpf_attr *attr,
union bpf_attr __user *uattr) union bpf_attr __user *uattr)
{ {
void __user *values = u64_to_user_ptr(attr->batch.values); void __user *values = u64_to_user_ptr(attr->batch.values);
void __user *keys = u64_to_user_ptr(attr->batch.keys); void __user *keys = u64_to_user_ptr(attr->batch.keys);
u32 value_size, cp, max_count; u32 value_size, cp, max_count;
int ufd = attr->batch.map_fd;
void *key, *value; void *key, *value;
struct fd f;
int err = 0; int err = 0;
if (attr->batch.elem_flags & ~BPF_F_LOCK) if (attr->batch.elem_flags & ~BPF_F_LOCK)
...@@ -1632,7 +1630,6 @@ int generic_map_update_batch(struct bpf_map *map, ...@@ -1632,7 +1630,6 @@ int generic_map_update_batch(struct bpf_map *map,
return -ENOMEM; return -ENOMEM;
} }
f = fdget(ufd); /* bpf_map_do_batch() guarantees ufd is valid */
for (cp = 0; cp < max_count; cp++) { for (cp = 0; cp < max_count; cp++) {
err = -EFAULT; err = -EFAULT;
if (copy_from_user(key, keys + cp * map->key_size, if (copy_from_user(key, keys + cp * map->key_size,
...@@ -1640,7 +1637,7 @@ int generic_map_update_batch(struct bpf_map *map, ...@@ -1640,7 +1637,7 @@ int generic_map_update_batch(struct bpf_map *map,
copy_from_user(value, values + cp * value_size, value_size)) copy_from_user(value, values + cp * value_size, value_size))
break; break;
err = bpf_map_update_value(map, f, key, value, err = bpf_map_update_value(map, map_file, key, value,
attr->batch.elem_flags); attr->batch.elem_flags);
if (err) if (err)
...@@ -1653,7 +1650,6 @@ int generic_map_update_batch(struct bpf_map *map, ...@@ -1653,7 +1650,6 @@ int generic_map_update_batch(struct bpf_map *map,
kvfree(value); kvfree(value);
kvfree(key); kvfree(key);
fdput(f);
return err; return err;
} }
...@@ -4446,13 +4442,13 @@ static int bpf_task_fd_query(const union bpf_attr *attr, ...@@ -4446,13 +4442,13 @@ static int bpf_task_fd_query(const union bpf_attr *attr,
#define BPF_MAP_BATCH_LAST_FIELD batch.flags #define BPF_MAP_BATCH_LAST_FIELD batch.flags
#define BPF_DO_BATCH(fn) \ #define BPF_DO_BATCH(fn, ...) \
do { \ do { \
if (!fn) { \ if (!fn) { \
err = -ENOTSUPP; \ err = -ENOTSUPP; \
goto err_put; \ goto err_put; \
} \ } \
err = fn(map, attr, uattr); \ err = fn(__VA_ARGS__); \
} while (0) } while (0)
static int bpf_map_do_batch(const union bpf_attr *attr, static int bpf_map_do_batch(const union bpf_attr *attr,
...@@ -4486,13 +4482,13 @@ static int bpf_map_do_batch(const union bpf_attr *attr, ...@@ -4486,13 +4482,13 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
} }
if (cmd == BPF_MAP_LOOKUP_BATCH) if (cmd == BPF_MAP_LOOKUP_BATCH)
BPF_DO_BATCH(map->ops->map_lookup_batch); BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr);
else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch); BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr);
else if (cmd == BPF_MAP_UPDATE_BATCH) else if (cmd == BPF_MAP_UPDATE_BATCH)
BPF_DO_BATCH(map->ops->map_update_batch); BPF_DO_BATCH(map->ops->map_update_batch, map, f.file, attr, uattr);
else else
BPF_DO_BATCH(map->ops->map_delete_batch); BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr);
err_put: err_put:
if (has_write) if (has_write)
bpf_map_write_active_dec(map); bpf_map_write_active_dec(map);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment