Commit 82583daa authored by Song Liu, committed by Alexei Starovoitov

bpf: Add helpers for trampoline image management

As the BPF trampoline for different archs moves from bpf_jit_[alloc|free]_exec()
to bpf_prog_pack_[alloc|free](), different archs need different _alloc and _free
routines during the transition. Add the following helpers for this
transition:

void *arch_alloc_bpf_trampoline(unsigned int size);
void arch_free_bpf_trampoline(void *image, unsigned int size);
void arch_protect_bpf_trampoline(void *image, unsigned int size);
void arch_unprotect_bpf_trampoline(void *image, unsigned int size);

The fallback versions of these helpers require size <= PAGE_SIZE, but they
are currently only called with size == PAGE_SIZE. They will be called with
size < PAGE_SIZE once the arch_bpf_trampoline_size() helper is introduced
later.
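
For reference, below is a minimal sketch (illustrative only, not part of this
patch; the wrapper name build_tramp_sketch() is made up) of the call sequence
these helpers are meant to support, mirroring bpf_trampoline_update():

static int build_tramp_sketch(struct bpf_tramp_image *im,
                              const struct btf_func_model *m, u32 flags,
                              struct bpf_tramp_links *tlinks, void *func_addr)
{
        void *image;
        int err;

        /* was bpf_jit_alloc_exec(PAGE_SIZE) + set_vm_flush_reset_perms() */
        image = arch_alloc_bpf_trampoline(PAGE_SIZE);
        if (!image)
                return -ENOMEM;

        err = arch_prepare_bpf_trampoline(im, image, image + PAGE_SIZE,
                                          m, flags, tlinks, func_addr);
        if (err < 0) {
                /* was bpf_jit_free_exec(image) */
                arch_free_bpf_trampoline(image, PAGE_SIZE);
                return err;
        }

        /* was set_memory_rox(); to patch the image again, call
         * arch_unprotect_bpf_trampoline() first, as bpf_trampoline_update()
         * does before retrying.
         */
        arch_protect_bpf_trampoline(image, PAGE_SIZE);
        return 0;
}
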
Signed-off-by: Song Liu <song@kernel.org>
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>  # on s390x
Acked-by: Jiri Olsa <jolsa@kernel.org>
Link: https://lore.kernel.org/r/20231206224054.492250-4-song@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 7a3d9a15
@@ -1102,6 +1102,11 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
                                 const struct btf_func_model *m, u32 flags,
                                 struct bpf_tramp_links *tlinks,
                                 void *func_addr);
+void *arch_alloc_bpf_trampoline(unsigned int size);
+void arch_free_bpf_trampoline(void *image, unsigned int size);
+void arch_protect_bpf_trampoline(void *image, unsigned int size);
+void arch_unprotect_bpf_trampoline(void *image, unsigned int size);
+
 u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
                                 struct bpf_tramp_run_ctx *run_ctx);
 void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
...
@@ -515,7 +515,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
                         if (err)
                                 goto reset_unlock;
                 }
-                set_memory_rox((long)st_map->image, 1);
+                arch_protect_bpf_trampoline(st_map->image, PAGE_SIZE);
                 /* Let bpf_link handle registration & unregistration.
                  *
                  * Pair with smp_load_acquire() during lookup_elem().
@@ -524,7 +524,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
                 goto unlock;
         }
-        set_memory_rox((long)st_map->image, 1);
+        arch_protect_bpf_trampoline(st_map->image, PAGE_SIZE);
         err = st_ops->reg(kdata);
         if (likely(!err)) {
                 /* This refcnt increment on the map here after
@@ -547,8 +547,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
          * there was a race in registering the struct_ops (under the same name) to
          * a sub-system through different struct_ops's maps.
          */
-        set_memory_nx((long)st_map->image, 1);
-        set_memory_rw((long)st_map->image, 1);
+        arch_unprotect_bpf_trampoline(st_map->image, PAGE_SIZE);
 reset_unlock:
         bpf_struct_ops_map_put_progs(st_map);
@@ -616,7 +615,7 @@ static void __bpf_struct_ops_map_free(struct bpf_map *map)
         bpf_struct_ops_map_put_progs(st_map);
         bpf_map_area_free(st_map->links);
         if (st_map->image) {
-                bpf_jit_free_exec(st_map->image);
+                arch_free_bpf_trampoline(st_map->image, PAGE_SIZE);
                 bpf_jit_uncharge_modmem(PAGE_SIZE);
         }
         bpf_map_area_free(st_map->uvalue);
@@ -691,7 +690,7 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
                 return ERR_PTR(ret);
         }
-        st_map->image = bpf_jit_alloc_exec(PAGE_SIZE);
+        st_map->image = arch_alloc_bpf_trampoline(PAGE_SIZE);
         if (!st_map->image) {
                 /* __bpf_struct_ops_map_free() uses st_map->image as flag
                  * for "charged or not". In this case, we need to unchange
@@ -711,7 +710,6 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
         }
         mutex_init(&st_map->lock);
-        set_vm_flush_reset_perms(st_map->image);
         bpf_map_init_from_attr(map, attr);
         return map;
...
@@ -254,7 +254,7 @@ bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_a
 static void bpf_tramp_image_free(struct bpf_tramp_image *im)
 {
         bpf_image_ksym_del(&im->ksym);
-        bpf_jit_free_exec(im->image);
+        arch_free_bpf_trampoline(im->image, PAGE_SIZE);
         bpf_jit_uncharge_modmem(PAGE_SIZE);
         percpu_ref_exit(&im->pcref);
         kfree_rcu(im, rcu);
@@ -365,10 +365,9 @@ static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key)
                 goto out_free_im;
         err = -ENOMEM;
-        im->image = image = bpf_jit_alloc_exec(PAGE_SIZE);
+        im->image = image = arch_alloc_bpf_trampoline(PAGE_SIZE);
         if (!image)
                 goto out_uncharge;
-        set_vm_flush_reset_perms(image);
         err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
         if (err)
@@ -381,7 +380,7 @@ static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key)
         return im;
 out_free_image:
-        bpf_jit_free_exec(im->image);
+        arch_free_bpf_trampoline(im->image, PAGE_SIZE);
 out_uncharge:
         bpf_jit_uncharge_modmem(PAGE_SIZE);
 out_free_im:
@@ -444,7 +443,7 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
         if (err < 0)
                 goto out_free;
-        set_memory_rox((long)im->image, 1);
+        arch_protect_bpf_trampoline(im->image, PAGE_SIZE);
         WARN_ON(tr->cur_image && total == 0);
         if (tr->cur_image)
@@ -465,8 +464,7 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
                 tr->fops->trampoline = 0;
                 /* reset im->image memory attr for arch_prepare_bpf_trampoline */
-                set_memory_nx((long)im->image, 1);
-                set_memory_rw((long)im->image, 1);
+                arch_unprotect_bpf_trampoline(im->image, PAGE_SIZE);
                 goto again;
         }
 #endif
@@ -1040,6 +1038,40 @@ arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image
         return -ENOTSUPP;
 }
 
+void * __weak arch_alloc_bpf_trampoline(unsigned int size)
+{
+        void *image;
+
+        if (WARN_ON_ONCE(size > PAGE_SIZE))
+                return NULL;
+        image = bpf_jit_alloc_exec(PAGE_SIZE);
+        if (image)
+                set_vm_flush_reset_perms(image);
+        return image;
+}
+
+void __weak arch_free_bpf_trampoline(void *image, unsigned int size)
+{
+        WARN_ON_ONCE(size > PAGE_SIZE);
+        /* bpf_jit_free_exec doesn't need "size", but
+         * bpf_prog_pack_free() needs it.
+         */
+        bpf_jit_free_exec(image);
+}
+
+void __weak arch_protect_bpf_trampoline(void *image, unsigned int size)
+{
+        WARN_ON_ONCE(size > PAGE_SIZE);
+        set_memory_rox((long)image, 1);
+}
+
+void __weak arch_unprotect_bpf_trampoline(void *image, unsigned int size)
+{
+        WARN_ON_ONCE(size > PAGE_SIZE);
+        set_memory_nx((long)image, 1);
+        set_memory_rw((long)image, 1);
+}
+
 static int __init init_trampolines(void)
 {
         int i;
...
@@ -101,12 +101,11 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
                 goto out;
         }
-        image = bpf_jit_alloc_exec(PAGE_SIZE);
+        image = arch_alloc_bpf_trampoline(PAGE_SIZE);
         if (!image) {
                 err = -ENOMEM;
                 goto out;
         }
-        set_vm_flush_reset_perms(image);
         link = kzalloc(sizeof(*link), GFP_USER);
         if (!link) {
@@ -124,7 +123,7 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
         if (err < 0)
                 goto out;
-        set_memory_rox((long)image, 1);
+        arch_protect_bpf_trampoline(image, PAGE_SIZE);
         prog_ret = dummy_ops_call_op(image, args);
         err = dummy_ops_copy_args(args);
@@ -134,7 +133,7 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
                 err = -EFAULT;
 out:
         kfree(args);
-        bpf_jit_free_exec(image);
+        arch_free_bpf_trampoline(image, PAGE_SIZE);
         if (link)
                 bpf_link_put(&link->link);
         kfree(tlinks);
...