Commit 98e8627e authored by Björn Töpel, committed by Alexei Starovoitov

bpf: Move trampoline JIT image allocation to a function

Refactor the image allocation in the BPF trampoline code into a
separate function, so it can be shared with the BPF dispatcher in
upcoming commits.
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20191213175112.30208-2-bjorn.topel@gmail.com
parent 91cbdf74
...@@ -475,6 +475,7 @@ struct bpf_trampoline *bpf_trampoline_lookup(u64 key); ...@@ -475,6 +475,7 @@ struct bpf_trampoline *bpf_trampoline_lookup(u64 key);
int bpf_trampoline_link_prog(struct bpf_prog *prog); int bpf_trampoline_link_prog(struct bpf_prog *prog);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog); int bpf_trampoline_unlink_prog(struct bpf_prog *prog);
void bpf_trampoline_put(struct bpf_trampoline *tr); void bpf_trampoline_put(struct bpf_trampoline *tr);
void *bpf_jit_alloc_exec_page(void);
#else #else
static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key) static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{ {
......
...@@ -13,6 +13,22 @@ static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE]; ...@@ -13,6 +13,22 @@ static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];
/* serializes access to trampoline_table */ /* serializes access to trampoline_table */
static DEFINE_MUTEX(trampoline_mutex); static DEFINE_MUTEX(trampoline_mutex);
/* Allocate a single executable page for a JITed BPF image.
 *
 * Returns a PAGE_SIZE buffer obtained from the BPF JIT allocator with
 * execute permission set, or NULL if the allocation fails.
 */
void *bpf_jit_alloc_exec_page(void)
{
	void *page = bpf_jit_alloc_exec(PAGE_SIZE);

	if (!page)
		return NULL;

	set_vm_flush_reset_perms(page);
	/* Keep the page writeable. The alternative is to keep flipping ro/rw
	 * every time a new program is attached or detached.
	 */
	set_memory_x((long)page, 1);
	return page;
}
struct bpf_trampoline *bpf_trampoline_lookup(u64 key) struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{ {
struct bpf_trampoline *tr; struct bpf_trampoline *tr;
...@@ -33,7 +49,7 @@ struct bpf_trampoline *bpf_trampoline_lookup(u64 key) ...@@ -33,7 +49,7 @@ struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
goto out; goto out;
/* is_root was checked earlier. No need for bpf_jit_charge_modmem() */ /* is_root was checked earlier. No need for bpf_jit_charge_modmem() */
image = bpf_jit_alloc_exec(PAGE_SIZE); image = bpf_jit_alloc_exec_page();
if (!image) { if (!image) {
kfree(tr); kfree(tr);
tr = NULL; tr = NULL;
...@@ -47,12 +63,6 @@ struct bpf_trampoline *bpf_trampoline_lookup(u64 key) ...@@ -47,12 +63,6 @@ struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
mutex_init(&tr->mutex); mutex_init(&tr->mutex);
for (i = 0; i < BPF_TRAMP_MAX; i++) for (i = 0; i < BPF_TRAMP_MAX; i++)
INIT_HLIST_HEAD(&tr->progs_hlist[i]); INIT_HLIST_HEAD(&tr->progs_hlist[i]);
set_vm_flush_reset_perms(image);
/* Keep image as writeable. The alternative is to keep flipping ro/rw
* everytime new program is attached or detached.
*/
set_memory_x((long)image, 1);
tr->image = image; tr->image = image;
out: out:
mutex_unlock(&trampoline_mutex); mutex_unlock(&trampoline_mutex);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment