Commit 7ac88eba authored by Jiri Olsa, committed by Alexei Starovoitov

bpf: Remove bpf_image tree

Now that we have all the objects (bpf_prog, bpf_trampoline,
bpf_dispatcher) linked in bpf_tree, there's no need to have a
separate bpf_image tree for images.
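
With everything in one tree, a single ksym lookup can answer whether an
address belongs to a BPF image. A rough sketch of that lookup path,
assuming the bpf_ksym_find helper added earlier in this series
(simplified, not the verbatim kernel code):

	bool is_bpf_text_address(unsigned long addr)
	{
		bool ret;

		rcu_read_lock();
		/* bpf_tree now holds progs, trampolines and dispatchers */
		ret = bpf_ksym_find(addr) != NULL;
		rcu_read_unlock();

		return ret;
	}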

Removing the bpf_image tree together with struct bpf_image,
because they are no longer needed.

Also removing the bpf_image_alloc function and exposing the
original bpf_jit_alloc_exec_page interface instead.
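
Callers that used to go through bpf_image_alloc() now grab the executable
page directly; for example, the dispatcher setup path becomes (as in the
hunk below):

	d->image = bpf_jit_alloc_exec_page();
	if (!d->image)
		goto out;
	bpf_image_ksym_add(d->image, &d->ksym);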

The kernel_text_address function can now rely on is_bpf_text_address
alone, because that check looks up the bpf_tree, which contains all
the objects.
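
After the change, the BPF-related part of kernel_text_address is roughly
just (sketch, simplified):

	/* one check now covers progs, trampolines and dispatcher images */
	if (is_bpf_text_address(addr))
		goto out;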

Keeping bpf_image_ksym_add and bpf_image_ksym_del because they are
useful wrappers around perf's ksymbol interface calls.
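
The wrappers simply register/unregister a page-sized symbol and emit the
matching PERF_RECORD_KSYMBOL event; after this change they operate on a
full PAGE_SIZE image, e.g. (as in the hunk below):

	void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym)
	{
		ksym->start = (unsigned long) data;
		ksym->end = ksym->start + PAGE_SIZE;
		bpf_ksym_add(ksym);
		perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
				   PAGE_SIZE, false, ksym->name);
	}
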
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200312195610.346362-13-jolsa@kernel.org
parent 517b75e4
@@ -583,14 +583,8 @@ void bpf_trampoline_put(struct bpf_trampoline *tr);
 #define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
 void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
 				struct bpf_prog *to);
-struct bpf_image {
-	struct latch_tree_node tnode;
-	unsigned char data[];
-};
-#define BPF_IMAGE_SIZE		(PAGE_SIZE - sizeof(struct bpf_image))
-bool is_bpf_image_address(unsigned long address);
-void *bpf_image_alloc(void);
 /* Called only from JIT-enabled code, so there's no need for stubs. */
+void *bpf_jit_alloc_exec_page(void);
 void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
 void bpf_image_ksym_del(struct bpf_ksym *ksym);
 void bpf_ksym_add(struct bpf_ksym *ksym);
......
@@ -113,7 +113,7 @@ static void bpf_dispatcher_update(struct bpf_dispatcher *d, int prev_num_progs)
 		noff = 0;
 	} else {
 		old = d->image + d->image_off;
-		noff = d->image_off ^ (BPF_IMAGE_SIZE / 2);
+		noff = d->image_off ^ (PAGE_SIZE / 2);
 	}
 
 	new = d->num_progs ? d->image + noff : NULL;
@@ -140,7 +140,7 @@ void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
 
 	mutex_lock(&d->mutex);
 	if (!d->image) {
-		d->image = bpf_image_alloc();
+		d->image = bpf_jit_alloc_exec_page();
 		if (!d->image)
 			goto out;
 		bpf_image_ksym_add(d->image, &d->ksym);
......
@@ -18,12 +18,11 @@ const struct bpf_prog_ops bpf_extension_prog_ops = {
 #define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)
 
 static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];
-static struct latch_tree_root image_tree __cacheline_aligned;
 
-/* serializes access to trampoline_table and image_tree */
+/* serializes access to trampoline_table */
 static DEFINE_MUTEX(trampoline_mutex);
 
-static void *bpf_jit_alloc_exec_page(void)
+void *bpf_jit_alloc_exec_page(void)
 {
 	void *image;
 
@@ -39,78 +38,20 @@ static void *bpf_jit_alloc_exec_page(void)
 	return image;
 }
 
-static __always_inline bool image_tree_less(struct latch_tree_node *a,
-					    struct latch_tree_node *b)
-{
-	struct bpf_image *ia = container_of(a, struct bpf_image, tnode);
-	struct bpf_image *ib = container_of(b, struct bpf_image, tnode);
-
-	return ia < ib;
-}
-
-static __always_inline int image_tree_comp(void *addr, struct latch_tree_node *n)
-{
-	void *image = container_of(n, struct bpf_image, tnode);
-
-	if (addr < image)
-		return -1;
-	if (addr >= image + PAGE_SIZE)
-		return 1;
-
-	return 0;
-}
-
-static const struct latch_tree_ops image_tree_ops = {
-	.less	= image_tree_less,
-	.comp	= image_tree_comp,
-};
-
-static void *__bpf_image_alloc(bool lock)
-{
-	struct bpf_image *image;
-
-	image = bpf_jit_alloc_exec_page();
-	if (!image)
-		return NULL;
-
-	if (lock)
-		mutex_lock(&trampoline_mutex);
-	latch_tree_insert(&image->tnode, &image_tree, &image_tree_ops);
-	if (lock)
-		mutex_unlock(&trampoline_mutex);
-
-	return image->data;
-}
-
-void *bpf_image_alloc(void)
-{
-	return __bpf_image_alloc(true);
-}
-
-bool is_bpf_image_address(unsigned long addr)
-{
-	bool ret;
-
-	rcu_read_lock();
-	ret = latch_tree_find((void *) addr, &image_tree, &image_tree_ops) != NULL;
-	rcu_read_unlock();
-
-	return ret;
-}
-
 void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym)
 {
 	ksym->start = (unsigned long) data;
-	ksym->end = ksym->start + BPF_IMAGE_SIZE;
+	ksym->end = ksym->start + PAGE_SIZE;
 	bpf_ksym_add(ksym);
 	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
-			   BPF_IMAGE_SIZE, false, ksym->name);
+			   PAGE_SIZE, false, ksym->name);
 }
 
 void bpf_image_ksym_del(struct bpf_ksym *ksym)
 {
 	bpf_ksym_del(ksym);
 	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
-			   BPF_IMAGE_SIZE, true, ksym->name);
+			   PAGE_SIZE, true, ksym->name);
 }
 
 static void bpf_trampoline_ksym_add(struct bpf_trampoline *tr)
@@ -141,7 +82,7 @@ struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
 		goto out;
 
 	/* is_root was checked earlier. No need for bpf_jit_charge_modmem() */
-	image = __bpf_image_alloc(false);
+	image = bpf_jit_alloc_exec_page();
 	if (!image) {
 		kfree(tr);
 		tr = NULL;
@@ -243,8 +184,8 @@ bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total)
 
 static int bpf_trampoline_update(struct bpf_trampoline *tr)
 {
-	void *old_image = tr->image + ((tr->selector + 1) & 1) * BPF_IMAGE_SIZE/2;
-	void *new_image = tr->image + (tr->selector & 1) * BPF_IMAGE_SIZE/2;
+	void *old_image = tr->image + ((tr->selector + 1) & 1) * PAGE_SIZE/2;
+	void *new_image = tr->image + (tr->selector & 1) * PAGE_SIZE/2;
 	struct bpf_tramp_progs *tprogs;
 	u32 flags = BPF_TRAMP_F_RESTORE_REGS;
 	int err, total;
@@ -272,7 +213,7 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
 
 	synchronize_rcu_tasks();
 
-	err = arch_prepare_bpf_trampoline(new_image, new_image + BPF_IMAGE_SIZE / 2,
+	err = arch_prepare_bpf_trampoline(new_image, new_image + PAGE_SIZE / 2,
 					  &tr->func.model, flags, tprogs,
 					  tr->func.addr);
 	if (err < 0)
@@ -383,8 +324,6 @@ int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
 
 void bpf_trampoline_put(struct bpf_trampoline *tr)
 {
-	struct bpf_image *image;
-
 	if (!tr)
 		return;
 	mutex_lock(&trampoline_mutex);
@@ -396,11 +335,9 @@ void bpf_trampoline_put(struct bpf_trampoline *tr)
 	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
 		goto out;
 	bpf_image_ksym_del(&tr->ksym);
-	image = container_of(tr->image, struct bpf_image, data);
-	latch_tree_erase(&image->tnode, &image_tree, &image_tree_ops);
 	/* wait for tasks to get out of trampoline before freeing it */
 	synchronize_rcu_tasks();
-	bpf_jit_free_exec(image);
+	bpf_jit_free_exec(tr->image);
 	hlist_del(&tr->hlist);
 	kfree(tr);
 out:
......
@@ -149,8 +149,6 @@ int kernel_text_address(unsigned long addr)
 		goto out;
 	if (is_bpf_text_address(addr))
 		goto out;
-	if (is_bpf_image_address(addr))
-		goto out;
 	ret = 0;
 out:
 	if (no_rcu)
......