Commit d4e7dd48 authored by Andrii Nakryiko

Merge branch 'bpf-add-link_info-support-for-uprobe-multi-link'

Jiri Olsa says:

====================
bpf: Add link_info support for uprobe multi link

hi,
this patchset adds support to get bpf_link_info details for
uprobe_multi links and adds support for bpftool link to
display them.

v4 changes:
  - move flags field up in bpf_uprobe_multi_link [Andrii]
  - include zero terminating byte in path_size [Andrii]
  - return d_path error directly [Yonghong]
  - use SEC(".probes") for semaphores [Yonghong]
  - fix ref_ctr_offsets leak in test [Yonghong]
  - other smaller fixes [Yonghong]

thanks,
jirka
---
====================

Link: https://lore.kernel.org/r/20231125193130.834322-1-jolsa@kernel.org
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
parents cf979163 a7795698
......@@ -6562,6 +6562,16 @@ struct bpf_link_info {
__u32 flags;
__u64 missed;
} kprobe_multi;
struct {
__aligned_u64 path;
__aligned_u64 offsets;
__aligned_u64 ref_ctr_offsets;
__aligned_u64 cookies;
__u32 path_size; /* in/out: real path size on success, including zero byte */
__u32 count; /* in/out: uprobe_multi offsets/ref_ctr_offsets/cookies count */
__u32 flags;
__u32 pid;
} uprobe_multi;
struct {
__u32 type; /* enum bpf_perf_event_type */
__u32 :32;
......
......@@ -3033,6 +3033,7 @@ struct bpf_uprobe_multi_link;
/*
 * Per-probe state of a multi-uprobe link; one instance per attached
 * file offset, all owned by the same bpf_uprobe_multi_link.
 */
struct bpf_uprobe {
	struct bpf_uprobe_multi_link *link;	/* owning multi-uprobe link */
	loff_t offset;				/* probe file offset within the binary */
	unsigned long ref_ctr_offset;		/* semaphore (ref counter) offset, 0 if unused */
	u64 cookie;				/* user-supplied value, reported via link_info */
	struct uprobe_consumer consumer;	/* registration handle for the uprobe core */
};
......@@ -3041,6 +3042,7 @@ struct bpf_uprobe_multi_link {
struct path path;
struct bpf_link link;
u32 cnt;
u32 flags;
struct bpf_uprobe *uprobes;
struct task_struct *task;
};
......@@ -3082,9 +3084,79 @@ static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
kfree(umulti_link);
}
/*
 * Fill bpf_link_info for a BPF_LINK_TYPE_UPROBE_MULTI link.
 *
 * User-controlled input in @info: optional buffers for the binary path
 * (path + path_size, which must be supplied together) and for the
 * per-probe offsets/ref_ctr_offsets/cookies arrays (which require a
 * non-zero count).  On success the kernel writes back count, flags, pid
 * and, if requested, path_size (real length including the zero byte).
 *
 * Returns 0 on success, -ENOSPC if the user-provided count is smaller
 * than the number of probes (the first @count entries are still
 * copied), or a negative errno on other failures.
 */
static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link,
						struct bpf_link_info *info)
{
	u64 __user *uref_ctr_offsets = u64_to_user_ptr(info->uprobe_multi.ref_ctr_offsets);
	u64 __user *ucookies = u64_to_user_ptr(info->uprobe_multi.cookies);
	u64 __user *uoffsets = u64_to_user_ptr(info->uprobe_multi.offsets);
	u64 __user *upath = u64_to_user_ptr(info->uprobe_multi.path);
	u32 upath_size = info->uprobe_multi.path_size;
	struct bpf_uprobe_multi_link *umulti_link;
	u32 ucount = info->uprobe_multi.count;
	int err = 0, i;
	long left;

	/* path and path_size must be provided together or not at all */
	if (!upath ^ !upath_size)
		return -EINVAL;

	/* array buffers make no sense without a count */
	if ((uoffsets || uref_ctr_offsets || ucookies) && !ucount)
		return -EINVAL;

	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
	info->uprobe_multi.count = umulti_link->cnt;
	info->uprobe_multi.flags = umulti_link->flags;
	/* pid is translated into the caller's pid namespace; 0 if not per-task */
	info->uprobe_multi.pid = umulti_link->task ?
				 task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0;

	if (upath) {
		char *p, *buf;

		upath_size = min_t(u32, upath_size, PATH_MAX);

		buf = kmalloc(upath_size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		/* d_path() writes the path at the END of buf and returns its start */
		p = d_path(&umulti_link->path, buf, upath_size);
		if (IS_ERR(p)) {
			kfree(buf);
			return PTR_ERR(p);
		}
		/* actual path length, including the terminating zero byte */
		upath_size = buf + upath_size - p;
		left = copy_to_user(upath, p, upath_size);
		kfree(buf);
		if (left)
			return -EFAULT;
		info->uprobe_multi.path_size = upath_size;
	}

	if (!uoffsets && !ucookies && !uref_ctr_offsets)
		return 0;

	/* report -ENOSPC but still copy out as many entries as fit */
	if (ucount < umulti_link->cnt)
		err = -ENOSPC;
	else
		ucount = umulti_link->cnt;

	for (i = 0; i < ucount; i++) {
		if (uoffsets &&
		    put_user(umulti_link->uprobes[i].offset, uoffsets + i))
			return -EFAULT;
		if (uref_ctr_offsets &&
		    put_user(umulti_link->uprobes[i].ref_ctr_offset, uref_ctr_offsets + i))
			return -EFAULT;
		if (ucookies &&
		    put_user(umulti_link->uprobes[i].cookie, ucookies + i))
			return -EFAULT;
	}

	return err;
}
/* Link operations for BPF_LINK_TYPE_UPROBE_MULTI links. */
static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
	.release = bpf_uprobe_multi_link_release,
	.dealloc = bpf_uprobe_multi_link_dealloc,
	.fill_link_info = bpf_uprobe_multi_link_fill_link_info,
};
static int uprobe_prog_run(struct bpf_uprobe *uprobe,
......@@ -3172,7 +3244,6 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
{
struct bpf_uprobe_multi_link *link = NULL;
unsigned long __user *uref_ctr_offsets;
unsigned long *ref_ctr_offsets = NULL;
struct bpf_link_primer link_primer;
struct bpf_uprobe *uprobes = NULL;
struct task_struct *task = NULL;
......@@ -3245,18 +3316,12 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
if (!uprobes || !link)
goto error_free;
if (uref_ctr_offsets) {
ref_ctr_offsets = kvcalloc(cnt, sizeof(*ref_ctr_offsets), GFP_KERNEL);
if (!ref_ctr_offsets)
goto error_free;
}
for (i = 0; i < cnt; i++) {
if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) {
err = -EFAULT;
goto error_free;
}
if (uref_ctr_offsets && __get_user(ref_ctr_offsets[i], uref_ctr_offsets + i)) {
if (uref_ctr_offsets && __get_user(uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) {
err = -EFAULT;
goto error_free;
}
......@@ -3280,6 +3345,7 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
link->uprobes = uprobes;
link->path = path;
link->task = task;
link->flags = flags;
bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI,
&bpf_uprobe_multi_link_lops, prog);
......@@ -3287,7 +3353,7 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
for (i = 0; i < cnt; i++) {
err = uprobe_register_refctr(d_real_inode(link->path.dentry),
uprobes[i].offset,
ref_ctr_offsets ? ref_ctr_offsets[i] : 0,
uprobes[i].ref_ctr_offset,
&uprobes[i].consumer);
if (err) {
bpf_uprobe_unregister(&path, uprobes, i);
......@@ -3299,11 +3365,9 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
if (err)
goto error_free;
kvfree(ref_ctr_offsets);
return bpf_link_settle(&link_primer);
error_free:
kvfree(ref_ctr_offsets);
kvfree(uprobes);
kfree(link);
if (task)
......
......@@ -294,6 +294,37 @@ show_kprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
jsonw_end_array(json_wtr);
}
/* View a kernel-returned u64 address as a pointer to a __u64 array. */
static __u64 *u64_to_arr(__u64 val)
{
	void *ptr = u64_to_ptr(val);

	return (__u64 *) ptr;
}
/* Emit uprobe.multi link details (path, pid, per-func offsets,
 * ref_ctr offsets and cookies) as JSON. */
static void
show_uprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
{
	__u64 *offs = u64_to_arr(info->uprobe_multi.offsets);
	__u64 *refs = u64_to_arr(info->uprobe_multi.ref_ctr_offsets);
	__u64 *cookies = u64_to_arr(info->uprobe_multi.cookies);
	__u32 idx;

	jsonw_bool_field(json_wtr, "retprobe",
			 info->uprobe_multi.flags & BPF_F_UPROBE_MULTI_RETURN);
	jsonw_string_field(json_wtr, "path", (char *) u64_to_ptr(info->uprobe_multi.path));
	jsonw_uint_field(json_wtr, "func_cnt", info->uprobe_multi.count);
	jsonw_int_field(json_wtr, "pid", (int) info->uprobe_multi.pid);
	jsonw_name(json_wtr, "funcs");
	jsonw_start_array(json_wtr);

	for (idx = 0; idx < info->uprobe_multi.count; idx++) {
		jsonw_start_object(json_wtr);
		jsonw_uint_field(json_wtr, "offset", offs[idx]);
		jsonw_uint_field(json_wtr, "ref_ctr_offset", refs[idx]);
		jsonw_uint_field(json_wtr, "cookie", cookies[idx]);
		jsonw_end_object(json_wtr);
	}

	jsonw_end_array(json_wtr);
}
static void
show_perf_event_kprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
{
......@@ -465,6 +496,9 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)
case BPF_LINK_TYPE_KPROBE_MULTI:
show_kprobe_multi_json(info, json_wtr);
break;
case BPF_LINK_TYPE_UPROBE_MULTI:
show_uprobe_multi_json(info, json_wtr);
break;
case BPF_LINK_TYPE_PERF_EVENT:
switch (info->perf_event.type) {
case BPF_PERF_EVENT_EVENT:
......@@ -674,6 +708,33 @@ static void show_kprobe_multi_plain(struct bpf_link_info *info)
}
}
static void show_uprobe_multi_plain(struct bpf_link_info *info)
{
__u32 i;
if (!info->uprobe_multi.count)
return;
if (info->uprobe_multi.flags & BPF_F_UPROBE_MULTI_RETURN)
printf("\n\turetprobe.multi ");
else
printf("\n\tuprobe.multi ");
printf("path %s ", (char *) u64_to_ptr(info->uprobe_multi.path));
printf("func_cnt %u ", info->uprobe_multi.count);
if (info->uprobe_multi.pid)
printf("pid %d ", info->uprobe_multi.pid);
printf("\n\t%-16s %-16s %-16s", "offset", "ref_ctr_offset", "cookies");
for (i = 0; i < info->uprobe_multi.count; i++) {
printf("\n\t0x%-16llx 0x%-16llx 0x%-16llx",
u64_to_arr(info->uprobe_multi.offsets)[i],
u64_to_arr(info->uprobe_multi.ref_ctr_offsets)[i],
u64_to_arr(info->uprobe_multi.cookies)[i]);
}
}
static void show_perf_event_kprobe_plain(struct bpf_link_info *info)
{
const char *buf;
......@@ -807,6 +868,9 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info)
case BPF_LINK_TYPE_KPROBE_MULTI:
show_kprobe_multi_plain(info);
break;
case BPF_LINK_TYPE_UPROBE_MULTI:
show_uprobe_multi_plain(info);
break;
case BPF_LINK_TYPE_PERF_EVENT:
switch (info->perf_event.type) {
case BPF_PERF_EVENT_EVENT:
......@@ -846,8 +910,10 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info)
static int do_show_link(int fd)
{
__u64 *ref_ctr_offsets = NULL, *offsets = NULL, *cookies = NULL;
struct bpf_link_info info;
__u32 len = sizeof(info);
char path_buf[PATH_MAX];
__u64 *addrs = NULL;
char buf[PATH_MAX];
int count;
......@@ -889,6 +955,39 @@ static int do_show_link(int fd)
goto again;
}
}
if (info.type == BPF_LINK_TYPE_UPROBE_MULTI &&
!info.uprobe_multi.offsets) {
count = info.uprobe_multi.count;
if (count) {
offsets = calloc(count, sizeof(__u64));
if (!offsets) {
p_err("mem alloc failed");
close(fd);
return -ENOMEM;
}
info.uprobe_multi.offsets = ptr_to_u64(offsets);
ref_ctr_offsets = calloc(count, sizeof(__u64));
if (!ref_ctr_offsets) {
p_err("mem alloc failed");
free(offsets);
close(fd);
return -ENOMEM;
}
info.uprobe_multi.ref_ctr_offsets = ptr_to_u64(ref_ctr_offsets);
cookies = calloc(count, sizeof(__u64));
if (!cookies) {
p_err("mem alloc failed");
free(cookies);
free(offsets);
close(fd);
return -ENOMEM;
}
info.uprobe_multi.cookies = ptr_to_u64(cookies);
info.uprobe_multi.path = ptr_to_u64(path_buf);
info.uprobe_multi.path_size = sizeof(path_buf);
goto again;
}
}
if (info.type == BPF_LINK_TYPE_PERF_EVENT) {
switch (info.perf_event.type) {
case BPF_PERF_EVENT_TRACEPOINT:
......@@ -924,8 +1023,10 @@ static int do_show_link(int fd)
else
show_link_close_plain(fd, &info);
if (addrs)
free(addrs);
free(ref_ctr_offsets);
free(cookies);
free(offsets);
free(addrs);
close(fd);
return 0;
}
......
......@@ -6562,6 +6562,16 @@ struct bpf_link_info {
__u32 flags;
__u64 missed;
} kprobe_multi;
struct {
__aligned_u64 path;
__aligned_u64 offsets;
__aligned_u64 ref_ctr_offsets;
__aligned_u64 cookies;
__u32 path_size; /* in/out: real path size on success, including zero byte */
__u32 count; /* in/out: uprobe_multi offsets/ref_ctr_offsets/cookies count */
__u32 flags;
__u32 pid;
} uprobe_multi;
struct {
__u32 type; /* enum bpf_perf_event_type */
__u32 :32;
......
......@@ -407,7 +407,8 @@ static int symbol_cmp(const void *a, const void *b)
* size, that needs to be released by the caller.
*/
int elf_resolve_syms_offsets(const char *binary_path, int cnt,
const char **syms, unsigned long **poffsets)
const char **syms, unsigned long **poffsets,
int st_type)
{
int sh_types[2] = { SHT_DYNSYM, SHT_SYMTAB };
int err = 0, i, cnt_done = 0;
......@@ -438,7 +439,7 @@ int elf_resolve_syms_offsets(const char *binary_path, int cnt,
struct elf_sym_iter iter;
struct elf_sym *sym;
err = elf_sym_iter_new(&iter, elf_fd.elf, binary_path, sh_types[i], STT_FUNC);
err = elf_sym_iter_new(&iter, elf_fd.elf, binary_path, sh_types[i], st_type);
if (err == -ENOENT)
continue;
if (err)
......
......@@ -11447,7 +11447,7 @@ bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
return libbpf_err_ptr(err);
offsets = resolved_offsets;
} else if (syms) {
err = elf_resolve_syms_offsets(path, cnt, syms, &resolved_offsets);
err = elf_resolve_syms_offsets(path, cnt, syms, &resolved_offsets, STT_FUNC);
if (err < 0)
return libbpf_err_ptr(err);
offsets = resolved_offsets;
......
......@@ -594,7 +594,8 @@ int elf_open(const char *binary_path, struct elf_fd *elf_fd);
void elf_close(struct elf_fd *elf_fd);
/* Post-image prototype only: the pre-image declaration (without st_type)
 * was left interleaved with the new one by the diff rendering. */
int elf_resolve_syms_offsets(const char *binary_path, int cnt,
			     const char **syms, unsigned long **poffsets,
			     int st_type);
int elf_resolve_pattern_offsets(const char *binary_path, const char *pattern,
unsigned long **poffsets, size_t *pcnt);
......
......@@ -7,6 +7,7 @@
#include <test_progs.h>
#include "trace_helpers.h"
#include "test_fill_link_info.skel.h"
#include "bpf/libbpf_internal.h"
#define TP_CAT "sched"
#define TP_NAME "sched_switch"
......@@ -140,14 +141,14 @@ static void test_kprobe_fill_link_info(struct test_fill_link_info *skel,
.retprobe = type == BPF_PERF_EVENT_KRETPROBE,
);
ssize_t entry_offset = 0;
struct bpf_link *link;
int link_fd, err;
skel->links.kprobe_run = bpf_program__attach_kprobe_opts(skel->progs.kprobe_run,
KPROBE_FUNC, &opts);
if (!ASSERT_OK_PTR(skel->links.kprobe_run, "attach_kprobe"))
link = bpf_program__attach_kprobe_opts(skel->progs.kprobe_run, KPROBE_FUNC, &opts);
if (!ASSERT_OK_PTR(link, "attach_kprobe"))
return;
link_fd = bpf_link__fd(skel->links.kprobe_run);
link_fd = bpf_link__fd(link);
if (!invalid) {
/* See also arch_adjust_kprobe_addr(). */
if (skel->kconfig->CONFIG_X86_KERNEL_IBT)
......@@ -157,39 +158,41 @@ static void test_kprobe_fill_link_info(struct test_fill_link_info *skel,
} else {
kprobe_fill_invalid_user_buffer(link_fd);
}
bpf_link__detach(skel->links.kprobe_run);
bpf_link__destroy(link);
}
/*
 * Attach a tracepoint program and verify its perf-event link_info.
 * Reconstructed post-image: the rendered diff left the pre-image lines
 * (attach via skel->links.tp_run + bpf_link__detach) interleaved with
 * the new local-link version, which would attach twice and leak.
 */
static void test_tp_fill_link_info(struct test_fill_link_info *skel)
{
	struct bpf_link *link;
	int link_fd, err;

	link = bpf_program__attach_tracepoint(skel->progs.tp_run, TP_CAT, TP_NAME);
	if (!ASSERT_OK_PTR(link, "attach_tp"))
		return;

	link_fd = bpf_link__fd(link);
	err = verify_perf_link_info(link_fd, BPF_PERF_EVENT_TRACEPOINT, 0, 0, 0);
	ASSERT_OK(err, "verify_perf_link_info");
	bpf_link__destroy(link);
}
/*
 * Attach a (ret)uprobe to ourselves and verify its perf-event link_info.
 * Reconstructed post-image: the rendered diff left the pre-image lines
 * (attach via skel->links.uprobe_run + bpf_link__detach) interleaved
 * with the new local-link version, which would attach twice and leak.
 */
static void test_uprobe_fill_link_info(struct test_fill_link_info *skel,
				       enum bpf_perf_event_type type)
{
	struct bpf_link *link;
	int link_fd, err;

	link = bpf_program__attach_uprobe(skel->progs.uprobe_run,
					  type == BPF_PERF_EVENT_URETPROBE,
					  0, /* self pid */
					  UPROBE_FILE, uprobe_offset);
	if (!ASSERT_OK_PTR(link, "attach_uprobe"))
		return;

	link_fd = bpf_link__fd(link);
	err = verify_perf_link_info(link_fd, type, 0, uprobe_offset, 0);
	ASSERT_OK(err, "verify_perf_link_info");
	bpf_link__destroy(link);
}
static int verify_kmulti_link_info(int fd, bool retprobe)
......@@ -278,24 +281,214 @@ static void test_kprobe_multi_fill_link_info(struct test_fill_link_info *skel,
bool retprobe, bool invalid)
{
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
struct bpf_link *link;
int link_fd, err;
opts.syms = kmulti_syms;
opts.cnt = KMULTI_CNT;
opts.retprobe = retprobe;
skel->links.kmulti_run = bpf_program__attach_kprobe_multi_opts(skel->progs.kmulti_run,
NULL, &opts);
if (!ASSERT_OK_PTR(skel->links.kmulti_run, "attach_kprobe_multi"))
link = bpf_program__attach_kprobe_multi_opts(skel->progs.kmulti_run, NULL, &opts);
if (!ASSERT_OK_PTR(link, "attach_kprobe_multi"))
return;
link_fd = bpf_link__fd(skel->links.kmulti_run);
link_fd = bpf_link__fd(link);
if (!invalid) {
err = verify_kmulti_link_info(link_fd, retprobe);
ASSERT_OK(err, "verify_kmulti_link_info");
} else {
verify_kmulti_invalid_user_buffer(link_fd);
}
bpf_link__detach(skel->links.kmulti_run);
bpf_link__destroy(link);
}
/* Force a symbol into a named ELF section and keep it even if unreferenced. */
#define SEC(name) __attribute__((section(name), used))

/* Semaphore variables whose ELF offsets are fed to the link as
 * ref_ctr_offsets; placed in a dedicated ".probes" section. */
static short uprobe_link_info_sema_1 SEC(".probes");
static short uprobe_link_info_sema_2 SEC(".probes");
static short uprobe_link_info_sema_3 SEC(".probes");

/* Uprobe attachment targets.  noinline plus the empty asm volatile keep
 * the compiler from inlining/merging them, so their symbols resolve to
 * distinct offsets in /proc/self/exe. */
noinline void uprobe_link_info_func_1(void)
{
	asm volatile ("");
	uprobe_link_info_sema_1++;
}

noinline void uprobe_link_info_func_2(void)
{
	asm volatile ("");
	uprobe_link_info_sema_2++;
}

noinline void uprobe_link_info_func_3(void)
{
	asm volatile ("");
	uprobe_link_info_sema_3++;
}
/*
 * Query link_info of a uprobe-multi link under all 8 combinations of the
 * three optional user arrays (offsets/cookies/ref_ctr_offsets) and check
 * the reported path, pid, count, flags and per-probe values.
 *
 * The first iteration (bit == 0) passes count == 0 with no arrays; the
 * kernel-reported count is then reused for the remaining iterations.
 */
static int
verify_umulti_link_info(int fd, bool retprobe, __u64 *offsets,
			__u64 *cookies, __u64 *ref_ctr_offsets)
{
	char path[PATH_MAX], path_buf[PATH_MAX];
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	__u64 ref_ctr_offsets_buf[3];
	__u64 offsets_buf[3];
	__u64 cookies_buf[3];
	int i, err, bit;
	__u32 count = 0;

	memset(path, 0, sizeof(path));
	/* readlink() does not NUL-terminate; leave room for the zero byte */
	err = readlink("/proc/self/exe", path, sizeof(path) - 1);
	if (!ASSERT_NEQ(err, -1, "readlink"))
		return -1;

	for (bit = 0; bit < 8; bit++) {
		memset(&info, 0, sizeof(info));
		info.uprobe_multi.path = ptr_to_u64(path_buf);
		info.uprobe_multi.path_size = sizeof(path_buf);
		info.uprobe_multi.count = count;

		if (bit & 0x1)
			info.uprobe_multi.offsets = ptr_to_u64(offsets_buf);
		if (bit & 0x2)
			info.uprobe_multi.cookies = ptr_to_u64(cookies_buf);
		if (bit & 0x4)
			info.uprobe_multi.ref_ctr_offsets = ptr_to_u64(ref_ctr_offsets_buf);

		err = bpf_link_get_info_by_fd(fd, &info, &len);
		if (!ASSERT_OK(err, "bpf_link_get_info_by_fd"))
			return -1;

		if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_UPROBE_MULTI, "info.type"))
			return -1;

		ASSERT_EQ(info.uprobe_multi.pid, getpid(), "info.uprobe_multi.pid");
		ASSERT_EQ(info.uprobe_multi.count, 3, "info.uprobe_multi.count");
		/* bug fix: the check used BPF_F_KPROBE_MULTI_RETURN for a
		 * uprobe-multi link (it only worked because both flags are
		 * bit 0); also normalize the flag bit to bool before
		 * comparing with retprobe. */
		ASSERT_EQ(!!(info.uprobe_multi.flags & BPF_F_UPROBE_MULTI_RETURN),
			  retprobe, "info.uprobe_multi.flags.retprobe");
		ASSERT_EQ(info.uprobe_multi.path_size, strlen(path) + 1, "info.uprobe_multi.path_size");
		ASSERT_STREQ(path_buf, path, "info.uprobe_multi.path");

		for (i = 0; i < info.uprobe_multi.count; i++) {
			if (info.uprobe_multi.offsets)
				ASSERT_EQ(offsets_buf[i], offsets[i], "info.uprobe_multi.offsets");
			if (info.uprobe_multi.cookies)
				ASSERT_EQ(cookies_buf[i], cookies[i], "info.uprobe_multi.cookies");
			if (info.uprobe_multi.ref_ctr_offsets) {
				ASSERT_EQ(ref_ctr_offsets_buf[i], ref_ctr_offsets[i],
					  "info.uprobe_multi.ref_ctr_offsets");
			}
		}

		/* adopt the kernel-reported count after the first pass */
		count = count ?: info.uprobe_multi.count;
	}

	return 0;
}
static void verify_umulti_invalid_user_buffer(int fd)
{
struct bpf_link_info info;
__u32 len = sizeof(info);
__u64 buf[3];
int err;
/* upath_size defined, not path */
memset(&info, 0, sizeof(info));
info.uprobe_multi.path_size = 3;
err = bpf_link_get_info_by_fd(fd, &info, &len);
ASSERT_EQ(err, -EINVAL, "failed_upath_size");
/* path defined, but small */
memset(&info, 0, sizeof(info));
info.uprobe_multi.path = ptr_to_u64(buf);
info.uprobe_multi.path_size = 3;
err = bpf_link_get_info_by_fd(fd, &info, &len);
ASSERT_LT(err, 0, "failed_upath_small");
/* path has wrong pointer */
memset(&info, 0, sizeof(info));
info.uprobe_multi.path_size = PATH_MAX;
info.uprobe_multi.path = 123;
err = bpf_link_get_info_by_fd(fd, &info, &len);
ASSERT_EQ(err, -EFAULT, "failed_bad_path_ptr");
/* count zero, with offsets */
memset(&info, 0, sizeof(info));
info.uprobe_multi.offsets = ptr_to_u64(buf);
err = bpf_link_get_info_by_fd(fd, &info, &len);
ASSERT_EQ(err, -EINVAL, "failed_count");
/* offsets not big enough */
memset(&info, 0, sizeof(info));
info.uprobe_multi.offsets = ptr_to_u64(buf);
info.uprobe_multi.count = 2;
err = bpf_link_get_info_by_fd(fd, &info, &len);
ASSERT_EQ(err, -ENOSPC, "failed_small_count");
/* offsets has wrong pointer */
memset(&info, 0, sizeof(info));
info.uprobe_multi.offsets = 123;
info.uprobe_multi.count = 3;
err = bpf_link_get_info_by_fd(fd, &info, &len);
ASSERT_EQ(err, -EFAULT, "failed_wrong_offsets");
}
/*
 * Attach a multi-uprobe link to three local functions — with cookies and
 * semaphore-based ref_ctr offsets — then verify the link_info the kernel
 * reports (or, with @invalid, exercise the malformed-buffer error paths).
 */
static void test_uprobe_multi_fill_link_info(struct test_fill_link_info *skel,
					     bool retprobe, bool invalid)
{
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts,
		.retprobe = retprobe,
	);
	const char *syms[3] = {
		"uprobe_link_info_func_1",
		"uprobe_link_info_func_2",
		"uprobe_link_info_func_3",
	};
	__u64 cookies[3] = {
		0xdead,
		0xbeef,
		0xcafe,
	};
	const char *sema[3] = {
		"uprobe_link_info_sema_1",
		"uprobe_link_info_sema_2",
		"uprobe_link_info_sema_3",
	};
	__u64 *offsets = NULL, *ref_ctr_offsets;
	struct bpf_link *link;
	int link_fd, err;

	/* semaphores are data symbols, hence STT_OBJECT */
	err = elf_resolve_syms_offsets("/proc/self/exe", 3, sema,
				       (unsigned long **) &ref_ctr_offsets, STT_OBJECT);
	if (!ASSERT_OK(err, "elf_resolve_syms_offsets_object"))
		return;

	err = elf_resolve_syms_offsets("/proc/self/exe", 3, syms,
				       (unsigned long **) &offsets, STT_FUNC);
	if (!ASSERT_OK(err, "elf_resolve_syms_offsets_func"))
		goto out;

	opts.syms = syms;
	opts.cookies = &cookies[0];
	opts.ref_ctr_offsets = (unsigned long *) &ref_ctr_offsets[0];
	opts.cnt = ARRAY_SIZE(syms);

	link = bpf_program__attach_uprobe_multi(skel->progs.umulti_run, 0,
						"/proc/self/exe", NULL, &opts);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_multi"))
		goto out;

	link_fd = bpf_link__fd(link);
	if (invalid)
		verify_umulti_invalid_user_buffer(link_fd);
	else
		verify_umulti_link_info(link_fd, retprobe, offsets, cookies, ref_ctr_offsets);

	bpf_link__destroy(link);
out:
	/* both resolve helpers allocate; offsets stays NULL if unresolved */
	free(ref_ctr_offsets);
	free(offsets);
}
void test_fill_link_info(void)
......@@ -337,6 +530,13 @@ void test_fill_link_info(void)
if (test__start_subtest("kprobe_multi_invalid_ubuff"))
test_kprobe_multi_fill_link_info(skel, true, true);
if (test__start_subtest("uprobe_multi_link_info"))
test_uprobe_multi_fill_link_info(skel, false, false);
if (test__start_subtest("uretprobe_multi_link_info"))
test_uprobe_multi_fill_link_info(skel, true, false);
if (test__start_subtest("uprobe_multi_invalid"))
test_uprobe_multi_fill_link_info(skel, false, true);
cleanup:
test_fill_link_info__destroy(skel);
}
......@@ -249,7 +249,7 @@ static void __test_link_api(struct child *child)
int link_extra_fd = -1;
int err;
err = elf_resolve_syms_offsets(path, 3, syms, (unsigned long **) &offsets);
err = elf_resolve_syms_offsets(path, 3, syms, (unsigned long **) &offsets, STT_FUNC);
if (!ASSERT_OK(err, "elf_resolve_syms_offsets"))
return;
......
......@@ -39,4 +39,10 @@ int BPF_PROG(kmulti_run)
return 0;
}
SEC("uprobe.multi")
int BPF_PROG(umulti_run)
{
return 0;
}
char _license[] SEC("license") = "GPL";
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment