Commit 71ee10e2 authored by Alexei Starovoitov

Merge branch 'Allow attaching to bare tracepoints'

Qais Yousef says:

====================

Changes in v3:
	* Fix not returning error value correctly in
	  trigger_module_test_write() (Yonghong)
	* Add Yonghong acked-by to patch 1.

Changes in v2:
	* Fix compilation error. (Andrii)
	* Make the new test use write() instead of read() (Andrii)

Add some missing glue logic to teach bpf about bare tracepoints - tracepoints
without any trace event associated with them.

Bare tracepoints are declared with DECLARE_TRACE(). Full tracepoints are declared
with TRACE_EVENT().

BPF can attach to these tracepoints as RAW_TRACEPOINT() only, as there are no
events created for them in tracefs.
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 86e6b4e9 407be922
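
To illustrate the distinction made in the commit message: DECLARE_TRACE() only creates the tracepoint itself (the trace_<name>() call site), while TRACE_EVENT() additionally generates a trace event that shows up in tracefs. A minimal sketch using two hypothetical tracepoints that are not part of this series:

/* Bare tracepoint: trace_foo_bare() exists, but no tracefs event is created. */
DECLARE_TRACE(foo_bare,
	TP_PROTO(int cpu, unsigned long val),
	TP_ARGS(cpu, val)
);

/* Full tracepoint: also generates the event format visible in tracefs. */
TRACE_EVENT(foo_full,
	TP_PROTO(int cpu, unsigned long val),
	TP_ARGS(cpu, val),
	TP_STRUCT__entry(
		__field(int, cpu)
		__field(unsigned long, val)
	),
	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->val = val;
	),
	TP_printk("cpu=%d val=%lu", __entry->cpu, __entry->val)
);
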
......@@ -208,6 +208,12 @@ data structures and compile with kernel internal headers. Both of these
kernel internals are subject to change and can break with newer kernels
such that the program needs to be adapted accordingly.
Q: Are tracepoints part of the stable ABI?
------------------------------------------
A: NO. Tracepoints are tied to internal implementation details hence they are
subject to change and can break with newer kernels. BPF programs need to change
accordingly when this happens.
Q: How much stack space a BPF program uses?
-------------------------------------------
A: Currently all program types are limited to 512 bytes of stack
......
......@@ -55,8 +55,7 @@
/* tracepoints with more than 12 arguments will hit build error */
#define CAST_TO_U64(...) CONCATENATE(__CAST, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
#define __BPF_DECLARE_TRACE(call, proto, args) \
static notrace void \
__bpf_trace_##call(void *__data, proto) \
{ \
......@@ -64,6 +63,10 @@ __bpf_trace_##call(void *__data, proto) \
CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(prog, CAST_TO_U64(args)); \
}
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
__BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args))
/*
* This part is compiled out, it is only here as a build time check
* to make sure that if the tracepoint handling changes, the
......@@ -111,6 +114,11 @@ __DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size)
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
#undef DECLARE_TRACE
#define DECLARE_TRACE(call, proto, args) \
__BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) \
__DEFINE_EVENT(call, call, PARAMS(proto), PARAMS(args), 0)
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#undef DEFINE_EVENT_WRITABLE
......
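
For a rough sense of what the new glue generates, consider a hypothetical bare tracepoint declared as DECLARE_TRACE(foo_bar, TP_PROTO(int cpu), TP_ARGS(cpu)). __BPF_DECLARE_TRACE() then expands to approximately the following (a sketch, not literal preprocessor output):

static notrace void
__bpf_trace_foo_bar(void *__data, int cpu)
{
	struct bpf_prog *prog = __data;

	/* COUNT_ARGS(cpu) is 1, so the CONCATENATE() above selects
	 * bpf_trace_run1(); CAST_TO_U64() casts each argument to u64.
	 */
	bpf_trace_run1(prog, (u64)cpu);
}

The new DECLARE_TRACE hook also invokes __DEFINE_EVENT(), which records __bpf_trace_foo_bar in the raw tracepoint map so the tracepoint can be attached to as a RAW_TRACEPOINT, just like tracepoints declared through TRACE_EVENT().
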
......@@ -28,6 +28,12 @@ TRACE_EVENT(bpf_testmod_test_read,
__entry->pid, __entry->comm, __entry->off, __entry->len)
);
/* A bare tracepoint with no event associated with it */
DECLARE_TRACE(bpf_testmod_test_write_bare,
TP_PROTO(struct task_struct *task, struct bpf_testmod_test_write_ctx *ctx),
TP_ARGS(task, ctx)
);
#endif /* _BPF_TESTMOD_EVENTS_H */
#undef TRACE_INCLUDE_PATH
......
......@@ -31,9 +31,28 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
EXPORT_SYMBOL(bpf_testmod_test_read);
ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO);
noinline ssize_t
bpf_testmod_test_write(struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t len)
{
struct bpf_testmod_test_write_ctx ctx = {
.buf = buf,
.off = off,
.len = len,
};
trace_bpf_testmod_test_write_bare(current, &ctx);
return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_write);
ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO);
static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
.attr = { .name = "bpf_testmod", .mode = 0444, },
.attr = { .name = "bpf_testmod", .mode = 0666, },
.read = bpf_testmod_test_read,
.write = bpf_testmod_test_write,
};
static int bpf_testmod_init(void)
......
......@@ -11,4 +11,10 @@ struct bpf_testmod_test_read_ctx {
size_t len;
};
struct bpf_testmod_test_write_ctx {
char *buf;
loff_t off;
size_t len;
};
#endif /* _BPF_TESTMOD_H */
......@@ -21,9 +21,34 @@ static int trigger_module_test_read(int read_sz)
return 0;
}
static int trigger_module_test_write(int write_sz)
{
int fd, err;
char *buf = malloc(write_sz);
if (!buf)
return -ENOMEM;
memset(buf, 'a', write_sz);
buf[write_sz-1] = '\0';
fd = open("/sys/kernel/bpf_testmod", O_WRONLY);
err = -errno;
if (CHECK(fd < 0, "testmod_file_open", "failed: %d\n", err)) {
free(buf);
return err;
}
write(fd, buf, write_sz);
close(fd);
free(buf);
return 0;
}
void test_module_attach(void)
{
const int READ_SZ = 456;
const int WRITE_SZ = 457;
struct test_module_attach* skel;
struct test_module_attach__bss *bss;
int err;
......@@ -48,8 +73,10 @@ void test_module_attach(void)
/* trigger tracepoint */
ASSERT_OK(trigger_module_test_read(READ_SZ), "trigger_read");
ASSERT_OK(trigger_module_test_write(WRITE_SZ), "trigger_write");
ASSERT_EQ(bss->raw_tp_read_sz, READ_SZ, "raw_tp");
ASSERT_EQ(bss->raw_tp_bare_write_sz, WRITE_SZ, "raw_tp_bare");
ASSERT_EQ(bss->tp_btf_read_sz, READ_SZ, "tp_btf");
ASSERT_EQ(bss->fentry_read_sz, READ_SZ, "fentry");
ASSERT_EQ(bss->fentry_manual_read_sz, READ_SZ, "fentry_manual");
......
......@@ -17,6 +17,16 @@ int BPF_PROG(handle_raw_tp,
return 0;
}
__u32 raw_tp_bare_write_sz = 0;
SEC("raw_tp/bpf_testmod_test_write_bare")
int BPF_PROG(handle_raw_tp_bare,
struct task_struct *task, struct bpf_testmod_test_write_ctx *write_ctx)
{
raw_tp_bare_write_sz = BPF_CORE_READ(write_ctx, len);
return 0;
}
__u32 tp_btf_read_sz = 0;
SEC("tp_btf/bpf_testmod_test_read")
......
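
For completeness, a minimal sketch of how user space could attach to the bare tracepoint with libbpf, assuming the test_module_attach skeleton generated from the BPF object above (the helper below is illustrative only; the selftest itself relies on the skeleton's auto-attach):

#include <bpf/libbpf.h>
#include "test_module_attach.skel.h"

/* Illustrative helper, not part of the selftest. */
static int attach_bare_tp_example(void)
{
	struct test_module_attach *skel;
	struct bpf_link *link;
	int err;

	skel = test_module_attach__open_and_load();
	if (!skel)
		return -1;

	/* Attach SEC("raw_tp/bpf_testmod_test_write_bare") by tracepoint name. */
	link = bpf_program__attach_raw_tracepoint(skel->progs.handle_raw_tp_bare,
						  "bpf_testmod_test_write_bare");
	err = libbpf_get_error(link);
	if (!err)
		bpf_link__destroy(link);

	test_module_attach__destroy(skel);
	return err;
}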