Commit 4d220ed0 authored by Alexei Starovoitov, committed by Daniel Borkmann

bpf: remove tracepoints from bpf core

Tracepoints in the bpf core were added as a way to provide introspection
into bpf programs and maps, but after some time it became clear that
this approach was inadequate, so prog_id, map_id and the corresponding
get_next_id, get_fd_by_id, get_info_by_fd and prog_query APIs were
introduced and fully adopted by bpftool and other applications.
Meanwhile, the tracepoints in the bpf core started to rot and to cause
syzbot warnings:
WARNING: CPU: 0 PID: 3008 at kernel/trace/trace_event_perf.c:274
Kernel panic - not syncing: panic_on_warn set ...
perf_trace_bpf_map_keyval+0x260/0xbd0 include/trace/events/bpf.h:228
trace_bpf_map_update_elem include/trace/events/bpf.h:274 [inline]
map_update_elem kernel/bpf/syscall.c:597 [inline]
SYSC_bpf kernel/bpf/syscall.c:1478 [inline]
Hence, this patch removes the tracepoints from the bpf core.
Reported-by: Eric Biggers <ebiggers3@gmail.com>
Reported-by: syzbot <bot+a9dbb3c3e64b62536a4bc5ee7bbd4ca627566188@syzkaller.appspotmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent 34745aed
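For context, the ID-based introspection API referred to in the commit message can be exercised from user space roughly as in the sketch below. This is purely illustrative and not part of the patch; it assumes a libbpf build that exposes the real wrappers bpf_prog_get_next_id(), bpf_prog_get_fd_by_id() and bpf_obj_get_info_by_fd() via <bpf/bpf.h>, while the surrounding program (and its name) is made up for the example.

/* prog_list.c: walk all loaded BPF programs via their IDs (illustrative sketch). */
#include <stdio.h>
#include <unistd.h>
#include <linux/bpf.h>          /* struct bpf_prog_info */
#include <bpf/bpf.h>            /* libbpf syscall wrappers */

int main(void)
{
        __u32 id = 0;

        /* BPF_PROG_GET_NEXT_ID iterates the global program ID space. */
        while (!bpf_prog_get_next_id(id, &id)) {
                struct bpf_prog_info info = {};
                __u32 len = sizeof(info);
                /* BPF_PROG_GET_FD_BY_ID; typically needs CAP_SYS_ADMIN. */
                int fd = bpf_prog_get_fd_by_id(id);

                if (fd < 0)
                        continue;       /* program may have been unloaded meanwhile */
                /* BPF_OBJ_GET_INFO_BY_FD fills in type, id, tag, etc. */
                if (!bpf_obj_get_info_by_fd(fd, &info, &len))
                        printf("id %u type %u tag %02x%02x%02x%02x%02x%02x%02x%02x\n",
                               info.id, info.type,
                               info.tag[0], info.tag[1], info.tag[2], info.tag[3],
                               info.tag[4], info.tag[5], info.tag[6], info.tag[7]);
                close(fd);
        }
        return 0;
}

bpftool's prog/map listing is built on the same ID-walking pattern, with the BPF_MAP_GET_NEXT_ID and BPF_MAP_GET_FD_BY_ID commands used for maps.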
MAINTAINERS
@@ -2727,7 +2727,6 @@ F: Documentation/networking/filter.txt
 F: Documentation/bpf/
 F: include/linux/bpf*
 F: include/linux/filter.h
-F: include/trace/events/bpf.h
 F: include/trace/events/xdp.h
 F: include/uapi/linux/bpf*
 F: include/uapi/linux/filter.h
include/linux/bpf_trace.h
@@ -2,7 +2,6 @@
 #ifndef __LINUX_BPF_TRACE_H__
 #define __LINUX_BPF_TRACE_H__
-#include <trace/events/bpf.h>
 #include <trace/events/xdp.h>
 #endif /* __LINUX_BPF_TRACE_H__ */
include/trace/events/bpf.h (deleted in full by this patch)
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bpf
#if !defined(_TRACE_BPF_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BPF_H
/* These are only used within the BPF_SYSCALL code */
#ifdef CONFIG_BPF_SYSCALL
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/fs.h>
#include <linux/tracepoint.h>
#define __PROG_TYPE_MAP(FN) \
FN(SOCKET_FILTER) \
FN(KPROBE) \
FN(SCHED_CLS) \
FN(SCHED_ACT) \
FN(TRACEPOINT) \
FN(XDP) \
FN(PERF_EVENT) \
FN(CGROUP_SKB) \
FN(CGROUP_SOCK) \
FN(LWT_IN) \
FN(LWT_OUT) \
FN(LWT_XMIT)
#define __MAP_TYPE_MAP(FN) \
FN(HASH) \
FN(ARRAY) \
FN(PROG_ARRAY) \
FN(PERF_EVENT_ARRAY) \
FN(PERCPU_HASH) \
FN(PERCPU_ARRAY) \
FN(STACK_TRACE) \
FN(CGROUP_ARRAY) \
FN(LRU_HASH) \
FN(LRU_PERCPU_HASH) \
FN(LPM_TRIE)
#define __PROG_TYPE_TP_FN(x) \
TRACE_DEFINE_ENUM(BPF_PROG_TYPE_##x);
#define __PROG_TYPE_SYM_FN(x) \
{ BPF_PROG_TYPE_##x, #x },
#define __PROG_TYPE_SYM_TAB \
__PROG_TYPE_MAP(__PROG_TYPE_SYM_FN) { -1, 0 }
__PROG_TYPE_MAP(__PROG_TYPE_TP_FN)
#define __MAP_TYPE_TP_FN(x) \
TRACE_DEFINE_ENUM(BPF_MAP_TYPE_##x);
#define __MAP_TYPE_SYM_FN(x) \
{ BPF_MAP_TYPE_##x, #x },
#define __MAP_TYPE_SYM_TAB \
__MAP_TYPE_MAP(__MAP_TYPE_SYM_FN) { -1, 0 }
__MAP_TYPE_MAP(__MAP_TYPE_TP_FN)
DECLARE_EVENT_CLASS(bpf_prog_event,
TP_PROTO(const struct bpf_prog *prg),
TP_ARGS(prg),
TP_STRUCT__entry(
__array(u8, prog_tag, 8)
__field(u32, type)
),
TP_fast_assign(
BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag));
memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag));
__entry->type = prg->type;
),
TP_printk("prog=%s type=%s",
__print_hex_str(__entry->prog_tag, 8),
__print_symbolic(__entry->type, __PROG_TYPE_SYM_TAB))
);
DEFINE_EVENT(bpf_prog_event, bpf_prog_get_type,
TP_PROTO(const struct bpf_prog *prg),
TP_ARGS(prg)
);
DEFINE_EVENT(bpf_prog_event, bpf_prog_put_rcu,
TP_PROTO(const struct bpf_prog *prg),
TP_ARGS(prg)
);
TRACE_EVENT(bpf_prog_load,
TP_PROTO(const struct bpf_prog *prg, int ufd),
TP_ARGS(prg, ufd),
TP_STRUCT__entry(
__array(u8, prog_tag, 8)
__field(u32, type)
__field(int, ufd)
),
TP_fast_assign(
BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag));
memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag));
__entry->type = prg->type;
__entry->ufd = ufd;
),
TP_printk("prog=%s type=%s ufd=%d",
__print_hex_str(__entry->prog_tag, 8),
__print_symbolic(__entry->type, __PROG_TYPE_SYM_TAB),
__entry->ufd)
);
TRACE_EVENT(bpf_map_create,
TP_PROTO(const struct bpf_map *map, int ufd),
TP_ARGS(map, ufd),
TP_STRUCT__entry(
__field(u32, type)
__field(u32, size_key)
__field(u32, size_value)
__field(u32, max_entries)
__field(u32, flags)
__field(int, ufd)
),
TP_fast_assign(
__entry->type = map->map_type;
__entry->size_key = map->key_size;
__entry->size_value = map->value_size;
__entry->max_entries = map->max_entries;
__entry->flags = map->map_flags;
__entry->ufd = ufd;
),
TP_printk("map type=%s ufd=%d key=%u val=%u max=%u flags=%x",
__print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
__entry->ufd, __entry->size_key, __entry->size_value,
__entry->max_entries, __entry->flags)
);
DECLARE_EVENT_CLASS(bpf_obj_prog,
TP_PROTO(const struct bpf_prog *prg, int ufd,
const struct filename *pname),
TP_ARGS(prg, ufd, pname),
TP_STRUCT__entry(
__array(u8, prog_tag, 8)
__field(int, ufd)
__string(path, pname->name)
),
TP_fast_assign(
BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag));
memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag));
__assign_str(path, pname->name);
__entry->ufd = ufd;
),
TP_printk("prog=%s path=%s ufd=%d",
__print_hex_str(__entry->prog_tag, 8),
__get_str(path), __entry->ufd)
);
DEFINE_EVENT(bpf_obj_prog, bpf_obj_pin_prog,
TP_PROTO(const struct bpf_prog *prg, int ufd,
const struct filename *pname),
TP_ARGS(prg, ufd, pname)
);
DEFINE_EVENT(bpf_obj_prog, bpf_obj_get_prog,
TP_PROTO(const struct bpf_prog *prg, int ufd,
const struct filename *pname),
TP_ARGS(prg, ufd, pname)
);
DECLARE_EVENT_CLASS(bpf_obj_map,
TP_PROTO(const struct bpf_map *map, int ufd,
const struct filename *pname),
TP_ARGS(map, ufd, pname),
TP_STRUCT__entry(
__field(u32, type)
__field(int, ufd)
__string(path, pname->name)
),
TP_fast_assign(
__assign_str(path, pname->name);
__entry->type = map->map_type;
__entry->ufd = ufd;
),
TP_printk("map type=%s ufd=%d path=%s",
__print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
__entry->ufd, __get_str(path))
);
DEFINE_EVENT(bpf_obj_map, bpf_obj_pin_map,
TP_PROTO(const struct bpf_map *map, int ufd,
const struct filename *pname),
TP_ARGS(map, ufd, pname)
);
DEFINE_EVENT(bpf_obj_map, bpf_obj_get_map,
TP_PROTO(const struct bpf_map *map, int ufd,
const struct filename *pname),
TP_ARGS(map, ufd, pname)
);
DECLARE_EVENT_CLASS(bpf_map_keyval,
TP_PROTO(const struct bpf_map *map, int ufd,
const void *key, const void *val),
TP_ARGS(map, ufd, key, val),
TP_STRUCT__entry(
__field(u32, type)
__field(u32, key_len)
__dynamic_array(u8, key, map->key_size)
__field(bool, key_trunc)
__field(u32, val_len)
__dynamic_array(u8, val, map->value_size)
__field(bool, val_trunc)
__field(int, ufd)
),
TP_fast_assign(
memcpy(__get_dynamic_array(key), key, map->key_size);
memcpy(__get_dynamic_array(val), val, map->value_size);
__entry->type = map->map_type;
__entry->key_len = min(map->key_size, 16U);
__entry->key_trunc = map->key_size != __entry->key_len;
__entry->val_len = min(map->value_size, 16U);
__entry->val_trunc = map->value_size != __entry->val_len;
__entry->ufd = ufd;
),
TP_printk("map type=%s ufd=%d key=[%s%s] val=[%s%s]",
__print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
__entry->ufd,
__print_hex(__get_dynamic_array(key), __entry->key_len),
__entry->key_trunc ? " ..." : "",
__print_hex(__get_dynamic_array(val), __entry->val_len),
__entry->val_trunc ? " ..." : "")
);
DEFINE_EVENT(bpf_map_keyval, bpf_map_lookup_elem,
TP_PROTO(const struct bpf_map *map, int ufd,
const void *key, const void *val),
TP_ARGS(map, ufd, key, val)
);
DEFINE_EVENT(bpf_map_keyval, bpf_map_update_elem,
TP_PROTO(const struct bpf_map *map, int ufd,
const void *key, const void *val),
TP_ARGS(map, ufd, key, val)
);
TRACE_EVENT(bpf_map_delete_elem,
TP_PROTO(const struct bpf_map *map, int ufd,
const void *key),
TP_ARGS(map, ufd, key),
TP_STRUCT__entry(
__field(u32, type)
__field(u32, key_len)
__dynamic_array(u8, key, map->key_size)
__field(bool, key_trunc)
__field(int, ufd)
),
TP_fast_assign(
memcpy(__get_dynamic_array(key), key, map->key_size);
__entry->type = map->map_type;
__entry->key_len = min(map->key_size, 16U);
__entry->key_trunc = map->key_size != __entry->key_len;
__entry->ufd = ufd;
),
TP_printk("map type=%s ufd=%d key=[%s%s]",
__print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
__entry->ufd,
__print_hex(__get_dynamic_array(key), __entry->key_len),
__entry->key_trunc ? " ..." : "")
);
TRACE_EVENT(bpf_map_next_key,
TP_PROTO(const struct bpf_map *map, int ufd,
const void *key, const void *key_next),
TP_ARGS(map, ufd, key, key_next),
TP_STRUCT__entry(
__field(u32, type)
__field(u32, key_len)
__dynamic_array(u8, key, map->key_size)
__dynamic_array(u8, nxt, map->key_size)
__field(bool, key_trunc)
__field(bool, key_null)
__field(int, ufd)
),
TP_fast_assign(
if (key)
memcpy(__get_dynamic_array(key), key, map->key_size);
__entry->key_null = !key;
memcpy(__get_dynamic_array(nxt), key_next, map->key_size);
__entry->type = map->map_type;
__entry->key_len = min(map->key_size, 16U);
__entry->key_trunc = map->key_size != __entry->key_len;
__entry->ufd = ufd;
),
TP_printk("map type=%s ufd=%d key=[%s%s] next=[%s%s]",
__print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
__entry->ufd,
__entry->key_null ? "NULL" : __print_hex(__get_dynamic_array(key),
__entry->key_len),
__entry->key_trunc && !__entry->key_null ? " ..." : "",
__print_hex(__get_dynamic_array(nxt), __entry->key_len),
__entry->key_trunc ? " ..." : "")
);
#endif /* CONFIG_BPF_SYSCALL */
#endif /* _TRACE_BPF_H */
#include <trace/define_trace.h>
kernel/bpf/core.c
@@ -1845,9 +1845,3 @@ int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
 #include <linux/bpf_trace.h>
 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
-/* These are only used within the BPF_SYSCALL code */
-#ifdef CONFIG_BPF_SYSCALL
-EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_get_type);
-EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_put_rcu);
-#endif
kernel/bpf/inode.c
@@ -429,13 +429,6 @@ int bpf_obj_pin_user(u32 ufd, const char __user *pathname)
         ret = bpf_obj_do_pin(pname, raw, type);
         if (ret != 0)
                 bpf_any_put(raw, type);
-        if ((trace_bpf_obj_pin_prog_enabled() ||
-             trace_bpf_obj_pin_map_enabled()) && !ret) {
-                if (type == BPF_TYPE_PROG)
-                        trace_bpf_obj_pin_prog(raw, ufd, pname);
-                if (type == BPF_TYPE_MAP)
-                        trace_bpf_obj_pin_map(raw, ufd, pname);
-        }
 out:
         putname(pname);
         return ret;
@@ -502,15 +495,8 @@ int bpf_obj_get_user(const char __user *pathname, int flags)
         else
                 goto out;
-        if (ret < 0) {
+        if (ret < 0)
                 bpf_any_put(raw, type);
-        } else if (trace_bpf_obj_get_prog_enabled() ||
-                   trace_bpf_obj_get_map_enabled()) {
-                if (type == BPF_TYPE_PROG)
-                        trace_bpf_obj_get_prog(raw, ret, pname);
-                if (type == BPF_TYPE_MAP)
-                        trace_bpf_obj_get_map(raw, ret, pname);
-        }
 out:
         putname(pname);
         return ret;
kernel/bpf/syscall.c
@@ -503,7 +503,6 @@ static int map_create(union bpf_attr *attr)
                 return err;
         }
-        trace_bpf_map_create(map, err);
         return err;
 free_map:
@@ -663,7 +662,6 @@ static int map_lookup_elem(union bpf_attr *attr)
         if (copy_to_user(uvalue, value, value_size) != 0)
                 goto free_value;
-        trace_bpf_map_lookup_elem(map, ufd, key, value);
         err = 0;
 free_value:
@@ -760,8 +758,6 @@ static int map_update_elem(union bpf_attr *attr)
         __this_cpu_dec(bpf_prog_active);
         preempt_enable();
 out:
-        if (!err)
-                trace_bpf_map_update_elem(map, ufd, key, value);
 free_value:
         kfree(value);
 free_key:
@@ -814,8 +810,6 @@ static int map_delete_elem(union bpf_attr *attr)
         __this_cpu_dec(bpf_prog_active);
         preempt_enable();
 out:
-        if (!err)
-                trace_bpf_map_delete_elem(map, ufd, key);
         kfree(key);
 err_put:
         fdput(f);
@@ -879,7 +873,6 @@ static int map_get_next_key(union bpf_attr *attr)
         if (copy_to_user(unext_key, next_key, map->key_size) != 0)
                 goto free_next_key;
-        trace_bpf_map_next_key(map, ufd, key, next_key);
         err = 0;
 free_next_key:
@@ -1027,7 +1020,6 @@ static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
         if (atomic_dec_and_test(&prog->aux->refcnt)) {
                 int i;
-                trace_bpf_prog_put_rcu(prog);
                 /* bpf_prog_free_id() must be called first */
                 bpf_prog_free_id(prog, do_idr_lock);
@@ -1194,11 +1186,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd)
 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
                                        bool attach_drv)
 {
-        struct bpf_prog *prog = __bpf_prog_get(ufd, &type, attach_drv);
-        if (!IS_ERR(prog))
-                trace_bpf_prog_get_type(prog);
-        return prog;
+        return __bpf_prog_get(ufd, &type, attach_drv);
 }
 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
@@ -1373,7 +1361,6 @@ static int bpf_prog_load(union bpf_attr *attr)
         }
         bpf_prog_kallsyms_add(prog);
-        trace_bpf_prog_load(prog, err);
         return err;
 free_used_maps: