Commit 9e4dc892 authored by Alexei Starovoitov's avatar Alexei Starovoitov

Merge branch 'Forbid bpf_ktime_get_coarse_ns and bpf_timer_* in tracing progs'

Dmitrii Banshchikov says:

====================

Various locking issues are possible with bpf_ktime_get_coarse_ns() and
bpf_timer_* set of helpers.

syzbot found a locking issue with bpf_ktime_get_coarse_ns() helper executed in
BPF_PROG_TYPE_PERF_EVENT prog type - [1]. The issue is possible because the
helper uses a non-fast version of the time accessor, which is not safe in every context.
The helper was added because it provided performance benefits in comparison to
bpf_ktime_get_ns() helper.

A similar locking issue is possible with bpf_timer_* set of helpers when used
in tracing progs.

The solution is to restrict use of the helpers in tracing progs.

In the [1] discussion it was stated that bpf_spin_lock related helpers shall
also be excluded for tracing progs. The verifier has a compatibility check
between a map and a program. If a tracing program tries to use a map whose
value contains a struct bpf_spin_lock, the verifier rejects it — which is why
bpf_spin_lock is effectively already restricted for tracing progs.

Patch 1 restricts helpers
Patch 2 adds tests

v1 -> v2:
 * Limit the helpers via func proto getters instead of allowed callback
 * Add note about helpers' restrictions to linux/bpf.h
 * Add Fixes tag
 * Remove extra \0 from btf_str_sec
 * Beside asm tests add prog tests
 * Trim CC

1. https://lore.kernel.org/all/00000000000013aebd05cff8e064@google.com/
====================
Signed-off-by: default avatarAlexei Starovoitov <ast@kernel.org>
parents ba05fd36 e60e6962
...@@ -1809,6 +1809,8 @@ sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ...@@ -1809,6 +1809,8 @@ sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_sysctl_get_new_value_proto; return &bpf_sysctl_get_new_value_proto;
case BPF_FUNC_sysctl_set_new_value: case BPF_FUNC_sysctl_set_new_value:
return &bpf_sysctl_set_new_value_proto; return &bpf_sysctl_set_new_value_proto;
case BPF_FUNC_ktime_get_coarse_ns:
return &bpf_ktime_get_coarse_ns_proto;
default: default:
return cgroup_base_func_proto(func_id, prog); return cgroup_base_func_proto(func_id, prog);
} }
......
...@@ -1364,8 +1364,6 @@ bpf_base_func_proto(enum bpf_func_id func_id) ...@@ -1364,8 +1364,6 @@ bpf_base_func_proto(enum bpf_func_id func_id)
return &bpf_ktime_get_ns_proto; return &bpf_ktime_get_ns_proto;
case BPF_FUNC_ktime_get_boot_ns: case BPF_FUNC_ktime_get_boot_ns:
return &bpf_ktime_get_boot_ns_proto; return &bpf_ktime_get_boot_ns_proto;
case BPF_FUNC_ktime_get_coarse_ns:
return &bpf_ktime_get_coarse_ns_proto;
case BPF_FUNC_ringbuf_output: case BPF_FUNC_ringbuf_output:
return &bpf_ringbuf_output_proto; return &bpf_ringbuf_output_proto;
case BPF_FUNC_ringbuf_reserve: case BPF_FUNC_ringbuf_reserve:
......
...@@ -11632,6 +11632,13 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env, ...@@ -11632,6 +11632,13 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
} }
} }
if (map_value_has_timer(map)) {
if (is_tracing_prog_type(prog_type)) {
verbose(env, "tracing progs cannot use bpf_timer yet\n");
return -EINVAL;
}
}
if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) && if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
!bpf_offload_prog_map_match(prog, map)) { !bpf_offload_prog_map_match(prog, map)) {
verbose(env, "offload device mismatch between prog and map\n"); verbose(env, "offload device mismatch between prog and map\n");
......
...@@ -1111,8 +1111,6 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ...@@ -1111,8 +1111,6 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_ktime_get_ns_proto; return &bpf_ktime_get_ns_proto;
case BPF_FUNC_ktime_get_boot_ns: case BPF_FUNC_ktime_get_boot_ns:
return &bpf_ktime_get_boot_ns_proto; return &bpf_ktime_get_boot_ns_proto;
case BPF_FUNC_ktime_get_coarse_ns:
return &bpf_ktime_get_coarse_ns_proto;
case BPF_FUNC_tail_call: case BPF_FUNC_tail_call:
return &bpf_tail_call_proto; return &bpf_tail_call_proto;
case BPF_FUNC_get_current_pid_tgid: case BPF_FUNC_get_current_pid_tgid:
......
...@@ -7162,6 +7162,8 @@ sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ...@@ -7162,6 +7162,8 @@ sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
#endif #endif
case BPF_FUNC_sk_storage_get: case BPF_FUNC_sk_storage_get:
return &bpf_sk_storage_get_cg_sock_proto; return &bpf_sk_storage_get_cg_sock_proto;
case BPF_FUNC_ktime_get_coarse_ns:
return &bpf_ktime_get_coarse_ns_proto;
default: default:
return bpf_base_func_proto(func_id); return bpf_base_func_proto(func_id);
} }
...@@ -10327,6 +10329,8 @@ sk_reuseport_func_proto(enum bpf_func_id func_id, ...@@ -10327,6 +10329,8 @@ sk_reuseport_func_proto(enum bpf_func_id func_id,
return &sk_reuseport_load_bytes_relative_proto; return &sk_reuseport_load_bytes_relative_proto;
case BPF_FUNC_get_socket_cookie: case BPF_FUNC_get_socket_cookie:
return &bpf_get_socket_ptr_cookie_proto; return &bpf_get_socket_ptr_cookie_proto;
case BPF_FUNC_ktime_get_coarse_ns:
return &bpf_ktime_get_coarse_ns_proto;
default: default:
return bpf_base_func_proto(func_id); return bpf_base_func_proto(func_id);
} }
...@@ -10833,6 +10837,8 @@ bpf_sk_base_func_proto(enum bpf_func_id func_id) ...@@ -10833,6 +10837,8 @@ bpf_sk_base_func_proto(enum bpf_func_id func_id)
case BPF_FUNC_skc_to_unix_sock: case BPF_FUNC_skc_to_unix_sock:
func = &bpf_skc_to_unix_sock_proto; func = &bpf_skc_to_unix_sock_proto;
break; break;
case BPF_FUNC_ktime_get_coarse_ns:
return &bpf_ktime_get_coarse_ns_proto;
default: default:
return bpf_base_func_proto(func_id); return bpf_base_func_proto(func_id);
} }
......
...@@ -205,6 +205,8 @@ bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id, ...@@ -205,6 +205,8 @@ bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
offsetof(struct tcp_congestion_ops, release)) offsetof(struct tcp_congestion_ops, release))
return &bpf_sk_getsockopt_proto; return &bpf_sk_getsockopt_proto;
return NULL; return NULL;
case BPF_FUNC_ktime_get_coarse_ns:
return &bpf_ktime_get_coarse_ns_proto;
default: default:
return bpf_base_func_proto(func_id); return bpf_base_func_proto(func_id);
} }
......
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "test_helper_restricted.skel.h"
/* Load each program of the skeleton in isolation and verify that the
 * verifier rejects it: every prog uses a helper that is restricted for
 * tracing program types.
 */
void test_helper_restricted(void)
{
	int idx = 0, total_progs;
	int duration = 0;	/* consumed implicitly by the CHECK() macro */

	do {
		struct test_helper_restricted *skel;
		int err;

		skel = test_helper_restricted__open();
		if (!ASSERT_OK_PTR(skel, "open"))
			return;

		total_progs = skel->skeleton->prog_cnt;

		/* Autoload only the prog under test this iteration. */
		for (int i = 0; i < total_progs; ++i) {
			struct bpf_program *prog = *skel->skeleton->progs[i].prog;

			err = bpf_program__set_autoload(prog, idx == i);
			ASSERT_OK(err, "set autoload");
		}

		/* Load must FAIL: a successful load means the helper was
		 * not restricted as expected.
		 */
		err = test_helper_restricted__load(skel);
		CHECK(!err, skel->skeleton->progs[idx].name, "helper isn't restricted");

		test_helper_restricted__destroy(skel);
	} while (++idx < total_progs);
}
// SPDX-License-Identifier: GPL-2.0-only
#include <time.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
/* Map value wrapping a bpf_timer: timer helpers require the timer to be
 * embedded in the map value type.
 */
struct timer {
	struct bpf_timer t;
};

/* Map value wrapping a bpf_spin_lock, analogous to struct timer above. */
struct lock {
	struct bpf_spin_lock l;
};

/* Single-slot array whose value carries a bpf_timer. */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct timer);
} timers SEC(".maps");

/* Single-slot array whose value carries a bpf_spin_lock. */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct lock);
} locks SEC(".maps");
/* No-op timer callback; it exists only so bpf_timer_set_callback() can be
 * exercised from the programs below (which are all expected to be rejected
 * by the verifier before this ever runs).
 */
static int timer_cb(void *map, int *key, struct timer *timer)
{
	return 0;
}
/* Exercise the full bpf_timer_* helper set on the timer stored in the
 * "timers" map. Tracing progs calling this are expected to fail to load.
 */
static void timer_work(void)
{
	const int zero = 0;
	struct timer *val;

	val = bpf_map_lookup_elem(&timers, &zero);
	if (!val)
		return;

	bpf_timer_init(&val->t, &timers, CLOCK_MONOTONIC);
	bpf_timer_set_callback(&val->t, timer_cb);
	bpf_timer_start(&val->t, 10E9, 0);
	bpf_timer_cancel(&val->t);
}
/* Take and immediately release the spin lock stored in the "locks" map.
 * Tracing progs calling this are expected to fail verification.
 */
static void spin_lock_work(void)
{
	const int zero = 0;
	struct lock *val;

	val = bpf_map_lookup_elem(&locks, &zero);
	if (!val)
		return;

	bpf_spin_lock(&val->l);
	bpf_spin_unlock(&val->l);
}
/* One program per tracing prog type (raw_tp, tp, kprobe, perf_event) for
 * each restricted feature. All eight are expected to be REJECTED by the
 * verifier; the userspace test loads them one at a time and asserts the
 * load fails.
 */
SEC("raw_tp/sys_enter")
int raw_tp_timer(void *ctx)
{
	timer_work();
	return 0;
}

SEC("tp/syscalls/sys_enter_nanosleep")
int tp_timer(void *ctx)
{
	timer_work();
	return 0;
}

SEC("kprobe/sys_nanosleep")
int kprobe_timer(void *ctx)
{
	timer_work();
	return 0;
}

SEC("perf_event")
int perf_event_timer(void *ctx)
{
	timer_work();
	return 0;
}

SEC("raw_tp/sys_enter")
int raw_tp_spin_lock(void *ctx)
{
	spin_lock_work();
	return 0;
}

SEC("tp/syscalls/sys_enter_nanosleep")
int tp_spin_lock(void *ctx)
{
	spin_lock_work();
	return 0;
}

SEC("kprobe/sys_nanosleep")
int kprobe_spin_lock(void *ctx)
{
	spin_lock_work();
	return 0;
}

SEC("perf_event")
int perf_event_spin_lock(void *ctx)
{
	spin_lock_work();
	return 0;
}

const char LICENSE[] SEC("license") = "GPL";
...@@ -92,6 +92,7 @@ struct bpf_test { ...@@ -92,6 +92,7 @@ struct bpf_test {
int fixup_map_event_output[MAX_FIXUPS]; int fixup_map_event_output[MAX_FIXUPS];
int fixup_map_reuseport_array[MAX_FIXUPS]; int fixup_map_reuseport_array[MAX_FIXUPS];
int fixup_map_ringbuf[MAX_FIXUPS]; int fixup_map_ringbuf[MAX_FIXUPS];
int fixup_map_timer[MAX_FIXUPS];
/* Expected verifier log output for result REJECT or VERBOSE_ACCEPT. /* Expected verifier log output for result REJECT or VERBOSE_ACCEPT.
* Can be a tab-separated sequence of expected strings. An empty string * Can be a tab-separated sequence of expected strings. An empty string
* means no log verification. * means no log verification.
...@@ -604,8 +605,15 @@ static int create_cgroup_storage(bool percpu) ...@@ -604,8 +605,15 @@ static int create_cgroup_storage(bool percpu)
* int cnt; * int cnt;
* struct bpf_spin_lock l; * struct bpf_spin_lock l;
* }; * };
* struct bpf_timer {
* __u64 :64;
* __u64 :64;
* } __attribute__((aligned(8)));
* struct timer {
* struct bpf_timer t;
* };
*/ */
static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l"; static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l\0bpf_timer\0timer\0t";
static __u32 btf_raw_types[] = { static __u32 btf_raw_types[] = {
/* int */ /* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
...@@ -616,6 +624,11 @@ static __u32 btf_raw_types[] = { ...@@ -616,6 +624,11 @@ static __u32 btf_raw_types[] = {
BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8), BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */ BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */ BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
/* struct bpf_timer */ /* [4] */
BTF_TYPE_ENC(25, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0), 16),
/* struct timer */ /* [5] */
BTF_TYPE_ENC(35, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16),
BTF_MEMBER_ENC(41, 4, 0), /* struct bpf_timer t; */
}; };
static int load_btf(void) static int load_btf(void)
...@@ -696,6 +709,29 @@ static int create_sk_storage_map(void) ...@@ -696,6 +709,29 @@ static int create_sk_storage_map(void)
return fd; return fd;
} }
/* Create a 1-entry ARRAY map whose 16-byte value is BTF-typed as
 * struct timer (BTF type id 5, key typed as int, id 1), so the verifier's
 * map_value_has_timer() check fires for progs using this map.
 * Returns the map fd, or -1 on failure.
 */
static int create_map_timer(void)
{
	int btf_fd, map_fd;
	struct bpf_create_map_attr attr = {
		.name = "test_map",
		.map_type = BPF_MAP_TYPE_ARRAY,
		.key_size = 4,
		.value_size = 16,
		.max_entries = 1,
		.btf_key_type_id = 1,	/* int */
		.btf_value_type_id = 5,	/* struct timer */
	};

	btf_fd = load_btf();
	if (btf_fd < 0)
		return -1;

	attr.btf_fd = btf_fd;
	map_fd = bpf_create_map_xattr(&attr);
	if (map_fd < 0)
		printf("Failed to create map with timer\n");

	return map_fd;
}
static char bpf_vlog[UINT_MAX >> 8]; static char bpf_vlog[UINT_MAX >> 8];
static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type, static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
...@@ -722,6 +758,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type, ...@@ -722,6 +758,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
int *fixup_map_event_output = test->fixup_map_event_output; int *fixup_map_event_output = test->fixup_map_event_output;
int *fixup_map_reuseport_array = test->fixup_map_reuseport_array; int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
int *fixup_map_ringbuf = test->fixup_map_ringbuf; int *fixup_map_ringbuf = test->fixup_map_ringbuf;
int *fixup_map_timer = test->fixup_map_timer;
if (test->fill_helper) { if (test->fill_helper) {
test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn)); test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
...@@ -907,6 +944,13 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type, ...@@ -907,6 +944,13 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
fixup_map_ringbuf++; fixup_map_ringbuf++;
} while (*fixup_map_ringbuf); } while (*fixup_map_ringbuf);
} }
if (*fixup_map_timer) {
map_fds[21] = create_map_timer();
do {
prog[*fixup_map_timer].imm = map_fds[21];
fixup_map_timer++;
} while (*fixup_map_timer);
}
} }
struct libcap { struct libcap {
......
{
"bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_KPROBE",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "unknown func bpf_ktime_get_coarse_ns",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_KPROBE,
},
{
"bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_TRACEPOINT",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "unknown func bpf_ktime_get_coarse_ns",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_PERF_EVENT",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "unknown func bpf_ktime_get_coarse_ns",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
"bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "unknown func bpf_ktime_get_coarse_ns",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT,
},
{
	/* Name fixed: was the garbled "isn restricted"; now consistent with
	 * the sibling bpf_timer_init entries for the other prog types.
	 */
	"bpf_timer_init is forbidden in BPF_PROG_TYPE_KPROBE",
	.insns = {
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_3, 1),
	BPF_EMIT_CALL(BPF_FUNC_timer_init),
	BPF_EXIT_INSN(),
	},
	.fixup_map_timer = { 3, 8 },
	.errstr = "tracing progs cannot use bpf_timer yet",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_KPROBE,
},
{
"bpf_timer_init is forbidden in BPF_PROG_TYPE_PERF_EVENT",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_3, 1),
BPF_EMIT_CALL(BPF_FUNC_timer_init),
BPF_EXIT_INSN(),
},
.fixup_map_timer = { 3, 8 },
.errstr = "tracing progs cannot use bpf_timer yet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
"bpf_timer_init is forbidden in BPF_PROG_TYPE_TRACEPOINT",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_3, 1),
BPF_EMIT_CALL(BPF_FUNC_timer_init),
BPF_EXIT_INSN(),
},
.fixup_map_timer = { 3, 8 },
.errstr = "tracing progs cannot use bpf_timer yet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"bpf_timer_init is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LD_MAP_FD(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_3, 1),
BPF_EMIT_CALL(BPF_FUNC_timer_init),
BPF_EXIT_INSN(),
},
.fixup_map_timer = { 3, 8 },
.errstr = "tracing progs cannot use bpf_timer yet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT,
},
{
"bpf_spin_lock is forbidden in BPF_PROG_TYPE_KPROBE",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_EMIT_CALL(BPF_FUNC_spin_lock),
BPF_EXIT_INSN(),
},
.fixup_map_spin_lock = { 3 },
.errstr = "tracing progs cannot use bpf_spin_lock yet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_KPROBE,
},
{
"bpf_spin_lock is forbidden in BPF_PROG_TYPE_TRACEPOINT",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_EMIT_CALL(BPF_FUNC_spin_lock),
BPF_EXIT_INSN(),
},
.fixup_map_spin_lock = { 3 },
.errstr = "tracing progs cannot use bpf_spin_lock yet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"bpf_spin_lock is forbidden in BPF_PROG_TYPE_PERF_EVENT",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_EMIT_CALL(BPF_FUNC_spin_lock),
BPF_EXIT_INSN(),
},
.fixup_map_spin_lock = { 3 },
.errstr = "tracing progs cannot use bpf_spin_lock yet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_PERF_EVENT,
},
{
"bpf_spin_lock is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_EMIT_CALL(BPF_FUNC_spin_lock),
BPF_EXIT_INSN(),
},
.fixup_map_spin_lock = { 3 },
.errstr = "tracing progs cannot use bpf_spin_lock yet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT,
},
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment