Commit 5640a771 authored by Alexei Starovoitov

Merge branch 'bpf_send_signal_thread'

Yonghong Song says:

====================
Commit 8b401f9e ("bpf: implement bpf_send_signal() helper")
added helper bpf_send_signal() which permits bpf program to
send a signal to the current process. The signal may send to
any thread of the process.

This patch set adds a new helper, bpf_send_signal_thread(), which
sends a signal to the thread corresponding to the kernel current
task. This helper simplifies user space code when the thread context
of the bpf-sent signal is needed in user space. Please see Patch #1
for details of the use case and kernel implementation.

Patch #2 adds bpf selftests for the new helper.

Changelogs:
  v2 -> v3:
    - Further simplified the skeleton code by removing unneeded
      mmap code and a redundantly created tracepoint link.
  v1 -> v2:
    - Expanded the description of the difference between
      bpf_send_signal() and bpf_send_signal_thread() in the uapi
      header bpf.h.
    - Used skeleton and mmap for the send_signal test.
====================
Acked-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents d3a56931 ab8b7f0c

@@ -2714,7 +2714,8 @@ union bpf_attr {
  *
  * int bpf_send_signal(u32 sig)
  *    Description
- *        Send signal *sig* to the current task.
+ *        Send signal *sig* to the process of the current task.
+ *        The signal may be delivered to any of this process's threads.
  *    Return
  *        0 on success or successfully queued.
  *

@@ -2850,6 +2851,19 @@ union bpf_attr {
  *    Return
  *        0 on success, or a negative error in case of failure.
  *
+ * int bpf_send_signal_thread(u32 sig)
+ *    Description
+ *        Send signal *sig* to the thread corresponding to the current task.
+ *    Return
+ *        0 on success or successfully queued.
+ *
+ *        **-EBUSY** if work queue under nmi is full.
+ *
+ *        **-EINVAL** if *sig* is invalid.
+ *
+ *        **-EPERM** if no permission to send the *sig*.
+ *
+ *        **-EAGAIN** if bpf program can try again.
  */
 #define __BPF_FUNC_MAPPER(FN) \
     FN(unspec), \

@@ -2968,7 +2982,8 @@ union bpf_attr {
     FN(probe_read_kernel), \
     FN(probe_read_user_str), \
     FN(probe_read_kernel_str), \
-    FN(tcp_send_ack),
+    FN(tcp_send_ack), \
+    FN(send_signal_thread),
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call

...

@@ -703,6 +703,7 @@ struct send_signal_irq_work {
     struct irq_work irq_work;
     struct task_struct *task;
     u32 sig;
+    enum pid_type type;
 };
 static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

@@ -712,10 +713,10 @@ static void do_bpf_send_signal(struct irq_work *entry)
     struct send_signal_irq_work *work;
     work = container_of(entry, struct send_signal_irq_work, irq_work);
-    group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, PIDTYPE_TGID);
+    group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
 }
-BPF_CALL_1(bpf_send_signal, u32, sig)
+static int bpf_send_signal_common(u32 sig, enum pid_type type)
 {
     struct send_signal_irq_work *work = NULL;

@@ -748,11 +749,17 @@ BPF_CALL_1(bpf_send_signal, u32, sig)
          */
         work->task = current;
         work->sig = sig;
+        work->type = type;
         irq_work_queue(&work->irq_work);
         return 0;
     }
-    return group_send_sig_info(sig, SEND_SIG_PRIV, current, PIDTYPE_TGID);
+    return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
+}
+
+BPF_CALL_1(bpf_send_signal, u32, sig)
+{
+    return bpf_send_signal_common(sig, PIDTYPE_TGID);
 }
 static const struct bpf_func_proto bpf_send_signal_proto = {

@@ -762,6 +769,18 @@ static const struct bpf_func_proto bpf_send_signal_proto = {
     .arg1_type = ARG_ANYTHING,
 };
+BPF_CALL_1(bpf_send_signal_thread, u32, sig)
+{
+    return bpf_send_signal_common(sig, PIDTYPE_PID);
+}
+
+static const struct bpf_func_proto bpf_send_signal_thread_proto = {
+    .func = bpf_send_signal_thread,
+    .gpl_only = false,
+    .ret_type = RET_INTEGER,
+    .arg1_type = ARG_ANYTHING,
+};
+
 static const struct bpf_func_proto *
 tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {

@@ -822,6 +841,8 @@ tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 #endif
     case BPF_FUNC_send_signal:
         return &bpf_send_signal_proto;
+    case BPF_FUNC_send_signal_thread:
+        return &bpf_send_signal_thread_proto;
     default:
         return NULL;
     }

...

@@ -2714,7 +2714,8 @@ union bpf_attr {
  *
  * int bpf_send_signal(u32 sig)
  *    Description
- *        Send signal *sig* to the current task.
+ *        Send signal *sig* to the process of the current task.
+ *        The signal may be delivered to any of this process's threads.
  *    Return
  *        0 on success or successfully queued.
  *

@@ -2850,6 +2851,19 @@ union bpf_attr {
  *    Return
  *        0 on success, or a negative error in case of failure.
  *
+ * int bpf_send_signal_thread(u32 sig)
+ *    Description
+ *        Send signal *sig* to the thread corresponding to the current task.
+ *    Return
+ *        0 on success or successfully queued.
+ *
+ *        **-EBUSY** if work queue under nmi is full.
+ *
+ *        **-EINVAL** if *sig* is invalid.
+ *
+ *        **-EPERM** if no permission to send the *sig*.
+ *
+ *        **-EAGAIN** if bpf program can try again.
  */
 #define __BPF_FUNC_MAPPER(FN) \
     FN(unspec), \

@@ -2968,7 +2982,8 @@ union bpf_attr {
     FN(probe_read_kernel), \
     FN(probe_read_user_str), \
     FN(probe_read_kernel_str), \
-    FN(tcp_send_ack),
+    FN(tcp_send_ack), \
+    FN(send_signal_thread),
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call

...

 // SPDX-License-Identifier: GPL-2.0
 #include <test_progs.h>
+#include "test_send_signal_kern.skel.h"
 static volatile int sigusr1_received = 0;

@@ -9,17 +10,15 @@ static void sigusr1_handler(int signum)
 }
 static void test_send_signal_common(struct perf_event_attr *attr,
-                                    int prog_type,
+                                    bool signal_thread,
                                     const char *test_name)
 {
-    int err = -1, pmu_fd, prog_fd, info_map_fd, status_map_fd;
-    const char *file = "./test_send_signal_kern.o";
-    struct bpf_object *obj = NULL;
+    struct test_send_signal_kern *skel;
     int pipe_c2p[2], pipe_p2c[2];
-    __u32 key = 0, duration = 0;
+    int err = -1, pmu_fd = -1;
+    __u32 duration = 0;
     char buf[256];
     pid_t pid;
-    __u64 val;
     if (CHECK(pipe(pipe_c2p), test_name,
               "pipe pipe_c2p error: %s\n", strerror(errno)))

@@ -73,45 +72,42 @@ static void test_send_signal_common(struct perf_event_attr *attr,
     close(pipe_c2p[1]); /* close write */
     close(pipe_p2c[0]); /* close read */
-    err = bpf_prog_load(file, prog_type, &obj, &prog_fd);
-    if (CHECK(err < 0, test_name, "bpf_prog_load error: %s\n",
-              strerror(errno)))
-        goto prog_load_failure;
-    pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1,
-                     -1 /* group id */, 0 /* flags */);
-    if (CHECK(pmu_fd < 0, test_name, "perf_event_open error: %s\n",
-              strerror(errno))) {
-        err = -1;
-        goto close_prog;
-    }
-    err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
-    if (CHECK(err < 0, test_name, "ioctl perf_event_ioc_enable error: %s\n",
-              strerror(errno)))
-        goto disable_pmu;
-    err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
-    if (CHECK(err < 0, test_name, "ioctl perf_event_ioc_set_bpf error: %s\n",
-              strerror(errno)))
-        goto disable_pmu;
-    err = -1;
-    info_map_fd = bpf_object__find_map_fd_by_name(obj, "info_map");
-    if (CHECK(info_map_fd < 0, test_name, "find map %s error\n", "info_map"))
-        goto disable_pmu;
-    status_map_fd = bpf_object__find_map_fd_by_name(obj, "status_map");
-    if (CHECK(status_map_fd < 0, test_name, "find map %s error\n", "status_map"))
-        goto disable_pmu;
+    skel = test_send_signal_kern__open_and_load();
+    if (CHECK(!skel, "skel_open_and_load", "skeleton open_and_load failed\n"))
+        goto skel_open_load_failure;
+    /* add a delay for child thread to ramp up */
+    usleep(100);
+    if (!attr) {
+        err = test_send_signal_kern__attach(skel);
+        if (CHECK(err, "skel_attach", "skeleton attach failed\n")) {
+            err = -1;
+            goto destroy_skel;
+        }
+    } else {
+        pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1,
+                         -1 /* group id */, 0 /* flags */);
+        if (CHECK(pmu_fd < 0, test_name, "perf_event_open error: %s\n",
+                  strerror(errno))) {
+            err = -1;
+            goto destroy_skel;
+        }
+        skel->links.send_signal_perf =
+            bpf_program__attach_perf_event(skel->progs.send_signal_perf, pmu_fd);
+        if (CHECK(IS_ERR(skel->links.send_signal_perf), "attach_perf_event",
+                  "err %ld\n", PTR_ERR(skel->links.send_signal_perf)))
+            goto disable_pmu;
+    }
     /* wait until child signal handler installed */
     read(pipe_c2p[0], buf, 1);
     /* trigger the bpf send_signal */
-    key = 0;
-    val = (((__u64)(SIGUSR1)) << 32) | pid;
-    bpf_map_update_elem(info_map_fd, &key, &val, 0);
+    skel->bss->pid = pid;
+    skel->bss->sig = SIGUSR1;
+    skel->bss->signal_thread = signal_thread;
     /* notify child that bpf program can send_signal now */
     write(pipe_p2c[1], buf, 1);

@@ -132,46 +128,20 @@ static void test_send_signal_common(struct perf_event_attr *attr,
 disable_pmu:
     close(pmu_fd);
-close_prog:
-    bpf_object__close(obj);
-prog_load_failure:
+destroy_skel:
+    test_send_signal_kern__destroy(skel);
+skel_open_load_failure:
     close(pipe_c2p[0]);
     close(pipe_p2c[1]);
     wait(NULL);
 }
-static void test_send_signal_tracepoint(void)
+static void test_send_signal_tracepoint(bool signal_thread)
 {
-    const char *id_path = "/sys/kernel/debug/tracing/events/syscalls/sys_enter_nanosleep/id";
-    struct perf_event_attr attr = {
-        .type = PERF_TYPE_TRACEPOINT,
-        .sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN,
-        .sample_period = 1,
-        .wakeup_events = 1,
-    };
-    __u32 duration = 0;
-    int bytes, efd;
-    char buf[256];
-    efd = open(id_path, O_RDONLY, 0);
-    if (CHECK(efd < 0, "tracepoint",
-              "open syscalls/sys_enter_nanosleep/id failure: %s\n",
-              strerror(errno)))
-        return;
-    bytes = read(efd, buf, sizeof(buf));
-    close(efd);
-    if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "tracepoint",
-              "read syscalls/sys_enter_nanosleep/id failure: %s\n",
-              strerror(errno)))
-        return;
-    attr.config = strtol(buf, NULL, 0);
-    test_send_signal_common(&attr, BPF_PROG_TYPE_TRACEPOINT, "tracepoint");
+    test_send_signal_common(NULL, signal_thread, "tracepoint");
 }
-static void test_send_signal_perf(void)
+static void test_send_signal_perf(bool signal_thread)
 {
     struct perf_event_attr attr = {
         .sample_period = 1,

@@ -179,11 +149,10 @@ static void test_send_signal_perf(void)
         .config = PERF_COUNT_SW_CPU_CLOCK,
     };
-    test_send_signal_common(&attr, BPF_PROG_TYPE_PERF_EVENT,
-                            "perf_sw_event");
+    test_send_signal_common(&attr, signal_thread, "perf_sw_event");
 }
-static void test_send_signal_nmi(void)
+static void test_send_signal_nmi(bool signal_thread)
 {
     struct perf_event_attr attr = {
         .sample_freq = 50,

@@ -210,16 +179,21 @@ static void test_send_signal_nmi(void)
         close(pmu_fd);
     }
-    test_send_signal_common(&attr, BPF_PROG_TYPE_PERF_EVENT,
-                            "perf_hw_event");
+    test_send_signal_common(&attr, signal_thread, "perf_hw_event");
 }
 void test_send_signal(void)
 {
     if (test__start_subtest("send_signal_tracepoint"))
-        test_send_signal_tracepoint();
+        test_send_signal_tracepoint(false);
     if (test__start_subtest("send_signal_perf"))
-        test_send_signal_perf();
+        test_send_signal_perf(false);
     if (test__start_subtest("send_signal_nmi"))
-        test_send_signal_nmi();
+        test_send_signal_nmi(false);
+    if (test__start_subtest("send_signal_tracepoint_thread"))
+        test_send_signal_tracepoint(true);
+    if (test__start_subtest("send_signal_perf_thread"))
+        test_send_signal_perf(true);
+    if (test__start_subtest("send_signal_nmi_thread"))
+        test_send_signal_nmi(true);
 }

@@ -4,44 +4,37 @@
 #include <linux/version.h>
 #include "bpf_helpers.h"
-struct {
-    __uint(type, BPF_MAP_TYPE_ARRAY);
-    __uint(max_entries, 1);
-    __type(key, __u32);
-    __type(value, __u64);
-} info_map SEC(".maps");
-struct {
-    __uint(type, BPF_MAP_TYPE_ARRAY);
-    __uint(max_entries, 1);
-    __type(key, __u32);
-    __type(value, __u64);
-} status_map SEC(".maps");
-SEC("send_signal_demo")
-int bpf_send_signal_test(void *ctx)
+__u32 sig = 0, pid = 0, status = 0, signal_thread = 0;
+
+static __always_inline int bpf_send_signal_test(void *ctx)
 {
-    __u64 *info_val, *status_val;
-    __u32 key = 0, pid, sig;
     int ret;
-    status_val = bpf_map_lookup_elem(&status_map, &key);
-    if (!status_val || *status_val != 0)
-        return 0;
-    info_val = bpf_map_lookup_elem(&info_map, &key);
-    if (!info_val || *info_val == 0)
+    if (status != 0 || sig == 0 || pid == 0)
         return 0;
-    sig = *info_val >> 32;
-    pid = *info_val & 0xffffFFFF;
     if ((bpf_get_current_pid_tgid() >> 32) == pid) {
-        ret = bpf_send_signal(sig);
+        if (signal_thread)
+            ret = bpf_send_signal_thread(sig);
+        else
+            ret = bpf_send_signal(sig);
         if (ret == 0)
-            *status_val = 1;
+            status = 1;
     }
     return 0;
 }
+
+SEC("tracepoint/syscalls/sys_enter_nanosleep")
+int send_signal_tp(void *ctx)
+{
+    return bpf_send_signal_test(ctx);
+}
+
+SEC("perf_event")
+int send_signal_perf(void *ctx)
+{
+    return bpf_send_signal_test(ctx);
+}
 char __license[] SEC("license") = "GPL";