Commit 30b8fdbb authored by Alexei Starovoitov

Merge branch 'bpf: Fixes for CONFIG_X86_KERNEL_IBT'

Jiri Olsa says:

====================
Martynas reported bpf_get_func_ip returning an address 4 bytes off when
the CONFIG_X86_KERNEL_IBT option is enabled, and I found some bpf tests
failing when this option is enabled.

The CONFIG_X86_KERNEL_IBT option adds an endbr instruction at the
function entry, so the idea is to 'fix' the entry ip for kprobe_multi
and trampoline probes, because they are placed on the function
entry.
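
For illustration only (not part of the series): with IBT the compiler emits
a 4-byte endbr64 landing pad as the first instruction of each indirectly
callable function, so fprobe/kprobe handlers see entry+4 and have to step
back over the endbr when one is present. A minimal user-space sketch of
that adjustment, with hypothetical names (ENDBR64_OPCODE, entry_ip); the
kernel code in the diff below uses is_endbr() and get_kernel_nofault()
instead:

  #include <stdint.h>
  #include <string.h>

  #define ENDBR64_OPCODE 0xfa1e0ff3u /* bytes f3 0f 1e fa read as little-endian u32 */

  static uintptr_t entry_ip(uintptr_t fentry_ip)
  {
          uint32_t instr;

          /* the kernel reads this with get_kernel_nofault() in case the
           * address sits on a page edge; plain memcpy stands in here */
          memcpy(&instr, (const void *)(fentry_ip - 4), sizeof(instr));
          return instr == ENDBR64_OPCODE ? fentry_ip - 4 : fentry_ip;
  }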

v5 changes:
  - updated uapi/linux/bpf.h headers with comment for
    bpf_get_func_ip returning 0 [Andrii]
  - added acks

v4 changes:
  - used get_kernel_nofault to read previous instruction [Peter]
  - used movabs instruction in trampoline comment [Peter]
  - renamed fentry_ip argument in kprobe_multi_link_handler [Peter]

v3 changes:
  - using 'unused' bpf function to get IBT config option
    into selftest skeleton
  - rebased to current bpf-next/master
  - added ack/review from Masami

v2 changes:
  - change kprobes get_func_ip to return zero for kprobes
    attached within the function body [Andrii]
  - detect IBT config and properly test kprobe with offset
    [Andrii]

v1 changes:
  - read previous instruction in kprobe_multi link handler
    and adjust entry_ip for CONFIG_X86_KERNEL_IBT option
  - split first patch into 2 separate changes
  - update changelogs
====================
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents bec21719 738c345b
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -662,7 +662,7 @@ static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
 		 */
 		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
 	} else {
-		/* movabsq %rax, imm64 */
+		/* movabsq rax, imm64 */
 		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
 		EMIT(imm32_lo, 4);
 		EMIT(imm32_hi, 4);
@@ -2039,13 +2039,14 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
 				const struct btf_func_model *m, u32 flags,
 				struct bpf_tramp_links *tlinks,
-				void *orig_call)
+				void *func_addr)
 {
 	int ret, i, nr_args = m->nr_args, extra_nregs = 0;
 	int regs_off, ip_off, args_off, stack_size = nr_args * 8, run_ctx_off;
 	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
 	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
 	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
+	void *orig_call = func_addr;
 	u8 **branches = NULL;
 	u8 *prog;
 	bool save_ret;
@@ -2126,12 +2127,10 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 	if (flags & BPF_TRAMP_F_IP_ARG) {
 		/* Store IP address of the traced function:
-		 * mov rax, QWORD PTR [rbp + 8]
-		 * sub rax, X86_PATCH_SIZE
+		 * movabsq rax, func_addr
 		 * mov QWORD PTR [rbp - ip_off], rax
 		 */
-		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
-		EMIT4(0x48, 0x83, 0xe8, X86_PATCH_SIZE);
+		emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr);
 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
 	}
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -103,6 +103,7 @@ struct kprobe {
 				   * this flag is only for optimized_kprobe.
 				   */
 #define KPROBE_FLAG_FTRACE	8 /* probe is using ftrace */
+#define KPROBE_FLAG_ON_FUNC_ENTRY	16 /* probe is on the function entry */

 /* Has this kprobe gone ? */
 static inline bool kprobe_gone(struct kprobe *p)
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -4951,6 +4951,7 @@ union bpf_attr {
  *	Get address of the traced function (for tracing and kprobe programs).
  *	Return
  *		Address of the traced function.
+ *		0 for kprobes placed within the function (not at the entry).
  *
  * u64 bpf_get_attach_cookie(void *ctx)
  *	Description
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1606,9 +1606,10 @@ int register_kprobe(struct kprobe *p)
 	struct kprobe *old_p;
 	struct module *probed_mod;
 	kprobe_opcode_t *addr;
+	bool on_func_entry;

 	/* Adjust probe address from symbol */
-	addr = kprobe_addr(p);
+	addr = _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
 	if (IS_ERR(addr))
 		return PTR_ERR(addr);
 	p->addr = addr;
@@ -1628,6 +1629,9 @@ int register_kprobe(struct kprobe *p)

 	mutex_lock(&kprobe_mutex);

+	if (on_func_entry)
+		p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY;
+
 	old_p = get_kprobe(p->addr);
 	if (old_p) {
 		/* Since this may unoptimize 'old_p', locking 'text_mutex'. */
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1028,11 +1028,30 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
 	.arg1_type	= ARG_PTR_TO_CTX,
 };

+#ifdef CONFIG_X86_KERNEL_IBT
+static unsigned long get_entry_ip(unsigned long fentry_ip)
+{
+	u32 instr;
+
+	/* Being extra safe in here in case entry ip is on the page-edge. */
+	if (get_kernel_nofault(instr, (u32 *) fentry_ip - 1))
+		return fentry_ip;
+	if (is_endbr(instr))
+		fentry_ip -= ENDBR_INSN_SIZE;
+	return fentry_ip;
+}
+#else
+#define get_entry_ip(fentry_ip) fentry_ip
+#endif
+
 BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
 {
 	struct kprobe *kp = kprobe_running();

-	return kp ? (uintptr_t)kp->addr : 0;
+	if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
+		return 0;
+
+	return get_entry_ip((uintptr_t)kp->addr);
 }

 static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
@@ -2600,13 +2619,13 @@ kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
 }

 static void
-kprobe_multi_link_handler(struct fprobe *fp, unsigned long entry_ip,
+kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
 			  struct pt_regs *regs)
 {
 	struct bpf_kprobe_multi_link *link;

 	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
-	kprobe_multi_link_prog_run(link, entry_ip, regs);
+	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
 }

 static int symbols_cmp_r(const void *a, const void *b, const void *priv)
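With the change above, bpf_get_func_ip() in a kprobe program returns the
IBT-adjusted function entry when the probe sits on the entry, and 0 when it
sits inside the body. A minimal sketch of a consumer (illustrative only;
do_unlinkat is just an assumed attach target):

  // SPDX-License-Identifier: GPL-2.0
  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  char _license[] SEC("license") = "GPL";

  SEC("kprobe/do_unlinkat")
  int probe_entry(struct pt_regs *ctx)
  {
          /* real entry of do_unlinkat, with any endbr bias removed */
          __u64 ip = bpf_get_func_ip(ctx);

          bpf_printk("func ip: %llx", ip);
          return 0;
  }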
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -8265,8 +8265,7 @@ static int kallsyms_callback(void *data, const char *name,
 	if (args->addrs[idx])
 		return 0;

-	addr = ftrace_location(addr);
-	if (!addr)
+	if (!ftrace_location(addr))
 		return 0;

 	args->addrs[idx] = addr;
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -4951,6 +4951,7 @@ union bpf_attr {
  *	Get address of the traced function (for tracing and kprobe programs).
  *	Return
  *		Address of the traced function.
+ *		0 for kprobes placed within the function (not at the entry).
  *
  * u64 bpf_get_attach_cookie(void *ctx)
  *	Description
--- a/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c
@@ -2,7 +2,7 @@
 #include <test_progs.h>
 #include "get_func_ip_test.skel.h"

-void test_get_func_ip_test(void)
+static void test_function_entry(void)
 {
 	struct get_func_ip_test *skel = NULL;
 	int err, prog_fd;
@@ -12,14 +12,6 @@ void test_get_func_ip_test(void)
 	if (!ASSERT_OK_PTR(skel, "get_func_ip_test__open"))
 		return;

-	/* test6 is x86_64 specifc because of the instruction
-	 * offset, disabling it for all other archs
-	 */
-#ifndef __x86_64__
-	bpf_program__set_autoload(skel->progs.test6, false);
-	bpf_program__set_autoload(skel->progs.test7, false);
-#endif
-
 	err = get_func_ip_test__load(skel);
 	if (!ASSERT_OK(err, "get_func_ip_test__load"))
 		goto cleanup;
@@ -43,11 +35,56 @@ void test_get_func_ip_test(void)
 	ASSERT_EQ(skel->bss->test3_result, 1, "test3_result");
 	ASSERT_EQ(skel->bss->test4_result, 1, "test4_result");
 	ASSERT_EQ(skel->bss->test5_result, 1, "test5_result");
+
+cleanup:
+	get_func_ip_test__destroy(skel);
+}
+
+/* test6 is x86_64 specific because of the instruction
+ * offset, disabling it for all other archs
+ */
 #ifdef __x86_64__
+static void test_function_body(void)
+{
+	struct get_func_ip_test *skel = NULL;
+	LIBBPF_OPTS(bpf_test_run_opts, topts);
+	LIBBPF_OPTS(bpf_kprobe_opts, kopts);
+	struct bpf_link *link6 = NULL;
+	int err, prog_fd;
+
+	skel = get_func_ip_test__open();
+	if (!ASSERT_OK_PTR(skel, "get_func_ip_test__open"))
+		return;
+
+	bpf_program__set_autoload(skel->progs.test6, true);
+
+	err = get_func_ip_test__load(skel);
+	if (!ASSERT_OK(err, "get_func_ip_test__load"))
+		goto cleanup;
+
+	kopts.offset = skel->kconfig->CONFIG_X86_KERNEL_IBT ? 9 : 5;
+
+	link6 = bpf_program__attach_kprobe_opts(skel->progs.test6, "bpf_fentry_test6", &kopts);
+	if (!ASSERT_OK_PTR(link6, "link6"))
+		goto cleanup;
+
+	prog_fd = bpf_program__fd(skel->progs.test1);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "test_run");
+	ASSERT_EQ(topts.retval, 0, "test_run");
+
 	ASSERT_EQ(skel->bss->test6_result, 1, "test6_result");
-	ASSERT_EQ(skel->bss->test7_result, 1, "test7_result");
-#endif

 cleanup:
+	bpf_link__destroy(link6);
 	get_func_ip_test__destroy(skel);
 }
+#else
+#define test_function_body()
+#endif
+
+void test_get_func_ip_test(void)
+{
+	test_function_entry();
+	test_function_body();
+}
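
The 9-vs-5 value in kopts.offset above encodes the x86-64 entry layout: the
5-byte fentry call is preceded by a 4-byte endbr64 when IBT is enabled, so
the first instruction of the function body moves from +5 to +9. A hedged
sketch of that arithmetic (the macro names are assumptions, not kernel
identifiers; X86_PATCH_SIZE and ENDBR_INSN_SIZE are the upstream
equivalents):

  #include <stdbool.h>

  #define FENTRY_CALL_SIZE 5 /* call __fentry__, or the 5-byte nop replacing it */
  #define ENDBR64_SIZE 4     /* IBT landing pad at the function entry */

  static int first_body_insn_offset(bool ibt)
  {
          return (ibt ? ENDBR64_SIZE : 0) + FENTRY_CALL_SIZE;
  }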
--- a/tools/testing/selftests/bpf/progs/get_func_ip_test.c
+++ b/tools/testing/selftests/bpf/progs/get_func_ip_test.c
@@ -2,6 +2,7 @@
 #include <linux/bpf.h>
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
+#include <stdbool.h>

 char _license[] SEC("license") = "GPL";
@@ -13,6 +14,16 @@ extern const void bpf_modify_return_test __ksym;
 extern const void bpf_fentry_test6 __ksym;
 extern const void bpf_fentry_test7 __ksym;

+extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak;
+
+/* This function is here to have CONFIG_X86_KERNEL_IBT
+ * used and added to object BTF.
+ */
+int unused(void)
+{
+	return CONFIG_X86_KERNEL_IBT ? 0 : 1;
+}
+
 __u64 test1_result = 0;
 SEC("fentry/bpf_fentry_test1")
 int BPF_PROG(test1, int a)
@@ -64,21 +75,11 @@ int BPF_PROG(test5, int a, int *b, int ret)
 }

 __u64 test6_result = 0;
-SEC("kprobe/bpf_fentry_test6+0x5")
+SEC("?kprobe")
 int test6(struct pt_regs *ctx)
 {
 	__u64 addr = bpf_get_func_ip(ctx);

-	test6_result = (const void *) addr == &bpf_fentry_test6 + 5;
-	return 0;
-}
-
-__u64 test7_result = 0;
-SEC("kprobe/bpf_fentry_test7+5")
-int test7(struct pt_regs *ctx)
-{
-	__u64 addr = bpf_get_func_ip(ctx);
-
-	test7_result = (const void *) addr == &bpf_fentry_test7 + 5;
+	test6_result = (const void *) addr == 0;
 	return 0;
 }
--- a/tools/testing/selftests/bpf/progs/kprobe_multi.c
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi.c
@@ -36,15 +36,13 @@ __u64 kretprobe_test6_result = 0;
 __u64 kretprobe_test7_result = 0;
 __u64 kretprobe_test8_result = 0;

-extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak;
-
 static void kprobe_multi_check(void *ctx, bool is_return)
 {
 	if (bpf_get_current_pid_tgid() >> 32 != pid)
 		return;

 	__u64 cookie = test_cookie ? bpf_get_attach_cookie(ctx) : 0;
-	__u64 addr = bpf_get_func_ip(ctx) - (CONFIG_X86_KERNEL_IBT ? 4 : 0);
+	__u64 addr = bpf_get_func_ip(ctx);

 #define SET(__var, __addr, __cookie) ({			\
 	if (((const void *) addr == __addr) &&		\