Commit 5b7fe93d authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Daniel Borkmann says:

====================
pull-request: bpf-next 2019-10-27

The following pull-request contains BPF updates for your *net-next* tree.

We've added 52 non-merge commits during the last 11 day(s) which contain
a total of 65 files changed, 2604 insertions(+), 1100 deletions(-).

The main changes are:

 1) Revolutionize BPF tracing by using in-kernel BTF to type check BPF
    assembly code. The work here teaches BPF verifier to recognize
    kfree_skb()'s first argument as 'struct sk_buff *' in tracepoints
    such that verifier allows direct use of bpf_skb_event_output() helper
    used in tc BPF et al (w/o probing memory access) that dumps skb data
    into perf ring buffer. Also add direct loads to probe memory in order
    to speed up/replace bpf_probe_read() calls, from Alexei Starovoitov.

 2) Big batch of changes to improve libbpf and BPF kselftests. Besides
    others: generalization of libbpf's CO-RE relocation support to now
    also include field existence relocations, revamp the BPF kselftest
    Makefile to add a test runner concept that allows exercising various
    ways to build BPF programs, and teach bpf_object__open() and friends
    to automatically derive BPF program type/expected attach type from
    section names to ease their use, from Andrii Nakryiko.

 3) Fix deadlock in stackmap's build-id lookup on rq_lock(), from Song Liu.

 4) Allow bpftool to read BTF as raw data. The most notable use case
    is dumping /sys/kernel/btf/vmlinux this way, from Jiri Olsa.

 5) Use bpf_redirect_map() helper in libbpf's AF_XDP helper prog which
    manages to improve "rx_drop" performance by ~4%, from Björn Töpel.

 6) Fix the flow dissector reattach BPF test to restore the previous
    flow dissector once done, and also fix error handling in the
    bpf_helper_defs.h generation, from Jakub Sitnicki.

 7) Improve verifier's BTF ctx access for use outside of raw_tp, from
    Martin KaFai Lau.

 8) Improve documentation for AF_XDP with new sections and to reflect
    latest features, from Magnus Karlsson.

 9) Add back 'version' section parsing to libbpf for old kernels, from
    John Fastabend.

10) Fix strncat bounds error in libbpf's libbpf_prog_type_by_name(),
    from KP Singh.

11) Turn on -mattr=+alu32 in LLVM by default for BPF kselftests in order
    to improve insn coverage for built BPF progs, from Yonghong Song.

12) Misc minor cleanups and fixes, from various others.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b9512485 027cbaaf
@@ -9,7 +9,7 @@
 #include <linux/filter.h>
 #include <linux/if_vlan.h>
 #include <linux/bpf.h>
+#include <asm/extable.h>
 #include <asm/set_memory.h>
 #include <asm/nospec-branch.h>
@@ -123,6 +123,19 @@ static const int reg2hex[] = {
 	[AUX_REG] = 3,    /* R11 temp register */
 };

+static const int reg2pt_regs[] = {
+	[BPF_REG_0] = offsetof(struct pt_regs, ax),
+	[BPF_REG_1] = offsetof(struct pt_regs, di),
+	[BPF_REG_2] = offsetof(struct pt_regs, si),
+	[BPF_REG_3] = offsetof(struct pt_regs, dx),
+	[BPF_REG_4] = offsetof(struct pt_regs, cx),
+	[BPF_REG_5] = offsetof(struct pt_regs, r8),
+	[BPF_REG_6] = offsetof(struct pt_regs, bx),
+	[BPF_REG_7] = offsetof(struct pt_regs, r13),
+	[BPF_REG_8] = offsetof(struct pt_regs, r14),
+	[BPF_REG_9] = offsetof(struct pt_regs, r15),
+};
+
 /*
  * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
  * which need extra byte of encoding.
@@ -377,6 +390,19 @@ static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
 	*pprog = prog;
 }

+static bool ex_handler_bpf(const struct exception_table_entry *x,
+			   struct pt_regs *regs, int trapnr,
+			   unsigned long error_code, unsigned long fault_addr)
+{
+	u32 reg = x->fixup >> 8;
+
+	/* jump over faulting load and clear dest register */
+	*(unsigned long *)((void *)regs + reg) = 0;
+	regs->ip += x->fixup & 0xff;
+	return true;
+}
+
 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 		  int oldproglen, struct jit_context *ctx)
 {
@@ -384,7 +410,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 	int insn_cnt = bpf_prog->len;
 	bool seen_exit = false;
 	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
-	int i, cnt = 0;
+	int i, cnt = 0, excnt = 0;
 	int proglen = 0;
 	u8 *prog = temp;
@@ -778,14 +804,17 @@ stx: if (is_imm8(insn->off))
 			/* LDX: dst_reg = *(u8*)(src_reg + off) */
 		case BPF_LDX | BPF_MEM | BPF_B:
+		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
 			/* Emit 'movzx rax, byte ptr [rax + off]' */
 			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
 			goto ldx;
 		case BPF_LDX | BPF_MEM | BPF_H:
+		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
 			/* Emit 'movzx rax, word ptr [rax + off]' */
 			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
 			goto ldx;
 		case BPF_LDX | BPF_MEM | BPF_W:
+		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
 			/* Emit 'mov eax, dword ptr [rax+0x14]' */
 			if (is_ereg(dst_reg) || is_ereg(src_reg))
 				EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
@@ -793,6 +822,7 @@ stx: if (is_imm8(insn->off))
 				EMIT1(0x8B);
 			goto ldx;
 		case BPF_LDX | BPF_MEM | BPF_DW:
+		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
 			/* Emit 'mov rax, qword ptr [rax+0x14]' */
 			EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
 ldx:			/*
@@ -805,6 +835,48 @@ stx: if (is_imm8(insn->off))
 			else
 				EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
 					    insn->off);
+			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
+				struct exception_table_entry *ex;
+				u8 *_insn = image + proglen;
+				s64 delta;
+
+				if (!bpf_prog->aux->extable)
+					break;
+
+				if (excnt >= bpf_prog->aux->num_exentries) {
+					pr_err("ex gen bug\n");
+					return -EFAULT;
+				}
+				ex = &bpf_prog->aux->extable[excnt++];
+
+				delta = _insn - (u8 *)&ex->insn;
+				if (!is_simm32(delta)) {
+					pr_err("extable->insn doesn't fit into 32-bit\n");
+					return -EFAULT;
+				}
+				ex->insn = delta;
+
+				delta = (u8 *)ex_handler_bpf - (u8 *)&ex->handler;
+				if (!is_simm32(delta)) {
+					pr_err("extable->handler doesn't fit into 32-bit\n");
+					return -EFAULT;
+				}
+				ex->handler = delta;
+
+				if (dst_reg > BPF_REG_9) {
+					pr_err("verifier error\n");
+					return -EFAULT;
+				}
+				/*
+				 * Compute size of x86 insn and its target dest x86 register.
+				 * ex_handler_bpf() will use lower 8 bits to adjust
+				 * pt_regs->ip to jump over this x86 instruction
+				 * and upper bits to figure out which pt_regs to zero out.
+				 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
+				 * of 4 bytes will be ignored and rbx will be zero inited.
+				 */
+				ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8);
+			}
 			break;

 			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
@@ -1058,6 +1130,11 @@ xadd: if (is_imm8(insn->off))
 		addrs[i] = proglen;
 		prog = temp;
 	}
+
+	if (image && excnt != bpf_prog->aux->num_exentries) {
+		pr_err("extable is not populated\n");
+		return -EFAULT;
+	}
 	return proglen;
 }
@@ -1158,12 +1235,24 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 			break;
 		}
 		if (proglen == oldproglen) {
-			header = bpf_jit_binary_alloc(proglen, &image,
-						      1, jit_fill_hole);
+			/*
+			 * The number of entries in extable is the number of BPF_LDX
+			 * insns that access kernel memory via "pointer to BTF type".
+			 * The verifier changed their opcode from LDX|MEM|size
+			 * to LDX|PROBE_MEM|size to make JITing easier.
+			 */
+			u32 align = __alignof__(struct exception_table_entry);
+			u32 extable_size = prog->aux->num_exentries *
+				sizeof(struct exception_table_entry);
+
+			/* allocate module memory for x86 insns and extable */
+			header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size,
+						      &image, align, jit_fill_hole);
 			if (!header) {
 				prog = orig_prog;
 				goto out_addrs;
 			}
+			prog->aux->extable = (void *) image + roundup(proglen, align);
 		}
 		oldproglen = proglen;
 		cond_resched();
...
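The ex->fixup encoding above is compact: the low 8 bits store the byte length of the JITed load (prog - temp, so the handler knows how far to advance pt_regs->ip) and the remaining bits store the pt_regs offset of the destination register (so the handler knows which register to zero). A minimal user-space sketch of the same pack/unpack round trip; INSN_LEN and PT_REGS_OFF are stand-in values for illustration, not kernel constants:

#include <stdio.h>
#include <assert.h>

/* Stand-in values; in the kernel these come from the JIT stream
 * and from reg2pt_regs[] respectively. */
#define INSN_LEN    4	/* bytes of the faulting x86 load      */
#define PT_REGS_OFF 40	/* e.g. an offsetof(struct pt_regs, x) */

int main(void)
{
	/* pack, as do_jit() does: low 8 bits = insn length,
	 * upper bits = pt_regs offset of the destination reg */
	unsigned int fixup = INSN_LEN | (PT_REGS_OFF << 8);

	/* unpack, as ex_handler_bpf() does on a fault */
	unsigned int reg_off  = fixup >> 8;	/* which register to zero */
	unsigned int skip_len = fixup & 0xff;	/* how far to advance ip  */

	assert(reg_off == PT_REGS_OFF && skip_len == INSN_LEN);
	printf("zero pt_regs+%u, ip += %u\n", reg_off, skip_len);
	return 0;
}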
@@ -16,6 +16,7 @@
 #include <linux/u64_stats_sync.h>

 struct bpf_verifier_env;
+struct bpf_verifier_log;
 struct perf_event;
 struct bpf_prog;
 struct bpf_map;
@@ -23,6 +24,7 @@ struct sock;
 struct seq_file;
 struct btf;
 struct btf_type;
+struct exception_table_entry;

 extern struct idr btf_idr;
 extern spinlock_t btf_idr_lock;
@@ -211,6 +213,7 @@ enum bpf_arg_type {
 	ARG_PTR_TO_INT,		/* pointer to int */
 	ARG_PTR_TO_LONG,	/* pointer to long */
 	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
+	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
 };

 /* type of values returned from helper functions */
@@ -233,11 +236,17 @@ struct bpf_func_proto {
 	bool gpl_only;
 	bool pkt_access;
 	enum bpf_return_type ret_type;
-	enum bpf_arg_type arg1_type;
-	enum bpf_arg_type arg2_type;
-	enum bpf_arg_type arg3_type;
-	enum bpf_arg_type arg4_type;
-	enum bpf_arg_type arg5_type;
+	union {
+		struct {
+			enum bpf_arg_type arg1_type;
+			enum bpf_arg_type arg2_type;
+			enum bpf_arg_type arg3_type;
+			enum bpf_arg_type arg4_type;
+			enum bpf_arg_type arg5_type;
+		};
+		enum bpf_arg_type arg_type[5];
+	};
+	u32 *btf_id; /* BTF ids of arguments */
 };

 /* bpf_context is intentionally undefined structure. Pointer to bpf_context is
@@ -281,6 +290,7 @@ enum bpf_reg_type {
 	PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
 	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
 	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
+	PTR_TO_BTF_ID,		 /* reg points to kernel struct */
 };

 /* The information passed from prog-specific *_is_valid_access
@@ -288,7 +298,11 @@ enum bpf_reg_type {
  */
 struct bpf_insn_access_aux {
 	enum bpf_reg_type reg_type;
-	int ctx_field_size;
+	union {
+		int ctx_field_size;
+		u32 btf_id;
+	};
+	struct bpf_verifier_log *log; /* for verbose logs */
 };

 static inline void
@@ -375,8 +389,14 @@ struct bpf_prog_aux {
 	u32 id;
 	u32 func_cnt; /* used by non-func prog as the number of func progs */
 	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
+	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
 	bool verifier_zext; /* Zero extensions has been inserted by verifier. */
 	bool offload_requested;
+	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
+	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
+	const struct btf_type *attach_func_proto;
+	/* function name for valid attach_btf_id */
+	const char *attach_func_name;
 	struct bpf_prog **func;
 	void *jit_data; /* JIT specific data. arch dependent */
 	struct latch_tree_node ksym_tnode;
@@ -416,6 +436,8 @@ struct bpf_prog_aux {
 	 * main prog always has linfo_idx == 0
 	 */
 	u32 linfo_idx;
+	u32 num_exentries;
+	struct exception_table_entry *extable;
 	struct bpf_prog_stats __percpu *stats;
 	union {
 		struct work_struct work;
@@ -482,6 +504,7 @@ struct bpf_event_entry {

 bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
 int bpf_prog_calc_tag(struct bpf_prog *fp);
+const char *kernel_type_name(u32 btf_type_id);

 const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
@@ -747,6 +770,15 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
 int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
 				     const union bpf_attr *kattr,
 				     union bpf_attr __user *uattr);
+bool btf_ctx_access(int off, int size, enum bpf_access_type type,
+		    const struct bpf_prog *prog,
+		    struct bpf_insn_access_aux *info);
+int btf_struct_access(struct bpf_verifier_log *log,
+		      const struct btf_type *t, int off, int size,
+		      enum bpf_access_type atype,
+		      u32 *next_btf_id);
+u32 btf_resolve_helper_id(struct bpf_verifier_log *log, void *, int);
+
 #else /* !CONFIG_BPF_SYSCALL */
 static inline struct bpf_prog *bpf_prog_get(u32 ufd)
 {
...
@@ -52,6 +52,8 @@ struct bpf_reg_state {
 		 */
 		struct bpf_map *map_ptr;

+		u32 btf_id; /* for PTR_TO_BTF_ID */
+
 		/* Max size from any of the above. */
 		unsigned long raw;
 	};
@@ -330,10 +332,12 @@ static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
 #define BPF_LOG_STATS	4
 #define BPF_LOG_LEVEL	(BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
 #define BPF_LOG_MASK	(BPF_LOG_LEVEL | BPF_LOG_STATS)
+#define BPF_LOG_KERNEL	(BPF_LOG_MASK + 1) /* kernel internal flag */

 static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
 {
-	return log->level && log->ubuf && !bpf_verifier_log_full(log);
+	return (log->level && log->ubuf && !bpf_verifier_log_full(log)) ||
+		log->level == BPF_LOG_KERNEL;
 }

 #define BPF_MAX_SUBPROGS 256
@@ -397,6 +401,8 @@ __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
 				      const char *fmt, va_list args);
 __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
 					   const char *fmt, ...);
+__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
+			    const char *fmt, ...);

 static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
 {
...
@@ -5,6 +5,7 @@
 #define _LINUX_BTF_H 1

 #include <linux/types.h>
+#include <uapi/linux/btf.h>

 struct btf;
 struct btf_member;
@@ -53,9 +54,40 @@ bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
 int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t);
 bool btf_type_is_void(const struct btf_type *t);

+static inline bool btf_type_is_ptr(const struct btf_type *t)
+{
+	return BTF_INFO_KIND(t->info) == BTF_KIND_PTR;
+}
+
+static inline bool btf_type_is_int(const struct btf_type *t)
+{
+	return BTF_INFO_KIND(t->info) == BTF_KIND_INT;
+}
+
+static inline bool btf_type_is_enum(const struct btf_type *t)
+{
+	return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM;
+}
+
+static inline bool btf_type_is_typedef(const struct btf_type *t)
+{
+	return BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF;
+}
+
+static inline bool btf_type_is_func(const struct btf_type *t)
+{
+	return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC;
+}
+
+static inline bool btf_type_is_func_proto(const struct btf_type *t)
+{
+	return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC_PROTO;
+}
+
 #ifdef CONFIG_BPF_SYSCALL
 const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
 const char *btf_name_by_offset(const struct btf *btf, u32 offset);
+struct btf *btf_parse_vmlinux(void);
 #else
 static inline const struct btf_type *btf_type_by_id(const struct btf *btf,
 						    u32 type_id)
...
@@ -33,4 +33,14 @@ search_module_extables(unsigned long addr)
 }
 #endif /*CONFIG_MODULES*/

+#ifdef CONFIG_BPF_JIT
+const struct exception_table_entry *search_bpf_extables(unsigned long addr);
+#else
+static inline const struct exception_table_entry *
+search_bpf_extables(unsigned long addr)
+{
+	return NULL;
+}
+#endif
+
 #endif /* _LINUX_EXTABLE_H */
@@ -65,6 +65,9 @@ struct ctl_table_header;
 /* unused opcode to mark special call to bpf_tail_call() helper */
 #define BPF_TAIL_CALL	0xf0

+/* unused opcode to mark special load instruction. Same as BPF_ABS */
+#define BPF_PROBE_MEM	0x20
+
 /* unused opcode to mark call to interpreter with arguments */
 #define BPF_CALL_ARGS	0xe0
@@ -464,10 +467,11 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
 #define BPF_CALL_x(x, name, ...)					       \
 	static __always_inline						       \
 	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__));  \
+	typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
 	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__));	       \
 	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__))	       \
 	{								       \
-		return ____##name(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
+		return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
 	}								       \
 	static __always_inline						       \
 	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
...
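The new typedef gives every helper a named function-pointer type, so the compiler keeps the helper's true prototype in DWARF (and hence in vmlinux BTF once pahole converts it) even though the inner ____name() is static and always inlined; the cast leaves the actual call unchanged. As a sketch (not verbatim preprocessor output; the __force casts are abbreviated), a two-argument helper now expands roughly to:

/* BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
 * expands roughly to: */
static __always_inline
u64 ____bpf_map_lookup_elem(struct bpf_map *map, void *key);
typedef u64 (*btf_bpf_map_lookup_elem)(struct bpf_map *map, void *key);
u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return ((btf_bpf_map_lookup_elem)____bpf_map_lookup_elem)(
			(struct bpf_map *)(unsigned long)r1,
			(void *)(unsigned long)r2);
}
static __always_inline
u64 ____bpf_map_lookup_elem(struct bpf_map *map, void *key)
{
	/* helper body */
	return 0;
}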
@@ -74,11 +74,12 @@ static inline void bpf_test_probe_##call(void)			\
 {									\
 	check_trace_callback_type_##call(__bpf_trace_##template);	\
 }									\
+typedef void (*btf_trace_##call)(void *__data, proto);			\
 static struct bpf_raw_event_map	__used					\
 	__attribute__((section("__bpf_raw_tp_map")))			\
 __bpf_trace_tp_map_##call = {						\
 	.tp		= &__tracepoint_##call,				\
-	.bpf_func	= (void *)__bpf_trace_##template,		\
+	.bpf_func	= (void *)(btf_trace_##call)__bpf_trace_##template, \
 	.num_args	= COUNT_ARGS(args),				\
 	.writable_size	= size,						\
 };
...
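Same trick on the tracepoint side: casting bpf_func through a named btf_trace_##call typedef makes the compiler emit a btf_trace_<name> type carrying the tracepoint's full prototype into DWARF/BTF, which is what the verifier can later look up for a given attach_btf_id. For example, for the kfree_skb tracepoint (whose TP_PROTO is struct sk_buff *skb, void *location), this emits roughly:

typedef void (*btf_trace_kfree_skb)(void *__data,
				    struct sk_buff *skb, void *location);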
@@ -22,7 +22,7 @@
 #define __XDP_ACT_SYM_FN(x)	\
 	{ XDP_##x, #x },
 #define __XDP_ACT_SYM_TAB	\
-	__XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, 0 }
+	__XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, NULL }
 __XDP_ACT_MAP(__XDP_ACT_TP_FN)

 TRACE_EVENT(xdp_exception,
...
@@ -420,6 +420,7 @@ union bpf_attr {
 		__u32		line_info_rec_size;	/* userspace bpf_line_info size */
 		__aligned_u64	line_info;	/* line info */
 		__u32		line_info_cnt;	/* number of bpf_line_info records */
+		__u32		attach_btf_id;	/* in-kernel BTF type id to attach to */
 	};

 	struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -2750,6 +2751,30 @@ union bpf_attr {
 *		**-EOPNOTSUPP** kernel configuration does not enable SYN cookies
 *
 *		**-EPROTONOSUPPORT** IP packet version is not 4 or 6
+ *
+ * int bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ *	Description
+ *		Write raw *data* blob into a special BPF perf event held by
+ *		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
+ *		event must have the following attributes: **PERF_SAMPLE_RAW**
+ *		as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
+ *		**PERF_COUNT_SW_BPF_OUTPUT** as **config**.
+ *
+ *		The *flags* are used to indicate the index in *map* for which
+ *		the value must be put, masked with **BPF_F_INDEX_MASK**.
+ *		Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
+ *		to indicate that the index of the current CPU core should be
+ *		used.
+ *
+ *		The value to write, of *size*, is passed through eBPF stack and
+ *		pointed by *data*.
+ *
+ *		*ctx* is a pointer to in-kernel struct sk_buff.
+ *
+ *		This helper is similar to **bpf_perf_event_output**\ () but
+ *		restricted to raw_tracepoint bpf programs.
+ *	Return
+ *		0 on success, or a negative error in case of failure.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -2862,7 +2887,8 @@ union bpf_attr {
 	FN(sk_storage_get),		\
 	FN(sk_storage_delete),		\
 	FN(send_signal),		\
-	FN(tcp_gen_syncookie),
+	FN(tcp_gen_syncookie),		\
+	FN(skb_output),

 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
...
@@ -30,7 +30,7 @@
 #include <linux/kallsyms.h>
 #include <linux/rcupdate.h>
 #include <linux/perf_event.h>
+#include <linux/extable.h>
 #include <asm/unaligned.h>

 /* Registers */
@@ -712,6 +712,24 @@ bool is_bpf_text_address(unsigned long addr)
 	return ret;
 }

+const struct exception_table_entry *search_bpf_extables(unsigned long addr)
+{
+	const struct exception_table_entry *e = NULL;
+	struct bpf_prog *prog;
+
+	rcu_read_lock();
+	prog = bpf_prog_kallsyms_find(addr);
+	if (!prog)
+		goto out;
+	if (!prog->aux->num_exentries)
+		goto out;
+
+	e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
+out:
+	rcu_read_unlock();
+	return e;
+}
+
 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 		    char *sym)
 {
@@ -1291,6 +1309,11 @@ bool bpf_opcode_in_insntable(u8 code)
 }

 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
+u64 __weak bpf_probe_read(void * dst, u32 size, const void * unsafe_ptr)
+{
+	memset(dst, 0, size);
+	return -EFAULT;
+}
+
 /**
  *	__bpf_prog_run - run eBPF program on a given context
  *	@regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
@@ -1310,6 +1333,10 @@ static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 	/* Non-UAPI available opcodes. */
 	[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
 	[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
+	[BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
+	[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
+	[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
+	[BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
 	};
 #undef BPF_INSN_3_LBL
 #undef BPF_INSN_2_LBL
@@ -1542,6 +1569,16 @@ static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 	LDST(W,  u32)
 	LDST(DW, u64)
 #undef LDST
+#define LDX_PROBE(SIZEOP, SIZE)						\
+	LDX_PROBE_MEM_##SIZEOP:						\
+		bpf_probe_read(&DST, SIZE, (const void *)(long) SRC);	\
+		CONT;
+	LDX_PROBE(B,  1)
+	LDX_PROBE(H,  2)
+	LDX_PROBE(W,  4)
+	LDX_PROBE(DW, 8)
+#undef LDX_PROBE
+
 	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
 		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
 			   (DST + insn->off));
...
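Semantically, an LDX|PROBE_MEM load is "read like LDX|MEM, but tolerate a fault and read zero instead", which is exactly what the weak bpf_probe_read() fallback above provides to the interpreter. A rough user-space model of that contract; safe_copy() is a stand-in for the kernel's fault-tolerant probing copy, not a kernel API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the kernel's probing copy: here just a NULL check
 * plus memcpy; the real thing traps page faults via the extable. */
static int safe_copy(void *dst, uint32_t size, const void *src)
{
	if (!src)
		return -1;
	memcpy(dst, src, size);
	return 0;
}

/* Model of an LDX|PROBE_MEM load: the destination reads as zero
 * whenever the source access faults. */
static uint64_t probe_mem_load(const void *src, uint32_t size)
{
	uint64_t dst = 0;

	if (safe_copy(&dst, size, src))
		dst = 0;
	return dst;
}

int main(void)
{
	uint32_t v = 0xdead;

	printf("%llx %llx\n",
	       (unsigned long long)probe_mem_load(&v, sizeof(v)),
	       (unsigned long long)probe_mem_load(NULL, sizeof(v)));
	return 0;
}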
@@ -287,7 +287,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
 	bool irq_work_busy = false;
 	struct stack_map_irq_work *work = NULL;

-	if (in_nmi()) {
+	if (irqs_disabled()) {
 		work = this_cpu_ptr(&up_read_work);
 		if (work->irq_work.flags & IRQ_WORK_BUSY)
 			/* cannot queue more up_read, fallback */
@@ -295,8 +295,9 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
 	}

 	/*
-	 * We cannot do up_read() in nmi context. To do build_id lookup
-	 * in nmi context, we need to run up_read() in irq_work. We use
+	 * We cannot do up_read() when the irq is disabled, because of
+	 * risk to deadlock with rq_lock. To do build_id lookup when the
+	 * irqs are disabled, we need to run up_read() in irq_work. We use
 	 * a percpu variable to do the irq_work. If the irq_work is
 	 * already used by another lookup, we fall back to report ips.
 	 *
...
@@ -23,6 +23,7 @@
 #include <linux/timekeeping.h>
 #include <linux/ctype.h>
 #include <linux/nospec.h>
+#include <uapi/linux/btf.h>

 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
 			   (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
@@ -1565,9 +1566,21 @@ static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
 }

 static int
-bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type,
-				enum bpf_attach_type expected_attach_type)
+bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
+			   enum bpf_attach_type expected_attach_type,
+			   u32 btf_id)
 {
+	switch (prog_type) {
+	case BPF_PROG_TYPE_RAW_TRACEPOINT:
+		if (btf_id > BTF_MAX_TYPE)
+			return -EINVAL;
+		break;
+	default:
+		if (btf_id)
+			return -EINVAL;
+		break;
+	}
+
 	switch (prog_type) {
 	case BPF_PROG_TYPE_CGROUP_SOCK:
 		switch (expected_attach_type) {
@@ -1614,7 +1627,7 @@ bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type,
 }

 /* last field in 'union bpf_attr' used by this command */
-#define	BPF_PROG_LOAD_LAST_FIELD line_info_cnt
+#define	BPF_PROG_LOAD_LAST_FIELD attach_btf_id

 static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
 {
@@ -1656,7 +1669,8 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
 		return -EPERM;

 	bpf_prog_load_fixup_attach_type(attr);
-	if (bpf_prog_load_check_attach_type(type, attr->expected_attach_type))
+	if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
+				       attr->attach_btf_id))
 		return -EINVAL;

 	/* plain bpf_prog allocation */
@@ -1665,6 +1679,7 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
 		return -ENOMEM;

 	prog->expected_attach_type = attr->expected_attach_type;
+	prog->aux->attach_btf_id = attr->attach_btf_id;
 	prog->aux->offload_requested = !!attr->prog_ifindex;
@@ -1806,17 +1821,50 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
 	struct bpf_raw_tracepoint *raw_tp;
 	struct bpf_raw_event_map *btp;
 	struct bpf_prog *prog;
-	char tp_name[128];
+	const char *tp_name;
+	char buf[128];
 	int tp_fd, err;

-	if (strncpy_from_user(tp_name, u64_to_user_ptr(attr->raw_tracepoint.name),
-			      sizeof(tp_name) - 1) < 0)
-		return -EFAULT;
-	tp_name[sizeof(tp_name) - 1] = 0;
+	if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
+		return -EINVAL;
+
+	prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
+	if (IS_ERR(prog))
+		return PTR_ERR(prog);
+
+	if (prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT &&
+	    prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE) {
+		err = -EINVAL;
+		goto out_put_prog;
+	}
+
+	if (prog->type == BPF_PROG_TYPE_RAW_TRACEPOINT &&
+	    prog->aux->attach_btf_id) {
+		if (attr->raw_tracepoint.name) {
+			/* raw_tp name should not be specified in raw_tp
+			 * programs that were verified via in-kernel BTF info
+			 */
+			err = -EINVAL;
+			goto out_put_prog;
+		}
+		/* raw_tp name is taken from type name instead */
+		tp_name = prog->aux->attach_func_name;
+	} else {
+		if (strncpy_from_user(buf,
+				      u64_to_user_ptr(attr->raw_tracepoint.name),
+				      sizeof(buf) - 1) < 0) {
+			err = -EFAULT;
+			goto out_put_prog;
+		}
+		buf[sizeof(buf) - 1] = 0;
+		tp_name = buf;
+	}

 	btp = bpf_get_raw_tracepoint(tp_name);
-	if (!btp)
-		return -ENOENT;
+	if (!btp) {
+		err = -ENOENT;
+		goto out_put_prog;
+	}

 	raw_tp = kzalloc(sizeof(*raw_tp), GFP_USER);
 	if (!raw_tp) {
@@ -1824,38 +1872,27 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
 		goto out_put_btp;
 	}
 	raw_tp->btp = btp;
-
-	prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
-	if (IS_ERR(prog)) {
-		err = PTR_ERR(prog);
-		goto out_free_tp;
-	}
-	if (prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT &&
-	    prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE) {
-		err = -EINVAL;
-		goto out_put_prog;
-	}
+	raw_tp->prog = prog;

 	err = bpf_probe_register(raw_tp->btp, prog);
 	if (err)
-		goto out_put_prog;
+		goto out_free_tp;

-	raw_tp->prog = prog;
 	tp_fd = anon_inode_getfd("bpf-raw-tracepoint", &bpf_raw_tp_fops, raw_tp,
 				 O_CLOEXEC);
 	if (tp_fd < 0) {
 		bpf_probe_unregister(raw_tp->btp, prog);
 		err = tp_fd;
-		goto out_put_prog;
+		goto out_free_tp;
 	}
 	return tp_fd;

-out_put_prog:
-	bpf_prog_put(prog);
 out_free_tp:
 	kfree(raw_tp);
 out_put_btp:
 	bpf_put_raw_tracepoint(btp);
+out_put_prog:
+	bpf_prog_put(prog);
 	return err;
 }
...
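Seen from user space, the new open path means a BTF-typed program passes no tracepoint name at all and the kernel resolves it from the program's attach_btf_id. A hedged sketch of that half of the flow, assuming prog_fd came from a prior BPF_PROG_LOAD that set attach_btf_id:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Open a raw tracepoint fd for an already-loaded program. For progs
 * verified via in-kernel BTF (attach_btf_id set at load time) the
 * name must stay NULL; the kernel uses attach_func_name instead. */
static int raw_tp_open(int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.raw_tracepoint.name = 0;	/* derived from BTF in-kernel */
	attr.raw_tracepoint.prog_fd = prog_fd;

	return syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN,
		       &attr, sizeof(attr));
}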
@@ -56,6 +56,8 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr)
 	e = search_kernel_exception_table(addr);
 	if (!e)
 		e = search_module_extables(addr);
+	if (!e)
+		e = search_bpf_extables(addr);
 	return e;
 }
...
@@ -995,6 +995,8 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
 };

+extern const struct bpf_func_proto bpf_skb_output_proto;
+
 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
 	   struct bpf_map *, map, u64, flags)
 {
@@ -1053,6 +1055,10 @@ raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 	switch (func_id) {
 	case BPF_FUNC_perf_event_output:
 		return &bpf_perf_event_output_proto_raw_tp;
+#ifdef CONFIG_NET
+	case BPF_FUNC_skb_output:
+		return &bpf_skb_output_proto;
+#endif
 	case BPF_FUNC_get_stackid:
 		return &bpf_get_stackid_proto_raw_tp;
 	case BPF_FUNC_get_stack:
@@ -1074,7 +1080,9 @@ static bool raw_tp_prog_is_valid_access(int off, int size,
 		return false;
 	if (off % size != 0)
 		return false;
-	return true;
+	if (!prog->aux->attach_btf_id)
+		return true;
+	return btf_ctx_access(off, size, type, prog, info);
 }

 const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
...
@@ -218,10 +218,18 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
 	if (!range_is_zero(__skb, offsetof(struct __sk_buff, cb) +
 			   FIELD_SIZEOF(struct __sk_buff, cb),
+			   offsetof(struct __sk_buff, tstamp)))
+		return -EINVAL;
+
+	/* tstamp is allowed */
+
+	if (!range_is_zero(__skb, offsetof(struct __sk_buff, tstamp) +
+			   FIELD_SIZEOF(struct __sk_buff, tstamp),
 			   sizeof(struct __sk_buff)))
 		return -EINVAL;

 	skb->priority = __skb->priority;
+	skb->tstamp = __skb->tstamp;
 	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

 	return 0;
@@ -235,6 +243,7 @@ static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
 		return;

 	__skb->priority = skb->priority;
+	__skb->tstamp = skb->tstamp;
 	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
 }
...
@@ -3798,7 +3798,7 @@ BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
 	if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
 		return -EINVAL;
-	if (unlikely(skb_size > skb->len))
+	if (unlikely(!skb || skb_size > skb->len))
 		return -EFAULT;

 	return bpf_event_output(map, flags, meta, meta_size, skb, skb_size,
@@ -3816,6 +3816,19 @@ static const struct bpf_func_proto bpf_skb_event_output_proto = {
 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
 };

+static u32 bpf_skb_output_btf_ids[5];
+const struct bpf_func_proto bpf_skb_output_proto = {
+	.func		= bpf_skb_event_output,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_BTF_ID,
+	.arg2_type	= ARG_CONST_MAP_PTR,
+	.arg3_type	= ARG_ANYTHING,
+	.arg4_type	= ARG_PTR_TO_MEM,
+	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
+	.btf_id		= bpf_skb_output_btf_ids,
+};
+
 static unsigned short bpf_tunnel_key_af(u64 flags)
 {
 	return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
...
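The new proto above is what enables the program style described in item 1 of the summary: a raw tracepoint on kfree_skb whose first argument the verifier knows, via kernel BTF, to be a 'struct sk_buff *', which then satisfies ARG_PTR_TO_BTF_ID directly. A hedged BPF-C sketch modeled on the selftests of that era; the SEC() name, map sizing and the open-coded helper/map declarations are illustrative assumptions, not a fixed API:

#include <linux/bpf.h>
#include <linux/types.h>

#define SEC(name) __attribute__((section(name), used))

struct sk_buff;

/* old-style helper declaration, pre bpf_helper_defs.h */
static int (*bpf_skb_output)(void *ctx, void *map, __u64 flags,
			     void *data, __u64 size) =
	(void *) BPF_FUNC_skb_output;

struct bpf_map_def {
	unsigned int type;
	unsigned int key_size;
	unsigned int value_size;
	unsigned int max_entries;
};

struct bpf_map_def SEC("maps") perf_buf_map = {
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(int),
	.max_entries = 0, /* libbpf sizes a 0-entry perf array to nr_cpus */
};

/* raw_tp args for kfree_skb: the verifier types ctx->skb as
 * 'struct sk_buff *' through in-kernel BTF */
struct trace_kfree_skb {
	struct sk_buff *skb;
	void *location;
};

SEC("raw_tracepoint/kfree_skb")
int trace_kfree_skb(struct trace_kfree_skb *ctx)
{
	struct sk_buff *skb = ctx->skb;
	__u64 meta = 0;

	/* skb is PTR_TO_BTF_ID here and is passed to the helper
	 * directly; no bpf_probe_read() of the pointer is needed */
	bpf_skb_output(skb, &perf_buf_map, BPF_F_CURRENT_CPU,
		       &meta, sizeof(meta));
	return 0;
}

char _license[] SEC("license") = "GPL";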
@@ -488,8 +488,8 @@ class PrinterHelpers(Printer):
 			return t
 		if t in self.mapped_types:
 			return self.mapped_types[t]
-		print("")
-		print("Unrecognized type '%s', please add it to known types!" % t)
+		print("Unrecognized type '%s', please add it to known types!" % t,
+		      file=sys.stderr)
 		sys.exit(1)

 	seen_helpers = set()
...
@@ -12,6 +12,9 @@
 #include <libbpf.h>
 #include <linux/btf.h>
 #include <linux/hashtable.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>

 #include "btf.h"
 #include "json_writer.h"
@@ -388,6 +391,54 @@ static int dump_btf_c(const struct btf *btf,
 	return err;
 }

+static struct btf *btf__parse_raw(const char *file)
+{
+	struct btf *btf;
+	struct stat st;
+	__u8 *buf;
+	FILE *f;
+
+	if (stat(file, &st))
+		return NULL;
+
+	f = fopen(file, "rb");
+	if (!f)
+		return NULL;
+
+	buf = malloc(st.st_size);
+	if (!buf) {
+		btf = ERR_PTR(-ENOMEM);
+		goto exit_close;
+	}
+
+	if ((size_t) st.st_size != fread(buf, 1, st.st_size, f)) {
+		btf = ERR_PTR(-EINVAL);
+		goto exit_free;
+	}
+
+	btf = btf__new(buf, st.st_size);
+
+exit_free:
+	free(buf);
+exit_close:
+	fclose(f);
+	return btf;
+}
+
+static bool is_btf_raw(const char *file)
+{
+	__u16 magic = 0;
+	int fd;
+
+	fd = open(file, O_RDONLY);
+	if (fd < 0)
+		return false;
+
+	read(fd, &magic, sizeof(magic));
+	close(fd);
+
+	return magic == BTF_MAGIC;
+}
+
 static int do_dump(int argc, char **argv)
 {
 	struct btf *btf = NULL;
@@ -465,7 +516,11 @@ static int do_dump(int argc, char **argv)
 		}
 		NEXT_ARG();
 	} else if (is_prefix(src, "file")) {
-		btf = btf__parse_elf(*argv, NULL);
+		if (is_btf_raw(*argv))
+			btf = btf__parse_raw(*argv);
+		else
+			btf = btf__parse_elf(*argv, NULL);
+
 		if (IS_ERR(btf)) {
 			err = PTR_ERR(btf);
 			btf = NULL;
...
@@ -1091,8 +1091,11 @@ static int do_run(int argc, char **argv)
 static int load_with_options(int argc, char **argv, bool first_prog_only)
 {
-	struct bpf_object_load_attr load_attr = { 0 };
 	enum bpf_prog_type common_prog_type = BPF_PROG_TYPE_UNSPEC;
+	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
+		.relaxed_maps = relaxed_maps,
+	);
+	struct bpf_object_load_attr load_attr = { 0 };
 	enum bpf_attach_type expected_attach_type;
 	struct map_replace *map_replace = NULL;
 	struct bpf_program *prog = NULL, *pos;
@@ -1106,9 +1109,6 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
 	const char *file;
 	int idx, err;

-	LIBBPF_OPTS(bpf_object_open_opts, open_opts,
-		.relaxed_maps = relaxed_maps,
-	);

 	if (!REQ_ARGS(2))
 		return -1;
...
@@ -420,6 +420,7 @@ union bpf_attr {
 		__u32		line_info_rec_size;	/* userspace bpf_line_info size */
 		__aligned_u64	line_info;	/* line info */
 		__u32		line_info_cnt;	/* number of bpf_line_info records */
+		__u32		attach_btf_id;	/* in-kernel BTF type id to attach to */
 	};

 	struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -2750,6 +2751,30 @@ union bpf_attr {
 *		**-EOPNOTSUPP** kernel configuration does not enable SYN cookies
 *
 *		**-EPROTONOSUPPORT** IP packet version is not 4 or 6
+ *
+ * int bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ *	Description
+ *		Write raw *data* blob into a special BPF perf event held by
+ *		*map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
+ *		event must have the following attributes: **PERF_SAMPLE_RAW**
+ *		as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
+ *		**PERF_COUNT_SW_BPF_OUTPUT** as **config**.
+ *
+ *		The *flags* are used to indicate the index in *map* for which
+ *		the value must be put, masked with **BPF_F_INDEX_MASK**.
+ *		Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
+ *		to indicate that the index of the current CPU core should be
+ *		used.
+ *
+ *		The value to write, of *size*, is passed through eBPF stack and
+ *		pointed by *data*.
+ *
+ *		*ctx* is a pointer to in-kernel struct sk_buff.
+ *
+ *		This helper is similar to **bpf_perf_event_output**\ () but
+ *		restricted to raw_tracepoint bpf programs.
+ *	Return
+ *		0 on success, or a negative error in case of failure.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -2862,7 +2887,8 @@ union bpf_attr {
 	FN(sk_storage_get),		\
 	FN(sk_storage_delete),		\
 	FN(send_signal),		\
-	FN(tcp_gen_syncookie),
+	FN(tcp_gen_syncookie),		\
+	FN(skb_output),

 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
...
@@ -167,6 +167,8 @@ enum {
 	IFLA_NEW_IFINDEX,
 	IFLA_MIN_MTU,
 	IFLA_MAX_MTU,
+	IFLA_PROP_LIST,
+	IFLA_ALT_IFNAME, /* Alternative ifname */
 	__IFLA_MAX
 };
...
@@ -300,3 +300,6 @@ tags:
 # Declare the contents of the .PHONY variable as phony.  We keep that
 # information in a variable so we can use it in if_changed and friends.
 .PHONY: $(PHONY)
+
+# Delete partially updated (corrupted) files on error
+.DELETE_ON_ERROR:
@@ -228,6 +228,9 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
 	memset(&attr, 0, sizeof(attr));
 	attr.prog_type = load_attr->prog_type;
 	attr.expected_attach_type = load_attr->expected_attach_type;
+	if (attr.prog_type == BPF_PROG_TYPE_RAW_TRACEPOINT)
+		/* expected_attach_type is ignored for tracing progs */
+		attr.attach_btf_id = attr.expected_attach_type;
 	attr.insn_cnt = (__u32)load_attr->insns_cnt;
 	attr.insns = ptr_to_u64(load_attr->insns);
 	attr.license = ptr_to_u64(load_attr->license);
...
@@ -2,6 +2,28 @@
 #ifndef __BPF_CORE_READ_H__
 #define __BPF_CORE_READ_H__

+/*
+ * enum bpf_field_info_kind is passed as a second argument into
+ * __builtin_preserve_field_info() built-in to get a specific aspect of
+ * a field, captured as a first argument. __builtin_preserve_field_info(field,
+ * info_kind) returns __u32 integer and produces BTF field relocation, which
+ * is understood and processed by libbpf during BPF object loading. See
+ * selftests/bpf for examples.
+ */
+enum bpf_field_info_kind {
+	BPF_FIELD_BYTE_OFFSET = 0,	/* field byte offset */
+	BPF_FIELD_EXISTS = 2,		/* field existence in target kernel */
+};
+
+/*
+ * Convenience macro to check that field actually exists in target kernel's.
+ * Returns:
+ *    1, if matching field is present in target kernel;
+ *    0, if no matching field found.
+ */
+#define bpf_core_field_exists(field)					    \
+	__builtin_preserve_field_info(field, BPF_FIELD_EXISTS)
+
 /*
  * bpf_core_read() abstracts away bpf_probe_read() call and captures offset
  * relocation for source address using __builtin_preserve_access_index()
@@ -12,7 +34,7 @@
  * a relocation, which records BTF type ID describing root struct/union and an
  * accessor string which describes exact embedded field that was used to take
  * an address. See detailed description of this relocation format and
- * semantics in comments to struct bpf_offset_reloc in libbpf_internal.h.
+ * semantics in comments to struct bpf_field_reloc in libbpf_internal.h.
  *
  * This relocation allows libbpf to adjust BPF instruction to use correct
  * actual field offset, based on target kernel BTF type that matches original
...
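A hedged sketch of how a CO-RE program might use the new macro to guard access to a field that can be absent on older kernels; the type and field names below are illustrative, and the open-coded bpf_probe_read declaration stands in for what bpf_helpers.h normally provides:

#include <linux/types.h>
#include <linux/bpf.h>
#include "bpf_core_read.h"

static int (*bpf_probe_read)(void *dst, __u32 size, const void *src) =
	(void *) BPF_FUNC_probe_read;

/* Local, CO-RE relocatable mirror of a kernel type; the field list
 * does not need to match the target kernel exactly. */
struct sk_buff___local {
	__u32 len;
	__u32 hypothetical_field;	/* may be absent on the target */
} __attribute__((preserve_access_index));

static __u32 get_field(struct sk_buff___local *skb)
{
	__u32 val = 0;

	/* compiles to a BPF_FIELD_EXISTS relocation; libbpf patches the
	 * test to a constant 1 or 0 for the target kernel at load time */
	if (bpf_core_field_exists(skb->hypothetical_field))
		bpf_core_read(&val, sizeof(val), &skb->hypothetical_field);
	return val;
}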
...@@ -390,14 +390,14 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext) ...@@ -390,14 +390,14 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
GElf_Ehdr ehdr; GElf_Ehdr ehdr;
if (elf_version(EV_CURRENT) == EV_NONE) { if (elf_version(EV_CURRENT) == EV_NONE) {
pr_warning("failed to init libelf for %s\n", path); pr_warn("failed to init libelf for %s\n", path);
return ERR_PTR(-LIBBPF_ERRNO__LIBELF); return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
} }
fd = open(path, O_RDONLY); fd = open(path, O_RDONLY);
if (fd < 0) { if (fd < 0) {
err = -errno; err = -errno;
pr_warning("failed to open %s: %s\n", path, strerror(errno)); pr_warn("failed to open %s: %s\n", path, strerror(errno));
return ERR_PTR(err); return ERR_PTR(err);
} }
...@@ -405,19 +405,19 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext) ...@@ -405,19 +405,19 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
elf = elf_begin(fd, ELF_C_READ, NULL); elf = elf_begin(fd, ELF_C_READ, NULL);
if (!elf) { if (!elf) {
pr_warning("failed to open %s as ELF file\n", path); pr_warn("failed to open %s as ELF file\n", path);
goto done; goto done;
} }
if (!gelf_getehdr(elf, &ehdr)) { if (!gelf_getehdr(elf, &ehdr)) {
pr_warning("failed to get EHDR from %s\n", path); pr_warn("failed to get EHDR from %s\n", path);
goto done; goto done;
} }
if (!btf_check_endianness(&ehdr)) { if (!btf_check_endianness(&ehdr)) {
pr_warning("non-native ELF endianness is not supported\n"); pr_warn("non-native ELF endianness is not supported\n");
goto done; goto done;
} }
if (!elf_rawdata(elf_getscn(elf, ehdr.e_shstrndx), NULL)) { if (!elf_rawdata(elf_getscn(elf, ehdr.e_shstrndx), NULL)) {
pr_warning("failed to get e_shstrndx from %s\n", path); pr_warn("failed to get e_shstrndx from %s\n", path);
goto done; goto done;
} }
...@@ -427,29 +427,29 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext) ...@@ -427,29 +427,29 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
idx++; idx++;
if (gelf_getshdr(scn, &sh) != &sh) { if (gelf_getshdr(scn, &sh) != &sh) {
pr_warning("failed to get section(%d) header from %s\n", pr_warn("failed to get section(%d) header from %s\n",
idx, path); idx, path);
goto done; goto done;
} }
name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name); name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name);
if (!name) { if (!name) {
pr_warning("failed to get section(%d) name from %s\n", pr_warn("failed to get section(%d) name from %s\n",
idx, path); idx, path);
goto done; goto done;
} }
if (strcmp(name, BTF_ELF_SEC) == 0) { if (strcmp(name, BTF_ELF_SEC) == 0) {
btf_data = elf_getdata(scn, 0); btf_data = elf_getdata(scn, 0);
if (!btf_data) { if (!btf_data) {
pr_warning("failed to get section(%d, %s) data from %s\n", pr_warn("failed to get section(%d, %s) data from %s\n",
 				   idx, name, path);
 			goto done;
 		}
 		continue;
 	} else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
 		btf_ext_data = elf_getdata(scn, 0);
 		if (!btf_ext_data) {
-			pr_warning("failed to get section(%d, %s) data from %s\n",
+			pr_warn("failed to get section(%d, %s) data from %s\n",
 				   idx, name, path);
 			goto done;
 		}
 		continue;
@@ -600,9 +600,9 @@ int btf__load(struct btf *btf)
 			       log_buf, log_buf_size, false);
 	if (btf->fd < 0) {
 		err = -errno;
-		pr_warning("Error loading BTF: %s(%d)\n", strerror(errno), errno);
+		pr_warn("Error loading BTF: %s(%d)\n", strerror(errno), errno);
 		if (*log_buf)
-			pr_warning("%s\n", log_buf);
+			pr_warn("%s\n", log_buf);
 		goto done;
 	}
@@ -707,8 +707,8 @@ int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
 	if (snprintf(container_name, max_name, "____btf_map_%s", map_name) ==
 	    max_name) {
-		pr_warning("map:%s length of '____btf_map_%s' is too long\n",
+		pr_warn("map:%s length of '____btf_map_%s' is too long\n",
 			   map_name, map_name);
 		return -EINVAL;
 	}
@@ -721,14 +721,14 @@ int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
 	container_type = btf__type_by_id(btf, container_id);
 	if (!container_type) {
-		pr_warning("map:%s cannot find BTF type for container_id:%u\n",
+		pr_warn("map:%s cannot find BTF type for container_id:%u\n",
 			   map_name, container_id);
 		return -EINVAL;
 	}
 	if (!btf_is_struct(container_type) || btf_vlen(container_type) < 2) {
-		pr_warning("map:%s container_name:%s is an invalid container struct\n",
+		pr_warn("map:%s container_name:%s is an invalid container struct\n",
 			   map_name, container_name);
 		return -EINVAL;
 	}
@@ -737,25 +737,25 @@ int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
 	key_size = btf__resolve_size(btf, key->type);
 	if (key_size < 0) {
-		pr_warning("map:%s invalid BTF key_type_size\n", map_name);
+		pr_warn("map:%s invalid BTF key_type_size\n", map_name);
 		return key_size;
 	}
 	if (expected_key_size != key_size) {
-		pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
+		pr_warn("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
 			   map_name, (__u32)key_size, expected_key_size);
 		return -EINVAL;
 	}
 	value_size = btf__resolve_size(btf, value->type);
 	if (value_size < 0) {
-		pr_warning("map:%s invalid BTF value_type_size\n", map_name);
+		pr_warn("map:%s invalid BTF value_type_size\n", map_name);
 		return value_size;
 	}
 	if (expected_value_size != value_size) {
-		pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
+		pr_warn("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
 			   map_name, (__u32)value_size, expected_value_size);
 		return -EINVAL;
 	}
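
The ____btf_map_* container validated above is what BTF-annotated map definitions emit. A minimal sketch, assuming the BPF_ANNOTATE_KV_PAIR helper from the selftests' bpf_helpers.h (map name and value type here are illustrative, not from this commit):

	struct my_value { long counter; };

	/* Expands to roughly:
	 *   struct ____btf_map_my_map { int key; struct my_value value; };
	 * plus a dummy variable, so the key/value types reach the object's
	 * BTF and btf__get_map_kv_tids() can check them against the map
	 * definition's key_size/value_size.
	 */
	BPF_ANNOTATE_KV_PAIR(my_map, int, struct my_value);
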
@@ -888,14 +888,14 @@ static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
 	return btf_ext_setup_info(btf_ext, &param);
 }
 
-static int btf_ext_setup_offset_reloc(struct btf_ext *btf_ext)
+static int btf_ext_setup_field_reloc(struct btf_ext *btf_ext)
 {
 	struct btf_ext_sec_setup_param param = {
-		.off = btf_ext->hdr->offset_reloc_off,
-		.len = btf_ext->hdr->offset_reloc_len,
-		.min_rec_size = sizeof(struct bpf_offset_reloc),
-		.ext_info = &btf_ext->offset_reloc_info,
-		.desc = "offset_reloc",
+		.off = btf_ext->hdr->field_reloc_off,
+		.len = btf_ext->hdr->field_reloc_len,
+		.min_rec_size = sizeof(struct bpf_field_reloc),
+		.ext_info = &btf_ext->field_reloc_info,
+		.desc = "field_reloc",
 	};
 
 	return btf_ext_setup_info(btf_ext, &param);
@@ -975,9 +975,9 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
 		goto done;
 
 	if (btf_ext->hdr->hdr_len <
-	    offsetofend(struct btf_ext_header, offset_reloc_len))
+	    offsetofend(struct btf_ext_header, field_reloc_len))
 		goto done;
-	err = btf_ext_setup_offset_reloc(btf_ext);
+	err = btf_ext_setup_field_reloc(btf_ext);
 	if (err)
 		goto done;
...
@@ -60,8 +60,8 @@ struct btf_ext_header {
 	__u32 line_info_len;
 
 	/* optional part of .BTF.ext header */
-	__u32 offset_reloc_off;
-	__u32 offset_reloc_len;
+	__u32 field_reloc_off;
+	__u32 field_reloc_len;
 };
 
 LIBBPF_API void btf__free(struct btf *btf);
...
@@ -428,7 +428,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
 		/* type loop, but resolvable through fwd declaration */
 		if (btf_is_composite(t) && through_ptr && t->name_off != 0)
 			return 0;
-		pr_warning("unsatisfiable type cycle, id:[%u]\n", id);
+		pr_warn("unsatisfiable type cycle, id:[%u]\n", id);
 		return -ELOOP;
 	}
@@ -636,8 +636,8 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
 			if (id == cont_id)
 				return;
 			if (t->name_off == 0) {
-				pr_warning("anonymous struct/union loop, id:[%u]\n",
-					   id);
+				pr_warn("anonymous struct/union loop, id:[%u]\n",
+					id);
 				return;
 			}
 			btf_dump_emit_struct_fwd(d, id, t);
@@ -782,7 +782,7 @@ static int btf_align_of(const struct btf *btf, __u32 id)
 		return align;
 	}
 	default:
-		pr_warning("unsupported BTF_KIND:%u\n", btf_kind(t));
+		pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t));
 		return 1;
 	}
 }
@@ -1067,7 +1067,7 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
 			 * chain, restore stack, emit warning, and try to
 			 * proceed nevertheless
 			 */
-			pr_warning("not enough memory for decl stack:%d", err);
+			pr_warn("not enough memory for decl stack:%d", err);
 			d->decl_stack_cnt = stack_start;
 			return;
 		}
@@ -1096,8 +1096,8 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
 		case BTF_KIND_TYPEDEF:
 			goto done;
 		default:
-			pr_warning("unexpected type in decl chain, kind:%u, id:[%u]\n",
-				   btf_kind(t), id);
+			pr_warn("unexpected type in decl chain, kind:%u, id:[%u]\n",
+				btf_kind(t), id);
 			goto done;
 		}
 	}
@@ -1323,8 +1323,8 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
 		return;
 	}
 	default:
-		pr_warning("unexpected type in decl chain, kind:%u, id:[%u]\n",
-			   kind, id);
+		pr_warn("unexpected type in decl chain, kind:%u, id:[%u]\n",
+			kind, id);
 		return;
 	}
...
[diff collapsed]
@@ -75,14 +75,19 @@ struct bpf_object_open_attr {
  * have all the padding bytes initialized to zero. It's not guaranteed though,
  * when copying literal, that compiler won't copy garbage in literal's padding
  * bytes, but that's the best way I've found and it seems to work in practice.
+ *
+ * The macro declares an opts struct of the given type and name,
+ * zero-initializes it (including any padding) with memset(), then assigns
+ * the user-provided initial values via struct-initializer varargs.
  */
-#define LIBBPF_OPTS(TYPE, NAME, ...)				    \
-	struct TYPE NAME;					    \
-	memset(&NAME, 0, sizeof(struct TYPE));			    \
-	NAME = (struct TYPE) {					    \
-		.sz = sizeof(struct TYPE),			    \
-		__VA_ARGS__					    \
-	}
+#define DECLARE_LIBBPF_OPTS(TYPE, NAME, ...)			    \
+	struct TYPE NAME = ({					    \
+		memset(&NAME, 0, sizeof(struct TYPE));		    \
+		(struct TYPE) {					    \
+			.sz = sizeof(struct TYPE),		    \
+			__VA_ARGS__				    \
+		};						    \
+	})
 
 struct bpf_object_open_opts {
 	/* size of this struct, for forward/backward compatibility */
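
A hedged usage sketch of the renamed macro (object name and option values below are illustrative, not from this commit):

	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
		.object_name = "my_object",
		.relaxed_maps = true,
	);
	/* opts.sz is filled in automatically; unspecified fields stay zero,
	 * which is what keeps older/newer libbpf versions compatible. */
	struct bpf_object *obj = bpf_object__open_file("prog.o", &opts);
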
@@ -96,8 +101,10 @@ struct bpf_object_open_opts {
 	const char *object_name;
 	/* parse map definitions non-strictly, allowing extra attributes/data */
 	bool relaxed_maps;
+	/* process CO-RE relocations non-strictly, allowing them to fail */
+	bool relaxed_core_relocs;
 };
-#define bpf_object_open_opts__last_field relaxed_maps
+#define bpf_object_open_opts__last_field relaxed_core_relocs
 
 LIBBPF_API struct bpf_object *bpf_object__open(const char *path);
 LIBBPF_API struct bpf_object *
@@ -300,8 +307,13 @@ LIBBPF_API int bpf_program__set_sched_cls(struct bpf_program *prog);
 LIBBPF_API int bpf_program__set_sched_act(struct bpf_program *prog);
 LIBBPF_API int bpf_program__set_xdp(struct bpf_program *prog);
 LIBBPF_API int bpf_program__set_perf_event(struct bpf_program *prog);
+LIBBPF_API enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog);
 LIBBPF_API void bpf_program__set_type(struct bpf_program *prog,
 				      enum bpf_prog_type type);
+
+LIBBPF_API enum bpf_attach_type
+bpf_program__get_expected_attach_type(struct bpf_program *prog);
 LIBBPF_API void
 bpf_program__set_expected_attach_type(struct bpf_program *prog,
 				      enum bpf_attach_type type);
...
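
The new getters mirror the existing setters; a short, hypothetical sketch of how a caller might inspect what open-time auto-detection chose for a program, and override it only when needed:

	enum bpf_prog_type type = bpf_program__get_type(prog);
	enum bpf_attach_type at = bpf_program__get_expected_attach_type(prog);

	/* e.g. force a type only if section-name detection left it unset */
	if (type == BPF_PROG_TYPE_UNSPEC)
		bpf_program__set_type(prog, BPF_PROG_TYPE_RAW_TRACEPOINT);
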
@@ -195,4 +195,6 @@ LIBBPF_0.0.6 {
 	global:
 		bpf_object__open_file;
 		bpf_object__open_mem;
+		bpf_program__get_expected_attach_type;
+		bpf_program__get_type;
 } LIBBPF_0.0.5;
@@ -59,7 +59,7 @@ do { \
 	libbpf_print(level, "libbpf: " fmt, ##__VA_ARGS__); \
 } while (0)
 
-#define pr_warning(fmt, ...)	__pr(LIBBPF_WARN, fmt, ##__VA_ARGS__)
+#define pr_warn(fmt, ...)	__pr(LIBBPF_WARN, fmt, ##__VA_ARGS__)
 #define pr_info(fmt, ...)	__pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
 #define pr_debug(fmt, ...)	__pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)
@@ -68,7 +68,7 @@ static inline bool libbpf_validate_opts(const char *opts,
 					const char *type_name)
 {
 	if (user_sz < sizeof(size_t)) {
-		pr_warning("%s size (%zu) is too small\n", type_name, user_sz);
+		pr_warn("%s size (%zu) is too small\n", type_name, user_sz);
 		return false;
 	}
 	if (user_sz > opts_sz) {
@@ -76,8 +76,8 @@ static inline bool libbpf_validate_opts(const char *opts,
 		for (i = opts_sz; i < user_sz; i++) {
 			if (opts[i]) {
-				pr_warning("%s has non-zero extra bytes",
-					   type_name);
+				pr_warn("%s has non-zero extra bytes",
+					type_name);
 				return false;
 			}
 		}
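
The loop above is what makes struct-size-based option versioning work: a caller built against a newer libbpf may pass a larger opts struct, and an older library accepts it only if the tail it does not understand is all zeros. A standalone sketch of the same rule (illustrative, not libbpf code):

	#include <stdbool.h>
	#include <stddef.h>

	/* Accept a caller's larger struct only if the unknown tail is zeroed. */
	static bool tail_is_zero(const char *opts, size_t known_sz, size_t user_sz)
	{
		for (size_t i = known_sz; i < user_sz; i++)
			if (opts[i])
				return false;
		return true;
	}
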
@@ -126,7 +126,7 @@ struct btf_ext {
 	};
 	struct btf_ext_info func_info;
 	struct btf_ext_info line_info;
-	struct btf_ext_info offset_reloc_info;
+	struct btf_ext_info field_reloc_info;
 	__u32 data_size;
 };
@@ -151,13 +151,23 @@ struct bpf_line_info_min {
 	__u32 line_col;
 };
 
-/* The minimum bpf_offset_reloc checked by the loader
+/* bpf_field_info_kind encodes which aspect of captured field has to be
+ * adjusted by relocations. Currently supported values are:
+ *   - BPF_FIELD_BYTE_OFFSET: field offset (in bytes);
+ *   - BPF_FIELD_EXISTS: field existence (1, if field exists; 0, otherwise);
+ */
+enum bpf_field_info_kind {
+	BPF_FIELD_BYTE_OFFSET = 0,	/* field byte offset */
+	BPF_FIELD_EXISTS = 2,		/* field existence in target kernel */
+};
+
+/* The minimum bpf_field_reloc checked by the loader
  *
- * Offset relocation captures the following data:
+ * Field relocation captures the following data:
  * - insn_off - instruction offset (in bytes) within a BPF program that needs
- *   its insn->imm field to be relocated with actual offset;
+ *   its insn->imm field to be relocated with actual field info;
  * - type_id - BTF type ID of the "root" (containing) entity of a relocatable
- *   offset;
+ *   field;
  * - access_str_off - offset into corresponding .BTF string section. String
  *   itself encodes an accessed field using a sequence of field and array
  *   indices, separated by colon (:). It's conceptually very close to LLVM's
@@ -188,15 +198,16 @@ struct bpf_line_info_min {
  *   bpf_probe_read(&dst, sizeof(dst),
  *		    __builtin_preserve_access_index(&src->a.b.c));
  *
- * In this case Clang will emit offset relocation recording necessary data to
+ * In this case Clang will emit field relocation recording necessary data to
  * be able to find offset of embedded `a.b.c` field within `src` struct.
  *
  * [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction
  */
-struct bpf_offset_reloc {
+struct bpf_field_reloc {
 	__u32 insn_off;
 	__u32 type_id;
 	__u32 access_str_off;
+	enum bpf_field_info_kind kind;
 };
 
 #endif /* __LIBBPF_LIBBPF_INTERNAL_H */
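
For context, a BPF_FIELD_EXISTS relocation is produced on the compiler side; a minimal sketch, assuming a Clang new enough to provide __builtin_preserve_field_info() (struct and field names are illustrative):

	/* Layout of this struct may differ on the target kernel. */
	struct sample {
		int new_field;
	} __attribute__((preserve_access_index));

	static int new_field_exists(struct sample *s)
	{
		/* 2 == BPF_FIELD_EXISTS from the enum above; libbpf patches
		 * the result to 1 or 0 against kernel BTF at load time. */
		return __builtin_preserve_field_info(s->new_field, 2);
	}
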
@@ -274,33 +274,55 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk)
 	/* This is the C-program:
 	 * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
 	 * {
-	 *     int index = ctx->rx_queue_index;
+	 *     int ret, index = ctx->rx_queue_index;
 	 *
 	 *     // A set entry here means that the corresponding queue_id
 	 *     // has an active AF_XDP socket bound to it.
+	 *     ret = bpf_redirect_map(&xsks_map, index, XDP_PASS);
+	 *     if (ret > 0)
+	 *         return ret;
+	 *
+	 *     // Fallback for pre-5.3 kernels, not supporting default
+	 *     // action in the flags parameter.
 	 *     if (bpf_map_lookup_elem(&xsks_map, &index))
 	 *         return bpf_redirect_map(&xsks_map, index, 0);
+	 *
 	 *     return XDP_PASS;
 	 * }
 	 */
 	struct bpf_insn prog[] = {
-		/* r1 = *(u32 *)(r1 + 16) */
-		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 16),
-		/* *(u32 *)(r10 - 4) = r1 */
-		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_1, -4),
+		/* r2 = *(u32 *)(r1 + 16) */
+		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 16),
+		/* *(u32 *)(r10 - 4) = r2 */
+		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -4),
+		/* r1 = xskmap[] */
+		BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
+		/* r3 = XDP_PASS */
+		BPF_MOV64_IMM(BPF_REG_3, 2),
+		/* call bpf_redirect_map */
+		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
+		/* if w0 > 0 goto pc+13 */
+		BPF_JMP32_IMM(BPF_JSGT, BPF_REG_0, 0, 13),
+		/* r2 = r10 */
 		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+		/* r2 += -4 */
 		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+		/* r1 = xskmap[] */
 		BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
+		/* call bpf_map_lookup_elem */
 		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+		/* r1 = r0 */
 		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
-		BPF_MOV32_IMM(BPF_REG_0, 2),
-		/* if r1 == 0 goto +5 */
+		/* r0 = XDP_PASS */
+		BPF_MOV64_IMM(BPF_REG_0, 2),
+		/* if r1 == 0 goto pc+5 */
 		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 5),
 		/* r2 = *(u32 *)(r10 - 4) */
-		BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
 		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -4),
-		BPF_MOV32_IMM(BPF_REG_3, 0),
+		/* r1 = xskmap[] */
+		BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
+		/* r3 = 0 */
+		BPF_MOV64_IMM(BPF_REG_3, 0),
+		/* call bpf_redirect_map */
 		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
 		/* The jumps are to this instruction */
 		BPF_EXIT_INSN(),
@@ -311,7 +333,7 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk)
 			     "LGPL-2.1 or BSD-2-Clause", 0, log_buf,
 			     log_buf_size);
 	if (prog_fd < 0) {
-		pr_warning("BPF log buffer:\n%s", log_buf);
+		pr_warn("BPF log buffer:\n%s", log_buf);
 		return prog_fd;
 	}
@@ -499,7 +521,7 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
 		return -EFAULT;
 
 	if (umem->refcount) {
-		pr_warning("Error: shared umems not supported by libbpf.\n");
+		pr_warn("Error: shared umems not supported by libbpf.\n");
 		return -EBUSY;
 	}
...
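
The in-comment C program above, written out as a standalone BPF C sketch (map definition, sizes, and section name are illustrative; it assumes the selftests-style bpf_helpers.h, and passing a default action in bpf_redirect_map()'s flags argument requires kernel 5.3+):

	#include <linux/bpf.h>
	#include "bpf_helpers.h"

	struct bpf_map_def SEC("maps") xsks_map = {
		.type = BPF_MAP_TYPE_XSKMAP,
		.key_size = sizeof(int),
		.value_size = sizeof(int),
		.max_entries = 64,	/* illustrative */
	};

	SEC("xdp_sock")
	int xdp_sock_prog(struct xdp_md *ctx)
	{
		int ret, index = ctx->rx_queue_index;

		/* On 5.3+ kernels the flags argument is the default action
		 * when no AF_XDP socket is bound to this queue. */
		ret = bpf_redirect_map(&xsks_map, index, XDP_PASS);
		if (ret > 0)
			return ret;

		/* Fallback for older kernels. */
		if (bpf_map_lookup_elem(&xsks_map, &index))
			return bpf_redirect_map(&xsks_map, index, 0);

		return XDP_PASS;
	}
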
@@ -7,11 +7,10 @@ FEATURE-DUMP.libbpf
 fixdep
 test_align
 test_dev_cgroup
-test_progs
+/test_progs*
 test_tcpbpf_user
 test_verifier_log
 feature
-test_libbpf_open
 test_sock
 test_sock_addr
 test_sock_fields
@@ -33,9 +32,10 @@ test_tcpnotify_user
 test_libbpf
 test_tcp_check_syncookie_user
 test_sysctl
-alu32
 libbpf.pc
 libbpf.so.*
 test_hashmap
 test_btf_dump
 xdping
+/no_alu32
+/bpf_gcc
[diff collapsed]
@@ -50,7 +50,7 @@ void test_attach_probe(void)
 	const int kprobe_idx = 0, kretprobe_idx = 1;
 	const int uprobe_idx = 2, uretprobe_idx = 3;
 	const char *obj_name = "attach_probe";
-	LIBBPF_OPTS(bpf_object_open_opts, open_opts,
+	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
 		.object_name = obj_name,
 		.relaxed_maps = true,
 	);
@@ -99,11 +99,6 @@ void test_attach_probe(void)
 		  "prog '%s' not found\n", uretprobe_name))
 		goto cleanup;
 
-	bpf_program__set_kprobe(kprobe_prog);
-	bpf_program__set_kprobe(kretprobe_prog);
-	bpf_program__set_kprobe(uprobe_prog);
-	bpf_program__set_kprobe(uretprobe_prog);
-
 	/* create maps && load programs */
 	err = bpf_object__load(obj);
 	if (CHECK(err, "obj_load", "err %d\n", err))
...
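
The deleted bpf_program__set_kprobe() calls are no longer needed because program types are now derived from ELF section names when the object is opened. A hedged sketch of the naming convention (probe targets here are illustrative):

	SEC("kprobe/sys_nanosleep")	/* -> BPF_PROG_TYPE_KPROBE */
	int handle_kprobe(struct pt_regs *ctx) { return 0; }

	SEC("kretprobe/sys_nanosleep")	/* kretprobes get the same prog type */
	int handle_kretprobe(struct pt_regs *ctx) { return 0; }
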
@@ -174,6 +174,21 @@
 	.fails = true,							\
 }
 
+#define EXISTENCE_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) {	\
+	.a = 42,							\
+}
+
+#define EXISTENCE_CASE_COMMON(name)					\
+	.case_name = #name,						\
+	.bpf_obj_file = "test_core_reloc_existence.o",			\
+	.btf_src_file = "btf__core_reloc_" #name ".o",			\
+	.relaxed_core_relocs = true					\
+
+#define EXISTENCE_ERR_CASE(name) {					\
+	EXISTENCE_CASE_COMMON(name),					\
+	.fails = true,							\
+}
+
 struct core_reloc_test_case {
 	const char *case_name;
 	const char *bpf_obj_file;
@@ -183,6 +198,7 @@ struct core_reloc_test_case {
 	const char *output;
 	int output_len;
 	bool fails;
+	bool relaxed_core_relocs;
 };
 
 static struct core_reloc_test_case test_cases[] = {
@@ -195,8 +211,8 @@ static struct core_reloc_test_case test_cases[] = {
 		.input_len = 0,
 		.output = STRUCT_TO_CHAR_PTR(core_reloc_kernel_output) {
 			.valid = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, },
-			.comm = "test_progs\0\0\0\0\0",
-			.comm_len = 11,
+			.comm = "test_progs",
+			.comm_len = sizeof("test_progs"),
 		},
 		.output_len = sizeof(struct core_reloc_kernel_output),
 	},
@@ -283,6 +299,59 @@ static struct core_reloc_test_case test_cases[] = {
 		},
 		.output_len = sizeof(struct core_reloc_misc_output),
 	},
+
+	/* validate field existence checks */
+	{
+		EXISTENCE_CASE_COMMON(existence),
+		.input = STRUCT_TO_CHAR_PTR(core_reloc_existence) {
+			.a = 1,
+			.b = 2,
+			.c = 3,
+			.arr = { 4 },
+			.s = { .x = 5 },
+		},
+		.input_len = sizeof(struct core_reloc_existence),
+		.output = STRUCT_TO_CHAR_PTR(core_reloc_existence_output) {
+			.a_exists = 1,
+			.b_exists = 1,
+			.c_exists = 1,
+			.arr_exists = 1,
+			.s_exists = 1,
+			.a_value = 1,
+			.b_value = 2,
+			.c_value = 3,
+			.arr_value = 4,
+			.s_value = 5,
+		},
+		.output_len = sizeof(struct core_reloc_existence_output),
+	},
+	{
+		EXISTENCE_CASE_COMMON(existence___minimal),
+		.input = STRUCT_TO_CHAR_PTR(core_reloc_existence___minimal) {
+			.a = 42,
+		},
+		.input_len = sizeof(struct core_reloc_existence),
+		.output = STRUCT_TO_CHAR_PTR(core_reloc_existence_output) {
+			.a_exists = 1,
+			.b_exists = 0,
+			.c_exists = 0,
+			.arr_exists = 0,
+			.s_exists = 0,
+			.a_value = 42,
+			.b_value = 0xff000002u,
+			.c_value = 0xff000003u,
+			.arr_value = 0xff000004u,
+			.s_value = 0xff000005u,
+		},
+		.output_len = sizeof(struct core_reloc_existence_output),
+	},
+
+	EXISTENCE_ERR_CASE(existence__err_int_sz),
+	EXISTENCE_ERR_CASE(existence__err_int_type),
+	EXISTENCE_ERR_CASE(existence__err_int_kind),
+	EXISTENCE_ERR_CASE(existence__err_arr_kind),
+	EXISTENCE_ERR_CASE(existence__err_arr_value_type),
+	EXISTENCE_ERR_CASE(existence__err_struct_type),
 };
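
A hedged sketch of what the variant types referenced above might look like (the actual definitions live in the selftests' core_reloc_types.h; the ___minimal variant keeps only field a, so every other existence check resolves to 0):

	/* hypothetical shape, for illustration only */
	struct core_reloc_existence___minimal {
		int a;
	};
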
 struct data {
@@ -305,11 +374,14 @@ void test_core_reloc(void)
 	for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
 		test_case = &test_cases[i];
 		if (!test__start_subtest(test_case->case_name))
 			continue;
 
-		obj = bpf_object__open(test_case->bpf_obj_file);
+		DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
+			.relaxed_core_relocs = test_case->relaxed_core_relocs,
+		);
+
+		obj = bpf_object__open_file(test_case->bpf_obj_file, &opts);
 		if (CHECK(IS_ERR_OR_NULL(obj), "obj_open",
 			  "failed to open '%s': %ld\n",
 			  test_case->bpf_obj_file, PTR_ERR(obj)))
@@ -319,7 +391,6 @@ void test_core_reloc(void)
 		if (CHECK(!prog, "find_probe",
 			  "prog '%s' not found\n", probe_name))
 			goto cleanup;
-		bpf_program__set_type(prog, BPF_PROG_TYPE_RAW_TRACEPOINT);
 
 		load_attr.obj = obj;
 		load_attr.log_level = 0;
...
@@ -91,12 +91,18 @@ static void do_flow_dissector_reattach(void)
 
 void test_flow_dissector_reattach(void)
 {
-	int init_net, err;
+	int init_net, self_net, err;
+
+	self_net = open("/proc/self/ns/net", O_RDONLY);
+	if (CHECK_FAIL(self_net < 0)) {
+		perror("open(/proc/self/ns/net)");
+		return;
+	}
 
 	init_net = open("/proc/1/ns/net", O_RDONLY);
 	if (CHECK_FAIL(init_net < 0)) {
 		perror("open(/proc/1/ns/net)");
-		return;
+		goto out_close;
 	}
 
 	err = setns(init_net, CLONE_NEWNET);
@@ -108,7 +114,7 @@ void test_flow_dissector_reattach(void)
 	if (is_attached(init_net)) {
 		test__skip();
 		printf("Can't test with flow dissector attached to init_net\n");
-		return;
+		goto out_setns;
 	}
 
 	/* First run tests in root network namespace */
@@ -118,10 +124,17 @@ void test_flow_dissector_reattach(void)
 	err = unshare(CLONE_NEWNET);
 	if (CHECK_FAIL(err)) {
 		perror("unshare(CLONE_NEWNET)");
-		goto out_close;
+		goto out_setns;
 	}
 
 	do_flow_dissector_reattach();
 
+out_setns:
+	/* Move back to netns we started in. */
+	err = setns(self_net, CLONE_NEWNET);
+	if (CHECK_FAIL(err))
+		perror("setns(/proc/self/ns/net)");
+
 out_close:
 	close(init_net);
+	close(self_net);
 }
[diff collapsed]
Three new stub sources follow; each exists only so that BTF for the given struct variant gets emitted into a test object:

+#include "core_reloc_types.h"
+
+void f(struct core_reloc_existence x) {}

+#include "core_reloc_types.h"
+
+void f(struct core_reloc_existence___err_wrong_arr_kind x) {}

+#include "core_reloc_types.h"
+
+void f(struct core_reloc_existence___err_wrong_arr_value_type x) {}
[7 more file diffs collapsed]