Commit addb0679 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Daniel Borkmann says:

====================
pull-request: bpf-next 2018-12-11

The following pull-request contains BPF updates for your *net-next* tree.

It has three minor merge conflicts; the resolutions are:

1) tools/testing/selftests/bpf/test_verifier.c

 Take first chunk with alignment_prevented_execution.

2) net/core/filter.c

  [...]
  case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
  case bpf_ctx_range(struct __sk_buff, wire_len):
        return false;
  [...]

3) include/uapi/linux/bpf.h

  Take the second chunk for the two cases each.

The main changes are:

1) Add support for BPF line info via BTF and extend libbpf as well
   as bpftool's program dump to annotate output with BPF C code to
   facilitate debugging and introspection, from Martin.
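
   Consumers such as bpftool use the new libbpf bpf_prog_linfo API to map
   xlated instruction offsets back to source lines. The sketch below mirrors
   the lookup pattern visible in the bpftool changes further down; the
   function name, header paths and the info/btf/insn_idx parameters are
   assumptions (the caller is expected to have fetched them via the usual
   bpf_obj_get_info_by_fd() plumbing):

      #include <stdio.h>
      #include <linux/bpf.h>
      #include <bpf/btf.h>
      #include <bpf/libbpf.h>

      /* Print the source line recorded for xlated instruction insn_idx. */
      static void print_src_line(const struct bpf_prog_info *info,
                                 const struct btf *btf, __u32 insn_idx)
      {
              struct bpf_prog_linfo *prog_linfo = bpf_prog_linfo__new(info);
              const struct bpf_line_info *linfo;

              if (!prog_linfo)
                      return;
              /* nr_skip == 0: search from the first line_info record */
              linfo = bpf_prog_linfo__lfind(prog_linfo, insn_idx, 0);
              if (linfo)
                      printf("; %s\n",
                             btf__name_by_offset(btf, linfo->line_off));
              bpf_prog_linfo__free(prog_linfo);
      }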

2) Add support for BPF_ALU | BPF_ARSH | BPF_{K,X} in interpreter
   and all JIT backends, from Jiong.
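
   For reference, ARSH is an arithmetic (sign-preserving) right shift. The
   32-bit interpreter case added below behaves roughly like this illustrative
   C sketch (dst/src stand in for the 64-bit BPF registers):

      #include <stdint.h>

      /* BPF_ALU | BPF_ARSH | BPF_X on the lower 32 bits: treat the low
       * word of dst as signed, shift it right by src, then zero-extend
       * the result back into the 64-bit register (mirrors the new
       * interpreter case DST = (u64)(u32)((*(s32 *)&DST) >> SRC)).
       */
      static uint64_t alu32_arsh_x(uint64_t dst, uint64_t src)
      {
              return (uint64_t)(uint32_t)((int32_t)dst >> (uint32_t)src);
      }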

3) Improve BPF test coverage on archs with no efficient unaligned
   access by adding an "any alignment" flag to the BPF program load
   to forcefully disable verifier alignment checks, from David.
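
   A minimal sketch of requesting this at load time via the raw bpf(2)
   syscall; the flag name and the CAP_SYS_ADMIN requirement come from the
   kernel changes below, while insns/insn_cnt are assumed to be prepared
   by the caller and error handling is omitted:

      #include <string.h>
      #include <unistd.h>
      #include <sys/syscall.h>
      #include <linux/bpf.h>

      /* Load a socket filter with verifier alignment checks disabled.
       * On archs without efficient unaligned access this needs
       * CAP_SYS_ADMIN (see the bpf_prog_load() change below).
       */
      static int load_any_alignment(const struct bpf_insn *insns, __u32 insn_cnt)
      {
              union bpf_attr attr;

              memset(&attr, 0, sizeof(attr));
              attr.prog_type  = BPF_PROG_TYPE_SOCKET_FILTER;
              attr.insns      = (__u64)(unsigned long)insns;
              attr.insn_cnt   = insn_cnt;
              attr.license    = (__u64)(unsigned long)"GPL";
              attr.prog_flags = BPF_F_ANY_ALIGNMENT;

              return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
      }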

4) Add a new bpf_prog_test_run_xattr() API to libbpf which allows for
   proper use of BPF_PROG_TEST_RUN with data_out, from Lorenz.
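
   A hedged usage sketch of the new API; the struct and field names are as
   added to libbpf in this series, the header path and the surrounding
   function are assumptions, and prog_fd plus the packet buffers are
   expected to exist in the caller:

      #include <stdio.h>
      #include <bpf/bpf.h>

      /* Run prog_fd once on pkt_in, collecting up to out_cap bytes of
       * the resulting packet in pkt_out.
       */
      static int run_once(int prog_fd, void *pkt_in, __u32 in_len,
                          void *pkt_out, __u32 out_cap)
      {
              struct bpf_prog_test_run_attr tattr = {
                      .prog_fd       = prog_fd,
                      .repeat        = 1,
                      .data_in       = pkt_in,
                      .data_size_in  = in_len,
                      .data_out      = pkt_out,
                      .data_size_out = out_cap,   /* capacity on input */
              };
              int err = bpf_prog_test_run_xattr(&tattr);

              if (!err)
                      printf("retval %u, %u output bytes\n",
                             tattr.retval, tattr.data_size_out);
              /* ENOSPC indicates the output was truncated to out_cap
               * instead of overrunning the buffer as before.
               */
              return err;
      }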

5) Extend tc BPF programs to use a new __sk_buff field called wire_len
   for more accurate accounting of packets going to the wire, from Petar.
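
   A minimal, illustrative tc BPF C program reading the new field; wire_len
   maps to qdisc_skb_cb->pkt_len as set up in the filter.c changes below,
   i.e. the length used for qdisc accounting rather than skb->len. The
   section name, function name and the 1514-byte budget are just for the
   example:

      #include <linux/bpf.h>
      #include <linux/pkt_cls.h>

      #define SEC(name) __attribute__((section(name), used))

      SEC("classifier")
      int cap_wire_len(struct __sk_buff *skb)
      {
              /* hypothetical per-packet budget: drop anything whose
               * on-wire accounting length exceeds a single frame
               */
              if (skb->wire_len > 1514)
                      return TC_ACT_SHOT;
              return TC_ACT_OK;
      }

      char _license[] SEC("license") = "GPL";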

6) Add trace pipe dumping support to bpftool along with several
   improvements in its bash completion and map/prog dump,
   from Quentin.

7) Optimize arm64 BPF JIT to always emit movn/movk/movk sequence for
   kernel addresses and add a dedicated BPF JIT backend allocator,
   from Ard.

8) Add a BPF helper function for IR remotes to report mouse movements,
   from Sean.
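
   On the BPF side, a lirc_mode2 program receives the raw IR sample as its
   context and can now call the new helper. An illustrative sketch follows;
   the "decoding" is made up and SEC() is defined locally, only the helper
   signature mirrors the kernel patch:

      #include <linux/bpf.h>

      #define SEC(name) __attribute__((section(name), used))

      static int (*bpf_rc_pointer_rel)(void *ctx, __s32 rel_x, __s32 rel_y) =
              (void *) BPF_FUNC_rc_pointer_rel;

      SEC("lirc_mode2")
      int ir_mouse(unsigned int *sample)
      {
              /* hypothetical decoder: pretend every sample means "move
               * one unit to the right"; a real program would decode the
               * remote's IR protocol here
               */
              bpf_rc_pointer_rel(sample, 1, 0);
              return 0;
      }

      /* the helper is gpl_only, so a GPL-compatible license is required */
      char _license[] SEC("license") = "GPL";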

9) Various cleanups in BPF prog dump e.g. to make UAPI bpf_prog_info
   member naming consistent with existing conventions, from Yonghong
   and Song.

10) Misc cleanups and an improvement to allow passing the interface name
    via cmdline for the xdp1 BPF example, from Matteo.

11) Fix a potential segfault in BPF sample loader's kprobes handling,
    from Daniel T.

12) Fix SPDX license in libbpf's README.rst, from Andrey.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 8cc196d6 aa570ff4
......@@ -62,8 +62,11 @@
#define PAGE_OFFSET (UL(0xffffffffffffffff) - \
(UL(1) << (VA_BITS - 1)) + 1)
#define KIMAGE_VADDR (MODULES_END)
#define BPF_JIT_REGION_START (VA_START + KASAN_SHADOW_SIZE)
#define BPF_JIT_REGION_SIZE (SZ_128M)
#define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
#define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
#define MODULES_VADDR (VA_START + KASAN_SHADOW_SIZE)
#define MODULES_VADDR (BPF_JIT_REGION_END)
#define MODULES_VSIZE (SZ_128M)
#define VMEMMAP_START (PAGE_OFFSET - VMEMMAP_SIZE)
#define PCI_IO_END (VMEMMAP_START - SZ_2M)
......
......@@ -134,10 +134,9 @@ static inline void emit_a64_mov_i64(const int reg, const u64 val,
}
/*
* This is an unoptimized 64 immediate emission used for BPF to BPF call
* addresses. It will always do a full 64 bit decomposition as otherwise
* more complexity in the last extra pass is required since we previously
* reserved 4 instructions for the address.
* Kernel addresses in the vmalloc space use at most 48 bits, and the
* remaining bits are guaranteed to be 0xffff. So we can compose the address
* with a fixed length movn/movk/movk sequence.
*/
static inline void emit_addr_mov_i64(const int reg, const u64 val,
struct jit_ctx *ctx)
......@@ -145,8 +144,8 @@ static inline void emit_addr_mov_i64(const int reg, const u64 val,
u64 tmp = val;
int shift = 0;
emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
for (;shift < 48;) {
emit(A64_MOVN(1, reg, ~tmp & 0xffff, shift), ctx);
while (shift < 32) {
tmp >>= 16;
shift += 16;
emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
......@@ -634,11 +633,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
&func_addr, &func_addr_fixed);
if (ret < 0)
return ret;
if (func_addr_fixed)
/* We can use optimized emission here. */
emit_a64_mov_i64(tmp, func_addr, ctx);
else
emit_addr_mov_i64(tmp, func_addr, ctx);
emit_addr_mov_i64(tmp, func_addr, ctx);
emit(A64_BLR(tmp), ctx);
emit(A64_MOV(1, r0, A64_R(0)), ctx);
break;
......@@ -948,3 +943,16 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
tmp : orig_prog);
return prog;
}
void *bpf_jit_alloc_exec(unsigned long size)
{
return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
BPF_JIT_REGION_END, GFP_KERNEL,
PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
__builtin_return_address(0));
}
void bpf_jit_free_exec(void *addr)
{
return vfree(addr);
}
......@@ -157,6 +157,7 @@ Ip_u2u1s3(_slti);
Ip_u2u1s3(_sltiu);
Ip_u3u1u2(_sltu);
Ip_u2u1u3(_sra);
Ip_u3u2u1(_srav);
Ip_u2u1u3(_srl);
Ip_u3u2u1(_srlv);
Ip_u3u1u2(_subu);
......
......@@ -369,8 +369,9 @@ enum mm_32a_minor_op {
mm_ext_op = 0x02c,
mm_pool32axf_op = 0x03c,
mm_srl32_op = 0x040,
mm_srlv32_op = 0x050,
mm_sra_op = 0x080,
mm_srlv32_op = 0x090,
mm_srav_op = 0x090,
mm_rotr_op = 0x0c0,
mm_lwxs_op = 0x118,
mm_addu32_op = 0x150,
......
......@@ -104,6 +104,7 @@ static const struct insn insn_table_MM[insn_invalid] = {
[insn_sltiu] = {M(mm_sltiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
[insn_sltu] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_sltu_op), RT | RS | RD},
[insn_sra] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD},
[insn_srav] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_srav_op), RT | RS | RD},
[insn_srl] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD},
[insn_srlv] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_srlv32_op), RT | RS | RD},
[insn_rotr] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD},
......
......@@ -171,6 +171,7 @@ static const struct insn insn_table[insn_invalid] = {
[insn_sltiu] = {M(sltiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
[insn_sltu] = {M(spec_op, 0, 0, 0, 0, sltu_op), RS | RT | RD},
[insn_sra] = {M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE},
[insn_srav] = {M(spec_op, 0, 0, 0, 0, srav_op), RS | RT | RD},
[insn_srl] = {M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE},
[insn_srlv] = {M(spec_op, 0, 0, 0, 0, srlv_op), RS | RT | RD},
[insn_subu] = {M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD},
......
......@@ -61,10 +61,10 @@ enum opcode {
insn_mthc0, insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_nor,
insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sb,
insn_sc, insn_scd, insn_sd, insn_sh, insn_sll, insn_sllv,
insn_slt, insn_slti, insn_sltiu, insn_sltu, insn_sra, insn_srl,
insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall, insn_tlbp,
insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh, insn_xor,
insn_xori, insn_yield,
insn_slt, insn_slti, insn_sltiu, insn_sltu, insn_sra, insn_srav,
insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall,
insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh,
insn_xor, insn_xori, insn_yield,
insn_invalid /* insn_invalid must be last */
};
......@@ -353,6 +353,7 @@ I_u2u1s3(_slti)
I_u2u1s3(_sltiu)
I_u3u1u2(_sltu)
I_u2u1u3(_sra)
I_u3u2u1(_srav)
I_u2u1u3(_srl)
I_u3u2u1(_srlv)
I_u2u1u3(_rotr)
......
......@@ -854,6 +854,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
case BPF_ALU | BPF_MOD | BPF_X: /* ALU_REG */
case BPF_ALU | BPF_LSH | BPF_X: /* ALU_REG */
case BPF_ALU | BPF_RSH | BPF_X: /* ALU_REG */
case BPF_ALU | BPF_ARSH | BPF_X: /* ALU_REG */
src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
if (src < 0 || dst < 0)
......@@ -913,6 +914,9 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
case BPF_RSH:
emit_instr(ctx, srlv, dst, dst, src);
break;
case BPF_ARSH:
emit_instr(ctx, srav, dst, dst, src);
break;
default:
pr_err("ALU_REG NOT HANDLED\n");
return -EINVAL;
......
......@@ -342,6 +342,8 @@
#define PPC_INST_SLW 0x7c000030
#define PPC_INST_SLD 0x7c000036
#define PPC_INST_SRW 0x7c000430
#define PPC_INST_SRAW 0x7c000630
#define PPC_INST_SRAWI 0x7c000670
#define PPC_INST_SRD 0x7c000436
#define PPC_INST_SRAD 0x7c000634
#define PPC_INST_SRADI 0x7c000674
......
......@@ -152,6 +152,10 @@
___PPC_RS(a) | ___PPC_RB(s))
#define PPC_SRW(d, a, s) EMIT(PPC_INST_SRW | ___PPC_RA(d) | \
___PPC_RS(a) | ___PPC_RB(s))
#define PPC_SRAW(d, a, s) EMIT(PPC_INST_SRAW | ___PPC_RA(d) | \
___PPC_RS(a) | ___PPC_RB(s))
#define PPC_SRAWI(d, a, i) EMIT(PPC_INST_SRAWI | ___PPC_RA(d) | \
___PPC_RS(a) | __PPC_SH(i))
#define PPC_SRD(d, a, s) EMIT(PPC_INST_SRD | ___PPC_RA(d) | \
___PPC_RS(a) | ___PPC_RB(s))
#define PPC_SRAD(d, a, s) EMIT(PPC_INST_SRAD | ___PPC_RA(d) | \
......
......@@ -529,9 +529,15 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
if (imm != 0)
PPC_SRDI(dst_reg, dst_reg, imm);
break;
case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
PPC_SRAW(dst_reg, dst_reg, src_reg);
goto bpf_alu32_trunc;
case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
PPC_SRAD(dst_reg, dst_reg, src_reg);
break;
case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
PPC_SRAWI(dst_reg, dst_reg, imm);
goto bpf_alu32_trunc;
case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
if (imm != 0)
PPC_SRADI(dst_reg, dst_reg, imm);
......
......@@ -821,10 +821,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
/*
* BPF_ARSH
*/
case BPF_ALU | BPF_ARSH | BPF_X: /* ((s32) dst) >>= src */
/* sra %dst,%dst,0(%src) */
EMIT4_DISP(0x8a000000, dst_reg, src_reg, 0);
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_ARSH | BPF_X: /* ((s64) dst) >>= src */
/* srag %dst,%dst,0(%src) */
EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
break;
case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst >> imm */
if (imm == 0)
break;
/* sra %dst,imm(%r0) */
EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
if (imm == 0)
break;
......
......@@ -1181,6 +1181,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
}
if (!image || !prog->is_func || extra_pass) {
if (image)
bpf_prog_fill_jited_linfo(prog, addrs);
out_addrs:
kfree(addrs);
kfree(jit_data);
......
......@@ -59,6 +59,28 @@ static const struct bpf_func_proto rc_keydown_proto = {
.arg4_type = ARG_ANYTHING,
};
BPF_CALL_3(bpf_rc_pointer_rel, u32*, sample, s32, rel_x, s32, rel_y)
{
struct ir_raw_event_ctrl *ctrl;
ctrl = container_of(sample, struct ir_raw_event_ctrl, bpf_sample);
input_report_rel(ctrl->dev->input_dev, REL_X, rel_x);
input_report_rel(ctrl->dev->input_dev, REL_Y, rel_y);
input_sync(ctrl->dev->input_dev);
return 0;
}
static const struct bpf_func_proto rc_pointer_rel_proto = {
.func = bpf_rc_pointer_rel,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
};
static const struct bpf_func_proto *
lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
......@@ -67,6 +89,8 @@ lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &rc_repeat_proto;
case BPF_FUNC_rc_keydown:
return &rc_keydown_proto;
case BPF_FUNC_rc_pointer_rel:
return &rc_pointer_rel_proto;
case BPF_FUNC_map_lookup_elem:
return &bpf_map_lookup_elem_proto;
case BPF_FUNC_map_update_elem:
......
......@@ -2382,6 +2382,49 @@ static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
static int __ashr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
{
/* Set signedness bit (MSB of result). */
emit_alu(nfp_prog, reg_none(), reg_a(dst), ALU_OP_OR, reg_imm(0));
emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, reg_b(dst),
SHF_SC_R_SHF, shift_amt);
wrp_immed(nfp_prog, reg_both(dst + 1), 0);
return 0;
}
static int ashr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u64 umin, umax;
u8 dst, src;
dst = insn->dst_reg * 2;
umin = meta->umin_src;
umax = meta->umax_src;
if (umin == umax)
return __ashr_imm(nfp_prog, dst, umin);
src = insn->src_reg * 2;
/* NOTE: the first insn will set both indirect shift amount (source A)
* and signedness bit (MSB of result).
*/
emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst));
emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
reg_b(dst), SHF_SC_R_SHF);
wrp_immed(nfp_prog, reg_both(dst + 1), 0);
return 0;
}
static int ashr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u8 dst = insn->dst_reg * 2;
return __ashr_imm(nfp_prog, dst, insn->imm);
}
static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
......@@ -3286,6 +3329,8 @@ static const instr_cb_t instr_cb[256] = {
[BPF_ALU | BPF_DIV | BPF_K] = div_imm,
[BPF_ALU | BPF_NEG] = neg_reg,
[BPF_ALU | BPF_LSH | BPF_K] = shl_imm,
[BPF_ALU | BPF_ARSH | BPF_X] = ashr_reg,
[BPF_ALU | BPF_ARSH | BPF_K] = ashr_imm,
[BPF_ALU | BPF_END | BPF_X] = end_reg32,
[BPF_LD | BPF_IMM | BPF_DW] = imm_ld8,
[BPF_LD | BPF_ABS | BPF_B] = data_ld1,
......
......@@ -319,7 +319,28 @@ struct bpf_prog_aux {
struct bpf_prog_offload *offload;
struct btf *btf;
struct bpf_func_info *func_info;
/* bpf_line_info loaded from userspace. linfo->insn_off
* has the xlated insn offset.
* Both the main and sub prog share the same linfo.
* The subprog can access its first linfo by
* using the linfo_idx.
*/
struct bpf_line_info *linfo;
/* jited_linfo is the jited addr of the linfo. It has a
* one to one mapping to linfo:
* jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
* Both the main and sub prog share the same jited_linfo.
* The subprog can access its first jited_linfo by
* using the linfo_idx.
*/
void **jited_linfo;
u32 func_info_cnt;
u32 nr_linfo;
/* subprog can use linfo_idx to access its first linfo and
* jited_linfo.
* main prog always has linfo_idx == 0
*/
u32 linfo_idx;
union {
struct work_struct work;
struct rcu_head rcu;
......
......@@ -203,6 +203,7 @@ static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
struct bpf_subprog_info {
u32 start; /* insn idx of function entry point */
u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
u16 stack_depth; /* max. stack depth used by this function */
};
......
......@@ -46,6 +46,7 @@ void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
struct seq_file *m);
int btf_get_fd_by_id(u32 id);
u32 btf_id(const struct btf *btf);
bool btf_name_offset_valid(const struct btf *btf, u32 offset);
#ifdef CONFIG_BPF_SYSCALL
const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
......
......@@ -725,6 +725,13 @@ void bpf_prog_free(struct bpf_prog *fp);
bool bpf_opcode_in_insntable(u8 code);
void bpf_prog_free_linfo(struct bpf_prog *prog);
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
const u32 *insn_to_jit_off);
int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog);
void bpf_prog_free_jited_linfo(struct bpf_prog *prog);
void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog);
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
gfp_t gfp_extra_flags);
......
......@@ -444,7 +444,7 @@ static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
return kind_ops[BTF_INFO_KIND(t->info)];
}
static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
bool btf_name_offset_valid(const struct btf *btf, u32 offset)
{
return BTF_STR_OFFSET_VALID(offset) &&
offset < btf->hdr.str_len;
......
......@@ -105,6 +105,91 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);
int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
{
if (!prog->aux->nr_linfo || !prog->jit_requested)
return 0;
prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo,
sizeof(*prog->aux->jited_linfo),
GFP_KERNEL | __GFP_NOWARN);
if (!prog->aux->jited_linfo)
return -ENOMEM;
return 0;
}
void bpf_prog_free_jited_linfo(struct bpf_prog *prog)
{
kfree(prog->aux->jited_linfo);
prog->aux->jited_linfo = NULL;
}
void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog)
{
if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0])
bpf_prog_free_jited_linfo(prog);
}
/* The jit engine is responsible to provide an array
* for insn_off to the jited_off mapping (insn_to_jit_off).
*
* The idx to this array is the insn_off. Hence, the insn_off
* here is relative to the prog itself instead of the main prog.
* This array has one entry for each xlated bpf insn.
*
* jited_off is the byte off to the last byte of the jited insn.
*
* Hence, with
* insn_start:
* The first bpf insn off of the prog. The insn off
* here is relative to the main prog.
* e.g. if prog is a subprog, insn_start > 0
* linfo_idx:
* The prog's idx to prog->aux->linfo and jited_linfo
*
* jited_linfo[linfo_idx] = prog->bpf_func
*
* For i > linfo_idx,
*
* jited_linfo[i] = prog->bpf_func +
* insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
*/
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
const u32 *insn_to_jit_off)
{
u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
const struct bpf_line_info *linfo;
void **jited_linfo;
if (!prog->aux->jited_linfo)
/* Userspace did not provide linfo */
return;
linfo_idx = prog->aux->linfo_idx;
linfo = &prog->aux->linfo[linfo_idx];
insn_start = linfo[0].insn_off;
insn_end = insn_start + prog->len;
jited_linfo = &prog->aux->jited_linfo[linfo_idx];
jited_linfo[0] = prog->bpf_func;
nr_linfo = prog->aux->nr_linfo - linfo_idx;
for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
/* The verifier ensures that linfo[i].insn_off is
* strictly increasing
*/
jited_linfo[i] = prog->bpf_func +
insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
}
void bpf_prog_free_linfo(struct bpf_prog *prog)
{
bpf_prog_free_jited_linfo(prog);
kvfree(prog->aux->linfo);
}
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
gfp_t gfp_extra_flags)
{
......@@ -294,6 +379,26 @@ static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
return ret;
}
static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
{
struct bpf_line_info *linfo;
u32 i, nr_linfo;
nr_linfo = prog->aux->nr_linfo;
if (!nr_linfo || !delta)
return;
linfo = prog->aux->linfo;
for (i = 0; i < nr_linfo; i++)
if (off < linfo[i].insn_off)
break;
/* Push all off < linfo[i].insn_off by delta */
for (; i < nr_linfo; i++)
linfo[i].insn_off += delta;
}
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len)
{
......@@ -349,6 +454,8 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
*/
BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false));
bpf_adj_linfo(prog_adj, off, insn_delta);
return prog_adj;
}
......@@ -410,7 +517,7 @@ static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
sym = bin2hex(sym, prog->tag, sizeof(prog->tag));
/* prog->aux->name will be ignored if full btf name is available */
if (prog->aux->btf) {
if (prog->aux->func_info_cnt) {
type = btf_type_by_id(prog->aux->btf,
prog->aux->func_info[prog->aux->func_idx].type_id);
func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
......@@ -623,6 +730,16 @@ static void bpf_jit_uncharge_modmem(u32 pages)
atomic_long_sub(pages, &bpf_jit_current);
}
void *__weak bpf_jit_alloc_exec(unsigned long size)
{
return module_alloc(size);
}
void __weak bpf_jit_free_exec(void *addr)
{
module_memfree(addr);
}
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
unsigned int alignment,
......@@ -640,7 +757,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
if (bpf_jit_charge_modmem(pages))
return NULL;
hdr = module_alloc(size);
hdr = bpf_jit_alloc_exec(size);
if (!hdr) {
bpf_jit_uncharge_modmem(pages);
return NULL;
......@@ -664,7 +781,7 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
u32 pages = hdr->pages;
module_memfree(hdr);
bpf_jit_free_exec(hdr);
bpf_jit_uncharge_modmem(pages);
}
......@@ -923,32 +1040,34 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
#define BPF_INSN_MAP(INSN_2, INSN_3) \
/* 32 bit ALU operations. */ \
/* Register based. */ \
INSN_3(ALU, ADD, X), \
INSN_3(ALU, SUB, X), \
INSN_3(ALU, AND, X), \
INSN_3(ALU, OR, X), \
INSN_3(ALU, LSH, X), \
INSN_3(ALU, RSH, X), \
INSN_3(ALU, XOR, X), \
INSN_3(ALU, MUL, X), \
INSN_3(ALU, MOV, X), \
INSN_3(ALU, DIV, X), \
INSN_3(ALU, MOD, X), \
INSN_3(ALU, ADD, X), \
INSN_3(ALU, SUB, X), \
INSN_3(ALU, AND, X), \
INSN_3(ALU, OR, X), \
INSN_3(ALU, LSH, X), \
INSN_3(ALU, RSH, X), \
INSN_3(ALU, XOR, X), \
INSN_3(ALU, MUL, X), \
INSN_3(ALU, MOV, X), \
INSN_3(ALU, ARSH, X), \
INSN_3(ALU, DIV, X), \
INSN_3(ALU, MOD, X), \
INSN_2(ALU, NEG), \
INSN_3(ALU, END, TO_BE), \
INSN_3(ALU, END, TO_LE), \
/* Immediate based. */ \
INSN_3(ALU, ADD, K), \
INSN_3(ALU, SUB, K), \
INSN_3(ALU, AND, K), \
INSN_3(ALU, OR, K), \
INSN_3(ALU, LSH, K), \
INSN_3(ALU, RSH, K), \
INSN_3(ALU, XOR, K), \
INSN_3(ALU, MUL, K), \
INSN_3(ALU, MOV, K), \
INSN_3(ALU, DIV, K), \
INSN_3(ALU, MOD, K), \
INSN_3(ALU, ADD, K), \
INSN_3(ALU, SUB, K), \
INSN_3(ALU, AND, K), \
INSN_3(ALU, OR, K), \
INSN_3(ALU, LSH, K), \
INSN_3(ALU, RSH, K), \
INSN_3(ALU, XOR, K), \
INSN_3(ALU, MUL, K), \
INSN_3(ALU, MOV, K), \
INSN_3(ALU, ARSH, K), \
INSN_3(ALU, DIV, K), \
INSN_3(ALU, MOD, K), \
/* 64 bit ALU operations. */ \
/* Register based. */ \
INSN_3(ALU64, ADD, X), \
......@@ -1127,6 +1246,12 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
insn++;
CONT;
ALU_ARSH_X:
DST = (u64) (u32) ((*(s32 *) &DST) >> SRC);
CONT;
ALU_ARSH_K:
DST = (u64) (u32) ((*(s32 *) &DST) >> IMM);
CONT;
ALU64_ARSH_X:
(*(s64 *) &DST) >>= SRC;
CONT;
......@@ -1573,13 +1698,20 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
* be JITed, but falls back to the interpreter.
*/
if (!bpf_prog_is_dev_bound(fp->aux)) {
*err = bpf_prog_alloc_jited_linfo(fp);
if (*err)
return fp;
fp = bpf_int_jit_compile(fp);
#ifdef CONFIG_BPF_JIT_ALWAYS_ON
if (!fp->jited) {
bpf_prog_free_jited_linfo(fp);
#ifdef CONFIG_BPF_JIT_ALWAYS_ON
*err = -ENOTSUPP;
return fp;
}
#endif
} else {
bpf_prog_free_unused_jited_linfo(fp);
}
} else {
*err = bpf_prog_offload_compile(fp);
if (*err)
......
......@@ -1215,6 +1215,7 @@ static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
bpf_prog_kallsyms_del_all(prog);
btf_put(prog->aux->btf);
kvfree(prog->aux->func_info);
bpf_prog_free_linfo(prog);
call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
}
......@@ -1439,7 +1440,7 @@ bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type,
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD func_info_cnt
#define BPF_PROG_LOAD_LAST_FIELD line_info_cnt
static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
{
......@@ -1452,9 +1453,14 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
if (CHECK_ATTR(BPF_PROG_LOAD))
return -EINVAL;
if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT)
if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | BPF_F_ANY_ALIGNMENT))
return -EINVAL;
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
(attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
!capable(CAP_SYS_ADMIN))
return -EPERM;
/* copy eBPF program license from user space */
if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
sizeof(license) - 1) < 0)
......@@ -1555,6 +1561,9 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
return err;
free_used_maps:
bpf_prog_free_linfo(prog);
kvfree(prog->aux->func_info);
btf_put(prog->aux->btf);
bpf_prog_kallsyms_del_subprogs(prog);
free_used_maps(prog->aux);
free_prog:
......@@ -2034,6 +2043,37 @@ static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
return insns;
}
static int set_info_rec_size(struct bpf_prog_info *info)
{
/*
* Ensure info.*_rec_size is the same as kernel expected size
*
* or
*
* Only allow zero *_rec_size if both _rec_size and _cnt are
* zero. In this case, the kernel will set the expected
* _rec_size back to the info.
*/
if ((info->nr_func_info || info->func_info_rec_size) &&
info->func_info_rec_size != sizeof(struct bpf_func_info))
return -EINVAL;
if ((info->nr_line_info || info->line_info_rec_size) &&
info->line_info_rec_size != sizeof(struct bpf_line_info))
return -EINVAL;
if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
info->jited_line_info_rec_size != sizeof(__u64))
return -EINVAL;
info->func_info_rec_size = sizeof(struct bpf_func_info);
info->line_info_rec_size = sizeof(struct bpf_line_info);
info->jited_line_info_rec_size = sizeof(__u64);
return 0;
}
static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
......@@ -2076,12 +2116,18 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
return -EFAULT;
}
err = set_info_rec_size(&info);
if (err)
return err;
if (!capable(CAP_SYS_ADMIN)) {
info.jited_prog_len = 0;
info.xlated_prog_len = 0;
info.nr_jited_ksyms = 0;
info.nr_jited_func_lens = 0;
info.func_info_cnt = 0;
info.nr_func_info = 0;
info.nr_line_info = 0;
info.nr_jited_line_info = 0;
goto done;
}
......@@ -2163,7 +2209,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
ulen = info.nr_jited_ksyms;
info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
if (info.nr_jited_ksyms && ulen) {
if (ulen) {
if (bpf_dump_raw_ok()) {
unsigned long ksym_addr;
u64 __user *user_ksyms;
......@@ -2194,7 +2240,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
ulen = info.nr_jited_func_lens;
info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
if (info.nr_jited_func_lens && ulen) {
if (ulen) {
if (bpf_dump_raw_ok()) {
u32 __user *user_lens;
u32 func_len, i;
......@@ -2219,35 +2265,61 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
}
}
if (prog->aux->btf) {
u32 krec_size = sizeof(struct bpf_func_info);
u32 ucnt, urec_size;
if (prog->aux->btf)
info.btf_id = btf_id(prog->aux->btf);
ucnt = info.func_info_cnt;
info.func_info_cnt = prog->aux->func_info_cnt;
urec_size = info.func_info_rec_size;
info.func_info_rec_size = krec_size;
if (ucnt) {
/* expect passed-in urec_size is what the kernel expects */
if (urec_size != info.func_info_rec_size)
return -EINVAL;
if (bpf_dump_raw_ok()) {
char __user *user_finfo;
user_finfo = u64_to_user_ptr(info.func_info);
ucnt = min_t(u32, info.func_info_cnt, ucnt);
if (copy_to_user(user_finfo, prog->aux->func_info,
krec_size * ucnt))
ulen = info.nr_func_info;
info.nr_func_info = prog->aux->func_info_cnt;
if (info.nr_func_info && ulen) {
if (bpf_dump_raw_ok()) {
char __user *user_finfo;
user_finfo = u64_to_user_ptr(info.func_info);
ulen = min_t(u32, info.nr_func_info, ulen);
if (copy_to_user(user_finfo, prog->aux->func_info,
info.func_info_rec_size * ulen))
return -EFAULT;
} else {
info.func_info = 0;
}
}
ulen = info.nr_line_info;
info.nr_line_info = prog->aux->nr_linfo;
if (info.nr_line_info && ulen) {
if (bpf_dump_raw_ok()) {
__u8 __user *user_linfo;
user_linfo = u64_to_user_ptr(info.line_info);
ulen = min_t(u32, info.nr_line_info, ulen);
if (copy_to_user(user_linfo, prog->aux->linfo,
info.line_info_rec_size * ulen))
return -EFAULT;
} else {
info.line_info = 0;
}
}
ulen = info.nr_jited_line_info;
if (prog->aux->jited_linfo)
info.nr_jited_line_info = prog->aux->nr_linfo;
else
info.nr_jited_line_info = 0;
if (info.nr_jited_line_info && ulen) {
if (bpf_dump_raw_ok()) {
__u64 __user *user_linfo;
u32 i;
user_linfo = u64_to_user_ptr(info.jited_line_info);
ulen = min_t(u32, info.nr_jited_line_info, ulen);
for (i = 0; i < ulen; i++) {
if (put_user((__u64)(long)prog->aux->jited_linfo[i],
&user_linfo[i]))
return -EFAULT;
} else {
info.func_info_cnt = 0;
}
} else {
info.jited_line_info = 0;
}
} else {
info.func_info_cnt = 0;
}
done:
......
......@@ -75,8 +75,18 @@ static int bpf_test_finish(const union bpf_attr *kattr,
{
void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
int err = -EFAULT;
u32 copy_size = size;
/* Clamp copy if the user has provided a size hint, but copy the full
* buffer if not to retain old behaviour.
*/
if (kattr->test.data_size_out &&
copy_size > kattr->test.data_size_out) {
copy_size = kattr->test.data_size_out;
err = -ENOSPC;
}
if (data_out && copy_to_user(data_out, data, size))
if (data_out && copy_to_user(data_out, data, copy_size))
goto out;
if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
goto out;
......@@ -84,7 +94,8 @@ static int bpf_test_finish(const union bpf_attr *kattr,
goto out;
if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
goto out;
err = 0;
if (err != -ENOSPC)
err = 0;
out:
return err;
}
......
......@@ -5774,6 +5774,7 @@ static bool sk_filter_is_valid_access(int off, int size,
case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
case bpf_ctx_range_till(struct __sk_buff, family, local_port):
case bpf_ctx_range(struct __sk_buff, tstamp):
case bpf_ctx_range(struct __sk_buff, wire_len):
return false;
}
......@@ -5798,6 +5799,7 @@ static bool cg_skb_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, tc_classid):
case bpf_ctx_range(struct __sk_buff, data_meta):
case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
case bpf_ctx_range(struct __sk_buff, wire_len):
return false;
case bpf_ctx_range(struct __sk_buff, data):
case bpf_ctx_range(struct __sk_buff, data_end):
......@@ -5844,6 +5846,7 @@ static bool lwt_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, data_meta):
case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
case bpf_ctx_range(struct __sk_buff, tstamp):
case bpf_ctx_range(struct __sk_buff, wire_len):
return false;
}
......@@ -6274,6 +6277,7 @@ static bool sk_skb_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, data_meta):
case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
case bpf_ctx_range(struct __sk_buff, tstamp):
case bpf_ctx_range(struct __sk_buff, wire_len):
return false;
}
......@@ -6361,6 +6365,7 @@ static bool flow_dissector_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, data_meta):
case bpf_ctx_range_till(struct __sk_buff, family, local_port):
case bpf_ctx_range(struct __sk_buff, tstamp):
case bpf_ctx_range(struct __sk_buff, wire_len):
return false;
}
......@@ -6686,6 +6691,17 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
bpf_target_off(struct sk_buff,
tstamp, 8,
target_size));
break;
case offsetof(struct __sk_buff, wire_len):
BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, pkt_len) != 4);
off = si->off;
off -= offsetof(struct __sk_buff, wire_len);
off += offsetof(struct sk_buff, cb);
off += offsetof(struct qdisc_skb_cb, pkt_len);
*target_size = 4;
*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, off);
}
return insn - insn_buf;
......
......@@ -58,7 +58,9 @@ static int write_kprobe_events(const char *val)
{
int fd, ret, flags;
if ((val != NULL) && (val[0] == '\0'))
if (val == NULL)
return -1;
else if (val[0] == '\0')
flags = O_WRONLY | O_TRUNC;
else
flags = O_WRONLY | O_APPEND;
......
......@@ -15,6 +15,7 @@
#include <unistd.h>
#include <libgen.h>
#include <sys/resource.h>
#include <net/if.h>
#include "bpf_util.h"
#include "bpf/bpf.h"
......@@ -34,26 +35,24 @@ static void int_exit(int sig)
static void poll_stats(int map_fd, int interval)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
const unsigned int nr_keys = 256;
__u64 values[nr_cpus], prev[nr_keys][nr_cpus];
__u32 key;
__u64 values[nr_cpus], prev[UINT8_MAX] = { 0 };
int i;
memset(prev, 0, sizeof(prev));
while (1) {
__u32 key = UINT32_MAX;
sleep(interval);
for (key = 0; key < nr_keys; key++) {
while (bpf_map_get_next_key(map_fd, &key, &key) != -1) {
__u64 sum = 0;
assert(bpf_map_lookup_elem(map_fd, &key, values) == 0);
for (i = 0; i < nr_cpus; i++)
sum += (values[i] - prev[key][i]);
if (sum)
sum += values[i];
if (sum > prev[key])
printf("proto %u: %10llu pkt/s\n",
key, sum / interval);
memcpy(prev[key], values, sizeof(values));
key, (sum - prev[key]) / interval);
prev[key] = sum;
}
}
}
......@@ -61,7 +60,7 @@ static void poll_stats(int map_fd, int interval)
static void usage(const char *prog)
{
fprintf(stderr,
"usage: %s [OPTS] IFINDEX\n\n"
"usage: %s [OPTS] IFACE\n\n"
"OPTS:\n"
" -S use skb-mode\n"
" -N enforce native mode\n",
......@@ -104,7 +103,11 @@ int main(int argc, char **argv)
return 1;
}
ifindex = strtoul(argv[optind], NULL, 0);
ifindex = if_nametoindex(argv[1]);
if (!ifindex) {
perror("if_nametoindex");
return 1;
}
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
prog_load_attr.file = filename;
......
......@@ -22,12 +22,13 @@ MAP COMMANDS
=============
| **bpftool** **prog { show | list }** [*PROG*]
| **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes** | **visual**}]
| **bpftool** **prog dump jited** *PROG* [{**file** *FILE* | **opcodes**}]
| **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes** | **visual** | **linum**}]
| **bpftool** **prog dump jited** *PROG* [{**file** *FILE* | **opcodes** | **linum**}]
| **bpftool** **prog pin** *PROG* *FILE*
| **bpftool** **prog { load | loadall }** *OBJ* *PATH* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*]
| **bpftool** **prog attach** *PROG* *ATTACH_TYPE* [*MAP*]
| **bpftool** **prog detach** *PROG* *ATTACH_TYPE* [*MAP*]
| **bpftool** **prog attach** *PROG* *ATTACH_TYPE* [*MAP*]
| **bpftool** **prog detach** *PROG* *ATTACH_TYPE* [*MAP*]
| **bpftool** **prog tracelog**
| **bpftool** **prog help**
|
| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* }
......@@ -55,7 +56,7 @@ DESCRIPTION
Output will start with program ID followed by program type and
zero or more named attributes (depending on kernel version).
**bpftool prog dump xlated** *PROG* [{ **file** *FILE* | **opcodes** | **visual** }]
**bpftool prog dump xlated** *PROG* [{ **file** *FILE* | **opcodes** | **visual** | **linum** }]
Dump eBPF instructions of the program from the kernel. By
default, eBPF will be disassembled and printed to standard
output in human-readable format. In this case, **opcodes**
......@@ -68,13 +69,23 @@ DESCRIPTION
built instead, and eBPF instructions will be presented with
CFG in DOT format, on standard output.
**bpftool prog dump jited** *PROG* [{ **file** *FILE* | **opcodes** }]
If the prog has line_info available, the source line will
be displayed by default. If **linum** is specified,
the filename, line number and line column will also be
displayed on top of the source line.
**bpftool prog dump jited** *PROG* [{ **file** *FILE* | **opcodes** | **linum** }]
Dump jited image (host machine code) of the program.
If *FILE* is specified image will be written to a file,
otherwise it will be disassembled and printed to stdout.
**opcodes** controls if raw opcodes will be printed.
If the prog has line_info available, the source line will
be displayed by default. If **linum** is specified,
the filename, line number and line column will also be
displayed on top of the source line.
**bpftool prog pin** *PROG* *FILE*
Pin program *PROG* as *FILE*.
......@@ -117,6 +128,14 @@ DESCRIPTION
parameter, with the exception of *flow_dissector* which is
detached from the current networking name space.
**bpftool prog tracelog**
Dump the trace pipe of the system to the console (stdout).
Hit <Ctrl+C> to stop printing. BPF programs can write to this
trace pipe at runtime with the **bpf_trace_printk()** helper.
This should be used only for debugging purposes. For
streaming data from BPF programs to user space, one can use
perf events (see also **bpftool-map**\ (8)).
**bpftool prog help**
Print short help message.
......
......@@ -191,7 +191,7 @@ _bpftool()
# Deal with simplest keywords
case $prev in
help|hex|opcodes|visual)
help|hex|opcodes|visual|linum)
return 0
;;
tag)
......@@ -243,16 +243,20 @@ _bpftool()
# Completion depends on object and command in use
case $object in
prog)
if [[ $command != "load" && $command != "loadall" ]]; then
case $prev in
id)
_bpftool_get_prog_ids
return 0
;;
esac
fi
# Complete id, only for subcommands that use prog (but no map) ids
case $command in
show|list|dump|pin)
case $prev in
id)
_bpftool_get_prog_ids
return 0
;;
esac
;;
esac
local PROG_TYPE='id pinned tag'
local MAP_TYPE='id pinned'
case $command in
show|list)
[[ $prev != "$command" ]] && return 0
......@@ -274,10 +278,10 @@ _bpftool()
*)
_bpftool_once_attr 'file'
if _bpftool_search_list 'xlated'; then
COMPREPLY+=( $( compgen -W 'opcodes visual' -- \
COMPREPLY+=( $( compgen -W 'opcodes visual linum' -- \
"$cur" ) )
else
COMPREPLY+=( $( compgen -W 'opcodes' -- \
COMPREPLY+=( $( compgen -W 'opcodes linum' -- \
"$cur" ) )
fi
return 0
......@@ -293,22 +297,43 @@ _bpftool()
return 0
;;
attach|detach)
if [[ ${#words[@]} == 7 ]]; then
COMPREPLY=( $( compgen -W "id pinned" -- "$cur" ) )
return 0
fi
if [[ ${#words[@]} == 6 ]]; then
COMPREPLY=( $( compgen -W "msg_verdict skb_verdict \
skb_parse flow_dissector" -- "$cur" ) )
return 0
fi
if [[ $prev == "$command" ]]; then
COMPREPLY=( $( compgen -W "id pinned" -- "$cur" ) )
return 0
fi
return 0
case $cword in
3)
COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) )
return 0
;;
4)
case $prev in
id)
_bpftool_get_prog_ids
;;
pinned)
_filedir
;;
esac
return 0
;;
5)
COMPREPLY=( $( compgen -W 'msg_verdict skb_verdict \
skb_parse flow_dissector' -- "$cur" ) )
return 0
;;
6)
COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
return 0
;;
7)
case $prev in
id)
_bpftool_get_map_ids
;;
pinned)
_filedir
;;
esac
return 0
;;
esac
;;
load|loadall)
local obj
......@@ -373,10 +398,13 @@ _bpftool()
;;
esac
;;
tracelog)
return 0
;;
*)
[[ $prev == $object ]] && \
COMPREPLY=( $( compgen -W 'dump help pin attach detach load \
show list' -- "$cur" ) )
show list tracelog' -- "$cur" ) )
;;
esac
;;
......@@ -411,7 +439,7 @@ _bpftool()
lru_percpu_hash lpm_trie array_of_maps \
hash_of_maps devmap sockmap cpumap xskmap \
sockhash cgroup_storage reuseport_sockarray \
percpu_cgroup_storage' -- \
percpu_cgroup_storage queue stack' -- \
"$cur" ) )
return 0
;;
......
......@@ -385,3 +385,67 @@ void btf_dumper_type_only(const struct btf *btf, __u32 type_id, char *func_sig,
if (err < 0)
func_sig[0] = '\0';
}
static const char *ltrim(const char *s)
{
while (isspace(*s))
s++;
return s;
}
void btf_dump_linfo_plain(const struct btf *btf,
const struct bpf_line_info *linfo,
const char *prefix, bool linum)
{
const char *line = btf__name_by_offset(btf, linfo->line_off);
if (!line)
return;
line = ltrim(line);
if (!prefix)
prefix = "";
if (linum) {
const char *file = btf__name_by_offset(btf, linfo->file_name_off);
/* More forgiving on file because linum option is
* expected to provide more info than the already
* available src line.
*/
if (!file)
file = "";
printf("%s%s [file:%s line_num:%u line_col:%u]\n",
prefix, line, file,
BPF_LINE_INFO_LINE_NUM(linfo->line_col),
BPF_LINE_INFO_LINE_COL(linfo->line_col));
} else {
printf("%s%s\n", prefix, line);
}
}
void btf_dump_linfo_json(const struct btf *btf,
const struct bpf_line_info *linfo, bool linum)
{
const char *line = btf__name_by_offset(btf, linfo->line_off);
if (line)
jsonw_string_field(json_wtr, "src", ltrim(line));
if (linum) {
const char *file = btf__name_by_offset(btf, linfo->file_name_off);
if (file)
jsonw_string_field(json_wtr, "file", file);
if (BPF_LINE_INFO_LINE_NUM(linfo->line_col))
jsonw_int_field(json_wtr, "line_num",
BPF_LINE_INFO_LINE_NUM(linfo->line_col));
if (BPF_LINE_INFO_LINE_COL(linfo->line_col))
jsonw_int_field(json_wtr, "line_col",
BPF_LINE_INFO_LINE_COL(linfo->line_col));
}
}
......@@ -48,7 +48,6 @@
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <bpf.h>
......@@ -276,7 +275,7 @@ int get_fd_type(int fd)
char buf[512];
ssize_t n;
snprintf(path, sizeof(path), "/proc/%d/fd/%d", getpid(), fd);
snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
n = readlink(path, buf, sizeof(buf));
if (n < 0) {
......@@ -304,7 +303,7 @@ char *get_fdinfo(int fd, const char *key)
ssize_t n;
FILE *fdi;
snprintf(path, sizeof(path), "/proc/%d/fdinfo/%d", getpid(), fd);
snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
fdi = fopen(path, "r");
if (!fdi) {
......@@ -605,7 +604,7 @@ void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
if (!ifindex)
return;
printf(" dev ");
printf(" offloaded_to ");
if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
printf("%s", name);
else
......
......@@ -19,29 +19,21 @@
#include <string.h>
#include <bfd.h>
#include <dis-asm.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <limits.h>
#include <libbpf.h>
#include "json_writer.h"
#include "main.h"
static void get_exec_path(char *tpath, size_t size)
{
const char *path = "/proc/self/exe";
ssize_t len;
char *path;
snprintf(tpath, size, "/proc/%d/exe", (int) getpid());
tpath[size - 1] = 0;
path = strdup(tpath);
assert(path);
len = readlink(path, tpath, size - 1);
assert(len > 0);
tpath[len] = 0;
free(path);
}
static int oper_count;
......@@ -77,10 +69,16 @@ static int fprintf_json(void *out, const char *fmt, ...)
}
void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
const char *arch, const char *disassembler_options)
const char *arch, const char *disassembler_options,
const struct btf *btf,
const struct bpf_prog_linfo *prog_linfo,
__u64 func_ksym, unsigned int func_idx,
bool linum)
{
const struct bpf_line_info *linfo = NULL;
disassembler_ftype disassemble;
struct disassemble_info info;
unsigned int nr_skip = 0;
int count, i, pc = 0;
char tpath[PATH_MAX];
bfd *bfdf;
......@@ -136,12 +134,26 @@ void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
if (json_output)
jsonw_start_array(json_wtr);
do {
if (prog_linfo) {
linfo = bpf_prog_linfo__lfind_addr_func(prog_linfo,
func_ksym + pc,
func_idx,
nr_skip);
if (linfo)
nr_skip++;
}
if (json_output) {
jsonw_start_object(json_wtr);
oper_count = 0;
if (linfo)
btf_dump_linfo_json(btf, linfo, linum);
jsonw_name(json_wtr, "pc");
jsonw_printf(json_wtr, "\"0x%x\"", pc);
} else {
if (linfo)
btf_dump_linfo_plain(btf, linfo, "; ",
linum);
printf("%4x:\t", pc);
}
......
......@@ -78,6 +78,32 @@
#define HELP_SPEC_MAP \
"MAP := { id MAP_ID | pinned FILE }"
static const char * const prog_type_name[] = {
[BPF_PROG_TYPE_UNSPEC] = "unspec",
[BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter",
[BPF_PROG_TYPE_KPROBE] = "kprobe",
[BPF_PROG_TYPE_SCHED_CLS] = "sched_cls",
[BPF_PROG_TYPE_SCHED_ACT] = "sched_act",
[BPF_PROG_TYPE_TRACEPOINT] = "tracepoint",
[BPF_PROG_TYPE_XDP] = "xdp",
[BPF_PROG_TYPE_PERF_EVENT] = "perf_event",
[BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb",
[BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock",
[BPF_PROG_TYPE_LWT_IN] = "lwt_in",
[BPF_PROG_TYPE_LWT_OUT] = "lwt_out",
[BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit",
[BPF_PROG_TYPE_SOCK_OPS] = "sock_ops",
[BPF_PROG_TYPE_SK_SKB] = "sk_skb",
[BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device",
[BPF_PROG_TYPE_SK_MSG] = "sk_msg",
[BPF_PROG_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
[BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr",
[BPF_PROG_TYPE_LWT_SEG6LOCAL] = "lwt_seg6local",
[BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2",
[BPF_PROG_TYPE_SK_REUSEPORT] = "sk_reuseport",
[BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector",
};
enum bpf_obj_type {
BPF_OBJ_UNKNOWN,
BPF_OBJ_PROG,
......@@ -112,6 +138,9 @@ struct pinned_obj {
struct hlist_node hash;
};
struct btf;
struct bpf_line_info;
int build_pinned_obj_table(struct pinned_obj_table *table,
enum bpf_obj_type type);
void delete_pinned_obj_table(struct pinned_obj_table *tab);
......@@ -141,6 +170,7 @@ int do_event_pipe(int argc, char **argv);
int do_cgroup(int argc, char **arg);
int do_perf(int argc, char **arg);
int do_net(int argc, char **arg);
int do_tracelog(int argc, char **arg);
int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what);
int prog_parse_fd(int *argc, char ***argv);
......@@ -148,13 +178,22 @@ int map_parse_fd(int *argc, char ***argv);
int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len);
#ifdef HAVE_LIBBFD_SUPPORT
struct bpf_prog_linfo;
void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
const char *arch, const char *disassembler_options);
const char *arch, const char *disassembler_options,
const struct btf *btf,
const struct bpf_prog_linfo *prog_linfo,
__u64 func_ksym, unsigned int func_idx,
bool linum);
int disasm_init(void);
#else
static inline
void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
const char *arch, const char *disassembler_options)
const char *arch, const char *disassembler_options,
const struct btf *btf,
const struct bpf_prog_linfo *prog_linfo,
__u64 func_ksym, unsigned int func_idx,
bool linum)
{
}
static inline int disasm_init(void)
......@@ -190,6 +229,12 @@ int btf_dumper_type(const struct btf_dumper *d, __u32 type_id,
void btf_dumper_type_only(const struct btf *btf, __u32 func_type_id,
char *func_only, int size);
void btf_dump_linfo_plain(const struct btf *btf,
const struct bpf_line_info *linfo,
const char *prefix, bool linum);
void btf_dump_linfo_json(const struct btf *btf,
const struct bpf_line_info *linfo, bool linum);
struct nlattr;
struct ifinfomsg;
struct tcmsg;
......
......@@ -487,7 +487,6 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
char *memlock;
memlock = get_fdinfo(fd, "memlock");
close(fd);
jsonw_start_object(json_wtr);
......@@ -514,6 +513,30 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
jsonw_int_field(json_wtr, "bytes_memlock", atoi(memlock));
free(memlock);
if (info->type == BPF_MAP_TYPE_PROG_ARRAY) {
char *owner_prog_type = get_fdinfo(fd, "owner_prog_type");
char *owner_jited = get_fdinfo(fd, "owner_jited");
if (owner_prog_type) {
unsigned int prog_type = atoi(owner_prog_type);
if (prog_type < ARRAY_SIZE(prog_type_name))
jsonw_string_field(json_wtr, "owner_prog_type",
prog_type_name[prog_type]);
else
jsonw_uint_field(json_wtr, "owner_prog_type",
prog_type);
}
if (atoi(owner_jited))
jsonw_bool_field(json_wtr, "owner_jited", true);
else
jsonw_bool_field(json_wtr, "owner_jited", false);
free(owner_prog_type);
free(owner_jited);
}
close(fd);
if (!hash_empty(map_table.table)) {
struct pinned_obj *obj;
......@@ -536,7 +559,6 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
char *memlock;
memlock = get_fdinfo(fd, "memlock");
close(fd);
printf("%u: ", info->id);
if (info->type < ARRAY_SIZE(map_type_name))
......@@ -557,6 +579,30 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
printf(" memlock %sB", memlock);
free(memlock);
if (info->type == BPF_MAP_TYPE_PROG_ARRAY) {
char *owner_prog_type = get_fdinfo(fd, "owner_prog_type");
char *owner_jited = get_fdinfo(fd, "owner_jited");
printf("\n\t");
if (owner_prog_type) {
unsigned int prog_type = atoi(owner_prog_type);
if (prog_type < ARRAY_SIZE(prog_type_name))
printf("owner_prog_type %s ",
prog_type_name[prog_type]);
else
printf("owner_prog_type %d ", prog_type);
}
if (atoi(owner_jited))
printf("owner jited");
else
printf("owner not jited");
free(owner_prog_type);
free(owner_jited);
}
close(fd);
printf("\n");
if (!hash_empty(map_table.table)) {
struct pinned_obj *obj;
......
......@@ -54,30 +54,6 @@
#include "main.h"
#include "xlated_dumper.h"
static const char * const prog_type_name[] = {
[BPF_PROG_TYPE_UNSPEC] = "unspec",
[BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter",
[BPF_PROG_TYPE_KPROBE] = "kprobe",
[BPF_PROG_TYPE_SCHED_CLS] = "sched_cls",
[BPF_PROG_TYPE_SCHED_ACT] = "sched_act",
[BPF_PROG_TYPE_TRACEPOINT] = "tracepoint",
[BPF_PROG_TYPE_XDP] = "xdp",
[BPF_PROG_TYPE_PERF_EVENT] = "perf_event",
[BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb",
[BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock",
[BPF_PROG_TYPE_LWT_IN] = "lwt_in",
[BPF_PROG_TYPE_LWT_OUT] = "lwt_out",
[BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit",
[BPF_PROG_TYPE_SOCK_OPS] = "sock_ops",
[BPF_PROG_TYPE_SK_SKB] = "sk_skb",
[BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device",
[BPF_PROG_TYPE_SK_MSG] = "sk_msg",
[BPF_PROG_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
[BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr",
[BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2",
[BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector",
};
static const char * const attach_type_strings[] = {
[BPF_SK_SKB_STREAM_PARSER] = "stream_parser",
[BPF_SK_SKB_STREAM_VERDICT] = "stream_verdict",
......@@ -447,24 +423,26 @@ static int do_show(int argc, char **argv)
static int do_dump(int argc, char **argv)
{
unsigned int finfo_rec_size, linfo_rec_size, jited_linfo_rec_size;
void *func_info = NULL, *linfo = NULL, *jited_linfo = NULL;
unsigned int nr_finfo, nr_linfo = 0, nr_jited_linfo = 0;
struct bpf_prog_linfo *prog_linfo = NULL;
unsigned long *func_ksyms = NULL;
struct bpf_prog_info info = {};
unsigned int *func_lens = NULL;
const char *disasm_opt = NULL;
unsigned int finfo_rec_size;
unsigned int nr_func_ksyms;
unsigned int nr_func_lens;
struct dump_data dd = {};
__u32 len = sizeof(info);
struct btf *btf = NULL;
void *func_info = NULL;
unsigned int finfo_cnt;
unsigned int buf_size;
char *filepath = NULL;
bool opcodes = false;
bool visual = false;
char func_sig[1024];
unsigned char *buf;
bool linum = false;
__u32 *member_len;
__u64 *member_ptr;
ssize_t n;
......@@ -508,6 +486,9 @@ static int do_dump(int argc, char **argv)
} else if (is_prefix(*argv, "visual")) {
visual = true;
NEXT_ARG();
} else if (is_prefix(*argv, "linum")) {
linum = true;
NEXT_ARG();
}
if (argc) {
......@@ -556,10 +537,10 @@ static int do_dump(int argc, char **argv)
}
}
finfo_cnt = info.func_info_cnt;
nr_finfo = info.nr_func_info;
finfo_rec_size = info.func_info_rec_size;
if (finfo_cnt && finfo_rec_size) {
func_info = malloc(finfo_cnt * finfo_rec_size);
if (nr_finfo && finfo_rec_size) {
func_info = malloc(nr_finfo * finfo_rec_size);
if (!func_info) {
p_err("mem alloc failed");
close(fd);
......@@ -567,6 +548,32 @@ static int do_dump(int argc, char **argv)
}
}
linfo_rec_size = info.line_info_rec_size;
if (info.nr_line_info && linfo_rec_size && info.btf_id) {
nr_linfo = info.nr_line_info;
linfo = malloc(nr_linfo * linfo_rec_size);
if (!linfo) {
p_err("mem alloc failed");
close(fd);
goto err_free;
}
}
jited_linfo_rec_size = info.jited_line_info_rec_size;
if (info.nr_jited_line_info &&
jited_linfo_rec_size &&
info.nr_jited_ksyms &&
info.nr_jited_func_lens &&
info.btf_id) {
nr_jited_linfo = info.nr_jited_line_info;
jited_linfo = malloc(nr_jited_linfo * jited_linfo_rec_size);
if (!jited_linfo) {
p_err("mem alloc failed");
close(fd);
goto err_free;
}
}
memset(&info, 0, sizeof(info));
*member_ptr = ptr_to_u64(buf);
......@@ -575,9 +582,15 @@ static int do_dump(int argc, char **argv)
info.nr_jited_ksyms = nr_func_ksyms;
info.jited_func_lens = ptr_to_u64(func_lens);
info.nr_jited_func_lens = nr_func_lens;
info.func_info_cnt = finfo_cnt;
info.nr_func_info = nr_finfo;
info.func_info_rec_size = finfo_rec_size;
info.func_info = ptr_to_u64(func_info);
info.nr_line_info = nr_linfo;
info.line_info_rec_size = linfo_rec_size;
info.line_info = ptr_to_u64(linfo);
info.nr_jited_line_info = nr_jited_linfo;
info.jited_line_info_rec_size = jited_linfo_rec_size;
info.jited_line_info = ptr_to_u64(jited_linfo);
err = bpf_obj_get_info_by_fd(fd, &info, &len);
close(fd);
......@@ -601,9 +614,9 @@ static int do_dump(int argc, char **argv)
goto err_free;
}
if (info.func_info_cnt != finfo_cnt) {
p_err("incorrect func_info_cnt %d vs. expected %d",
info.func_info_cnt, finfo_cnt);
if (info.nr_func_info != nr_finfo) {
p_err("incorrect nr_func_info %d vs. expected %d",
info.nr_func_info, nr_finfo);
goto err_free;
}
......@@ -613,6 +626,37 @@ static int do_dump(int argc, char **argv)
goto err_free;
}
if (func_info && !info.func_info) {
/* kernel.kptr_restrict is set. No func_info available. */
free(func_info);
func_info = NULL;
nr_finfo = 0;
}
if (linfo && info.nr_line_info != nr_linfo) {
p_err("incorrect nr_line_info %u vs. expected %u",
info.nr_line_info, nr_linfo);
goto err_free;
}
if (info.line_info_rec_size != linfo_rec_size) {
p_err("incorrect line_info_rec_size %u vs. expected %u",
info.line_info_rec_size, linfo_rec_size);
goto err_free;
}
if (jited_linfo && info.nr_jited_line_info != nr_jited_linfo) {
p_err("incorrect nr_jited_line_info %u vs. expected %u",
info.nr_jited_line_info, nr_jited_linfo);
goto err_free;
}
if (info.jited_line_info_rec_size != jited_linfo_rec_size) {
p_err("incorrect jited_line_info_rec_size %u vs. expected %u",
info.jited_line_info_rec_size, jited_linfo_rec_size);
goto err_free;
}
if ((member_len == &info.jited_prog_len &&
info.jited_prog_insns == 0) ||
(member_len == &info.xlated_prog_len &&
......@@ -626,6 +670,12 @@ static int do_dump(int argc, char **argv)
goto err_free;
}
if (nr_linfo) {
prog_linfo = bpf_prog_linfo__new(&info);
if (!prog_linfo)
p_info("error in processing bpf_line_info. continue without it.");
}
if (filepath) {
fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
if (fd < 0) {
......@@ -707,8 +757,11 @@ static int do_dump(int argc, char **argv)
printf("%s:\n", sym_name);
}
disasm_print_insn(img, lens[i], opcodes, name,
disasm_opt);
disasm_print_insn(img, lens[i], opcodes,
name, disasm_opt, btf,
prog_linfo, ksyms[i], i,
linum);
img += lens[i];
if (json_output)
......@@ -721,7 +774,7 @@ static int do_dump(int argc, char **argv)
jsonw_end_array(json_wtr);
} else {
disasm_print_insn(buf, *member_len, opcodes, name,
disasm_opt);
disasm_opt, btf, NULL, 0, 0, false);
}
} else if (visual) {
if (json_output)
......@@ -735,11 +788,14 @@ static int do_dump(int argc, char **argv)
dd.btf = btf;
dd.func_info = func_info;
dd.finfo_rec_size = finfo_rec_size;
dd.prog_linfo = prog_linfo;
if (json_output)
dump_xlated_json(&dd, buf, *member_len, opcodes);
dump_xlated_json(&dd, buf, *member_len, opcodes,
linum);
else
dump_xlated_plain(&dd, buf, *member_len, opcodes);
dump_xlated_plain(&dd, buf, *member_len, opcodes,
linum);
kernel_syms_destroy(&dd);
}
......@@ -747,6 +803,9 @@ static int do_dump(int argc, char **argv)
free(func_ksyms);
free(func_lens);
free(func_info);
free(linfo);
free(jited_linfo);
bpf_prog_linfo__free(prog_linfo);
return 0;
err_free:
......@@ -754,6 +813,9 @@ static int do_dump(int argc, char **argv)
free(func_ksyms);
free(func_lens);
free(func_info);
free(linfo);
free(jited_linfo);
bpf_prog_linfo__free(prog_linfo);
return -1;
}
......@@ -1155,8 +1217,8 @@ static int do_help(int argc, char **argv)
fprintf(stderr,
"Usage: %s %s { show | list } [PROG]\n"
" %s %s dump xlated PROG [{ file FILE | opcodes | visual }]\n"
" %s %s dump jited PROG [{ file FILE | opcodes }]\n"
" %s %s dump xlated PROG [{ file FILE | opcodes | visual | linum }]\n"
" %s %s dump jited PROG [{ file FILE | opcodes | linum }]\n"
" %s %s pin PROG FILE\n"
" %s %s { load | loadall } OBJ PATH \\\n"
" [type TYPE] [dev NAME] \\\n"
......@@ -1164,6 +1226,7 @@ static int do_help(int argc, char **argv)
" [pinmaps MAP_DIR]\n"
" %s %s attach PROG ATTACH_TYPE [MAP]\n"
" %s %s detach PROG ATTACH_TYPE [MAP]\n"
" %s %s tracelog\n"
" %s %s help\n"
"\n"
" " HELP_SPEC_MAP "\n"
......@@ -1172,6 +1235,7 @@ static int do_help(int argc, char **argv)
" tracepoint | raw_tracepoint | xdp | perf_event | cgroup/skb |\n"
" cgroup/sock | cgroup/dev | lwt_in | lwt_out | lwt_xmit |\n"
" lwt_seg6local | sockops | sk_skb | sk_msg | lirc_mode2 |\n"
" sk_reuseport | flow_dissector |\n"
" cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
" cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
" cgroup/sendmsg4 | cgroup/sendmsg6 }\n"
......@@ -1181,7 +1245,7 @@ static int do_help(int argc, char **argv)
"",
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
bin_name, argv[-2], bin_name, argv[-2]);
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2]);
return 0;
}
......@@ -1196,6 +1260,7 @@ static const struct cmd cmds[] = {
{ "loadall", do_loadall },
{ "attach", do_attach },
{ "detach", do_detach },
{ "tracelog", do_tracelog },
{ 0 }
};
......
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (c) 2015-2017 Daniel Borkmann */
/* Copyright (c) 2018 Netronome Systems, Inc. */
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/magic.h>
#include <sys/fcntl.h>
#include <sys/vfs.h>
#include "main.h"
#ifndef TRACEFS_MAGIC
# define TRACEFS_MAGIC 0x74726163
#endif
#define _textify(x) #x
#define textify(x) _textify(x)
FILE *trace_pipe_fd;
char *buff;
static int validate_tracefs_mnt(const char *mnt, unsigned long magic)
{
struct statfs st_fs;
if (statfs(mnt, &st_fs) < 0)
return -ENOENT;
if ((unsigned long)st_fs.f_type != magic)
return -ENOENT;
return 0;
}
static bool
find_tracefs_mnt_single(unsigned long magic, char *mnt, const char *mntpt)
{
size_t src_len;
if (validate_tracefs_mnt(mntpt, magic))
return false;
src_len = strlen(mntpt);
if (src_len + 1 >= PATH_MAX) {
p_err("tracefs mount point name too long");
return false;
}
strcpy(mnt, mntpt);
return true;
}
static bool find_tracefs_pipe(char *mnt)
{
static const char * const known_mnts[] = {
"/sys/kernel/debug/tracing",
"/sys/kernel/tracing",
"/tracing",
"/trace",
};
const char *pipe_name = "/trace_pipe";
const char *fstype = "tracefs";
char type[100], format[32];
const char * const *ptr;
bool found = false;
FILE *fp;
for (ptr = known_mnts; ptr < known_mnts + ARRAY_SIZE(known_mnts); ptr++)
if (find_tracefs_mnt_single(TRACEFS_MAGIC, mnt, *ptr))
goto exit_found;
fp = fopen("/proc/mounts", "r");
if (!fp)
return false;
/* Allow room for NULL terminating byte and pipe file name */
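/* The resulting format looks like "%*s %<width>s %99s %*s %*d %*d": skip
 * the device name, read the mount point (width-limited so the pipe file
 * name still fits into mnt) and the filesystem type, and ignore the rest
 * of each /proc/mounts line.
 */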
snprintf(format, sizeof(format), "%%*s %%%zds %%99s %%*s %%*d %%*d\\n",
PATH_MAX - strlen(pipe_name) - 1);
while (fscanf(fp, format, mnt, type) == 2)
if (strcmp(type, fstype) == 0) {
found = true;
break;
}
fclose(fp);
/* The string from fscanf() might be truncated, check mnt is valid */
if (!found || validate_tracefs_mnt(mnt, TRACEFS_MAGIC))
return false;
exit_found:
strcat(mnt, pipe_name);
return true;
}
static void exit_tracelog(int signum)
{
fclose(trace_pipe_fd);
free(buff);
if (json_output) {
jsonw_end_array(json_wtr);
jsonw_destroy(&json_wtr);
}
exit(0);
}
int do_tracelog(int argc, char **argv)
{
const struct sigaction act = {
.sa_handler = exit_tracelog
};
char trace_pipe[PATH_MAX];
bool found_trace_pipe;
size_t buff_len = 0;
if (json_output)
jsonw_start_array(json_wtr);
found_trace_pipe = find_tracefs_pipe(trace_pipe);
if (!found_trace_pipe) {
p_err("could not find trace pipe, tracefs not mounted?");
return -1;
}
trace_pipe_fd = fopen(trace_pipe, "r");
if (!trace_pipe_fd) {
p_err("could not open trace pipe: %s", strerror(errno));
return -1;
}
sigaction(SIGHUP, &act, NULL);
sigaction(SIGINT, &act, NULL);
sigaction(SIGTERM, &act, NULL);
while (1) {
ssize_t ret;
ret = getline(&buff, &buff_len, trace_pipe_fd);
if (ret <= 0) {
p_err("failed to read content from trace pipe: %s",
strerror(errno));
break;
}
if (json_output)
jsonw_string(json_wtr, buff);
else
printf("%s", buff);
}
fclose(trace_pipe_fd);
free(buff);
return -1;
}
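With the command table entry added earlier in this patch, the new subcommand is invoked as bpftool prog tracelog: it locates trace_pipe under one of the known tracefs mounts (falling back to scanning /proc/mounts), then streams each line to stdout, or appends it as a string to a JSON array when JSON output is enabled, until the process receives SIGHUP, SIGINT or SIGTERM.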
......@@ -41,6 +41,7 @@
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <libbpf.h>
#include "disasm.h"
#include "json_writer.h"
......@@ -234,8 +235,9 @@ static const char *print_imm(void *private_data,
}
void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len,
bool opcodes)
bool opcodes, bool linum)
{
const struct bpf_prog_linfo *prog_linfo = dd->prog_linfo;
const struct bpf_insn_cbs cbs = {
.cb_print = print_insn_json,
.cb_call = print_call,
......@@ -246,6 +248,7 @@ void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len,
struct bpf_insn *insn = buf;
struct btf *btf = dd->btf;
bool double_insn = false;
unsigned int nr_skip = 0;
char func_sig[1024];
unsigned int i;
......@@ -261,7 +264,7 @@ void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len,
jsonw_start_object(json_wtr);
if (btf && record) {
if (record->insn_offset == i) {
if (record->insn_off == i) {
btf_dumper_type_only(btf, record->type_id,
func_sig,
sizeof(func_sig));
......@@ -273,6 +276,16 @@ void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len,
}
}
if (prog_linfo) {
const struct bpf_line_info *linfo;
linfo = bpf_prog_linfo__lfind(prog_linfo, i, nr_skip);
if (linfo) {
btf_dump_linfo_json(btf, linfo, linum);
nr_skip++;
}
}
jsonw_name(json_wtr, "disasm");
print_bpf_insn(&cbs, insn + i, true);
......@@ -307,8 +320,9 @@ void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len,
}
void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len,
bool opcodes)
bool opcodes, bool linum)
{
const struct bpf_prog_linfo *prog_linfo = dd->prog_linfo;
const struct bpf_insn_cbs cbs = {
.cb_print = print_insn,
.cb_call = print_call,
......@@ -318,6 +332,7 @@ void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len,
struct bpf_func_info *record;
struct bpf_insn *insn = buf;
struct btf *btf = dd->btf;
unsigned int nr_skip = 0;
bool double_insn = false;
char func_sig[1024];
unsigned int i;
......@@ -330,7 +345,7 @@ void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len,
}
if (btf && record) {
if (record->insn_offset == i) {
if (record->insn_off == i) {
btf_dumper_type_only(btf, record->type_id,
func_sig,
sizeof(func_sig));
......@@ -340,6 +355,17 @@ void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len,
}
}
if (prog_linfo) {
const struct bpf_line_info *linfo;
linfo = bpf_prog_linfo__lfind(prog_linfo, i, nr_skip);
if (linfo) {
btf_dump_linfo_plain(btf, linfo, "; ",
linum);
nr_skip++;
}
}
double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
printf("% 4d: ", i);
......
......@@ -40,6 +40,8 @@
#define SYM_MAX_NAME 256
struct bpf_prog_linfo;
struct kernel_sym {
unsigned long address;
char name[SYM_MAX_NAME];
......@@ -54,6 +56,7 @@ struct dump_data {
struct btf *btf;
void *func_info;
__u32 finfo_rec_size;
const struct bpf_prog_linfo *prog_linfo;
char scratch_buff[SYM_MAX_NAME + 8];
};
......@@ -61,9 +64,9 @@ void kernel_syms_load(struct dump_data *dd);
void kernel_syms_destroy(struct dump_data *dd);
struct kernel_sym *kernel_syms_search(struct dump_data *dd, unsigned long key);
void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len,
bool opcodes);
bool opcodes, bool linum);
void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len,
bool opcodes);
bool opcodes, bool linum);
void dump_xlated_for_graph(struct dump_data *dd, void *buf, void *buf_end,
unsigned int start_index);
......
......@@ -232,6 +232,20 @@ enum bpf_attach_type {
*/
#define BPF_F_STRICT_ALIGNMENT (1U << 0)
/* If BPF_F_ANY_ALIGNMENT is used in the BPF_PROG_LOAD command, the
 * verifier will allow any alignment whatsoever. On platforms
 * with strict alignment requirements for loads and stores (such
 * as sparc and mips), the verifier validates that all loads and
 * stores provably follow this requirement. This flag turns that
 * checking and enforcement off.
 *
 * It is mostly used for testing when we want to validate the
 * context and memory access aspects of the verifier, but an
 * unaligned access would otherwise trigger the alignment check
 * before the one we are interested in.
 */
#define BPF_F_ANY_ALIGNMENT (1U << 1)
/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
#define BPF_PSEUDO_MAP_FD 1
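As a rough illustration (not part of the patch), a test loader can pass this flag through the reworked bpf_verify_program() from the libbpf change later in this series; the insns buffer, license and log parameters below are placeholders supplied by the caller.

#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

/* Load a socket filter with verifier alignment checks disabled.
 * log_buf must be non-NULL: bpf_verify_program() writes to it.
 */
static int load_any_alignment(const struct bpf_insn *insns, size_t insns_cnt,
			      char *log_buf, size_t log_buf_sz)
{
	return bpf_verify_program(BPF_PROG_TYPE_SOCKET_FILTER, insns,
				  insns_cnt, BPF_F_ANY_ALIGNMENT, "GPL", 0,
				  log_buf, log_buf_sz, 2);
}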
......@@ -342,6 +356,9 @@ union bpf_attr {
__u32 func_info_rec_size; /* userspace bpf_func_info size */
__aligned_u64 func_info; /* func info */
__u32 func_info_cnt; /* number of bpf_func_info records */
__u32 line_info_rec_size; /* userspace bpf_line_info size */
__aligned_u64 line_info; /* line info */
__u32 line_info_cnt; /* number of bpf_line_info records */
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
......@@ -360,8 +377,11 @@ union bpf_attr {
struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
__u32 prog_fd;
__u32 retval;
__u32 data_size_in;
__u32 data_size_out;
__u32 data_size_in; /* input: len of data_in */
__u32 data_size_out; /* input/output: len of data_out
* returns ENOSPC if data_out
* is too small.
*/
__aligned_u64 data_in;
__aligned_u64 data_out;
__u32 repeat;
......@@ -2282,9 +2302,22 @@ union bpf_attr {
* if possible. Other errors can occur if input parameters are
* invalid either due to *start* byte not being valid part of msg
* payload and/or *pop* value being too large.
* Return
* 0 on success, or a negative error in case of failure.
*
* int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y)
* Description
* This helper is used in programs implementing IR decoding, to
* report a successfully decoded pointer movement.
*
* The *ctx* should point to the lirc sample as passed into
* the program.
*
* This helper is only available if the kernel was compiled with
* the **CONFIG_BPF_LIRC_MODE2** configuration option set to
* "**y**".
* Return
* 0 on success, or a negative error in case of failure.
* 0
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
......@@ -2378,7 +2411,8 @@ union bpf_attr {
FN(map_pop_elem), \
FN(map_peek_elem), \
FN(msg_push_data), \
FN(msg_pop_data),
FN(msg_pop_data), \
FN(rc_pointer_rel),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
......@@ -2496,6 +2530,7 @@ struct __sk_buff {
__u32 data_meta;
__bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
__u64 tstamp;
__u32 wire_len;
};
struct bpf_tunnel_key {
......@@ -2674,7 +2709,13 @@ struct bpf_prog_info {
__u32 btf_id;
__u32 func_info_rec_size;
__aligned_u64 func_info;
__u32 func_info_cnt;
__u32 nr_func_info;
__u32 nr_line_info;
__aligned_u64 line_info;
__aligned_u64 jited_line_info;
__u32 nr_jited_line_info;
__u32 line_info_rec_size;
__u32 jited_line_info_rec_size;
} __attribute__((aligned(8)));
struct bpf_map_info {
......@@ -2987,8 +3028,18 @@ struct bpf_flow_keys {
};
struct bpf_func_info {
__u32 insn_offset;
__u32 insn_off;
__u32 type_id;
};
#define BPF_LINE_INFO_LINE_NUM(line_col) ((line_col) >> 10)
#define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff)
struct bpf_line_info {
__u32 insn_off;
__u32 file_name_off;
__u32 line_off;
__u32 line_col;
};
#endif /* _UAPI__LINUX_BPF_H__ */
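For reference, a minimal hypothetical consumer of these records could resolve the file name through the program's BTF string section and split line_col with the macros above; btf__name_by_offset() is the existing libbpf accessor assumed here, with libbpf's btf.h on the include path.

#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/btf.h>

/* Print one bpf_line_info record as "file:line:column". */
static void print_line_info(const struct btf *btf,
			    const struct bpf_line_info *linfo)
{
	printf("%s:%u:%u\n",
	       btf__name_by_offset(btf, linfo->file_name_off),
	       BPF_LINE_INFO_LINE_NUM(linfo->line_col),
	       BPF_LINE_INFO_LINE_COL(linfo->line_col));
}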
libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o netlink.o
libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o netlink.o bpf_prog_linfo.o
.. SPDX-License-Identifier: GPL-2.0
.. SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
libbpf API naming convention
============================
......
......@@ -173,11 +173,36 @@ int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
-1);
}
static void *
alloc_zero_tailing_info(const void *orecord, __u32 cnt,
__u32 actual_rec_size, __u32 expected_rec_size)
{
__u64 info_len = actual_rec_size * cnt;
void *info, *nrecord;
int i;
info = malloc(info_len);
if (!info)
return NULL;
/* zero out bytes kernel does not understand */
nrecord = info;
for (i = 0; i < cnt; i++) {
memcpy(nrecord, orecord, expected_rec_size);
memset(nrecord + expected_rec_size, 0,
actual_rec_size - expected_rec_size);
orecord += actual_rec_size;
nrecord += actual_rec_size;
}
return info;
}
int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
char *log_buf, size_t log_buf_sz)
{
void *finfo = NULL, *linfo = NULL;
union bpf_attr attr;
void *finfo = NULL;
__u32 name_len;
int fd;
......@@ -201,53 +226,58 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
attr.func_info_rec_size = load_attr->func_info_rec_size;
attr.func_info_cnt = load_attr->func_info_cnt;
attr.func_info = ptr_to_u64(load_attr->func_info);
attr.line_info_rec_size = load_attr->line_info_rec_size;
attr.line_info_cnt = load_attr->line_info_cnt;
attr.line_info = ptr_to_u64(load_attr->line_info);
memcpy(attr.prog_name, load_attr->name,
min(name_len, BPF_OBJ_NAME_LEN - 1));
fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
if (fd >= 0 || !log_buf || !log_buf_sz)
if (fd >= 0)
return fd;
/* After bpf_prog_load, the kernel may modify certain attributes
* to give user space a hint on how to deal with the loading failure.
* Check to see whether we can make some changes and load again.
*/
if (errno == E2BIG && attr.func_info_cnt &&
attr.func_info_rec_size < load_attr->func_info_rec_size) {
__u32 actual_rec_size = load_attr->func_info_rec_size;
__u32 expected_rec_size = attr.func_info_rec_size;
__u32 finfo_cnt = load_attr->func_info_cnt;
__u64 finfo_len = actual_rec_size * finfo_cnt;
const void *orecord;
void *nrecord;
int i;
finfo = malloc(finfo_len);
if (!finfo)
/* further try with log buffer won't help */
return fd;
/* zero out bytes kernel does not understand */
orecord = load_attr->func_info;
nrecord = finfo;
for (i = 0; i < load_attr->func_info_cnt; i++) {
memcpy(nrecord, orecord, expected_rec_size);
memset(nrecord + expected_rec_size, 0,
actual_rec_size - expected_rec_size);
orecord += actual_rec_size;
nrecord += actual_rec_size;
while (errno == E2BIG && (!finfo || !linfo)) {
if (!finfo && attr.func_info_cnt &&
attr.func_info_rec_size < load_attr->func_info_rec_size) {
/* try with corrected func info records */
finfo = alloc_zero_tailing_info(load_attr->func_info,
load_attr->func_info_cnt,
load_attr->func_info_rec_size,
attr.func_info_rec_size);
if (!finfo)
goto done;
attr.func_info = ptr_to_u64(finfo);
attr.func_info_rec_size = load_attr->func_info_rec_size;
} else if (!linfo && attr.line_info_cnt &&
attr.line_info_rec_size <
load_attr->line_info_rec_size) {
linfo = alloc_zero_tailing_info(load_attr->line_info,
load_attr->line_info_cnt,
load_attr->line_info_rec_size,
attr.line_info_rec_size);
if (!linfo)
goto done;
attr.line_info = ptr_to_u64(linfo);
attr.line_info_rec_size = load_attr->line_info_rec_size;
} else {
break;
}
/* try with corrected func info records */
attr.func_info = ptr_to_u64(finfo);
attr.func_info_rec_size = load_attr->func_info_rec_size;
fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
if (fd >= 0 || !log_buf || !log_buf_sz)
if (fd >= 0)
goto done;
}
if (!log_buf || !log_buf_sz)
goto done;
/* Try again with log */
attr.log_buf = ptr_to_u64(log_buf);
attr.log_size = log_buf_sz;
......@@ -256,6 +286,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
done:
free(finfo);
free(linfo);
return fd;
}
......@@ -279,9 +310,9 @@ int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
}
int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
size_t insns_cnt, int strict_alignment,
const char *license, __u32 kern_version,
char *log_buf, size_t log_buf_sz, int log_level)
size_t insns_cnt, __u32 prog_flags, const char *license,
__u32 kern_version, char *log_buf, size_t log_buf_sz,
int log_level)
{
union bpf_attr attr;
......@@ -295,7 +326,7 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
attr.log_level = log_level;
log_buf[0] = 0;
attr.kern_version = kern_version;
attr.prog_flags = strict_alignment ? BPF_F_STRICT_ALIGNMENT : 0;
attr.prog_flags = prog_flags;
return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}
......@@ -463,6 +494,29 @@ int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
return ret;
}
int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
{
union bpf_attr attr;
int ret;
if (!test_attr->data_out && test_attr->data_size_out > 0)
return -EINVAL;
bzero(&attr, sizeof(attr));
attr.test.prog_fd = test_attr->prog_fd;
attr.test.data_in = ptr_to_u64(test_attr->data_in);
attr.test.data_out = ptr_to_u64(test_attr->data_out);
attr.test.data_size_in = test_attr->data_size_in;
attr.test.data_size_out = test_attr->data_size_out;
attr.test.repeat = test_attr->repeat;
ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
test_attr->data_size_out = attr.test.data_size_out;
test_attr->retval = attr.test.retval;
test_attr->duration = attr.test.duration;
return ret;
}
int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
union bpf_attr attr;
......
......@@ -82,6 +82,9 @@ struct bpf_load_program_attr {
__u32 func_info_rec_size;
const void *func_info;
__u32 func_info_cnt;
__u32 line_info_rec_size;
const void *line_info;
__u32 line_info_cnt;
};
/* Flags to direct loading requirements */
......@@ -98,7 +101,7 @@ LIBBPF_API int bpf_load_program(enum bpf_prog_type type,
char *log_buf, size_t log_buf_sz);
LIBBPF_API int bpf_verify_program(enum bpf_prog_type type,
const struct bpf_insn *insns,
size_t insns_cnt, int strict_alignment,
size_t insns_cnt, __u32 prog_flags,
const char *license, __u32 kern_version,
char *log_buf, size_t log_buf_sz,
int log_level);
......@@ -118,6 +121,25 @@ LIBBPF_API int bpf_prog_attach(int prog_fd, int attachable_fd,
LIBBPF_API int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
LIBBPF_API int bpf_prog_detach2(int prog_fd, int attachable_fd,
enum bpf_attach_type type);
struct bpf_prog_test_run_attr {
int prog_fd;
int repeat;
const void *data_in;
__u32 data_size_in;
void *data_out; /* optional */
__u32 data_size_out; /* in: max length of data_out
* out: length of data_out */
__u32 retval; /* out: return code of the BPF program */
__u32 duration; /* out: average per repetition in ns */
};
LIBBPF_API int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr);
/*
* bpf_prog_test_run does not check that data_out is large enough. Consider
* using bpf_prog_test_run_xattr instead.
*/
LIBBPF_API int bpf_prog_test_run(int prog_fd, int repeat, void *data,
__u32 size, void *data_out, __u32 *size_out,
__u32 *retval, __u32 *duration);
......
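A minimal sketch of how a caller might drive the new API (prog_fd, pkt and the buffer sizes are placeholders, not part of the patch):

#include <bpf/bpf.h>

/* Run a loaded program once over an input buffer and retrieve the
 * (possibly truncated) output together with the program's return code.
 */
static int run_once(int prog_fd, const void *pkt, __u32 pkt_len,
		    void *out, __u32 out_cap, __u32 *retval)
{
	struct bpf_prog_test_run_attr attr = {
		.prog_fd = prog_fd,
		.repeat = 1,
		.data_in = pkt,
		.data_size_in = pkt_len,
		.data_out = out,
		.data_size_out = out_cap,
	};
	int err = bpf_prog_test_run_xattr(&attr);

	if (!err)
		*retval = attr.retval; /* attr.data_size_out now holds the real length */
	return err;
}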
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2018 Facebook */
#include <string.h>
#include <stdlib.h>
#include <linux/err.h>
#include <linux/bpf.h>
#include "libbpf.h"
#ifndef min
#define min(x, y) ((x) < (y) ? (x) : (y))
#endif
struct bpf_prog_linfo {
void *raw_linfo;
void *raw_jited_linfo;
__u32 *nr_jited_linfo_per_func;
__u32 *jited_linfo_func_idx;
__u32 nr_linfo;
__u32 nr_jited_func;
__u32 rec_size;
__u32 jited_rec_size;
};
static int dissect_jited_func(struct bpf_prog_linfo *prog_linfo,
const __u64 *ksym_func, const __u32 *ksym_len)
{
__u32 nr_jited_func, nr_linfo;
const void *raw_jited_linfo;
const __u64 *jited_linfo;
__u64 last_jited_linfo;
/*
* Index to raw_jited_linfo:
* i: Index for searching the next ksym_func
* prev_i: Index to the last found ksym_func
*/
__u32 i, prev_i;
__u32 f; /* Index to ksym_func */
raw_jited_linfo = prog_linfo->raw_jited_linfo;
jited_linfo = raw_jited_linfo;
if (ksym_func[0] != *jited_linfo)
goto errout;
prog_linfo->jited_linfo_func_idx[0] = 0;
nr_jited_func = prog_linfo->nr_jited_func;
nr_linfo = prog_linfo->nr_linfo;
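/*
 * Walk the jited_linfo addresses once; every time an address matches the
 * start of the next jited function (ksym_func[f]), remember where that
 * function's line info begins and how many records the previous function
 * owned.
 */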
for (prev_i = 0, i = 1, f = 1;
i < nr_linfo && f < nr_jited_func;
i++) {
raw_jited_linfo += prog_linfo->jited_rec_size;
last_jited_linfo = *jited_linfo;
jited_linfo = raw_jited_linfo;
if (ksym_func[f] == *jited_linfo) {
prog_linfo->jited_linfo_func_idx[f] = i;
/* Sanity check */
if (last_jited_linfo - ksym_func[f - 1] + 1 >
ksym_len[f - 1])
goto errout;
prog_linfo->nr_jited_linfo_per_func[f - 1] =
i - prev_i;
prev_i = i;
/*
* The ksym_func[f] is found in jited_linfo.
* Look for the next one.
*/
f++;
} else if (*jited_linfo <= last_jited_linfo) {
/* Ensure the addr is increasing _within_ a func */
goto errout;
}
}
if (f != nr_jited_func)
goto errout;
prog_linfo->nr_jited_linfo_per_func[nr_jited_func - 1] =
nr_linfo - prev_i;
return 0;
errout:
return -EINVAL;
}
void bpf_prog_linfo__free(struct bpf_prog_linfo *prog_linfo)
{
if (!prog_linfo)
return;
free(prog_linfo->raw_linfo);
free(prog_linfo->raw_jited_linfo);
free(prog_linfo->nr_jited_linfo_per_func);
free(prog_linfo->jited_linfo_func_idx);
free(prog_linfo);
}
struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info)
{
struct bpf_prog_linfo *prog_linfo;
__u32 nr_linfo, nr_jited_func;
nr_linfo = info->nr_line_info;
/*
* Test !info->line_info because the kernel may NULL
* the ptr if kernel.kptr_restrict is set.
*/
if (!nr_linfo || !info->line_info)
return NULL;
/*
* The min size that bpf_prog_linfo has to access for
* searching purpose.
*/
if (info->line_info_rec_size <
offsetof(struct bpf_line_info, file_name_off))
return NULL;
prog_linfo = calloc(1, sizeof(*prog_linfo));
if (!prog_linfo)
return NULL;
/* Copy xlated line_info */
prog_linfo->nr_linfo = nr_linfo;
prog_linfo->rec_size = info->line_info_rec_size;
prog_linfo->raw_linfo = malloc(nr_linfo * prog_linfo->rec_size);
if (!prog_linfo->raw_linfo)
goto err_free;
memcpy(prog_linfo->raw_linfo, (void *)(long)info->line_info,
nr_linfo * prog_linfo->rec_size);
nr_jited_func = info->nr_jited_ksyms;
if (!nr_jited_func ||
!info->jited_line_info ||
info->nr_jited_line_info != nr_linfo ||
info->jited_line_info_rec_size < sizeof(__u64) ||
info->nr_jited_func_lens != nr_jited_func ||
!info->jited_ksyms ||
!info->jited_func_lens)
/* Not enough info to provide jited_line_info */
return prog_linfo;
/* Copy jited_line_info */
prog_linfo->nr_jited_func = nr_jited_func;
prog_linfo->jited_rec_size = info->jited_line_info_rec_size;
prog_linfo->raw_jited_linfo = malloc(nr_linfo *
prog_linfo->jited_rec_size);
if (!prog_linfo->raw_jited_linfo)
goto err_free;
memcpy(prog_linfo->raw_jited_linfo,
(void *)(long)info->jited_line_info,
nr_linfo * prog_linfo->jited_rec_size);
/* Number of jited_line_info per jited func */
prog_linfo->nr_jited_linfo_per_func = malloc(nr_jited_func *
sizeof(__u32));
if (!prog_linfo->nr_jited_linfo_per_func)
goto err_free;
/*
* For each jited func, the start idx into the
* "linfo" and "jited_linfo" arrays.
*/
prog_linfo->jited_linfo_func_idx = malloc(nr_jited_func *
sizeof(__u32));
if (!prog_linfo->jited_linfo_func_idx)
goto err_free;
if (dissect_jited_func(prog_linfo,
(__u64 *)(long)info->jited_ksyms,
(__u32 *)(long)info->jited_func_lens))
goto err_free;
return prog_linfo;
err_free:
bpf_prog_linfo__free(prog_linfo);
return NULL;
}
const struct bpf_line_info *
bpf_prog_linfo__lfind_addr_func(const struct bpf_prog_linfo *prog_linfo,
__u64 addr, __u32 func_idx, __u32 nr_skip)
{
__u32 jited_rec_size, rec_size, nr_linfo, start, i;
const void *raw_jited_linfo, *raw_linfo;
const __u64 *jited_linfo;
if (func_idx >= prog_linfo->nr_jited_func)
return NULL;
nr_linfo = prog_linfo->nr_jited_linfo_per_func[func_idx];
if (nr_skip >= nr_linfo)
return NULL;
start = prog_linfo->jited_linfo_func_idx[func_idx] + nr_skip;
jited_rec_size = prog_linfo->jited_rec_size;
raw_jited_linfo = prog_linfo->raw_jited_linfo +
(start * jited_rec_size);
jited_linfo = raw_jited_linfo;
if (addr < *jited_linfo)
return NULL;
nr_linfo -= nr_skip;
rec_size = prog_linfo->rec_size;
raw_linfo = prog_linfo->raw_linfo + (start * rec_size);
for (i = 0; i < nr_linfo; i++) {
if (addr < *jited_linfo)
break;
raw_linfo += rec_size;
raw_jited_linfo += jited_rec_size;
jited_linfo = raw_jited_linfo;
}
return raw_linfo - rec_size;
}
const struct bpf_line_info *
bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo,
__u32 insn_off, __u32 nr_skip)
{
const struct bpf_line_info *linfo;
__u32 rec_size, nr_linfo, i;
const void *raw_linfo;
nr_linfo = prog_linfo->nr_linfo;
if (nr_skip >= nr_linfo)
return NULL;
rec_size = prog_linfo->rec_size;
raw_linfo = prog_linfo->raw_linfo + (nr_skip * rec_size);
linfo = raw_linfo;
if (insn_off < linfo->insn_off)
return NULL;
nr_linfo -= nr_skip;
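/*
 * Linear scan from the nr_skip'th record: stop at the first record that
 * starts past insn_off and return the record just before it, i.e. the
 * line info covering insn_off.
 */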
for (i = 0; i < nr_linfo; i++) {
if (insn_off < linfo->insn_off)
break;
raw_linfo += rec_size;
linfo = raw_linfo;
}
return raw_linfo - rec_size;
}
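To mirror how bpftool's dumpers use this API, a hypothetical annotator could look like the sketch below; info is assumed to be a bpf_prog_info already filled by BPF_OBJ_GET_INFO_BY_FD with the line_info pointers populated.

#include <stdio.h>
#include <linux/bpf.h>
#include <libbpf.h>

static void annotate_insns(const struct bpf_prog_info *info, __u32 nr_insns)
{
	const struct bpf_line_info *linfo;
	struct bpf_prog_linfo *prog_linfo;
	__u32 i, nr_skip = 0;

	prog_linfo = bpf_prog_linfo__new(info);
	if (!prog_linfo)
		return;

	for (i = 0; i < nr_insns; i++) {
		/* bump nr_skip so each line info record is reported once */
		linfo = bpf_prog_linfo__lfind(prog_linfo, i, nr_skip);
		if (linfo) {
			printf("insn %u -> line %u\n", i,
			       BPF_LINE_INFO_LINE_NUM(linfo->line_col));
			nr_skip++;
		}
	}

	bpf_prog_linfo__free(prog_linfo);
}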
......@@ -51,13 +51,8 @@ struct btf_ext_header {
/* All offsets are in bytes relative to the end of this header */
__u32 func_info_off;
__u32 func_info_len;
};
struct btf_sec_func_info {
__u32 sec_name_off;
__u32 num_func_info;
/* Followed by num_func_info number of bpf func_info records */
__u8 data[0];
__u32 line_info_off;
__u32 line_info_len;
};
typedef int (*btf_print_fn_t)(const char *, ...)
......@@ -77,12 +72,16 @@ LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf);
struct btf_ext *btf_ext__new(__u8 *data, __u32 size, btf_print_fn_t err_log);
void btf_ext__free(struct btf_ext *btf_ext);
int btf_ext__reloc_init(struct btf *btf, struct btf_ext *btf_ext,
const char *sec_name, void **func_info,
__u32 *func_info_rec_size, __u32 *func_info_len);
int btf_ext__reloc(struct btf *btf, struct btf_ext *btf_ext,
const char *sec_name, __u32 insns_cnt, void **func_info,
__u32 *func_info_len);
int btf_ext__reloc_func_info(const struct btf *btf,
const struct btf_ext *btf_ext,
const char *sec_name, __u32 insns_cnt,
void **func_info, __u32 *func_info_len);
int btf_ext__reloc_line_info(const struct btf *btf,
const struct btf_ext *btf_ext,
const char *sec_name, __u32 insns_cnt,
void **line_info, __u32 *cnt);
__u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext);
__u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext);
#ifdef __cplusplus
} /* extern "C" */
......
......@@ -167,9 +167,13 @@ struct bpf_program {
int btf_fd;
void *func_info;
__u32 func_info_rec_size;
__u32 func_info_len;
__u32 func_info_cnt;
struct bpf_capabilities *caps;
void *line_info;
__u32 line_info_rec_size;
__u32 line_info_cnt;
};
struct bpf_map {
......@@ -779,6 +783,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
{
Elf *elf = obj->efile.elf;
GElf_Ehdr *ep = &obj->efile.ehdr;
Elf_Data *btf_ext_data = NULL;
Elf_Scn *scn = NULL;
int idx = 0, err = 0;
......@@ -841,14 +846,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
obj->btf = NULL;
}
} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
obj->btf_ext = btf_ext__new(data->d_buf, data->d_size,
__pr_debug);
if (IS_ERR(obj->btf_ext)) {
pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
BTF_EXT_ELF_SEC,
PTR_ERR(obj->btf_ext));
obj->btf_ext = NULL;
}
btf_ext_data = data;
} else if (sh.sh_type == SHT_SYMTAB) {
if (obj->efile.symbols) {
pr_warning("bpf: multiple SYMTAB in %s\n",
......@@ -910,6 +908,22 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
pr_warning("Corrupted ELF file: index of strtab invalid\n");
return LIBBPF_ERRNO__FORMAT;
}
if (btf_ext_data) {
if (!obj->btf) {
pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
BTF_EXT_ELF_SEC, BTF_ELF_SEC);
} else {
obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
btf_ext_data->d_size,
__pr_debug);
if (IS_ERR(obj->btf_ext)) {
pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
BTF_EXT_ELF_SEC,
PTR_ERR(obj->btf_ext));
obj->btf_ext = NULL;
}
}
}
if (obj->efile.maps_shndx >= 0) {
err = bpf_object__init_maps(obj, flags);
if (err)
......@@ -1275,6 +1289,82 @@ bpf_object__create_maps(struct bpf_object *obj)
return 0;
}
static int
check_btf_ext_reloc_err(struct bpf_program *prog, int err,
void *btf_prog_info, const char *info_name)
{
if (err != -ENOENT) {
pr_warning("Error in loading %s for sec %s.\n",
info_name, prog->section_name);
return err;
}
/* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */
if (btf_prog_info) {
/*
* Some info has already been found, but the last
* btf_ext reloc had a problem. We must error out.
*/
pr_warning("Error in relocating %s for sec %s.\n",
info_name, prog->section_name);
return err;
}
/*
* There was a problem loading the very first info. Ignore
* the rest.
*/
pr_warning("Cannot find %s for main program sec %s. Ignore all %s.\n",
info_name, prog->section_name, info_name);
return 0;
}
static int
bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
const char *section_name, __u32 insn_offset)
{
int err;
if (!insn_offset || prog->func_info) {
/*
* !insn_offset => main program
*
* For sub prog, the main program's func_info has to
* be loaded first (i.e. prog->func_info != NULL)
*/
err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext,
section_name, insn_offset,
&prog->func_info,
&prog->func_info_cnt);
if (err)
return check_btf_ext_reloc_err(prog, err,
prog->func_info,
"bpf_func_info");
prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext);
}
if (!insn_offset || prog->line_info) {
err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext,
section_name, insn_offset,
&prog->line_info,
&prog->line_info_cnt);
if (err)
return check_btf_ext_reloc_err(prog, err,
prog->line_info,
"bpf_line_info");
prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
}
if (!insn_offset)
prog->btf_fd = btf__fd(obj->btf);
return 0;
}
static int
bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
struct reloc_desc *relo)
......@@ -1306,17 +1396,12 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
return -ENOMEM;
}
if (obj->btf && obj->btf_ext) {
err = btf_ext__reloc(obj->btf, obj->btf_ext,
text->section_name,
prog->insns_cnt,
&prog->func_info,
&prog->func_info_len);
if (err) {
pr_warning("error in btf_ext__reloc for sec %s\n",
text->section_name);
if (obj->btf_ext) {
err = bpf_program_reloc_btf_ext(prog, obj,
text->section_name,
prog->insns_cnt);
if (err)
return err;
}
}
memcpy(new_insn + prog->insns_cnt, text->insns,
......@@ -1341,18 +1426,11 @@ bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
if (!prog)
return 0;
if (obj->btf && obj->btf_ext) {
err = btf_ext__reloc_init(obj->btf, obj->btf_ext,
prog->section_name,
&prog->func_info,
&prog->func_info_rec_size,
&prog->func_info_len);
if (err) {
pr_warning("err in btf_ext__reloc_init for sec %s\n",
prog->section_name);
if (obj->btf_ext) {
err = bpf_program_reloc_btf_ext(prog, obj,
prog->section_name, 0);
if (err)
return err;
}
prog->btf_fd = btf__fd(obj->btf);
}
if (!prog->reloc_desc)
......@@ -1444,8 +1522,7 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
static int
load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
char *license, __u32 kern_version, int *pfd,
__u32 func_info_cnt)
char *license, __u32 kern_version, int *pfd)
{
struct bpf_load_program_attr load_attr;
char *cp, errmsg[STRERR_BUFSIZE];
......@@ -1465,8 +1542,10 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0;
load_attr.func_info = prog->func_info;
load_attr.func_info_rec_size = prog->func_info_rec_size;
load_attr.func_info_cnt = func_info_cnt;
load_attr.func_info_cnt = prog->func_info_cnt;
load_attr.line_info = prog->line_info;
load_attr.line_info_rec_size = prog->line_info_rec_size;
load_attr.line_info_cnt = prog->line_info_cnt;
if (!load_attr.insns || !load_attr.insns_cnt)
return -EINVAL;
......@@ -1523,14 +1602,8 @@ int
bpf_program__load(struct bpf_program *prog,
char *license, __u32 kern_version)
{
__u32 func_info_cnt;
int err = 0, fd, i;
if (prog->func_info_len == 0)
func_info_cnt = 0;
else
func_info_cnt = prog->func_info_len / prog->func_info_rec_size;
if (prog->instances.nr < 0 || !prog->instances.fds) {
if (prog->preprocessor) {
pr_warning("Internal error: can't load program '%s'\n",
......@@ -1553,8 +1626,7 @@ bpf_program__load(struct bpf_program *prog,
prog->section_name, prog->instances.nr);
}
err = load_program(prog, prog->insns, prog->insns_cnt,
license, kern_version, &fd,
func_info_cnt);
license, kern_version, &fd);
if (!err)
prog->instances.fds[0] = fd;
goto out;
......@@ -1584,8 +1656,7 @@ bpf_program__load(struct bpf_program *prog,
err = load_program(prog, result.new_insn_ptr,
result.new_insn_cnt,
license, kern_version, &fd,
func_info_cnt);
license, kern_version, &fd);
if (err) {
pr_warning("Loading the %dth instance of program '%s' failed\n",
......
......@@ -342,6 +342,19 @@ int libbpf_nl_get_qdisc(int sock, unsigned int nl_pid, int ifindex,
int libbpf_nl_get_filter(int sock, unsigned int nl_pid, int ifindex, int handle,
libbpf_dump_nlmsg_t dump_filter_nlmsg, void *cookie);
struct bpf_prog_linfo;
struct bpf_prog_info;
LIBBPF_API void bpf_prog_linfo__free(struct bpf_prog_linfo *prog_linfo);
LIBBPF_API struct bpf_prog_linfo *
bpf_prog_linfo__new(const struct bpf_prog_info *info);
LIBBPF_API const struct bpf_line_info *
bpf_prog_linfo__lfind_addr_func(const struct bpf_prog_linfo *prog_linfo,
__u64 addr, __u32 func_idx, __u32 nr_skip);
LIBBPF_API const struct bpf_line_info *
bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo,
__u32 insn_off, __u32 nr_skip);
#ifdef __cplusplus
} /* extern "C" */
#endif
......
......@@ -65,6 +65,7 @@ LIBBPF_0.0.1 {
bpf_prog_load_xattr;
bpf_prog_query;
bpf_prog_test_run;
bpf_prog_test_run_xattr;
bpf_program__fd;
bpf_program__is_kprobe;
bpf_program__is_perf_event;
......@@ -98,6 +99,10 @@ LIBBPF_0.0.1 {
bpf_program__unload;
bpf_program__unpin;
bpf_program__unpin_instance;
bpf_prog_linfo__free;
bpf_prog_linfo__new;
bpf_prog_linfo__lfind_addr_func;
bpf_prog_linfo__lfind;
bpf_raw_tracepoint_open;
bpf_set_link_xdp_fd;
bpf_task_fd_query;
......
......@@ -170,6 +170,8 @@ static int (*bpf_skb_vlan_push)(void *ctx, __be16 vlan_proto, __u16 vlan_tci) =
(void *) BPF_FUNC_skb_vlan_push;
static int (*bpf_skb_vlan_pop)(void *ctx) =
(void *) BPF_FUNC_skb_vlan_pop;
static int (*bpf_rc_pointer_rel)(void *ctx, int rel_x, int rel_y) =
(void *) BPF_FUNC_rc_pointer_rel;
/* llvm builtin functions that eBPF C program may use to
* emit BPF_LD_ABS and BPF_LD_IND instructions
......
......@@ -620,8 +620,8 @@ static int do_test_single(struct bpf_align_test *test)
prog_len = probe_filter_length(prog);
fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
prog, prog_len, 1, "GPL", 0,
bpf_vlog, sizeof(bpf_vlog), 2);
prog, prog_len, BPF_F_STRICT_ALIGNMENT,
"GPL", 0, bpf_vlog, sizeof(bpf_vlog), 2);
if (fd_prog < 0 && test->result != REJECT) {
printf("Failed to load program.\n");
printf("%s", bpf_vlog);
......
......@@ -21,13 +21,14 @@ do
if grep -q DRV_NAME=rc-loopback $i/uevent
then
LIRCDEV=$(grep DEVNAME= $i/lirc*/uevent | sed sQDEVNAME=Q/dev/Q)
INPUTDEV=$(grep DEVNAME= $i/input*/event*/uevent | sed sQDEVNAME=Q/dev/Q)
fi
done
if [ -n "$LIRCDEV" ];
then
TYPE=lirc_mode2
./test_lirc_mode2_user $LIRCDEV
./test_lirc_mode2_user $LIRCDEV $INPUTDEV
ret=$?
if [ $ret -ne 0 ]; then
echo -e ${RED}"FAIL: $TYPE"${NC}
......
......@@ -15,6 +15,9 @@ int bpf_decoder(unsigned int *sample)
if (duration & 0x10000)
bpf_rc_keydown(sample, 0x40, duration & 0xffff, 0);
if (duration & 0x20000)
bpf_rc_pointer_rel(sample, (duration >> 8) & 0xff,
duration & 0xff);
}
return 0;
......
......@@ -29,6 +29,7 @@
#include <linux/bpf.h>
#include <linux/lirc.h>
#include <linux/input.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
......@@ -47,12 +48,13 @@
int main(int argc, char **argv)
{
struct bpf_object *obj;
int ret, lircfd, progfd, mode;
int testir = 0x1dead;
int ret, lircfd, progfd, inputfd;
int testir1 = 0x1dead;
int testir2 = 0x20101;
u32 prog_ids[10], prog_flags[10], prog_cnt;
if (argc != 2) {
printf("Usage: %s /dev/lircN\n", argv[0]);
if (argc != 3) {
printf("Usage: %s /dev/lircN /dev/input/eventM\n", argv[0]);
return 2;
}
......@@ -76,9 +78,9 @@ int main(int argc, char **argv)
return 1;
}
mode = LIRC_MODE_SCANCODE;
if (ioctl(lircfd, LIRC_SET_REC_MODE, &mode)) {
printf("failed to set rec mode: %m\n");
inputfd = open(argv[2], O_RDONLY | O_NONBLOCK);
if (inputfd == -1) {
printf("failed to open input device %s: %m\n", argv[1]);
return 1;
}
......@@ -102,29 +104,54 @@ int main(int argc, char **argv)
}
/* Write raw IR */
ret = write(lircfd, &testir, sizeof(testir));
if (ret != sizeof(testir)) {
ret = write(lircfd, &testir1, sizeof(testir1));
if (ret != sizeof(testir1)) {
printf("Failed to send test IR message: %m\n");
return 1;
}
struct pollfd pfd = { .fd = lircfd, .events = POLLIN };
struct lirc_scancode lsc;
struct pollfd pfd = { .fd = inputfd, .events = POLLIN };
struct input_event event;
poll(&pfd, 1, 100);
for (;;) {
poll(&pfd, 1, 100);
/* Read decoded IR */
ret = read(lircfd, &lsc, sizeof(lsc));
if (ret != sizeof(lsc)) {
printf("Failed to read decoded IR: %m\n");
return 1;
/* Read decoded IR */
ret = read(inputfd, &event, sizeof(event));
if (ret != sizeof(event)) {
printf("Failed to read decoded IR: %m\n");
return 1;
}
if (event.type == EV_MSC && event.code == MSC_SCAN &&
event.value == 0xdead) {
break;
}
}
if (lsc.scancode != 0xdead || lsc.rc_proto != 64) {
printf("Incorrect scancode decoded\n");
/* Write raw IR */
ret = write(lircfd, &testir2, sizeof(testir2));
if (ret != sizeof(testir2)) {
printf("Failed to send test IR message: %m\n");
return 1;
}
for (;;) {
poll(&pfd, 1, 100);
/* Read decoded IR */
ret = read(inputfd, &event, sizeof(event));
if (ret != sizeof(event)) {
printf("Failed to read decoded IR: %m\n");
return 1;
}
if (event.type == EV_REL && event.code == REL_Y &&
event.value == 1) {
break;
}
}
prog_cnt = 10;
ret = bpf_prog_query(lircfd, BPF_LIRC_MODE2, 0, prog_flags, prog_ids,
&prog_cnt);
......
......@@ -70,7 +70,7 @@ static struct {
.tcp.urg_ptr = 123,
};
#define CHECK(condition, tag, format...) ({ \
#define _CHECK(condition, tag, duration, format...) ({ \
int __ret = !!(condition); \
if (__ret) { \
error_cnt++; \
......@@ -83,6 +83,11 @@ static struct {
__ret; \
})
#define CHECK(condition, tag, format...) \
_CHECK(condition, tag, duration, format)
#define CHECK_ATTR(condition, tag, format...) \
_CHECK(condition, tag, tattr.duration, format)
static int bpf_find_map(const char *test, struct bpf_object *obj,
const char *name)
{
......@@ -124,6 +129,53 @@ static void test_pkt_access(void)
bpf_object__close(obj);
}
static void test_prog_run_xattr(void)
{
const char *file = "./test_pkt_access.o";
struct bpf_object *obj;
char buf[10];
int err;
struct bpf_prog_test_run_attr tattr = {
.repeat = 1,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.data_out = buf,
.data_size_out = 5,
};
err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj,
&tattr.prog_fd);
if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno))
return;
memset(buf, 0, sizeof(buf));
err = bpf_prog_test_run_xattr(&tattr);
CHECK_ATTR(err != -1 || errno != ENOSPC || tattr.retval, "run",
"err %d errno %d retval %d\n", err, errno, tattr.retval);
CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out",
"incorrect output size, want %lu have %u\n",
sizeof(pkt_v4), tattr.data_size_out);
CHECK_ATTR(buf[5] != 0, "overflow",
"BPF_PROG_TEST_RUN ignored size hint\n");
tattr.data_out = NULL;
tattr.data_size_out = 0;
errno = 0;
err = bpf_prog_test_run_xattr(&tattr);
CHECK_ATTR(err || errno || tattr.retval, "run_no_output",
"err %d errno %d retval %d\n", err, errno, tattr.retval);
tattr.data_size_out = 1;
err = bpf_prog_test_run_xattr(&tattr);
CHECK_ATTR(err != -EINVAL, "run_wrong_size_out", "err %d\n", err);
bpf_object__close(obj);
}
static void test_xdp(void)
{
struct vip key4 = {.protocol = 6, .family = AF_INET};
......@@ -1837,6 +1889,7 @@ int main(void)
jit_enabled = is_jit_enabled();
test_pkt_access();
test_prog_run_xattr();
test_xdp();
test_xdp_adjust_tail();
test_l4lb_all();
......