Commit 63d7c880 authored by David S. Miller

Merge branch 'bpf-misc-helper-verifier-improvements'

Daniel Borkmann says:

====================
Misc BPF helper/verifier improvements

Miscellaneous improvements I still had in my queue: it adds a new
bpf_skb_adjust_room() helper for cls_bpf, exports to fdinfo whether
the tail call array owner is JITed so that iproute2 error reporting
can be improved in that regard, a small cleanup and extension to
trace printk, and two verifier patches, one to make the code around
narrower ctx access a bit more straightforward and one to allow for
imm += x operations that we've seen LLVM generating and the verifier
currently rejecting. We've included patch 6 given it's rather small
and we ran into it from the LLVM side; it would be great if it could
be queued for stable as well after the merge window. Last but not
least, test cases related to the imm alu improvement are added, too.

Thanks a lot!
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a68491f8 6d191ed4
......@@ -156,9 +156,14 @@ struct bpf_prog;
struct bpf_insn_access_aux {
enum bpf_reg_type reg_type;
int ctx_field_size;
int converted_op_size;
};
static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
aux->ctx_field_size = size;
}
struct bpf_verifier_ops {
/* return eBPF function prototype for verification */
const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id);
......@@ -173,7 +178,7 @@ struct bpf_verifier_ops {
u32 (*convert_ctx_access)(enum bpf_access_type type,
const struct bpf_insn *src,
struct bpf_insn *dst,
struct bpf_prog *prog);
struct bpf_prog *prog, u32 *target_size);
int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
};
......
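For context, a minimal sketch of how a program type's callbacks fit together after this change. The `example_ctx` struct and the callback names below are hypothetical; the hooks themselves (`bpf_ctx_record_field_size()`, the new `*target_size` parameter, and the `bpf_ctx_range()`/`bpf_target_off()`/`bpf_ctx_narrow_access_ok()` macros added elsewhere in this series) are the real ones:

```c
/* Hypothetical sketch only: example_ctx and the callback names are
 * made up; the hooks themselves are the ones added in this series.
 */
struct example_ctx {
	u32 mark;	/* full field is 4 bytes wide */
};

static bool example_is_valid_access(int off, int size,
				    enum bpf_access_type type,
				    struct bpf_insn_access_aux *info)
{
	switch (off) {
	case bpf_ctx_range(struct example_ctx, mark):
		/* Record the full field width so the verifier can later
		 * turn a narrower load into a full load plus mask.
		 */
		bpf_ctx_record_field_size(info, 4);
		return bpf_ctx_narrow_access_ok(off, size, 4);
	default:
		return false;
	}
}

static u32 example_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog,
				      u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	/* bpf_target_off() checks the kernel-side field width at build
	 * time and reports the converted size via *target_size.
	 */
	*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
			      bpf_target_off(struct example_ctx, mark, 4,
					     target_size));
	return insn - insn_buf;
}
```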
......@@ -337,6 +337,22 @@ struct bpf_prog_aux;
bpf_size; \
})
#define bpf_size_to_bytes(bpf_size) \
({ \
int bytes = -EINVAL; \
\
if (bpf_size == BPF_B) \
bytes = sizeof(u8); \
else if (bpf_size == BPF_H) \
bytes = sizeof(u16); \
else if (bpf_size == BPF_W) \
bytes = sizeof(u32); \
else if (bpf_size == BPF_DW) \
bytes = sizeof(u64); \
\
bytes; \
})
#define BPF_SIZEOF(type) \
({ \
const int __size = bytes_to_bpf_size(sizeof(type)); \
......@@ -351,6 +367,13 @@ struct bpf_prog_aux;
__size; \
})
#define BPF_LDST_BYTES(insn) \
({ \
const int __size = bpf_size_to_bytes(BPF_SIZE(insn->code)); \
WARN_ON(__size < 0); \
__size; \
})
#define __BPF_MAP_0(m, v, ...) v
#define __BPF_MAP_1(m, v, t, a, ...) m(t, a)
#define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__)
......@@ -401,6 +424,18 @@ struct bpf_prog_aux;
#define BPF_CALL_4(name, ...) BPF_CALL_x(4, name, __VA_ARGS__)
#define BPF_CALL_5(name, ...) BPF_CALL_x(5, name, __VA_ARGS__)
#define bpf_ctx_range(TYPE, MEMBER) \
offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
#define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2) \
offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1
#define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \
({ \
BUILD_BUG_ON(FIELD_SIZEOF(TYPE, MEMBER) != (SIZE)); \
*(PTR_SIZE) = (SIZE); \
offsetof(TYPE, MEMBER); \
})
#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
......@@ -564,6 +599,18 @@ static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
return prog->type == BPF_PROG_TYPE_UNSPEC;
}
static inline bool
bpf_ctx_narrow_access_ok(u32 off, u32 size, const u32 size_default)
{
bool off_ok;
#ifdef __LITTLE_ENDIAN
off_ok = (off & (size_default - 1)) == 0;
#else
off_ok = (off & (size_default - 1)) + size == size_default;
#endif
return off_ok && size <= size_default && (size & (size - 1)) == 0;
}
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
......
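The endianness split in bpf_ctx_narrow_access_ok() can be checked in isolation: on little endian the narrow load must start at the field's base (the low-order bytes), on big endian it must end at the field's last byte, and in both cases the size must be a power of two no larger than the field. A standalone reimplementation for illustration, mirroring the kernel check:

```c
/* Illustration of the bpf_ctx_narrow_access_ok() rule for an 8-byte
 * context field at offset 0 (e.g. sample_period further below).
 * Compile with or without -D__LITTLE_ENDIAN to see both variants.
 */
#include <stdbool.h>
#include <stdio.h>

static bool narrow_access_ok(unsigned int off, unsigned int size,
			     unsigned int size_default)
{
	bool off_ok;
#ifdef __LITTLE_ENDIAN
	/* Load must start at the field's base... */
	off_ok = (off & (size_default - 1)) == 0;
#else
	/* ...or, on big endian, end at the field's last byte. */
	off_ok = (off & (size_default - 1)) + size == size_default;
#endif
	return off_ok && size <= size_default && (size & (size - 1)) == 0;
}

int main(void)
{
	unsigned int off, size;

	for (off = 0; off < 8; off++)
		for (size = 1; size <= 8; size <<= 1)
			if (narrow_access_ok(off, size, 8))
				printf("off=%u size=%u allowed\n", off, size);
	return 0;
}
```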
......@@ -2206,6 +2206,11 @@ static inline int skb_mac_offset(const struct sk_buff *skb)
return skb_mac_header(skb) - skb->data;
}
static inline u32 skb_mac_header_len(const struct sk_buff *skb)
{
return skb->network_header - skb->mac_header;
}
static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
return skb->mac_header != (typeof(skb->mac_header))~0U;
......
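As a hypothetical illustration of the new skb helper: skb_mac_header_len() is simply the distance from the MAC header to the network header, i.e. ETH_HLEN (14) for a plain untagged Ethernet frame. The function below is made up for the example:

```c
#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* Hypothetical check: true for a frame whose L2 header is a plain,
 * untagged Ethernet header as set up by the parser.
 */
static bool looks_like_plain_ethernet(const struct sk_buff *skb)
{
	return skb_mac_header_was_set(skb) &&
	       skb_mac_header_len(skb) == ETH_HLEN;
}
```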
......@@ -531,6 +531,14 @@ union bpf_attr {
* @optval: pointer to option value
* @optlen: length of optval in bytes
* Return: 0 or negative error
*
* int bpf_skb_adjust_room(skb, len_diff, mode, flags)
* Grow or shrink room in sk_buff.
* @skb: pointer to skb
* @len_diff: (signed) amount of room to grow/shrink
* @mode: operation mode (enum bpf_adj_room_mode)
* @flags: reserved for future use
* Return: 0 on success or negative error code
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
......@@ -582,7 +590,8 @@ union bpf_attr {
FN(get_socket_cookie), \
FN(get_socket_uid), \
FN(set_hash), \
FN(setsockopt),
FN(setsockopt), \
FN(skb_adjust_room),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
......@@ -632,6 +641,11 @@ enum bpf_func_id {
/* BPF_FUNC_perf_event_output for sk_buff input context. */
#define BPF_F_CTXLEN_MASK (0xfffffULL << 32)
/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
BPF_ADJ_ROOM_NET,
};
/* user accessible mirror of in-kernel sk_buff.
* new fields can only be added to the end of this structure
*/
......
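A sketch of how the new helper might be called from a cls_bpf program; the local prototype declaration follows the usual bpf_helpers.h pattern and is written out here for illustration:

```c
#include <linux/bpf.h>
#include <linux/pkt_cls.h>

/* Prototype written out by hand for illustration (normally this comes
 * from a bpf_helpers.h style header).
 */
static int (*bpf_skb_adjust_room)(struct __sk_buff *skb, __s32 len_diff,
				  __u32 mode, __u64 flags) =
	(void *)BPF_FUNC_skb_adjust_room;

__attribute__((section("classifier"), used))
int grow_net_room(struct __sk_buff *skb)
{
	/* Make 8 bytes of extra room at the network layer; flags are
	 * reserved and must be 0.
	 */
	if (bpf_skb_adjust_room(skb, 8, BPF_ADJ_ROOM_NET, 0))
		return TC_ACT_SHOT;

	return TC_ACT_OK;
}
```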
......@@ -216,10 +216,12 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
const struct bpf_map *map = filp->private_data;
const struct bpf_array *array;
u32 owner_prog_type = 0;
u32 owner_jited = 0;
if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
array = container_of(map, struct bpf_array, map);
owner_prog_type = array->owner_prog_type;
owner_jited = array->owner_jited;
}
seq_printf(m,
......@@ -236,9 +238,12 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
map->map_flags,
map->pages * 1ULL << PAGE_SHIFT);
if (owner_prog_type)
if (owner_prog_type) {
seq_printf(m, "owner_prog_type:\t%u\n",
owner_prog_type);
seq_printf(m, "owner_jited:\t%u\n",
owner_jited);
}
}
#endif
......
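With this export, a loader such as iproute2 can read owner_jited from the map's fdinfo and warn when the JIT state of a program being inserted does not match that of the tail call array's owner. A minimal sketch, with error handling trimmed:

```c
#include <stdio.h>

/* Minimal sketch: dump a map fd's fdinfo so a loader can inspect the
 * owner_prog_type and (new here) owner_jited lines.
 */
static void dump_map_fdinfo(int map_fd)
{
	char path[64], line[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", map_fd);
	f = fopen(path, "r");
	if (!f)
		return;

	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);

	fclose(f);
}
```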
......@@ -546,20 +546,6 @@ static int check_reg_arg(struct bpf_reg_state *regs, u32 regno,
return 0;
}
static int bpf_size_to_bytes(int bpf_size)
{
if (bpf_size == BPF_W)
return 4;
else if (bpf_size == BPF_H)
return 2;
else if (bpf_size == BPF_B)
return 1;
else if (bpf_size == BPF_DW)
return 8;
else
return -EINVAL;
}
static bool is_spillable_regtype(enum bpf_reg_type type)
{
switch (type) {
......@@ -761,7 +747,9 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
enum bpf_access_type t, enum bpf_reg_type *reg_type)
{
struct bpf_insn_access_aux info = { .reg_type = *reg_type };
struct bpf_insn_access_aux info = {
.reg_type = *reg_type,
};
/* for analyzer ctx accesses are already validated and converted */
if (env->analyzer_ops)
......@@ -769,25 +757,14 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off,
if (env->prog->aux->ops->is_valid_access &&
env->prog->aux->ops->is_valid_access(off, size, t, &info)) {
/* a non zero info.ctx_field_size indicates:
* . For this field, the prog type specific ctx conversion algorithm
* only supports whole field access.
* . This ctx access is a candidate for later verifier transformation
* to load the whole field and then apply a mask to get correct result.
* a non zero info.converted_op_size indicates perceived actual converted
* value width in convert_ctx_access.
/* A non zero info.ctx_field_size indicates that this field is a
* candidate for later verifier transformation to load the whole
* field and then apply a mask when accessed with a narrower
* access than actual ctx access size. A zero info.ctx_field_size
* will only allow for whole field access and rejects any other
* type of narrower access.
*/
if ((info.ctx_field_size && !info.converted_op_size) ||
(!info.ctx_field_size && info.converted_op_size)) {
verbose("verifier bug in is_valid_access prog type=%u off=%d size=%d\n",
env->prog->type, off, size);
return -EACCES;
}
if (info.ctx_field_size) {
env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
env->insn_aux_data[insn_idx].converted_op_size = info.converted_op_size;
}
env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
*reg_type = info.reg_type;
/* remember the offset of last byte accessed in ctx */
......@@ -1680,6 +1657,65 @@ static int evaluate_reg_alu(struct bpf_verifier_env *env, struct bpf_insn *insn)
return 0;
}
static int evaluate_reg_imm_alu_unknown(struct bpf_verifier_env *env,
struct bpf_insn *insn)
{
struct bpf_reg_state *regs = env->cur_state.regs;
struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
struct bpf_reg_state *src_reg = &regs[insn->src_reg];
u8 opcode = BPF_OP(insn->code);
s64 imm_log2 = __ilog2_u64((long long)dst_reg->imm);
/* BPF_X code with src_reg->type UNKNOWN_VALUE here. */
if (src_reg->imm > 0 && dst_reg->imm) {
switch (opcode) {
case BPF_ADD:
/* dreg += sreg
* where both have zero upper bits. Adding them
* can only result making one more bit non-zero
* in the larger value.
* Ex. 0xffff (imm=48) + 1 (imm=63) = 0x10000 (imm=47)
* 0xffff (imm=48) + 0xffff = 0x1fffe (imm=47)
*/
dst_reg->imm = min(src_reg->imm, 63 - imm_log2);
dst_reg->imm--;
break;
case BPF_AND:
/* dreg &= sreg
* AND can not extend zero bits only shrink
* Ex. 0x00..00ffffff
* & 0x0f..ffffffff
* ----------------
* 0x00..00ffffff
*/
dst_reg->imm = max(src_reg->imm, 63 - imm_log2);
break;
case BPF_OR:
/* dreg |= sreg
* OR can only extend zero bits
* Ex. 0x00..00ffffff
* | 0x0f..ffffffff
* ----------------
* 0x0f..00ffffff
*/
dst_reg->imm = min(src_reg->imm, 63 - imm_log2);
break;
case BPF_SUB:
case BPF_MUL:
case BPF_RSH:
case BPF_LSH:
/* These may be flushed out later */
default:
mark_reg_unknown_value(regs, insn->dst_reg);
}
} else {
mark_reg_unknown_value(regs, insn->dst_reg);
}
dst_reg->type = UNKNOWN_VALUE;
return 0;
}
static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
struct bpf_insn *insn)
{
......@@ -1689,6 +1725,9 @@ static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
u8 opcode = BPF_OP(insn->code);
u64 dst_imm = dst_reg->imm;
if (BPF_SRC(insn->code) == BPF_X && src_reg->type == UNKNOWN_VALUE)
return evaluate_reg_imm_alu_unknown(env, insn);
/* dst_reg->type == CONST_IMM here. Simulate execution of insns
* containing ALU ops. Don't care about overflow or negative
* values, just add/sub/... them; registers are in u64.
......@@ -3401,11 +3440,13 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
static int convert_ctx_accesses(struct bpf_verifier_env *env)
{
const struct bpf_verifier_ops *ops = env->prog->aux->ops;
int i, cnt, size, ctx_field_size, delta = 0;
const int insn_cnt = env->prog->len;
struct bpf_insn insn_buf[16], *insn;
struct bpf_prog *new_prog;
enum bpf_access_type type;
int i, cnt, off, size, ctx_field_size, converted_op_size, is_narrower_load, delta = 0;
bool is_narrower_load;
u32 target_size;
if (ops->gen_prologue) {
cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
......@@ -3445,39 +3486,50 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
continue;
off = insn->off;
size = bpf_size_to_bytes(BPF_SIZE(insn->code));
ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
converted_op_size = env->insn_aux_data[i + delta].converted_op_size;
is_narrower_load = type == BPF_READ && size < ctx_field_size;
size = BPF_LDST_BYTES(insn);
/* If the read access is a narrower load of the field,
* convert to a 4/8-byte load, to minimum program type specific
* convert_ctx_access changes. If conversion is successful,
* we will apply proper mask to the result.
*/
is_narrower_load = size < ctx_field_size;
if (is_narrower_load) {
int size_code = BPF_H;
u32 off = insn->off;
u8 size_code;
if (type == BPF_WRITE) {
verbose("bpf verifier narrow ctx access misconfigured\n");
return -EINVAL;
}
size_code = BPF_H;
if (ctx_field_size == 4)
size_code = BPF_W;
else if (ctx_field_size == 8)
size_code = BPF_DW;
insn->off = off & ~(ctx_field_size - 1);
insn->code = BPF_LDX | BPF_MEM | size_code;
}
cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog);
if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
target_size = 0;
cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog,
&target_size);
if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
(ctx_field_size && !target_size)) {
verbose("bpf verifier is misconfigured\n");
return -EINVAL;
}
if (is_narrower_load && size < converted_op_size) {
if (is_narrower_load && size < target_size) {
if (ctx_field_size <= 4)
insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
(1 << size * 8) - 1);
(1 << size * 8) - 1);
else
insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
(1 << size * 8) - 1);
(1 << size * 8) - 1);
}
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
......
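In this verifier, reg->imm on an UNKNOWN_VALUE register tracks the number of known-zero upper bits. The BPF_ADD bound above (the minimum of both registers' zero-bit counts, minus one) can be sanity-checked with plain arithmetic; a standalone sketch reproducing the 0xffff + 0xffff example from the comment:

```c
#include <stdio.h>

/* Count known-zero upper bits the way the verifier's imm tracking
 * models them for UNKNOWN_VALUE registers.
 */
static int upper_zero_bits(unsigned long long v)
{
	int n = 64;

	while (v) {
		v >>= 1;
		n--;
	}
	return n;
}

int main(void)
{
	unsigned long long dst = 0xffff, src = 0xffff;
	int dst_zeros = upper_zero_bits(dst);	/* 48 */
	int src_zeros = upper_zero_bits(src);	/* 48 */
	int bound = (dst_zeros < src_zeros ? dst_zeros : src_zeros) - 1;

	/* 0xffff + 0xffff = 0x1fffe, which has exactly 47 upper zero
	 * bits, matching the min(...) - 1 bound used for BPF_ADD.
	 */
	printf("sum=%#llx bound=%d actual=%d\n", dst + src, bound,
	       upper_zero_bits(dst + src));
	return 0;
}
```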
......@@ -122,8 +122,8 @@ static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
}
/*
* limited trace_printk()
* only %d %u %x %ld %lu %lx %lld %llu %llx %p %s conversion specifiers allowed
* Only limited trace_printk() conversion specifiers allowed:
* %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
*/
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
u64, arg2, u64, arg3)
......@@ -198,7 +198,8 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
i++;
}
if (fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x')
if (fmt[i] != 'i' && fmt[i] != 'd' &&
fmt[i] != 'u' && fmt[i] != 'x')
return -EINVAL;
fmt_cnt++;
}
......@@ -583,7 +584,8 @@ const struct bpf_verifier_ops tracepoint_prog_ops = {
static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
struct bpf_insn_access_aux *info)
{
int sample_period_off;
const int size_sp = FIELD_SIZEOF(struct bpf_perf_event_data,
sample_period);
if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
return false;
......@@ -592,43 +594,35 @@ static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type
if (off % size != 0)
return false;
/* permit 1, 2, 4 byte narrower and 8 normal read access to sample_period */
sample_period_off = offsetof(struct bpf_perf_event_data, sample_period);
if (off >= sample_period_off && off < sample_period_off + sizeof(__u64)) {
int allowed;
#ifdef __LITTLE_ENDIAN
allowed = (off & 0x7) == 0 && size <= 8 && (size & (size - 1)) == 0;
#else
allowed = ((off & 0x7) + size) == 8 && size <= 8 && (size & (size - 1)) == 0;
#endif
if (!allowed)
switch (off) {
case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
bpf_ctx_record_field_size(info, size_sp);
if (!bpf_ctx_narrow_access_ok(off, size, size_sp))
return false;
info->ctx_field_size = 8;
info->converted_op_size = 8;
} else {
break;
default:
if (size != sizeof(long))
return false;
}
return true;
}
static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog)
struct bpf_prog *prog, u32 *target_size)
{
struct bpf_insn *insn = insn_buf;
switch (si->off) {
case offsetof(struct bpf_perf_event_data, sample_period):
BUILD_BUG_ON(FIELD_SIZEOF(struct perf_sample_data, period) != sizeof(u64));
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
data), si->dst_reg, si->src_reg,
offsetof(struct bpf_perf_event_data_kern, data));
*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
offsetof(struct perf_sample_data, period));
bpf_target_off(struct perf_sample_data, period, 8,
target_size));
break;
default:
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
......
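With this change, %i, %li and %lli are accepted alongside the existing %d/%u/%x family. A sketch of a kprobe program using the newly allowed specifier; the helper declaration is written out in the usual bpf_helpers.h style and the probe target is illustrative:

```c
#include <linux/bpf.h>

/* Helper prototype written out for illustration, bpf_helpers.h style. */
static int (*bpf_trace_printk)(const char *fmt, __u32 fmt_size, ...) =
	(void *)BPF_FUNC_trace_printk;

__attribute__((section("kprobe/do_sys_open"), used))
int probe_open(void *ctx)
{
	/* %i is now accepted in addition to %d/%u/%x and friends. */
	char fmt[] = "do_sys_open hit, sample value: %i\n";

	bpf_trace_printk(fmt, sizeof(fmt), -42);
	return 0;
}
```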
......@@ -531,6 +531,14 @@ union bpf_attr {
* @optval: pointer to option value
* @optlen: length of optval in bytes
* Return: 0 or negative error
*
* int bpf_skb_adjust_room(skb, len_diff, mode, flags)
* Grow or shrink room in sk_buff.
* @skb: pointer to skb
* @len_diff: (signed) amount of room to grow/shrink
* @mode: operation mode (enum bpf_adj_room_mode)
* @flags: reserved for future use
* Return: 0 on success or negative error code
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
......@@ -582,7 +590,8 @@ union bpf_attr {
FN(get_socket_cookie), \
FN(get_socket_uid), \
FN(set_hash), \
FN(setsockopt),
FN(setsockopt), \
FN(skb_adjust_room),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
......@@ -632,6 +641,11 @@ enum bpf_func_id {
/* BPF_FUNC_perf_event_output for sk_buff input context. */
#define BPF_F_CTXLEN_MASK (0xfffffULL << 32)
/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
BPF_ADJ_ROOM_NET,
};
/* user accessible mirror of in-kernel sk_buff.
* new fields can only be added to the end of this structure
*/
......
......@@ -2657,6 +2657,171 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
"direct packet access: test18 (imm += pkt_ptr, 1)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_IMM(BPF_REG_0, 8),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test19 (imm += pkt_ptr, 2)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
BPF_MOV64_IMM(BPF_REG_4, 4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"direct packet access: test20 (x += pkt_ptr, 1)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xffff - 1),
BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"direct packet access: test21 (x += pkt_ptr, 2)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0xffff),
BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xffff - 1),
BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"direct packet access: test22 (x += pkt_ptr, 3)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 48),
BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"direct packet access: test23 (x += pkt_ptr, 4)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_0, 31),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.errstr = "cannot add integer value with 47 upper zero bits to ptr_to_packet",
},
{
"direct packet access: test24 (x += pkt_ptr, 5)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_0, 64),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"helper access to packet: test1, valid packet_ptr range",
.insns = {
......
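For reference, the kind of restricted C that can lead LLVM to emit the reg += pkt_ptr form these tests exercise (rather than pkt_ptr += reg); the code below is illustrative only:

```c
#include <linux/bpf.h>

/* Illustrative only: depending on optimization, LLVM may compute
 * "reg = off; reg += pkt_ptr" for the pointer arithmetic below,
 * which the verifier rejected before this series.
 */
static inline int read_byte_at(struct __sk_buff *skb, unsigned long off)
{
	unsigned char *data = (unsigned char *)(long)skb->data;
	unsigned char *data_end = (unsigned char *)(long)skb->data_end;
	unsigned char *p = data + off;	/* may become off += data */

	if (p + 1 > data_end)
		return 0;

	return *p;
}
```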