Commit ae08ea97 authored by David S. Miller

Merge branch 'bpf-fixes'

Daniel Borkmann says:

====================
Various BPF fixes

Follow-up to fix incorrect pruning when alignment tracking is
in use, and to properly clear regs after a call so that no stale
data is left behind. Also included are a fix that adds
bpf_clone_redirect to the bpf_helper_changes_pkt_data helper and
one that exposes the correct map_flags for the lpm map in fdinfo.
For details, please see the individual patches.

v1 -> v2:
  - Reworked the first patch so that env->strict_alignment is the
    final indicator of whether we have to deal with strict
    alignment, rather than having CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
    checks in various locations; checking env->strict_alignment
    alone is sufficient after that. Thanks for spotting, Dave!
  - Added patch 3 and 4.
  - Rest as is.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5990baaa 614d0d77

--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -272,6 +272,16 @@ struct bpf_prog_aux;
 		.off   = OFF,					\
 		.imm   = IMM })
 
+/* Unconditional jumps, goto pc + off16 */
+
+#define BPF_JMP_A(OFF)						\
+	((struct bpf_insn) {					\
+		.code  = BPF_JMP | BPF_JA,			\
+		.dst_reg = 0,					\
+		.src_reg = 0,					\
+		.off   = OFF,					\
+		.imm   = 0 })
+
 /* Function call */
 
 #define BPF_EMIT_CALL(FUNC)					\
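A note on the new macro for readers assembling programs by hand:
BPF_JMP_A() encodes an unconditional goto pc + off16, with the offset
counted relative to the instruction that follows the jump. A minimal
illustrative sketch (the surrounding program is made up for this
example):

	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_JMP_A(1),			/* goto pc + 1: skip next insn */
		BPF_MOV64_IMM(BPF_REG_0, 2),	/* never executed */
		BPF_EXIT_INSN(),		/* returns 1 */
	};

The selftest added further below uses a negative offset, BPF_JMP_A(-6),
to jump backwards into an already-explored path and exercise the
verifier's pruning logic.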

--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -86,6 +86,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	array->map.key_size = attr->key_size;
 	array->map.value_size = attr->value_size;
 	array->map.max_entries = attr->max_entries;
+	array->map.map_flags = attr->map_flags;
 	array->elem_size = elem_size;
 
 	if (!percpu)

--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -432,6 +432,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
 	trie->map.key_size = attr->key_size;
 	trie->map.value_size = attr->value_size;
 	trie->map.max_entries = attr->max_entries;
+	trie->map.map_flags = attr->map_flags;
 	trie->data_size = attr->key_size -
 			  offsetof(struct bpf_lpm_trie_key, data);
 	trie->max_prefixlen = trie->data_size * 8;

--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -88,6 +88,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 	smap->map.key_size = attr->key_size;
 	smap->map.value_size = value_size;
 	smap->map.max_entries = attr->max_entries;
+	smap->map.map_flags = attr->map_flags;
 	smap->n_buckets = n_buckets;
 	smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
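Why these three one-line changes matter: a map's fdinfo file prints
map->map_flags, so any map type that did not copy attr->map_flags at
allocation time (the lpm trie being the case that prompted the fix)
reported flags of 0 even when the map had been created with e.g.
BPF_F_NO_PREALLOC. The read side in kernel/bpf/syscall.c looks roughly
like the following (a trimmed sketch of that era's code, not part of
this diff):

	static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
	{
		const struct bpf_map *map = filp->private_data;

		seq_printf(m,
			   "map_type:\t%u\n"
			   "key_size:\t%u\n"
			   "value_size:\t%u\n"
			   "max_entries:\t%u\n"
			   "map_flags:\t%#x\n",
			   map->map_type,
			   map->key_size,
			   map->value_size,
			   map->max_entries,
			   map->map_flags);	/* read 0 before this fix */
	}

With the fix, reading /proc/<pid>/fdinfo/<map-fd> for an lpm trie
created with BPF_F_NO_PREALLOC shows map_flags: 0x1 as expected.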

--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -463,19 +463,22 @@ static const int caller_saved[CALLER_SAVED_REGS] = {
 	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
 };
 
+static void mark_reg_not_init(struct bpf_reg_state *regs, u32 regno)
+{
+	BUG_ON(regno >= MAX_BPF_REG);
+	memset(&regs[regno], 0, sizeof(regs[regno]));
+	regs[regno].type = NOT_INIT;
+	regs[regno].min_value = BPF_REGISTER_MIN_RANGE;
+	regs[regno].max_value = BPF_REGISTER_MAX_RANGE;
+}
+
 static void init_reg_state(struct bpf_reg_state *regs)
 {
 	int i;
 
-	for (i = 0; i < MAX_BPF_REG; i++) {
-		regs[i].type = NOT_INIT;
-		regs[i].imm = 0;
-		regs[i].min_value = BPF_REGISTER_MIN_RANGE;
-		regs[i].max_value = BPF_REGISTER_MAX_RANGE;
-		regs[i].min_align = 0;
-		regs[i].aux_off = 0;
-		regs[i].aux_off_align = 0;
-	}
+	for (i = 0; i < MAX_BPF_REG; i++)
+		mark_reg_not_init(regs, i);
 
 	/* frame pointer */
 	regs[BPF_REG_FP].type = FRAME_PTR;
@@ -843,9 +846,6 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
 {
 	bool strict = env->strict_alignment;
 
-	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
-		strict = true;
-
 	switch (reg->type) {
 	case PTR_TO_PACKET:
 		return check_pkt_ptr_alignment(reg, off, size, strict);
@@ -1349,7 +1349,6 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 	struct bpf_verifier_state *state = &env->cur_state;
 	const struct bpf_func_proto *fn = NULL;
 	struct bpf_reg_state *regs = state->regs;
-	struct bpf_reg_state *reg;
 	struct bpf_call_arg_meta meta;
 	bool changes_data;
 	int i, err;
@@ -1416,11 +1415,8 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 	}
 
 	/* reset caller saved regs */
-	for (i = 0; i < CALLER_SAVED_REGS; i++) {
-		reg = regs + caller_saved[i];
-		reg->type = NOT_INIT;
-		reg->imm = 0;
-	}
+	for (i = 0; i < CALLER_SAVED_REGS; i++)
+		mark_reg_not_init(regs, caller_saved[i]);
 
 	/* update return register */
 	if (fn->ret_type == RET_INTEGER) {
@@ -2448,7 +2444,6 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
 {
 	struct bpf_reg_state *regs = env->cur_state.regs;
 	u8 mode = BPF_MODE(insn->code);
-	struct bpf_reg_state *reg;
 	int i, err;
 
 	if (!may_access_skb(env->prog->type)) {
@@ -2481,11 +2476,8 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
 	}
 
 	/* reset caller saved regs to unreadable */
-	for (i = 0; i < CALLER_SAVED_REGS; i++) {
-		reg = regs + caller_saved[i];
-		reg->type = NOT_INIT;
-		reg->imm = 0;
-	}
+	for (i = 0; i < CALLER_SAVED_REGS; i++)
+		mark_reg_not_init(regs, caller_saved[i]);
 
 	/* mark destination R0 register as readable, since it contains
 	 * the value fetched from the packet
@@ -2696,7 +2688,8 @@ static int check_cfg(struct bpf_verifier_env *env)
 /* the following conditions reduce the number of explored insns
  * from ~140k to ~80k for ultra large programs that use a lot of ptr_to_packet
  */
-static bool compare_ptrs_to_packet(struct bpf_reg_state *old,
+static bool compare_ptrs_to_packet(struct bpf_verifier_env *env,
+				   struct bpf_reg_state *old,
 				   struct bpf_reg_state *cur)
 {
 	if (old->id != cur->id)
@@ -2739,7 +2732,7 @@ static bool compare_ptrs_to_packet(struct bpf_reg_state *old,
	 * 'if (R4 > data_end)' and all further insn were already good with r=20,
	 * so they will be good with r=30 and we can prune the search.
	 */
-	if (old->off <= cur->off &&
+	if (!env->strict_alignment && old->off <= cur->off &&
 	    old->off >= old->range && cur->off >= cur->range)
 		return true;
@@ -2810,7 +2803,7 @@ static bool states_equal(struct bpf_verifier_env *env,
 			continue;
 		if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET &&
-		    compare_ptrs_to_packet(rold, rcur))
+		    compare_ptrs_to_packet(env, rold, rcur))
 			continue;
 
 		return false;
@@ -3588,10 +3581,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
 	} else {
 		log_level = 0;
 	}
-	if (attr->prog_flags & BPF_F_STRICT_ALIGNMENT)
+
+	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
+	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
 		env->strict_alignment = true;
-	else
-		env->strict_alignment = false;
 
 	ret = replace_map_fd_with_map_ptr(env);
 	if (ret < 0)
@@ -3697,7 +3690,10 @@ int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
 	mutex_lock(&bpf_verifier_lock);
 
 	log_level = 0;
+
 	env->strict_alignment = false;
+	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+		env->strict_alignment = true;
 
 	env->explored_states = kcalloc(env->prog->len,
 				       sizeof(struct bpf_verifier_state_list *),
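An editorial note on why mark_reg_not_init() does a full memset rather
than the old field-by-field reset: after a helper call or an
ld_abs/ld_ind, only type and imm used to be cleared, so auxiliary state
such as min_value/max_value, min_align, aux_off and aux_off_align could
survive into registers that were supposed to be unknown, and later feed
stale data into alignment tracking and pruning decisions. A
before/after sketch, using only the bpf_reg_state fields visible in
this diff:

	/* Old reset (sketch): partial, everything else survives the call. */
	reg->type = NOT_INIT;
	reg->imm = 0;
	/* reg->min_value, reg->max_value, reg->min_align, reg->aux_off and
	 * reg->aux_off_align keep whatever an earlier program path left there.
	 */

	/* New reset: wipe the whole register state, then restore the
	 * invariants a NOT_INIT register is expected to carry.
	 */
	memset(reg, 0, sizeof(*reg));
	reg->type = NOT_INIT;
	reg->min_value = BPF_REGISTER_MIN_RANGE;
	reg->max_value = BPF_REGISTER_MAX_RANGE;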

--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2281,6 +2281,7 @@ bool bpf_helper_changes_pkt_data(void *func)
 	    func == bpf_skb_change_head ||
 	    func == bpf_skb_change_tail ||
 	    func == bpf_skb_pull_data ||
+	    func == bpf_clone_redirect ||
 	    func == bpf_l3_csum_replace ||
 	    func == bpf_l4_csum_replace ||
 	    func == bpf_xdp_adjust_head)
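For context on why bpf_clone_redirect belongs in this list: the
verifier's check_call() asks bpf_helper_changes_pkt_data() whether a
helper may write to or reallocate packet data, and if so it must forget
every packet pointer derived before the call, because the data may have
moved. bpf_clone_redirect() can reallocate skb data just like
bpf_skb_change_head() and friends. Roughly, as a simplified sketch of
the call site in kernel/bpf/verifier.c:

	/* In check_call() (sketch): packet pointers derived before a helper
	 * that can change packet data must not survive the call.
	 */
	changes_data = bpf_helper_changes_pkt_data(fn->func);

	/* ... argument checks and caller-saved register reset ... */

	if (changes_data)
		clear_all_pkt_pointers(env);

Without the entry above, pointers into the packet stayed live across
bpf_clone_redirect(), permitting reads from data the helper may have
freed or moved.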

--- a/tools/include/linux/filter.h
+++ b/tools/include/linux/filter.h
@@ -208,6 +208,16 @@
 		.off   = OFF,					\
 		.imm   = IMM })
 
+/* Unconditional jumps, goto pc + off16 */
+
+#define BPF_JMP_A(OFF)						\
+	((struct bpf_insn) {					\
+		.code  = BPF_JMP | BPF_JA,			\
+		.dst_reg = 0,					\
+		.src_reg = 0,					\
+		.off   = OFF,					\
+		.imm   = 0 })
+
 /* Function call */
 
 #define BPF_EMIT_CALL(FUNC)					\

--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -49,6 +49,7 @@
 #define MAX_NR_MAPS	4
 
 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
+#define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)
 
 struct bpf_test {
 	const char *descr;
@@ -2614,6 +2615,30 @@ static struct bpf_test tests[] = {
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 	},
+	{
+		"direct packet access: test17 (pruning, alignment)",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
+			BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+			BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+			BPF_JMP_A(-6),
+		},
+		.errstr = "misaligned packet access off 2+15+-4 size 4",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+	},
 	{
 		"helper access to packet: test1, valid packet_ptr range",
 		.insns = {
@@ -3340,6 +3365,70 @@ static struct bpf_test tests[] = {
 		.result = ACCEPT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
 	},
+	{
+		"alu ops on ptr_to_map_value_or_null, 1",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_1, 10),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 4 },
+		.errstr = "R4 invalid mem access",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS
+	},
+	{
+		"alu ops on ptr_to_map_value_or_null, 2",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_1, 10),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 4 },
+		.errstr = "R4 invalid mem access",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS
+	},
+	{
+		"alu ops on ptr_to_map_value_or_null, 3",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_1, 10),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+			BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 4 },
+		.errstr = "R4 invalid mem access",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS
+	},
 	{
 		"invalid memory access with multiple map_lookup_elem calls",
 		.insns = {
@@ -4937,7 +5026,149 @@ static struct bpf_test tests[] = {
 		.fixup_map_in_map = { 3 },
 		.errstr = "R1 type=map_value_or_null expected=map_ptr",
 		.result = REJECT,
-	}
+	},
+	{
+		"ld_abs: check calling conv, r1",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_1, 0),
+			BPF_LD_ABS(BPF_W, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R1 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_abs: check calling conv, r2",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_LD_ABS(BPF_W, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R2 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_abs: check calling conv, r3",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_LD_ABS(BPF_W, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R3 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_abs: check calling conv, r4",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_4, 0),
+			BPF_LD_ABS(BPF_W, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R4 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_abs: check calling conv, r5",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_5, 0),
+			BPF_LD_ABS(BPF_W, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R5 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_abs: check calling conv, r7",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_7, 0),
+			BPF_LD_ABS(BPF_W, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+	},
+	{
+		"ld_ind: check calling conv, r1",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_1, 1),
+			BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R1 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_ind: check calling conv, r2",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_2, 1),
+			BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R2 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_ind: check calling conv, r3",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_3, 1),
+			BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R3 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_ind: check calling conv, r4",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_4, 1),
+			BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R4 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_ind: check calling conv, r5",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_5, 1),
+			BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "R5 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"ld_ind: check calling conv, r7",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_7, 1),
+			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+	},
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
@@ -5059,9 +5290,9 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
 	do_test_fixup(test, prog, map_fds);
 
-	fd_prog = bpf_load_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
-				   prog, prog_len, "GPL", 0, bpf_vlog,
-				   sizeof(bpf_vlog));
+	fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
+				     prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
+				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog));
 
 	expected_ret = unpriv && test->result_unpriv != UNDEF ?
 		       test->result_unpriv : test->result;
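The switch from bpf_load_program() to bpf_verify_program() is what lets
a test case opt into BPF_F_STRICT_ALIGNMENT at load time via the new
F_LOAD_WITH_STRICT_ALIGNMENT flag. At the syscall level, such a load
boils down to something like the sketch below (field names from
uapi/linux/bpf.h; ptr_to_u64() is the usual cast helper used throughout
the selftests):

	union bpf_attr attr = {};

	attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
	attr.insn_cnt = prog_len;			/* number of insns */
	attr.insns = ptr_to_u64(prog);
	attr.license = ptr_to_u64("GPL");
	attr.prog_flags = BPF_F_STRICT_ALIGNMENT;	/* force strict checking */
	attr.log_buf = ptr_to_u64(bpf_vlog);
	attr.log_size = sizeof(bpf_vlog);
	attr.log_level = 1;

	fd_prog = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));

This mirrors what bpf_verify_program() does internally when its
strict_alignment argument is non-zero.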