Commit e688c3db authored by Alexei Starovoitov, committed by Daniel Borkmann

bpf: Fix register equivalence tracking.

The 64-bit JEQ/JNE handling in reg_set_min_max() was clearing reg->id in either
the true or the false branch. When the 'if (reg->id)' check was then done on the
other branch, the counterpart register would have reg->id == 0 by the time
find_equal_scalars() was called. In that case the helper would incorrectly
identify other registers with id == 0 as equivalent and propagate state to them.
Fix it by preserving the ID across reg_set_min_max().
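
To make the failure mode concrete, here is a minimal standalone sketch. It is
illustrative only: struct reg and propagate_equal_scalars() are stand-ins, not
the kernel's bpf_reg_state and find_equal_scalars() (which walks the real
register file and spilled stack slots). Once the compared register's id has
been reset to 0, every scalar that never had an equivalence id in the first
place also matches and inherits the known value:

  /* Toy model of equivalence propagation; id == 0 means "no link recorded" */
  struct reg {
          int id;
          long long min;
          long long max;
  };

  static void propagate_equal_scalars(struct reg *regs, int nregs,
                                      const struct reg *known)
  {
          int i;

          for (i = 0; i < nregs; i++) {
                  /* If known->id was wrongly cleared to 0, this also matches
                   * every register that never had an id and clobbers its range.
                   */
                  if (regs[i].id == known->id) {
                          regs[i].min = known->min;
                          regs[i].max = known->max;
                  }
          }
  }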

In other words, any kind of comparison operator on a scalar register
should preserve its ID, so that the verifier can recognize:

r1 = r2
if (r1 == 20) {
  #1 here both r1 and r2 == 20
} else if (r2 < 20) {
  #2 here both r1 and r2 < 20
}

This patch addresses case #1. Case #2 was already handled correctly.
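
Building on the toy sketch above (again purely illustrative, not kernel code),
case #1 works out as follows: r1 and r2 share a non-zero id because of the
r1 = r2 copy, so marking r1 known while keeping that id updates only the
genuine copy and leaves unrelated id == 0 scalars untouched:

  #include <assert.h>

  int main(void)
  {
          struct reg regs[3] = {
                  { .id = 0, .min = -100, .max = 100 },  /* unrelated scalar */
                  { .id = 1 },                           /* r1, copy of r2   */
                  { .id = 1 },                           /* r2               */
          };

          /* 'if (r1 == 20)' true branch: r1 becomes known to be 20.  With the
           * fix its id stays 1; with the bug it would be reset to 0 and the
           * propagation below would clobber regs[0] as well.
           */
          regs[1].min = regs[1].max = 20;
          propagate_equal_scalars(regs, 3, &regs[1]);

          assert(regs[2].min == 20 && regs[2].max == 20);    /* r2 == 20 too */
          assert(regs[0].min == -100 && regs[0].max == 100); /* left alone   */
          return 0;
  }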

Fixes: 75748837 ("bpf: Propagate scalar ranges through register assignments.")
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Tested-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20201014175608.1416-1-alexei.starovoitov@gmail.com
parent ccdf7fae
@@ -1010,14 +1010,9 @@ static const int caller_saved[CALLER_SAVED_REGS] = {
 static void __mark_reg_not_init(const struct bpf_verifier_env *env,
 				struct bpf_reg_state *reg);
 
-/* Mark the unknown part of a register (variable offset or scalar value) as
- * known to have the value @imm.
- */
-static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
+/* This helper doesn't clear reg->id */
+static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
 {
-	/* Clear id, off, and union(map_ptr, range) */
-	memset(((u8 *)reg) + sizeof(reg->type), 0,
-	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
 	reg->var_off = tnum_const(imm);
 	reg->smin_value = (s64)imm;
 	reg->smax_value = (s64)imm;
@@ -1030,6 +1025,17 @@ static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
 	reg->u32_max_value = (u32)imm;
 }
 
+/* Mark the unknown part of a register (variable offset or scalar value) as
+ * known to have the value @imm.
+ */
+static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
+{
+	/* Clear id, off, and union(map_ptr, range) */
+	memset(((u8 *)reg) + sizeof(reg->type), 0,
+	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
+	___mark_reg_known(reg, imm);
+}
+
 static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
 {
 	reg->var_off = tnum_const_subreg(reg->var_off, imm);
@@ -7001,14 +7007,18 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
 		struct bpf_reg_state *reg =
 				    opcode == BPF_JEQ ? true_reg : false_reg;
 
-		/* For BPF_JEQ, if this is false we know nothing Jon Snow, but
-		 * if it is true we know the value for sure. Likewise for
-		 * BPF_JNE.
+		/* JEQ/JNE comparison doesn't change the register equivalence.
+		 * r1 = r2;
+		 * if (r1 == 42) goto label;
+		 * ...
+		 * label: // here both r1 and r2 are known to be 42.
+		 *
+		 * Hence when marking register as known preserve it's ID.
 		 */
 		if (is_jmp32)
 			__mark_reg32_known(reg, val32);
 		else
-			__mark_reg_known(reg, val);
+			___mark_reg_known(reg, val);
 		break;
 	}
 	case BPF_JSET:
@@ -7551,7 +7561,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 				reg_combine_min_max(&other_branch_regs[insn->src_reg],
 						    &other_branch_regs[insn->dst_reg],
 						    src_reg, dst_reg, opcode);
-			if (src_reg->id) {
+			if (src_reg->id &&
+			    !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) {
 				find_equal_scalars(this_branch, src_reg);
 				find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]);
 			}
@@ -7563,7 +7574,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 					opcode, is_jmp32);
 	}
 
-	if (dst_reg->type == SCALAR_VALUE && dst_reg->id) {
+	if (dst_reg->type == SCALAR_VALUE && dst_reg->id &&
+	    !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) {
 		find_equal_scalars(this_branch, dst_reg);
 		find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]);
 	}
@@ -241,3 +241,29 @@
 	.result = ACCEPT,
 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
 },
+{
+	"regalloc, spill, JEQ",
+	.insns = {
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+	BPF_LD_MAP_FD(BPF_REG_1, 0),
+	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), /* spill r0 */
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 0),
+	/* The verifier will walk the rest twice with r0 == 0 and r0 == map_value */
+	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 20, 0),
+	/* The verifier will walk the rest two more times with r0 == 20 and r0 == unknown */
+	BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -8), /* fill r3 with map_value */
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 1), /* skip ldx if map_value == NULL */
+	/* Buggy verifier will think that r3 == 20 here */
+	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), /* read from map_value */
+	BPF_EXIT_INSN(),
+	},
+	.fixup_map_hash_48b = { 4 },
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},