Commit 3db9128f authored by Daniel Borkmann

Merge branch 'bpf-verifier-sec-fixes'

Alexei Starovoitov says:

====================
This patch set addresses a set of security vulnerabilities
in bpf verifier logic discovered by Jann Horn.
All of the patches are candidates for 4.14 stable.
====================
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parents 19c832ed 2255f8d5
......@@ -15,11 +15,11 @@
* In practice this is far bigger than any realistic pointer offset; this limit
* ensures that umax_value + (int)off + (int)size cannot overflow a u64.
*/
#define BPF_MAX_VAR_OFF (1ULL << 31)
#define BPF_MAX_VAR_OFF (1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
* that converting umax_value to int cannot overflow.
*/
#define BPF_MAX_VAR_SIZ INT_MAX
#define BPF_MAX_VAR_SIZ (1 << 29)
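Aside (illustrative only, not part of the patch): the choice of 1 << 29 can be sanity-checked with simple arithmetic. Once the fixed offset, the variable offset and the access size are each capped at 1 << 29 in magnitude, their sum stays below 3 * 2^29 < 2^31, so it fits in a signed 32-bit int and a fortiori cannot overflow a u64. A minimal sketch:

#include <assert.h>
#include <stdint.h>

#define MAX_COMPONENT (1 << 29)	/* mirrors BPF_MAX_VAR_OFF / BPF_MAX_VAR_SIZ */

int main(void)
{
	/* worst case: fixed offset + variable offset + access size */
	int64_t worst = 3LL * MAX_COMPONENT;

	assert(worst == 0x60000000);	/* 1.5 * 2^30 */
	assert(worst < INT32_MAX);	/* no s32 overflow, hence no u64 overflow */
	return 0;
}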
/* Liveness marks, used for registers and spilled-regs (in stack slots).
* Read marks propagate upwards until they find a write mark; they record that
......
......@@ -1059,6 +1059,11 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
break;
case PTR_TO_STACK:
pointer_desc = "stack ";
/* The stack spill tracking logic in check_stack_write()
* and check_stack_read() relies on stack accesses being
* aligned.
*/
strict = true;
break;
default:
break;
......@@ -1067,6 +1072,29 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
strict);
}
/* truncate register to smaller size (in bytes)
* must be called with size < BPF_REG_SIZE
*/
static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
{
u64 mask;
/* clear high bits in bit representation */
reg->var_off = tnum_cast(reg->var_off, size);
/* fix arithmetic bounds */
mask = ((u64)1 << (size * 8)) - 1;
if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
reg->umin_value &= mask;
reg->umax_value &= mask;
} else {
reg->umin_value = 0;
reg->umax_value = mask;
}
reg->smin_value = reg->umin_value;
reg->smax_value = reg->umax_value;
}
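A standalone sketch (illustrative only, not kernel code) of the bounds clamp above: when the bits above the new width are identical for umin_value and umax_value, truncation cannot reorder the bounds, so both are simply masked; otherwise the truncated value may wrap around and the bounds collapse to the full [0, mask] range.

#include <stdio.h>
#include <stdint.h>

/* same clamp as coerce_reg_to_size(), minus the tnum handling */
static void clamp_bounds(uint64_t *umin, uint64_t *umax, int size)
{
	uint64_t mask = ((uint64_t)1 << (size * 8)) - 1;

	if ((*umin & ~mask) == (*umax & ~mask)) {
		*umin &= mask;	/* high bits agree: masking is exact */
		*umax &= mask;
	} else {
		*umin = 0;	/* value may wrap: give up on the bounds */
		*umax = mask;
	}
}

int main(void)
{
	uint64_t a_min = 0x100000005ULL, a_max = 0x1000000ffULL;
	uint64_t b_min = 0xffULL, b_max = 0x100000000ULL;

	clamp_bounds(&a_min, &a_max, 4);	/* -> [0x5, 0xff] */
	clamp_bounds(&b_min, &b_max, 4);	/* -> [0x0, 0xffffffff] */
	printf("a: [%#llx, %#llx]\n", (unsigned long long)a_min, (unsigned long long)a_max);
	printf("b: [%#llx, %#llx]\n", (unsigned long long)b_min, (unsigned long long)b_max);
	return 0;
}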
/* check whether memory at (regno + off) is accessible for t = (read | write)
* if t==write, value_regno is a register whose value is stored into memory
* if t==read, value_regno is a register which will receive the value from memory
......@@ -1200,9 +1228,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
regs[value_regno].type == SCALAR_VALUE) {
/* b/h/w load zero-extends, mark upper bits as known 0 */
regs[value_regno].var_off =
tnum_cast(regs[value_regno].var_off, size);
__update_reg_bounds(&regs[value_regno]);
coerce_reg_to_size(&regs[value_regno], size);
}
return err;
}
......@@ -1282,6 +1308,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
verbose(env, "invalid variable stack read R%d var_off=%s\n",
regno, tn_buf);
return -EACCES;
}
off = regs[regno].off + regs[regno].var_off.value;
if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
......@@ -1772,14 +1799,6 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
return 0;
}
static void coerce_reg_to_32(struct bpf_reg_state *reg)
{
/* clear high 32 bits */
reg->var_off = tnum_cast(reg->var_off, 4);
/* Update bounds */
__update_reg_bounds(reg);
}
static bool signed_add_overflows(s64 a, s64 b)
{
/* Do the add in u64, where overflow is well-defined */
......@@ -1800,6 +1819,41 @@ static bool signed_sub_overflows(s64 a, s64 b)
return res > a;
}
static bool check_reg_sane_offset(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
enum bpf_reg_type type)
{
bool known = tnum_is_const(reg->var_off);
s64 val = reg->var_off.value;
s64 smin = reg->smin_value;
if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
verbose(env, "math between %s pointer and %lld is not allowed\n",
reg_type_str[type], val);
return false;
}
if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
verbose(env, "%s pointer offset %d is not allowed\n",
reg_type_str[type], reg->off);
return false;
}
if (smin == S64_MIN) {
verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
reg_type_str[type]);
return false;
}
if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
verbose(env, "value %lld makes %s pointer be out of bounds\n",
smin, reg_type_str[type]);
return false;
}
return true;
}
/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
* Caller should also handle BPF_MOV case separately.
* If we return -EACCES, caller may want to try again treating pointer as a
......@@ -1868,6 +1922,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
dst_reg->type = ptr_reg->type;
dst_reg->id = ptr_reg->id;
if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
!check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
return -EINVAL;
switch (opcode) {
case BPF_ADD:
/* We can take a fixed offset as long as it doesn't overflow
......@@ -1998,12 +2056,19 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
return -EACCES;
}
if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
return -EINVAL;
__update_reg_bounds(dst_reg);
__reg_deduce_bounds(dst_reg);
__reg_bound_offset(dst_reg);
return 0;
}
/* WARNING: This function does calculations on 64-bit values, but the actual
* execution may occur on 32-bit values. Therefore, things like bitshifts
* need extra checks in the 32-bit case.
*/
static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
struct bpf_insn *insn,
struct bpf_reg_state *dst_reg,
......@@ -2014,12 +2079,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
bool src_known, dst_known;
s64 smin_val, smax_val;
u64 umin_val, umax_val;
u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
if (BPF_CLASS(insn->code) != BPF_ALU64) {
/* 32-bit ALU ops are (32,32)->64 */
coerce_reg_to_32(dst_reg);
coerce_reg_to_32(&src_reg);
}
smin_val = src_reg.smin_value;
smax_val = src_reg.smax_value;
umin_val = src_reg.umin_value;
......@@ -2027,6 +2088,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
src_known = tnum_is_const(src_reg.var_off);
dst_known = tnum_is_const(dst_reg->var_off);
if (!src_known &&
opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
__mark_reg_unknown(dst_reg);
return 0;
}
switch (opcode) {
case BPF_ADD:
if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
......@@ -2155,9 +2222,9 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
__update_reg_bounds(dst_reg);
break;
case BPF_LSH:
if (umax_val > 63) {
/* Shifts greater than 63 are undefined. This includes
* shifts by a negative number.
if (umax_val >= insn_bitness) {
/* Shifts greater than 31 or 63 are undefined.
* This includes shifts by a negative number.
*/
mark_reg_unknown(env, regs, insn->dst_reg);
break;
......@@ -2183,27 +2250,29 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
__update_reg_bounds(dst_reg);
break;
case BPF_RSH:
if (umax_val > 63) {
/* Shifts greater than 63 are undefined. This includes
* shifts by a negative number.
if (umax_val >= insn_bitness) {
/* Shifts greater than 31 or 63 are undefined.
* This includes shifts by a negative number.
*/
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
/* BPF_RSH is an unsigned shift, so make the appropriate casts */
if (dst_reg->smin_value < 0) {
if (umin_val) {
/* Sign bit will be cleared */
dst_reg->smin_value = 0;
} else {
/* Lost sign bit information */
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
}
} else {
dst_reg->smin_value =
(u64)(dst_reg->smin_value) >> umax_val;
}
/* BPF_RSH is an unsigned shift. If the value in dst_reg might
* be negative, then either:
* 1) src_reg might be zero, so the sign bit of the result is
* unknown, so we lose our signed bounds
* 2) it's known negative, thus the unsigned bounds capture the
* signed bounds
* 3) the signed bounds cross zero, so they tell us nothing
* about the result
* If the value in dst_reg is known nonnegative, then again the
* unsigned bounds capture the signed bounds.
* Thus, in all cases it suffices to blow away our signed bounds
* and rely on inferring new ones from the unsigned bounds and
* var_off of the result.
*/
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
if (src_known)
dst_reg->var_off = tnum_rshift(dst_reg->var_off,
umin_val);
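A small illustration (assumed example, not part of the patch) of case 1) in the comment above: when dst_reg may be negative and the shift count may be zero, a logical right shift either keeps the value negative (count 0) or turns it into a huge positive number (count >= 1), so no usable signed bounds remain and only the unsigned bounds and var_off carry information.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t v = -1;	/* a dst_reg value that might be negative */

	/* shift count 0: the sign bit survives, result is still -1 */
	printf("%lld\n", (long long)(int64_t)((uint64_t)v >> 0));
	/* shift count 1: the sign bit is cleared, result is INT64_MAX */
	printf("%lld\n", (long long)(int64_t)((uint64_t)v >> 1));
	return 0;
}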
......@@ -2219,6 +2288,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
break;
}
if (BPF_CLASS(insn->code) != BPF_ALU64) {
/* 32-bit ALU ops are (32,32)->32 */
coerce_reg_to_size(dst_reg, 4);
coerce_reg_to_size(&src_reg, 4);
}
__reg_deduce_bounds(dst_reg);
__reg_bound_offset(dst_reg);
return 0;
......@@ -2396,17 +2471,20 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
return -EACCES;
}
mark_reg_unknown(env, regs, insn->dst_reg);
/* high 32 bits are known zero. */
regs[insn->dst_reg].var_off = tnum_cast(
regs[insn->dst_reg].var_off, 4);
__update_reg_bounds(&regs[insn->dst_reg]);
coerce_reg_to_size(&regs[insn->dst_reg], 4);
}
} else {
/* case: R = imm
* remember the value we stored into this reg
*/
regs[insn->dst_reg].type = SCALAR_VALUE;
__mark_reg_known(regs + insn->dst_reg, insn->imm);
if (BPF_CLASS(insn->code) == BPF_ALU64) {
__mark_reg_known(regs + insn->dst_reg,
insn->imm);
} else {
__mark_reg_known(regs + insn->dst_reg,
(u32)insn->imm);
}
}
} else if (opcode > BPF_END) {
......@@ -3437,15 +3515,14 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
return range_within(rold, rcur) &&
tnum_in(rold->var_off, rcur->var_off);
} else {
/* if we knew anything about the old value, we're not
* equal, because we can't know anything about the
* scalar value of the pointer in the new value.
/* We're trying to use a pointer in place of a scalar.
* Even if the scalar was unbounded, this could lead to
* pointer leaks because scalars are allowed to leak
* while pointers are not. We could make this safe in
* special cases if root is calling us, but it's
* probably not worth the hassle.
*/
return rold->umin_value == 0 &&
rold->umax_value == U64_MAX &&
rold->smin_value == S64_MIN &&
rold->smax_value == S64_MAX &&
tnum_is_unknown(rold->var_off);
return false;
}
case PTR_TO_MAP_VALUE:
/* If the new min/max/var_off satisfy the old ones and
......
......@@ -606,7 +606,6 @@ static struct bpf_test tests[] = {
},
.errstr = "misaligned stack access",
.result = REJECT,
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
"invalid map_fd for function call",
......@@ -1797,7 +1796,6 @@ static struct bpf_test tests[] = {
},
.result = REJECT,
.errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
"PTR_TO_STACK store/load - bad alignment on reg",
......@@ -1810,7 +1808,6 @@ static struct bpf_test tests[] = {
},
.result = REJECT,
.errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
"PTR_TO_STACK store/load - out of bounds low",
......@@ -6324,7 +6321,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 min value is negative",
.errstr = "unbounded min value",
.result = REJECT,
},
{
......@@ -6348,7 +6345,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 min value is negative",
.errstr = "unbounded min value",
.result = REJECT,
},
{
......@@ -6374,7 +6371,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R8 invalid mem access 'inv'",
.errstr = "unbounded min value",
.result = REJECT,
},
{
......@@ -6399,7 +6396,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R8 invalid mem access 'inv'",
.errstr = "unbounded min value",
.result = REJECT,
},
{
......@@ -6447,7 +6444,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 min value is negative",
.errstr = "unbounded min value",
.result = REJECT,
},
{
......@@ -6518,7 +6515,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 min value is negative",
.errstr = "unbounded min value",
.result = REJECT,
},
{
......@@ -6569,7 +6566,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 min value is negative",
.errstr = "unbounded min value",
.result = REJECT,
},
{
......@@ -6596,7 +6593,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 min value is negative",
.errstr = "unbounded min value",
.result = REJECT,
},
{
......@@ -6622,7 +6619,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 min value is negative",
.errstr = "unbounded min value",
.result = REJECT,
},
{
......@@ -6651,7 +6648,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 min value is negative",
.errstr = "unbounded min value",
.result = REJECT,
},
{
......@@ -6681,7 +6678,7 @@ static struct bpf_test tests[] = {
BPF_JMP_IMM(BPF_JA, 0, 0, -7),
},
.fixup_map1 = { 4 },
.errstr = "R0 min value is negative",
.errstr = "unbounded min value",
.result = REJECT,
},
{
......@@ -6709,8 +6706,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr_unpriv = "R0 pointer comparison prohibited",
.errstr = "R0 min value is negative",
.errstr = "unbounded min value",
.result = REJECT,
.result_unpriv = REJECT,
},
......@@ -6765,6 +6761,462 @@ static struct bpf_test tests[] = {
.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
.result = REJECT,
},
{
"bounds check based on zero-extended MOV",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
/* r2 = 0x0000'0000'ffff'ffff */
BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
/* r2 = 0 */
BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
/* no-op */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
/* access at offset 0 */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
/* exit */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.result = ACCEPT
},
{
"bounds check based on sign-extended MOV. test1",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
/* r2 = 0xffff'ffff'ffff'ffff */
BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
/* r2 = 0xffff'ffff */
BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
/* r0 = <oob pointer> */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
/* access to OOB pointer */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
/* exit */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "map_value pointer and 4294967295",
.result = REJECT
},
{
"bounds check based on sign-extended MOV. test2",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
/* r2 = 0xffff'ffff'ffff'ffff */
BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
/* r2 = 0xfff'ffff */
BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
/* r0 = <oob pointer> */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
/* access to OOB pointer */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
/* exit */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 min value is outside of the array range",
.result = REJECT
},
{
"bounds check based on reg_off + var_off + insn_off. test1",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 4 },
.errstr = "value_size=8 off=1073741825",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"bounds check based on reg_off + var_off + insn_off. test2",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 4 },
.errstr = "value 1073741823",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"bounds check after truncation of non-boundary-crossing range",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
/* r1 = [0x00, 0xff] */
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_2, 1),
/* r2 = 0x10'0000'0000 */
BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
/* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
/* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
/* r1 = [0x00, 0xff] */
BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
/* r1 = 0 */
BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
/* no-op */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
/* access at offset 0 */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
/* exit */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.result = ACCEPT
},
{
"bounds check after truncation of boundary-crossing range (1)",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
/* r1 = [0x00, 0xff] */
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
/* r1 = [0xffff'ff80, 0x1'0000'007f] */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
/* r1 = [0xffff'ff80, 0xffff'ffff] or
* [0x0000'0000, 0x0000'007f]
*/
BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
/* r1 = [0x00, 0xff] or
* [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
*/
BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
/* r1 = 0 or
* [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
*/
BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
/* no-op or OOB pointer computation */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
/* potentially OOB access */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
/* exit */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
/* not actually fully unbounded, but the bound is very high */
.errstr = "R0 unbounded memory access",
.result = REJECT
},
{
"bounds check after truncation of boundary-crossing range (2)",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
/* r1 = [0x00, 0xff] */
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
/* r1 = [0xffff'ff80, 0x1'0000'007f] */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
/* r1 = [0xffff'ff80, 0xffff'ffff] or
* [0x0000'0000, 0x0000'007f]
* difference to previous test: truncation via MOV32
* instead of ALU32.
*/
BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
/* r1 = [0x00, 0xff] or
* [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
*/
BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
/* r1 = 0 or
* [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
*/
BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
/* no-op or OOB pointer computation */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
/* potentially OOB access */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
/* exit */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
/* not actually fully unbounded, but the bound is very high */
.errstr = "R0 unbounded memory access",
.result = REJECT
},
{
"bounds check after wrapping 32-bit addition",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
/* r1 = 0x7fff'ffff */
BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
/* r1 = 0xffff'fffe */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
/* r1 = 0 */
BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
/* no-op */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
/* access at offset 0 */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
/* exit */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.result = ACCEPT
},
{
"bounds check after shift with oversized count operand",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_MOV64_IMM(BPF_REG_2, 32),
BPF_MOV64_IMM(BPF_REG_1, 1),
/* r1 = (u32)1 << (u32)32 = ? */
BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
/* r1 = [0x0000, 0xffff] */
BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
/* computes unknown pointer, potentially OOB */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
/* potentially OOB access */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
/* exit */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 max value is outside of the array range",
.result = REJECT
},
{
"bounds check after right shift of maybe-negative number",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
/* r1 = [0x00, 0xff] */
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
/* r1 = [-0x01, 0xfe] */
BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
/* r1 = 0 or 0xff'ffff'ffff'ffff */
BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
/* r1 = 0 or 0xffff'ffff'ffff */
BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
/* computes unknown pointer, potentially OOB */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
/* potentially OOB access */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
/* exit */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "R0 unbounded memory access",
.result = REJECT
},
{
"bounds check map access with off+size signed 32bit overflow. test1",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_JMP_A(0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "map_value pointer and 2147483646",
.result = REJECT
},
{
"bounds check map access with off+size signed 32bit overflow. test2",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_JMP_A(0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "pointer offset 1073741822",
.result = REJECT
},
{
"bounds check map access with off+size signed 32bit overflow. test3",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
BPF_JMP_A(0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "pointer offset -1073741822",
.result = REJECT
},
{
"bounds check map access with off+size signed 32bit overflow. test4",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_1, 1000000),
BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
BPF_JMP_A(0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.errstr = "map_value pointer and 1000000000000",
.result = REJECT
},
{
"pointer/scalar confusion in state equality check (way 1)",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_JMP_A(1),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
BPF_JMP_A(0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R0 leaks addr as return value"
},
{
"pointer/scalar confusion in state equality check (way 2)",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
BPF_JMP_A(1),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 3 },
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R0 leaks addr as return value"
},
{
"variable-offset ctx access",
.insns = {
......@@ -6806,6 +7258,71 @@ static struct bpf_test tests[] = {
.result = REJECT,
.prog_type = BPF_PROG_TYPE_LWT_IN,
},
{
"indirect variable-offset stack access",
.insns = {
/* Fill the top 8 bytes of the stack */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
/* Get an unknown value */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
/* Make it small and 4-byte aligned */
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
/* add it to fp. We now have either fp-4 or fp-8, but
* we don't know which
*/
BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
/* dereference it indirectly */
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map1 = { 5 },
.errstr = "variable stack read R2",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_LWT_IN,
},
{
"direct stack access with 32-bit wraparound. test1",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_EXIT_INSN()
},
.errstr = "fp pointer and 2147483647",
.result = REJECT
},
{
"direct stack access with 32-bit wraparound. test2",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_EXIT_INSN()
},
.errstr = "fp pointer and 1073741823",
.result = REJECT
},
{
"direct stack access with 32-bit wraparound. test3",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_EXIT_INSN()
},
.errstr = "fp pointer offset 1073741822",
.result = REJECT
},
{
"liveness pruning and write screening",
.insns = {
......