Commit e5ffcc91 authored by Alexei Starovoitov

Merge branch 'subreg-bounds'

John Fastabend says:

====================
This series adds ALU32 signed and unsigned min/max bounds.

The origin of this work is a fix for do_refine_retval_range(), which before
this series clamped the return value bounds to [0, max]. However, this is
not correct because it is possible for these functions to return negative
errors, so the correct bound is [*MIN, max], where *MIN is the unsigned and
signed minimum (U64_MIN and S64_MIN, respectively) and 'max' is the largest
positive value returned by the routine.
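
For illustration only, a minimal sketch of the corrected clamp (hypothetical
names, not the kernel implementation):

	/* Only the max may be tightened; the min must stay at the type
	 * minimum because the helper can also return a negative errno.
	 */
	struct retval_bounds { s64 smin_value, smax_value; u64 umin_value, umax_value; };

	static void refine_retval_sketch(struct retval_bounds *b, s64 max)
	{
		b->smax_value = max;	/* smin_value stays S64_MIN */
		b->umax_value = max;	/* umin_value stays U64_MIN */
		/* pre-series bug: smin_value/umin_value were clamped to 0 */
	}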

Patch 1 changes do_refine_retval_range() to return the correct bounds, but
this breaks existing programs that were depending on the old incorrect
bound. To repair these old programs we add ALU32 bounds to properly track
the return values from these helpers. The ALU32 bounds are needed because
clang sees that these helpers return an 'int' type and will use jmp32 ops
on the return value. In the current state of things this does little to
help 64-bit bounds, and with patch 1 applied it will cause many programs to
fail the verifier pass. See patch 5 for trace details on how this happens.
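
As a hypothetical example of that pattern (not a program from this series),
the 'int' return type makes clang test the sign on the 32-bit subregister:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("raw_tracepoint/sys_enter")
	int ret_pattern(void *ctx)
	{
		__u64 stack[10];
		int ret;

		ret = bpf_get_stack(ctx, stack, sizeof(stack), 0);
		if (ret < 0)		/* compiled to a jmp32 test on w0 */
			return 0;
		/* the verifier needs 32-bit bounds on w0 to bound 'ret' here */
		return ret;
	}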

Patch 2 does the ALU32 addition: it adds the new bounds and populates them
through the verifier. Design note: initially a separate 32-bit tnum
('var32') was added, but as pointed out by Alexei and Edward it is not
strictly needed, so it was removed here. This worked out nicely.
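
Roughly (a sketch with a hypothetical helper name, not the verifier code),
the subregister's known bits are derived on demand from the existing 64-bit
tnum via the new tnum_subreg(), so only the four min/max fields are stored:

	static void sync_subreg_bounds_sketch(struct bpf_reg_state *reg)
	{
		struct tnum lo = tnum_subreg(reg->var_off);

		if (!lo.mask) {		/* all low 32 bits known */
			reg->u32_min_value = (u32)lo.value;
			reg->u32_max_value = (u32)lo.value;
		}
	}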

Patch 3 notes that the refined return value can now also bound the 32-bit
subregister, allowing better bounds tracking in these cases.
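
As a minimal sketch of that (mirroring the v2 note below; 'ret_reg' stands
for the verifier's R0 state), the refined max is applied at both widths:

	ret_reg->smax_value = max;	/* 64-bit signed bound */
	ret_reg->s32_max_value = max;	/* 32-bit subregister bound */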

Patch 4 adds a C test case to test_progs which will cause the verifier to
fail if the new 32-bit bounds or the do_refine_retval_range() change are
incorrect.

Patches 5 and 6 fix test cases that broke after refining the return
values from helpers. I attempted to be explicit about each failure and
why we need the change. See patches for details.

Patch 7 adds tests to ensure bounds are checked correctly when mixing
alu32, alu64 and jmp32 ops together.

Thanks to Alexei, Edward, and Daniel for the initial feedback; it helped
clean this up a lot.

v2:
  - rebased to bpf-next
  - fixed tnum equals optimization for combining 32->64 bits
  - updated patch to fix verifier test correctly
  - updated refine_retval_range to set both s32_*_value and s*_value; we
    need both to get better bounds tracking
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 4edf16b7 41f70fe0
@@ -123,6 +123,10 @@ struct bpf_reg_state {
 	s64 smax_value; /* maximum possible (s64)value */
 	u64 umin_value; /* minimum possible (u64)value */
 	u64 umax_value; /* maximum possible (u64)value */
+	s32 s32_min_value; /* minimum possible (s32)value */
+	s32 s32_max_value; /* maximum possible (s32)value */
+	u32 u32_min_value; /* minimum possible (u32)value */
+	u32 u32_max_value; /* maximum possible (u32)value */
 	/* parentage chain for liveness checking */
 	struct bpf_reg_state *parent;
 	/* Inside the callee two registers can be both PTR_TO_STACK like
...
@@ -27,6 +27,7 @@
 #define S16_MAX	((s16)(U16_MAX >> 1))
 #define S16_MIN	((s16)(-S16_MAX - 1))
 #define U32_MAX	((u32)~0U)
+#define U32_MIN	((u32)0)
 #define S32_MAX	((s32)(U32_MAX >> 1))
 #define S32_MIN	((s32)(-S32_MAX - 1))
 #define U64_MAX	((u64)~0ULL)
...
@@ -86,4 +86,16 @@ int tnum_strn(char *str, size_t size, struct tnum a);
 /* Format a tnum as tristate binary expansion */
 int tnum_sbin(char *str, size_t size, struct tnum a);
 
+/* Returns the 32-bit subreg */
+struct tnum tnum_subreg(struct tnum a);
+/* Returns the tnum with the lower 32-bit subreg cleared */
+struct tnum tnum_clear_subreg(struct tnum a);
+/* Returns the tnum with the lower 32-bit subreg set to value */
+struct tnum tnum_const_subreg(struct tnum a, u32 value);
+
+/* Returns true if the 32-bit subreg of @a is a known constant */
+static inline bool tnum_subreg_is_const(struct tnum a)
+{
+	return !(tnum_subreg(a)).mask;
+}
 #endif /* _LINUX_TNUM_H */
@@ -194,3 +194,18 @@ int tnum_sbin(char *str, size_t size, struct tnum a)
 	str[min(size - 1, (size_t)64)] = 0;
 	return 64;
 }
+
+struct tnum tnum_subreg(struct tnum a)
+{
+	return tnum_cast(a, 4);
+}
+
+struct tnum tnum_clear_subreg(struct tnum a)
+{
+	return tnum_lshift(tnum_rshift(a, 32), 32);
+}
+
+struct tnum tnum_const_subreg(struct tnum a, u32 value)
+{
+	return tnum_or(tnum_clear_subreg(a), tnum_const(value));
+}
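
For illustration (not part of the diff), the behavior of these helpers on a
fully known 64-bit constant:

	struct tnum t  = tnum_const(0xffffffff00000001ULL);
	struct tnum lo = tnum_subreg(t);	  /* constant 0x1 */
	struct tnum up = tnum_clear_subreg(t);	  /* constant 0xffffffff00000000 */
	struct tnum t2 = tnum_const_subreg(t, 2); /* constant 0xffffffff00000002 */
	bool known = tnum_subreg_is_const(t);	  /* true */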
(collapsed diff not shown)
@@ -82,6 +82,7 @@ static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size)
 void test_get_stack_raw_tp(void)
 {
 	const char *file = "./test_get_stack_rawtp.o";
+	const char *file_err = "./test_get_stack_rawtp_err.o";
 	const char *prog_name = "raw_tracepoint/sys_enter";
 	int i, err, prog_fd, exp_cnt = MAX_CNT_RAWTP;
 	struct perf_buffer_opts pb_opts = {};
@@ -93,6 +94,10 @@ void test_get_stack_raw_tp(void)
 	struct bpf_map *map;
 	cpu_set_t cpu_set;
 
+	err = bpf_prog_load(file_err, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
+	if (CHECK(err >= 0, "prog_load raw tp", "err %d errno %d\n", err, errno))
+		return;
+
 	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
 	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
 		return;
...
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define MAX_STACK_RAWTP 10

SEC("raw_tracepoint/sys_enter")
int bpf_prog2(void *ctx)
{
	__u64 stack[MAX_STACK_RAWTP];
	int error;

	/* set all the flags which should return -EINVAL */
	error = bpf_get_stack(ctx, stack, 0, -1);
	if (error < 0)
		goto loop;

	return error;
loop:
	while (1) {
		error++;
	}
}

char _license[] SEC("license") = "GPL";
@@ -257,17 +257,15 @@
 	 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
 	 */
 	BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
-	/* no-op or OOB pointer computation */
+	/* error on OOB pointer computation */
 	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
-	/* potentially OOB access */
-	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
 	/* exit */
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 	},
 	.fixup_map_hash_8b = { 3 },
 	/* not actually fully unbounded, but the bound is very high */
-	.errstr = "R0 unbounded memory access",
+	.errstr = "value 72057594021150720 makes map_value pointer be out of bounds",
 	.result = REJECT
 },
 {
@@ -299,17 +297,15 @@
 	 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
 	 */
 	BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
-	/* no-op or OOB pointer computation */
+	/* error on OOB pointer computation */
 	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
-	/* potentially OOB access */
-	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
 	/* exit */
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 	},
 	.fixup_map_hash_8b = { 3 },
 	/* not actually fully unbounded, but the bound is very high */
-	.errstr = "R0 unbounded memory access",
+	.errstr = "value 72057594021150720 makes map_value pointer be out of bounds",
 	.result = REJECT
 },
 {
@@ -504,3 +500,42 @@
 	.errstr = "map_value pointer and 1000000000000",
 	.result = REJECT
 },
+{
+	"bounds check mixed 32bit and 64bit arithmatic. test1",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_MOV64_IMM(BPF_REG_1, -1),
+	BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+	/* r1 = 0xffffFFFF00000001 */
+	BPF_JMP32_IMM(BPF_JGT, BPF_REG_1, 1, 3),
+	/* check ALU64 op keeps 32bit bounds */
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+	BPF_JMP32_IMM(BPF_JGT, BPF_REG_1, 2, 1),
+	BPF_JMP_A(1),
+	/* invalid ldx if bounds are lost above */
+	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT
+},
+{
+	"bounds check mixed 32bit and 64bit arithmatic. test2",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_MOV64_IMM(BPF_REG_1, -1),
+	BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+	/* r1 = 0xffffFFFF00000001 */
+	BPF_MOV64_IMM(BPF_REG_2, 3),
+	/* r1 = 0x2 */
+	BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
+	/* check ALU32 op zero extends 64bit bounds */
+	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 1),
+	BPF_JMP_A(1),
+	/* invalid ldx if bounds are lost above */
+	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT
+},
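
Roughly, test1 corresponds to this C-level shape (an illustrative hand
translation, not generated code); the program is safe only if the verifier
keeps the 32-bit bounds across the ALU64 add:

	int test1_shape(void)
	{
		unsigned long long r1 = 0xffffffff00000001ULL;

		if ((unsigned int)r1 > 1)	/* jmp32: dead, low half == 1 */
			goto bad;
		r1 += 1;			/* ALU64 add: low half becomes 2 */
		if ((unsigned int)r1 > 2)	/* jmp32: dead if bounds survived */
			goto bad;
		return 0;
	bad:
		return -1;			/* stands in for the invalid ldx */
	}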
@@ -9,17 +9,17 @@
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
-	BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
+	BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)/2),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
-	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
+	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)/2),
 	BPF_MOV64_IMM(BPF_REG_4, 256),
 	BPF_EMIT_CALL(BPF_FUNC_get_stack),
 	BPF_MOV64_IMM(BPF_REG_1, 0),
 	BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
 	BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
 	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
-	BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
+	BPF_JMP_REG(BPF_JSGT, BPF_REG_1, BPF_REG_8, 16),
 	BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
 	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
@@ -29,7 +29,7 @@
 	BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
 	BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
-	BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
+	BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)/2),
 	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
 	BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
...