Commit 90679706 authored by Alexei Starovoitov

Merge branch 'bpf-verifier-retval-logic-fixes'

Andrii Nakryiko says:

====================
BPF verifier retval logic fixes

This patch set fixes BPF verifier logic around validating and enforcing return
values for BPF programs that have a specific range of expected return values.
Both sync and async callbacks have similar logic and are fixed as well.
A few tests are added that would fail without the fixes in this patch set.

Also, while at it, we update the retval checking logic to use an smin/smax
range instead of a tnum, avoiding potential future issues if the expected
range cannot be represented precisely by a tnum (e.g., [0, 2] is not
representable by a tnum and is treated as [0, 3]).
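
To make the imprecision concrete: the verifier derives tnums for ranges via
tnum_range(), which can only track individually known/unknown bits, so every
bit below the highest differing bit of the bounds becomes unknown. Below is a
minimal userspace sketch modeled on tnum_range() in kernel/bpf/tnum.c (the
fls64() stand-in is local to the demo), showing [0, 2] degrading to [0, 3]:

#include <stdio.h>
#include <stdint.h>

struct tnum { uint64_t value; uint64_t mask; };

static int fls64(uint64_t x)
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

static struct tnum tnum_range(uint64_t min, uint64_t max)
{
	uint64_t chi = min ^ max, delta;
	int bits = fls64(chi);

	/* 1ULL << 64 is undefined, treat as fully unknown */
	if (bits > 63)
		return (struct tnum){ .value = 0, .mask = ~0ULL };
	/* all bits below the highest differing bit become unknown */
	delta = (1ULL << bits) - 1;
	return (struct tnum){ .value = min & ~delta, .mask = delta };
}

int main(void)
{
	struct tnum t = tnum_range(0, 2);

	/* prints value=0 mask=0x3: the tnum covers {0, 1, 2, 3}, not [0, 2] */
	printf("value=%#llx mask=%#llx\n",
	       (unsigned long long)t.value, (unsigned long long)t.mask);
	return 0;
}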

There is a little bit of refactoring to unify async callback and program exit
logic to avoid duplication of checks as much as possible.

v4->v5:
  - fix timer_bad_ret test on no-alu32 flavor (CI);
v3->v4:
  - add back bpf_func_state rearrangement patch;
  - simplified patch #4 as suggested (Shung-Hsi);
v2->v3:
  - more carefully switch from umin/umax to smin/smax;
v1->v2:
  - drop tnum from retval checks (Eduard);
  - use smin/smax instead of umin/umax (Alexei).
====================

Link: https://lore.kernel.org/r/20231202175705.885270-1-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 6685aadc 81eff2e3
@@ -275,6 +275,11 @@ struct bpf_reference_state {
 	int callback_ref;
 };
 
+struct bpf_retval_range {
+	s32 minval;
+	s32 maxval;
+};
+
 /* state of the program:
  *  type of all registers and stack info
  */
@@ -297,8 +302,8 @@ struct bpf_func_state {
 	 * void foo(void) { bpf_timer_set_callback(,foo); }
 	 */
 	u32 async_entry_cnt;
+	struct bpf_retval_range callback_ret_range;
 	bool in_callback_fn;
-	struct tnum callback_ret_range;
 	bool in_async_callback_fn;
 	bool in_exception_callback_fn;
 	/* For callback calling functions that limit number of possible
@@ -316,8 +321,8 @@ struct bpf_func_state {
 	/* The following fields should be last. See copy_func_state() */
 	int acquired_refs;
 	struct bpf_reference_state *refs;
-	int allocated_stack;
 	struct bpf_stack_state *stack;
+	int allocated_stack;
 };
 
 struct bpf_idx_pair {
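With the range stored as signed bounds, the exit-time check reduces to a plain
signed comparison of the expected range against the returning register's
smin/smax. The helpers below are a sketch of that logic in the style the
series uses; treat names and signatures as illustrative rather than the
verbatim kernel code:

static struct bpf_retval_range retval_range(s32 minval, s32 maxval)
{
	struct bpf_retval_range range = { minval, maxval };

	return range;
}

/* sketch: a return value is acceptable iff the register's signed
 * bounds fall entirely inside the expected [minval, maxval] range;
 * smin/smax represent a range like [0, 2] exactly, unlike a tnum
 */
static bool retval_range_within(struct bpf_retval_range range,
				const struct bpf_reg_state *reg)
{
	return range.minval <= reg->smin_value &&
	       reg->smax_value <= range.maxval;
}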
@@ -539,6 +539,19 @@ static void verbose_snum(struct bpf_verifier_env *env, s64 num)
 		verbose(env, "%#llx", num);
 }
 
+int tnum_strn(char *str, size_t size, struct tnum a)
+{
+	/* print as a constant, if tnum is fully known */
+	if (a.mask == 0) {
+		if (is_unum_decimal(a.value))
+			return snprintf(str, size, "%llu", a.value);
+		else
+			return snprintf(str, size, "%#llx", a.value);
+	}
+
+	return snprintf(str, size, "(%#llx; %#llx)", a.value, a.mask);
+}
+EXPORT_SYMBOL_GPL(tnum_strn);
 static void print_scalar_ranges(struct bpf_verifier_env *env,
 				const struct bpf_reg_state *reg,
 				const char **sep)
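The visible effect on verifier output: a fully-known tnum now prints as a
plain constant (decimal for small values, hex otherwise) instead of the old
"(value; mask)" pair. A standalone userspace rendition of the new behavior;
the is_unum_decimal() cutoff below is an assumption, the kernel keeps its own
internal threshold:

#include <stdio.h>
#include <stdint.h>

struct tnum { uint64_t value; uint64_t mask; };

/* assumption: a small-number cutoff below which constants print in
 * decimal; the kernel's exact threshold is an internal detail
 */
static int is_unum_decimal(uint64_t num)
{
	return num <= 0xff;
}

static int tnum_strn(char *str, size_t size, struct tnum a)
{
	if (a.mask == 0) {
		if (is_unum_decimal(a.value))
			return snprintf(str, size, "%llu", (unsigned long long)a.value);
		return snprintf(str, size, "%#llx", (unsigned long long)a.value);
	}
	return snprintf(str, size, "(%#llx; %#llx)",
			(unsigned long long)a.value, (unsigned long long)a.mask);
}

int main(void)
{
	char buf[64];

	tnum_strn(buf, sizeof(buf), (struct tnum){ .value = 64, .mask = 0 });
	printf("%s\n", buf); /* "64" -- fully-known tnum prints as a constant */

	tnum_strn(buf, sizeof(buf), (struct tnum){ .value = 0, .mask = 3 });
	printf("%s\n", buf); /* "(0; 0x3)" -- unknown bits keep the old format */
	return 0;
}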
@@ -172,12 +172,6 @@ bool tnum_in(struct tnum a, struct tnum b)
 	return a.value == b.value;
 }
 
-int tnum_strn(char *str, size_t size, struct tnum a)
-{
-	return snprintf(str, size, "(%#llx; %#llx)", a.value, a.mask);
-}
-EXPORT_SYMBOL_GPL(tnum_strn);
-
 int tnum_sbin(char *str, size_t size, struct tnum a)
 {
 	size_t n;
This diff is collapsed.
@@ -125,7 +125,7 @@ int check_assert_generic(struct __sk_buff *ctx)
 }
 
 SEC("?fentry/bpf_check")
-__failure __msg("At program exit the register R0 has value (0x40; 0x0)")
+__failure __msg("At program exit the register R1 has smin=64 smax=64")
 int check_assert_with_return(void *ctx)
 {
 	bpf_assert_with(!ctx, 64);
@@ -308,7 +308,7 @@ int reject_set_exception_cb_bad_ret1(void *ctx)
 }
 
 SEC("?fentry/bpf_check")
-__failure __msg("At program exit the register R0 has value (0x40; 0x0) should")
+__failure __msg("At program exit the register R1 has smin=64 smax=64 should")
 int reject_set_exception_cb_bad_ret2(void *ctx)
 {
 	bpf_throw(64);
@@ -13,7 +13,7 @@ __noinline int foo(unsigned int *v)
 }
 
 SEC("cgroup_skb/ingress")
-__failure __msg("At program exit the register R0 has value")
+__failure __msg("At program exit the register R0 has ")
 int global_func15(struct __sk_buff *skb)
 {
 	unsigned int v = 1;
@@ -22,3 +22,35 @@ int global_func15(struct __sk_buff *skb)
 
 	return v;
 }
+
+SEC("cgroup_skb/ingress")
+__log_level(2) __flag(BPF_F_TEST_STATE_FREQ)
+__failure
+/* check that fallthrough code path marks r0 as precise */
+__msg("mark_precise: frame0: regs=r0 stack= before 2: (b7) r0 = 1")
+/* check that branch code path marks r0 as precise */
+__msg("mark_precise: frame0: regs=r0 stack= before 0: (85) call bpf_get_prandom_u32#7")
+__msg("At program exit the register R0 has ")
+__naked int global_func15_tricky_pruning(void)
+{
+	asm volatile (
+		"call %[bpf_get_prandom_u32];"
+		"if r0 s> 1000 goto 1f;"
+		"r0 = 1;"
+	"1:"
+		"goto +0;" /* checkpoint */
+		/* cgroup_skb/ingress program is expected to return [0, 1]
+		 * values, so branch above makes sure that in a fallthrough
+		 * case we have a valid 1 stored in R0 register, but in
+		 * a branch case we assign some random value to R0. So if
+		 * there is something wrong with precision tracking for R0 at
+		 * program exit, we might erroneously prune branch case,
+		 * because R0 in fallthrough case is imprecise (and thus any
+		 * value is valid from POV of verifier is_state_equal() logic)
+		 */
+		"exit;"
+		:
+		: __imm(bpf_get_prandom_u32)
+		: __clobber_common
+	);
+}
@@ -21,17 +21,37 @@ struct {
 	__type(value, struct elem);
 } timer_map SEC(".maps");
 
-static int timer_cb_ret1(void *map, int *key, struct bpf_timer *timer)
+__naked __noinline __used
+static unsigned long timer_cb_ret_bad()
 {
-	if (bpf_get_smp_processor_id() % 2)
-		return 1;
-	else
-		return 0;
+	asm volatile (
+		"call %[bpf_get_prandom_u32];"
+		"if r0 s> 1000 goto 1f;"
+		"r0 = 0;"
+	"1:"
+		"goto +0;" /* checkpoint */
+		/* async callback is expected to return 0, so branch above
+		 * skipping r0 = 0; should lead to a failure, but if exit
+		 * instruction doesn't enforce r0's precision, this callback
+		 * will be successfully verified
+		 */
+		"exit;"
+		:
+		: __imm(bpf_get_prandom_u32)
+		: __clobber_common
+	);
 }
 
 SEC("fentry/bpf_fentry_test1")
-__failure __msg("should have been in (0x0; 0x0)")
-int BPF_PROG2(test_ret_1, int, a)
+__log_level(2)
+__flag(BPF_F_TEST_STATE_FREQ)
+__failure
+/* check that fallthrough code path marks r0 as precise */
+__msg("mark_precise: frame0: regs=r0 stack= before 22: (b7) r0 = 0")
+/* check that branch code path marks r0 as precise */
+__msg("mark_precise: frame0: regs=r0 stack= before 24: (85) call bpf_get_prandom_u32#7")
+__msg("should have been in [0, 0]")
+long BPF_PROG2(test_bad_ret, int, a)
 {
 	int key = 0;
 	struct bpf_timer *timer;
@@ -39,7 +59,7 @@ int BPF_PROG2(test_ret_1, int, a)
 
 	timer = bpf_map_lookup_elem(&timer_map, &key);
 	if (timer) {
 		bpf_timer_init(timer, &timer_map, CLOCK_BOOTTIME);
-		bpf_timer_set_callback(timer, timer_cb_ret1);
+		bpf_timer_set_callback(timer, timer_cb_ret_bad);
 		bpf_timer_start(timer, 1000, 0);
 	}
@@ -184,7 +184,7 @@ invalid_drain_callback_return(struct bpf_dynptr *dynptr, void *context)
  * not be able to write to that pointer.
  */
 SEC("?raw_tp")
-__failure __msg("At callback return the register R0 has value")
+__failure __msg("At callback return the register R0 has ")
 int user_ringbuf_callback_invalid_return(void *ctx)
 {
 	bpf_user_ringbuf_drain(&user_ringbuf, invalid_drain_callback_return, NULL, 0);
@@ -7,7 +7,7 @@
 
 SEC("cgroup/sock")
 __description("bpf_exit with invalid return code. test1")
-__failure __msg("R0 has value (0x0; 0xffffffff)")
+__failure __msg("smin=0 smax=4294967295 should have been in [0, 1]")
 __naked void with_invalid_return_code_test1(void)
 {
 	asm volatile ("					\
@@ -30,7 +30,7 @@ __naked void with_invalid_return_code_test2(void)
 
 SEC("cgroup/sock")
 __description("bpf_exit with invalid return code. test3")
-__failure __msg("R0 has value (0x0; 0x3)")
+__failure __msg("smin=0 smax=3 should have been in [0, 1]")
 __naked void with_invalid_return_code_test3(void)
 {
 	asm volatile ("					\
@@ -53,7 +53,7 @@ __naked void with_invalid_return_code_test4(void)
 
 SEC("cgroup/sock")
 __description("bpf_exit with invalid return code. test5")
-__failure __msg("R0 has value (0x2; 0x0)")
+__failure __msg("smin=2 smax=2 should have been in [0, 1]")
 __naked void with_invalid_return_code_test5(void)
 {
 	asm volatile ("					\
@@ -75,7 +75,7 @@ __naked void with_invalid_return_code_test6(void)
 
 SEC("cgroup/sock")
 __description("bpf_exit with invalid return code. test7")
-__failure __msg("R0 has unknown scalar value")
+__failure __msg("R0 has unknown scalar value should have been in [0, 1]")
 __naked void with_invalid_return_code_test7(void)
 {
 	asm volatile ("					\
@@ -411,7 +411,7 @@ l0_%=:	r0 = 0;						\
 
 SEC("tc")
 __description("direct packet access: test17 (pruning, alignment)")
-__failure __msg("misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4")
+__failure __msg("misaligned packet access off 2+0+15+-4 size 4")
 __flag(BPF_F_STRICT_ALIGNMENT)
 __naked void packet_access_test17_pruning_alignment(void)
 {
@@ -67,7 +67,7 @@ __naked void ptr_to_long_half_uninitialized(void)
 
 SEC("cgroup/sysctl")
 __description("ARG_PTR_TO_LONG misaligned")
-__failure __msg("misaligned stack access off (0x0; 0x0)+-20+0 size 8")
+__failure __msg("misaligned stack access off 0+-20+0 size 8")
 __naked void arg_ptr_to_long_misaligned(void)
 {
 	asm volatile ("					\
@@ -39,7 +39,7 @@ __naked void with_valid_return_code_test3(void)
 
 SEC("netfilter")
 __description("bpf_exit with invalid return code. test4")
-__failure __msg("R0 has value (0x2; 0x0)")
+__failure __msg("R0 has smin=2 smax=2 should have been in [0, 1]")
 __naked void with_invalid_return_code_test4(void)
 {
 	asm volatile ("					\
@@ -37,7 +37,7 @@ __naked void ptr_to_stack_store_load(void)
 
 SEC("socket")
 __description("PTR_TO_STACK store/load - bad alignment on off")
-__failure __msg("misaligned stack access off (0x0; 0x0)+-8+2 size 8")
+__failure __msg("misaligned stack access off 0+-8+2 size 8")
 __failure_unpriv
 __naked void load_bad_alignment_on_off(void)
 {
@@ -53,7 +53,7 @@ __naked void load_bad_alignment_on_off(void)
 
 SEC("socket")
 __description("PTR_TO_STACK store/load - bad alignment on reg")
-__failure __msg("misaligned stack access off (0x0; 0x0)+-10+8 size 8")
+__failure __msg("misaligned stack access off 0+-10+8 size 8")
 __failure_unpriv
 __naked void load_bad_alignment_on_reg(void)
 {
@@ -117,6 +117,56 @@ __naked int global_subprog_result_precise(void)
 	);
 }
 
+__naked __noinline __used
+static unsigned long loop_callback_bad()
+{
+	/* bpf_loop() callback that can return values outside of [0, 1] range */
+	asm volatile (
+		"call %[bpf_get_prandom_u32];"
+		"if r0 s> 1000 goto 1f;"
+		"r0 = 0;"
+	"1:"
+		"goto +0;" /* checkpoint */
+		/* bpf_loop() expects [0, 1] values, so branch above skipping
+		 * r0 = 0; should lead to a failure, but if exit instruction
+		 * doesn't enforce r0's precision, this callback will be
+		 * successfully verified
+		 */
+		"exit;"
+		:
+		: __imm(bpf_get_prandom_u32)
+		: __clobber_common
+	);
+}
+
+SEC("?raw_tp")
+__failure __log_level(2)
+__flag(BPF_F_TEST_STATE_FREQ)
+/* check that fallthrough code path marks r0 as precise */
+__msg("mark_precise: frame1: regs=r0 stack= before 11: (b7) r0 = 0")
+/* check that we have branch code path doing its own validation */
+__msg("from 10 to 12: frame1: R0=scalar(smin=umin=1001")
+/* check that branch code path marks r0 as precise, before failing */
+__msg("mark_precise: frame1: regs=r0 stack= before 9: (85) call bpf_get_prandom_u32#7")
+__msg("At callback return the register R0 has smin=1001 should have been in [0, 1]")
+__naked int callback_precise_return_fail(void)
+{
+	asm volatile (
+		"r1 = 1;"			/* nr_loops */
+		"r2 = %[loop_callback_bad];"	/* callback_fn */
+		"r3 = 0;"			/* callback_ctx */
+		"r4 = 0;"			/* flags */
+		"call %[bpf_loop];"
+		"r0 = 0;"
+		"exit;"
+		:
+		: __imm_ptr(loop_callback_bad),
+		  __imm(bpf_loop)
+		: __clobber_common
+	);
+}
+
 SEC("?raw_tp")
 __success __log_level(2)
 /* First simulated path does not include callback body,