Commit a6a6a0a9 authored by Jakub Kicinski

Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Alexei Starovoitov says:

====================
pull-request: bpf 2023-11-15

We've added 7 non-merge commits during the last 6 day(s) which contain
a total of 9 files changed, 200 insertions(+), 49 deletions(-).

The main changes are:

1) Do not allocate bpf-specific percpu memory unconditionally, from Yonghong
   (a simplified sketch of the lazy-init pattern follows this list).

2) Fix precision backtracking instruction iteration, from Andrii.

3) Fix control flow graph checking, from Andrii.

4) Fix xskxceiver selftest build, from Anders.
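
For readers skimming the diff: change (1) stops setting up the per-CPU BPF
allocator at boot and instead initializes it on the first use of
bpf_percpu_obj_new(), under a mutex with a re-check. Below is a minimal
userspace sketch of that init-on-first-use pattern; the names
(get_global_state, expensive_init) are made up for illustration, and the
real kernel change is the check_kfunc_call() hunk further down.

/* Simplified analogue of the lazy initialization of bpf_global_percpu_ma:
 * the expensive setup runs only when the feature is first requested,
 * guarded by a mutex with a re-check so concurrent callers init it once.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

static void *global_state;           /* stands in for bpf_global_percpu_ma */
static bool global_state_set;        /* stands in for bpf_global_percpu_ma_set */
static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;

static int expensive_init(void **statep)  /* stands in for bpf_mem_alloc_init() */
{
	*statep = malloc(4096);
	return *statep ? 0 : -1;
}

static int get_global_state(void **out)
{
	int err = 0;

	if (!global_state_set) {             /* fast path: already set up */
		pthread_mutex_lock(&init_lock);
		if (!global_state_set) {     /* re-check under the lock */
			err = expensive_init(&global_state);
			if (!err)
				global_state_set = true;
		}
		pthread_mutex_unlock(&init_lock);
		if (err)
			return err;
	}
	*out = global_state;
	return 0;
}

int main(void)
{
	void *state;

	return get_global_state(&state) ? 1 : 0;  /* first call pays the init cost */
}

The verifier change below does the same dance with bpf_percpu_ma_lock and
bpf_mem_alloc_init(&bpf_global_percpu_ma, 0, true).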

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  bpf: Do not allocate percpu memory at init stage
  selftests/bpf: add more test cases for check_cfg()
  bpf: fix control-flow graph checking in privileged mode
  selftests/bpf: add edge case backtracking logic test
  bpf: fix precision backtracking instruction iteration
  bpf: handle ldimm64 properly in check_cfg()
  selftests: bpf: xskxceiver: ksft_print_msg: fix format type error
====================

Link: https://lore.kernel.org/r/20231115214949.48854-1-alexei.starovoitov@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 674e3180 1fda5bb6
@@ -56,7 +56,7 @@ extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;
extern struct bpf_mem_alloc bpf_global_ma, bpf_global_percpu_ma;
extern bool bpf_global_ma_set, bpf_global_percpu_ma_set;
extern bool bpf_global_ma_set;
typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
@@ -909,10 +909,14 @@ bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
aux->ctx_field_size = size;
}
static bool bpf_is_ldimm64(const struct bpf_insn *insn)
{
return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
}
static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
{
return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
insn->src_reg == BPF_PSEUDO_FUNC;
return bpf_is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
}
struct bpf_prog_ops {
......
@@ -64,8 +64,8 @@
#define OFF insn->off
#define IMM insn->imm
struct bpf_mem_alloc bpf_global_ma, bpf_global_percpu_ma;
bool bpf_global_ma_set, bpf_global_percpu_ma_set;
struct bpf_mem_alloc bpf_global_ma;
bool bpf_global_ma_set;
/* No hurry in this branch
*
@@ -2934,9 +2934,7 @@ static int __init bpf_global_ma_init(void)
ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false);
bpf_global_ma_set = !ret;
ret = bpf_mem_alloc_init(&bpf_global_percpu_ma, 0, true);
bpf_global_percpu_ma_set = !ret;
return !bpf_global_ma_set || !bpf_global_percpu_ma_set;
return ret;
}
late_initcall(bpf_global_ma_init);
#endif
......
@@ -26,6 +26,7 @@
#include <linux/poison.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/bpf_mem_alloc.h>
#include <net/xdp.h>
#include "disasm.h"
@@ -41,6 +42,9 @@ static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#undef BPF_LINK_TYPE
};
struct bpf_mem_alloc bpf_global_percpu_ma;
static bool bpf_global_percpu_ma_set;
/* bpf_check() is a static code analyzer that walks eBPF program
* instruction by instruction and updates register/stack state.
* All paths of conditional branches are analyzed until 'bpf_exit' insn.
@@ -336,6 +340,7 @@ struct bpf_kfunc_call_arg_meta {
struct btf *btf_vmlinux;
static DEFINE_MUTEX(bpf_verifier_lock);
static DEFINE_MUTEX(bpf_percpu_ma_lock);
static const struct bpf_line_info *
find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
@@ -3516,12 +3521,29 @@ static int push_jmp_history(struct bpf_verifier_env *env,
/* Backtrack one insn at a time. If idx is not at the top of recorded
* history then previous instruction came from straight line execution.
* Return -ENOENT if we exhausted all instructions within given state.
*
* It's legal to have a bit of a looping with the same starting and ending
* insn index within the same state, e.g.: 3->4->5->3, so just because current
* instruction index is the same as state's first_idx doesn't mean we are
* done. If there is still some jump history left, we should keep going. We
* need to take into account that we might have a jump history between given
* state's parent and itself, due to checkpointing. In this case, we'll have
* history entry recording a jump from last instruction of parent state and
* first instruction of given state.
*/
static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
u32 *history)
{
u32 cnt = *history;
if (i == st->first_insn_idx) {
if (cnt == 0)
return -ENOENT;
if (cnt == 1 && st->jmp_history[0].idx == i)
return -ENOENT;
}
if (cnt && st->jmp_history[cnt - 1].idx == i) {
i = st->jmp_history[cnt - 1].prev_idx;
(*history)--;
@@ -4401,10 +4423,10 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
* Nothing to be tracked further in the parent state.
*/
return 0;
if (i == first_idx)
break;
subseq_idx = i;
i = get_prev_insn_idx(st, i, &history);
if (i == -ENOENT)
break;
if (i >= env->prog->len) {
/* This can happen if backtracking reached insn 0
* and there are still reg_mask or stack_mask
@@ -12074,8 +12096,19 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
if (meta.func_id == special_kfunc_list[KF_bpf_obj_new_impl] && !bpf_global_ma_set)
return -ENOMEM;
if (meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl] && !bpf_global_percpu_ma_set)
return -ENOMEM;
if (meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) {
if (!bpf_global_percpu_ma_set) {
mutex_lock(&bpf_percpu_ma_lock);
if (!bpf_global_percpu_ma_set) {
err = bpf_mem_alloc_init(&bpf_global_percpu_ma, 0, true);
if (!err)
bpf_global_percpu_ma_set = true;
}
mutex_unlock(&bpf_percpu_ma_lock);
if (err)
return err;
}
}
if (((u64)(u32)meta.arg_constant.value) != meta.arg_constant.value) {
verbose(env, "local type ID argument must be in range [0, U32_MAX]\n");
@@ -15386,8 +15419,7 @@ enum {
* w - next instruction
* e - edge
*/
static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
bool loop_ok)
static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
{
int *insn_stack = env->cfg.insn_stack;
int *insn_state = env->cfg.insn_state;
@@ -15419,7 +15451,7 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
insn_stack[env->cfg.cur_stack++] = w;
return KEEP_EXPLORING;
} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
if (loop_ok && env->bpf_capable)
if (env->bpf_capable)
return DONE_EXPLORING;
verbose_linfo(env, t, "%d: ", t);
verbose_linfo(env, w, "%d: ", w);
@@ -15439,24 +15471,20 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
struct bpf_verifier_env *env,
bool visit_callee)
{
int ret;
int ret, insn_sz;
ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
insn_sz = bpf_is_ldimm64(&insns[t]) ? 2 : 1;
ret = push_insn(t, t + insn_sz, FALLTHROUGH, env);
if (ret)
return ret;
mark_prune_point(env, t + 1);
mark_prune_point(env, t + insn_sz);
/* when we exit from subprog, we need to record non-linear history */
mark_jmp_point(env, t + 1);
mark_jmp_point(env, t + insn_sz);
if (visit_callee) {
mark_prune_point(env, t);
ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env,
/* It's ok to allow recursion from CFG point of
* view. __check_func_call() will do the actual
* check.
*/
bpf_pseudo_func(insns + t));
ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
}
return ret;
}
@@ -15469,15 +15497,17 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
static int visit_insn(int t, struct bpf_verifier_env *env)
{
struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t];
int ret, off;
int ret, off, insn_sz;
if (bpf_pseudo_func(insn))
return visit_func_call_insn(t, insns, env, true);
/* All non-branch instructions have a single fall-through edge. */
if (BPF_CLASS(insn->code) != BPF_JMP &&
BPF_CLASS(insn->code) != BPF_JMP32)
return push_insn(t, t + 1, FALLTHROUGH, env, false);
BPF_CLASS(insn->code) != BPF_JMP32) {
insn_sz = bpf_is_ldimm64(insn) ? 2 : 1;
return push_insn(t, t + insn_sz, FALLTHROUGH, env);
}
switch (BPF_OP(insn->code)) {
case BPF_EXIT:
@@ -15523,8 +15553,7 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
off = insn->imm;
/* unconditional jump with single edge */
ret = push_insn(t, t + off + 1, FALLTHROUGH, env,
true);
ret = push_insn(t, t + off + 1, FALLTHROUGH, env);
if (ret)
return ret;
@@ -15537,11 +15566,11 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
/* conditional jump with two edges */
mark_prune_point(env, t);
ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
ret = push_insn(t, t + 1, FALLTHROUGH, env);
if (ret)
return ret;
return push_insn(t, t + insn->off + 1, BRANCH, env, true);
return push_insn(t, t + insn->off + 1, BRANCH, env);
}
}
@@ -15607,11 +15636,21 @@ static int check_cfg(struct bpf_verifier_env *env)
}
for (i = 0; i < insn_cnt; i++) {
struct bpf_insn *insn = &env->prog->insnsi[i];
if (insn_state[i] != EXPLORED) {
verbose(env, "unreachable insn %d\n", i);
ret = -EINVAL;
goto err_free;
}
if (bpf_is_ldimm64(insn)) {
if (insn_state[i + 1] != 0) {
verbose(env, "jump into the middle of ldimm64 insn %d\n", i);
ret = -EINVAL;
goto err_free;
}
i++; /* skip second half of ldimm64 */
}
}
ret = 0; /* cfg looks good */
......
@@ -97,4 +97,66 @@ l0_%=: r2 = r0; \
" ::: __clobber_all);
}
SEC("socket")
__description("conditional loop (2)")
__success
__failure_unpriv __msg_unpriv("back-edge from insn 10 to 11")
__naked void conditional_loop2(void)
{
asm volatile (" \
r9 = 2 ll; \
r3 = 0x20 ll; \
r4 = 0x35 ll; \
r8 = r4; \
goto l1_%=; \
l0_%=: r9 -= r3; \
r9 -= r4; \
r9 -= r8; \
l1_%=: r8 += r4; \
if r8 < 0x64 goto l0_%=; \
r0 = r9; \
exit; \
" ::: __clobber_all);
}
SEC("socket")
__description("unconditional loop after conditional jump")
__failure __msg("infinite loop detected")
__failure_unpriv __msg_unpriv("back-edge from insn 3 to 2")
__naked void uncond_loop_after_cond_jmp(void)
{
asm volatile (" \
r0 = 0; \
if r0 > 0 goto l1_%=; \
l0_%=: r0 = 1; \
goto l0_%=; \
l1_%=: exit; \
" ::: __clobber_all);
}
__naked __noinline __used
static unsigned long never_ending_subprog()
{
asm volatile (" \
r0 = r1; \
goto -1; \
" ::: __clobber_all);
}
SEC("socket")
__description("unconditional loop after conditional jump")
/* infinite loop is detected *after* check_cfg() */
__failure __msg("infinite loop detected")
__naked void uncond_loop_in_subprog_after_cond_jmp(void)
{
asm volatile (" \
r0 = 0; \
if r0 > 0 goto l1_%=; \
l0_%=: r0 += 1; \
call never_ending_subprog; \
l1_%=: exit; \
" ::: __clobber_all);
}
char _license[] SEC("license") = "GPL";
@@ -75,9 +75,10 @@ l0_%=: r0 += 1; \
" ::: __clobber_all);
}
SEC("tracepoint")
SEC("socket")
__description("bounded loop, start in the middle")
__failure __msg("back-edge")
__success
__failure_unpriv __msg_unpriv("back-edge")
__naked void loop_start_in_the_middle(void)
{
asm volatile (" \
@@ -136,7 +137,9 @@ l0_%=: exit; \
SEC("tracepoint")
__description("bounded recursion")
__failure __msg("back-edge")
__failure
/* verifier limitation in detecting max stack depth */
__msg("the call stack of 8 frames is too deep !")
__naked void bounded_recursion(void)
{
asm volatile (" \
......
@@ -91,3 +91,43 @@ __naked int bpf_end_bswap(void)
}
#endif /* v4 instruction */
SEC("?raw_tp")
__success __log_level(2)
/*
* Without the bug fix there will be no history between "last_idx 3 first_idx 3"
* and "parent state regs=" lines. "R0_w=6" parts are here to help anchor
* expected log messages to the one specific mark_chain_precision operation.
*
* This is quite fragile: if verifier checkpointing heuristic changes, this
* might need adjusting.
*/
__msg("2: (07) r0 += 1 ; R0_w=6")
__msg("3: (35) if r0 >= 0xa goto pc+1")
__msg("mark_precise: frame0: last_idx 3 first_idx 3 subseq_idx -1")
__msg("mark_precise: frame0: regs=r0 stack= before 2: (07) r0 += 1")
__msg("mark_precise: frame0: regs=r0 stack= before 1: (07) r0 += 1")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (05) goto pc-4")
__msg("mark_precise: frame0: regs=r0 stack= before 3: (35) if r0 >= 0xa goto pc+1")
__msg("mark_precise: frame0: parent state regs= stack=: R0_rw=P4")
__msg("3: R0_w=6")
__naked int state_loop_first_last_equal(void)
{
asm volatile (
"r0 = 0;"
"l0_%=:"
"r0 += 1;"
"r0 += 1;"
/* every few iterations we'll have a checkpoint here with
* first_idx == last_idx, potentially confusing precision
* backtracking logic
*/
"if r0 >= 10 goto l1_%=;" /* checkpoint + mark_precise */
"goto l0_%=;"
"l1_%=:"
"exit;"
::: __clobber_common
);
}
char _license[] SEC("license") = "GPL";
@@ -442,7 +442,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "back-edge from insn 0 to 0",
.errstr = "the call stack of 9 frames is too deep",
.result = REJECT,
},
{
@@ -799,7 +799,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "back-edge",
.errstr = "the call stack of 9 frames is too deep",
.result = REJECT,
},
{
@@ -811,7 +811,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr = "back-edge",
.errstr = "the call stack of 9 frames is too deep",
.result = REJECT,
},
{
......
@@ -9,8 +9,8 @@
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.errstr = "invalid BPF_LD_IMM insn",
.errstr_unpriv = "R1 pointer comparison",
.errstr = "jump into the middle of ldimm64 insn 1",
.errstr_unpriv = "jump into the middle of ldimm64 insn 1",
.result = REJECT,
},
{
@@ -23,8 +23,8 @@
BPF_LD_IMM64(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.errstr = "invalid BPF_LD_IMM insn",
.errstr_unpriv = "R1 pointer comparison",
.errstr = "jump into the middle of ldimm64 insn 1",
.errstr_unpriv = "jump into the middle of ldimm64 insn 1",
.result = REJECT,
},
{
......
@@ -908,8 +908,9 @@ static bool is_metadata_correct(struct pkt *pkt, void *buffer, u64 addr)
struct xdp_info *meta = data - sizeof(struct xdp_info);
if (meta->count != pkt->pkt_nb) {
ksft_print_msg("[%s] expected meta_count [%d], got meta_count [%d]\n",
__func__, pkt->pkt_nb, meta->count);
ksft_print_msg("[%s] expected meta_count [%d], got meta_count [%llu]\n",
__func__, pkt->pkt_nb,
(unsigned long long)meta->count);
return false;
}
@@ -926,11 +927,13 @@ static bool is_frag_valid(struct xsk_umem_info *umem, u64 addr, u32 len, u32 exp
if (addr >= umem->num_frames * umem->frame_size ||
addr + len > umem->num_frames * umem->frame_size) {
ksft_print_msg("Frag invalid addr: %llx len: %u\n", addr, len);
ksft_print_msg("Frag invalid addr: %llx len: %u\n",
(unsigned long long)addr, len);
return false;
}
if (!umem->unaligned_mode && addr % umem->frame_size + len > umem->frame_size) {
ksft_print_msg("Frag crosses frame boundary addr: %llx len: %u\n", addr, len);
ksft_print_msg("Frag crosses frame boundary addr: %llx len: %u\n",
(unsigned long long)addr, len);
return false;
}
@@ -1029,7 +1032,8 @@ static int complete_pkts(struct xsk_socket_info *xsk, int batch_size)
u64 addr = *xsk_ring_cons__comp_addr(&xsk->umem->cq, idx + rcvd - 1);
ksft_print_msg("[%s] Too many packets completed\n", __func__);
ksft_print_msg("Last completion address: %llx\n", addr);
ksft_print_msg("Last completion address: %llx\n",
(unsigned long long)addr);
return TEST_FAILURE;
}
@@ -1513,8 +1517,9 @@ static int validate_tx_invalid_descs(struct ifobject *ifobject)
}
if (stats.tx_invalid_descs != ifobject->xsk->pkt_stream->nb_pkts / 2) {
ksft_print_msg("[%s] tx_invalid_descs incorrect. Got [%u] expected [%u]\n",
__func__, stats.tx_invalid_descs,
ksft_print_msg("[%s] tx_invalid_descs incorrect. Got [%llu] expected [%u]\n",
__func__,
(unsigned long long)stats.tx_invalid_descs,
ifobject->xsk->pkt_stream->nb_pkts);
return TEST_FAILURE;
}
......