Commit f0c5a2d9 authored by Alexei Starovoitov

Merge branch 'Refactor verifier prune and jump point handling'

Andrii Nakryiko says:

====================

Disentangle prune and jump points in BPF verifier code. They are conceptually
independent but currently coupled together. This small patch set refactors
the related code and makes it possible to mark an instruction as a prune
point or a jump point independently.

Besides conceptual cleanliness, this allows removing unnecessary jump points
(potentially saving a tiny bit of performance and memory usage), and, more
importantly, it allows cleanly extending the set of special pruning points,
similarly to how it's done for BPF_FUNC_timer_set_callback. This will be used
by future patches implementing open-coded BPF iterators.

v1->v2:
  - clarified patch #3 commit message and a comment in the code (John);
  - added back mark_jmp_point() right after the subprog call to record the
    non-linear implicit jump from BPF_EXIT to the insn right after CALL <subprog>.
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents d8939cb0 618945fb
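
For orientation before the diff: the whole series boils down to giving each instruction two independent flags instead of one. A minimal standalone C sketch of that idea (simplified types and names; only the two bool fields and the mark_*() helpers correspond to the kernel code below):

#include <stdbool.h>
#include <stdio.h>

/* simplified stand-in for struct bpf_insn_aux_data: the two flags are
 * independent, so an insn can be a prune point, a jump point, both,
 * or neither
 */
struct insn_aux {
    bool prune_point;   /* run is_state_visited() here */
    bool jmp_point;     /* record jmp history here */
};

static struct insn_aux aux[6];

static void mark_prune_point(int idx) { aux[idx].prune_point = true; }
static void mark_jmp_point(int idx)   { aux[idx].jmp_point = true; }

int main(void)
{
    mark_prune_point(1);    /* prune-only, e.g. the timer_set_callback case */
    mark_prune_point(3);    /* a branch target gets both marks */
    mark_jmp_point(3);
    mark_jmp_point(4);      /* jump-only points are now expressible */

    for (int i = 0; i < 6; i++)
        printf("insn %d: prune=%d jmp=%d\n",
               i, aux[i].prune_point, aux[i].jmp_point);
    return 0;
}

Before this series a single prune_point flag served both roles, which is why push_jmp_history() had to be driven from inside is_state_visited(); the hunks below untangle the two.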
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -452,6 +452,7 @@ struct bpf_insn_aux_data {
 	/* below fields are initialized once */
 	unsigned int orig_idx; /* original instruction index */
 	bool prune_point;
+	bool jmp_point;
 };
 
 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2530,6 +2530,16 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
 	return 0;
 }
 
+static void mark_jmp_point(struct bpf_verifier_env *env, int idx)
+{
+	env->insn_aux_data[idx].jmp_point = true;
+}
+
+static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx)
+{
+	return env->insn_aux_data[insn_idx].jmp_point;
+}
+
 /* for any branch, call, exit record the history of jmps in the given state */
 static int push_jmp_history(struct bpf_verifier_env *env,
 			    struct bpf_verifier_state *cur)
@@ -2538,6 +2548,9 @@ static int push_jmp_history(struct bpf_verifier_env *env,
 	struct bpf_idx_pair *p;
 	size_t alloc_size;
 
+	if (!is_jmp_point(env, env->insn_idx))
+		return 0;
+
 	cnt++;
 	alloc_size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p)));
 	p = krealloc(cur->jmp_history, alloc_size, GFP_USER);
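
A userspace analogue of the append path in push_jmp_history() above may make the mechanics clearer; this is a sketch with plain realloc() standing in for krealloc() and without the kernel's overflow-checked size helpers (size_mul(), kmalloc_size_roundup()):

#include <stdio.h>
#include <stdlib.h>

/* userspace stand-in for struct bpf_idx_pair */
struct idx_pair {
    unsigned int prev_idx;
    unsigned int idx;
};

struct state {
    struct idx_pair *jmp_history;
    unsigned int jmp_history_cnt;
};

/* append one (prev_idx, idx) record, mirroring push_jmp_history() */
static int push_history(struct state *st, unsigned int prev_idx,
                        unsigned int idx)
{
    unsigned int cnt = st->jmp_history_cnt + 1;
    struct idx_pair *p;

    p = realloc(st->jmp_history, cnt * sizeof(*p));
    if (!p)
        return -1;  /* the kernel returns -ENOMEM */
    p[cnt - 1].prev_idx = prev_idx;
    p[cnt - 1].idx = idx;
    st->jmp_history = p;
    st->jmp_history_cnt = cnt;
    return 0;
}

int main(void)
{
    struct state st = {0};

    push_history(&st, 0, 5);    /* e.g. a jump from insn 0 to insn 5 */
    push_history(&st, 7, 2);
    for (unsigned int i = 0; i < st.jmp_history_cnt; i++)
        printf("%u -> %u\n", st.jmp_history[i].prev_idx,
               st.jmp_history[i].idx);
    free(st.jmp_history);
    return 0;
}

The kernel version rounds the allocation up to the bucket size kmalloc would actually return (kmalloc_size_roundup()), so back-to-back appends tend to reuse the same allocation instead of reallocating on every call.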
@@ -12140,11 +12153,16 @@ static struct bpf_verifier_state_list **explored_state(
 	return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
 }
 
-static void init_explored_state(struct bpf_verifier_env *env, int idx)
+static void mark_prune_point(struct bpf_verifier_env *env, int idx)
 {
 	env->insn_aux_data[idx].prune_point = true;
 }
 
+static bool is_prune_point(struct bpf_verifier_env *env, int insn_idx)
+{
+	return env->insn_aux_data[insn_idx].prune_point;
+}
+
 enum {
 	DONE_EXPLORING = 0,
 	KEEP_EXPLORING = 1,
@@ -12173,9 +12191,11 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
 		return -EINVAL;
 	}
 
-	if (e == BRANCH)
+	if (e == BRANCH) {
 		/* mark branch target for state pruning */
-		init_explored_state(env, w);
+		mark_prune_point(env, w);
+		mark_jmp_point(env, w);
+	}
 
 	if (insn_state[w] == 0) {
 		/* tree-edge */
@@ -12213,10 +12233,12 @@ static int visit_func_call_insn(int t, int insn_cnt,
 	if (ret)
 		return ret;
 
-	if (t + 1 < insn_cnt)
-		init_explored_state(env, t + 1);
+	mark_prune_point(env, t + 1);
+	/* when we exit from subprog, we need to record non-linear history */
+	mark_jmp_point(env, t + 1);
+
 	if (visit_callee) {
-		init_explored_state(env, t);
+		mark_prune_point(env, t);
 		ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env,
 				/* It's ok to allow recursion from CFG point of
 				 * view. __check_func_call() will do the actual
@@ -12251,12 +12273,12 @@ static int visit_insn(int t, int insn_cnt, struct bpf_verifier_env *env)
 	case BPF_CALL:
 		if (insns[t].imm == BPF_FUNC_timer_set_callback)
-			/* Mark this call insn to trigger is_state_visited() check
-			 * before call itself is processed by __check_func_call().
-			 * Otherwise new async state will be pushed for further
-			 * exploration.
+			/* Mark this call insn as a prune point to trigger
+			 * is_state_visited() check before call itself is
+			 * processed by __check_func_call(). Otherwise new
+			 * async state will be pushed for further exploration.
 			 */
-			init_explored_state(env, t);
+			mark_prune_point(env, t);
 		return visit_func_call_insn(t, insn_cnt, insns, env,
 					    insns[t].src_reg == BPF_PSEUDO_CALL);
@@ -12270,22 +12292,15 @@ static int visit_insn(int t, int insn_cnt, struct bpf_verifier_env *env)
 		if (ret)
 			return ret;
 
-		/* unconditional jmp is not a good pruning point,
-		 * but it's marked, since backtracking needs
-		 * to record jmp history in is_state_visited().
-		 */
-		init_explored_state(env, t + insns[t].off + 1);
-		/* tell verifier to check for equivalent states
-		 * after every call and jump
-		 */
-		if (t + 1 < insn_cnt)
-			init_explored_state(env, t + 1);
+		mark_prune_point(env, t + insns[t].off + 1);
+		mark_jmp_point(env, t + insns[t].off + 1);
 
 		return ret;
 
 	default:
 		/* conditional jump with two edges */
-		init_explored_state(env, t);
+		mark_prune_point(env, t);
 		ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
 		if (ret)
 			return ret;
@@ -13319,13 +13334,6 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 	int i, j, err, states_cnt = 0;
 	bool add_new_state = env->test_state_freq ? true : false;
 
-	cur->last_insn_idx = env->prev_insn_idx;
-	if (!env->insn_aux_data[insn_idx].prune_point)
-		/* this 'insn_idx' instruction wasn't marked, so we will not
-		 * be doing state search here
-		 */
-		return 0;
-
 	/* bpf progs typically have pruning point every 4 instructions
 	 * http://vger.kernel.org/bpfconf2019.html#session-1
 	 * Do not add new state for future pruning if the verifier hasn't seen
@@ -13460,10 +13468,10 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 		env->max_states_per_insn = states_cnt;
 
 	if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
-		return push_jmp_history(env, cur);
+		return 0;
 
 	if (!add_new_state)
-		return push_jmp_history(env, cur);
+		return 0;
 
 	/* There were no equivalent states, remember the current one.
 	 * Technically the current state is not proven to be safe yet,
@@ -13603,21 +13611,31 @@ static int do_check(struct bpf_verifier_env *env)
 			return -E2BIG;
 		}
 
-		err = is_state_visited(env, env->insn_idx);
-		if (err < 0)
-			return err;
-		if (err == 1) {
-			/* found equivalent state, can prune the search */
-			if (env->log.level & BPF_LOG_LEVEL) {
-				if (do_print_state)
-					verbose(env, "\nfrom %d to %d%s: safe\n",
-						env->prev_insn_idx, env->insn_idx,
-						env->cur_state->speculative ?
-						" (speculative execution)" : "");
-				else
-					verbose(env, "%d: safe\n", env->insn_idx);
+		state->last_insn_idx = env->prev_insn_idx;
+
+		if (is_prune_point(env, env->insn_idx)) {
+			err = is_state_visited(env, env->insn_idx);
+			if (err < 0)
+				return err;
+			if (err == 1) {
+				/* found equivalent state, can prune the search */
+				if (env->log.level & BPF_LOG_LEVEL) {
+					if (do_print_state)
+						verbose(env, "\nfrom %d to %d%s: safe\n",
+							env->prev_insn_idx, env->insn_idx,
+							env->cur_state->speculative ?
+							" (speculative execution)" : "");
+					else
+						verbose(env, "%d: safe\n", env->insn_idx);
+				}
+				goto process_bpf_exit;
 			}
-			goto process_bpf_exit;
+		}
+
+		if (is_jmp_point(env, env->insn_idx)) {
+			err = push_jmp_history(env, state);
+			if (err)
+				return err;
 		}
 
 		if (signal_pending(current))
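
The net effect on do_check()'s per-instruction flow: the prune-point check runs first and, on finding an equivalent state, prunes before any history is recorded, while the history push is now gated only by the jump-point flag. A compilable toy condensation of that ordering (every helper here is a stub; the indices and return values are made up for the demo):

#include <stdbool.h>
#include <stdio.h>

static bool is_prune_point(int idx)   { return idx == 2; }
static bool is_jmp_point(int idx)     { return idx == 2 || idx == 4; }
static int  is_state_visited(int idx) { return idx == 2; /* 1 == equivalent state found */ }
static int  push_jmp_history(int idx) { printf("%d: history recorded\n", idx); return 0; }

int main(void)
{
    for (int insn_idx = 0; insn_idx < 6; insn_idx++) {
        if (is_prune_point(insn_idx) &&
            is_state_visited(insn_idx) == 1) {
            /* found equivalent state, can prune the search */
            printf("%d: safe\n", insn_idx);
            continue;   /* stands in for goto process_bpf_exit */
        }
        if (is_jmp_point(insn_idx))
            push_jmp_history(insn_idx);
    }
    return 0;
}

Note that insn 2, marked as both, is pruned before its history would be pushed, while the jump-only insn 4 still gets its history entry; under the old code both actions were tied to the single prune_point flag.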