Commit e042aa53 authored by Daniel Borkmann

bpf: Fix pointer arithmetic mask tightening under state pruning

In 7fedb63a ("bpf: Tighten speculative pointer arithmetic mask") we
narrowed the offset mask for unprivileged pointer arithmetic in order to
mitigate a corner case where in the speculative domain it is possible to
advance, for example, the map value pointer by up to value_size-1 out-of-
bounds in order to leak kernel memory via side-channel to user space.
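
That mask is enforced by a branchless instruction sequence which the
verifier patches into the program for such pointer ALU operations
(BPF_ALU_SANITIZE_*), so the clamp also holds when branch prediction goes
wrong. As a rough plain-C model of that masking arithmetic, under the
caveat that sanitize_off() is an illustrative name rather than a kernel
function:

    #include <stdint.h>
    #include <stdio.h>

    /* Models the MOV32/SUB/OR/NEG/ARSH/AND sequence emitted for
     * BPF_ALU_SANITIZE_*: the variable offset passes through unchanged
     * when it is <= alu_limit (and bit 63 is clear), and collapses to 0
     * otherwise; no conditional branch is involved.
     */
    static uint64_t sanitize_off(uint64_t off, uint64_t alu_limit)
    {
        uint64_t ax = alu_limit - off;  /* wraps (bit 63 set) iff off > alu_limit */
        ax |= off;                      /* also taint offsets with bit 63 set */
        ax = -ax;
        ax = (uint64_t)((int64_t)ax >> 63); /* all-ones for in-range off, else 0 */
        return off & ax;                /* out-of-range offsets become 0 */
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)sanitize_off(3, 7)); /* 3: kept */
        printf("%llu\n", (unsigned long long)sanitize_off(9, 7)); /* 0: clamped */
        return 0;
    }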

The verifier's state pruning for scalars leaves one corner case open.
In a first verification path R_x holds an unknown scalar with an
aux->alu_limit of e.g. 7, and in a second verification path the same
register, here denoted R_x', holds an unknown scalar with tighter bounds
that satisfy both range_within(R_x, R_x') and tnum_in(R_x, R_x'), which
on its own would have yielded an aux->alu_limit of 3. Since the second
path fits the recorded register constraints, it is pruned, and the final
generated mask from aux->alu_limit remains at 7. While technically not
wrong for the non-speculative domain, it is possible to craft similar
cases where the mask ends up too wide, as in 7fedb63a.
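
To make the pruning mismatch concrete, the following self-contained toy
model replays the two paths; struct toy_reg and toy_range_within() are
simplified stand-ins for the verifier's bpf_reg_state and range_within(),
not the real definitions:

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_reg { unsigned long long umin, umax; };

    /* "old safe implies cur safe": cur's value range must fit inside
     * the range recorded for old.
     */
    static bool toy_range_within(struct toy_reg old, struct toy_reg cur)
    {
        return old.umin <= cur.umin && old.umax >= cur.umax;
    }

    int main(void)
    {
        struct toy_reg rx  = { 0, 7 }; /* first path:  needs alu_limit 7 */
        struct toy_reg rxp = { 0, 3 }; /* second path: needs alu_limit 3 */

        /* R_x' fits the state recorded for R_x, so the second path is
         * pruned, while the retained mask stays at 7: wider than the 3
         * this path would have required.
         */
        printf("pruned: %d\n", toy_range_within(rx, rxp)); /* pruned: 1 */
        return 0;
    }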

One way to fix it is to detect the presence of unknown scalar map pointer
arithmetic and force a deeper search on unknown scalars to ensure that
we do not run into a masking mismatch.
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
parent 59089a18
include/linux/bpf_verifier.h
@@ -414,6 +414,7 @@ struct bpf_verifier_env {
 	u32 used_map_cnt;		/* number of used maps */
 	u32 used_btf_cnt;		/* number of used BTF objects */
 	u32 id_gen;			/* used to generate unique reg IDs */
+	bool explore_alu_limits;
 	bool allow_ptr_leaks;
 	bool allow_uninit_stack;
 	bool allow_ptr_to_map_access;
kernel/bpf/verifier.c
@@ -6561,6 +6561,12 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
 		alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
 		alu_state |= ptr_is_dst_reg ?
 			     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+
+		/* Limit pruning on unknown scalars to enable deep search for
+		 * potential masking differences from other program paths.
+		 */
+		if (!off_is_imm)
+			env->explore_alu_limits = true;
 	}
 
 	err = update_alu_sanitation_state(aux, alu_state, alu_limit);
@@ -9936,8 +9942,8 @@ static void clean_live_states(struct bpf_verifier_env *env, int insn,
 }
 
 /* Returns true if (rold safe implies rcur safe) */
-static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
-		    struct bpf_id_pair *idmap)
+static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
+		    struct bpf_reg_state *rcur, struct bpf_id_pair *idmap)
 {
 	bool equal;
@@ -9963,6 +9969,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
 		return false;
 	switch (rold->type) {
 	case SCALAR_VALUE:
+		if (env->explore_alu_limits)
+			return false;
 		if (rcur->type == SCALAR_VALUE) {
 			if (!rold->precise && !rcur->precise)
 				return true;
@@ -10053,9 +10061,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
 	return false;
 }
 
-static bool stacksafe(struct bpf_func_state *old,
-		      struct bpf_func_state *cur,
-		      struct bpf_id_pair *idmap)
+static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
+		      struct bpf_func_state *cur, struct bpf_id_pair *idmap)
 {
 	int i, spi;
@@ -10100,9 +10107,8 @@ static bool stacksafe(struct bpf_func_state *old,
 			continue;
 		if (old->stack[spi].slot_type[0] != STACK_SPILL)
 			continue;
-		if (!regsafe(&old->stack[spi].spilled_ptr,
-			     &cur->stack[spi].spilled_ptr,
-			     idmap))
+		if (!regsafe(env, &old->stack[spi].spilled_ptr,
+			     &cur->stack[spi].spilled_ptr, idmap))
 			/* when explored and current stack slot are both storing
 			 * spilled registers, check that stored pointers types
 			 * are the same as well.
@@ -10159,10 +10165,11 @@ static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_stat
 	memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
 	for (i = 0; i < MAX_BPF_REG; i++)
-		if (!regsafe(&old->regs[i], &cur->regs[i], env->idmap_scratch))
+		if (!regsafe(env, &old->regs[i], &cur->regs[i],
+			     env->idmap_scratch))
 			return false;
 
-	if (!stacksafe(old, cur, env->idmap_scratch))
+	if (!stacksafe(env, old, cur, env->idmap_scratch))
 		return false;
 
 	if (!refsafe(old, cur))