Commit c93552c4 authored by Daniel Borkmann, committed by Alexei Starovoitov

bpf: properly enforce index mask to prevent out-of-bounds speculation

While recently reviewing the verifier code, I noticed that the
following two program variants involving tail calls can be
loaded.

Variant 1:

  # bpftool p d x i 15
    0: (15) if r1 == 0x0 goto pc+3
    1: (18) r2 = map[id:5]
    3: (05) goto pc+2
    4: (18) r2 = map[id:6]
    6: (b7) r3 = 7
    7: (35) if r3 >= 0xa0 goto pc+2
    8: (54) (u32) r3 &= (u32) 255
    9: (85) call bpf_tail_call#12
   10: (b7) r0 = 1
   11: (95) exit

  # bpftool m s i 5
    5: prog_array  flags 0x0
        key 4B  value 4B  max_entries 4  memlock 4096B
  # bpftool m s i 6
    6: prog_array  flags 0x0
        key 4B  value 4B  max_entries 160  memlock 4096B

Variant 2:

  # bpftool p d x i 20
    0: (15) if r1 == 0x0 goto pc+3
    1: (18) r2 = map[id:8]
    3: (05) goto pc+2
    4: (18) r2 = map[id:7]
    6: (b7) r3 = 7
    7: (35) if r3 >= 0x4 goto pc+2
    8: (54) (u32) r3 &= (u32) 3
    9: (85) call bpf_tail_call#12
   10: (b7) r0 = 1
   11: (95) exit

  # bpftool m s i 8
    8: prog_array  flags 0x0
        key 4B  value 4B  max_entries 160  memlock 4096B
  # bpftool m s i 7
    7: prog_array  flags 0x0
        key 4B  value 4B  max_entries 4  memlock 4096B

In both cases the index masking inserted by the verifier to control
out-of-bounds CPU speculation via b2157399 ("bpf: prevent
out-of-bounds speculation") enforces the wrong bound. In the first
variant, the mask is taken from the map with the significantly
larger number of entries, so a certain degree of out-of-bounds
speculation remains possible against the smaller map. In the second
variant, the mask is taken from the map with the smaller number of
entries, which yields buggy behavior since valid indices into the
larger map get truncated.
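
To make the numbers concrete, here is a minimal userspace sketch
(plain C, not the kernel implementation) of the masking arithmetic,
assuming index_mask is the next power of two of max_entries minus
one, which matches the disassembly above (0xff for 160 entries,
0x3 for 4):

  #include <stdio.h>

  /* next power of two of max_entries, minus one */
  static unsigned int index_mask(unsigned int max_entries)
  {
          unsigned int m = 1;

          while (m < max_entries)
                  m <<= 1;
          return m - 1;
  }

  int main(void)
  {
          /* Variant 1: the mask of the 160-entry map (0xff) is applied
           * while r2 may point to the 4-entry map; index 7 survives both
           * the mask (7 & 0xff == 7) and the 0xa0 bound check, so
           * speculation can run past the small map's 4 entries.
           */
          printf("7 & 0x%x = %u\n", index_mask(160), 7 & index_mask(160));

          /* Variant 2: the mask of the 4-entry map (0x3) is applied while
           * r2 may point to the 160-entry map; an index such as 130 is
           * either rejected by the 0x4 bound check or truncated to
           * 130 & 0x3 == 2, so entries beyond index 3 become unreachable.
           */
          printf("130 & 0x%x = %u\n", index_mask(4), 130 & index_mask(4));
          return 0;
  }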

The original intent of commit b2157399 was to reject such
occasions where two or more different tail call maps are used
in the same tail call helper invocation. However, the check on
BPF_MAP_PTR_POISON is never hit since we never poisoned the
saved pointer in the first place! We do this explicitly for map
lookups, but for tail calls we simply kept whichever tail call
map insn_aux_data recorded on the most recent path the verifier
walked. Thus any pointer that a prior path had stored in
insn_aux_data at the helper location was always overridden.
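
In simplified pseudo form, the difference between the old and new
recording strategy for the insn_aux_data slot looks roughly as
follows; the struct and function names here are illustrative, the
real helpers appear in the diff below:

  #include <stdio.h>

  #define MAP_PTR_POISON ((void *)-1L)

  struct aux_slot { void *map_ptr; };

  /* before the patch: the most recently walked path simply overrides */
  static void record_keep_last(struct aux_slot *aux, void *map)
  {
          aux->map_ptr = map;
  }

  /* after the patch: a second, different map poisons the slot for
   * good, so fixup_bpf_calls() can detect the collision later
   */
  static void record_or_poison(struct aux_slot *aux, void *map)
  {
          if (!aux->map_ptr)
                  aux->map_ptr = map;
          else if (aux->map_ptr != map)
                  aux->map_ptr = MAP_PTR_POISON;
  }

  int main(void)
  {
          int map_a, map_b;       /* stand-ins for two bpf_map objects */
          struct aux_slot old = { 0 }, new = { 0 };

          record_keep_last(&old, &map_a);
          record_keep_last(&old, &map_b); /* collision lost: map_b wins */

          record_or_poison(&new, &map_a);
          record_or_poison(&new, &map_b); /* collision kept: poisoned */

          printf("old: %s\n", old.map_ptr == MAP_PTR_POISON ?
                 "poisoned" : "last map wins");
          printf("new: %s\n", new.map_ptr == MAP_PTR_POISON ?
                 "poisoned" : "last map wins");
          return 0;
  }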

Fix it by moving the map pointer poison logic into a small helper
that covers both BPF helpers with the same logic. After that, the
poison check in fixup_bpf_calls() is hit for tail calls as well
and the program is rejected. The latter only happens in the
unprivileged case, since that is the *only* occasion where a
rewrite needs to happen, and where such a rewrite is specific to
the map (max_entries, index_mask). In the privileged case the
rewrite is generic for the insn->imm / insn->code update, so
multiple maps from different paths can be handled just fine since
all the remaining logic happens in the instruction processing
itself. This is similar to the case of map lookups: when maps
collide in fixup_bpf_calls(), we must skip the inlined rewrite
since it would turn the generic instruction sequence into a
non-generic one. Thus patch_call_imm simply updates the insn->imm
location, and bpf_map_lookup_elem() later takes care of the
dispatch.

Given we need this 'poison' state as a check, the information of
whether one of the maps is an unpriv_array would otherwise get
lost, so enforcing it prior to that needs additional state. This
check is needed in the first place since there are some complex,
tail-call-intensive BPF programs out there for which LLVM
occasionally generates such code. We therefore convert map_ptr
into map_state to store all of this without extra memory overhead;
the bit recording whether one of the colliding maps was an
unpriv_array is thus retained there as well.
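
The packing itself relies on map pointers having their low bit
clear (kernel objects are at least word-aligned), leaving bit 0
free for the unpriv flag; this is also why BPF_MAP_PTR_POISON
shifts its magic value left by one, as the BUILD_BUG_ON in the
diff below asserts. A minimal standalone sketch of such an
encoding, with illustrative names:

  #include <assert.h>
  #include <stdbool.h>
  #include <stdio.h>

  #define PTR_UNPRIV 1UL  /* stand-in for BPF_MAP_PTR_UNPRIV */

  static unsigned long pack(const void *map, bool unpriv)
  {
          /* relies on the pointer's low bit being clear */
          assert(((unsigned long)map & PTR_UNPRIV) == 0);
          return (unsigned long)map | (unpriv ? PTR_UNPRIV : 0UL);
  }

  static const void *unpack_ptr(unsigned long state)
  {
          return (const void *)(state & ~PTR_UNPRIV);
  }

  static bool unpack_unpriv(unsigned long state)
  {
          return state & PTR_UNPRIV;
  }

  int main(void)
  {
          static const int map;   /* aligned stand-in object */
          unsigned long state = pack(&map, true);

          printf("ptr ok: %d, unpriv: %d\n",
                 unpack_ptr(state) == (const void *)&map,
                 unpack_unpriv(state));
          return 0;
  }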

Fixes: b2157399 ("bpf: prevent out-of-bounds speculation")
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 1a2b80ec
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -142,7 +142,7 @@ struct bpf_verifier_state_list {
 struct bpf_insn_aux_data {
         union {
                 enum bpf_reg_type ptr_type;     /* pointer type for load/store insns */
-                struct bpf_map *map_ptr;        /* pointer for call insn into lookup_elem */
+                unsigned long map_state;        /* pointer/poison value for maps */
                 s32 call_imm;                   /* saved imm field of call insn */
         };
         int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -156,7 +156,29 @@ struct bpf_verifier_stack_elem {
 #define BPF_COMPLEXITY_LIMIT_INSNS      131072
 #define BPF_COMPLEXITY_LIMIT_STACK      1024
 
-#define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA)
+#define BPF_MAP_PTR_UNPRIV      1UL
+#define BPF_MAP_PTR_POISON      ((void *)((0xeB9FUL << 1) +    \
+                                          POISON_POINTER_DELTA))
+#define BPF_MAP_PTR(X)          ((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
+
+static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
+{
+        return BPF_MAP_PTR(aux->map_state) == BPF_MAP_PTR_POISON;
+}
+
+static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
+{
+        return aux->map_state & BPF_MAP_PTR_UNPRIV;
+}
+
+static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
+                              const struct bpf_map *map, bool unpriv)
+{
+        BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
+        unpriv |= bpf_map_ptr_unpriv(aux);
+        aux->map_state = (unsigned long)map |
+                         (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
+}
 
 struct bpf_call_arg_meta {
         struct bpf_map *map_ptr;
@@ -2333,6 +2355,29 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
         return 0;
 }
 
+static int
+record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
+                int func_id, int insn_idx)
+{
+        struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
+
+        if (func_id != BPF_FUNC_tail_call &&
+            func_id != BPF_FUNC_map_lookup_elem)
+                return 0;
+
+        if (meta->map_ptr == NULL) {
+                verbose(env, "kernel subsystem misconfigured verifier\n");
+                return -EINVAL;
+        }
+
+        if (!BPF_MAP_PTR(aux->map_state))
+                bpf_map_ptr_store(aux, meta->map_ptr,
+                                  meta->map_ptr->unpriv_array);
+        else if (BPF_MAP_PTR(aux->map_state) != meta->map_ptr)
+                bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
+                                  meta->map_ptr->unpriv_array);
+        return 0;
+}
+
 static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 {
         const struct bpf_func_proto *fn = NULL;
@@ -2387,13 +2432,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
         err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
         if (err)
                 return err;
-        if (func_id == BPF_FUNC_tail_call) {
-                if (meta.map_ptr == NULL) {
-                        verbose(env, "verifier bug\n");
-                        return -EINVAL;
-                }
-                env->insn_aux_data[insn_idx].map_ptr = meta.map_ptr;
-        }
         err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
         if (err)
                 return err;
@@ -2404,6 +2442,10 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
         if (err)
                 return err;
 
+        err = record_func_map(env, &meta, func_id, insn_idx);
+        if (err)
+                return err;
+
         /* Mark slots with STACK_MISC in case of raw mode, stack offset
          * is inferred from register state.
          */
@@ -2428,8 +2470,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
         } else if (fn->ret_type == RET_VOID) {
                 regs[BPF_REG_0].type = NOT_INIT;
         } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
-                struct bpf_insn_aux_data *insn_aux;
-
                 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
                 /* There is no offset yet applied, variable or fixed */
                 mark_reg_known_zero(env, regs, BPF_REG_0);
@@ -2445,11 +2485,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
                 }
                 regs[BPF_REG_0].map_ptr = meta.map_ptr;
                 regs[BPF_REG_0].id = ++env->id_gen;
-                insn_aux = &env->insn_aux_data[insn_idx];
-                if (!insn_aux->map_ptr)
-                        insn_aux->map_ptr = meta.map_ptr;
-                else if (insn_aux->map_ptr != meta.map_ptr)
-                        insn_aux->map_ptr = BPF_MAP_PTR_POISON;
         } else {
                 verbose(env, "unknown return type %d of func %s#%d\n",
                         fn->ret_type, func_id_name(func_id), func_id);
@@ -5417,6 +5452,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
         struct bpf_insn *insn = prog->insnsi;
         const struct bpf_func_proto *fn;
         const int insn_cnt = prog->len;
+        struct bpf_insn_aux_data *aux;
         struct bpf_insn insn_buf[16];
         struct bpf_prog *new_prog;
         struct bpf_map *map_ptr;
@@ -5491,19 +5527,22 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
                         insn->imm = 0;
                         insn->code = BPF_JMP | BPF_TAIL_CALL;
 
+                        aux = &env->insn_aux_data[i + delta];
+                        if (!bpf_map_ptr_unpriv(aux))
+                                continue;
+
                         /* instead of changing every JIT dealing with tail_call
                          * emit two extra insns:
                          * if (index >= max_entries) goto out;
                          * index &= array->index_mask;
                          * to avoid out-of-bounds cpu speculation
                          */
-                        map_ptr = env->insn_aux_data[i + delta].map_ptr;
-                        if (map_ptr == BPF_MAP_PTR_POISON) {
+                        if (bpf_map_ptr_poisoned(aux)) {
                                 verbose(env, "tail_call abusing map_ptr\n");
                                 return -EINVAL;
                         }
-                        if (!map_ptr->unpriv_array)
-                                continue;
+
+                        map_ptr = BPF_MAP_PTR(aux->map_state);
                         insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
                                                   map_ptr->max_entries, 2);
                         insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
@@ -5527,9 +5566,12 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
                  */
                 if (prog->jit_requested && BITS_PER_LONG == 64 &&
                     insn->imm == BPF_FUNC_map_lookup_elem) {
-                        map_ptr = env->insn_aux_data[i + delta].map_ptr;
-                        if (map_ptr == BPF_MAP_PTR_POISON ||
-                            !map_ptr->ops->map_gen_lookup)
+                        aux = &env->insn_aux_data[i + delta];
+                        if (bpf_map_ptr_poisoned(aux))
+                                goto patch_call_imm;
+
+                        map_ptr = BPF_MAP_PTR(aux->map_state);
+                        if (!map_ptr->ops->map_gen_lookup)
                                 goto patch_call_imm;
 
                         cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf);
...