Commit a67825f5 authored by Alexei Starovoitov

Merge branch 'prevent-oob-under-speculation'

Daniel Borkmann says:

====================
This set fixes an out-of-bounds case under speculative execution
by implementing masking of pointer ALU in the verifier. For
details, please see the individual patches.

Thanks!

v2 -> v3:
  - 8/9: change states_equal condition into old->speculative &&
    !cur->speculative, thanks Jakub!
  - 8/9: remove incorrect speculative state test in
    propagate_liveness(), thanks Jakub!
v1 -> v2:
  - Typo fixes in commit msg and a comment, thanks David!
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 8b6b25cf 80c9b2fa
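
The core of the fix is the alu_limit masking sequence that
fixup_bpf_calls() emits for unprivileged pointer arithmetic (see the
kernel/bpf/verifier.c hunks below). Branchlessly, it yields an all-ones
mask when the scalar offset lies within [0, alu_limit - 1] and an
all-zeroes mask otherwise, so an out-of-range offset is truncated to 0
even under speculation. A minimal C sketch of the same arithmetic (the
helper name and standalone types are illustrative, not part of the
patch):

  /* Mirrors the MOV32/SUB/OR/NEG/ARSH/AND sequence emitted by the
   * verifier: in-range offsets pass through unchanged, out-of-range
   * or negative offsets are forced to 0.
   */
  static unsigned long long mask_offset(unsigned long long off,
                                        unsigned int alu_limit)
  {
          unsigned long long ax;

          ax = alu_limit - 1;  /* BPF_MOV32_IMM(AX, alu_limit - 1)     */
          ax -= off;           /* sign bit set if off > alu_limit - 1  */
          ax |= off;           /* sign bit also set if off is negative */
          ax = -ax;            /* BPF_NEG                              */
          ax = (unsigned long long)((long long)ax >> 63); /* 0 or ~0ULL */
          return off & ax;     /* BPF_AND                              */
  }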
...
@@ -148,6 +148,7 @@ struct bpf_verifier_state {
/* call stack tracking */
struct bpf_func_state *frame[MAX_CALL_FRAMES];
u32 curframe;
bool speculative;
};
#define bpf_get_spilled_reg(slot, frame) \
...
@@ -167,15 +168,24 @@ struct bpf_verifier_state_list {
struct bpf_verifier_state_list *next;
};
/* Possible states for alu_state member. */
#define BPF_ALU_SANITIZE_SRC 1U
#define BPF_ALU_SANITIZE_DST 2U
#define BPF_ALU_NEG_VALUE (1U << 2)
#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \
BPF_ALU_SANITIZE_DST)
struct bpf_insn_aux_data {
union {
enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
unsigned long map_state; /* pointer/poison value for maps */
s32 call_imm; /* saved imm field of call insn */
u32 alu_limit; /* limit for add/sub register with pointer */
};
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
int sanitize_stack_off; /* stack slot to be cleared */
bool seen; /* this insn was processed by the verifier */
u8 alu_state; /* used in combination with alu_limit */
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
...
@@ -212,6 +222,8 @@ struct bpf_subprog_info {
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
u32 insn_idx;
u32 prev_insn_idx;
struct bpf_prog *prog; /* eBPF program being verified */
const struct bpf_verifier_ops *ops;
struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
...
...
@@ -53,14 +53,10 @@ struct sock_reuseport;
#define BPF_REG_D BPF_REG_8 /* data, callee-saved */
#define BPF_REG_H BPF_REG_9 /* hlen, callee-saved */
-/* Kernel hidden auxiliary/helper register for hardening step.
- * Only used by eBPF JITs. It's nothing more than a temporary
- * register that JITs use internally, only that here it's part
- * of eBPF instructions that have been rewritten for blinding
- * constants. See JIT pre-step in bpf_jit_blind_constants().
- */
/* Kernel hidden auxiliary/helper register. */
#define BPF_REG_AX MAX_BPF_REG
-#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1)
#define MAX_BPF_EXT_REG (MAX_BPF_REG + 1)
#define MAX_BPF_JIT_REG MAX_BPF_EXT_REG
/* unused opcode to mark special call to bpf_tail_call() helper */
#define BPF_TAIL_CALL 0xf0
...
...
@@ -54,6 +54,7 @@
#define DST regs[insn->dst_reg]
#define SRC regs[insn->src_reg]
#define FP regs[BPF_REG_FP]
#define AX regs[BPF_REG_AX]
#define ARG1 regs[BPF_REG_ARG1]
#define CTX regs[BPF_REG_CTX]
#define IMM insn->imm
...
@@ -857,6 +858,26 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
/* Constraints on AX register:
*
* AX register is inaccessible from user space. It is mapped in
* all JITs, and used here for constant blinding rewrites. It is
* typically "stateless" meaning its contents are only valid within
* the executed instruction, but not across several instructions.
* There are a few exceptions however which are further detailed
* below.
*
* Constant blinding is only used by JITs, not in the interpreter.
* The interpreter uses AX in some occasions as a local temporary
* register e.g. in DIV or MOD instructions.
*
* In restricted circumstances, the verifier can also use the AX
* register for rewrites as long as they do not interfere with
* the above cases!
*/
if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
goto out;
if (from->imm == 0 &&
(from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
...
@@ -1188,7 +1209,6 @@ bool bpf_opcode_in_insntable(u8 code)
 */
static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
{
-u64 tmp;
#define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
static const void *jumptable[256] = {
...
@@ -1268,36 +1288,36 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
(*(s64 *) &DST) >>= IMM;
CONT;
ALU64_MOD_X:
-div64_u64_rem(DST, SRC, &tmp);
-DST = tmp;
div64_u64_rem(DST, SRC, &AX);
DST = AX;
CONT;
ALU_MOD_X:
-tmp = (u32) DST;
-DST = do_div(tmp, (u32) SRC);
AX = (u32) DST;
DST = do_div(AX, (u32) SRC);
CONT;
ALU64_MOD_K:
-div64_u64_rem(DST, IMM, &tmp);
-DST = tmp;
div64_u64_rem(DST, IMM, &AX);
DST = AX;
CONT;
ALU_MOD_K:
-tmp = (u32) DST;
-DST = do_div(tmp, (u32) IMM);
AX = (u32) DST;
DST = do_div(AX, (u32) IMM);
CONT;
ALU64_DIV_X:
DST = div64_u64(DST, SRC);
CONT;
ALU_DIV_X:
-tmp = (u32) DST;
-do_div(tmp, (u32) SRC);
-DST = (u32) tmp;
AX = (u32) DST;
do_div(AX, (u32) SRC);
DST = (u32) AX;
CONT;
ALU64_DIV_K:
DST = div64_u64(DST, IMM);
CONT;
ALU_DIV_K:
-tmp = (u32) DST;
-do_div(tmp, (u32) IMM);
-DST = (u32) tmp;
AX = (u32) DST;
do_div(AX, (u32) IMM);
DST = (u32) AX;
CONT;
ALU_END_TO_BE:
switch (IMM) {
...
@@ -1553,7 +1573,7 @@ STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
{ \
u64 stack[stack_size / sizeof(u64)]; \
-u64 regs[MAX_BPF_REG]; \
u64 regs[MAX_BPF_EXT_REG]; \
\
FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
ARG1 = (u64) (unsigned long) ctx; \
...
@@ -1566,7 +1586,7 @@ static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
const struct bpf_insn *insn) \
{ \
u64 stack[stack_size / sizeof(u64)]; \
-u64 regs[MAX_BPF_REG]; \
u64 regs[MAX_BPF_EXT_REG]; \
\
FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
BPF_R1 = r1; \
...
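
For readers unfamiliar with constant blinding, the AX usage described
in the comment above works roughly as follows: a K-type (immediate)
instruction is rewritten so the user-chosen constant never appears
verbatim in the JITed image. A sketch modeled on bpf_jit_blind_insn()
(the helper name is illustrative; imm_rnd is a fresh random value):

  #include <linux/filter.h>

  /* Blind "MOV32 dst, imm": at runtime dst receives
   * (imm_rnd ^ imm) ^ imm_rnd == imm, but imm itself is never
   * encoded in the instruction stream.
   */
  static int blind_mov32_imm(const struct bpf_insn *from,
                             struct bpf_insn *to, u32 imm_rnd)
  {
          to[0] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
          to[1] = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
          to[2] = BPF_MOV32_REG(from->dst_reg, BPF_REG_AX);
          return 3; /* number of insns emitted */
  }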
...
@@ -710,6 +710,7 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
free_func_state(dst_state->frame[i]);
dst_state->frame[i] = NULL;
}
dst_state->speculative = src->speculative;
dst_state->curframe = src->curframe;
for (i = 0; i <= src->curframe; i++) {
dst = dst_state->frame[i];
...
@@ -754,7 +755,8 @@ static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
}
static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
-int insn_idx, int prev_insn_idx)
int insn_idx, int prev_insn_idx,
bool speculative)
{
struct bpf_verifier_state *cur = env->cur_state;
struct bpf_verifier_stack_elem *elem;
...
@@ -772,6 +774,7 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
err = copy_verifier_state(&elem->st, cur);
if (err)
goto err;
elem->st.speculative |= speculative;
if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
verbose(env, "BPF program is too complex\n");
goto err;
...
@@ -1387,6 +1390,31 @@ static int check_stack_read(struct bpf_verifier_env *env,
}
}
static int check_stack_access(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
int off, int size)
{
/* Stack accesses must be at a fixed offset, so that we
* can determine what type of data were returned. See
* check_stack_read().
*/
if (!tnum_is_const(reg->var_off)) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, "variable stack access var_off=%s off=%d size=%d",
tn_buf, off, size);
return -EACCES;
}
if (off >= 0 || off < -MAX_BPF_STACK) {
verbose(env, "invalid stack off=%d size=%d\n", off, size);
return -EACCES;
}
return 0;
}
/* check read/write into map element returned by bpf_map_lookup_elem() */
static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
int size, bool zero_size_allowed)
...
@@ -1418,13 +1446,17 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno,
 */
if (env->log.level)
print_verifier_state(env, state);
/* The minimum value is only important with signed
 * comparisons where we can't assume the floor of a
 * value is 0. If we are using signed variables for our
 * index'es we need to make sure that whatever we use
 * will have a set floor within our range.
 */
-if (reg->smin_value < 0) {
if (reg->smin_value < 0 &&
(reg->smin_value == S64_MIN ||
(off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
reg->smin_value + off < 0)) {
verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
regno);
return -EACCES;
...
@@ -1954,24 +1986,10 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
}
} else if (reg->type == PTR_TO_STACK) {
-/* stack accesses must be at a fixed offset, so that we can
- * determine what type of data were returned.
- * See check_stack_read().
- */
-if (!tnum_is_const(reg->var_off)) {
-char tn_buf[48];
-tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
-verbose(env, "variable stack access var_off=%s off=%d size=%d",
-tn_buf, off, size);
-return -EACCES;
-}
off += reg->var_off.value;
-if (off >= 0 || off < -MAX_BPF_STACK) {
-verbose(env, "invalid stack off=%d size=%d\n", off,
-size);
-return -EACCES;
-}
err = check_stack_access(env, reg, off, size);
if (err)
return err;
state = func(env, reg);
err = update_stack_depth(env, state, off);
...
@@ -3052,6 +3070,102 @@ static bool check_reg_sane_offset(struct bpf_verifier_env *env,
return true;
}
static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
{
return &env->insn_aux_data[env->insn_idx];
}
static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
u32 *ptr_limit, u8 opcode, bool off_is_neg)
{
bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
(opcode == BPF_SUB && !off_is_neg);
u32 off;
switch (ptr_reg->type) {
case PTR_TO_STACK:
off = ptr_reg->off + ptr_reg->var_off.value;
if (mask_to_left)
*ptr_limit = MAX_BPF_STACK + off;
else
*ptr_limit = -off;
return 0;
case PTR_TO_MAP_VALUE:
if (mask_to_left) {
*ptr_limit = ptr_reg->umax_value + ptr_reg->off;
} else {
off = ptr_reg->smin_value + ptr_reg->off;
*ptr_limit = ptr_reg->map_ptr->value_size - off;
}
return 0;
default:
return -EINVAL;
}
}
static int sanitize_ptr_alu(struct bpf_verifier_env *env,
struct bpf_insn *insn,
const struct bpf_reg_state *ptr_reg,
struct bpf_reg_state *dst_reg,
bool off_is_neg)
{
struct bpf_verifier_state *vstate = env->cur_state;
struct bpf_insn_aux_data *aux = cur_aux(env);
bool ptr_is_dst_reg = ptr_reg == dst_reg;
u8 opcode = BPF_OP(insn->code);
u32 alu_state, alu_limit;
struct bpf_reg_state tmp;
bool ret;
if (env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K)
return 0;
/* We already marked aux for masking from non-speculative
* paths, thus we got here in the first place. We only care
* to explore bad access from here.
*/
if (vstate->speculative)
goto do_sim;
alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
alu_state |= ptr_is_dst_reg ?
BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
return 0;
/* If we arrived here from different branches with different
* limits to sanitize, then this won't work.
*/
if (aux->alu_state &&
(aux->alu_state != alu_state ||
aux->alu_limit != alu_limit))
return -EACCES;
/* Corresponding fixup done in fixup_bpf_calls(). */
aux->alu_state = alu_state;
aux->alu_limit = alu_limit;
do_sim:
/* Simulate and find potential out-of-bounds access under
* speculative execution from truncation as a result of
* masking when off was not within expected range. If off
* sits in dst, then we temporarily need to move ptr there
* to simulate dst (== 0) +/-= ptr. Needed, for example,
* for cases where we use K-based arithmetic in one direction
* and truncated reg-based in the other in order to explore
* bad access.
*/
if (!ptr_is_dst_reg) {
tmp = *dst_reg;
*dst_reg = *ptr_reg;
}
ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
if (!ptr_is_dst_reg)
*dst_reg = tmp;
return !ret ? -EFAULT : 0;
}
/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
 * Caller should also handle BPF_MOV case separately.
 * If we return -EACCES, caller may want to try again treating pointer as a
...
@@ -3070,8 +3184,9 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
u32 dst = insn->dst_reg, src = insn->src_reg;
u8 opcode = BPF_OP(insn->code);
-u32 dst = insn->dst_reg;
int ret;
dst_reg = &regs[dst];
...
@@ -3104,6 +3219,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
verbose(env, "R%d pointer arithmetic on %s prohibited\n",
dst, reg_type_str[ptr_reg->type]);
return -EACCES;
case PTR_TO_MAP_VALUE:
if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
off_reg == dst_reg ? dst : src);
return -EACCES;
}
/* fall-through */
default:
break;
}
...
@@ -3120,6 +3242,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
switch (opcode) {
case BPF_ADD:
ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
if (ret < 0) {
verbose(env, "R%d tried to add from different maps or paths\n", dst);
return ret;
}
/* We can take a fixed offset as long as it doesn't overflow
 * the s32 'off' field
 */
...
@@ -3170,6 +3297,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
}
break;
case BPF_SUB:
ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
if (ret < 0) {
verbose(env, "R%d tried to sub from different maps or paths\n", dst);
return ret;
}
if (dst_reg == off_reg) {
/* scalar -= pointer. Creates an unknown scalar */
verbose(env, "R%d tried to subtract pointer from scalar\n",
...
@@ -3249,6 +3381,25 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
__update_reg_bounds(dst_reg);
__reg_deduce_bounds(dst_reg);
__reg_bound_offset(dst_reg);
/* For unprivileged we require that resulting offset must be in bounds
* in order to be able to sanitize access later on.
*/
if (!env->allow_ptr_leaks) {
if (dst_reg->type == PTR_TO_MAP_VALUE &&
check_map_access(env, dst, dst_reg->off, 1, false)) {
verbose(env, "R%d pointer arithmetic of map value goes out of range, "
"prohibited for !root\n", dst);
return -EACCES;
} else if (dst_reg->type == PTR_TO_STACK &&
check_stack_access(env, dst_reg, dst_reg->off +
dst_reg->var_off.value, 1)) {
verbose(env, "R%d stack pointer arithmetic goes out of range, "
"prohibited for !root\n", dst);
return -EACCES;
}
}
return 0;
}
...
@@ -4348,7 +4499,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
}
}
-other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
false);
if (!other_branch)
return -EFAULT;
other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
...
@@ -5458,6 +5610,12 @@ static bool states_equal(struct bpf_verifier_env *env,
if (old->curframe != cur->curframe)
return false;
/* Verification state from speculative execution simulation
* must never prune a non-speculative execution one.
*/
if (old->speculative && !cur->speculative)
return false;
/* for states to be equal callsites have to be the same
 * and all frame states need to be equivalent
 */
...
@@ -5650,7 +5808,6 @@ static int do_check(struct bpf_verifier_env *env)
struct bpf_insn *insns = env->prog->insnsi;
struct bpf_reg_state *regs;
int insn_cnt = env->prog->len, i;
-int insn_idx, prev_insn_idx = 0;
int insn_processed = 0;
bool do_print_state = false;
...
@@ -5660,6 +5817,7 @@
if (!state)
return -ENOMEM;
state->curframe = 0;
state->speculative = false;
state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
if (!state->frame[0]) {
kfree(state);
...
@@ -5670,19 +5828,19 @@
BPF_MAIN_FUNC /* callsite */,
0 /* frameno */,
0 /* subprogno, zero == main subprog */);
-insn_idx = 0;
for (;;) {
struct bpf_insn *insn;
u8 class;
int err;
-if (insn_idx >= insn_cnt) {
if (env->insn_idx >= insn_cnt) {
verbose(env, "invalid insn idx %d insn_cnt %d\n",
-insn_idx, insn_cnt);
env->insn_idx, insn_cnt);
return -EFAULT;
}
-insn = &insns[insn_idx];
insn = &insns[env->insn_idx];
class = BPF_CLASS(insn->code);
if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
...
@@ -5692,17 +5850,19 @@
return -E2BIG;
}
-err = is_state_visited(env, insn_idx);
err = is_state_visited(env, env->insn_idx);
if (err < 0)
return err;
if (err == 1) {
/* found equivalent state, can prune the search */
if (env->log.level) {
if (do_print_state)
-verbose(env, "\nfrom %d to %d: safe\n",
-prev_insn_idx, insn_idx);
verbose(env, "\nfrom %d to %d%s: safe\n",
env->prev_insn_idx, env->insn_idx,
env->cur_state->speculative ?
" (speculative execution)" : "");
else
-verbose(env, "%d: safe\n", insn_idx);
verbose(env, "%d: safe\n", env->insn_idx);
}
goto process_bpf_exit;
}
...
@@ -5715,10 +5875,12 @@
if (env->log.level > 1 || (env->log.level && do_print_state)) {
if (env->log.level > 1)
-verbose(env, "%d:", insn_idx);
verbose(env, "%d:", env->insn_idx);
else
-verbose(env, "\nfrom %d to %d:",
-prev_insn_idx, insn_idx);
verbose(env, "\nfrom %d to %d%s:",
env->prev_insn_idx, env->insn_idx,
env->cur_state->speculative ?
" (speculative execution)" : "");
print_verifier_state(env, state->frame[state->curframe]);
do_print_state = false;
}
...
@@ -5729,20 +5891,20 @@
.private_data = env,
};
-verbose_linfo(env, insn_idx, "; ");
-verbose(env, "%d: ", insn_idx);
verbose_linfo(env, env->insn_idx, "; ");
verbose(env, "%d: ", env->insn_idx);
print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
}
if (bpf_prog_is_dev_bound(env->prog->aux)) {
-err = bpf_prog_offload_verify_insn(env, insn_idx,
-prev_insn_idx);
err = bpf_prog_offload_verify_insn(env, env->insn_idx,
env->prev_insn_idx);
if (err)
return err;
}
regs = cur_regs(env);
-env->insn_aux_data[insn_idx].seen = true;
env->insn_aux_data[env->insn_idx].seen = true;
if (class == BPF_ALU || class == BPF_ALU64) {
err = check_alu_op(env, insn);
...
@@ -5768,13 +5930,13 @@
/* check that memory (src_reg + off) is readable,
 * the state of dst_reg will be updated by this func
 */
-err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
-BPF_SIZE(insn->code), BPF_READ,
-insn->dst_reg, false);
err = check_mem_access(env, env->insn_idx, insn->src_reg,
insn->off, BPF_SIZE(insn->code),
BPF_READ, insn->dst_reg, false);
if (err)
return err;
-prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;
prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
if (*prev_src_type == NOT_INIT) {
/* saw a valid insn
...
@@ -5799,10 +5961,10 @@
enum bpf_reg_type *prev_dst_type, dst_reg_type;
if (BPF_MODE(insn->code) == BPF_XADD) {
-err = check_xadd(env, insn_idx, insn);
err = check_xadd(env, env->insn_idx, insn);
if (err)
return err;
-insn_idx++;
env->insn_idx++;
continue;
}
...
@@ -5818,13 +5980,13 @@
dst_reg_type = regs[insn->dst_reg].type;
/* check that memory (dst_reg + off) is writeable */
-err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
-BPF_SIZE(insn->code), BPF_WRITE,
-insn->src_reg, false);
err = check_mem_access(env, env->insn_idx, insn->dst_reg,
insn->off, BPF_SIZE(insn->code),
BPF_WRITE, insn->src_reg, false);
if (err)
return err;
-prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type;
prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
if (*prev_dst_type == NOT_INIT) {
*prev_dst_type = dst_reg_type;
...
@@ -5852,9 +6014,9 @@
}
/* check that memory (dst_reg + off) is writeable */
-err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
-BPF_SIZE(insn->code), BPF_WRITE,
--1, false);
err = check_mem_access(env, env->insn_idx, insn->dst_reg,
insn->off, BPF_SIZE(insn->code),
BPF_WRITE, -1, false);
if (err)
return err;
...
@@ -5872,9 +6034,9 @@
}
if (insn->src_reg == BPF_PSEUDO_CALL)
-err = check_func_call(env, insn, &insn_idx);
err = check_func_call(env, insn, &env->insn_idx);
else
-err = check_helper_call(env, insn->imm, insn_idx);
err = check_helper_call(env, insn->imm, env->insn_idx);
if (err)
return err;
...
@@ -5887,7 +6049,7 @@
return -EINVAL;
}
-insn_idx += insn->off + 1;
env->insn_idx += insn->off + 1;
continue;
} else if (opcode == BPF_EXIT) {
...
@@ -5901,8 +6063,8 @@
if (state->curframe) {
/* exit from nested function */
-prev_insn_idx = insn_idx;
-err = prepare_func_exit(env, &insn_idx);
env->prev_insn_idx = env->insn_idx;
err = prepare_func_exit(env, &env->insn_idx);
if (err)
return err;
do_print_state = true;
...
@@ -5932,7 +6094,8 @@
if (err)
return err;
process_bpf_exit:
-err = pop_stack(env, &prev_insn_idx, &insn_idx);
err = pop_stack(env, &env->prev_insn_idx,
&env->insn_idx);
if (err < 0) {
if (err != -ENOENT)
return err;
...
@@ -5942,7 +6105,7 @@
continue;
}
} else {
-err = check_cond_jmp_op(env, insn, &insn_idx);
err = check_cond_jmp_op(env, insn, &env->insn_idx);
if (err)
return err;
}
...
@@ -5959,8 +6122,8 @@
if (err)
return err;
-insn_idx++;
-env->insn_aux_data[insn_idx].seen = true;
env->insn_idx++;
env->insn_aux_data[env->insn_idx].seen = true;
} else {
verbose(env, "invalid BPF_LD mode\n");
return -EINVAL;
...
@@ -5970,7 +6133,7 @@
return -EINVAL;
}
-insn_idx++;
env->insn_idx++;
}
verbose(env, "processed %d insns (limit %d), stack depth ",
...
@@ -6709,6 +6872,57 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
continue;
}
if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
struct bpf_insn insn_buf[16];
struct bpf_insn *patch = &insn_buf[0];
bool issrc, isneg;
u32 off_reg;
aux = &env->insn_aux_data[i + delta];
if (!aux->alu_state)
continue;
isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
BPF_ALU_SANITIZE_SRC;
off_reg = issrc ? insn->src_reg : insn->dst_reg;
if (isneg)
*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
if (issrc) {
*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
off_reg);
insn->src_reg = BPF_REG_AX;
} else {
*patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
BPF_REG_AX);
}
if (isneg)
insn->code = insn->code == code_add ?
code_sub : code_add;
*patch++ = *insn;
if (issrc && isneg)
*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
cnt = patch - insn_buf;
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
if (insn->code != (BPF_JMP | BPF_CALL))
continue;
if (insn->src_reg == BPF_PSEUDO_CALL)
...
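
To make retrieve_ptr_limit() above concrete for the stack case: a
PTR_TO_STACK pointer at fp-16 may move down by at most
MAX_BPF_STACK - 16 bytes before leaving the stack, and up by at most
16 bytes before reaching fp. A standalone re-derivation (illustrative
only; the function name is not kernel code, and MAX_BPF_STACK is fixed
at its mainline value of 512):

  #define MAX_BPF_STACK 512

  /* ptr_off is ptr_reg->off + ptr_reg->var_off.value and is <= 0;
   * mask_to_left says whether the ALU op moves the pointer further
   * down the stack.
   */
  static unsigned int stack_ptr_limit(int ptr_off, int mask_to_left)
  {
          if (mask_to_left)
                  return MAX_BPF_STACK + ptr_off; /* fp-16 -> 496 */
          return -ptr_off;                        /* fp-16 -> 16  */
  }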
...
@@ -23,6 +23,7 @@
#include <stdbool.h>
#include <sched.h>
#include <limits.h>
#include <assert.h>
#include <sys/capability.h>
...
@@ -2577,6 +2578,7 @@ static struct bpf_test tests[] = {
},
.result = REJECT,
.errstr = "invalid stack off=-79992 size=8",
.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
},
{
"PTR_TO_STACK store/load - out of bounds high",
...
@@ -3104,6 +3106,8 @@ static struct bpf_test tests[] = {
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
.result_unpriv = REJECT,
.result = ACCEPT,
},
{
...
@@ -3205,6 +3209,243 @@ static struct bpf_test tests[] = {
/* Verifier rewrite for unpriv skips tail call here. */
.retval_unpriv = 2,
},
{
"PTR_TO_STACK check high 1",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 42,
},
{
"PTR_TO_STACK check high 2",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 42,
},
{
"PTR_TO_STACK check high 3",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 42,
},
{
"PTR_TO_STACK check high 4",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
.errstr = "invalid stack off=0 size=1",
.result = REJECT,
},
{
"PTR_TO_STACK check high 5",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack off",
},
{
"PTR_TO_STACK check high 6",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack off",
},
{
"PTR_TO_STACK check high 7",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
.errstr = "fp pointer offset",
},
{
"PTR_TO_STACK check low 1",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -512),
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 42,
},
{
"PTR_TO_STACK check low 2",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
BPF_ST_MEM(BPF_B, BPF_REG_1, 1, 42),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1),
BPF_EXIT_INSN(),
},
.result_unpriv = REJECT,
.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
.result = ACCEPT,
.retval = 42,
},
{
"PTR_TO_STACK check low 3",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
.errstr = "invalid stack off=-513 size=1",
.result = REJECT,
},
{
"PTR_TO_STACK check low 4",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, INT_MIN),
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "math between fp pointer",
},
{
"PTR_TO_STACK check low 5",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack off",
},
{
"PTR_TO_STACK check low 6",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid stack off",
},
{
"PTR_TO_STACK check low 7",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
.errstr = "fp pointer offset",
},
{
"PTR_TO_STACK mixed reg/k, 1",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
BPF_MOV64_IMM(BPF_REG_2, -3),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 42,
},
{
"PTR_TO_STACK mixed reg/k, 2",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
BPF_MOV64_IMM(BPF_REG_2, -3),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_5, -6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 42,
},
{
"PTR_TO_STACK mixed reg/k, 3",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
BPF_MOV64_IMM(BPF_REG_2, -3),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = -3,
},
{
"PTR_TO_STACK reg",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_MOV64_IMM(BPF_REG_2, -3),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.result_unpriv = REJECT,
.errstr_unpriv = "invalid stack off=0 size=1",
.result = ACCEPT,
.retval = 42,
},
{
"stack pointer arithmetic",
.insns = {
...
@@ -6610,7 +6851,446 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"map access: known scalar += value_ptr", "map access: known scalar += value_ptr from different maps",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, len)),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_MOV64_IMM(BPF_REG_1, 4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_map_hash_16b = { 5 },
.fixup_map_array_48b = { 8 },
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R1 tried to add from different maps",
.retval = 1,
},
{
"map access: value_ptr -= known scalar from different maps",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, len)),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_IMM(BPF_REG_1, 4),
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_map_hash_16b = { 5 },
.fixup_map_array_48b = { 8 },
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R0 min value is outside of the array range",
.retval = 1,
},
{
"map access: known scalar += value_ptr from different maps, but same value properties",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, len)),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_MOV64_IMM(BPF_REG_1, 4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_map_hash_48b = { 5 },
.fixup_map_array_48b = { 8 },
.result = ACCEPT,
.retval = 1,
},
{
"map access: value_ptr += known scalar, upper oob arith, test 1",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_IMM(BPF_REG_1, 48),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 3 },
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
.retval = 1,
},
{
"map access: value_ptr += known scalar, upper oob arith, test 2",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_IMM(BPF_REG_1, 49),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 3 },
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
.retval = 1,
},
{
"map access: value_ptr += known scalar, upper oob arith, test 3",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_IMM(BPF_REG_1, 47),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 3 },
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
.retval = 1,
},
{
"map access: value_ptr -= known scalar, lower oob arith, test 1",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
BPF_MOV64_IMM(BPF_REG_1, 47),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_1, 48),
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 3 },
.result = REJECT,
.errstr = "R0 min value is outside of the array range",
.result_unpriv = REJECT,
.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
},
{
"map access: value_ptr -= known scalar, lower oob arith, test 2",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
BPF_MOV64_IMM(BPF_REG_1, 47),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_1, 48),
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 3 },
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
.retval = 1,
},
{
"map access: value_ptr -= known scalar, lower oob arith, test 3",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
BPF_MOV64_IMM(BPF_REG_1, 47),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_1, 47),
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 3 },
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
.retval = 1,
},
{
"map access: known scalar += value_ptr",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_MOV64_IMM(BPF_REG_1, 4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 3 },
.result = ACCEPT,
.retval = 1,
},
{
"map access: value_ptr += known scalar, 1",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_MOV64_IMM(BPF_REG_1, 4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 3 },
.result = ACCEPT,
.retval = 1,
},
{
"map access: value_ptr += known scalar, 2",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_MOV64_IMM(BPF_REG_1, 49),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 3 },
.result = REJECT,
.errstr = "invalid access to map value",
},
{
"map access: value_ptr += known scalar, 3",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_MOV64_IMM(BPF_REG_1, -1),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 3 },
.result = REJECT,
.errstr = "invalid access to map value",
},
{
"map access: value_ptr += known scalar, 4",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
BPF_MOV64_IMM(BPF_REG_1, 5),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_1, -2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_1, -1),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 3 },
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
.retval = 1,
},
{
"map access: value_ptr += known scalar, 5",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
BPF_MOV64_IMM(BPF_REG_1, (6 + 1) * sizeof(int)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 3 },
.result = ACCEPT,
.retval = 0xabcdef12,
},
{
"map access: value_ptr += known scalar, 6",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
BPF_MOV64_IMM(BPF_REG_1, (3 + 1) * sizeof(int)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_1, 3 * sizeof(int)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 3 },
.result = ACCEPT,
.retval = 0xabcdef12,
},
{
"map access: unknown scalar += value_ptr, 1",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 3 },
.result = ACCEPT,
.retval = 1,
},
{
"map access: unknown scalar += value_ptr, 2",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 3 },
.result = ACCEPT,
.retval = 0xabcdef12,
},
{
"map access: unknown scalar += value_ptr, 3",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
BPF_MOV64_IMM(BPF_REG_1, -1),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 3 },
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
.retval = 0xabcdef12,
},
{
"map access: unknown scalar += value_ptr, 4",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
...
@@ -6618,19 +7298,22 @@ static struct bpf_test tests[] = {
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
-BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
-BPF_MOV64_IMM(BPF_REG_1, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_MOV64_IMM(BPF_REG_1, 19),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
-BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
-BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.fixup_map_array_48b = { 3 },
-.result = ACCEPT,
-.retval = 1,
.result = REJECT,
.errstr = "R1 max value is outside of the array range",
.errstr_unpriv = "R1 pointer arithmetic of map value goes out of range",
},
{
-"map access: value_ptr += known scalar",
"map access: value_ptr += unknown scalar, 1",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
...
@@ -6638,8 +7321,9 @@ static struct bpf_test tests[] = {
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
-BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
-BPF_MOV64_IMM(BPF_REG_1, 4),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
...
@@ -6650,7 +7334,7 @@ static struct bpf_test tests[] = {
.retval = 1,
},
{ {
"map access: unknown scalar += value_ptr", "map access: value_ptr += unknown scalar, 2",
.insns = { .insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
...@@ -6659,19 +7343,18 @@ static struct bpf_test tests[] = { ...@@ -6659,19 +7343,18 @@ static struct bpf_test tests[] = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem), BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(), BPF_EXIT_INSN(),
}, },
.fixup_map_array_48b = { 3 }, .fixup_map_array_48b = { 3 },
.result = ACCEPT, .result = ACCEPT,
.retval = 1, .retval = 0xabcdef12,
}, },
{ {
"map access: value_ptr += unknown scalar", "map access: value_ptr += unknown scalar, 3",
.insns = { .insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
...@@ -6679,13 +7362,20 @@ static struct bpf_test tests[] = { ...@@ -6679,13 +7362,20 @@ static struct bpf_test tests[] = {
BPF_LD_MAP_FD(BPF_REG_1, 0), BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem), BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 8),
BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 16),
BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), BPF_ALU64_IMM(BPF_OR, BPF_REG_3, 1),
BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1), BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(), BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_JMP_IMM(BPF_JA, 0, 0, -3),
}, },
.fixup_map_array_48b = { 3 }, .fixup_map_array_48b = { 3 },
.result = ACCEPT, .result = ACCEPT,
...@@ -6770,6 +7460,8 @@ static struct bpf_test tests[] = { ...@@ -6770,6 +7460,8 @@ static struct bpf_test tests[] = {
}, },
.fixup_map_array_48b = { 3 }, .fixup_map_array_48b = { 3 },
.result = ACCEPT, .result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
.retval = 1, .retval = 1,
}, },
{ {
...@@ -6837,6 +7529,8 @@ static struct bpf_test tests[] = { ...@@ -6837,6 +7529,8 @@ static struct bpf_test tests[] = {
}, },
.fixup_map_array_48b = { 3 }, .fixup_map_array_48b = { 3 },
.result = ACCEPT, .result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
.retval = 1, .retval = 1,
}, },
{
@@ -8376,6 +9070,7 @@ static struct bpf_test tests[] = {
},
.fixup_map_hash_8b = { 3 },
.errstr = "unbounded min value",
.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
.result = REJECT,
},
{
@@ -8400,6 +9095,7 @@ static struct bpf_test tests[] = {
},
.fixup_map_hash_8b = { 3 },
.errstr = "unbounded min value",
.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
.result = REJECT,
},
{
@@ -8426,6 +9122,7 @@ static struct bpf_test tests[] = {
},
.fixup_map_hash_8b = { 3 },
.errstr = "unbounded min value",
.errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
.result = REJECT,
},
{
@@ -8451,6 +9148,7 @@ static struct bpf_test tests[] = {
},
.fixup_map_hash_8b = { 3 },
.errstr = "unbounded min value",
.errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
.result = REJECT,
},
{
@@ -8499,6 +9197,7 @@ static struct bpf_test tests[] = {
},
.fixup_map_hash_8b = { 3 },
.errstr = "unbounded min value",
.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
.result = REJECT,
},
{
@@ -8570,6 +9269,7 @@ static struct bpf_test tests[] = {
},
.fixup_map_hash_8b = { 3 },
.errstr = "unbounded min value",
.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
.result = REJECT,
},
{
@@ -8621,6 +9321,7 @@ static struct bpf_test tests[] = {
},
.fixup_map_hash_8b = { 3 },
.errstr = "unbounded min value",
.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
.result = REJECT,
},
{
@@ -8648,6 +9349,7 @@ static struct bpf_test tests[] = {
},
.fixup_map_hash_8b = { 3 },
.errstr = "unbounded min value",
.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
.result = REJECT,
},
{
@@ -8674,6 +9376,7 @@ static struct bpf_test tests[] = {
},
.fixup_map_hash_8b = { 3 },
.errstr = "unbounded min value",
.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
.result = REJECT,
},
{
@@ -8703,6 +9406,7 @@ static struct bpf_test tests[] = {
},
.fixup_map_hash_8b = { 3 },
.errstr = "unbounded min value",
.errstr_unpriv = "R7 has unknown scalar with mixed signed bounds",
.result = REJECT,
},
{
@@ -8733,6 +9437,7 @@ static struct bpf_test tests[] = {
},
.fixup_map_hash_8b = { 4 },
.errstr = "unbounded min value",
.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
.result = REJECT,
},
{
@@ -8761,6 +9466,7 @@ static struct bpf_test tests[] = {
},
.fixup_map_hash_8b = { 3 },
.errstr = "unbounded min value",
.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
.result = REJECT,
.result_unpriv = REJECT,
},
@@ -8813,8 +9519,38 @@ static struct bpf_test tests[] = {
},
.fixup_map_hash_8b = { 3 },
.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
.result = REJECT,
},
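Every errstr_unpriv added in this block names the same hazard: a scalar whose tracked range spans both negative and non-negative values ("mixed signed bounds") cannot be made safe by a single signed comparison before it reaches map pointer arithmetic. A minimal illustration in plain C (hypothetical, not from the patch):

#include <stdio.h>

/* A signed upper bound alone does not pin the range: idx == -1 passes
 * "idx < 2" yet would index before the array. Both ends must be checked,
 * which is what the verifier demands of such scalars.
 */
static int idx_is_safe(long idx)
{
	return idx >= 0 && idx < 2;
}

int main(void)
{
	printf("idx == -1 passes 'idx < 2'? %s\n", -1L < 2 ? "yes" : "no");
	printf("idx == -1 actually safe?    %s\n", idx_is_safe(-1) ? "yes" : "no");
	return 0;
}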
{
"check subtraction on pointers for unpriv",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 9),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_MOV64_REG(BPF_REG_9, BPF_REG_FP),
BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_0),
BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 1, 9 },
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R9 pointer -= pointer prohibited",
},
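The test above exercises the unprivileged rule that subtracting one pointer from another yields a scalar encoding kernel addresses, which must not be stored where user space can read it back (hence "R9 pointer -= pointer prohibited"). The leak it guards against, sketched in userspace C (illustrative only):

#include <stdio.h>

int main(void)
{
	long on_stack;
	static long in_bss;
	/* knowing delta plus one object's address reveals the other; in the
	 * BPF case, fp - map_value written into a map would expose kernel
	 * layout to user space
	 */
	long delta = (long)&on_stack - (long)&in_bss;

	printf("delta = %ld\n", delta);
	return 0;
}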
{
"bounds check based on zero-extended MOV",
.insns = {
@@ -9145,6 +9881,36 @@ static struct bpf_test tests[] = {
.errstr = "R0 unbounded memory access",
.result = REJECT
},
{
"bounds check after 32-bit right shift with 64-bit input",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
/* r1 = 2 */
BPF_MOV64_IMM(BPF_REG_1, 2),
/* r1 = 1<<32 */
BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 31),
/* r1 = 0 (NOT 2!) */
BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 31),
/* r1 = 0xffff'fffe (NOT 0!) */
BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 2),
/* computes OOB pointer */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
/* OOB access */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
/* exit */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 3 },
.errstr = "R0 invalid mem access",
.result = REJECT,
},
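The inline comments above spell out the 32-bit ALU semantics the test depends on; the same arithmetic in plain C, assuming BPF_ALU32 results are truncated to 32 bits and zero-extended:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t r1 = 2;

	r1 <<= 31;			/* 64-bit shift: r1 = 1ULL << 32      */
	r1 = (uint32_t)r1 >> 31;	/* 32-bit shift sees only low word: 0 */
	r1 = (uint32_t)(r1 - 2);	/* 32-bit subtract wraps: 0xfffffffe  */
	assert(r1 == 0xfffffffeULL);	/* large positive offset, not -2      */
	return 0;
}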
{
"bounds check map access with off+size signed 32bit overflow. test1",
.insns = {
@@ -9185,6 +9951,7 @@ static struct bpf_test tests[] = {
},
.fixup_map_hash_8b = { 3 },
.errstr = "pointer offset 1073741822",
.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
.result = REJECT
},
{
@@ -9206,6 +9973,7 @@ static struct bpf_test tests[] = {
},
.fixup_map_hash_8b = { 3 },
.errstr = "pointer offset -1073741822",
.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
.result = REJECT
},
{
@@ -9377,6 +10145,7 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN()
},
.errstr = "fp pointer offset 1073741822",
.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
.result = REJECT
},
{
@@ -13718,6 +14487,328 @@ static struct bpf_test tests[] = {
.result_unpriv = ACCEPT,
.insn_processed = 15,
},
{
"masking, test out of bounds 1",
.insns = {
BPF_MOV32_IMM(BPF_REG_1, 5),
BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"masking, test out of bounds 2",
.insns = {
BPF_MOV32_IMM(BPF_REG_1, 1),
BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"masking, test out of bounds 3",
.insns = {
BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"masking, test out of bounds 4",
.insns = {
BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"masking, test out of bounds 5",
.insns = {
BPF_MOV32_IMM(BPF_REG_1, -1),
BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"masking, test out of bounds 6",
.insns = {
BPF_MOV32_IMM(BPF_REG_1, -1),
BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"masking, test out of bounds 7",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 5),
BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"masking, test out of bounds 8",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"masking, test out of bounds 9",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"masking, test out of bounds 10",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"masking, test out of bounds 11",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, -1),
BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"masking, test out of bounds 12",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, -1),
BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"masking, test in bounds 1",
.insns = {
BPF_MOV32_IMM(BPF_REG_1, 4),
BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 4,
},
{
"masking, test in bounds 2",
.insns = {
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"masking, test in bounds 3",
.insns = {
BPF_MOV32_IMM(BPF_REG_1, 0xfffffffe),
BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0xfffffffe,
},
{
"masking, test in bounds 4",
.insns = {
BPF_MOV32_IMM(BPF_REG_1, 0xabcde),
BPF_MOV32_IMM(BPF_REG_2, 0xabcdef - 1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0xabcde,
},
{
"masking, test in bounds 5",
.insns = {
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"masking, test in bounds 6",
.insns = {
BPF_MOV32_IMM(BPF_REG_1, 46),
BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 46,
},
{
"masking, test in bounds 7",
.insns = {
BPF_MOV64_IMM(BPF_REG_3, -46),
BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 46,
},
{
"masking, test in bounds 8",
.insns = {
BPF_MOV64_IMM(BPF_REG_3, -47),
BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
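All of the masking tests above execute the same six-instruction sequence that this series has the verifier rewrite pointer ALU with (see BPF_ALU_SANITIZE and alu_limit): it derives an all-ones or all-zeros mask from the limit without any conditional branch the CPU could speculate past. A userspace C model of the idiom (a sketch; assumes two's-complement wrap-around and arithmetic right shift on signed values, as GCC and Clang provide):

#include <assert.h>
#include <stdint.h>

/* Branchless clamp: returns val when 0 <= val <= limit - 1, else 0.
 * 'val' corresponds to R1 (or R3) in the tests, 'limit - 1' to the
 * value loaded into R2.
 */
static uint64_t mask_val(uint64_t val, uint64_t limit)
{
	uint64_t m = limit - 1;

	m -= val;			/* top bit set iff val > limit - 1     */
	m |= val;			/* top bit also set if val itself has
					 * its top bit set                     */
	m = -m;				/* in-bounds nonzero m becomes negative */
	m = (uint64_t)((int64_t)m >> 63); /* sign smears to all-ones or zeros */
	return val & m;
}

int main(void)
{
	assert(mask_val(4, 5) == 4);		/* "masking, test in bounds 1"     */
	assert(mask_val(5, 5) == 0);		/* "masking, test out of bounds 1" */
	assert(mask_val((uint64_t)-1, 1) == 0);	/* negative/oversized value        */
	return 0;
}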
{
"reference tracking in call: free reference in subprog and outside",
.insns = {
@@ -14413,6 +15504,16 @@ static int create_map(uint32_t type, uint32_t size_key,
return fd;
}
/* Seed the 48-byte array map with deterministic contents: foo[6] sits at
 * byte offset 28 == (6 + 1) * sizeof(int) of struct test_val and holds the
 * 0xabcdef12 pattern that the value_ptr tests' .retval checks read back.
 */
static void update_map(int fd, int index)
{
struct test_val value = {
.index = (6 + 1) * sizeof(int),
.foo[6] = 0xabcdef12,
};
assert(!bpf_map_update_elem(fd, &index, &value, 0));
}
static int create_prog_dummy1(enum bpf_prog_type prog_type)
{
struct bpf_insn prog[] = {
@@ -14564,6 +15665,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
if (*fixup_map_array_48b) {
map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
sizeof(struct test_val), 1);
update_map(map_fds[3], 0);
do {
prog[*fixup_map_array_48b].imm = map_fds[3];
fixup_map_array_48b++;