Commit a67825f5 authored by Alexei Starovoitov

Merge branch 'prevent-oob-under-speculation'

Daniel Borkmann says:

====================
This set fixes an out-of-bounds access case under speculative execution
by implementing masking of pointer ALU in the verifier. For details
please see the individual patches.

Thanks!

v2 -> v3:
  - 8/9: change states_equal condition into old->speculative &&
    !cur->speculative, thanks Jakub!
  - 8/9: remove incorrect speculative state test in
    propagate_liveness(), thanks Jakub!
v1 -> v2:
  - Typo fixes in commit msg and a comment, thanks David!
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 8b6b25cf 80c9b2fa
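
For orientation before the diff: the masking rewrite described in the
cover letter clamps a register-supplied offset to a per-instruction
limit using branch-free arithmetic, so the clamp holds even on
speculatively executed paths. A minimal C sketch of the idea
(illustrative only; the real rewrite is emitted as BPF instructions,
see the sketch at the end of this page):

	u64 off;        /* attacker-influenced offset for ptr +/- off */
	u64 alu_limit;  /* verifier-derived maximum safe magnitude    */

	u64 v = (alu_limit - 1) - off;  /* wraps (bit 63 set) iff off >= alu_limit */
	v |= off;                       /* bit 63 also set iff off is "negative"   */
	s64 mask = (s64)(0 - v) >> 63;  /* all-ones iff off was in range, else 0   */
	off &= (u64)mask;               /* unsafe offsets collapse to 0            */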
include/linux/bpf_verifier.h
@@ -148,6 +148,7 @@ struct bpf_verifier_state {
 	/* call stack tracking */
 	struct bpf_func_state *frame[MAX_CALL_FRAMES];
 	u32 curframe;
+	bool speculative;
 };
 
 #define bpf_get_spilled_reg(slot, frame) \
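
Per the v2 -> v3 note above (8/9), the new flag is compared in
states_equal() so that state pruning stays sound. A hedged sketch of
how the change in the collapsed kernel/bpf/verifier.c diff reads
(elisions mine, comment paraphrased):

	static bool states_equal(struct bpf_verifier_env *env,
				 struct bpf_verifier_state *old,
				 struct bpf_verifier_state *cur)
	{
		...
		/* A state explored under speculative simulation was
		 * verified with errors suppressed, so it must never
		 * prune a non-speculative path.
		 */
		if (old->speculative && !cur->speculative)
			return false;
		...
	}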
@@ -167,15 +168,24 @@ struct bpf_verifier_state_list {
 	struct bpf_verifier_state_list *next;
 };
 
+/* Possible states for alu_state member. */
+#define BPF_ALU_SANITIZE_SRC	1U
+#define BPF_ALU_SANITIZE_DST	2U
+#define BPF_ALU_NEG_VALUE	(1U << 2)
+#define BPF_ALU_SANITIZE	(BPF_ALU_SANITIZE_SRC | \
+				 BPF_ALU_SANITIZE_DST)
+
 struct bpf_insn_aux_data {
 	union {
 		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
 		unsigned long map_state;	/* pointer/poison value for maps */
 		s32 call_imm;			/* saved imm field of call insn */
+		u32 alu_limit;			/* limit for add/sub register with pointer */
 	};
 	int ctx_field_size;	/* the ctx field size for load insn, maybe 0 */
 	int sanitize_stack_off;	/* stack slot to be cleared */
 	bool seen;		/* this insn was processed by the verifier */
+	u8 alu_state;		/* used in combination with alu_limit */
 };
 
 #define MAX_USED_MAPS	64 /* max number of maps accessed by one eBPF program */
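
Taken together, alu_state records which operand of a pointer ALU insn
needs sanitation and whether the offset is known negative, while
alu_limit bounds its magnitude. An illustrative decode (hedged sketch
of how a fixup pass would consume the bits; not verbatim patch code):

	if (aux->alu_state & BPF_ALU_SANITIZE) {
		/* does the offset live in src_reg or dst_reg? */
		bool issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
			     BPF_ALU_SANITIZE_SRC;
		/* is the offset a known-negative value? */
		bool isneg = aux->alu_state & BPF_ALU_NEG_VALUE;

		/* emit the AX-based masking sequence (see the sketch
		 * at the end of this page) against that operand,
		 * bounded by aux->alu_limit */
	}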
@@ -212,6 +222,8 @@ struct bpf_subprog_info {
  * one verifier_env per bpf_check() call
  */
 struct bpf_verifier_env {
+	u32 insn_idx;
+	u32 prev_insn_idx;
 	struct bpf_prog *prog;		/* eBPF program being verified */
 	const struct bpf_verifier_ops *ops;
 	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
...
include/linux/filter.h
@@ -53,14 +53,10 @@ struct sock_reuseport;
 #define BPF_REG_D	BPF_REG_8	/* data, callee-saved */
 #define BPF_REG_H	BPF_REG_9	/* hlen, callee-saved */
 
-/* Kernel hidden auxiliary/helper register for hardening step.
- * Only used by eBPF JITs. It's nothing more than a temporary
- * register that JITs use internally, only that here it's part
- * of eBPF instructions that have been rewritten for blinding
- * constants. See JIT pre-step in bpf_jit_blind_constants().
- */
+/* Kernel hidden auxiliary/helper register. */
 #define BPF_REG_AX		MAX_BPF_REG
-#define MAX_BPF_JIT_REG		(MAX_BPF_REG + 1)
+#define MAX_BPF_EXT_REG		(MAX_BPF_REG + 1)
+#define MAX_BPF_JIT_REG		MAX_BPF_EXT_REG
 
 /* unused opcode to mark special call to bpf_tail_call() helper */
 #define BPF_TAIL_CALL	0xf0
...
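
The rename widens the notion of an "extended" register file beyond the
JITs so the interpreter can map AX too. The resulting layout
(illustrative summary, not part of the diff):

	r0..r10                           user-visible BPF registers
	BPF_REG_AX      == MAX_BPF_REG    hidden kernel-internal scratch
	MAX_BPF_EXT_REG == MAX_BPF_REG + 1  sizes arrays that include AX
	MAX_BPF_JIT_REG == MAX_BPF_EXT_REG  kept as an alias for the JITs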
kernel/bpf/core.c
@@ -54,6 +54,7 @@
 #define DST	regs[insn->dst_reg]
 #define SRC	regs[insn->src_reg]
 #define FP	regs[BPF_REG_FP]
+#define AX	regs[BPF_REG_AX]
 #define ARG1	regs[BPF_REG_ARG1]
 #define CTX	regs[BPF_REG_CTX]
 #define IMM	insn->imm
@@ -857,6 +858,26 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
 	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
 	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
 
+	/* Constraints on AX register:
+	 *
+	 * AX register is inaccessible from user space. It is mapped in
+	 * all JITs, and used here for constant blinding rewrites. It is
+	 * typically "stateless" meaning its contents are only valid within
+	 * the executed instruction, but not across several instructions.
+	 * There are a few exceptions however which are further detailed
+	 * below.
+	 *
+	 * Constant blinding is only used by JITs, not in the interpreter.
+	 * The interpreter uses AX in some occasions as a local temporary
+	 * register e.g. in DIV or MOD instructions.
+	 *
+	 * In restricted circumstances, the verifier can also use the AX
+	 * register for rewrites as long as they do not interfere with
+	 * the above cases!
+	 */
+	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
+		goto out;
+
 	if (from->imm == 0 &&
 	    (from->code == (BPF_ALU  | BPF_MOV | BPF_K) ||
 	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
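
The early bail-out matters because blinding an insn that already
operates on AX would clobber the scratch register mid-rewrite. For
reference, a typical rewrite that bpf_jit_blind_insn() performs further
below looks like this (sketch of the existing blinding pattern, with
imm_rnd being the per-program random blinding value):

	case BPF_ALU64 | BPF_MOV | BPF_K:
		/* dst = imm becomes: */
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(BPF_MOV, from->dst_reg, BPF_REG_AX);
		break;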
@@ -1188,7 +1209,6 @@ bool bpf_opcode_in_insntable(u8 code)
  */
 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 {
-	u64 tmp;
 #define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
 	static const void *jumptable[256] = {
@@ -1268,36 +1288,36 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 		(*(s64 *) &DST) >>= IMM;
 		CONT;
 	ALU64_MOD_X:
-		div64_u64_rem(DST, SRC, &tmp);
-		DST = tmp;
+		div64_u64_rem(DST, SRC, &AX);
+		DST = AX;
 		CONT;
 	ALU_MOD_X:
-		tmp = (u32) DST;
-		DST = do_div(tmp, (u32) SRC);
+		AX = (u32) DST;
+		DST = do_div(AX, (u32) SRC);
 		CONT;
 	ALU64_MOD_K:
-		div64_u64_rem(DST, IMM, &tmp);
-		DST = tmp;
+		div64_u64_rem(DST, IMM, &AX);
+		DST = AX;
 		CONT;
 	ALU_MOD_K:
-		tmp = (u32) DST;
-		DST = do_div(tmp, (u32) IMM);
+		AX = (u32) DST;
+		DST = do_div(AX, (u32) IMM);
 		CONT;
 	ALU64_DIV_X:
 		DST = div64_u64(DST, SRC);
 		CONT;
 	ALU_DIV_X:
-		tmp = (u32) DST;
-		do_div(tmp, (u32) SRC);
-		DST = (u32) tmp;
+		AX = (u32) DST;
+		do_div(AX, (u32) SRC);
+		DST = (u32) AX;
 		CONT;
 	ALU64_DIV_K:
 		DST = div64_u64(DST, IMM);
 		CONT;
 	ALU_DIV_K:
-		tmp = (u32) DST;
-		do_div(tmp, (u32) IMM);
-		DST = (u32) tmp;
+		AX = (u32) DST;
+		do_div(AX, (u32) IMM);
+		DST = (u32) AX;
 		CONT;
 	ALU_END_TO_BE:
 		switch (IMM) {
@@ -1553,7 +1573,7 @@ STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
 { \
 	u64 stack[stack_size / sizeof(u64)]; \
-	u64 regs[MAX_BPF_REG]; \
+	u64 regs[MAX_BPF_EXT_REG]; \
 \
 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
 	ARG1 = (u64) (unsigned long) ctx; \
@@ -1566,7 +1586,7 @@ static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
 				      const struct bpf_insn *insn) \
 { \
 	u64 stack[stack_size / sizeof(u64)]; \
-	u64 regs[MAX_BPF_REG]; \
+	u64 regs[MAX_BPF_EXT_REG]; \
 \
 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
 	BPF_R1 = r1; \
...
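
The main logic lands in the collapsed kernel/bpf/verifier.c diff: the
verifier computes aux->alu_limit for each pointer ALU insn and later
patches the insn stream to mask the offset through AX. A hedged sketch
of the emitted sequence for dst_reg += off_reg (insn-macro form; the
exact sequence in the patch may differ in detail):

	*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
	*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); /* wraps if off too big  */
	*patch++ = BPF_ALU64_REG(BPF_OR,  BPF_REG_AX, off_reg); /* catches negative off  */
	*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);       /* AX = -AX              */
	*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);     /* all-ones or 0         */
	*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg); /* masked offset         */
	/* ...the original pointer ALU then consumes AX, so an
	 * out-of-range offset becomes 0 even under speculation. */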
(The remaining file diffs, notably kernel/bpf/verifier.c, are collapsed.)