Commit c532cea9 authored by David S. Miller

Merge branch 'filter-next'

Daniel Borkmann says:

====================
BPF + test suite updates

These are the last bigger BPF changes that I had in my todo
queue for now. As the first two patches from this series
contain additional test cases for the test suite, I have
rebased them on top of current net-next with the set from [1]
applied to avoid introducing any unnecessary merge conflicts.

For details, please refer to the individual patches. Test
suite runs fine with the set applied.

 [1] http://patchwork.ozlabs.org/patch/352599/
     http://patchwork.ozlabs.org/patch/352600/
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 019ee792 f8f6d679
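
The theme running through the diffs below: the kernel-internal BPF_S_* shadow opcodes are removed, and each arch JIT now switches directly on the raw classic BPF opcodes (BPF_LD | BPF_W | BPF_ABS and friends), with ancillary loads folded into a BPF_ANC pseudo-class by the new bpf_anc_helper(). A rough sketch of that decoding idea (simplified here; the real helper in the kernel's filter header walks the full SKF_AD_* table and differs in detail):

	#include <linux/filter.h>	/* struct sock_filter, BPF_* macros, SKF_AD_OFF */

	/* Sketch only: classic BPF encodes "load ancillary datum n" as an
	 * absolute word load at the magic negative offset SKF_AD_OFF + n.
	 * Folding that into a BPF_ANC pseudo-class lets one switch statement
	 * dispatch ordinary and ancillary loads alike.
	 */
	static u16 anc_decode_sketch(const struct sock_filter *f)
	{
		switch (f->code) {
		case BPF_LD | BPF_W | BPF_ABS:
		case BPF_LDX | BPF_W | BPF_ABS:
			if (f->k >= (u32)SKF_AD_OFF)	/* ancillary load? */
				return BPF_ANC | (f->k - (u32)SKF_AD_OFF);
			return f->code;			/* plain packet load */
		default:
			return f->code;			/* already a composed opcode */
		}
	}
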
@@ -136,7 +136,7 @@ static u16 saved_regs(struct jit_ctx *ctx)
 	u16 ret = 0;
 	if ((ctx->skf->len > 1) ||
-	    (ctx->skf->insns[0].code == BPF_S_RET_A))
+	    (ctx->skf->insns[0].code == (BPF_RET | BPF_A)))
 		ret |= 1 << r_A;
 #ifdef CONFIG_FRAME_POINTER
@@ -164,18 +164,10 @@ static inline int mem_words_used(struct jit_ctx *ctx)
 static inline bool is_load_to_a(u16 inst)
 {
 	switch (inst) {
-	case BPF_S_LD_W_LEN:
-	case BPF_S_LD_W_ABS:
-	case BPF_S_LD_H_ABS:
-	case BPF_S_LD_B_ABS:
-	case BPF_S_ANC_CPU:
-	case BPF_S_ANC_IFINDEX:
-	case BPF_S_ANC_MARK:
-	case BPF_S_ANC_PROTOCOL:
-	case BPF_S_ANC_RXHASH:
-	case BPF_S_ANC_VLAN_TAG:
-	case BPF_S_ANC_VLAN_TAG_PRESENT:
-	case BPF_S_ANC_QUEUE:
+	case BPF_LD | BPF_W | BPF_LEN:
+	case BPF_LD | BPF_W | BPF_ABS:
+	case BPF_LD | BPF_H | BPF_ABS:
+	case BPF_LD | BPF_B | BPF_ABS:
 		return true;
 	default:
 		return false;
@@ -215,7 +207,7 @@ static void build_prologue(struct jit_ctx *ctx)
 	emit(ARM_MOV_I(r_X, 0), ctx);
 	/* do not leak kernel data to userspace */
-	if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst)))
+	if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
 		emit(ARM_MOV_I(r_A, 0), ctx);
 	/* stack space for the BPF_MEM words */
@@ -480,36 +472,39 @@ static int build_body(struct jit_ctx *ctx)
 	u32 k;
 	for (i = 0; i < prog->len; i++) {
+		u16 code;
+
 		inst = &(prog->insns[i]);
 		/* K as an immediate value operand */
 		k = inst->k;
+		code = bpf_anc_helper(inst);
 		/* compute offsets only in the fake pass */
 		if (ctx->target == NULL)
 			ctx->offsets[i] = ctx->idx * 4;
-		switch (inst->code) {
-		case BPF_S_LD_IMM:
+		switch (code) {
+		case BPF_LD | BPF_IMM:
 			emit_mov_i(r_A, k, ctx);
 			break;
-		case BPF_S_LD_W_LEN:
+		case BPF_LD | BPF_W | BPF_LEN:
 			ctx->seen |= SEEN_SKB;
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
 			emit(ARM_LDR_I(r_A, r_skb,
 				       offsetof(struct sk_buff, len)), ctx);
 			break;
-		case BPF_S_LD_MEM:
+		case BPF_LD | BPF_MEM:
 			/* A = scratch[k] */
 			ctx->seen |= SEEN_MEM_WORD(k);
 			emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
 			break;
-		case BPF_S_LD_W_ABS:
+		case BPF_LD | BPF_W | BPF_ABS:
 			load_order = 2;
 			goto load;
-		case BPF_S_LD_H_ABS:
+		case BPF_LD | BPF_H | BPF_ABS:
 			load_order = 1;
 			goto load;
-		case BPF_S_LD_B_ABS:
+		case BPF_LD | BPF_B | BPF_ABS:
 			load_order = 0;
 		load:
 			/* the interpreter will deal with the negative K */
@@ -552,31 +547,31 @@ static int build_body(struct jit_ctx *ctx)
 			emit_err_ret(ARM_COND_NE, ctx);
 			emit(ARM_MOV_R(r_A, ARM_R0), ctx);
 			break;
-		case BPF_S_LD_W_IND:
+		case BPF_LD | BPF_W | BPF_IND:
 			load_order = 2;
 			goto load_ind;
-		case BPF_S_LD_H_IND:
+		case BPF_LD | BPF_H | BPF_IND:
 			load_order = 1;
 			goto load_ind;
-		case BPF_S_LD_B_IND:
+		case BPF_LD | BPF_B | BPF_IND:
 			load_order = 0;
 		load_ind:
 			OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
 			goto load_common;
-		case BPF_S_LDX_IMM:
+		case BPF_LDX | BPF_IMM:
 			ctx->seen |= SEEN_X;
 			emit_mov_i(r_X, k, ctx);
 			break;
-		case BPF_S_LDX_W_LEN:
+		case BPF_LDX | BPF_W | BPF_LEN:
 			ctx->seen |= SEEN_X | SEEN_SKB;
 			emit(ARM_LDR_I(r_X, r_skb,
 				       offsetof(struct sk_buff, len)), ctx);
 			break;
-		case BPF_S_LDX_MEM:
+		case BPF_LDX | BPF_MEM:
 			ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
 			emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
 			break;
-		case BPF_S_LDX_B_MSH:
+		case BPF_LDX | BPF_B | BPF_MSH:
 			/* x = ((*(frame + k)) & 0xf) << 2; */
 			ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
 			/* the interpreter should deal with the negative K */
@@ -606,113 +601,113 @@ static int build_body(struct jit_ctx *ctx)
 			emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
 			emit(ARM_LSL_I(r_X, r_X, 2), ctx);
 			break;
-		case BPF_S_ST:
+		case BPF_ST:
 			ctx->seen |= SEEN_MEM_WORD(k);
 			emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
 			break;
-		case BPF_S_STX:
+		case BPF_STX:
 			update_on_xread(ctx);
 			ctx->seen |= SEEN_MEM_WORD(k);
 			emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
 			break;
-		case BPF_S_ALU_ADD_K:
+		case BPF_ALU | BPF_ADD | BPF_K:
 			/* A += K */
 			OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
 			break;
-		case BPF_S_ALU_ADD_X:
+		case BPF_ALU | BPF_ADD | BPF_X:
 			update_on_xread(ctx);
 			emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
 			break;
-		case BPF_S_ALU_SUB_K:
+		case BPF_ALU | BPF_SUB | BPF_K:
 			/* A -= K */
 			OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
 			break;
-		case BPF_S_ALU_SUB_X:
+		case BPF_ALU | BPF_SUB | BPF_X:
 			update_on_xread(ctx);
 			emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
 			break;
-		case BPF_S_ALU_MUL_K:
+		case BPF_ALU | BPF_MUL | BPF_K:
 			/* A *= K */
 			emit_mov_i(r_scratch, k, ctx);
 			emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
 			break;
-		case BPF_S_ALU_MUL_X:
+		case BPF_ALU | BPF_MUL | BPF_X:
 			update_on_xread(ctx);
 			emit(ARM_MUL(r_A, r_A, r_X), ctx);
 			break;
-		case BPF_S_ALU_DIV_K:
+		case BPF_ALU | BPF_DIV | BPF_K:
 			if (k == 1)
 				break;
 			emit_mov_i(r_scratch, k, ctx);
 			emit_udiv(r_A, r_A, r_scratch, ctx);
 			break;
-		case BPF_S_ALU_DIV_X:
+		case BPF_ALU | BPF_DIV | BPF_X:
 			update_on_xread(ctx);
 			emit(ARM_CMP_I(r_X, 0), ctx);
 			emit_err_ret(ARM_COND_EQ, ctx);
 			emit_udiv(r_A, r_A, r_X, ctx);
 			break;
-		case BPF_S_ALU_OR_K:
+		case BPF_ALU | BPF_OR | BPF_K:
 			/* A |= K */
 			OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
 			break;
-		case BPF_S_ALU_OR_X:
+		case BPF_ALU | BPF_OR | BPF_X:
 			update_on_xread(ctx);
 			emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
 			break;
-		case BPF_S_ALU_XOR_K:
+		case BPF_ALU | BPF_XOR | BPF_K:
 			/* A ^= K; */
 			OP_IMM3(ARM_EOR, r_A, r_A, k, ctx);
 			break;
-		case BPF_S_ANC_ALU_XOR_X:
-		case BPF_S_ALU_XOR_X:
+		case BPF_ANC | SKF_AD_ALU_XOR_X:
+		case BPF_ALU | BPF_XOR | BPF_X:
 			/* A ^= X */
 			update_on_xread(ctx);
 			emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
 			break;
-		case BPF_S_ALU_AND_K:
+		case BPF_ALU | BPF_AND | BPF_K:
 			/* A &= K */
 			OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
 			break;
-		case BPF_S_ALU_AND_X:
+		case BPF_ALU | BPF_AND | BPF_X:
 			update_on_xread(ctx);
 			emit(ARM_AND_R(r_A, r_A, r_X), ctx);
 			break;
-		case BPF_S_ALU_LSH_K:
+		case BPF_ALU | BPF_LSH | BPF_K:
 			if (unlikely(k > 31))
 				return -1;
 			emit(ARM_LSL_I(r_A, r_A, k), ctx);
 			break;
-		case BPF_S_ALU_LSH_X:
+		case BPF_ALU | BPF_LSH | BPF_X:
 			update_on_xread(ctx);
 			emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
 			break;
-		case BPF_S_ALU_RSH_K:
+		case BPF_ALU | BPF_RSH | BPF_K:
 			if (unlikely(k > 31))
 				return -1;
 			emit(ARM_LSR_I(r_A, r_A, k), ctx);
 			break;
-		case BPF_S_ALU_RSH_X:
+		case BPF_ALU | BPF_RSH | BPF_X:
 			update_on_xread(ctx);
 			emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
 			break;
-		case BPF_S_ALU_NEG:
+		case BPF_ALU | BPF_NEG:
 			/* A = -A */
 			emit(ARM_RSB_I(r_A, r_A, 0), ctx);
 			break;
-		case BPF_S_JMP_JA:
+		case BPF_JMP | BPF_JA:
 			/* pc += K */
 			emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
 			break;
-		case BPF_S_JMP_JEQ_K:
+		case BPF_JMP | BPF_JEQ | BPF_K:
 			/* pc += (A == K) ? pc->jt : pc->jf */
 			condt = ARM_COND_EQ;
 			goto cmp_imm;
-		case BPF_S_JMP_JGT_K:
+		case BPF_JMP | BPF_JGT | BPF_K:
 			/* pc += (A > K) ? pc->jt : pc->jf */
 			condt = ARM_COND_HI;
 			goto cmp_imm;
-		case BPF_S_JMP_JGE_K:
+		case BPF_JMP | BPF_JGE | BPF_K:
 			/* pc += (A >= K) ? pc->jt : pc->jf */
 			condt = ARM_COND_HS;
 		cmp_imm:
@@ -731,22 +726,22 @@ static int build_body(struct jit_ctx *ctx)
 			_emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
 						     ctx)), ctx);
 			break;
-		case BPF_S_JMP_JEQ_X:
+		case BPF_JMP | BPF_JEQ | BPF_X:
 			/* pc += (A == X) ? pc->jt : pc->jf */
 			condt = ARM_COND_EQ;
 			goto cmp_x;
-		case BPF_S_JMP_JGT_X:
+		case BPF_JMP | BPF_JGT | BPF_X:
 			/* pc += (A > X) ? pc->jt : pc->jf */
 			condt = ARM_COND_HI;
 			goto cmp_x;
-		case BPF_S_JMP_JGE_X:
+		case BPF_JMP | BPF_JGE | BPF_X:
 			/* pc += (A >= X) ? pc->jt : pc->jf */
 			condt = ARM_COND_CS;
 		cmp_x:
 			update_on_xread(ctx);
 			emit(ARM_CMP_R(r_A, r_X), ctx);
 			goto cond_jump;
-		case BPF_S_JMP_JSET_K:
+		case BPF_JMP | BPF_JSET | BPF_K:
 			/* pc += (A & K) ? pc->jt : pc->jf */
 			condt = ARM_COND_NE;
 			/* not set iff all zeroes iff Z==1 iff EQ */
@@ -759,16 +754,16 @@ static int build_body(struct jit_ctx *ctx)
 				emit(ARM_TST_I(r_A, imm12), ctx);
 			}
 			goto cond_jump;
-		case BPF_S_JMP_JSET_X:
+		case BPF_JMP | BPF_JSET | BPF_X:
 			/* pc += (A & X) ? pc->jt : pc->jf */
 			update_on_xread(ctx);
 			condt = ARM_COND_NE;
 			emit(ARM_TST_R(r_A, r_X), ctx);
 			goto cond_jump;
-		case BPF_S_RET_A:
+		case BPF_RET | BPF_A:
 			emit(ARM_MOV_R(ARM_R0, r_A), ctx);
 			goto b_epilogue;
-		case BPF_S_RET_K:
+		case BPF_RET | BPF_K:
 			if ((k == 0) && (ctx->ret0_fp_idx < 0))
 				ctx->ret0_fp_idx = i;
 			emit_mov_i(ARM_R0, k, ctx);
@@ -776,17 +771,17 @@ static int build_body(struct jit_ctx *ctx)
 			if (i != ctx->skf->len - 1)
 				emit(ARM_B(b_imm(prog->len, ctx)), ctx);
 			break;
-		case BPF_S_MISC_TAX:
+		case BPF_MISC | BPF_TAX:
 			/* X = A */
 			ctx->seen |= SEEN_X;
 			emit(ARM_MOV_R(r_X, r_A), ctx);
 			break;
-		case BPF_S_MISC_TXA:
+		case BPF_MISC | BPF_TXA:
 			/* A = X */
 			update_on_xread(ctx);
 			emit(ARM_MOV_R(r_A, r_X), ctx);
 			break;
-		case BPF_S_ANC_PROTOCOL:
+		case BPF_ANC | SKF_AD_PROTOCOL:
 			/* A = ntohs(skb->protocol) */
 			ctx->seen |= SEEN_SKB;
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
@@ -795,7 +790,7 @@ static int build_body(struct jit_ctx *ctx)
 			emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
 			emit_swap16(r_A, r_scratch, ctx);
 			break;
-		case BPF_S_ANC_CPU:
+		case BPF_ANC | SKF_AD_CPU:
 			/* r_scratch = current_thread_info() */
 			OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
 			/* A = current_thread_info()->cpu */
@@ -803,7 +798,7 @@ static int build_body(struct jit_ctx *ctx)
 			off = offsetof(struct thread_info, cpu);
 			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
 			break;
-		case BPF_S_ANC_IFINDEX:
+		case BPF_ANC | SKF_AD_IFINDEX:
 			/* A = skb->dev->ifindex */
 			ctx->seen |= SEEN_SKB;
 			off = offsetof(struct sk_buff, dev);
@@ -817,30 +812,30 @@ static int build_body(struct jit_ctx *ctx)
 			off = offsetof(struct net_device, ifindex);
 			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
 			break;
-		case BPF_S_ANC_MARK:
+		case BPF_ANC | SKF_AD_MARK:
 			ctx->seen |= SEEN_SKB;
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
 			off = offsetof(struct sk_buff, mark);
 			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
 			break;
-		case BPF_S_ANC_RXHASH:
+		case BPF_ANC | SKF_AD_RXHASH:
 			ctx->seen |= SEEN_SKB;
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
 			off = offsetof(struct sk_buff, hash);
 			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
 			break;
-		case BPF_S_ANC_VLAN_TAG:
-		case BPF_S_ANC_VLAN_TAG_PRESENT:
+		case BPF_ANC | SKF_AD_VLAN_TAG:
+		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
 			ctx->seen |= SEEN_SKB;
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
 			off = offsetof(struct sk_buff, vlan_tci);
 			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
-			if (inst->code == BPF_S_ANC_VLAN_TAG)
+			if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
 				OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx);
 			else
 				OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx);
 			break;
-		case BPF_S_ANC_QUEUE:
+		case BPF_ANC | SKF_AD_QUEUE:
 			ctx->seen |= SEEN_SKB;
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
 						  queue_mapping) != 2);
......
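
For reference, the composed case labels used throughout these JITs are plain bit-ors of the three classic BPF opcode fields, so they remain compile-time switch constants. A small sketch of the layout (field masks per the uapi filter header):

	#include <linux/filter.h>	/* BPF_CLASS()/BPF_SIZE()/BPF_MODE(), BPF_LD, ... */

	/* A classic BPF opcode is 16 bits: class in bits 0-2 (LD, LDX, ST,
	 * STX, ALU, JMP, RET, MISC), size in bits 3-4 (W, H, B), and mode in
	 * bits 5-7 (IMM, ABS, IND, MEM, LEN, MSH).  Hence
	 *   BPF_LD | BPF_W | BPF_ABS  ==  0x00 | 0x00 | 0x20  ==  0x20.
	 */
	static bool is_abs_load(u16 code)
	{
		return BPF_CLASS(code) == BPF_LD && BPF_MODE(code) == BPF_ABS;
	}
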
@@ -78,7 +78,7 @@ sk_load_byte_positive_offset:
 	blr
 /*
- * BPF_S_LDX_B_MSH: ldxb 4*([offset]&0xf)
+ * BPF_LDX | BPF_B | BPF_MSH: ldxb 4*([offset]&0xf)
  * r_addr is the offset value
  */
 	.globl sk_load_byte_msh
......
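
The sk_load_byte_msh helper above implements classic BPF's one odd addressing mode. In C terms, BPF_LDX | BPF_B | BPF_MSH computes exactly the 4*([offset]&0xf) in the comment; it exists to fetch an IPv4 header length, whose low nibble is the IHL in 32-bit words:

	/* X = 4 * (P[k] & 0xf) -- classic BPF's IPv4 header-length idiom. */
	static u32 ldx_b_msh(const u8 *pkt, u32 k)
	{
		return (pkt[k] & 0x0f) << 2;
	}
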
@@ -79,19 +79,11 @@ static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
 	}
 	switch (filter[0].code) {
-	case BPF_S_RET_K:
-	case BPF_S_LD_W_LEN:
-	case BPF_S_ANC_PROTOCOL:
-	case BPF_S_ANC_IFINDEX:
-	case BPF_S_ANC_MARK:
-	case BPF_S_ANC_RXHASH:
-	case BPF_S_ANC_VLAN_TAG:
-	case BPF_S_ANC_VLAN_TAG_PRESENT:
-	case BPF_S_ANC_CPU:
-	case BPF_S_ANC_QUEUE:
-	case BPF_S_LD_W_ABS:
-	case BPF_S_LD_H_ABS:
-	case BPF_S_LD_B_ABS:
+	case BPF_RET | BPF_K:
+	case BPF_LD | BPF_W | BPF_LEN:
+	case BPF_LD | BPF_W | BPF_ABS:
+	case BPF_LD | BPF_H | BPF_ABS:
+	case BPF_LD | BPF_B | BPF_ABS:
 		/* first instruction sets A register (or is RET 'constant') */
 		break;
 	default:
@@ -144,6 +136,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 	for (i = 0; i < flen; i++) {
 		unsigned int K = filter[i].k;
+		u16 code = bpf_anc_helper(&filter[i]);
 		/*
 		 * addrs[] maps a BPF bytecode address into a real offset from
@@ -151,35 +144,35 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 		 */
 		addrs[i] = ctx->idx * 4;
-		switch (filter[i].code) {
+		switch (code) {
 			/*** ALU ops ***/
-		case BPF_S_ALU_ADD_X: /* A += X; */
+		case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
 			ctx->seen |= SEEN_XREG;
 			PPC_ADD(r_A, r_A, r_X);
 			break;
-		case BPF_S_ALU_ADD_K: /* A += K; */
+		case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
 			if (!K)
 				break;
 			PPC_ADDI(r_A, r_A, IMM_L(K));
 			if (K >= 32768)
 				PPC_ADDIS(r_A, r_A, IMM_HA(K));
 			break;
-		case BPF_S_ALU_SUB_X: /* A -= X; */
+		case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
 			ctx->seen |= SEEN_XREG;
 			PPC_SUB(r_A, r_A, r_X);
 			break;
-		case BPF_S_ALU_SUB_K: /* A -= K */
+		case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
 			if (!K)
 				break;
 			PPC_ADDI(r_A, r_A, IMM_L(-K));
 			if (K >= 32768)
 				PPC_ADDIS(r_A, r_A, IMM_HA(-K));
 			break;
-		case BPF_S_ALU_MUL_X: /* A *= X; */
+		case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
 			ctx->seen |= SEEN_XREG;
 			PPC_MUL(r_A, r_A, r_X);
 			break;
-		case BPF_S_ALU_MUL_K: /* A *= K */
+		case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
 			if (K < 32768)
 				PPC_MULI(r_A, r_A, K);
 			else {
@@ -187,7 +180,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 				PPC_MUL(r_A, r_A, r_scratch1);
 			}
 			break;
-		case BPF_S_ALU_MOD_X: /* A %= X; */
+		case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
 			ctx->seen |= SEEN_XREG;
 			PPC_CMPWI(r_X, 0);
 			if (ctx->pc_ret0 != -1) {
@@ -201,13 +194,13 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			PPC_MUL(r_scratch1, r_X, r_scratch1);
 			PPC_SUB(r_A, r_A, r_scratch1);
 			break;
-		case BPF_S_ALU_MOD_K: /* A %= K; */
+		case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
 			PPC_LI32(r_scratch2, K);
 			PPC_DIVWU(r_scratch1, r_A, r_scratch2);
 			PPC_MUL(r_scratch1, r_scratch2, r_scratch1);
 			PPC_SUB(r_A, r_A, r_scratch1);
 			break;
-		case BPF_S_ALU_DIV_X: /* A /= X; */
+		case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
 			ctx->seen |= SEEN_XREG;
 			PPC_CMPWI(r_X, 0);
 			if (ctx->pc_ret0 != -1) {
@@ -223,17 +216,17 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			}
 			PPC_DIVWU(r_A, r_A, r_X);
 			break;
-		case BPF_S_ALU_DIV_K: /* A /= K */
+		case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
 			if (K == 1)
 				break;
 			PPC_LI32(r_scratch1, K);
 			PPC_DIVWU(r_A, r_A, r_scratch1);
 			break;
-		case BPF_S_ALU_AND_X:
+		case BPF_ALU | BPF_AND | BPF_X:
 			ctx->seen |= SEEN_XREG;
 			PPC_AND(r_A, r_A, r_X);
 			break;
-		case BPF_S_ALU_AND_K:
+		case BPF_ALU | BPF_AND | BPF_K:
 			if (!IMM_H(K))
 				PPC_ANDI(r_A, r_A, K);
 			else {
@@ -241,51 +234,51 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 				PPC_AND(r_A, r_A, r_scratch1);
 			}
 			break;
-		case BPF_S_ALU_OR_X:
+		case BPF_ALU | BPF_OR | BPF_X:
 			ctx->seen |= SEEN_XREG;
 			PPC_OR(r_A, r_A, r_X);
 			break;
-		case BPF_S_ALU_OR_K:
+		case BPF_ALU | BPF_OR | BPF_K:
 			if (IMM_L(K))
 				PPC_ORI(r_A, r_A, IMM_L(K));
 			if (K >= 65536)
 				PPC_ORIS(r_A, r_A, IMM_H(K));
 			break;
-		case BPF_S_ANC_ALU_XOR_X:
-		case BPF_S_ALU_XOR_X: /* A ^= X */
+		case BPF_ANC | SKF_AD_ALU_XOR_X:
+		case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */
 			ctx->seen |= SEEN_XREG;
 			PPC_XOR(r_A, r_A, r_X);
 			break;
-		case BPF_S_ALU_XOR_K: /* A ^= K */
+		case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
 			if (IMM_L(K))
 				PPC_XORI(r_A, r_A, IMM_L(K));
 			if (K >= 65536)
 				PPC_XORIS(r_A, r_A, IMM_H(K));
 			break;
-		case BPF_S_ALU_LSH_X: /* A <<= X; */
+		case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
 			ctx->seen |= SEEN_XREG;
 			PPC_SLW(r_A, r_A, r_X);
 			break;
-		case BPF_S_ALU_LSH_K:
+		case BPF_ALU | BPF_LSH | BPF_K:
 			if (K == 0)
 				break;
 			else
 				PPC_SLWI(r_A, r_A, K);
 			break;
-		case BPF_S_ALU_RSH_X: /* A >>= X; */
+		case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
 			ctx->seen |= SEEN_XREG;
 			PPC_SRW(r_A, r_A, r_X);
 			break;
-		case BPF_S_ALU_RSH_K: /* A >>= K; */
+		case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
 			if (K == 0)
 				break;
 			else
 				PPC_SRWI(r_A, r_A, K);
 			break;
-		case BPF_S_ALU_NEG:
+		case BPF_ALU | BPF_NEG:
 			PPC_NEG(r_A, r_A);
 			break;
-		case BPF_S_RET_K:
+		case BPF_RET | BPF_K:
 			PPC_LI32(r_ret, K);
 			if (!K) {
 				if (ctx->pc_ret0 == -1)
@@ -312,7 +305,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 				PPC_BLR();
 			}
 			break;
-		case BPF_S_RET_A:
+		case BPF_RET | BPF_A:
 			PPC_MR(r_ret, r_A);
 			if (i != flen - 1) {
 				if (ctx->seen)
@@ -321,53 +314,53 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 				PPC_BLR();
 			}
 			break;
-		case BPF_S_MISC_TAX: /* X = A */
+		case BPF_MISC | BPF_TAX: /* X = A */
 			PPC_MR(r_X, r_A);
 			break;
-		case BPF_S_MISC_TXA: /* A = X */
+		case BPF_MISC | BPF_TXA: /* A = X */
 			ctx->seen |= SEEN_XREG;
 			PPC_MR(r_A, r_X);
 			break;
 			/*** Constant loads/M[] access ***/
-		case BPF_S_LD_IMM: /* A = K */
+		case BPF_LD | BPF_IMM: /* A = K */
 			PPC_LI32(r_A, K);
 			break;
-		case BPF_S_LDX_IMM: /* X = K */
+		case BPF_LDX | BPF_IMM: /* X = K */
 			PPC_LI32(r_X, K);
 			break;
-		case BPF_S_LD_MEM: /* A = mem[K] */
+		case BPF_LD | BPF_MEM: /* A = mem[K] */
 			PPC_MR(r_A, r_M + (K & 0xf));
 			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
 			break;
-		case BPF_S_LDX_MEM: /* X = mem[K] */
+		case BPF_LDX | BPF_MEM: /* X = mem[K] */
 			PPC_MR(r_X, r_M + (K & 0xf));
 			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
 			break;
-		case BPF_S_ST: /* mem[K] = A */
+		case BPF_ST: /* mem[K] = A */
 			PPC_MR(r_M + (K & 0xf), r_A);
 			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
 			break;
-		case BPF_S_STX: /* mem[K] = X */
+		case BPF_STX: /* mem[K] = X */
 			PPC_MR(r_M + (K & 0xf), r_X);
 			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
 			break;
-		case BPF_S_LD_W_LEN: /* A = skb->len; */
+		case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
 			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
 			break;
-		case BPF_S_LDX_W_LEN: /* X = skb->len; */
+		case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
 			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
 			break;
 			/*** Ancillary info loads ***/
-		case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
+		case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
 						  protocol) != 2);
 			PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
 							    protocol));
 			break;
-		case BPF_S_ANC_IFINDEX:
+		case BPF_ANC | SKF_AD_IFINDEX:
 			PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
 								dev));
 			PPC_CMPDI(r_scratch1, 0);
@@ -384,33 +377,33 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			PPC_LWZ_OFFS(r_A, r_scratch1,
 				     offsetof(struct net_device, ifindex));
 			break;
-		case BPF_S_ANC_MARK:
+		case BPF_ANC | SKF_AD_MARK:
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
 			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
 							  mark));
 			break;
-		case BPF_S_ANC_RXHASH:
+		case BPF_ANC | SKF_AD_RXHASH:
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
 			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
 							  hash));
 			break;
-		case BPF_S_ANC_VLAN_TAG:
-		case BPF_S_ANC_VLAN_TAG_PRESENT:
+		case BPF_ANC | SKF_AD_VLAN_TAG:
+		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
 			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
 							  vlan_tci));
-			if (filter[i].code == BPF_S_ANC_VLAN_TAG)
+			if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
 				PPC_ANDI(r_A, r_A, VLAN_VID_MASK);
 			else
 				PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
 			break;
-		case BPF_S_ANC_QUEUE:
+		case BPF_ANC | SKF_AD_QUEUE:
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
 						  queue_mapping) != 2);
 			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
 							  queue_mapping));
 			break;
-		case BPF_S_ANC_CPU:
+		case BPF_ANC | SKF_AD_CPU:
 #ifdef CONFIG_SMP
 			/*
 			 * PACA ptr is r13:
@@ -426,13 +419,13 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			break;
 			/*** Absolute loads from packet header/data ***/
-		case BPF_S_LD_W_ABS:
+		case BPF_LD | BPF_W | BPF_ABS:
 			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
 			goto common_load;
-		case BPF_S_LD_H_ABS:
+		case BPF_LD | BPF_H | BPF_ABS:
 			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
 			goto common_load;
-		case BPF_S_LD_B_ABS:
+		case BPF_LD | BPF_B | BPF_ABS:
 			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
 		common_load:
 			/* Load from [K]. */
@@ -449,13 +442,13 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			break;
 			/*** Indirect loads from packet header/data ***/
-		case BPF_S_LD_W_IND:
+		case BPF_LD | BPF_W | BPF_IND:
 			func = sk_load_word;
 			goto common_load_ind;
-		case BPF_S_LD_H_IND:
+		case BPF_LD | BPF_H | BPF_IND:
 			func = sk_load_half;
 			goto common_load_ind;
-		case BPF_S_LD_B_IND:
+		case BPF_LD | BPF_B | BPF_IND:
 			func = sk_load_byte;
 		common_load_ind:
 			/*
@@ -473,31 +466,31 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			PPC_BCC(COND_LT, exit_addr);
 			break;
-		case BPF_S_LDX_B_MSH:
+		case BPF_LDX | BPF_B | BPF_MSH:
 			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
 			goto common_load;
 			break;
 			/*** Jump and branches ***/
-		case BPF_S_JMP_JA:
+		case BPF_JMP | BPF_JA:
 			if (K != 0)
 				PPC_JMP(addrs[i + 1 + K]);
 			break;
-		case BPF_S_JMP_JGT_K:
-		case BPF_S_JMP_JGT_X:
+		case BPF_JMP | BPF_JGT | BPF_K:
+		case BPF_JMP | BPF_JGT | BPF_X:
 			true_cond = COND_GT;
 			goto cond_branch;
-		case BPF_S_JMP_JGE_K:
-		case BPF_S_JMP_JGE_X:
+		case BPF_JMP | BPF_JGE | BPF_K:
+		case BPF_JMP | BPF_JGE | BPF_X:
 			true_cond = COND_GE;
 			goto cond_branch;
-		case BPF_S_JMP_JEQ_K:
-		case BPF_S_JMP_JEQ_X:
+		case BPF_JMP | BPF_JEQ | BPF_K:
+		case BPF_JMP | BPF_JEQ | BPF_X:
 			true_cond = COND_EQ;
 			goto cond_branch;
-		case BPF_S_JMP_JSET_K:
-		case BPF_S_JMP_JSET_X:
+		case BPF_JMP | BPF_JSET | BPF_K:
+		case BPF_JMP | BPF_JSET | BPF_X:
 			true_cond = COND_NE;
 			/* Fall through */
 		cond_branch:
@@ -508,20 +501,20 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 				break;
 			}
-			switch (filter[i].code) {
-			case BPF_S_JMP_JGT_X:
-			case BPF_S_JMP_JGE_X:
-			case BPF_S_JMP_JEQ_X:
+			switch (code) {
+			case BPF_JMP | BPF_JGT | BPF_X:
+			case BPF_JMP | BPF_JGE | BPF_X:
+			case BPF_JMP | BPF_JEQ | BPF_X:
 				ctx->seen |= SEEN_XREG;
 				PPC_CMPLW(r_A, r_X);
 				break;
-			case BPF_S_JMP_JSET_X:
+			case BPF_JMP | BPF_JSET | BPF_X:
 				ctx->seen |= SEEN_XREG;
 				PPC_AND_DOT(r_scratch1, r_A, r_X);
 				break;
-			case BPF_S_JMP_JEQ_K:
-			case BPF_S_JMP_JGT_K:
-			case BPF_S_JMP_JGE_K:
+			case BPF_JMP | BPF_JEQ | BPF_K:
+			case BPF_JMP | BPF_JGT | BPF_K:
+			case BPF_JMP | BPF_JGE | BPF_K:
 				if (K < 32768)
 					PPC_CMPLWI(r_A, K);
 				else {
@@ -529,7 +522,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 					PPC_CMPLW(r_A, r_scratch1);
 				}
 				break;
-			case BPF_S_JMP_JSET_K:
+			case BPF_JMP | BPF_JSET | BPF_K:
 				if (K < 32768)
 					/* PPC_ANDI is /only/ dot-form */
 					PPC_ANDI(r_scratch1, r_A, K);
......
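
The paired VLAN cases in the ARM and powerpc hunks above both load skb->vlan_tci and differ only in the final mask. What the emitted code computes, as a C sketch (constants from the kernel's if_vlan.h; the BUILD_BUG_ON pins VLAN_TAG_PRESENT to 0x1000):

	#include <linux/if_vlan.h>	/* VLAN_VID_MASK (0x0fff), VLAN_TAG_PRESENT */

	/* SKF_AD_VLAN_TAG yields the 12-bit VLAN ID; SKF_AD_VLAN_TAG_PRESENT
	 * yields a non-zero value iff the tag-present bit is set.
	 */
	static u32 vlan_anc_load(u16 vlan_tci, bool want_present)
	{
		if (want_present)
			return vlan_tci & VLAN_TAG_PRESENT;	/* 0 or 0x1000 */
		return vlan_tci & VLAN_VID_MASK;		/* VID bits 0-11 */
	}
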
...@@ -269,27 +269,17 @@ static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter) ...@@ -269,27 +269,17 @@ static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
EMIT4(0xa7c80000); EMIT4(0xa7c80000);
/* Clear A if the first register does not set it. */ /* Clear A if the first register does not set it. */
switch (filter[0].code) { switch (filter[0].code) {
case BPF_S_LD_W_ABS: case BPF_LD | BPF_W | BPF_ABS:
case BPF_S_LD_H_ABS: case BPF_LD | BPF_H | BPF_ABS:
case BPF_S_LD_B_ABS: case BPF_LD | BPF_B | BPF_ABS:
case BPF_S_LD_W_LEN: case BPF_LD | BPF_W | BPF_LEN:
case BPF_S_LD_W_IND: case BPF_LD | BPF_W | BPF_IND:
case BPF_S_LD_H_IND: case BPF_LD | BPF_H | BPF_IND:
case BPF_S_LD_B_IND: case BPF_LD | BPF_B | BPF_IND:
case BPF_S_LD_IMM: case BPF_LD | BPF_IMM:
case BPF_S_LD_MEM: case BPF_LD | BPF_MEM:
case BPF_S_MISC_TXA: case BPF_MISC | BPF_TXA:
case BPF_S_ANC_PROTOCOL: case BPF_RET | BPF_K:
case BPF_S_ANC_PKTTYPE:
case BPF_S_ANC_IFINDEX:
case BPF_S_ANC_MARK:
case BPF_S_ANC_QUEUE:
case BPF_S_ANC_HATYPE:
case BPF_S_ANC_RXHASH:
case BPF_S_ANC_CPU:
case BPF_S_ANC_VLAN_TAG:
case BPF_S_ANC_VLAN_TAG_PRESENT:
case BPF_S_RET_K:
/* first instruction sets A register */ /* first instruction sets A register */
break; break;
default: /* A = 0 */ default: /* A = 0 */
...@@ -304,15 +294,18 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, ...@@ -304,15 +294,18 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
unsigned int K; unsigned int K;
int offset; int offset;
unsigned int mask; unsigned int mask;
u16 code;
K = filter->k; K = filter->k;
switch (filter->code) { code = bpf_anc_helper(filter);
case BPF_S_ALU_ADD_X: /* A += X */
switch (code) {
case BPF_ALU | BPF_ADD | BPF_X: /* A += X */
jit->seen |= SEEN_XREG; jit->seen |= SEEN_XREG;
/* ar %r5,%r12 */ /* ar %r5,%r12 */
EMIT2(0x1a5c); EMIT2(0x1a5c);
break; break;
case BPF_S_ALU_ADD_K: /* A += K */ case BPF_ALU | BPF_ADD | BPF_K: /* A += K */
if (!K) if (!K)
break; break;
if (K <= 16383) if (K <= 16383)
...@@ -325,12 +318,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, ...@@ -325,12 +318,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
/* a %r5,<d(K)>(%r13) */ /* a %r5,<d(K)>(%r13) */
EMIT4_DISP(0x5a50d000, EMIT_CONST(K)); EMIT4_DISP(0x5a50d000, EMIT_CONST(K));
break; break;
case BPF_S_ALU_SUB_X: /* A -= X */ case BPF_ALU | BPF_SUB | BPF_X: /* A -= X */
jit->seen |= SEEN_XREG; jit->seen |= SEEN_XREG;
/* sr %r5,%r12 */ /* sr %r5,%r12 */
EMIT2(0x1b5c); EMIT2(0x1b5c);
break; break;
case BPF_S_ALU_SUB_K: /* A -= K */ case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
if (!K) if (!K)
break; break;
if (K <= 16384) if (K <= 16384)
...@@ -343,12 +336,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, ...@@ -343,12 +336,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
/* s %r5,<d(K)>(%r13) */ /* s %r5,<d(K)>(%r13) */
EMIT4_DISP(0x5b50d000, EMIT_CONST(K)); EMIT4_DISP(0x5b50d000, EMIT_CONST(K));
break; break;
case BPF_S_ALU_MUL_X: /* A *= X */ case BPF_ALU | BPF_MUL | BPF_X: /* A *= X */
jit->seen |= SEEN_XREG; jit->seen |= SEEN_XREG;
/* msr %r5,%r12 */ /* msr %r5,%r12 */
EMIT4(0xb252005c); EMIT4(0xb252005c);
break; break;
case BPF_S_ALU_MUL_K: /* A *= K */ case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
if (K <= 16383) if (K <= 16383)
/* mhi %r5,K */ /* mhi %r5,K */
EMIT4_IMM(0xa75c0000, K); EMIT4_IMM(0xa75c0000, K);
...@@ -359,7 +352,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, ...@@ -359,7 +352,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
/* ms %r5,<d(K)>(%r13) */ /* ms %r5,<d(K)>(%r13) */
EMIT4_DISP(0x7150d000, EMIT_CONST(K)); EMIT4_DISP(0x7150d000, EMIT_CONST(K));
break; break;
case BPF_S_ALU_DIV_X: /* A /= X */ case BPF_ALU | BPF_DIV | BPF_X: /* A /= X */
jit->seen |= SEEN_XREG | SEEN_RET0; jit->seen |= SEEN_XREG | SEEN_RET0;
/* ltr %r12,%r12 */ /* ltr %r12,%r12 */
EMIT2(0x12cc); EMIT2(0x12cc);
...@@ -370,7 +363,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, ...@@ -370,7 +363,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
/* dlr %r4,%r12 */ /* dlr %r4,%r12 */
EMIT4(0xb997004c); EMIT4(0xb997004c);
break; break;
case BPF_S_ALU_DIV_K: /* A /= K */ case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
if (K == 1) if (K == 1)
break; break;
/* lhi %r4,0 */ /* lhi %r4,0 */
...@@ -378,7 +371,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, ...@@ -378,7 +371,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
/* dl %r4,<d(K)>(%r13) */ /* dl %r4,<d(K)>(%r13) */
EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K)); EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
break; break;
case BPF_S_ALU_MOD_X: /* A %= X */ case BPF_ALU | BPF_MOD | BPF_X: /* A %= X */
jit->seen |= SEEN_XREG | SEEN_RET0; jit->seen |= SEEN_XREG | SEEN_RET0;
/* ltr %r12,%r12 */ /* ltr %r12,%r12 */
EMIT2(0x12cc); EMIT2(0x12cc);
...@@ -391,7 +384,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, ...@@ -391,7 +384,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
/* lr %r5,%r4 */ /* lr %r5,%r4 */
EMIT2(0x1854); EMIT2(0x1854);
break; break;
case BPF_S_ALU_MOD_K: /* A %= K */ case BPF_ALU | BPF_MOD | BPF_K: /* A %= K */
if (K == 1) { if (K == 1) {
/* lhi %r5,0 */ /* lhi %r5,0 */
EMIT4(0xa7580000); EMIT4(0xa7580000);
...@@ -404,12 +397,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, ...@@ -404,12 +397,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
/* lr %r5,%r4 */ /* lr %r5,%r4 */
EMIT2(0x1854); EMIT2(0x1854);
break; break;
case BPF_S_ALU_AND_X: /* A &= X */ case BPF_ALU | BPF_AND | BPF_X: /* A &= X */
jit->seen |= SEEN_XREG; jit->seen |= SEEN_XREG;
/* nr %r5,%r12 */ /* nr %r5,%r12 */
EMIT2(0x145c); EMIT2(0x145c);
break; break;
case BPF_S_ALU_AND_K: /* A &= K */ case BPF_ALU | BPF_AND | BPF_K: /* A &= K */
if (test_facility(21)) if (test_facility(21))
/* nilf %r5,<K> */ /* nilf %r5,<K> */
EMIT6_IMM(0xc05b0000, K); EMIT6_IMM(0xc05b0000, K);
...@@ -417,12 +410,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, ...@@ -417,12 +410,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
/* n %r5,<d(K)>(%r13) */ /* n %r5,<d(K)>(%r13) */
EMIT4_DISP(0x5450d000, EMIT_CONST(K)); EMIT4_DISP(0x5450d000, EMIT_CONST(K));
break; break;
case BPF_S_ALU_OR_X: /* A |= X */ case BPF_ALU | BPF_OR | BPF_X: /* A |= X */
jit->seen |= SEEN_XREG; jit->seen |= SEEN_XREG;
/* or %r5,%r12 */ /* or %r5,%r12 */
EMIT2(0x165c); EMIT2(0x165c);
break; break;
case BPF_S_ALU_OR_K: /* A |= K */ case BPF_ALU | BPF_OR | BPF_K: /* A |= K */
if (test_facility(21)) if (test_facility(21))
/* oilf %r5,<K> */ /* oilf %r5,<K> */
EMIT6_IMM(0xc05d0000, K); EMIT6_IMM(0xc05d0000, K);
...@@ -430,55 +423,55 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, ...@@ -430,55 +423,55 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
/* o %r5,<d(K)>(%r13) */ /* o %r5,<d(K)>(%r13) */
EMIT4_DISP(0x5650d000, EMIT_CONST(K)); EMIT4_DISP(0x5650d000, EMIT_CONST(K));
break; break;
case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */ case BPF_ANC | SKF_AD_ALU_XOR_X: /* A ^= X; */
case BPF_S_ALU_XOR_X: case BPF_ALU | BPF_XOR | BPF_X:
jit->seen |= SEEN_XREG; jit->seen |= SEEN_XREG;
/* xr %r5,%r12 */ /* xr %r5,%r12 */
EMIT2(0x175c); EMIT2(0x175c);
break; break;
case BPF_S_ALU_XOR_K: /* A ^= K */ case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
if (!K) if (!K)
break; break;
/* x %r5,<d(K)>(%r13) */ /* x %r5,<d(K)>(%r13) */
EMIT4_DISP(0x5750d000, EMIT_CONST(K)); EMIT4_DISP(0x5750d000, EMIT_CONST(K));
break; break;
case BPF_S_ALU_LSH_X: /* A <<= X; */ case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
jit->seen |= SEEN_XREG; jit->seen |= SEEN_XREG;
/* sll %r5,0(%r12) */ /* sll %r5,0(%r12) */
EMIT4(0x8950c000); EMIT4(0x8950c000);
break; break;
case BPF_S_ALU_LSH_K: /* A <<= K */ case BPF_ALU | BPF_LSH | BPF_K: /* A <<= K */
if (K == 0) if (K == 0)
break; break;
/* sll %r5,K */ /* sll %r5,K */
EMIT4_DISP(0x89500000, K); EMIT4_DISP(0x89500000, K);
break; break;
case BPF_S_ALU_RSH_X: /* A >>= X; */ case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
jit->seen |= SEEN_XREG; jit->seen |= SEEN_XREG;
/* srl %r5,0(%r12) */ /* srl %r5,0(%r12) */
EMIT4(0x8850c000); EMIT4(0x8850c000);
break; break;
case BPF_S_ALU_RSH_K: /* A >>= K; */ case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
if (K == 0) if (K == 0)
break; break;
/* srl %r5,K */ /* srl %r5,K */
EMIT4_DISP(0x88500000, K); EMIT4_DISP(0x88500000, K);
break; break;
case BPF_S_ALU_NEG: /* A = -A */ case BPF_ALU | BPF_NEG: /* A = -A */
/* lnr %r5,%r5 */ /* lnr %r5,%r5 */
EMIT2(0x1155); EMIT2(0x1155);
break; break;
case BPF_S_JMP_JA: /* ip += K */ case BPF_JMP | BPF_JA: /* ip += K */
offset = addrs[i + K] + jit->start - jit->prg; offset = addrs[i + K] + jit->start - jit->prg;
EMIT4_PCREL(0xa7f40000, offset); EMIT4_PCREL(0xa7f40000, offset);
break; break;
case BPF_S_JMP_JGT_K: /* ip += (A > K) ? jt : jf */ case BPF_JMP | BPF_JGT | BPF_K: /* ip += (A > K) ? jt : jf */
mask = 0x200000; /* jh */ mask = 0x200000; /* jh */
goto kbranch; goto kbranch;
case BPF_S_JMP_JGE_K: /* ip += (A >= K) ? jt : jf */ case BPF_JMP | BPF_JGE | BPF_K: /* ip += (A >= K) ? jt : jf */
mask = 0xa00000; /* jhe */ mask = 0xa00000; /* jhe */
goto kbranch; goto kbranch;
case BPF_S_JMP_JEQ_K: /* ip += (A == K) ? jt : jf */ case BPF_JMP | BPF_JEQ | BPF_K: /* ip += (A == K) ? jt : jf */
mask = 0x800000; /* je */ mask = 0x800000; /* je */
kbranch: /* Emit compare if the branch targets are different */ kbranch: /* Emit compare if the branch targets are different */
if (filter->jt != filter->jf) { if (filter->jt != filter->jf) {
...@@ -511,7 +504,7 @@ branch: if (filter->jt == filter->jf) { ...@@ -511,7 +504,7 @@ branch: if (filter->jt == filter->jf) {
EMIT4_PCREL(0xa7040000 | (mask ^ 0xf00000), offset); EMIT4_PCREL(0xa7040000 | (mask ^ 0xf00000), offset);
} }
break; break;
case BPF_S_JMP_JSET_K: /* ip += (A & K) ? jt : jf */ case BPF_JMP | BPF_JSET | BPF_K: /* ip += (A & K) ? jt : jf */
mask = 0x700000; /* jnz */ mask = 0x700000; /* jnz */
/* Emit test if the branch targets are different */ /* Emit test if the branch targets are different */
if (filter->jt != filter->jf) { if (filter->jt != filter->jf) {
...@@ -525,13 +518,13 @@ branch: if (filter->jt == filter->jf) { ...@@ -525,13 +518,13 @@ branch: if (filter->jt == filter->jf) {
EMIT4_IMM(0xa7510000, K); EMIT4_IMM(0xa7510000, K);
} }
goto branch; goto branch;
case BPF_S_JMP_JGT_X: /* ip += (A > X) ? jt : jf */ case BPF_JMP | BPF_JGT | BPF_X: /* ip += (A > X) ? jt : jf */
mask = 0x200000; /* jh */ mask = 0x200000; /* jh */
goto xbranch; goto xbranch;
case BPF_S_JMP_JGE_X: /* ip += (A >= X) ? jt : jf */ case BPF_JMP | BPF_JGE | BPF_X: /* ip += (A >= X) ? jt : jf */
mask = 0xa00000; /* jhe */ mask = 0xa00000; /* jhe */
goto xbranch; goto xbranch;
case BPF_S_JMP_JEQ_X: /* ip += (A == X) ? jt : jf */ case BPF_JMP | BPF_JEQ | BPF_X: /* ip += (A == X) ? jt : jf */
mask = 0x800000; /* je */ mask = 0x800000; /* je */
xbranch: /* Emit compare if the branch targets are different */ xbranch: /* Emit compare if the branch targets are different */
if (filter->jt != filter->jf) { if (filter->jt != filter->jf) {
...@@ -540,7 +533,7 @@ branch: if (filter->jt == filter->jf) { ...@@ -540,7 +533,7 @@ branch: if (filter->jt == filter->jf) {
EMIT2(0x195c); EMIT2(0x195c);
} }
goto branch; goto branch;
case BPF_S_JMP_JSET_X: /* ip += (A & X) ? jt : jf */ case BPF_JMP | BPF_JSET | BPF_X: /* ip += (A & X) ? jt : jf */
mask = 0x700000; /* jnz */ mask = 0x700000; /* jnz */
/* Emit test if the branch targets are different */ /* Emit test if the branch targets are different */
if (filter->jt != filter->jf) { if (filter->jt != filter->jf) {
...@@ -551,15 +544,15 @@ branch: if (filter->jt == filter->jf) { ...@@ -551,15 +544,15 @@ branch: if (filter->jt == filter->jf) {
EMIT2(0x144c); EMIT2(0x144c);
} }
goto branch; goto branch;
case BPF_S_LD_W_ABS: /* A = *(u32 *) (skb->data+K) */ case BPF_LD | BPF_W | BPF_ABS: /* A = *(u32 *) (skb->data+K) */
jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_WORD; jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_WORD;
offset = jit->off_load_word; offset = jit->off_load_word;
goto load_abs; goto load_abs;
case BPF_S_LD_H_ABS: /* A = *(u16 *) (skb->data+K) */ case BPF_LD | BPF_H | BPF_ABS: /* A = *(u16 *) (skb->data+K) */
jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_HALF; jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_HALF;
offset = jit->off_load_half; offset = jit->off_load_half;
goto load_abs; goto load_abs;
case BPF_S_LD_B_ABS: /* A = *(u8 *) (skb->data+K) */ case BPF_LD | BPF_B | BPF_ABS: /* A = *(u8 *) (skb->data+K) */
jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_BYTE; jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_BYTE;
offset = jit->off_load_byte; offset = jit->off_load_byte;
load_abs: if ((int) K < 0) load_abs: if ((int) K < 0)
...@@ -573,19 +566,19 @@ load_abs: if ((int) K < 0) ...@@ -573,19 +566,19 @@ load_abs: if ((int) K < 0)
/* jnz <ret0> */ /* jnz <ret0> */
EMIT4_PCREL(0xa7740000, (jit->ret0_ip - jit->prg)); EMIT4_PCREL(0xa7740000, (jit->ret0_ip - jit->prg));
break; break;
case BPF_S_LD_W_IND: /* A = *(u32 *) (skb->data+K+X) */ case BPF_LD | BPF_W | BPF_IND: /* A = *(u32 *) (skb->data+K+X) */
jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IWORD; jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IWORD;
offset = jit->off_load_iword; offset = jit->off_load_iword;
goto call_fn; goto call_fn;
case BPF_S_LD_H_IND: /* A = *(u16 *) (skb->data+K+X) */ case BPF_LD | BPF_H | BPF_IND: /* A = *(u16 *) (skb->data+K+X) */
jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IHALF; jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IHALF;
offset = jit->off_load_ihalf; offset = jit->off_load_ihalf;
goto call_fn; goto call_fn;
case BPF_S_LD_B_IND: /* A = *(u8 *) (skb->data+K+X) */ case BPF_LD | BPF_B | BPF_IND: /* A = *(u8 *) (skb->data+K+X) */
jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IBYTE; jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IBYTE;
offset = jit->off_load_ibyte; offset = jit->off_load_ibyte;
goto call_fn; goto call_fn;
case BPF_S_LDX_B_MSH: case BPF_LDX | BPF_B | BPF_MSH:
/* X = (*(u8 *)(skb->data+K) & 0xf) << 2 */ /* X = (*(u8 *)(skb->data+K) & 0xf) << 2 */
jit->seen |= SEEN_RET0; jit->seen |= SEEN_RET0;
if ((int) K < 0) { if ((int) K < 0) {
...@@ -596,17 +589,17 @@ load_abs: if ((int) K < 0) ...@@ -596,17 +589,17 @@ load_abs: if ((int) K < 0)
jit->seen |= SEEN_DATAREF | SEEN_LOAD_BMSH; jit->seen |= SEEN_DATAREF | SEEN_LOAD_BMSH;
offset = jit->off_load_bmsh; offset = jit->off_load_bmsh;
goto call_fn; goto call_fn;
case BPF_S_LD_W_LEN: /* A = skb->len; */ case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
/* l %r5,<d(len)>(%r2) */ /* l %r5,<d(len)>(%r2) */
EMIT4_DISP(0x58502000, offsetof(struct sk_buff, len)); EMIT4_DISP(0x58502000, offsetof(struct sk_buff, len));
break; break;
case BPF_S_LDX_W_LEN: /* X = skb->len; */ case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
jit->seen |= SEEN_XREG; jit->seen |= SEEN_XREG;
/* l %r12,<d(len)>(%r2) */ /* l %r12,<d(len)>(%r2) */
EMIT4_DISP(0x58c02000, offsetof(struct sk_buff, len)); EMIT4_DISP(0x58c02000, offsetof(struct sk_buff, len));
break; break;
case BPF_S_LD_IMM: /* A = K */ case BPF_LD | BPF_IMM: /* A = K */
if (K <= 16383) if (K <= 16383)
/* lhi %r5,K */ /* lhi %r5,K */
EMIT4_IMM(0xa7580000, K); EMIT4_IMM(0xa7580000, K);
...@@ -617,7 +610,7 @@ load_abs: if ((int) K < 0) ...@@ -617,7 +610,7 @@ load_abs: if ((int) K < 0)
/* l %r5,<d(K)>(%r13) */ /* l %r5,<d(K)>(%r13) */
EMIT4_DISP(0x5850d000, EMIT_CONST(K)); EMIT4_DISP(0x5850d000, EMIT_CONST(K));
break; break;
case BPF_S_LDX_IMM: /* X = K */ case BPF_LDX | BPF_IMM: /* X = K */
jit->seen |= SEEN_XREG; jit->seen |= SEEN_XREG;
if (K <= 16383) if (K <= 16383)
/* lhi %r12,<K> */ /* lhi %r12,<K> */
...@@ -629,29 +622,29 @@ load_abs: if ((int) K < 0) ...@@ -629,29 +622,29 @@ load_abs: if ((int) K < 0)
/* l %r12,<d(K)>(%r13) */ /* l %r12,<d(K)>(%r13) */
EMIT4_DISP(0x58c0d000, EMIT_CONST(K)); EMIT4_DISP(0x58c0d000, EMIT_CONST(K));
break; break;
case BPF_S_LD_MEM: /* A = mem[K] */ case BPF_LD | BPF_MEM: /* A = mem[K] */
jit->seen |= SEEN_MEM; jit->seen |= SEEN_MEM;
/* l %r5,<K>(%r15) */ /* l %r5,<K>(%r15) */
EMIT4_DISP(0x5850f000, EMIT4_DISP(0x5850f000,
(jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4); (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
break; break;
case BPF_S_LDX_MEM: /* X = mem[K] */ case BPF_LDX | BPF_MEM: /* X = mem[K] */
jit->seen |= SEEN_XREG | SEEN_MEM; jit->seen |= SEEN_XREG | SEEN_MEM;
/* l %r12,<K>(%r15) */ /* l %r12,<K>(%r15) */
EMIT4_DISP(0x58c0f000, EMIT4_DISP(0x58c0f000,
(jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4); (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
break; break;
case BPF_S_MISC_TAX: /* X = A */ case BPF_MISC | BPF_TAX: /* X = A */
jit->seen |= SEEN_XREG; jit->seen |= SEEN_XREG;
/* lr %r12,%r5 */ /* lr %r12,%r5 */
EMIT2(0x18c5); EMIT2(0x18c5);
break; break;
case BPF_S_MISC_TXA: /* A = X */ case BPF_MISC | BPF_TXA: /* A = X */
jit->seen |= SEEN_XREG; jit->seen |= SEEN_XREG;
/* lr %r5,%r12 */ /* lr %r5,%r12 */
EMIT2(0x185c); EMIT2(0x185c);
break; break;
case BPF_S_RET_K: case BPF_RET | BPF_K:
if (K == 0) { if (K == 0) {
jit->seen |= SEEN_RET0; jit->seen |= SEEN_RET0;
if (last) if (last)
...@@ -671,33 +664,33 @@ load_abs: if ((int) K < 0) ...@@ -671,33 +664,33 @@ load_abs: if ((int) K < 0)
EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg); EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
} }
break; break;
case BPF_S_RET_A: case BPF_RET | BPF_A:
/* llgfr %r2,%r5 */ /* llgfr %r2,%r5 */
EMIT4(0xb9160025); EMIT4(0xb9160025);
/* j <exit> */ /* j <exit> */
EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg); EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
break; break;
case BPF_S_ST: /* mem[K] = A */ case BPF_ST: /* mem[K] = A */
jit->seen |= SEEN_MEM; jit->seen |= SEEN_MEM;
/* st %r5,<K>(%r15) */ /* st %r5,<K>(%r15) */
EMIT4_DISP(0x5050f000, EMIT4_DISP(0x5050f000,
(jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4); (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
break; break;
case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */ case BPF_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
jit->seen |= SEEN_XREG | SEEN_MEM; jit->seen |= SEEN_XREG | SEEN_MEM;
/* st %r12,<K>(%r15) */ /* st %r12,<K>(%r15) */
EMIT4_DISP(0x50c0f000, EMIT4_DISP(0x50c0f000,
(jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4); (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
break; break;
case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */ case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
/* lhi %r5,0 */ /* lhi %r5,0 */
EMIT4(0xa7580000); EMIT4(0xa7580000);
/* icm %r5,3,<d(protocol)>(%r2) */ /* icm %r5,3,<d(protocol)>(%r2) */
EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, protocol)); EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, protocol));
break; break;
case BPF_S_ANC_IFINDEX: /* if (!skb->dev) return 0; case BPF_ANC | SKF_AD_IFINDEX: /* if (!skb->dev) return 0;
* A = skb->dev->ifindex */ * A = skb->dev->ifindex */
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
jit->seen |= SEEN_RET0; jit->seen |= SEEN_RET0;
/* lg %r1,<d(dev)>(%r2) */ /* lg %r1,<d(dev)>(%r2) */
...@@ -709,20 +702,20 @@ load_abs: if ((int) K < 0) ...@@ -709,20 +702,20 @@ load_abs: if ((int) K < 0)
/* l %r5,<d(ifindex)>(%r1) */ /* l %r5,<d(ifindex)>(%r1) */
EMIT4_DISP(0x58501000, offsetof(struct net_device, ifindex)); EMIT4_DISP(0x58501000, offsetof(struct net_device, ifindex));
break; break;
case BPF_S_ANC_MARK: /* A = skb->mark */ case BPF_ANC | SKF_AD_MARK: /* A = skb->mark */
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
/* l %r5,<d(mark)>(%r2) */ /* l %r5,<d(mark)>(%r2) */
EMIT4_DISP(0x58502000, offsetof(struct sk_buff, mark)); EMIT4_DISP(0x58502000, offsetof(struct sk_buff, mark));
break; break;
case BPF_S_ANC_QUEUE: /* A = skb->queue_mapping */ case BPF_ANC | SKF_AD_QUEUE: /* A = skb->queue_mapping */
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2); BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
/* lhi %r5,0 */ /* lhi %r5,0 */
EMIT4(0xa7580000); EMIT4(0xa7580000);
/* icm %r5,3,<d(queue_mapping)>(%r2) */ /* icm %r5,3,<d(queue_mapping)>(%r2) */
EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, queue_mapping)); EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, queue_mapping));
break; break;
case BPF_S_ANC_HATYPE: /* if (!skb->dev) return 0; case BPF_ANC | SKF_AD_HATYPE: /* if (!skb->dev) return 0;
* A = skb->dev->type */ * A = skb->dev->type */
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2); BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
jit->seen |= SEEN_RET0; jit->seen |= SEEN_RET0;
/* lg %r1,<d(dev)>(%r2) */ /* lg %r1,<d(dev)>(%r2) */
...@@ -736,20 +729,20 @@ load_abs: if ((int) K < 0) ...@@ -736,20 +729,20 @@ load_abs: if ((int) K < 0)
/* icm %r5,3,<d(type)>(%r1) */ /* icm %r5,3,<d(type)>(%r1) */
EMIT4_DISP(0xbf531000, offsetof(struct net_device, type)); EMIT4_DISP(0xbf531000, offsetof(struct net_device, type));
break; break;
case BPF_S_ANC_RXHASH: /* A = skb->hash */ case BPF_ANC | SKF_AD_RXHASH: /* A = skb->hash */
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
/* l %r5,<d(hash)>(%r2) */ /* l %r5,<d(hash)>(%r2) */
EMIT4_DISP(0x58502000, offsetof(struct sk_buff, hash)); EMIT4_DISP(0x58502000, offsetof(struct sk_buff, hash));
break; break;
case BPF_S_ANC_VLAN_TAG: case BPF_ANC | SKF_AD_VLAN_TAG:
case BPF_S_ANC_VLAN_TAG_PRESENT: case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000); BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
/* lhi %r5,0 */ /* lhi %r5,0 */
EMIT4(0xa7580000); EMIT4(0xa7580000);
/* icm %r5,3,<d(vlan_tci)>(%r2) */ /* icm %r5,3,<d(vlan_tci)>(%r2) */
EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, vlan_tci)); EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, vlan_tci));
if (filter->code == BPF_S_ANC_VLAN_TAG) { if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
/* nill %r5,0xefff */ /* nill %r5,0xefff */
EMIT4_IMM(0xa5570000, ~VLAN_TAG_PRESENT); EMIT4_IMM(0xa5570000, ~VLAN_TAG_PRESENT);
} else { } else {
...@@ -759,7 +752,7 @@ load_abs: if ((int) K < 0) ...@@ -759,7 +752,7 @@ load_abs: if ((int) K < 0)
EMIT4_DISP(0x88500000, 12); EMIT4_DISP(0x88500000, 12);
} }
break; break;
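	/* Worked example for the two paths above (vlan_tci value made up):
	 * with vlan_tci = 0x1005 (VID 5, VLAN_TAG_PRESENT set),
	 *   SKF_AD_VLAN_TAG:         A = 0x1005 & 0xefff = 0x0005
	 *   SKF_AD_VLAN_TAG_PRESENT: A = 0x1005 >> 12    = 0x1
	 */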
case BPF_S_ANC_PKTTYPE: case BPF_ANC | SKF_AD_PKTTYPE:
if (pkt_type_offset < 0) if (pkt_type_offset < 0)
goto out; goto out;
/* lhi %r5,0 */ /* lhi %r5,0 */
...@@ -769,7 +762,7 @@ load_abs: if ((int) K < 0) ...@@ -769,7 +762,7 @@ load_abs: if ((int) K < 0)
/* srl %r5,5 */ /* srl %r5,5 */
EMIT4_DISP(0x88500000, 5); EMIT4_DISP(0x88500000, 5);
break; break;
case BPF_S_ANC_CPU: /* A = smp_processor_id() */ case BPF_ANC | SKF_AD_CPU: /* A = smp_processor_id() */
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* l %r5,<d(cpu_nr)> */ /* l %r5,<d(cpu_nr)> */
EMIT4_DISP(0x58500000, offsetof(struct _lowcore, cpu_nr)); EMIT4_DISP(0x58500000, offsetof(struct _lowcore, cpu_nr));
......
...@@ -415,20 +415,11 @@ void bpf_jit_compile(struct sk_filter *fp) ...@@ -415,20 +415,11 @@ void bpf_jit_compile(struct sk_filter *fp)
emit_reg_move(O7, r_saved_O7); emit_reg_move(O7, r_saved_O7);
switch (filter[0].code) { switch (filter[0].code) {
case BPF_S_RET_K: case BPF_RET | BPF_K:
case BPF_S_LD_W_LEN: case BPF_LD | BPF_W | BPF_LEN:
case BPF_S_ANC_PROTOCOL: case BPF_LD | BPF_W | BPF_ABS:
case BPF_S_ANC_PKTTYPE: case BPF_LD | BPF_H | BPF_ABS:
case BPF_S_ANC_IFINDEX: case BPF_LD | BPF_B | BPF_ABS:
case BPF_S_ANC_MARK:
case BPF_S_ANC_RXHASH:
case BPF_S_ANC_VLAN_TAG:
case BPF_S_ANC_VLAN_TAG_PRESENT:
case BPF_S_ANC_CPU:
case BPF_S_ANC_QUEUE:
case BPF_S_LD_W_ABS:
case BPF_S_LD_H_ABS:
case BPF_S_LD_B_ABS:
/* The first instruction sets the A register (or is /* The first instruction sets the A register (or is
* a "RET 'constant'") * a "RET 'constant'")
*/ */
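A hedged illustration of why these first-instruction cases matter: a classic
filter such as the sketch below (hypothetical, not from this patch) starts by
writing the A register, so the JIT may omit the "clear A" prologue instruction
without leaking stale register contents to the filter.

	/* First insn loads into A; a prologue clear of A is unnecessary. */
	struct sock_filter demo[] = {
		BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),            /* A = ethertype */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 1), /* IPv4? */
		BPF_STMT(BPF_RET | BPF_K, 0xffff),                 /* accept */
		BPF_STMT(BPF_RET | BPF_K, 0),                      /* drop */
	};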
...@@ -445,59 +436,60 @@ void bpf_jit_compile(struct sk_filter *fp) ...@@ -445,59 +436,60 @@ void bpf_jit_compile(struct sk_filter *fp)
unsigned int t_offset; unsigned int t_offset;
unsigned int f_offset; unsigned int f_offset;
u32 t_op, f_op; u32 t_op, f_op;
u16 code = bpf_anc_helper(&filter[i]);
int ilen; int ilen;
switch (filter[i].code) { switch (code) {
case BPF_S_ALU_ADD_X: /* A += X; */ case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
emit_alu_X(ADD); emit_alu_X(ADD);
break; break;
case BPF_S_ALU_ADD_K: /* A += K; */ case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
emit_alu_K(ADD, K); emit_alu_K(ADD, K);
break; break;
case BPF_S_ALU_SUB_X: /* A -= X; */ case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
emit_alu_X(SUB); emit_alu_X(SUB);
break; break;
case BPF_S_ALU_SUB_K: /* A -= K */ case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
emit_alu_K(SUB, K); emit_alu_K(SUB, K);
break; break;
case BPF_S_ALU_AND_X: /* A &= X */ case BPF_ALU | BPF_AND | BPF_X: /* A &= X */
emit_alu_X(AND); emit_alu_X(AND);
break; break;
case BPF_S_ALU_AND_K: /* A &= K */ case BPF_ALU | BPF_AND | BPF_K: /* A &= K */
emit_alu_K(AND, K); emit_alu_K(AND, K);
break; break;
case BPF_S_ALU_OR_X: /* A |= X */ case BPF_ALU | BPF_OR | BPF_X: /* A |= X */
emit_alu_X(OR); emit_alu_X(OR);
break; break;
case BPF_S_ALU_OR_K: /* A |= K */ case BPF_ALU | BPF_OR | BPF_K: /* A |= K */
emit_alu_K(OR, K); emit_alu_K(OR, K);
break; break;
case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */ case BPF_ANC | SKF_AD_ALU_XOR_X: /* A ^= X; */
case BPF_S_ALU_XOR_X: case BPF_ALU | BPF_XOR | BPF_X:
emit_alu_X(XOR); emit_alu_X(XOR);
break; break;
case BPF_S_ALU_XOR_K: /* A ^= K */ case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
emit_alu_K(XOR, K); emit_alu_K(XOR, K);
break; break;
case BPF_S_ALU_LSH_X: /* A <<= X */ case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X */
emit_alu_X(SLL); emit_alu_X(SLL);
break; break;
case BPF_S_ALU_LSH_K: /* A <<= K */ case BPF_ALU | BPF_LSH | BPF_K: /* A <<= K */
emit_alu_K(SLL, K); emit_alu_K(SLL, K);
break; break;
case BPF_S_ALU_RSH_X: /* A >>= X */ case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X */
emit_alu_X(SRL); emit_alu_X(SRL);
break; break;
case BPF_S_ALU_RSH_K: /* A >>= K */ case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K */
emit_alu_K(SRL, K); emit_alu_K(SRL, K);
break; break;
case BPF_S_ALU_MUL_X: /* A *= X; */ case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
emit_alu_X(MUL); emit_alu_X(MUL);
break; break;
case BPF_S_ALU_MUL_K: /* A *= K */ case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
emit_alu_K(MUL, K); emit_alu_K(MUL, K);
break; break;
case BPF_S_ALU_DIV_K: /* A /= K with K != 0*/ case BPF_ALU | BPF_DIV | BPF_K: /* A /= K with K != 0*/
if (K == 1) if (K == 1)
break; break;
emit_write_y(G0); emit_write_y(G0);
...@@ -512,7 +504,7 @@ void bpf_jit_compile(struct sk_filter *fp) ...@@ -512,7 +504,7 @@ void bpf_jit_compile(struct sk_filter *fp)
#endif #endif
emit_alu_K(DIV, K); emit_alu_K(DIV, K);
break; break;
case BPF_S_ALU_DIV_X: /* A /= X; */ case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
emit_cmpi(r_X, 0); emit_cmpi(r_X, 0);
if (pc_ret0 > 0) { if (pc_ret0 > 0) {
t_offset = addrs[pc_ret0 - 1]; t_offset = addrs[pc_ret0 - 1];
...@@ -544,10 +536,10 @@ void bpf_jit_compile(struct sk_filter *fp) ...@@ -544,10 +536,10 @@ void bpf_jit_compile(struct sk_filter *fp)
#endif #endif
emit_alu_X(DIV); emit_alu_X(DIV);
break; break;
case BPF_S_ALU_NEG: case BPF_ALU | BPF_NEG:
emit_neg(); emit_neg();
break; break;
case BPF_S_RET_K: case BPF_RET | BPF_K:
if (!K) { if (!K) {
if (pc_ret0 == -1) if (pc_ret0 == -1)
pc_ret0 = i; pc_ret0 = i;
...@@ -556,7 +548,7 @@ void bpf_jit_compile(struct sk_filter *fp) ...@@ -556,7 +548,7 @@ void bpf_jit_compile(struct sk_filter *fp)
emit_loadimm(K, r_A); emit_loadimm(K, r_A);
} }
/* Fallthrough */ /* Fallthrough */
case BPF_S_RET_A: case BPF_RET | BPF_A:
if (seen_or_pass0) { if (seen_or_pass0) {
if (i != flen - 1) { if (i != flen - 1) {
emit_jump(cleanup_addr); emit_jump(cleanup_addr);
...@@ -573,18 +565,18 @@ void bpf_jit_compile(struct sk_filter *fp) ...@@ -573,18 +565,18 @@ void bpf_jit_compile(struct sk_filter *fp)
emit_jmpl(r_saved_O7, 8, G0); emit_jmpl(r_saved_O7, 8, G0);
emit_reg_move(r_A, O0); /* delay slot */ emit_reg_move(r_A, O0); /* delay slot */
break; break;
case BPF_S_MISC_TAX: case BPF_MISC | BPF_TAX:
seen |= SEEN_XREG; seen |= SEEN_XREG;
emit_reg_move(r_A, r_X); emit_reg_move(r_A, r_X);
break; break;
case BPF_S_MISC_TXA: case BPF_MISC | BPF_TXA:
seen |= SEEN_XREG; seen |= SEEN_XREG;
emit_reg_move(r_X, r_A); emit_reg_move(r_X, r_A);
break; break;
case BPF_S_ANC_CPU: case BPF_ANC | SKF_AD_CPU:
emit_load_cpu(r_A); emit_load_cpu(r_A);
break; break;
case BPF_S_ANC_PROTOCOL: case BPF_ANC | SKF_AD_PROTOCOL:
emit_skb_load16(protocol, r_A); emit_skb_load16(protocol, r_A);
break; break;
#if 0 #if 0
...@@ -592,38 +584,38 @@ void bpf_jit_compile(struct sk_filter *fp) ...@@ -592,38 +584,38 @@ void bpf_jit_compile(struct sk_filter *fp)
* a bit field even though we very much * a bit field even though we very much
* know what we are doing here. * know what we are doing here.
*/ */
case BPF_S_ANC_PKTTYPE: case BPF_ANC | SKF_AD_PKTTYPE:
__emit_skb_load8(pkt_type, r_A); __emit_skb_load8(pkt_type, r_A);
emit_alu_K(SRL, 5); emit_alu_K(SRL, 5);
break; break;
#endif #endif
case BPF_S_ANC_IFINDEX: case BPF_ANC | SKF_AD_IFINDEX:
emit_skb_loadptr(dev, r_A); emit_skb_loadptr(dev, r_A);
emit_cmpi(r_A, 0); emit_cmpi(r_A, 0);
emit_branch(BNE_PTR, cleanup_addr + 4); emit_branch(BNE_PTR, cleanup_addr + 4);
emit_nop(); emit_nop();
emit_load32(r_A, struct net_device, ifindex, r_A); emit_load32(r_A, struct net_device, ifindex, r_A);
break; break;
case BPF_S_ANC_MARK: case BPF_ANC | SKF_AD_MARK:
emit_skb_load32(mark, r_A); emit_skb_load32(mark, r_A);
break; break;
case BPF_S_ANC_QUEUE: case BPF_ANC | SKF_AD_QUEUE:
emit_skb_load16(queue_mapping, r_A); emit_skb_load16(queue_mapping, r_A);
break; break;
case BPF_S_ANC_HATYPE: case BPF_ANC | SKF_AD_HATYPE:
emit_skb_loadptr(dev, r_A); emit_skb_loadptr(dev, r_A);
emit_cmpi(r_A, 0); emit_cmpi(r_A, 0);
emit_branch(BNE_PTR, cleanup_addr + 4); emit_branch(BNE_PTR, cleanup_addr + 4);
emit_nop(); emit_nop();
emit_load16(r_A, struct net_device, type, r_A); emit_load16(r_A, struct net_device, type, r_A);
break; break;
case BPF_S_ANC_RXHASH: case BPF_ANC | SKF_AD_RXHASH:
emit_skb_load32(hash, r_A); emit_skb_load32(hash, r_A);
break; break;
case BPF_S_ANC_VLAN_TAG: case BPF_ANC | SKF_AD_VLAN_TAG:
case BPF_S_ANC_VLAN_TAG_PRESENT: case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
emit_skb_load16(vlan_tci, r_A); emit_skb_load16(vlan_tci, r_A);
if (filter[i].code == BPF_S_ANC_VLAN_TAG) { if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
emit_andi(r_A, VLAN_VID_MASK, r_A); emit_andi(r_A, VLAN_VID_MASK, r_A);
} else { } else {
emit_loadimm(VLAN_TAG_PRESENT, r_TMP); emit_loadimm(VLAN_TAG_PRESENT, r_TMP);
...@@ -631,44 +623,44 @@ void bpf_jit_compile(struct sk_filter *fp) ...@@ -631,44 +623,44 @@ void bpf_jit_compile(struct sk_filter *fp)
} }
break; break;
case BPF_S_LD_IMM: case BPF_LD | BPF_IMM:
emit_loadimm(K, r_A); emit_loadimm(K, r_A);
break; break;
case BPF_S_LDX_IMM: case BPF_LDX | BPF_IMM:
emit_loadimm(K, r_X); emit_loadimm(K, r_X);
break; break;
case BPF_S_LD_MEM: case BPF_LD | BPF_MEM:
emit_ldmem(K * 4, r_A); emit_ldmem(K * 4, r_A);
break; break;
case BPF_S_LDX_MEM: case BPF_LDX | BPF_MEM:
emit_ldmem(K * 4, r_X); emit_ldmem(K * 4, r_X);
break; break;
case BPF_S_ST: case BPF_ST:
emit_stmem(K * 4, r_A); emit_stmem(K * 4, r_A);
break; break;
case BPF_S_STX: case BPF_STX:
emit_stmem(K * 4, r_X); emit_stmem(K * 4, r_X);
break; break;
#define CHOOSE_LOAD_FUNC(K, func) \ #define CHOOSE_LOAD_FUNC(K, func) \
((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset) ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
case BPF_S_LD_W_ABS: case BPF_LD | BPF_W | BPF_ABS:
func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word); func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word);
common_load: seen |= SEEN_DATAREF; common_load: seen |= SEEN_DATAREF;
emit_loadimm(K, r_OFF); emit_loadimm(K, r_OFF);
emit_call(func); emit_call(func);
break; break;
case BPF_S_LD_H_ABS: case BPF_LD | BPF_H | BPF_ABS:
func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half); func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half);
goto common_load; goto common_load;
case BPF_S_LD_B_ABS: case BPF_LD | BPF_B | BPF_ABS:
func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte); func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte);
goto common_load; goto common_load;
case BPF_S_LDX_B_MSH: case BPF_LDX | BPF_B | BPF_MSH:
func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh); func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh);
goto common_load; goto common_load;
case BPF_S_LD_W_IND: case BPF_LD | BPF_W | BPF_IND:
func = bpf_jit_load_word; func = bpf_jit_load_word;
common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG; common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
if (K) { if (K) {
...@@ -683,13 +675,13 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG; ...@@ -683,13 +675,13 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
} }
emit_call(func); emit_call(func);
break; break;
case BPF_S_LD_H_IND: case BPF_LD | BPF_H | BPF_IND:
func = bpf_jit_load_half; func = bpf_jit_load_half;
goto common_load_ind; goto common_load_ind;
case BPF_S_LD_B_IND: case BPF_LD | BPF_B | BPF_IND:
func = bpf_jit_load_byte; func = bpf_jit_load_byte;
goto common_load_ind; goto common_load_ind;
case BPF_S_JMP_JA: case BPF_JMP | BPF_JA:
emit_jump(addrs[i + K]); emit_jump(addrs[i + K]);
emit_nop(); emit_nop();
break; break;
...@@ -700,14 +692,14 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG; ...@@ -700,14 +692,14 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
f_op = FOP; \ f_op = FOP; \
goto cond_branch goto cond_branch
COND_SEL(BPF_S_JMP_JGT_K, BGU, BLEU); COND_SEL(BPF_JMP | BPF_JGT | BPF_K, BGU, BLEU);
COND_SEL(BPF_S_JMP_JGE_K, BGEU, BLU); COND_SEL(BPF_JMP | BPF_JGE | BPF_K, BGEU, BLU);
COND_SEL(BPF_S_JMP_JEQ_K, BE, BNE); COND_SEL(BPF_JMP | BPF_JEQ | BPF_K, BE, BNE);
COND_SEL(BPF_S_JMP_JSET_K, BNE, BE); COND_SEL(BPF_JMP | BPF_JSET | BPF_K, BNE, BE);
COND_SEL(BPF_S_JMP_JGT_X, BGU, BLEU); COND_SEL(BPF_JMP | BPF_JGT | BPF_X, BGU, BLEU);
COND_SEL(BPF_S_JMP_JGE_X, BGEU, BLU); COND_SEL(BPF_JMP | BPF_JGE | BPF_X, BGEU, BLU);
COND_SEL(BPF_S_JMP_JEQ_X, BE, BNE); COND_SEL(BPF_JMP | BPF_JEQ | BPF_X, BE, BNE);
COND_SEL(BPF_S_JMP_JSET_X, BNE, BE); COND_SEL(BPF_JMP | BPF_JSET | BPF_X, BNE, BE);
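	/* Sketch (an assumption based on the visible f_op/goto tail above):
	 * COND_SEL(TOP, TRUE_OP, FALSE_OP) expands to roughly
	 *
	 *	case TOP:
	 *		t_op = TRUE_OP;
	 *		f_op = FALSE_OP;
	 *		goto cond_branch;
	 *
	 * so each line below selects the taken and fall-through branch
	 * opcodes for one jump code.
	 */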
cond_branch: f_offset = addrs[i + filter[i].jf]; cond_branch: f_offset = addrs[i + filter[i].jf];
t_offset = addrs[i + filter[i].jt]; t_offset = addrs[i + filter[i].jt];
...@@ -719,20 +711,20 @@ cond_branch: f_offset = addrs[i + filter[i].jf]; ...@@ -719,20 +711,20 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
break; break;
} }
switch (filter[i].code) { switch (code) {
case BPF_S_JMP_JGT_X: case BPF_JMP | BPF_JGT | BPF_X:
case BPF_S_JMP_JGE_X: case BPF_JMP | BPF_JGE | BPF_X:
case BPF_S_JMP_JEQ_X: case BPF_JMP | BPF_JEQ | BPF_X:
seen |= SEEN_XREG; seen |= SEEN_XREG;
emit_cmp(r_A, r_X); emit_cmp(r_A, r_X);
break; break;
case BPF_S_JMP_JSET_X: case BPF_JMP | BPF_JSET | BPF_X:
seen |= SEEN_XREG; seen |= SEEN_XREG;
emit_btst(r_A, r_X); emit_btst(r_A, r_X);
break; break;
case BPF_S_JMP_JEQ_K: case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_S_JMP_JGT_K: case BPF_JMP | BPF_JGT | BPF_K:
case BPF_S_JMP_JGE_K: case BPF_JMP | BPF_JGE | BPF_K:
if (is_simm13(K)) { if (is_simm13(K)) {
emit_cmpi(r_A, K); emit_cmpi(r_A, K);
} else { } else {
...@@ -740,7 +732,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf]; ...@@ -740,7 +732,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
emit_cmp(r_A, r_TMP); emit_cmp(r_A, r_TMP);
} }
break; break;
case BPF_S_JMP_JSET_K: case BPF_JMP | BPF_JSET | BPF_K:
if (is_simm13(K)) { if (is_simm13(K)) {
emit_btsti(r_A, K); emit_btsti(r_A, K);
} else { } else {
......
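The JEQ/JGT/JGE/JSET cases above choose between an inline compare and a load
into r_TMP based on is_simm13(). A minimal sketch of such a range test,
assuming SPARC's signed 13-bit immediate field:

	/* Does the value fit a SPARC signed 13-bit immediate? */
	static inline bool is_simm13(unsigned int value)
	{
		return value + 0x1000 < 0x2000;
	}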
...@@ -76,56 +76,211 @@ enum { ...@@ -76,56 +76,211 @@ enum {
/* BPF program can access up to 512 bytes of stack space. */ /* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK 512 #define MAX_BPF_STACK 512
/* bpf_add|sub|...: a += x, bpf_mov: a = x */ /* Helper macros for filter block array initializers. */
#define BPF_ALU64_REG(op, a, x) \
((struct sock_filter_int) {BPF_ALU64|BPF_OP(op)|BPF_X, a, x, 0, 0}) /* ALU ops on registers, bpf_add|sub|...: A += X */
#define BPF_ALU32_REG(op, a, x) \
((struct sock_filter_int) {BPF_ALU|BPF_OP(op)|BPF_X, a, x, 0, 0}) #define BPF_ALU64_REG(OP, A, X) \
((struct sock_filter_int) { \
/* bpf_add|sub|...: a += imm, bpf_mov: a = imm */ .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
#define BPF_ALU64_IMM(op, a, imm) \ .a_reg = A, \
((struct sock_filter_int) {BPF_ALU64|BPF_OP(op)|BPF_K, a, 0, 0, imm}) .x_reg = X, \
#define BPF_ALU32_IMM(op, a, imm) \ .off = 0, \
((struct sock_filter_int) {BPF_ALU|BPF_OP(op)|BPF_K, a, 0, 0, imm}) .imm = 0 })
/* R0 = *(uint *) (skb->data + off) */ #define BPF_ALU32_REG(OP, A, X) \
#define BPF_LD_ABS(size, off) \ ((struct sock_filter_int) { \
((struct sock_filter_int) {BPF_LD|BPF_SIZE(size)|BPF_ABS, 0, 0, 0, off}) .code = BPF_ALU | BPF_OP(OP) | BPF_X, \
.a_reg = A, \
/* R0 = *(uint *) (skb->data + x + off) */ .x_reg = X, \
#define BPF_LD_IND(size, x, off) \ .off = 0, \
((struct sock_filter_int) {BPF_LD|BPF_SIZE(size)|BPF_IND, 0, x, 0, off}) .imm = 0 })
/* a = *(uint *) (x + off) */ /* ALU ops on immediates, bpf_add|sub|...: A += IMM */
#define BPF_LDX_MEM(sz, a, x, off) \
((struct sock_filter_int) {BPF_LDX|BPF_SIZE(sz)|BPF_MEM, a, x, off, 0}) #define BPF_ALU64_IMM(OP, A, IMM) \
((struct sock_filter_int) { \
/* if (a 'op' x) goto pc+off */ .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
#define BPF_JMP_REG(op, a, x, off) \ .a_reg = A, \
((struct sock_filter_int) {BPF_JMP|BPF_OP(op)|BPF_X, a, x, off, 0}) .x_reg = 0, \
.off = 0, \
/* if (a 'op' imm) goto pc+off */ .imm = IMM })
#define BPF_JMP_IMM(op, a, imm, off) \
((struct sock_filter_int) {BPF_JMP|BPF_OP(op)|BPF_K, a, 0, off, imm}) #define BPF_ALU32_IMM(OP, A, IMM) \
((struct sock_filter_int) { \
#define BPF_EXIT_INSN() \ .code = BPF_ALU | BPF_OP(OP) | BPF_K, \
((struct sock_filter_int) {BPF_JMP|BPF_EXIT, 0, 0, 0, 0}) .a_reg = A, \
.x_reg = 0, \
static inline int size_to_bpf(int size) .off = 0, \
{ .imm = IMM })
switch (size) {
case 1: /* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
return BPF_B;
case 2: #define BPF_ENDIAN(TYPE, A, LEN) \
return BPF_H; ((struct sock_filter_int) { \
case 4: .code = BPF_ALU | BPF_END | BPF_SRC(TYPE), \
return BPF_W; .a_reg = A, \
case 8: .x_reg = 0, \
return BPF_DW; .off = 0, \
default: .imm = LEN })
return -EINVAL;
} /* Short form of mov, A = X */
}
#define BPF_MOV64_REG(A, X) \
((struct sock_filter_int) { \
.code = BPF_ALU64 | BPF_MOV | BPF_X, \
.a_reg = A, \
.x_reg = X, \
.off = 0, \
.imm = 0 })
#define BPF_MOV32_REG(A, X) \
((struct sock_filter_int) { \
.code = BPF_ALU | BPF_MOV | BPF_X, \
.a_reg = A, \
.x_reg = X, \
.off = 0, \
.imm = 0 })
/* Short form of mov, A = IMM */
#define BPF_MOV64_IMM(A, IMM) \
((struct sock_filter_int) { \
.code = BPF_ALU64 | BPF_MOV | BPF_K, \
.a_reg = A, \
.x_reg = 0, \
.off = 0, \
.imm = IMM })
#define BPF_MOV32_IMM(A, IMM) \
((struct sock_filter_int) { \
.code = BPF_ALU | BPF_MOV | BPF_K, \
.a_reg = A, \
.x_reg = 0, \
.off = 0, \
.imm = IMM })
/* Short form of mov based on type, BPF_X: A = X, BPF_K: A = IMM */
#define BPF_MOV64_RAW(TYPE, A, X, IMM) \
((struct sock_filter_int) { \
.code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \
.a_reg = A, \
.x_reg = X, \
.off = 0, \
.imm = IMM })
#define BPF_MOV32_RAW(TYPE, A, X, IMM) \
((struct sock_filter_int) { \
.code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \
.a_reg = A, \
.x_reg = X, \
.off = 0, \
.imm = IMM })
/* Direct packet access, R0 = *(uint *) (skb->data + OFF) */
#define BPF_LD_ABS(SIZE, OFF) \
((struct sock_filter_int) { \
.code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
.a_reg = 0, \
.x_reg = 0, \
.off = 0, \
.imm = OFF })
/* Indirect packet access, R0 = *(uint *) (skb->data + X + OFF) */
#define BPF_LD_IND(SIZE, X, OFF) \
((struct sock_filter_int) { \
.code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \
.a_reg = 0, \
.x_reg = X, \
.off = 0, \
.imm = OFF })
/* Memory load/store, A = *(uint *) (X + OFF), and vice versa */
#define BPF_LDX_MEM(SIZE, A, X, OFF) \
((struct sock_filter_int) { \
.code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
.a_reg = A, \
.x_reg = X, \
.off = OFF, \
.imm = 0 })
#define BPF_STX_MEM(SIZE, A, X, OFF) \
((struct sock_filter_int) { \
.code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
.a_reg = A, \
.x_reg = X, \
.off = OFF, \
.imm = 0 })
/* Conditional jumps against registers, if (A 'op' X) goto pc + OFF */
#define BPF_JMP_REG(OP, A, X, OFF) \
((struct sock_filter_int) { \
.code = BPF_JMP | BPF_OP(OP) | BPF_X, \
.a_reg = A, \
.x_reg = X, \
.off = OFF, \
.imm = 0 })
/* Conditional jumps against immediates, if (A 'op' IMM) goto pc + OFF */
#define BPF_JMP_IMM(OP, A, IMM, OFF) \
((struct sock_filter_int) { \
.code = BPF_JMP | BPF_OP(OP) | BPF_K, \
.a_reg = A, \
.x_reg = 0, \
.off = OFF, \
.imm = IMM })
/* Function call */
#define BPF_EMIT_CALL(FUNC) \
((struct sock_filter_int) { \
.code = BPF_JMP | BPF_CALL, \
.a_reg = 0, \
.x_reg = 0, \
.off = 0, \
.imm = ((FUNC) - __bpf_call_base) })
/* Raw code statement block */
#define BPF_RAW_INSN(CODE, A, X, OFF, IMM) \
((struct sock_filter_int) { \
.code = CODE, \
.a_reg = A, \
.x_reg = X, \
.off = OFF, \
.imm = IMM })
/* Program exit */
#define BPF_EXIT_INSN() \
((struct sock_filter_int) { \
.code = BPF_JMP | BPF_EXIT, \
.a_reg = 0, \
.x_reg = 0, \
.off = 0, \
.imm = 0 })
#define bytes_to_bpf_size(bytes) \
({ \
int bpf_size = -EINVAL; \
\
if (bytes == sizeof(u8)) \
bpf_size = BPF_B; \
else if (bytes == sizeof(u16)) \
bpf_size = BPF_H; \
else if (bytes == sizeof(u32)) \
bpf_size = BPF_W; \
else if (bytes == sizeof(u64)) \
bpf_size = BPF_DW; \
\
bpf_size; \
})
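A usage sketch for the initializer macros above (illustrative, not part of
the patch): a minimal internal program that returns the constant 42. The
BPF_REG_0 register name follows the convention used elsewhere in this set.

	static const struct sock_filter_int demo_prog[] = {
		BPF_MOV64_IMM(BPF_REG_0, 42),	/* R0 = 42 */
		BPF_EXIT_INSN(),		/* return R0 */
	};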
/* Macro to invoke filter function. */ /* Macro to invoke filter function. */
#define SK_RUN_FILTER(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi) #define SK_RUN_FILTER(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi)
...@@ -197,7 +352,6 @@ int sk_detach_filter(struct sock *sk); ...@@ -197,7 +352,6 @@ int sk_detach_filter(struct sock *sk);
int sk_chk_filter(struct sock_filter *filter, unsigned int flen); int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
unsigned int len); unsigned int len);
void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to);
void sk_filter_charge(struct sock *sk, struct sk_filter *fp); void sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
...@@ -205,6 +359,41 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); ...@@ -205,6 +359,41 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
void bpf_int_jit_compile(struct sk_filter *fp); void bpf_int_jit_compile(struct sk_filter *fp);
#define BPF_ANC BIT(15)
static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
{
BUG_ON(ftest->code & BPF_ANC);
switch (ftest->code) {
case BPF_LD | BPF_W | BPF_ABS:
case BPF_LD | BPF_H | BPF_ABS:
case BPF_LD | BPF_B | BPF_ABS:
#define BPF_ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \
return BPF_ANC | SKF_AD_##CODE
switch (ftest->k) {
BPF_ANCILLARY(PROTOCOL);
BPF_ANCILLARY(PKTTYPE);
BPF_ANCILLARY(IFINDEX);
BPF_ANCILLARY(NLATTR);
BPF_ANCILLARY(NLATTR_NEST);
BPF_ANCILLARY(MARK);
BPF_ANCILLARY(QUEUE);
BPF_ANCILLARY(HATYPE);
BPF_ANCILLARY(RXHASH);
BPF_ANCILLARY(CPU);
BPF_ANCILLARY(ALU_XOR_X);
BPF_ANCILLARY(VLAN_TAG);
BPF_ANCILLARY(VLAN_TAG_PRESENT);
BPF_ANCILLARY(PAY_OFFSET);
BPF_ANCILLARY(RANDOM);
}
/* Fallthrough. */
default:
return ftest->code;
}
}
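For illustration (operand values chosen here, not from the patch): the helper
folds recognized SKF_AD_* offsets into the returned code, while ordinary
packet loads pass through unchanged.

	struct sock_filter anc = { .code = BPF_LD | BPF_W | BPF_ABS,
				   .k = SKF_AD_OFF + SKF_AD_IFINDEX };
	struct sock_filter pkt = { .code = BPF_LD | BPF_W | BPF_ABS,
				   .k = 14 };

	/* bpf_anc_helper(&anc) == (BPF_ANC | SKF_AD_IFINDEX) */
	/* bpf_anc_helper(&pkt) == (BPF_LD | BPF_W | BPF_ABS)  */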
#ifdef CONFIG_BPF_JIT #ifdef CONFIG_BPF_JIT
#include <stdarg.h> #include <stdarg.h>
#include <linux/linkage.h> #include <linux/linkage.h>
...@@ -224,86 +413,20 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, ...@@ -224,86 +413,20 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
} }
#else #else
#include <linux/slab.h> #include <linux/slab.h>
static inline void bpf_jit_compile(struct sk_filter *fp) static inline void bpf_jit_compile(struct sk_filter *fp)
{ {
} }
static inline void bpf_jit_free(struct sk_filter *fp) static inline void bpf_jit_free(struct sk_filter *fp)
{ {
kfree(fp); kfree(fp);
} }
#endif #endif /* CONFIG_BPF_JIT */
static inline int bpf_tell_extensions(void) static inline int bpf_tell_extensions(void)
{ {
return SKF_AD_MAX; return SKF_AD_MAX;
} }
enum {
BPF_S_RET_K = 1,
BPF_S_RET_A,
BPF_S_ALU_ADD_K,
BPF_S_ALU_ADD_X,
BPF_S_ALU_SUB_K,
BPF_S_ALU_SUB_X,
BPF_S_ALU_MUL_K,
BPF_S_ALU_MUL_X,
BPF_S_ALU_DIV_X,
BPF_S_ALU_MOD_K,
BPF_S_ALU_MOD_X,
BPF_S_ALU_AND_K,
BPF_S_ALU_AND_X,
BPF_S_ALU_OR_K,
BPF_S_ALU_OR_X,
BPF_S_ALU_XOR_K,
BPF_S_ALU_XOR_X,
BPF_S_ALU_LSH_K,
BPF_S_ALU_LSH_X,
BPF_S_ALU_RSH_K,
BPF_S_ALU_RSH_X,
BPF_S_ALU_NEG,
BPF_S_LD_W_ABS,
BPF_S_LD_H_ABS,
BPF_S_LD_B_ABS,
BPF_S_LD_W_LEN,
BPF_S_LD_W_IND,
BPF_S_LD_H_IND,
BPF_S_LD_B_IND,
BPF_S_LD_IMM,
BPF_S_LDX_W_LEN,
BPF_S_LDX_B_MSH,
BPF_S_LDX_IMM,
BPF_S_MISC_TAX,
BPF_S_MISC_TXA,
BPF_S_ALU_DIV_K,
BPF_S_LD_MEM,
BPF_S_LDX_MEM,
BPF_S_ST,
BPF_S_STX,
BPF_S_JMP_JA,
BPF_S_JMP_JEQ_K,
BPF_S_JMP_JEQ_X,
BPF_S_JMP_JGE_K,
BPF_S_JMP_JGE_X,
BPF_S_JMP_JGT_K,
BPF_S_JMP_JGT_X,
BPF_S_JMP_JSET_K,
BPF_S_JMP_JSET_X,
/* Ancillary data */
BPF_S_ANC_PROTOCOL,
BPF_S_ANC_PKTTYPE,
BPF_S_ANC_IFINDEX,
BPF_S_ANC_NLATTR,
BPF_S_ANC_NLATTR_NEST,
BPF_S_ANC_MARK,
BPF_S_ANC_QUEUE,
BPF_S_ANC_HATYPE,
BPF_S_ANC_RXHASH,
BPF_S_ANC_CPU,
BPF_S_ANC_ALU_XOR_X,
BPF_S_ANC_VLAN_TAG,
BPF_S_ANC_VLAN_TAG_PRESENT,
BPF_S_ANC_PAY_OFFSET,
BPF_S_ANC_RANDOM,
};
#endif /* __LINUX_FILTER_H__ */ #endif /* __LINUX_FILTER_H__ */
...@@ -103,60 +103,59 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen) ...@@ -103,60 +103,59 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
u32 k = ftest->k; u32 k = ftest->k;
switch (code) { switch (code) {
case BPF_S_LD_W_ABS: case BPF_LD | BPF_W | BPF_ABS:
ftest->code = BPF_LDX | BPF_W | BPF_ABS; ftest->code = BPF_LDX | BPF_W | BPF_ABS;
/* 32-bit aligned and not out of bounds. */ /* 32-bit aligned and not out of bounds. */
if (k >= sizeof(struct seccomp_data) || k & 3) if (k >= sizeof(struct seccomp_data) || k & 3)
return -EINVAL; return -EINVAL;
continue; continue;
case BPF_S_LD_W_LEN: case BPF_LD | BPF_W | BPF_LEN:
ftest->code = BPF_LD | BPF_IMM; ftest->code = BPF_LD | BPF_IMM;
ftest->k = sizeof(struct seccomp_data); ftest->k = sizeof(struct seccomp_data);
continue; continue;
case BPF_S_LDX_W_LEN: case BPF_LDX | BPF_W | BPF_LEN:
ftest->code = BPF_LDX | BPF_IMM; ftest->code = BPF_LDX | BPF_IMM;
ftest->k = sizeof(struct seccomp_data); ftest->k = sizeof(struct seccomp_data);
continue; continue;
/* Explicitly include allowed calls. */ /* Explicitly include allowed calls. */
case BPF_S_RET_K: case BPF_RET | BPF_K:
case BPF_S_RET_A: case BPF_RET | BPF_A:
case BPF_S_ALU_ADD_K: case BPF_ALU | BPF_ADD | BPF_K:
case BPF_S_ALU_ADD_X: case BPF_ALU | BPF_ADD | BPF_X:
case BPF_S_ALU_SUB_K: case BPF_ALU | BPF_SUB | BPF_K:
case BPF_S_ALU_SUB_X: case BPF_ALU | BPF_SUB | BPF_X:
case BPF_S_ALU_MUL_K: case BPF_ALU | BPF_MUL | BPF_K:
case BPF_S_ALU_MUL_X: case BPF_ALU | BPF_MUL | BPF_X:
case BPF_S_ALU_DIV_X: case BPF_ALU | BPF_DIV | BPF_K:
case BPF_S_ALU_AND_K: case BPF_ALU | BPF_DIV | BPF_X:
case BPF_S_ALU_AND_X: case BPF_ALU | BPF_AND | BPF_K:
case BPF_S_ALU_OR_K: case BPF_ALU | BPF_AND | BPF_X:
case BPF_S_ALU_OR_X: case BPF_ALU | BPF_OR | BPF_K:
case BPF_S_ALU_XOR_K: case BPF_ALU | BPF_OR | BPF_X:
case BPF_S_ALU_XOR_X: case BPF_ALU | BPF_XOR | BPF_K:
case BPF_S_ALU_LSH_K: case BPF_ALU | BPF_XOR | BPF_X:
case BPF_S_ALU_LSH_X: case BPF_ALU | BPF_LSH | BPF_K:
case BPF_S_ALU_RSH_K: case BPF_ALU | BPF_LSH | BPF_X:
case BPF_S_ALU_RSH_X: case BPF_ALU | BPF_RSH | BPF_K:
case BPF_S_ALU_NEG: case BPF_ALU | BPF_RSH | BPF_X:
case BPF_S_LD_IMM: case BPF_ALU | BPF_NEG:
case BPF_S_LDX_IMM: case BPF_LD | BPF_IMM:
case BPF_S_MISC_TAX: case BPF_LDX | BPF_IMM:
case BPF_S_MISC_TXA: case BPF_MISC | BPF_TAX:
case BPF_S_ALU_DIV_K: case BPF_MISC | BPF_TXA:
case BPF_S_LD_MEM: case BPF_LD | BPF_MEM:
case BPF_S_LDX_MEM: case BPF_LDX | BPF_MEM:
case BPF_S_ST: case BPF_ST:
case BPF_S_STX: case BPF_STX:
case BPF_S_JMP_JA: case BPF_JMP | BPF_JA:
case BPF_S_JMP_JEQ_K: case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_S_JMP_JEQ_X: case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_S_JMP_JGE_K: case BPF_JMP | BPF_JGE | BPF_K:
case BPF_S_JMP_JGE_X: case BPF_JMP | BPF_JGE | BPF_X:
case BPF_S_JMP_JGT_K: case BPF_JMP | BPF_JGT | BPF_K:
case BPF_S_JMP_JGT_X: case BPF_JMP | BPF_JGT | BPF_X:
case BPF_S_JMP_JSET_K: case BPF_JMP | BPF_JSET | BPF_K:
case BPF_S_JMP_JSET_X: case BPF_JMP | BPF_JSET | BPF_X:
sk_decode_filter(ftest, ftest);
continue; continue;
default: default:
return -EINVAL; return -EINVAL;
......
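A hedged example of a filter that survives this translation (the policy is
made up): the load is 4-byte aligned and within sizeof(struct seccomp_data),
so it is rewritten to the seccomp-internal BPF_LDX | BPF_W | BPF_ABS form and
the program is accepted.

	struct sock_filter allow_all[] = {
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};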
...@@ -1493,7 +1493,7 @@ static struct bpf_test tests[] = { ...@@ -1493,7 +1493,7 @@ static struct bpf_test tests[] = {
{ }, { },
}, },
{ /* Mainly checking JIT here. */ { /* Mainly checking JIT here. */
"M[]: STX + LDX", "M[]: alt STX + LDX",
.u.insns = { .u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 100), BPF_STMT(BPF_LDX | BPF_IMM, 100),
BPF_STMT(BPF_STX, 0), BPF_STMT(BPF_STX, 0),
...@@ -1582,6 +1582,101 @@ static struct bpf_test tests[] = { ...@@ -1582,6 +1582,101 @@ static struct bpf_test tests[] = {
{ }, { },
{ { 0, 116 } }, { { 0, 116 } },
}, },
{ /* Mainly checking JIT here. */
"M[]: full STX + full LDX",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0xbadfeedb),
BPF_STMT(BPF_STX, 0),
BPF_STMT(BPF_LDX | BPF_IMM, 0xecabedae),
BPF_STMT(BPF_STX, 1),
BPF_STMT(BPF_LDX | BPF_IMM, 0xafccfeaf),
BPF_STMT(BPF_STX, 2),
BPF_STMT(BPF_LDX | BPF_IMM, 0xbffdcedc),
BPF_STMT(BPF_STX, 3),
BPF_STMT(BPF_LDX | BPF_IMM, 0xfbbbdccb),
BPF_STMT(BPF_STX, 4),
BPF_STMT(BPF_LDX | BPF_IMM, 0xfbabcbda),
BPF_STMT(BPF_STX, 5),
BPF_STMT(BPF_LDX | BPF_IMM, 0xaedecbdb),
BPF_STMT(BPF_STX, 6),
BPF_STMT(BPF_LDX | BPF_IMM, 0xadebbade),
BPF_STMT(BPF_STX, 7),
BPF_STMT(BPF_LDX | BPF_IMM, 0xfcfcfaec),
BPF_STMT(BPF_STX, 8),
BPF_STMT(BPF_LDX | BPF_IMM, 0xbcdddbdc),
BPF_STMT(BPF_STX, 9),
BPF_STMT(BPF_LDX | BPF_IMM, 0xfeefdfac),
BPF_STMT(BPF_STX, 10),
BPF_STMT(BPF_LDX | BPF_IMM, 0xcddcdeea),
BPF_STMT(BPF_STX, 11),
BPF_STMT(BPF_LDX | BPF_IMM, 0xaccfaebb),
BPF_STMT(BPF_STX, 12),
BPF_STMT(BPF_LDX | BPF_IMM, 0xbdcccdcf),
BPF_STMT(BPF_STX, 13),
BPF_STMT(BPF_LDX | BPF_IMM, 0xaaedecde),
BPF_STMT(BPF_STX, 14),
BPF_STMT(BPF_LDX | BPF_IMM, 0xfaeacdad),
BPF_STMT(BPF_STX, 15),
BPF_STMT(BPF_LDX | BPF_MEM, 0),
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 1),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 2),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 3),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 4),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 5),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 6),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 7),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 8),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 9),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 10),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 11),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 12),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 13),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 14),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 15),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_RET | BPF_A, 0),
},
CLASSIC | FLAG_NO_DATA,
{ },
{ { 0, 0x2a5a5e5 } },
},
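	/* Sanity check on the expected value above: the sixteen stored
	 * constants, summed with 32-bit wrap-around by the chain of
	 * BPF_ALU | BPF_ADD | BPF_X steps, total 0x02a5a5e5.
	 */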
{
"check: SKF_AD_MAX",
.u.insns = {
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_MAX),
BPF_STMT(BPF_RET | BPF_A, 0),
},
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
{ },
{ },
},
{ /* Passes checker but fails during runtime. */
"LD [SKF_AD_OFF-1]",
.u.insns = {
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF - 1),
BPF_STMT(BPF_RET | BPF_K, 1),
},
CLASSIC,
{ },
{ { 1, 0 } },
},
}; };
static struct net_device dev; static struct net_device dev;
......
...@@ -536,11 +536,13 @@ static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *ins ...@@ -536,11 +536,13 @@ static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *ins
* Output: * Output:
* BPF_R0 - 8/16/32-bit skb data converted to cpu endianness * BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
*/ */
ptr = load_pointer((struct sk_buff *) ctx, off, 4, &tmp); ptr = load_pointer((struct sk_buff *) ctx, off, 4, &tmp);
if (likely(ptr != NULL)) { if (likely(ptr != NULL)) {
BPF_R0 = get_unaligned_be32(ptr); BPF_R0 = get_unaligned_be32(ptr);
CONT; CONT;
} }
return 0; return 0;
LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + K)) */ LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + K)) */
off = K; off = K;
...@@ -550,6 +552,7 @@ static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *ins ...@@ -550,6 +552,7 @@ static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *ins
BPF_R0 = get_unaligned_be16(ptr); BPF_R0 = get_unaligned_be16(ptr);
CONT; CONT;
} }
return 0; return 0;
LD_ABS_B: /* BPF_R0 = *(u8 *) (ctx + K) */ LD_ABS_B: /* BPF_R0 = *(u8 *) (ctx + K) */
off = K; off = K;
...@@ -559,6 +562,7 @@ static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *ins ...@@ -559,6 +562,7 @@ static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *ins
BPF_R0 = *(u8 *)ptr; BPF_R0 = *(u8 *)ptr;
CONT; CONT;
} }
return 0; return 0;
LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + X + K)) */ LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + X + K)) */
off = K + X; off = K + X;
...@@ -668,14 +672,10 @@ static bool convert_bpf_extensions(struct sock_filter *fp, ...@@ -668,14 +672,10 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
/* A = *(u16 *) (ctx + offsetof(protocol)) */ /* A = *(u16 *) (ctx + offsetof(protocol)) */
*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
offsetof(struct sk_buff, protocol)); offsetof(struct sk_buff, protocol));
insn++;
/* A = ntohs(A) [emitting a nop or swap16] */ /* A = ntohs(A) [emitting a nop or swap16] */
insn->code = BPF_ALU | BPF_END | BPF_FROM_BE; *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
insn->a_reg = BPF_REG_A;
insn->imm = 16;
break; break;
case SKF_AD_OFF + SKF_AD_PKTTYPE: case SKF_AD_OFF + SKF_AD_PKTTYPE:
...@@ -684,37 +684,27 @@ static bool convert_bpf_extensions(struct sock_filter *fp, ...@@ -684,37 +684,27 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
if (insn->off < 0) if (insn->off < 0)
return false; return false;
insn++; insn++;
*insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX); *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX);
break; break;
case SKF_AD_OFF + SKF_AD_IFINDEX: case SKF_AD_OFF + SKF_AD_IFINDEX:
case SKF_AD_OFF + SKF_AD_HATYPE: case SKF_AD_OFF + SKF_AD_HATYPE:
*insn = BPF_LDX_MEM(size_to_bpf(FIELD_SIZEOF(struct sk_buff, dev)),
BPF_REG_TMP, BPF_REG_CTX,
offsetof(struct sk_buff, dev));
insn++;
/* if (tmp != 0) goto pc+1 */
*insn = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
insn++;
*insn = BPF_EXIT_INSN();
insn++;
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2); BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
BUILD_BUG_ON(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)) < 0);
insn->a_reg = BPF_REG_A;
insn->x_reg = BPF_REG_TMP; *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
BPF_REG_TMP, BPF_REG_CTX,
if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX) { offsetof(struct sk_buff, dev));
insn->code = BPF_LDX | BPF_MEM | BPF_W; /* if (tmp != 0) goto pc + 1 */
insn->off = offsetof(struct net_device, ifindex); *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
} else { *insn++ = BPF_EXIT_INSN();
insn->code = BPF_LDX | BPF_MEM | BPF_H; if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
insn->off = offsetof(struct net_device, type); *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
} offsetof(struct net_device, ifindex));
else
*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
offsetof(struct net_device, type));
break; break;
case SKF_AD_OFF + SKF_AD_MARK: case SKF_AD_OFF + SKF_AD_MARK:
...@@ -741,22 +731,17 @@ static bool convert_bpf_extensions(struct sock_filter *fp, ...@@ -741,22 +731,17 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
case SKF_AD_OFF + SKF_AD_VLAN_TAG: case SKF_AD_OFF + SKF_AD_VLAN_TAG:
case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT: case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
/* A = *(u16 *) (ctx + offsetof(vlan_tci)) */
*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
offsetof(struct sk_buff, vlan_tci));
insn++;
BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000); BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
/* A = *(u16 *) (ctx + offsetof(vlan_tci)) */
*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
offsetof(struct sk_buff, vlan_tci));
if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) { if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) {
*insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A,
~VLAN_TAG_PRESENT); ~VLAN_TAG_PRESENT);
} else { } else {
/* A >>= 12 */ /* A >>= 12 */
*insn = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 12); *insn++ = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 12);
insn++;
/* A &= 1 */ /* A &= 1 */
*insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 1); *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 1);
} }
...@@ -768,34 +753,27 @@ static bool convert_bpf_extensions(struct sock_filter *fp, ...@@ -768,34 +753,27 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
case SKF_AD_OFF + SKF_AD_CPU: case SKF_AD_OFF + SKF_AD_CPU:
case SKF_AD_OFF + SKF_AD_RANDOM: case SKF_AD_OFF + SKF_AD_RANDOM:
/* arg1 = ctx */ /* arg1 = ctx */
*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG1, BPF_REG_CTX); *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
insn++;
/* arg2 = A */ /* arg2 = A */
*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG2, BPF_REG_A); *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
insn++;
/* arg3 = X */ /* arg3 = X */
*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG3, BPF_REG_X); *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
insn++;
/* Emit call(ctx, arg2=A, arg3=X) */ /* Emit call(ctx, arg2=A, arg3=X) */
insn->code = BPF_JMP | BPF_CALL;
switch (fp->k) { switch (fp->k) {
case SKF_AD_OFF + SKF_AD_PAY_OFFSET: case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
insn->imm = __skb_get_pay_offset - __bpf_call_base; *insn = BPF_EMIT_CALL(__skb_get_pay_offset);
break; break;
case SKF_AD_OFF + SKF_AD_NLATTR: case SKF_AD_OFF + SKF_AD_NLATTR:
insn->imm = __skb_get_nlattr - __bpf_call_base; *insn = BPF_EMIT_CALL(__skb_get_nlattr);
break; break;
case SKF_AD_OFF + SKF_AD_NLATTR_NEST: case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
insn->imm = __skb_get_nlattr_nest - __bpf_call_base; *insn = BPF_EMIT_CALL(__skb_get_nlattr_nest);
break; break;
case SKF_AD_OFF + SKF_AD_CPU: case SKF_AD_OFF + SKF_AD_CPU:
insn->imm = __get_raw_cpu_id - __bpf_call_base; *insn = BPF_EMIT_CALL(__get_raw_cpu_id);
break; break;
case SKF_AD_OFF + SKF_AD_RANDOM: case SKF_AD_OFF + SKF_AD_RANDOM:
insn->imm = __get_random_u32 - __bpf_call_base; *insn = BPF_EMIT_CALL(__get_random_u32);
break; break;
} }
break; break;
...@@ -867,9 +845,8 @@ int sk_convert_filter(struct sock_filter *prog, int len, ...@@ -867,9 +845,8 @@ int sk_convert_filter(struct sock_filter *prog, int len,
new_insn = new_prog; new_insn = new_prog;
fp = prog; fp = prog;
if (new_insn) { if (new_insn)
*new_insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_CTX, BPF_REG_ARG1); *new_insn = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
}
new_insn++; new_insn++;
for (i = 0; i < len; fp++, i++) { for (i = 0; i < len; fp++, i++) {
...@@ -917,17 +894,16 @@ int sk_convert_filter(struct sock_filter *prog, int len, ...@@ -917,17 +894,16 @@ int sk_convert_filter(struct sock_filter *prog, int len,
convert_bpf_extensions(fp, &insn)) convert_bpf_extensions(fp, &insn))
break; break;
insn->code = fp->code; *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
insn->a_reg = BPF_REG_A;
insn->x_reg = BPF_REG_X;
insn->imm = fp->k;
break; break;
/* Jump opcodes map as-is, but offsets need adjustment. */ /* Jump transformation cannot use BPF block macros
case BPF_JMP | BPF_JA: * everywhere as offset calculation and target updates
target = i + fp->k + 1; * require a bit more work than the rest, i.e. jump
insn->code = fp->code; * opcodes map as-is, but offsets need adjustment.
#define EMIT_JMP \ */
#define BPF_EMIT_JMP \
do { \ do { \
if (target >= len || target < 0) \ if (target >= len || target < 0) \
goto err; \ goto err; \
...@@ -936,7 +912,10 @@ int sk_convert_filter(struct sock_filter *prog, int len, ...@@ -936,7 +912,10 @@ int sk_convert_filter(struct sock_filter *prog, int len,
insn->off -= insn - tmp_insns; \ insn->off -= insn - tmp_insns; \
} while (0) } while (0)
EMIT_JMP; case BPF_JMP | BPF_JA:
target = i + fp->k + 1;
insn->code = fp->code;
BPF_EMIT_JMP;
break; break;
case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JEQ | BPF_K:
...@@ -952,10 +931,7 @@ int sk_convert_filter(struct sock_filter *prog, int len, ...@@ -952,10 +931,7 @@ int sk_convert_filter(struct sock_filter *prog, int len,
* immediate into tmp register and use it * immediate into tmp register and use it
* in compare insn. * in compare insn.
*/ */
insn->code = BPF_ALU | BPF_MOV | BPF_K; *insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);
insn->a_reg = BPF_REG_TMP;
insn->imm = fp->k;
insn++;
insn->a_reg = BPF_REG_A; insn->a_reg = BPF_REG_A;
insn->x_reg = BPF_REG_TMP; insn->x_reg = BPF_REG_TMP;
...@@ -971,7 +947,7 @@ int sk_convert_filter(struct sock_filter *prog, int len, ...@@ -971,7 +947,7 @@ int sk_convert_filter(struct sock_filter *prog, int len,
if (fp->jf == 0) { if (fp->jf == 0) {
insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src; insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
target = i + fp->jt + 1; target = i + fp->jt + 1;
EMIT_JMP; BPF_EMIT_JMP;
break; break;
} }
...@@ -979,116 +955,94 @@ int sk_convert_filter(struct sock_filter *prog, int len, ...@@ -979,116 +955,94 @@ int sk_convert_filter(struct sock_filter *prog, int len,
if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) { if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
insn->code = BPF_JMP | BPF_JNE | bpf_src; insn->code = BPF_JMP | BPF_JNE | bpf_src;
target = i + fp->jf + 1; target = i + fp->jf + 1;
EMIT_JMP; BPF_EMIT_JMP;
break; break;
} }
/* Other jumps are mapped into two insns: Jxx and JA. */ /* Other jumps are mapped into two insns: Jxx and JA. */
target = i + fp->jt + 1; target = i + fp->jt + 1;
insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src; insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
EMIT_JMP; BPF_EMIT_JMP;
insn++; insn++;
insn->code = BPF_JMP | BPF_JA; insn->code = BPF_JMP | BPF_JA;
target = i + fp->jf + 1; target = i + fp->jf + 1;
EMIT_JMP; BPF_EMIT_JMP;
break; break;
/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */ /* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
case BPF_LDX | BPF_MSH | BPF_B: case BPF_LDX | BPF_MSH | BPF_B:
/* tmp = A */ /* tmp = A */
*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_TMP, BPF_REG_A); *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
insn++;
/* A = BPF_R0 = *(u8 *) (skb->data + K) */ /* A = BPF_R0 = *(u8 *) (skb->data + K) */
*insn = BPF_LD_ABS(BPF_B, fp->k); *insn++ = BPF_LD_ABS(BPF_B, fp->k);
insn++;
/* A &= 0xf */ /* A &= 0xf */
*insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf); *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
insn++;
/* A <<= 2 */ /* A <<= 2 */
*insn = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2); *insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
insn++;
/* X = A */ /* X = A */
*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_X, BPF_REG_A); *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
insn++;
/* A = tmp */ /* A = tmp */
*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_A, BPF_REG_TMP); *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
break; break;
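	/* Worked example of the remap above: the classic header-length idiom
	 *   BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14)
	 * becomes
	 *   tmp = A; A = *(u8 *)(skb->data + 14); A &= 0xf;
	 *   A <<= 2; X = A; A = tmp;
	 * leaving 4 * (IHL nibble) in X with A preserved.
	 */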
/* RET_K, RET_A are remapped into 2 insns. */ /* RET_K, RET_A are remapped into 2 insns. */
case BPF_RET | BPF_A: case BPF_RET | BPF_A:
case BPF_RET | BPF_K: case BPF_RET | BPF_K:
insn->code = BPF_ALU | BPF_MOV | *insn++ = BPF_MOV32_RAW(BPF_RVAL(fp->code) == BPF_K ?
(BPF_RVAL(fp->code) == BPF_K ? BPF_K : BPF_X, BPF_REG_0,
BPF_K : BPF_X); BPF_REG_A, fp->k);
insn->a_reg = 0;
insn->x_reg = BPF_REG_A;
insn->imm = fp->k;
insn++;
*insn = BPF_EXIT_INSN(); *insn = BPF_EXIT_INSN();
break; break;
/* Store to stack. */ /* Store to stack. */
case BPF_ST: case BPF_ST:
case BPF_STX: case BPF_STX:
insn->code = BPF_STX | BPF_MEM | BPF_W; *insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
insn->a_reg = BPF_REG_FP; BPF_ST ? BPF_REG_A : BPF_REG_X,
insn->x_reg = fp->code == BPF_ST ? -(BPF_MEMWORDS - fp->k) * 4);
BPF_REG_A : BPF_REG_X;
insn->off = -(BPF_MEMWORDS - fp->k) * 4;
break; break;
/* Load from stack. */ /* Load from stack. */
case BPF_LD | BPF_MEM: case BPF_LD | BPF_MEM:
case BPF_LDX | BPF_MEM: case BPF_LDX | BPF_MEM:
insn->code = BPF_LDX | BPF_MEM | BPF_W; *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ? BPF_REG_A : BPF_REG_X, BPF_REG_FP,
BPF_REG_A : BPF_REG_X; -(BPF_MEMWORDS - fp->k) * 4);
insn->x_reg = BPF_REG_FP;
insn->off = -(BPF_MEMWORDS - fp->k) * 4;
break; break;
/* A = K or X = K */ /* A = K or X = K */
case BPF_LD | BPF_IMM: case BPF_LD | BPF_IMM:
case BPF_LDX | BPF_IMM: case BPF_LDX | BPF_IMM:
insn->code = BPF_ALU | BPF_MOV | BPF_K; *insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ? BPF_REG_A : BPF_REG_X, fp->k);
BPF_REG_A : BPF_REG_X;
insn->imm = fp->k;
break; break;
/* X = A */ /* X = A */
case BPF_MISC | BPF_TAX: case BPF_MISC | BPF_TAX:
*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_X, BPF_REG_A); *insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
break; break;
/* A = X */ /* A = X */
case BPF_MISC | BPF_TXA: case BPF_MISC | BPF_TXA:
*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_A, BPF_REG_X); *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
break; break;
/* A = skb->len or X = skb->len */ /* A = skb->len or X = skb->len */
case BPF_LD | BPF_W | BPF_LEN: case BPF_LD | BPF_W | BPF_LEN:
case BPF_LDX | BPF_W | BPF_LEN: case BPF_LDX | BPF_W | BPF_LEN:
insn->code = BPF_LDX | BPF_MEM | BPF_W; *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ? BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
BPF_REG_A : BPF_REG_X; offsetof(struct sk_buff, len));
insn->x_reg = BPF_REG_CTX;
insn->off = offsetof(struct sk_buff, len);
break; break;
/* access seccomp_data fields */ /* Access seccomp_data fields. */
case BPF_LDX | BPF_ABS | BPF_W: case BPF_LDX | BPF_ABS | BPF_W:
/* A = *(u32 *) (ctx + K) */ /* A = *(u32 *) (ctx + K) */
*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k); *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
break; break;
/* Unknown instruction. */
default: default:
goto err; goto err;
} }
...@@ -1097,7 +1051,6 @@ int sk_convert_filter(struct sock_filter *prog, int len, ...@@ -1097,7 +1051,6 @@ int sk_convert_filter(struct sock_filter *prog, int len,
if (new_prog) if (new_prog)
memcpy(new_insn, tmp_insns, memcpy(new_insn, tmp_insns,
sizeof(*insn) * (insn - tmp_insns)); sizeof(*insn) * (insn - tmp_insns));
new_insn += insn - tmp_insns; new_insn += insn - tmp_insns;
} }
...@@ -1112,7 +1065,6 @@ int sk_convert_filter(struct sock_filter *prog, int len, ...@@ -1112,7 +1065,6 @@ int sk_convert_filter(struct sock_filter *prog, int len,
new_flen = new_insn - new_prog; new_flen = new_insn - new_prog;
if (pass > 2) if (pass > 2)
goto err; goto err;
goto do_pass; goto do_pass;
} }
...@@ -1136,44 +1088,46 @@ int sk_convert_filter(struct sock_filter *prog, int len, ...@@ -1136,44 +1088,46 @@ int sk_convert_filter(struct sock_filter *prog, int len,
*/ */
static int check_load_and_stores(struct sock_filter *filter, int flen) static int check_load_and_stores(struct sock_filter *filter, int flen)
{ {
u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */ u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
int pc, ret = 0; int pc, ret = 0;
BUILD_BUG_ON(BPF_MEMWORDS > 16); BUILD_BUG_ON(BPF_MEMWORDS > 16);
masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL); masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
if (!masks) if (!masks)
return -ENOMEM; return -ENOMEM;
memset(masks, 0xff, flen * sizeof(*masks)); memset(masks, 0xff, flen * sizeof(*masks));
for (pc = 0; pc < flen; pc++) { for (pc = 0; pc < flen; pc++) {
memvalid &= masks[pc]; memvalid &= masks[pc];
switch (filter[pc].code) { switch (filter[pc].code) {
case BPF_S_ST: case BPF_ST:
case BPF_S_STX: case BPF_STX:
memvalid |= (1 << filter[pc].k); memvalid |= (1 << filter[pc].k);
break; break;
case BPF_S_LD_MEM: case BPF_LD | BPF_MEM:
case BPF_S_LDX_MEM: case BPF_LDX | BPF_MEM:
if (!(memvalid & (1 << filter[pc].k))) { if (!(memvalid & (1 << filter[pc].k))) {
ret = -EINVAL; ret = -EINVAL;
goto error; goto error;
} }
break; break;
case BPF_S_JMP_JA: case BPF_JMP | BPF_JA:
/* a jump must set masks on target */ /* A jump must set masks on target */
masks[pc + 1 + filter[pc].k] &= memvalid; masks[pc + 1 + filter[pc].k] &= memvalid;
memvalid = ~0; memvalid = ~0;
break; break;
case BPF_S_JMP_JEQ_K: case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_S_JMP_JEQ_X: case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_S_JMP_JGE_K: case BPF_JMP | BPF_JGE | BPF_K:
case BPF_S_JMP_JGE_X: case BPF_JMP | BPF_JGE | BPF_X:
case BPF_S_JMP_JGT_K: case BPF_JMP | BPF_JGT | BPF_K:
case BPF_S_JMP_JGT_X: case BPF_JMP | BPF_JGT | BPF_X:
case BPF_S_JMP_JSET_X: case BPF_JMP | BPF_JSET | BPF_K:
case BPF_S_JMP_JSET_K: case BPF_JMP | BPF_JSET | BPF_X:
/* a jump must set masks on targets */ /* A jump must set masks on targets */
masks[pc + 1 + filter[pc].jt] &= memvalid; masks[pc + 1 + filter[pc].jt] &= memvalid;
masks[pc + 1 + filter[pc].jf] &= memvalid; masks[pc + 1 + filter[pc].jf] &= memvalid;
memvalid = ~0; memvalid = ~0;
...@@ -1185,6 +1139,72 @@ static int check_load_and_stores(struct sock_filter *filter, int flen) ...@@ -1185,6 +1139,72 @@ static int check_load_and_stores(struct sock_filter *filter, int flen)
return ret; return ret;
} }
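For illustration, a two-instruction program this validator rejects, since
M[0] is read before any store initializes it (the example is constructed
here, not taken from the patch):

	struct sock_filter bad_mem[] = {
		BPF_STMT(BPF_LD | BPF_MEM, 0),	/* A = M[0], never written */
		BPF_STMT(BPF_RET | BPF_A, 0),
	};
	/* check_load_and_stores(bad_mem, 2) returns -EINVAL */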
static bool chk_code_allowed(u16 code_to_probe)
{
static const bool codes[] = {
/* 32 bit ALU operations */
[BPF_ALU | BPF_ADD | BPF_K] = true,
[BPF_ALU | BPF_ADD | BPF_X] = true,
[BPF_ALU | BPF_SUB | BPF_K] = true,
[BPF_ALU | BPF_SUB | BPF_X] = true,
[BPF_ALU | BPF_MUL | BPF_K] = true,
[BPF_ALU | BPF_MUL | BPF_X] = true,
[BPF_ALU | BPF_DIV | BPF_K] = true,
[BPF_ALU | BPF_DIV | BPF_X] = true,
[BPF_ALU | BPF_MOD | BPF_K] = true,
[BPF_ALU | BPF_MOD | BPF_X] = true,
[BPF_ALU | BPF_AND | BPF_K] = true,
[BPF_ALU | BPF_AND | BPF_X] = true,
[BPF_ALU | BPF_OR | BPF_K] = true,
[BPF_ALU | BPF_OR | BPF_X] = true,
[BPF_ALU | BPF_XOR | BPF_K] = true,
[BPF_ALU | BPF_XOR | BPF_X] = true,
[BPF_ALU | BPF_LSH | BPF_K] = true,
[BPF_ALU | BPF_LSH | BPF_X] = true,
[BPF_ALU | BPF_RSH | BPF_K] = true,
[BPF_ALU | BPF_RSH | BPF_X] = true,
[BPF_ALU | BPF_NEG] = true,
/* Load instructions */
[BPF_LD | BPF_W | BPF_ABS] = true,
[BPF_LD | BPF_H | BPF_ABS] = true,
[BPF_LD | BPF_B | BPF_ABS] = true,
[BPF_LD | BPF_W | BPF_LEN] = true,
[BPF_LD | BPF_W | BPF_IND] = true,
[BPF_LD | BPF_H | BPF_IND] = true,
[BPF_LD | BPF_B | BPF_IND] = true,
[BPF_LD | BPF_IMM] = true,
[BPF_LD | BPF_MEM] = true,
[BPF_LDX | BPF_W | BPF_LEN] = true,
[BPF_LDX | BPF_B | BPF_MSH] = true,
[BPF_LDX | BPF_IMM] = true,
[BPF_LDX | BPF_MEM] = true,
/* Store instructions */
[BPF_ST] = true,
[BPF_STX] = true,
/* Misc instructions */
[BPF_MISC | BPF_TAX] = true,
[BPF_MISC | BPF_TXA] = true,
/* Return instructions */
[BPF_RET | BPF_K] = true,
[BPF_RET | BPF_A] = true,
/* Jump instructions */
[BPF_JMP | BPF_JA] = true,
[BPF_JMP | BPF_JEQ | BPF_K] = true,
[BPF_JMP | BPF_JEQ | BPF_X] = true,
[BPF_JMP | BPF_JGE | BPF_K] = true,
[BPF_JMP | BPF_JGE | BPF_X] = true,
[BPF_JMP | BPF_JGT | BPF_K] = true,
[BPF_JMP | BPF_JGT | BPF_X] = true,
[BPF_JMP | BPF_JSET | BPF_K] = true,
[BPF_JMP | BPF_JSET | BPF_X] = true,
};
if (code_to_probe >= ARRAY_SIZE(codes))
return false;
return codes[code_to_probe];
}
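	/* Example lookups (illustrative):
	 *   chk_code_allowed(BPF_ALU | BPF_ADD | BPF_K) -> true, listed above
	 *   chk_code_allowed(BPF_RET | BPF_X)           -> false, classic BPF
	 *                                                 only returns A or K
	 */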
/** /**
* sk_chk_filter - verify socket filter code * sk_chk_filter - verify socket filter code
* @filter: filter to verify * @filter: filter to verify
...@@ -1201,154 +1221,76 @@ static int check_load_and_stores(struct sock_filter *filter, int flen) ...@@ -1201,154 +1221,76 @@ static int check_load_and_stores(struct sock_filter *filter, int flen)
*/ */
int sk_chk_filter(struct sock_filter *filter, unsigned int flen) int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
{ {
/*
* Valid instructions are initialized to non-0.
* Invalid instructions are initialized to 0.
*/
static const u8 codes[] = {
[BPF_ALU|BPF_ADD|BPF_K] = BPF_S_ALU_ADD_K,
[BPF_ALU|BPF_ADD|BPF_X] = BPF_S_ALU_ADD_X,
[BPF_ALU|BPF_SUB|BPF_K] = BPF_S_ALU_SUB_K,
[BPF_ALU|BPF_SUB|BPF_X] = BPF_S_ALU_SUB_X,
[BPF_ALU|BPF_MUL|BPF_K] = BPF_S_ALU_MUL_K,
[BPF_ALU|BPF_MUL|BPF_X] = BPF_S_ALU_MUL_X,
[BPF_ALU|BPF_DIV|BPF_X] = BPF_S_ALU_DIV_X,
[BPF_ALU|BPF_MOD|BPF_K] = BPF_S_ALU_MOD_K,
[BPF_ALU|BPF_MOD|BPF_X] = BPF_S_ALU_MOD_X,
[BPF_ALU|BPF_AND|BPF_K] = BPF_S_ALU_AND_K,
[BPF_ALU|BPF_AND|BPF_X] = BPF_S_ALU_AND_X,
[BPF_ALU|BPF_OR|BPF_K] = BPF_S_ALU_OR_K,
[BPF_ALU|BPF_OR|BPF_X] = BPF_S_ALU_OR_X,
[BPF_ALU|BPF_XOR|BPF_K] = BPF_S_ALU_XOR_K,
[BPF_ALU|BPF_XOR|BPF_X] = BPF_S_ALU_XOR_X,
[BPF_ALU|BPF_LSH|BPF_K] = BPF_S_ALU_LSH_K,
[BPF_ALU|BPF_LSH|BPF_X] = BPF_S_ALU_LSH_X,
[BPF_ALU|BPF_RSH|BPF_K] = BPF_S_ALU_RSH_K,
[BPF_ALU|BPF_RSH|BPF_X] = BPF_S_ALU_RSH_X,
[BPF_ALU|BPF_NEG] = BPF_S_ALU_NEG,
[BPF_LD|BPF_W|BPF_ABS] = BPF_S_LD_W_ABS,
[BPF_LD|BPF_H|BPF_ABS] = BPF_S_LD_H_ABS,
[BPF_LD|BPF_B|BPF_ABS] = BPF_S_LD_B_ABS,
[BPF_LD|BPF_W|BPF_LEN] = BPF_S_LD_W_LEN,
[BPF_LD|BPF_W|BPF_IND] = BPF_S_LD_W_IND,
[BPF_LD|BPF_H|BPF_IND] = BPF_S_LD_H_IND,
[BPF_LD|BPF_B|BPF_IND] = BPF_S_LD_B_IND,
[BPF_LD|BPF_IMM] = BPF_S_LD_IMM,
[BPF_LDX|BPF_W|BPF_LEN] = BPF_S_LDX_W_LEN,
[BPF_LDX|BPF_B|BPF_MSH] = BPF_S_LDX_B_MSH,
[BPF_LDX|BPF_IMM] = BPF_S_LDX_IMM,
[BPF_MISC|BPF_TAX] = BPF_S_MISC_TAX,
[BPF_MISC|BPF_TXA] = BPF_S_MISC_TXA,
[BPF_RET|BPF_K] = BPF_S_RET_K,
[BPF_RET|BPF_A] = BPF_S_RET_A,
[BPF_ALU|BPF_DIV|BPF_K] = BPF_S_ALU_DIV_K,
[BPF_LD|BPF_MEM] = BPF_S_LD_MEM,
[BPF_LDX|BPF_MEM] = BPF_S_LDX_MEM,
[BPF_ST] = BPF_S_ST,
[BPF_STX] = BPF_S_STX,
[BPF_JMP|BPF_JA] = BPF_S_JMP_JA,
[BPF_JMP|BPF_JEQ|BPF_K] = BPF_S_JMP_JEQ_K,
[BPF_JMP|BPF_JEQ|BPF_X] = BPF_S_JMP_JEQ_X,
[BPF_JMP|BPF_JGE|BPF_K] = BPF_S_JMP_JGE_K,
[BPF_JMP|BPF_JGE|BPF_X] = BPF_S_JMP_JGE_X,
[BPF_JMP|BPF_JGT|BPF_K] = BPF_S_JMP_JGT_K,
[BPF_JMP|BPF_JGT|BPF_X] = BPF_S_JMP_JGT_X,
[BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
[BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
};
int pc;
bool anc_found; bool anc_found;
int pc;
if (flen == 0 || flen > BPF_MAXINSNS) if (flen == 0 || flen > BPF_MAXINSNS)
return -EINVAL; return -EINVAL;
/* check the filter code now */ /* Check the filter code now */
for (pc = 0; pc < flen; pc++) { for (pc = 0; pc < flen; pc++) {
struct sock_filter *ftest = &filter[pc]; struct sock_filter *ftest = &filter[pc];
u16 code = ftest->code;
if (code >= ARRAY_SIZE(codes)) /* May we actually operate on this code? */
return -EINVAL; if (!chk_code_allowed(ftest->code))
code = codes[code];
if (!code)
return -EINVAL; return -EINVAL;
/* Some instructions need special checks */ /* Some instructions need special checks */
switch (code) { switch (ftest->code) {
case BPF_S_ALU_DIV_K: case BPF_ALU | BPF_DIV | BPF_K:
case BPF_S_ALU_MOD_K: case BPF_ALU | BPF_MOD | BPF_K:
/* check for division by zero */ /* Check for division by zero */
if (ftest->k == 0) if (ftest->k == 0)
return -EINVAL; return -EINVAL;
break; break;
case BPF_S_LD_MEM: case BPF_LD | BPF_MEM:
case BPF_S_LDX_MEM: case BPF_LDX | BPF_MEM:
case BPF_S_ST: case BPF_ST:
case BPF_S_STX: case BPF_STX:
/* check for invalid memory addresses */ /* Check for invalid memory addresses */
if (ftest->k >= BPF_MEMWORDS) if (ftest->k >= BPF_MEMWORDS)
return -EINVAL; return -EINVAL;
break; break;
case BPF_S_JMP_JA: case BPF_JMP | BPF_JA:
/* /* Note, the large ftest->k might cause loops.
* Note, the large ftest->k might cause loops.
* Compare this with conditional jumps below, * Compare this with conditional jumps below,
* where offsets are limited. --ANK (981016) * where offsets are limited. --ANK (981016)
*/ */
if (ftest->k >= (unsigned int)(flen-pc-1)) if (ftest->k >= (unsigned int)(flen - pc - 1))
return -EINVAL; return -EINVAL;
break; break;
case BPF_S_JMP_JEQ_K: case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_S_JMP_JEQ_X: case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_S_JMP_JGE_K: case BPF_JMP | BPF_JGE | BPF_K:
case BPF_S_JMP_JGE_X: case BPF_JMP | BPF_JGE | BPF_X:
case BPF_S_JMP_JGT_K: case BPF_JMP | BPF_JGT | BPF_K:
case BPF_S_JMP_JGT_X: case BPF_JMP | BPF_JGT | BPF_X:
case BPF_S_JMP_JSET_X: case BPF_JMP | BPF_JSET | BPF_K:
case BPF_S_JMP_JSET_K: case BPF_JMP | BPF_JSET | BPF_X:
/* for conditionals both must be safe */ /* Both conditionals must be safe */
if (pc + ftest->jt + 1 >= flen || if (pc + ftest->jt + 1 >= flen ||
pc + ftest->jf + 1 >= flen) pc + ftest->jf + 1 >= flen)
return -EINVAL; return -EINVAL;
break; break;
case BPF_S_LD_W_ABS: case BPF_LD | BPF_W | BPF_ABS:
case BPF_S_LD_H_ABS: case BPF_LD | BPF_H | BPF_ABS:
case BPF_S_LD_B_ABS: case BPF_LD | BPF_B | BPF_ABS:
anc_found = false; anc_found = false;
#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \ if (bpf_anc_helper(ftest) & BPF_ANC)
code = BPF_S_ANC_##CODE; \ anc_found = true;
anc_found = true; \ /* Ancillary operation unknown or unsupported */
break
switch (ftest->k) {
ANCILLARY(PROTOCOL);
ANCILLARY(PKTTYPE);
ANCILLARY(IFINDEX);
ANCILLARY(NLATTR);
ANCILLARY(NLATTR_NEST);
ANCILLARY(MARK);
ANCILLARY(QUEUE);
ANCILLARY(HATYPE);
ANCILLARY(RXHASH);
ANCILLARY(CPU);
ANCILLARY(ALU_XOR_X);
ANCILLARY(VLAN_TAG);
ANCILLARY(VLAN_TAG_PRESENT);
ANCILLARY(PAY_OFFSET);
ANCILLARY(RANDOM);
}
/* ancillary operation unknown or unsupported */
if (anc_found == false && ftest->k >= SKF_AD_OFF) if (anc_found == false && ftest->k >= SKF_AD_OFF)
return -EINVAL; return -EINVAL;
} }
ftest->code = code;
} }
/* last instruction must be a RET code */ /* Last instruction must be a RET code */
switch (filter[flen - 1].code) { switch (filter[flen - 1].code) {
case BPF_S_RET_K: case BPF_RET | BPF_K:
case BPF_S_RET_A: case BPF_RET | BPF_A:
return check_load_and_stores(filter, flen); return check_load_and_stores(filter, flen);
} }
return -EINVAL; return -EINVAL;
} }
EXPORT_SYMBOL(sk_chk_filter); EXPORT_SYMBOL(sk_chk_filter);
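
The validator above no longer translates opcodes through the removed codes[] table; it only asks chk_code_allowed() whether the raw opcode is one it knows, and the instruction stays in BPF_* form throughout. Below is a minimal sketch of such a predicate, modelled on the shape this series gives it — the in-tree opcode list is much longer, so treat the entries as illustrative rather than the exact kernel code:

/* Validity check over raw BPF_* opcodes: a dense bool table indexed
 * directly by opcode. Only a few representative entries are shown.
 */
static bool chk_code_allowed(u16 code_to_probe)
{
	static const bool codes[] = {
		[BPF_ALU | BPF_ADD | BPF_K] = true,
		[BPF_ALU | BPF_ADD | BPF_X] = true,
		[BPF_LD  | BPF_W   | BPF_ABS] = true,
		[BPF_JMP | BPF_JEQ | BPF_K] = true,
		[BPF_RET | BPF_K] = true,
		[BPF_RET | BPF_A] = true,
		/* ... remaining ALU/LD/LDX/ST/STX/JMP/MISC opcodes ... */
	};

	if (code_to_probe >= ARRAY_SIZE(codes))
		return false;

	return codes[code_to_probe];
}

Because the raw opcodes are small, dense integers (class, size/mode and source bits OR-ed together), the table stays compact and the per-instruction check is a single bounds-checked array load.
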
...@@ -1448,7 +1390,7 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp, ...@@ -1448,7 +1390,7 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
{ {
struct sock_filter *old_prog; struct sock_filter *old_prog;
struct sk_filter *old_fp; struct sk_filter *old_fp;
int i, err, new_len, old_len = fp->len; int err, new_len, old_len = fp->len;
/* We are free to overwrite insns et al right here as it /* We are free to overwrite insns et al right here as it
* won't be used at this point in time anymore internally * won't be used at this point in time anymore internally
...@@ -1458,13 +1400,6 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp, ...@@ -1458,13 +1400,6 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
BUILD_BUG_ON(sizeof(struct sock_filter) != BUILD_BUG_ON(sizeof(struct sock_filter) !=
sizeof(struct sock_filter_int)); sizeof(struct sock_filter_int));
/* For now, we need to unfiddle BPF_S_* identifiers in place.
* This can sooner or later on be subject to removal, e.g. when
* JITs have been converted.
*/
for (i = 0; i < fp->len; i++)
sk_decode_filter(&fp->insns[i], &fp->insns[i]);
/* Conversion cannot happen on overlapping memory areas, /* Conversion cannot happen on overlapping memory areas,
* so we need to keep the user BPF around until the 2nd * so we need to keep the user BPF around until the 2nd
* pass. At this time, the user BPF is stored in fp->insns. * pass. At this time, the user BPF is stored in fp->insns.
...@@ -1706,84 +1641,6 @@ int sk_detach_filter(struct sock *sk) ...@@ -1706,84 +1641,6 @@ int sk_detach_filter(struct sock *sk)
} }
EXPORT_SYMBOL_GPL(sk_detach_filter); EXPORT_SYMBOL_GPL(sk_detach_filter);
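
Both the validator and the JITs in this series (see the ARM build_body() hunk earlier in the diff) funnel ancillary loads through bpf_anc_helper(): for a BPF_LD | BPF_{W,H,B} | BPF_ABS instruction whose k falls into the SKF_AD_* range, it returns the code with the BPF_ANC bit set plus the ancillary offset, so a single switch can dispatch on it. A sketch along the lines of the helper this series adds to linux/filter.h follows — the real one enumerates the full SKF_AD_* list:

/* Fold ancillary loads into the opcode: if k points into the
 * SKF_AD_* range, return BPF_ANC | SKF_AD_*; otherwise return the
 * code unchanged. Only a few SKF_AD_* entries are shown here.
 */
static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
{
	switch (ftest->code) {
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
#define BPF_ANCILLARY(CODE)	case SKF_AD_OFF + SKF_AD_##CODE: \
				return BPF_ANC | SKF_AD_##CODE
		switch (ftest->k) {
		BPF_ANCILLARY(PROTOCOL);
		BPF_ANCILLARY(PKTTYPE);
		BPF_ANCILLARY(IFINDEX);
		/* ... remaining SKF_AD_* offsets ... */
		}
		/* plain packet load, fall through */
	default:
		return ftest->code;
	}
}

This is what lets sk_chk_filter() above replace the old fifteen-entry ANCILLARY() switch with a single bpf_anc_helper(ftest) & BPF_ANC test.
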
void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
{
static const u16 decodes[] = {
[BPF_S_ALU_ADD_K] = BPF_ALU|BPF_ADD|BPF_K,
[BPF_S_ALU_ADD_X] = BPF_ALU|BPF_ADD|BPF_X,
[BPF_S_ALU_SUB_K] = BPF_ALU|BPF_SUB|BPF_K,
[BPF_S_ALU_SUB_X] = BPF_ALU|BPF_SUB|BPF_X,
[BPF_S_ALU_MUL_K] = BPF_ALU|BPF_MUL|BPF_K,
[BPF_S_ALU_MUL_X] = BPF_ALU|BPF_MUL|BPF_X,
[BPF_S_ALU_DIV_X] = BPF_ALU|BPF_DIV|BPF_X,
[BPF_S_ALU_MOD_K] = BPF_ALU|BPF_MOD|BPF_K,
[BPF_S_ALU_MOD_X] = BPF_ALU|BPF_MOD|BPF_X,
[BPF_S_ALU_AND_K] = BPF_ALU|BPF_AND|BPF_K,
[BPF_S_ALU_AND_X] = BPF_ALU|BPF_AND|BPF_X,
[BPF_S_ALU_OR_K] = BPF_ALU|BPF_OR|BPF_K,
[BPF_S_ALU_OR_X] = BPF_ALU|BPF_OR|BPF_X,
[BPF_S_ALU_XOR_K] = BPF_ALU|BPF_XOR|BPF_K,
[BPF_S_ALU_XOR_X] = BPF_ALU|BPF_XOR|BPF_X,
[BPF_S_ALU_LSH_K] = BPF_ALU|BPF_LSH|BPF_K,
[BPF_S_ALU_LSH_X] = BPF_ALU|BPF_LSH|BPF_X,
[BPF_S_ALU_RSH_K] = BPF_ALU|BPF_RSH|BPF_K,
[BPF_S_ALU_RSH_X] = BPF_ALU|BPF_RSH|BPF_X,
[BPF_S_ALU_NEG] = BPF_ALU|BPF_NEG,
[BPF_S_LD_W_ABS] = BPF_LD|BPF_W|BPF_ABS,
[BPF_S_LD_H_ABS] = BPF_LD|BPF_H|BPF_ABS,
[BPF_S_LD_B_ABS] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_PROTOCOL] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_PKTTYPE] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_IFINDEX] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_NLATTR] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_NLATTR_NEST] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_MARK] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_QUEUE] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_HATYPE] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_RXHASH] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_CPU] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_ALU_XOR_X] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_VLAN_TAG] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_PAY_OFFSET] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_ANC_RANDOM] = BPF_LD|BPF_B|BPF_ABS,
[BPF_S_LD_W_LEN] = BPF_LD|BPF_W|BPF_LEN,
[BPF_S_LD_W_IND] = BPF_LD|BPF_W|BPF_IND,
[BPF_S_LD_H_IND] = BPF_LD|BPF_H|BPF_IND,
[BPF_S_LD_B_IND] = BPF_LD|BPF_B|BPF_IND,
[BPF_S_LD_IMM] = BPF_LD|BPF_IMM,
[BPF_S_LDX_W_LEN] = BPF_LDX|BPF_W|BPF_LEN,
[BPF_S_LDX_B_MSH] = BPF_LDX|BPF_B|BPF_MSH,
[BPF_S_LDX_IMM] = BPF_LDX|BPF_IMM,
[BPF_S_MISC_TAX] = BPF_MISC|BPF_TAX,
[BPF_S_MISC_TXA] = BPF_MISC|BPF_TXA,
[BPF_S_RET_K] = BPF_RET|BPF_K,
[BPF_S_RET_A] = BPF_RET|BPF_A,
[BPF_S_ALU_DIV_K] = BPF_ALU|BPF_DIV|BPF_K,
[BPF_S_LD_MEM] = BPF_LD|BPF_MEM,
[BPF_S_LDX_MEM] = BPF_LDX|BPF_MEM,
[BPF_S_ST] = BPF_ST,
[BPF_S_STX] = BPF_STX,
[BPF_S_JMP_JA] = BPF_JMP|BPF_JA,
[BPF_S_JMP_JEQ_K] = BPF_JMP|BPF_JEQ|BPF_K,
[BPF_S_JMP_JEQ_X] = BPF_JMP|BPF_JEQ|BPF_X,
[BPF_S_JMP_JGE_K] = BPF_JMP|BPF_JGE|BPF_K,
[BPF_S_JMP_JGE_X] = BPF_JMP|BPF_JGE|BPF_X,
[BPF_S_JMP_JGT_K] = BPF_JMP|BPF_JGT|BPF_K,
[BPF_S_JMP_JGT_X] = BPF_JMP|BPF_JGT|BPF_X,
[BPF_S_JMP_JSET_K] = BPF_JMP|BPF_JSET|BPF_K,
[BPF_S_JMP_JSET_X] = BPF_JMP|BPF_JSET|BPF_X,
};
u16 code;
code = filt->code;
to->code = decodes[code];
to->jt = filt->jt;
to->jf = filt->jf;
to->k = filt->k;
}
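
With nothing left converting to or from the shadow enum, sk_decode_filter() and its decodes[] table above are dead code: filters stay in the raw, self-describing BPF_* encoding from sk_chk_filter() all the way to the interpreter and JITs, and that encoding can be taken apart with the long-standing accessor macros instead of a reverse table. A small hypothetical userspace demo, assuming the uapi headers are available:

/* Raw classic-BPF opcodes decompose with the standard accessor
 * macros; no reverse-translation table is required.
 */
#include <stdio.h>
#include <linux/filter.h>

int main(void)
{
	struct sock_filter insn =
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 42, 0, 1);

	printf("class=%#x op=%#x src=%#x k=%u\n",
	       BPF_CLASS(insn.code),	/* BPF_JMP */
	       BPF_OP(insn.code),	/* BPF_JEQ */
	       BPF_SRC(insn.code),	/* BPF_K   */
	       insn.k);
	return 0;
}
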
int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
unsigned int len) unsigned int len)
{ {
...
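
The sk_get_filter() body is truncated above, but the key point survives the truncation: the kernel/userspace ABI has always exchanged filters in the raw BPF_* encoding, which is why the internal BPF_S_* enum could be dropped with no user-visible change. A hypothetical userspace sketch of the attach side, assuming an already-open socket fd and with error handling trimmed for brevity:

/* Attach a trivial accept-everything classic BPF filter using the
 * raw BPF_* encoding -- the only encoding the ABI has ever used.
 */
#include <linux/filter.h>
#include <sys/socket.h>

static int attach_accept_all(int sock)
{
	struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	/* accept packet */
	};
	struct sock_fprog prog = {
		.len	= sizeof(insns) / sizeof(insns[0]),
		.filter	= insns,
	};

	return setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER,
			  &prog, sizeof(prog));
}
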