Commit c532cea9 authored by David S. Miller

Merge branch 'filter-next'

Daniel Borkmann says:

====================
BPF + test suite updates

These are the last of the bigger BPF changes in my todo
queue for now. Since the first two patches in this series
contain additional test cases for the test suite, I have
rebased them on top of current net-next with the set from [1]
applied, to avoid introducing unnecessary merge conflicts.

For details, please refer to the individual patches. Test
suite runs fine with the set applied.

 [1] http://patchwork.ozlabs.org/patch/352599/
     http://patchwork.ozlabs.org/patch/352600/
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 019ee792 f8f6d679
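
For context: the conversions below drop the kernel-internal BPF_S_* opcode enum and instead match on the raw classic BPF encoding, where each opcode is OR-composed from an instruction class, a size or mode, and a source field. A minimal user-space sketch of that composition (the filter itself is a toy for illustration, not taken from this series):

#include <linux/filter.h>   /* BPF_STMT, BPF_JUMP, struct sock_fprog */
#include <linux/if_ether.h> /* ETH_P_IP */

/* Classic BPF opcodes compose by OR: BPF_LD | BPF_H | BPF_ABS reads a
 * halfword at an absolute packet offset. This toy filter accepts IPv4
 * frames (EtherType at offset 12) and drops everything else.
 */
static struct sock_filter insns[] = {
    BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),              /* A = EtherType */
    BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_IP, 0, 1), /* IPv4? */
    BPF_STMT(BPF_RET | BPF_K, 0xffffffff),               /* accept */
    BPF_STMT(BPF_RET | BPF_K, 0),                        /* drop */
};

static struct sock_fprog prog = {
    .len    = sizeof(insns) / sizeof(insns[0]),
    .filter = insns,
};

Such a program would be attached with setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)).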
@@ -78,7 +78,7 @@ sk_load_byte_positive_offset:
     blr

 /*
- * BPF_S_LDX_B_MSH: ldxb 4*([offset]&0xf)
+ * BPF_LDX | BPF_B | BPF_MSH: ldxb 4*([offset]&0xf)
  * r_addr is the offset value
  */
     .globl sk_load_byte_msh
@@ -415,20 +415,11 @@ void bpf_jit_compile(struct sk_filter *fp)
         emit_reg_move(O7, r_saved_O7);

         switch (filter[0].code) {
-        case BPF_S_RET_K:
-        case BPF_S_LD_W_LEN:
-        case BPF_S_ANC_PROTOCOL:
-        case BPF_S_ANC_PKTTYPE:
-        case BPF_S_ANC_IFINDEX:
-        case BPF_S_ANC_MARK:
-        case BPF_S_ANC_RXHASH:
-        case BPF_S_ANC_VLAN_TAG:
-        case BPF_S_ANC_VLAN_TAG_PRESENT:
-        case BPF_S_ANC_CPU:
-        case BPF_S_ANC_QUEUE:
-        case BPF_S_LD_W_ABS:
-        case BPF_S_LD_H_ABS:
-        case BPF_S_LD_B_ABS:
+        case BPF_RET | BPF_K:
+        case BPF_LD | BPF_W | BPF_LEN:
+        case BPF_LD | BPF_W | BPF_ABS:
+        case BPF_LD | BPF_H | BPF_ABS:
+        case BPF_LD | BPF_B | BPF_ABS:
             /* The first instruction sets the A register (or is
              * a "RET 'constant'")
              */
@@ -445,59 +436,60 @@ void bpf_jit_compile(struct sk_filter *fp)
             unsigned int t_offset;
             unsigned int f_offset;
             u32 t_op, f_op;
+            u16 code = bpf_anc_helper(&filter[i]);
             int ilen;

-            switch (filter[i].code) {
-            case BPF_S_ALU_ADD_X:    /* A += X; */
+            switch (code) {
+            case BPF_ALU | BPF_ADD | BPF_X:    /* A += X; */
                 emit_alu_X(ADD);
                 break;
-            case BPF_S_ALU_ADD_K:    /* A += K; */
+            case BPF_ALU | BPF_ADD | BPF_K:    /* A += K; */
                 emit_alu_K(ADD, K);
                 break;
-            case BPF_S_ALU_SUB_X:    /* A -= X; */
+            case BPF_ALU | BPF_SUB | BPF_X:    /* A -= X; */
                 emit_alu_X(SUB);
                 break;
-            case BPF_S_ALU_SUB_K:    /* A -= K */
+            case BPF_ALU | BPF_SUB | BPF_K:    /* A -= K */
                 emit_alu_K(SUB, K);
                 break;
-            case BPF_S_ALU_AND_X:    /* A &= X */
+            case BPF_ALU | BPF_AND | BPF_X:    /* A &= X */
                 emit_alu_X(AND);
                 break;
-            case BPF_S_ALU_AND_K:    /* A &= K */
+            case BPF_ALU | BPF_AND | BPF_K:    /* A &= K */
                 emit_alu_K(AND, K);
                 break;
-            case BPF_S_ALU_OR_X:    /* A |= X */
+            case BPF_ALU | BPF_OR | BPF_X:    /* A |= X */
                 emit_alu_X(OR);
                 break;
-            case BPF_S_ALU_OR_K:    /* A |= K */
+            case BPF_ALU | BPF_OR | BPF_K:    /* A |= K */
                 emit_alu_K(OR, K);
                 break;
-            case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
-            case BPF_S_ALU_XOR_X:
+            case BPF_ANC | SKF_AD_ALU_XOR_X: /* A ^= X; */
+            case BPF_ALU | BPF_XOR | BPF_X:
                 emit_alu_X(XOR);
                 break;
-            case BPF_S_ALU_XOR_K:    /* A ^= K */
+            case BPF_ALU | BPF_XOR | BPF_K:    /* A ^= K */
                 emit_alu_K(XOR, K);
                 break;
-            case BPF_S_ALU_LSH_X:    /* A <<= X */
+            case BPF_ALU | BPF_LSH | BPF_X:    /* A <<= X */
                 emit_alu_X(SLL);
                 break;
-            case BPF_S_ALU_LSH_K:    /* A <<= K */
+            case BPF_ALU | BPF_LSH | BPF_K:    /* A <<= K */
                 emit_alu_K(SLL, K);
                 break;
-            case BPF_S_ALU_RSH_X:    /* A >>= X */
+            case BPF_ALU | BPF_RSH | BPF_X:    /* A >>= X */
                 emit_alu_X(SRL);
                 break;
-            case BPF_S_ALU_RSH_K:    /* A >>= K */
+            case BPF_ALU | BPF_RSH | BPF_K:    /* A >>= K */
                 emit_alu_K(SRL, K);
                 break;
-            case BPF_S_ALU_MUL_X:    /* A *= X; */
+            case BPF_ALU | BPF_MUL | BPF_X:    /* A *= X; */
                 emit_alu_X(MUL);
                 break;
-            case BPF_S_ALU_MUL_K:    /* A *= K */
+            case BPF_ALU | BPF_MUL | BPF_K:    /* A *= K */
                 emit_alu_K(MUL, K);
                 break;
-            case BPF_S_ALU_DIV_K:    /* A /= K with K != 0*/
+            case BPF_ALU | BPF_DIV | BPF_K:    /* A /= K with K != 0*/
                 if (K == 1)
                     break;
                 emit_write_y(G0);
@@ -512,7 +504,7 @@ void bpf_jit_compile(struct sk_filter *fp)
 #endif
                 emit_alu_K(DIV, K);
                 break;
-            case BPF_S_ALU_DIV_X:    /* A /= X; */
+            case BPF_ALU | BPF_DIV | BPF_X:    /* A /= X; */
                 emit_cmpi(r_X, 0);
                 if (pc_ret0 > 0) {
                     t_offset = addrs[pc_ret0 - 1];
@@ -544,10 +536,10 @@ void bpf_jit_compile(struct sk_filter *fp)
 #endif
                 emit_alu_X(DIV);
                 break;
-            case BPF_S_ALU_NEG:
+            case BPF_ALU | BPF_NEG:
                 emit_neg();
                 break;
-            case BPF_S_RET_K:
+            case BPF_RET | BPF_K:
                 if (!K) {
                     if (pc_ret0 == -1)
                         pc_ret0 = i;
@@ -556,7 +548,7 @@ void bpf_jit_compile(struct sk_filter *fp)
                     emit_loadimm(K, r_A);
                 }
                 /* Fallthrough */
-            case BPF_S_RET_A:
+            case BPF_RET | BPF_A:
                 if (seen_or_pass0) {
                     if (i != flen - 1) {
                         emit_jump(cleanup_addr);
@@ -573,18 +565,18 @@ void bpf_jit_compile(struct sk_filter *fp)
                 emit_jmpl(r_saved_O7, 8, G0);
                 emit_reg_move(r_A, O0); /* delay slot */
                 break;
-            case BPF_S_MISC_TAX:
+            case BPF_MISC | BPF_TAX:
                 seen |= SEEN_XREG;
                 emit_reg_move(r_A, r_X);
                 break;
-            case BPF_S_MISC_TXA:
+            case BPF_MISC | BPF_TXA:
                 seen |= SEEN_XREG;
                 emit_reg_move(r_X, r_A);
                 break;
-            case BPF_S_ANC_CPU:
+            case BPF_ANC | SKF_AD_CPU:
                 emit_load_cpu(r_A);
                 break;
-            case BPF_S_ANC_PROTOCOL:
+            case BPF_ANC | SKF_AD_PROTOCOL:
                 emit_skb_load16(protocol, r_A);
                 break;
 #if 0
@@ -592,38 +584,38 @@
              * a bit field even though we very much
              * know what we are doing here.
              */
-            case BPF_S_ANC_PKTTYPE:
+            case BPF_ANC | SKF_AD_PKTTYPE:
                 __emit_skb_load8(pkt_type, r_A);
                 emit_alu_K(SRL, 5);
                 break;
 #endif
-            case BPF_S_ANC_IFINDEX:
+            case BPF_ANC | SKF_AD_IFINDEX:
                 emit_skb_loadptr(dev, r_A);
                 emit_cmpi(r_A, 0);
                 emit_branch(BNE_PTR, cleanup_addr + 4);
                 emit_nop();
                 emit_load32(r_A, struct net_device, ifindex, r_A);
                 break;
-            case BPF_S_ANC_MARK:
+            case BPF_ANC | SKF_AD_MARK:
                 emit_skb_load32(mark, r_A);
                 break;
-            case BPF_S_ANC_QUEUE:
+            case BPF_ANC | SKF_AD_QUEUE:
                 emit_skb_load16(queue_mapping, r_A);
                 break;
-            case BPF_S_ANC_HATYPE:
+            case BPF_ANC | SKF_AD_HATYPE:
                 emit_skb_loadptr(dev, r_A);
                 emit_cmpi(r_A, 0);
                 emit_branch(BNE_PTR, cleanup_addr + 4);
                 emit_nop();
                 emit_load16(r_A, struct net_device, type, r_A);
                 break;
-            case BPF_S_ANC_RXHASH:
+            case BPF_ANC | SKF_AD_RXHASH:
                 emit_skb_load32(hash, r_A);
                 break;
-            case BPF_S_ANC_VLAN_TAG:
-            case BPF_S_ANC_VLAN_TAG_PRESENT:
+            case BPF_ANC | SKF_AD_VLAN_TAG:
+            case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
                 emit_skb_load16(vlan_tci, r_A);
-                if (filter[i].code == BPF_S_ANC_VLAN_TAG) {
+                if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
                     emit_andi(r_A, VLAN_VID_MASK, r_A);
                 } else {
                     emit_loadimm(VLAN_TAG_PRESENT, r_TMP);
@@ -631,44 +623,44 @@
                 }
                 break;
-            case BPF_S_LD_IMM:
+            case BPF_LD | BPF_IMM:
                 emit_loadimm(K, r_A);
                 break;
-            case BPF_S_LDX_IMM:
+            case BPF_LDX | BPF_IMM:
                 emit_loadimm(K, r_X);
                 break;
-            case BPF_S_LD_MEM:
+            case BPF_LD | BPF_MEM:
                 emit_ldmem(K * 4, r_A);
                 break;
-            case BPF_S_LDX_MEM:
+            case BPF_LDX | BPF_MEM:
                 emit_ldmem(K * 4, r_X);
                 break;
-            case BPF_S_ST:
+            case BPF_ST:
                 emit_stmem(K * 4, r_A);
                 break;
-            case BPF_S_STX:
+            case BPF_STX:
                 emit_stmem(K * 4, r_X);
                 break;
 #define CHOOSE_LOAD_FUNC(K, func) \
     ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
-            case BPF_S_LD_W_ABS:
+            case BPF_LD | BPF_W | BPF_ABS:
                 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word);
 common_load:        seen |= SEEN_DATAREF;
                 emit_loadimm(K, r_OFF);
                 emit_call(func);
                 break;
-            case BPF_S_LD_H_ABS:
+            case BPF_LD | BPF_H | BPF_ABS:
                 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half);
                 goto common_load;
-            case BPF_S_LD_B_ABS:
+            case BPF_LD | BPF_B | BPF_ABS:
                 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte);
                 goto common_load;
-            case BPF_S_LDX_B_MSH:
+            case BPF_LDX | BPF_B | BPF_MSH:
                 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh);
                 goto common_load;
-            case BPF_S_LD_W_IND:
+            case BPF_LD | BPF_W | BPF_IND:
                 func = bpf_jit_load_word;
 common_load_ind:    seen |= SEEN_DATAREF | SEEN_XREG;
                 if (K) {
@@ -683,13 +675,13 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
                 }
                 emit_call(func);
                 break;
-            case BPF_S_LD_H_IND:
+            case BPF_LD | BPF_H | BPF_IND:
                 func = bpf_jit_load_half;
                 goto common_load_ind;
-            case BPF_S_LD_B_IND:
+            case BPF_LD | BPF_B | BPF_IND:
                 func = bpf_jit_load_byte;
                 goto common_load_ind;
-            case BPF_S_JMP_JA:
+            case BPF_JMP | BPF_JA:
                 emit_jump(addrs[i + K]);
                 emit_nop();
                 break;
@@ -700,14 +692,14 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
         f_op = FOP;        \
         goto cond_branch

-        COND_SEL(BPF_S_JMP_JGT_K, BGU, BLEU);
-        COND_SEL(BPF_S_JMP_JGE_K, BGEU, BLU);
-        COND_SEL(BPF_S_JMP_JEQ_K, BE, BNE);
-        COND_SEL(BPF_S_JMP_JSET_K, BNE, BE);
-        COND_SEL(BPF_S_JMP_JGT_X, BGU, BLEU);
-        COND_SEL(BPF_S_JMP_JGE_X, BGEU, BLU);
-        COND_SEL(BPF_S_JMP_JEQ_X, BE, BNE);
-        COND_SEL(BPF_S_JMP_JSET_X, BNE, BE);
+        COND_SEL(BPF_JMP | BPF_JGT | BPF_K, BGU, BLEU);
+        COND_SEL(BPF_JMP | BPF_JGE | BPF_K, BGEU, BLU);
+        COND_SEL(BPF_JMP | BPF_JEQ | BPF_K, BE, BNE);
+        COND_SEL(BPF_JMP | BPF_JSET | BPF_K, BNE, BE);
+        COND_SEL(BPF_JMP | BPF_JGT | BPF_X, BGU, BLEU);
+        COND_SEL(BPF_JMP | BPF_JGE | BPF_X, BGEU, BLU);
+        COND_SEL(BPF_JMP | BPF_JEQ | BPF_X, BE, BNE);
+        COND_SEL(BPF_JMP | BPF_JSET | BPF_X, BNE, BE);

 cond_branch:    f_offset = addrs[i + filter[i].jf];
         t_offset = addrs[i + filter[i].jt];
@@ -719,20 +711,20 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
                 break;
             }

-            switch (filter[i].code) {
-            case BPF_S_JMP_JGT_X:
-            case BPF_S_JMP_JGE_X:
-            case BPF_S_JMP_JEQ_X:
+            switch (code) {
+            case BPF_JMP | BPF_JGT | BPF_X:
+            case BPF_JMP | BPF_JGE | BPF_X:
+            case BPF_JMP | BPF_JEQ | BPF_X:
                 seen |= SEEN_XREG;
                 emit_cmp(r_A, r_X);
                 break;
-            case BPF_S_JMP_JSET_X:
+            case BPF_JMP | BPF_JSET | BPF_X:
                 seen |= SEEN_XREG;
                 emit_btst(r_A, r_X);
                 break;
-            case BPF_S_JMP_JEQ_K:
-            case BPF_S_JMP_JGT_K:
-            case BPF_S_JMP_JGE_K:
+            case BPF_JMP | BPF_JEQ | BPF_K:
+            case BPF_JMP | BPF_JGT | BPF_K:
+            case BPF_JMP | BPF_JGE | BPF_K:
                 if (is_simm13(K)) {
                     emit_cmpi(r_A, K);
                 } else {
@@ -740,7 +732,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
                     emit_cmp(r_A, r_TMP);
                 }
                 break;
-            case BPF_S_JMP_JSET_K:
+            case BPF_JMP | BPF_JSET | BPF_K:
                 if (is_simm13(K)) {
                     emit_btsti(r_A, K);
                 } else {
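
The sparc JIT now dispatches on the u16 code returned by bpf_anc_helper() instead of the old BPF_S_* values; ancillary loads come back with the kernel-internal BPF_ANC bit OR-ed with their SKF_AD_* index, which is what the BPF_ANC | SKF_AD_* case labels above match on. A self-contained sketch of that idea, not the exact kernel implementation (BPF_ANC is redefined here because it is not exported to user space, and the range check stands in for the kernel helper's explicit per-offset whitelist):

#include <stdint.h>
#include <linux/filter.h> /* struct sock_filter, BPF_*, SKF_AD_* */

/* Kernel-internal bit marking ancillary loads; redefined for the sketch. */
#define BPF_ANC (1 << 15)

/* For an absolute load whose offset k falls in the ancillary range,
 * return the code with BPF_ANC and the SKF_AD_* index folded in;
 * everything else passes through unchanged.
 */
static uint16_t anc_helper_sketch(const struct sock_filter *ftest)
{
    switch (ftest->code) {
    case BPF_LD | BPF_W | BPF_ABS:
    case BPF_LD | BPF_H | BPF_ABS:
    case BPF_LD | BPF_B | BPF_ABS:
        if (ftest->k >= (uint32_t)SKF_AD_OFF &&
            ftest->k < (uint32_t)SKF_AD_OFF + SKF_AD_MAX)
            return BPF_ANC | (ftest->k - (uint32_t)SKF_AD_OFF);
        break;
    }
    return ftest->code;
}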
@@ -103,60 +103,59 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
         u32 k = ftest->k;

         switch (code) {
-        case BPF_S_LD_W_ABS:
+        case BPF_LD | BPF_W | BPF_ABS:
             ftest->code = BPF_LDX | BPF_W | BPF_ABS;
             /* 32-bit aligned and not out of bounds. */
             if (k >= sizeof(struct seccomp_data) || k & 3)
                 return -EINVAL;
             continue;
-        case BPF_S_LD_W_LEN:
+        case BPF_LD | BPF_W | BPF_LEN:
             ftest->code = BPF_LD | BPF_IMM;
             ftest->k = sizeof(struct seccomp_data);
             continue;
-        case BPF_S_LDX_W_LEN:
+        case BPF_LDX | BPF_W | BPF_LEN:
             ftest->code = BPF_LDX | BPF_IMM;
             ftest->k = sizeof(struct seccomp_data);
             continue;
         /* Explicitly include allowed calls. */
-        case BPF_S_RET_K:
-        case BPF_S_RET_A:
-        case BPF_S_ALU_ADD_K:
-        case BPF_S_ALU_ADD_X:
-        case BPF_S_ALU_SUB_K:
-        case BPF_S_ALU_SUB_X:
-        case BPF_S_ALU_MUL_K:
-        case BPF_S_ALU_MUL_X:
-        case BPF_S_ALU_DIV_X:
-        case BPF_S_ALU_AND_K:
-        case BPF_S_ALU_AND_X:
-        case BPF_S_ALU_OR_K:
-        case BPF_S_ALU_OR_X:
-        case BPF_S_ALU_XOR_K:
-        case BPF_S_ALU_XOR_X:
-        case BPF_S_ALU_LSH_K:
-        case BPF_S_ALU_LSH_X:
-        case BPF_S_ALU_RSH_K:
-        case BPF_S_ALU_RSH_X:
-        case BPF_S_ALU_NEG:
-        case BPF_S_LD_IMM:
-        case BPF_S_LDX_IMM:
-        case BPF_S_MISC_TAX:
-        case BPF_S_MISC_TXA:
-        case BPF_S_ALU_DIV_K:
-        case BPF_S_LD_MEM:
-        case BPF_S_LDX_MEM:
-        case BPF_S_ST:
-        case BPF_S_STX:
-        case BPF_S_JMP_JA:
-        case BPF_S_JMP_JEQ_K:
-        case BPF_S_JMP_JEQ_X:
-        case BPF_S_JMP_JGE_K:
-        case BPF_S_JMP_JGE_X:
-        case BPF_S_JMP_JGT_K:
-        case BPF_S_JMP_JGT_X:
-        case BPF_S_JMP_JSET_K:
-        case BPF_S_JMP_JSET_X:
-            sk_decode_filter(ftest, ftest);
+        case BPF_RET | BPF_K:
+        case BPF_RET | BPF_A:
+        case BPF_ALU | BPF_ADD | BPF_K:
+        case BPF_ALU | BPF_ADD | BPF_X:
+        case BPF_ALU | BPF_SUB | BPF_K:
+        case BPF_ALU | BPF_SUB | BPF_X:
+        case BPF_ALU | BPF_MUL | BPF_K:
+        case BPF_ALU | BPF_MUL | BPF_X:
+        case BPF_ALU | BPF_DIV | BPF_K:
+        case BPF_ALU | BPF_DIV | BPF_X:
+        case BPF_ALU | BPF_AND | BPF_K:
+        case BPF_ALU | BPF_AND | BPF_X:
+        case BPF_ALU | BPF_OR | BPF_K:
+        case BPF_ALU | BPF_OR | BPF_X:
+        case BPF_ALU | BPF_XOR | BPF_K:
+        case BPF_ALU | BPF_XOR | BPF_X:
+        case BPF_ALU | BPF_LSH | BPF_K:
+        case BPF_ALU | BPF_LSH | BPF_X:
+        case BPF_ALU | BPF_RSH | BPF_K:
+        case BPF_ALU | BPF_RSH | BPF_X:
+        case BPF_ALU | BPF_NEG:
+        case BPF_LD | BPF_IMM:
+        case BPF_LDX | BPF_IMM:
+        case BPF_MISC | BPF_TAX:
+        case BPF_MISC | BPF_TXA:
+        case BPF_LD | BPF_MEM:
+        case BPF_LDX | BPF_MEM:
+        case BPF_ST:
+        case BPF_STX:
+        case BPF_JMP | BPF_JA:
+        case BPF_JMP | BPF_JEQ | BPF_K:
+        case BPF_JMP | BPF_JEQ | BPF_X:
+        case BPF_JMP | BPF_JGE | BPF_K:
+        case BPF_JMP | BPF_JGE | BPF_X:
+        case BPF_JMP | BPF_JGT | BPF_K:
+        case BPF_JMP | BPF_JGT | BPF_X:
+        case BPF_JMP | BPF_JSET | BPF_K:
+        case BPF_JMP | BPF_JSET | BPF_X:
             continue;
         default:
             return -EINVAL;
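
Since seccomp_check_filter() now matches on the raw opcodes directly, the allowed list above corresponds one-to-one with what user space passes to SECCOMP_MODE_FILTER. An illustrative minimal seccomp program built from the same encodings (not from this commit):

#include <stddef.h>        /* offsetof */
#include <linux/filter.h>  /* BPF_STMT, BPF_JUMP */
#include <linux/seccomp.h> /* struct seccomp_data, SECCOMP_RET_* */
#include <sys/syscall.h>   /* __NR_getpid */

/* Load the syscall number from seccomp_data, allow getpid(),
 * kill the task for anything else.
 */
static struct sock_filter seccomp_insns[] = {
    BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
             offsetof(struct seccomp_data, nr)),
    BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
    BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
    BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
};

Note the BPF_LD | BPF_W | BPF_ABS load must be 4-byte aligned and within sizeof(struct seccomp_data), exactly the bounds the checker enforces above.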
@@ -1493,7 +1493,7 @@ static struct bpf_test tests[] = {
         { },
     },
     {    /* Mainly checking JIT here. */
-        "M[]: STX + LDX",
+        "M[]: alt STX + LDX",
         .u.insns = {
             BPF_STMT(BPF_LDX | BPF_IMM, 100),
             BPF_STMT(BPF_STX, 0),
@@ -1582,6 +1582,101 @@ static struct bpf_test tests[] = {
         { },
         { { 0, 116 } },
     },
+    {    /* Mainly checking JIT here. */
+        "M[]: full STX + full LDX",
+        .u.insns = {
+            BPF_STMT(BPF_LDX | BPF_IMM, 0xbadfeedb),
+            BPF_STMT(BPF_STX, 0),
+            BPF_STMT(BPF_LDX | BPF_IMM, 0xecabedae),
+            BPF_STMT(BPF_STX, 1),
+            BPF_STMT(BPF_LDX | BPF_IMM, 0xafccfeaf),
+            BPF_STMT(BPF_STX, 2),
+            BPF_STMT(BPF_LDX | BPF_IMM, 0xbffdcedc),
+            BPF_STMT(BPF_STX, 3),
+            BPF_STMT(BPF_LDX | BPF_IMM, 0xfbbbdccb),
+            BPF_STMT(BPF_STX, 4),
+            BPF_STMT(BPF_LDX | BPF_IMM, 0xfbabcbda),
+            BPF_STMT(BPF_STX, 5),
+            BPF_STMT(BPF_LDX | BPF_IMM, 0xaedecbdb),
+            BPF_STMT(BPF_STX, 6),
+            BPF_STMT(BPF_LDX | BPF_IMM, 0xadebbade),
+            BPF_STMT(BPF_STX, 7),
+            BPF_STMT(BPF_LDX | BPF_IMM, 0xfcfcfaec),
+            BPF_STMT(BPF_STX, 8),
+            BPF_STMT(BPF_LDX | BPF_IMM, 0xbcdddbdc),
+            BPF_STMT(BPF_STX, 9),
+            BPF_STMT(BPF_LDX | BPF_IMM, 0xfeefdfac),
+            BPF_STMT(BPF_STX, 10),
+            BPF_STMT(BPF_LDX | BPF_IMM, 0xcddcdeea),
+            BPF_STMT(BPF_STX, 11),
+            BPF_STMT(BPF_LDX | BPF_IMM, 0xaccfaebb),
+            BPF_STMT(BPF_STX, 12),
+            BPF_STMT(BPF_LDX | BPF_IMM, 0xbdcccdcf),
+            BPF_STMT(BPF_STX, 13),
+            BPF_STMT(BPF_LDX | BPF_IMM, 0xaaedecde),
+            BPF_STMT(BPF_STX, 14),
+            BPF_STMT(BPF_LDX | BPF_IMM, 0xfaeacdad),
+            BPF_STMT(BPF_STX, 15),
+            BPF_STMT(BPF_LDX | BPF_MEM, 0),
+            BPF_STMT(BPF_MISC | BPF_TXA, 0),
+            BPF_STMT(BPF_LDX | BPF_MEM, 1),
+            BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+            BPF_STMT(BPF_LDX | BPF_MEM, 2),
+            BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+            BPF_STMT(BPF_LDX | BPF_MEM, 3),
+            BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+            BPF_STMT(BPF_LDX | BPF_MEM, 4),
+            BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+            BPF_STMT(BPF_LDX | BPF_MEM, 5),
+            BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+            BPF_STMT(BPF_LDX | BPF_MEM, 6),
+            BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+            BPF_STMT(BPF_LDX | BPF_MEM, 7),
+            BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+            BPF_STMT(BPF_LDX | BPF_MEM, 8),
+            BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+            BPF_STMT(BPF_LDX | BPF_MEM, 9),
+            BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+            BPF_STMT(BPF_LDX | BPF_MEM, 10),
+            BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+            BPF_STMT(BPF_LDX | BPF_MEM, 11),
+            BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+            BPF_STMT(BPF_LDX | BPF_MEM, 12),
+            BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+            BPF_STMT(BPF_LDX | BPF_MEM, 13),
+            BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+            BPF_STMT(BPF_LDX | BPF_MEM, 14),
+            BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+            BPF_STMT(BPF_LDX | BPF_MEM, 15),
+            BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+            BPF_STMT(BPF_RET | BPF_A, 0),
+        },
+        CLASSIC | FLAG_NO_DATA,
+        { },
+        { { 0, 0x2a5a5e5 } },
+    },
+    {
+        "check: SKF_AD_MAX",
+        .u.insns = {
+            BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                 SKF_AD_OFF + SKF_AD_MAX),
+            BPF_STMT(BPF_RET | BPF_A, 0),
+        },
+        CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+        { },
+        { },
+    },
+    {    /* Passes checker but fails during runtime. */
+        "LD [SKF_AD_OFF-1]",
+        .u.insns = {
+            BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                 SKF_AD_OFF - 1),
+            BPF_STMT(BPF_RET | BPF_K, 1),
+        },
+        CLASSIC,
+        { },
+        { { 1, 0 } },
+    },
 };

 static struct net_device dev;
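
The expected { { 0, 0x2a5a5e5 } } result of the "M[]: full STX + full LDX" test is simply the 32-bit wrapping sum of the sixteen stored scratch words; a quick host-side cross-check in plain user-space C (not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Recompute what the filter returns: M[0] + M[1] + ... + M[15] mod 2^32. */
int main(void)
{
    static const uint32_t m[16] = {
        0xbadfeedb, 0xecabedae, 0xafccfeaf, 0xbffdcedc,
        0xfbbbdccb, 0xfbabcbda, 0xaedecbdb, 0xadebbade,
        0xfcfcfaec, 0xbcdddbdc, 0xfeefdfac, 0xcddcdeea,
        0xaccfaebb, 0xbdcccdcf, 0xaaedecde, 0xfaeacdad,
    };
    uint32_t sum = 0;

    for (int i = 0; i < 16; i++)
        sum += m[i];

    printf("0x%x\n", sum); /* prints 0x2a5a5e5 */
    return 0;
}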