Commit 5bcfedf0 authored by Daniel Borkmann, committed by David S. Miller

net: filter: simplify label names from jump-table

This patch simplifies label naming for the BPF jump-table.
When we define labels via DL(), we simply concatenate and
textify the parts of the instruction opcode, i.e. the class,
subclass, word size, target register and so on. Until now,
each part kept its BPF_ prefix, so the preprocessor generated
a label like BPF_ALU_BPF_ADD_BPF_X for DL(BPF_ALU, BPF_ADD,
BPF_X), whereas a label name of ALU_ADD_X is much easier
to grasp. Pure cleanup only.
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Acked-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent dfee07cc
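
The whole cleanup hinges on how the preprocessor token-pastes and textifies the DL() arguments. As a quick illustration, here is a minimal userspace sketch (not part of the commit; the STR/LABEL helper names are made up) that prints the label name each naming scheme produces:

#include <stdio.h>

#define STR(x) #x
/* Paste the three opcode parts together exactly like DL() does,
 * then stringify the resulting token so we can print it.
 */
#define LABEL(A, B, C) STR(A##_##B##_##C)

int main(void)
{
        /* Old scheme: the full BPF_* names are pasted verbatim. */
        puts(LABEL(BPF_ALU, BPF_ADD, BPF_X));   /* BPF_ALU_BPF_ADD_BPF_X */
        /* New scheme: short names yield the readable label. */
        puts(LABEL(ALU, ADD, X));               /* ALU_ADD_X */
        return 0;
}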
@@ -37,6 +37,9 @@
 #define BPF_CALL 0x80 /* function call */
 #define BPF_EXIT 0x90 /* function return */
 
+/* Placeholder/dummy for 0 */
+#define BPF_0 0
+
 /* BPF has 10 general purpose 64-bit registers and stack frame. */
 #define MAX_BPF_REG 11
...
@@ -156,94 +156,94 @@ unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
         static const void *jumptable[256] = {
                 [0 ... 255] = &&default_label,
                 /* Now overwrite non-defaults ... */
-#define DL(A, B, C) [A|B|C] = &&A##_##B##_##C
-                DL(BPF_ALU, BPF_ADD, BPF_X),
-                DL(BPF_ALU, BPF_ADD, BPF_K),
-                DL(BPF_ALU, BPF_SUB, BPF_X),
-                DL(BPF_ALU, BPF_SUB, BPF_K),
-                DL(BPF_ALU, BPF_AND, BPF_X),
-                DL(BPF_ALU, BPF_AND, BPF_K),
-                DL(BPF_ALU, BPF_OR, BPF_X),
-                DL(BPF_ALU, BPF_OR, BPF_K),
-                DL(BPF_ALU, BPF_LSH, BPF_X),
-                DL(BPF_ALU, BPF_LSH, BPF_K),
-                DL(BPF_ALU, BPF_RSH, BPF_X),
-                DL(BPF_ALU, BPF_RSH, BPF_K),
-                DL(BPF_ALU, BPF_XOR, BPF_X),
-                DL(BPF_ALU, BPF_XOR, BPF_K),
-                DL(BPF_ALU, BPF_MUL, BPF_X),
-                DL(BPF_ALU, BPF_MUL, BPF_K),
-                DL(BPF_ALU, BPF_MOV, BPF_X),
-                DL(BPF_ALU, BPF_MOV, BPF_K),
-                DL(BPF_ALU, BPF_DIV, BPF_X),
-                DL(BPF_ALU, BPF_DIV, BPF_K),
-                DL(BPF_ALU, BPF_MOD, BPF_X),
-                DL(BPF_ALU, BPF_MOD, BPF_K),
-                DL(BPF_ALU, BPF_NEG, 0),
-                DL(BPF_ALU, BPF_END, BPF_TO_BE),
-                DL(BPF_ALU, BPF_END, BPF_TO_LE),
-                DL(BPF_ALU64, BPF_ADD, BPF_X),
-                DL(BPF_ALU64, BPF_ADD, BPF_K),
-                DL(BPF_ALU64, BPF_SUB, BPF_X),
-                DL(BPF_ALU64, BPF_SUB, BPF_K),
-                DL(BPF_ALU64, BPF_AND, BPF_X),
-                DL(BPF_ALU64, BPF_AND, BPF_K),
-                DL(BPF_ALU64, BPF_OR, BPF_X),
-                DL(BPF_ALU64, BPF_OR, BPF_K),
-                DL(BPF_ALU64, BPF_LSH, BPF_X),
-                DL(BPF_ALU64, BPF_LSH, BPF_K),
-                DL(BPF_ALU64, BPF_RSH, BPF_X),
-                DL(BPF_ALU64, BPF_RSH, BPF_K),
-                DL(BPF_ALU64, BPF_XOR, BPF_X),
-                DL(BPF_ALU64, BPF_XOR, BPF_K),
-                DL(BPF_ALU64, BPF_MUL, BPF_X),
-                DL(BPF_ALU64, BPF_MUL, BPF_K),
-                DL(BPF_ALU64, BPF_MOV, BPF_X),
-                DL(BPF_ALU64, BPF_MOV, BPF_K),
-                DL(BPF_ALU64, BPF_ARSH, BPF_X),
-                DL(BPF_ALU64, BPF_ARSH, BPF_K),
-                DL(BPF_ALU64, BPF_DIV, BPF_X),
-                DL(BPF_ALU64, BPF_DIV, BPF_K),
-                DL(BPF_ALU64, BPF_MOD, BPF_X),
-                DL(BPF_ALU64, BPF_MOD, BPF_K),
-                DL(BPF_ALU64, BPF_NEG, 0),
-                DL(BPF_JMP, BPF_CALL, 0),
-                DL(BPF_JMP, BPF_JA, 0),
-                DL(BPF_JMP, BPF_JEQ, BPF_X),
-                DL(BPF_JMP, BPF_JEQ, BPF_K),
-                DL(BPF_JMP, BPF_JNE, BPF_X),
-                DL(BPF_JMP, BPF_JNE, BPF_K),
-                DL(BPF_JMP, BPF_JGT, BPF_X),
-                DL(BPF_JMP, BPF_JGT, BPF_K),
-                DL(BPF_JMP, BPF_JGE, BPF_X),
-                DL(BPF_JMP, BPF_JGE, BPF_K),
-                DL(BPF_JMP, BPF_JSGT, BPF_X),
-                DL(BPF_JMP, BPF_JSGT, BPF_K),
-                DL(BPF_JMP, BPF_JSGE, BPF_X),
-                DL(BPF_JMP, BPF_JSGE, BPF_K),
-                DL(BPF_JMP, BPF_JSET, BPF_X),
-                DL(BPF_JMP, BPF_JSET, BPF_K),
-                DL(BPF_JMP, BPF_EXIT, 0),
-                DL(BPF_STX, BPF_MEM, BPF_B),
-                DL(BPF_STX, BPF_MEM, BPF_H),
-                DL(BPF_STX, BPF_MEM, BPF_W),
-                DL(BPF_STX, BPF_MEM, BPF_DW),
-                DL(BPF_STX, BPF_XADD, BPF_W),
-                DL(BPF_STX, BPF_XADD, BPF_DW),
-                DL(BPF_ST, BPF_MEM, BPF_B),
-                DL(BPF_ST, BPF_MEM, BPF_H),
-                DL(BPF_ST, BPF_MEM, BPF_W),
-                DL(BPF_ST, BPF_MEM, BPF_DW),
-                DL(BPF_LDX, BPF_MEM, BPF_B),
-                DL(BPF_LDX, BPF_MEM, BPF_H),
-                DL(BPF_LDX, BPF_MEM, BPF_W),
-                DL(BPF_LDX, BPF_MEM, BPF_DW),
-                DL(BPF_LD, BPF_ABS, BPF_W),
-                DL(BPF_LD, BPF_ABS, BPF_H),
-                DL(BPF_LD, BPF_ABS, BPF_B),
-                DL(BPF_LD, BPF_IND, BPF_W),
-                DL(BPF_LD, BPF_IND, BPF_H),
-                DL(BPF_LD, BPF_IND, BPF_B),
+#define DL(A, B, C) [BPF_##A|BPF_##B|BPF_##C] = &&A##_##B##_##C
+                DL(ALU, ADD, X),
+                DL(ALU, ADD, K),
+                DL(ALU, SUB, X),
+                DL(ALU, SUB, K),
+                DL(ALU, AND, X),
+                DL(ALU, AND, K),
+                DL(ALU, OR, X),
+                DL(ALU, OR, K),
+                DL(ALU, LSH, X),
+                DL(ALU, LSH, K),
+                DL(ALU, RSH, X),
+                DL(ALU, RSH, K),
+                DL(ALU, XOR, X),
+                DL(ALU, XOR, K),
+                DL(ALU, MUL, X),
+                DL(ALU, MUL, K),
+                DL(ALU, MOV, X),
+                DL(ALU, MOV, K),
+                DL(ALU, DIV, X),
+                DL(ALU, DIV, K),
+                DL(ALU, MOD, X),
+                DL(ALU, MOD, K),
+                DL(ALU, NEG, 0),
+                DL(ALU, END, TO_BE),
+                DL(ALU, END, TO_LE),
+                DL(ALU64, ADD, X),
+                DL(ALU64, ADD, K),
+                DL(ALU64, SUB, X),
+                DL(ALU64, SUB, K),
+                DL(ALU64, AND, X),
+                DL(ALU64, AND, K),
+                DL(ALU64, OR, X),
+                DL(ALU64, OR, K),
+                DL(ALU64, LSH, X),
+                DL(ALU64, LSH, K),
+                DL(ALU64, RSH, X),
+                DL(ALU64, RSH, K),
+                DL(ALU64, XOR, X),
+                DL(ALU64, XOR, K),
+                DL(ALU64, MUL, X),
+                DL(ALU64, MUL, K),
+                DL(ALU64, MOV, X),
+                DL(ALU64, MOV, K),
+                DL(ALU64, ARSH, X),
+                DL(ALU64, ARSH, K),
+                DL(ALU64, DIV, X),
+                DL(ALU64, DIV, K),
+                DL(ALU64, MOD, X),
+                DL(ALU64, MOD, K),
+                DL(ALU64, NEG, 0),
+                DL(JMP, CALL, 0),
+                DL(JMP, JA, 0),
+                DL(JMP, JEQ, X),
+                DL(JMP, JEQ, K),
+                DL(JMP, JNE, X),
+                DL(JMP, JNE, K),
+                DL(JMP, JGT, X),
+                DL(JMP, JGT, K),
+                DL(JMP, JGE, X),
+                DL(JMP, JGE, K),
+                DL(JMP, JSGT, X),
+                DL(JMP, JSGT, K),
+                DL(JMP, JSGE, X),
+                DL(JMP, JSGE, K),
+                DL(JMP, JSET, X),
+                DL(JMP, JSET, K),
+                DL(JMP, EXIT, 0),
+                DL(STX, MEM, B),
+                DL(STX, MEM, H),
+                DL(STX, MEM, W),
+                DL(STX, MEM, DW),
+                DL(STX, XADD, W),
+                DL(STX, XADD, DW),
+                DL(ST, MEM, B),
+                DL(ST, MEM, H),
+                DL(ST, MEM, W),
+                DL(ST, MEM, DW),
+                DL(LDX, MEM, B),
+                DL(LDX, MEM, H),
+                DL(LDX, MEM, W),
+                DL(LDX, MEM, DW),
+                DL(LD, ABS, W),
+                DL(LD, ABS, H),
+                DL(LD, ABS, B),
+                DL(LD, IND, W),
+                DL(LD, IND, H),
+                DL(LD, IND, B),
 #undef DL
         };
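
For readers unfamiliar with the construct in the hunk above: the jump-table relies on the GCC/Clang "labels as values" extension, where &&label takes the address of a goto label and goto *ptr jumps through it. The following self-contained sketch (illustrative opcodes, not the real BPF encoding) shows the same dispatch pattern in miniature:

#include <stdio.h>

enum { OP_INC, OP_DEC, OP_HALT };

static int run(const unsigned char *prog)
{
        /* As in __sk_run_filter(): a 256-entry table of label
         * addresses, with every slot defaulting to an error label.
         */
        static const void *jumptable[256] = {
                [0 ... 255] = &&default_label,
                [OP_INC]  = &&do_inc,
                [OP_DEC]  = &&do_dec,
                [OP_HALT] = &&do_halt,
        };
        int acc = 0;

        /* Mirrors the spirit of the interpreter's CONT macro: fetch
         * the next opcode and jump to its handler.
         */
#define CONT goto *jumptable[*prog++]
        CONT;
do_inc:
        acc++;
        CONT;
do_dec:
        acc--;
        CONT;
do_halt:
        return acc;
default_label:
        return -1;      /* invalid opcode */
#undef CONT
}

int main(void)
{
        static const unsigned char prog[] = { OP_INC, OP_INC, OP_DEC, OP_HALT };

        printf("%d\n", run(prog));      /* prints 1 */
        return 0;
}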
@@ -257,93 +257,93 @@ unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
         /* ALU */
 #define ALU(OPCODE, OP) \
-        BPF_ALU64_##OPCODE##_BPF_X: \
+        ALU64_##OPCODE##_X: \
                 A = A OP X; \
                 CONT; \
-        BPF_ALU_##OPCODE##_BPF_X: \
+        ALU_##OPCODE##_X: \
                 A = (u32) A OP (u32) X; \
                 CONT; \
-        BPF_ALU64_##OPCODE##_BPF_K: \
+        ALU64_##OPCODE##_K: \
                 A = A OP K; \
                 CONT; \
-        BPF_ALU_##OPCODE##_BPF_K: \
+        ALU_##OPCODE##_K: \
                 A = (u32) A OP (u32) K; \
                 CONT;
-        ALU(BPF_ADD, +)
-        ALU(BPF_SUB, -)
-        ALU(BPF_AND, &)
-        ALU(BPF_OR, |)
-        ALU(BPF_LSH, <<)
-        ALU(BPF_RSH, >>)
-        ALU(BPF_XOR, ^)
-        ALU(BPF_MUL, *)
+        ALU(ADD, +)
+        ALU(SUB, -)
+        ALU(AND, &)
+        ALU(OR, |)
+        ALU(LSH, <<)
+        ALU(RSH, >>)
+        ALU(XOR, ^)
+        ALU(MUL, *)
 #undef ALU
-        BPF_ALU_BPF_NEG_0:
+        ALU_NEG_0:
                 A = (u32) -A;
                 CONT;
-        BPF_ALU64_BPF_NEG_0:
+        ALU64_NEG_0:
                 A = -A;
                 CONT;
-        BPF_ALU_BPF_MOV_BPF_X:
+        ALU_MOV_X:
                 A = (u32) X;
                 CONT;
-        BPF_ALU_BPF_MOV_BPF_K:
+        ALU_MOV_K:
                 A = (u32) K;
                 CONT;
-        BPF_ALU64_BPF_MOV_BPF_X:
+        ALU64_MOV_X:
                 A = X;
                 CONT;
-        BPF_ALU64_BPF_MOV_BPF_K:
+        ALU64_MOV_K:
                 A = K;
                 CONT;
-        BPF_ALU64_BPF_ARSH_BPF_X:
+        ALU64_ARSH_X:
                 (*(s64 *) &A) >>= X;
                 CONT;
-        BPF_ALU64_BPF_ARSH_BPF_K:
+        ALU64_ARSH_K:
                 (*(s64 *) &A) >>= K;
                 CONT;
-        BPF_ALU64_BPF_MOD_BPF_X:
+        ALU64_MOD_X:
                 if (unlikely(X == 0))
                         return 0;
                 tmp = A;
                 A = do_div(tmp, X);
                 CONT;
-        BPF_ALU_BPF_MOD_BPF_X:
+        ALU_MOD_X:
                 if (unlikely(X == 0))
                         return 0;
                 tmp = (u32) A;
                 A = do_div(tmp, (u32) X);
                 CONT;
-        BPF_ALU64_BPF_MOD_BPF_K:
+        ALU64_MOD_K:
                 tmp = A;
                 A = do_div(tmp, K);
                 CONT;
-        BPF_ALU_BPF_MOD_BPF_K:
+        ALU_MOD_K:
                 tmp = (u32) A;
                 A = do_div(tmp, (u32) K);
                 CONT;
-        BPF_ALU64_BPF_DIV_BPF_X:
+        ALU64_DIV_X:
                 if (unlikely(X == 0))
                         return 0;
                 do_div(A, X);
                 CONT;
-        BPF_ALU_BPF_DIV_BPF_X:
+        ALU_DIV_X:
                 if (unlikely(X == 0))
                         return 0;
                 tmp = (u32) A;
                 do_div(tmp, (u32) X);
                 A = (u32) tmp;
                 CONT;
-        BPF_ALU64_BPF_DIV_BPF_K:
+        ALU64_DIV_K:
                 do_div(A, K);
                 CONT;
-        BPF_ALU_BPF_DIV_BPF_K:
+        ALU_DIV_K:
                 tmp = (u32) A;
                 do_div(tmp, (u32) K);
                 A = (u32) tmp;
                 CONT;
-        BPF_ALU_BPF_END_BPF_TO_BE:
+        ALU_END_TO_BE:
                 switch (K) {
                 case 16:
                         A = (__force u16) cpu_to_be16(A);
@@ -356,7 +356,7 @@ unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
                         break;
                 }
                 CONT;
-        BPF_ALU_BPF_END_BPF_TO_LE:
+        ALU_END_TO_LE:
                 switch (K) {
                 case 16:
                         A = (__force u16) cpu_to_le16(A);
@@ -371,7 +371,7 @@ unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
                 CONT;
 
         /* CALL */
-        BPF_JMP_BPF_CALL_0:
+        JMP_CALL_0:
                 /* Function call scratches R1-R5 registers, preserves R6-R9,
                  * and stores return value into R0.
                  */
@@ -380,122 +380,122 @@ unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
                 CONT;
 
         /* JMP */
-        BPF_JMP_BPF_JA_0:
+        JMP_JA_0:
                 insn += insn->off;
                 CONT;
-        BPF_JMP_BPF_JEQ_BPF_X:
+        JMP_JEQ_X:
                 if (A == X) {
                         insn += insn->off;
                         CONT_JMP;
                 }
                 CONT;
-        BPF_JMP_BPF_JEQ_BPF_K:
+        JMP_JEQ_K:
                 if (A == K) {
                         insn += insn->off;
                         CONT_JMP;
                 }
                 CONT;
-        BPF_JMP_BPF_JNE_BPF_X:
+        JMP_JNE_X:
                 if (A != X) {
                         insn += insn->off;
                         CONT_JMP;
                 }
                 CONT;
-        BPF_JMP_BPF_JNE_BPF_K:
+        JMP_JNE_K:
                 if (A != K) {
                         insn += insn->off;
                         CONT_JMP;
                 }
                 CONT;
-        BPF_JMP_BPF_JGT_BPF_X:
+        JMP_JGT_X:
                 if (A > X) {
                         insn += insn->off;
                         CONT_JMP;
                 }
                 CONT;
-        BPF_JMP_BPF_JGT_BPF_K:
+        JMP_JGT_K:
                 if (A > K) {
                         insn += insn->off;
                         CONT_JMP;
                 }
                 CONT;
-        BPF_JMP_BPF_JGE_BPF_X:
+        JMP_JGE_X:
                 if (A >= X) {
                         insn += insn->off;
                         CONT_JMP;
                 }
                 CONT;
-        BPF_JMP_BPF_JGE_BPF_K:
+        JMP_JGE_K:
                 if (A >= K) {
                         insn += insn->off;
                         CONT_JMP;
                 }
                 CONT;
-        BPF_JMP_BPF_JSGT_BPF_X:
-                if (((s64)A) > ((s64)X)) {
+        JMP_JSGT_X:
+                if (((s64) A) > ((s64) X)) {
                         insn += insn->off;
                         CONT_JMP;
                 }
                 CONT;
-        BPF_JMP_BPF_JSGT_BPF_K:
-                if (((s64)A) > ((s64)K)) {
+        JMP_JSGT_K:
+                if (((s64) A) > ((s64) K)) {
                         insn += insn->off;
                         CONT_JMP;
                 }
                 CONT;
-        BPF_JMP_BPF_JSGE_BPF_X:
-                if (((s64)A) >= ((s64)X)) {
+        JMP_JSGE_X:
+                if (((s64) A) >= ((s64) X)) {
                         insn += insn->off;
                         CONT_JMP;
                 }
                 CONT;
-        BPF_JMP_BPF_JSGE_BPF_K:
-                if (((s64)A) >= ((s64)K)) {
+        JMP_JSGE_K:
+                if (((s64) A) >= ((s64) K)) {
                         insn += insn->off;
                         CONT_JMP;
                 }
                 CONT;
-        BPF_JMP_BPF_JSET_BPF_X:
+        JMP_JSET_X:
                 if (A & X) {
                         insn += insn->off;
                         CONT_JMP;
                 }
                 CONT;
-        BPF_JMP_BPF_JSET_BPF_K:
+        JMP_JSET_K:
                 if (A & K) {
                         insn += insn->off;
                         CONT_JMP;
                 }
                 CONT;
-        BPF_JMP_BPF_EXIT_0:
+        JMP_EXIT_0:
                 return R0;
 
         /* STX and ST and LDX*/
 #define LDST(SIZEOP, SIZE) \
-        BPF_STX_BPF_MEM_##SIZEOP: \
+        STX_MEM_##SIZEOP: \
                 *(SIZE *)(unsigned long) (A + insn->off) = X; \
                 CONT; \
-        BPF_ST_BPF_MEM_##SIZEOP: \
+        ST_MEM_##SIZEOP: \
                 *(SIZE *)(unsigned long) (A + insn->off) = K; \
                 CONT; \
-        BPF_LDX_BPF_MEM_##SIZEOP: \
+        LDX_MEM_##SIZEOP: \
                 A = *(SIZE *)(unsigned long) (X + insn->off); \
                 CONT;
-        LDST(BPF_B, u8)
-        LDST(BPF_H, u16)
-        LDST(BPF_W, u32)
-        LDST(BPF_DW, u64)
+        LDST(B, u8)
+        LDST(H, u16)
+        LDST(W, u32)
+        LDST(DW, u64)
 #undef LDST
-        BPF_STX_BPF_XADD_BPF_W: /* lock xadd *(u32 *)(A + insn->off) += X */
+        STX_XADD_W: /* lock xadd *(u32 *)(A + insn->off) += X */
                 atomic_add((u32) X, (atomic_t *)(unsigned long)
                            (A + insn->off));
                 CONT;
-        BPF_STX_BPF_XADD_BPF_DW: /* lock xadd *(u64 *)(A + insn->off) += X */
+        STX_XADD_DW: /* lock xadd *(u64 *)(A + insn->off) += X */
                 atomic64_add((u64) X, (atomic64_t *)(unsigned long)
                              (A + insn->off));
                 CONT;
-        BPF_LD_BPF_ABS_BPF_W: /* R0 = ntohl(*(u32 *) (skb->data + K)) */
+        LD_ABS_W: /* R0 = ntohl(*(u32 *) (skb->data + K)) */
                 off = K;
         load_word:
                 /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are only
@@ -524,7 +524,7 @@ unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
                         CONT;
                 }
                 return 0;
-        BPF_LD_BPF_ABS_BPF_H: /* R0 = ntohs(*(u16 *) (skb->data + K)) */
+        LD_ABS_H: /* R0 = ntohs(*(u16 *) (skb->data + K)) */
                 off = K;
         load_half:
                 ptr = load_pointer((struct sk_buff *) ctx, off, 2, &tmp);
@@ -533,7 +533,7 @@ unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
                         CONT;
                 }
                 return 0;
-        BPF_LD_BPF_ABS_BPF_B: /* R0 = *(u8 *) (ctx + K) */
+        LD_ABS_B: /* R0 = *(u8 *) (ctx + K) */
                 off = K;
         load_byte:
                 ptr = load_pointer((struct sk_buff *) ctx, off, 1, &tmp);
@@ -542,13 +542,13 @@ unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
                         CONT;
                 }
                 return 0;
-        BPF_LD_BPF_IND_BPF_W: /* R0 = ntohl(*(u32 *) (skb->data + X + K)) */
+        LD_IND_W: /* R0 = ntohl(*(u32 *) (skb->data + X + K)) */
                 off = K + X;
                 goto load_word;
-        BPF_LD_BPF_IND_BPF_H: /* R0 = ntohs(*(u16 *) (skb->data + X + K)) */
+        LD_IND_H: /* R0 = ntohs(*(u16 *) (skb->data + X + K)) */
                 off = K + X;
                 goto load_half;
-        BPF_LD_BPF_IND_BPF_B: /* R0 = *(u8 *) (skb->data + X + K) */
+        LD_IND_B: /* R0 = *(u8 *) (skb->data + X + K) */
                 off = K + X;
                 goto load_byte;
...
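
A side note on the MOD/DIV handlers in the diff above: the kernel's do_div(n, base) divides n in place and returns the remainder, which is why the MOD cases keep the return value (A = do_div(tmp, X)) while the DIV cases discard it. A userspace stand-in for intuition (do_div itself is a kernel-only macro; my_do_div below is a hypothetical pointer-based equivalent):

#include <stdint.h>
#include <stdio.h>

/* Divide *n in place and return the remainder, mimicking do_div(). */
static uint32_t my_do_div(uint64_t *n, uint32_t base)
{
        uint32_t rem = (uint32_t)(*n % base);

        *n /= base;
        return rem;
}

int main(void)
{
        uint64_t A = 42, tmp = 42;

        /* MOD handler shape: A = do_div(tmp, K); keeps the remainder. */
        printf("42 mod 5 = %u\n", my_do_div(&tmp, 5));          /* 2 */
        /* DIV handler shape: do_div(A, K); keeps the quotient in A. */
        my_do_div(&A, 5);
        printf("42 / 5 = %llu\n", (unsigned long long)A);       /* 8 */
        return 0;
}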