Commit 8556ce79 authored by Daniel Borkmann, committed by David S. Miller

net: filter: remove DL macro

Let's get rid of this macro. After commit 5bcfedf0 ("net: filter:
simplify label names from jump-table"), labels have become more
readable due to the omission of the BPF_ prefix, but at the same time
more generic, so that tools like `git grep -n` no longer find them.
As a middle path, let's remove the DL macro, as it's not strictly
needed and would otherwise just hide the full name.
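
To make the grep argument concrete, here is a minimal before/after
sketch distilled from the diff below (illustration only, not
additional kernel code):

  /* Before: DL builds both the opcode mask and the label name via
   * token pasting, so `git grep -n ALU_ADD_X` finds nothing here.
   */
  #define DL(A, B, C)	[BPF_##A|BPF_##B|BPF_##C] = &&A##_##B##_##C
  		DL(ALU, ADD, X),

  /* After: the expansion is spelled out, so the jump-table entry is
   * directly greppable.
   */
  		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,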
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Acked-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 76fcee24
@@ -37,9 +37,6 @@
 #define BPF_CALL 0x80 /* function call */
 #define BPF_EXIT 0x90 /* function return */
 
-/* Placeholder/dummy for 0 */
-#define BPF_0 0
-
 /* Register numbers */
 enum {
 	BPF_REG_0 = 0,
...
@@ -160,95 +160,100 @@ static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
 	static const void *jumptable[256] = {
 		[0 ... 255] = &&default_label,
 		/* Now overwrite non-defaults ... */
-#define DL(A, B, C)	[BPF_##A|BPF_##B|BPF_##C] = &&A##_##B##_##C
-		DL(ALU, ADD, X),
-		DL(ALU, ADD, K),
-		DL(ALU, SUB, X),
-		DL(ALU, SUB, K),
-		DL(ALU, AND, X),
-		DL(ALU, AND, K),
-		DL(ALU, OR, X),
-		DL(ALU, OR, K),
-		DL(ALU, LSH, X),
-		DL(ALU, LSH, K),
-		DL(ALU, RSH, X),
-		DL(ALU, RSH, K),
-		DL(ALU, XOR, X),
-		DL(ALU, XOR, K),
-		DL(ALU, MUL, X),
-		DL(ALU, MUL, K),
-		DL(ALU, MOV, X),
-		DL(ALU, MOV, K),
-		DL(ALU, DIV, X),
-		DL(ALU, DIV, K),
-		DL(ALU, MOD, X),
-		DL(ALU, MOD, K),
-		DL(ALU, NEG, 0),
-		DL(ALU, END, TO_BE),
-		DL(ALU, END, TO_LE),
-		DL(ALU64, ADD, X),
-		DL(ALU64, ADD, K),
-		DL(ALU64, SUB, X),
-		DL(ALU64, SUB, K),
-		DL(ALU64, AND, X),
-		DL(ALU64, AND, K),
-		DL(ALU64, OR, X),
-		DL(ALU64, OR, K),
-		DL(ALU64, LSH, X),
-		DL(ALU64, LSH, K),
-		DL(ALU64, RSH, X),
-		DL(ALU64, RSH, K),
-		DL(ALU64, XOR, X),
-		DL(ALU64, XOR, K),
-		DL(ALU64, MUL, X),
-		DL(ALU64, MUL, K),
-		DL(ALU64, MOV, X),
-		DL(ALU64, MOV, K),
-		DL(ALU64, ARSH, X),
-		DL(ALU64, ARSH, K),
-		DL(ALU64, DIV, X),
-		DL(ALU64, DIV, K),
-		DL(ALU64, MOD, X),
-		DL(ALU64, MOD, K),
-		DL(ALU64, NEG, 0),
-		DL(JMP, CALL, 0),
-		DL(JMP, JA, 0),
-		DL(JMP, JEQ, X),
-		DL(JMP, JEQ, K),
-		DL(JMP, JNE, X),
-		DL(JMP, JNE, K),
-		DL(JMP, JGT, X),
-		DL(JMP, JGT, K),
-		DL(JMP, JGE, X),
-		DL(JMP, JGE, K),
-		DL(JMP, JSGT, X),
-		DL(JMP, JSGT, K),
-		DL(JMP, JSGE, X),
-		DL(JMP, JSGE, K),
-		DL(JMP, JSET, X),
-		DL(JMP, JSET, K),
-		DL(JMP, EXIT, 0),
-		DL(STX, MEM, B),
-		DL(STX, MEM, H),
-		DL(STX, MEM, W),
-		DL(STX, MEM, DW),
-		DL(STX, XADD, W),
-		DL(STX, XADD, DW),
-		DL(ST, MEM, B),
-		DL(ST, MEM, H),
-		DL(ST, MEM, W),
-		DL(ST, MEM, DW),
-		DL(LDX, MEM, B),
-		DL(LDX, MEM, H),
-		DL(LDX, MEM, W),
-		DL(LDX, MEM, DW),
-		DL(LD, ABS, W),
-		DL(LD, ABS, H),
-		DL(LD, ABS, B),
-		DL(LD, IND, W),
-		DL(LD, IND, H),
-		DL(LD, IND, B),
-#undef DL
+		/* 32 bit ALU operations */
+		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
+		[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
+		[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
+		[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
+		[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
+		[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
+		[BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X,
+		[BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K,
+		[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
+		[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
+		[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
+		[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
+		[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
+		[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
+		[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
+		[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
+		[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
+		[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
+		[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
+		[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
+		[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
+		[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
+		[BPF_ALU | BPF_NEG] = &&ALU_NEG,
+		[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
+		[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
+		/* 64 bit ALU operations */
+		[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
+		[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
+		[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
+		[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
+		[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
+		[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
+		[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
+		[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
+		[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
+		[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
+		[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
+		[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
+		[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
+		[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
+		[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
+		[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
+		[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
+		[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
+		[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
+		[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
+		[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
+		[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
+		[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
+		[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
+		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
+		/* Call instruction */
+		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
+		/* Jumps */
+		[BPF_JMP | BPF_JA] = &&JMP_JA,
+		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
+		[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
+		[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
+		[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
+		[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
+		[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
+		[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
+		[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
+		[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
+		[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
+		[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
+		[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
+		[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
+		[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
+		/* Program return */
+		[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
+		/* Store instructions */
+		[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
+		[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
+		[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
+		[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
+		[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
+		[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
+		[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
+		[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
+		[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
+		[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
+		/* Load instructions */
+		[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
+		[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
+		[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
+		[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
+		[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
+		[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
+		[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
+		[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
+		[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
+		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
 	};
 	void *ptr;
 	int off;
@@ -290,10 +295,10 @@ static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
 	ALU(XOR, ^)
 	ALU(MUL, *)
 #undef ALU
-	ALU_NEG_0:
+	ALU_NEG:
 		A = (u32) -A;
 		CONT;
-	ALU64_NEG_0:
+	ALU64_NEG:
 		A = -A;
 		CONT;
 	ALU_MOV_X:
@@ -382,7 +387,7 @@ static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
 		CONT;
 
 	/* CALL */
-	JMP_CALL_0:
+	JMP_CALL:
 		/* Function call scratches BPF_R1-BPF_R5 registers,
 		 * preserves BPF_R6-BPF_R9, and stores return value
 		 * into BPF_R0.
@@ -392,7 +397,7 @@ static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
 		CONT;
 
 	/* JMP */
-	JMP_JA_0:
+	JMP_JA:
 		insn += insn->off;
 		CONT;
 	JMP_JEQ_X:
@@ -479,7 +484,7 @@ static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
 			CONT_JMP;
 		}
 		CONT;
-	JMP_EXIT_0:
+	JMP_EXIT:
 		return BPF_R0;
 
 	/* STX and ST and LDX*/
...
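
For background on the mechanism the table feeds: the interpreter
dispatches via GCC's labels-as-values extension ("computed goto"),
where `&&label` yields the address of a label and `goto *addr` jumps
through it. The stand-alone sketch below (toy opcodes, not kernel
code) shows the same dispatch pattern in miniature:

  #include <stdio.h>

  enum { OP_ADD, OP_NEG, OP_EXIT };

  static int run(const unsigned char *prog)
  {
  	/* Like the kernel's jumptable[256]: each opcode maps to the
  	 * address of its handler label (GCC labels-as-values).
  	 */
  	static const void *jumptable[] = {
  		[OP_ADD]  = &&ADD,
  		[OP_NEG]  = &&NEG,
  		[OP_EXIT] = &&EXIT,
  	};
  	int acc = 1;

  	/* CONT plays the same role as in __sk_run_filter: fetch the
  	 * next opcode and jump straight to its handler.
  	 */
  #define CONT goto *jumptable[*prog++]
  	CONT;
  ADD:
  	acc += 2;
  	CONT;
  NEG:
  	acc = -acc;
  	CONT;
  EXIT:
  	return acc;
  #undef CONT
  }

  int main(void)
  {
  	const unsigned char prog[] = { OP_ADD, OP_NEG, OP_EXIT };

  	printf("%d\n", run(prog));	/* (1 + 2) negated -> prints -3 */
  	return 0;
  }

Because every handler ends by jumping directly to the next handler,
each opcode gets its own indirect branch, which typically predicts
better than a single switch-based dispatch loop.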