Commit cb65f39f authored by Eric Dumazet's avatar Eric Dumazet Committed by Greg Kroah-Hartman

net: bpf_jit: fix divide by 0 generation

[ Upstream commit d00a9dd2 ]

Several problems fixed in this patch :

1) Target of the conditional jump in case a divide by 0 is performed
   by a bpf is wrong.

2) Must 'generate' the full function prologue/epilogue at pass=0,
   or else we can stop too early in pass=1 if the proglen doesn't change.
   (if the increase of prologue/epilogue equals decrease of all
    instructions length because some jumps are converted to near jumps)

3) Change the wrong length detection at the end of code generation to
   issue a more explicit message, no need for a full stack trace.
Reported-by: Phil Oester <kernel@linuxace.com>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 54652156
...@@ -151,17 +151,18 @@ void bpf_jit_compile(struct sk_filter *fp) ...@@ -151,17 +151,18 @@ void bpf_jit_compile(struct sk_filter *fp)
cleanup_addr = proglen; /* epilogue address */ cleanup_addr = proglen; /* epilogue address */
for (pass = 0; pass < 10; pass++) { for (pass = 0; pass < 10; pass++) {
u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
/* no prologue/epilogue for trivial filters (RET something) */ /* no prologue/epilogue for trivial filters (RET something) */
proglen = 0; proglen = 0;
prog = temp; prog = temp;
if (seen) { if (seen_or_pass0) {
EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */ EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
EMIT4(0x48, 0x83, 0xec, 96); /* subq $96,%rsp */ EMIT4(0x48, 0x83, 0xec, 96); /* subq $96,%rsp */
/* note : must save %rbx in case bpf_error is hit */ /* note : must save %rbx in case bpf_error is hit */
if (seen & (SEEN_XREG | SEEN_DATAREF)) if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF))
EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */ EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
if (seen & SEEN_XREG) if (seen_or_pass0 & SEEN_XREG)
CLEAR_X(); /* make sure we dont leek kernel memory */ CLEAR_X(); /* make sure we dont leek kernel memory */
/* /*
...@@ -170,7 +171,7 @@ void bpf_jit_compile(struct sk_filter *fp) ...@@ -170,7 +171,7 @@ void bpf_jit_compile(struct sk_filter *fp)
* r9 = skb->len - skb->data_len * r9 = skb->len - skb->data_len
* r8 = skb->data * r8 = skb->data
*/ */
if (seen & SEEN_DATAREF) { if (seen_or_pass0 & SEEN_DATAREF) {
if (offsetof(struct sk_buff, len) <= 127) if (offsetof(struct sk_buff, len) <= 127)
/* mov off8(%rdi),%r9d */ /* mov off8(%rdi),%r9d */
EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len)); EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
...@@ -260,9 +261,14 @@ void bpf_jit_compile(struct sk_filter *fp) ...@@ -260,9 +261,14 @@ void bpf_jit_compile(struct sk_filter *fp)
case BPF_S_ALU_DIV_X: /* A /= X; */ case BPF_S_ALU_DIV_X: /* A /= X; */
seen |= SEEN_XREG; seen |= SEEN_XREG;
EMIT2(0x85, 0xdb); /* test %ebx,%ebx */ EMIT2(0x85, 0xdb); /* test %ebx,%ebx */
if (pc_ret0 != -1) if (pc_ret0 > 0) {
EMIT_COND_JMP(X86_JE, addrs[pc_ret0] - (addrs[i] - 4)); /* addrs[pc_ret0 - 1] is start address of target
else { * (addrs[i] - 4) is the address following this jmp
* ("xor %edx,%edx; div %ebx" being 4 bytes long)
*/
EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
(addrs[i] - 4));
} else {
EMIT_COND_JMP(X86_JNE, 2 + 5); EMIT_COND_JMP(X86_JNE, 2 + 5);
CLEAR_A(); CLEAR_A();
EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */ EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
...@@ -335,12 +341,12 @@ void bpf_jit_compile(struct sk_filter *fp) ...@@ -335,12 +341,12 @@ void bpf_jit_compile(struct sk_filter *fp)
} }
/* fallinto */ /* fallinto */
case BPF_S_RET_A: case BPF_S_RET_A:
if (seen) { if (seen_or_pass0) {
if (i != flen - 1) { if (i != flen - 1) {
EMIT_JMP(cleanup_addr - addrs[i]); EMIT_JMP(cleanup_addr - addrs[i]);
break; break;
} }
if (seen & SEEN_XREG) if (seen_or_pass0 & SEEN_XREG)
EMIT4(0x48, 0x8b, 0x5d, 0xf8); /* mov -8(%rbp),%rbx */ EMIT4(0x48, 0x8b, 0x5d, 0xf8); /* mov -8(%rbp),%rbx */
EMIT1(0xc9); /* leaveq */ EMIT1(0xc9); /* leaveq */
} }
...@@ -483,8 +489,9 @@ common_load: seen |= SEEN_DATAREF; ...@@ -483,8 +489,9 @@ common_load: seen |= SEEN_DATAREF;
goto common_load; goto common_load;
case BPF_S_LDX_B_MSH: case BPF_S_LDX_B_MSH:
if ((int)K < 0) { if ((int)K < 0) {
if (pc_ret0 != -1) { if (pc_ret0 > 0) {
EMIT_JMP(addrs[pc_ret0] - addrs[i]); /* addrs[pc_ret0 - 1] is the start address */
EMIT_JMP(addrs[pc_ret0 - 1] - addrs[i]);
break; break;
} }
CLEAR_A(); CLEAR_A();
...@@ -599,13 +606,14 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; ...@@ -599,13 +606,14 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
* use it to give the cleanup instruction(s) addr * use it to give the cleanup instruction(s) addr
*/ */
cleanup_addr = proglen - 1; /* ret */ cleanup_addr = proglen - 1; /* ret */
if (seen) if (seen_or_pass0)
cleanup_addr -= 1; /* leaveq */ cleanup_addr -= 1; /* leaveq */
if (seen & SEEN_XREG) if (seen_or_pass0 & SEEN_XREG)
cleanup_addr -= 4; /* mov -8(%rbp),%rbx */ cleanup_addr -= 4; /* mov -8(%rbp),%rbx */
if (image) { if (image) {
WARN_ON(proglen != oldproglen); if (proglen != oldproglen)
pr_err("bpb_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen);
break; break;
} }
if (proglen == oldproglen) { if (proglen == oldproglen) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment