Commit 2809a208 authored by David S. Miller

net: filter: Just In Time compiler for sparc

Signed-off-by: David S. Miller <davem@davemloft.net>
parent f4f9f6e7
arch/sparc/Kconfig
@@ -30,6 +30,7 @@ config SPARC
         select USE_GENERIC_SMP_HELPERS if SMP
         select GENERIC_PCI_IOMAP
         select HAVE_NMI_WATCHDOG if SPARC64
+        select HAVE_BPF_JIT
 
 config SPARC32
         def_bool !64BIT

arch/sparc/Makefile
@@ -66,6 +66,7 @@ head-y += arch/sparc/kernel/init_task.o
 core-y += arch/sparc/kernel/
 core-y += arch/sparc/mm/ arch/sparc/math-emu/
+core-y += arch/sparc/net/
 libs-y += arch/sparc/prom/
 libs-y += arch/sparc/lib/

arch/sparc/net/Makefile
#
# Arch-specific network modules
#
obj-$(CONFIG_BPF_JIT) += bpf_jit_asm.o bpf_jit_comp.o

arch/sparc/net/bpf_jit.h
#ifndef _BPF_JIT_H
#define _BPF_JIT_H

/* Conventions:
 * %g1 : temporary
 * %g2 : Secondary temporary used by SKB data helper stubs.
 * %o0 : pointer to skb (first argument given to JIT function)
 * %o1 : BPF A accumulator
 * %o2 : BPF X accumulator
 * %o3 : Holds saved %o7 so we can call helper functions without needing
 *       to allocate a register window.
 * %o4 : skb->len - skb->data_len (length of the linear packet area)
 * %o5 : skb->data
 */

#ifndef __ASSEMBLER__
#define G0 0x00
#define G1 0x01
#define G2 0x02 /* %g2, referenced by r_TMP2 below */
#define G3 0x03
#define G6 0x06
#define O0 0x08
#define O1 0x09
#define O2 0x0a
#define O3 0x0b
#define O4 0x0c
#define O5 0x0d
#define SP 0x0e
#define O7 0x0f
#define FP 0x1e
#define r_SKB O0
#define r_A O1
#define r_X O2
#define r_saved_O7 O3
#define r_HEADLEN O4
#define r_SKB_DATA O5
#define r_TMP G1
#define r_TMP2 G2
#define r_OFF G3
#else
#define r_SKB %o0
#define r_A %o1
#define r_X %o2
#define r_saved_O7 %o3
#define r_HEADLEN %o4
#define r_SKB_DATA %o5
#define r_TMP %g1
#define r_TMP2 %g2
#define r_OFF %g3
#endif
#endif /* _BPF_JIT_H */
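
The numeric encodings in the #ifndef __ASSEMBLER__ branch above exist so that C code can splice raw register numbers into generated instruction words. The sketch below only illustrates that idea and is not code from this commit: the real emitter lives in bpf_jit_comp.c, whose diff is collapsed further down, and the helper name fmt3_rr is invented here.

/*
 * Illustrative only; not part of this commit.  Packs two of the
 * numeric register IDs from bpf_jit.h into a SPARC format-3
 * register-register instruction word.
 */
#include <stdint.h>
#include <stdio.h>

#define O1 0x09         /* r_A: BPF A accumulator */
#define O2 0x0a         /* r_X: BPF X accumulator */

/* SPARC format 3, i=0: op=2 | rd | op3 | rs1 | 0 | rs2 */
static uint32_t fmt3_rr(unsigned int op3, unsigned int rd,
                        unsigned int rs1, unsigned int rs2)
{
        return (2u << 30) | (rd << 25) | (op3 << 19) | (rs1 << 14) | rs2;
}

int main(void)
{
        /* add %o1, %o2, %o1 -- "A += X" under the JIT's conventions */
        printf("0x%08x\n", (unsigned int)fmt3_rr(0x00, O1, O1, O2));
        return 0;
}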

arch/sparc/net/bpf_jit_asm.S
#include <asm/ptrace.h>
#include "bpf_jit.h"

#ifdef CONFIG_SPARC64
#define SAVE_SZ 176
#define SCRATCH_OFF STACK_BIAS + 128
#define BE_PTR(label) be,pn %xcc, label
#else
#define SAVE_SZ 96
#define SCRATCH_OFF 72
#define BE_PTR(label) be label
#endif
#define SKF_MAX_NEG_OFF (-0x200000) /* SKF_LL_OFF from filter.h */

        .text
        /* Load a 32-bit big-endian word from the packet at offset r_OFF
         * into r_A.  Negative offsets branch to the negative-offset
         * handler; offsets past the linear header go through the
         * skb_copy_bits() slow path.
         */
        .globl  bpf_jit_load_word
bpf_jit_load_word:
        cmp     r_OFF, 0
        bl      bpf_slow_path_word_neg
        nop
        .globl  bpf_jit_load_word_positive_offset
bpf_jit_load_word_positive_offset:
        sub     r_HEADLEN, r_OFF, r_TMP
        cmp     r_TMP, 3
        ble     bpf_slow_path_word
        add     r_SKB_DATA, r_OFF, r_TMP
        andcc   r_TMP, 3, %g0
        bne     load_word_unaligned
        nop
        retl
        ld      [r_SKB_DATA + r_OFF], r_A

        /* Assemble the word a byte at a time when the address is not
         * 4-byte aligned.
         */
load_word_unaligned:
        ldub    [r_TMP + 0x0], r_OFF
        ldub    [r_TMP + 0x1], r_TMP2
        sll     r_OFF, 8, r_OFF
        or      r_OFF, r_TMP2, r_OFF
        ldub    [r_TMP + 0x2], r_TMP2
        sll     r_OFF, 8, r_OFF
        or      r_OFF, r_TMP2, r_OFF
        ldub    [r_TMP + 0x3], r_TMP2
        sll     r_OFF, 8, r_OFF
        retl
        or      r_OFF, r_TMP2, r_A
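
For readers less used to SPARC assembly, the standalone C sketch below (illustrative names only, not part of the commit) models what the bpf_jit_load_word fast path computes. SPARC is big-endian, so the aligned ld and the byte-by-byte unaligned path yield the same network-order word in r_A.

/* Illustrative model of the load_word fast path; not from the commit. */
#include <stdint.h>

uint32_t load_word_fast_path(const uint8_t *skb_data, uint32_t headlen,
                             int off)
{
        const uint8_t *p = skb_data + off;

        /*
         * Only reached when off >= 0 and headlen - off > 3, i.e. all
         * four bytes live in the linear packet area; otherwise the
         * stubs above branch to the slow paths.
         */
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}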

        /* Load a 16-bit big-endian halfword from the packet at offset
         * r_OFF into r_A.
         */
        .globl  bpf_jit_load_half
bpf_jit_load_half:
        cmp     r_OFF, 0
        bl      bpf_slow_path_half_neg
        nop
        .globl  bpf_jit_load_half_positive_offset
bpf_jit_load_half_positive_offset:
        sub     r_HEADLEN, r_OFF, r_TMP
        cmp     r_TMP, 1
        ble     bpf_slow_path_half
        add     r_SKB_DATA, r_OFF, r_TMP
        andcc   r_TMP, 1, %g0
        bne     load_half_unaligned
        nop
        retl
        lduh    [r_SKB_DATA + r_OFF], r_A

load_half_unaligned:
        ldub    [r_TMP + 0x0], r_OFF
        ldub    [r_TMP + 0x1], r_TMP2
        sll     r_OFF, 8, r_OFF
        retl
        or      r_OFF, r_TMP2, r_A

        /* Load one byte from the packet at offset r_OFF into r_A. */
        .globl  bpf_jit_load_byte
bpf_jit_load_byte:
        cmp     r_OFF, 0
        bl      bpf_slow_path_byte_neg
        nop
        .globl  bpf_jit_load_byte_positive_offset
bpf_jit_load_byte_positive_offset:
        cmp     r_OFF, r_HEADLEN
        bge     bpf_slow_path_byte
        nop
        retl
        ldub    [r_SKB_DATA + r_OFF], r_A

        /* "msh" byte load: X = (packet[r_OFF] & 0xf) << 2, classically
         * used to fetch the IPv4 header length in bytes.
         */
        .globl  bpf_jit_load_byte_msh
bpf_jit_load_byte_msh:
        cmp     r_OFF, 0
        bl      bpf_slow_path_byte_msh_neg
        nop
        .globl  bpf_jit_load_byte_msh_positive_offset
bpf_jit_load_byte_msh_positive_offset:
        cmp     r_OFF, r_HEADLEN
        bge     bpf_slow_path_byte_msh
        nop
        ldub    [r_SKB_DATA + r_OFF], r_OFF
        and     r_OFF, 0xf, r_OFF
        retl
        sll     r_OFF, 2, r_X
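
The C equivalent of this helper is a one-liner; the classic BPF instruction it backs is usually written ldxb 4*([k]&0xf) and pulls the IPv4 header length into X. The function name below is illustrative, not from the commit.

/* Illustrative model of the "msh" load; not from the commit. */
#include <stdint.h>

uint32_t load_byte_msh_model(const uint8_t *skb_data, int off)
{
        return ((uint32_t)skb_data[off] & 0xf) << 2;    /* becomes BPF X */
}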

/* Slow path: have skb_copy_bits() copy LEN bytes starting at offset
 * r_OFF into a scratch slot on the stack; the caller branches to
 * bpf_error if it returned a negative value.
 */
#define bpf_slow_path_common(LEN) \
        save    %sp, -SAVE_SZ, %sp; \
        mov     %i0, %o0; \
        mov     r_OFF, %o1; \
        add     %fp, SCRATCH_OFF, %o2; \
        call    skb_copy_bits; \
        mov     (LEN), %o3; \
        cmp     %o0, 0; \
        restore;

bpf_slow_path_word:
        bpf_slow_path_common(4)
        bl      bpf_error
        ld      [%sp + SCRATCH_OFF], r_A
        retl
        nop

bpf_slow_path_half:
        bpf_slow_path_common(2)
        bl      bpf_error
        lduh    [%sp + SCRATCH_OFF], r_A
        retl
        nop

bpf_slow_path_byte:
        bpf_slow_path_common(1)
        bl      bpf_error
        ldub    [%sp + SCRATCH_OFF], r_A
        retl
        nop

bpf_slow_path_byte_msh:
        bpf_slow_path_common(1)
        bl      bpf_error
        ldub    [%sp + SCRATCH_OFF], r_A
        and     r_OFF, 0xf, r_OFF
        retl
        sll     r_OFF, 2, r_X
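
In C terms, the slow paths amount to the sketch below. skb_copy_bits() is the real kernel helper the stubs call; the wrapper function and its name are invented here for illustration and are not part of the commit.

/* Illustrative kernel-context sketch of the slow path; not from the commit. */
#include <linux/skbuff.h>

static int slow_path_load_word_model(const struct sk_buff *skb, int off,
                                     u32 *val)
{
        u32 scratch;    /* the stubs use a stack slot at %fp + SCRATCH_OFF */

        /*
         * skb_copy_bits() also reaches data in paged fragments beyond
         * the linear header and returns a negative error when the
         * requested range is out of bounds.
         */
        if (skb_copy_bits(skb, off, &scratch, 4) < 0)
                return -1;      /* the asm branches to bpf_error instead */

        *val = scratch;         /* the asm reloads it with ld into r_A */
        return 0;
}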

/* Negative-offset slow path: ask the generic helper
 * bpf_internal_load_pointer_neg_helper() for a pointer to the data;
 * the macro branches to bpf_error when the helper returns NULL.
 */
#define bpf_negative_common(LEN) \
        save    %sp, -SAVE_SZ, %sp; \
        mov     %i0, %o0; \
        mov     r_OFF, %o1; \
        call    bpf_internal_load_pointer_neg_helper; \
        mov     (LEN), %o2; \
        mov     %o0, r_TMP; \
        cmp     %o0, 0; \
        BE_PTR(bpf_error); \
        restore;

        /* Offsets more negative than SKF_MAX_NEG_OFF are invalid. */
bpf_slow_path_word_neg:
        sethi   %hi(SKF_MAX_NEG_OFF), r_TMP
        cmp     r_OFF, r_TMP
        bl      bpf_error
        nop
        .globl  bpf_jit_load_word_negative_offset
bpf_jit_load_word_negative_offset:
        bpf_negative_common(4)
        andcc   r_TMP, 3, %g0
        bne     load_word_unaligned
        nop
        retl
        ld      [r_TMP], r_A

bpf_slow_path_half_neg:
        sethi   %hi(SKF_MAX_NEG_OFF), r_TMP
        cmp     r_OFF, r_TMP
        bl      bpf_error
        nop
        .globl  bpf_jit_load_half_negative_offset
bpf_jit_load_half_negative_offset:
        bpf_negative_common(2)
        andcc   r_TMP, 1, %g0
        bne     load_half_unaligned
        nop
        retl
        lduh    [r_TMP], r_A

bpf_slow_path_byte_neg:
        sethi   %hi(SKF_MAX_NEG_OFF), r_TMP
        cmp     r_OFF, r_TMP
        bl      bpf_error
        nop
        .globl  bpf_jit_load_byte_negative_offset
bpf_jit_load_byte_negative_offset:
        bpf_negative_common(1)
        retl
        ldub    [r_TMP], r_A

bpf_slow_path_byte_msh_neg:
        sethi   %hi(SKF_MAX_NEG_OFF), r_TMP
        cmp     r_OFF, r_TMP
        bl      bpf_error
        nop
        .globl  bpf_jit_load_byte_msh_negative_offset
bpf_jit_load_byte_msh_negative_offset:
        bpf_negative_common(1)
        ldub    [r_TMP], r_OFF
        and     r_OFF, 0xf, r_OFF
        retl
        sll     r_OFF, 2, r_X

        /* Shared error exit: return 0 from the generated filter by
         * clearing %o0 and jumping through the return address that the
         * generated code saved in r_saved_O7.
         */
bpf_error:
        jmpl    r_saved_O7 + 8, %g0
        clr     %o0
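
The negative-offset paths above lean on bpf_internal_load_pointer_neg_helper(), the helper net/core/filter.c exports for BPF JITs to resolve the special SKF_NET_OFF (-0x100000) and SKF_LL_OFF (-0x200000) ranges. The kernel-context sketch below models bpf_negative_common() plus a byte load; the wrapper function is invented here for illustration and is not part of the commit.

/* Illustrative kernel-context sketch of the negative-offset path. */
#include <linux/skbuff.h>

/* Real helper exported by net/core/filter.c for BPF JITs. */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
                                           int k, unsigned int size);

static int neg_path_load_byte_model(const struct sk_buff *skb, int off,
                                    u8 *val)
{
        /* Returns a pointer into the packet for the negative offset,
         * or NULL when the data is not accessible.
         */
        u8 *ptr = bpf_internal_load_pointer_neg_helper(skb, off, 1);

        if (!ptr)
                return -1;      /* the asm branches to bpf_error instead */
        *val = *ptr;
        return 0;
}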

The diff for arch/sparc/net/bpf_jit_comp.c, the JIT code generator itself, is collapsed.