Commit de5cb6eb authored by Martin Schwidefsky

s390: use expoline thunks in the BPF JIT

The BPF JIT needs safeguarding against Spectre v2 in the sk_load_xxx
assembler stubs, and the indirect branches generated by the JIT itself
need to be converted to expolines.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 6deaa3bb
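
For context: an expoline replaces a predicted indirect branch with a jump
to a thunk that performs the branch via an execute-type instruction, whose
target is not fed from the branch-prediction tables; speculative execution
past it is trapped in a self-branch. A minimal sketch, assuming facility 35
(execute-relative-long) is available; the thunk label and the choice of
%r6 are illustrative, not taken from the commit:

	j	__s390_indirect_jump_r6	# was: br %r6
	...
__s390_indirect_jump_r6:
	exrl	%r0,0f			# execute the "br %r6" below out of line
	j	.			# speculation is trapped in this self-branch
0:	br	%r6			# the architectural indirect branch
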
arch/s390/net/bpf_jit.S
@@ -9,6 +9,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/nospec-insn.h>
 #include "bpf_jit.h"
 
 /*
@@ -54,7 +55,7 @@ ENTRY(sk_load_##NAME##_pos);				\
 	clg	%r3,STK_OFF_HLEN(%r15);	/* Offset + SIZE > hlen? */	\
 	jh	sk_load_##NAME##_slow;					\
 	LOAD	%r14,-SIZE(%r3,%r12);	/* Get data from skb */		\
-	b	OFF_OK(%r6);		/* Return */			\
+	B_EX	OFF_OK,%r6;		/* Return */			\
 									\
 sk_load_##NAME##_slow:;							\
 	lgr	%r2,%r7;		/* Arg1 = skb pointer */	\
@@ -64,11 +65,14 @@ sk_load_##NAME##_slow:;				\
 	brasl	%r14,skb_copy_bits;	/* Get data from skb */		\
 	LOAD	%r14,STK_OFF_TMP(%r15);	/* Load from temp buffer */	\
 	ltgr	%r2,%r2;		/* Set cc to (%r2 != 0) */	\
-	br	%r6;			/* Return */
+	BR_EX	%r6;			/* Return */
 
 sk_load_common(word, 4, llgf)		/* r14 = *(u32 *) (skb->data+offset) */
 sk_load_common(half, 2, llgh)		/* r14 = *(u16 *) (skb->data+offset) */
 
+	GEN_BR_THUNK %r6
+	GEN_B_THUNK OFF_OK,%r6
+
 /*
  * Load 1 byte from SKB (optimized version)
  */
@@ -80,7 +84,7 @@ ENTRY(sk_load_byte_pos)
 	clg	%r3,STK_OFF_HLEN(%r15)	# Offset >= hlen?
 	jnl	sk_load_byte_slow
 	llgc	%r14,0(%r3,%r12)	# Get byte from skb
-	b	OFF_OK(%r6)		# Return OK
+	B_EX	OFF_OK,%r6		# Return OK
 
 sk_load_byte_slow:
 	lgr	%r2,%r7			# Arg1 = skb pointer
@@ -90,7 +94,7 @@ sk_load_byte_slow:
 	brasl	%r14,skb_copy_bits	# Get data from skb
 	llgc	%r14,STK_OFF_TMP(%r15)	# Load result from temp buffer
 	ltgr	%r2,%r2			# Set cc to (%r2 != 0)
-	br	%r6			# Return cc
+	BR_EX	%r6			# Return cc
 
 #define sk_negative_common(NAME, SIZE, LOAD)				\
 sk_load_##NAME##_slow_neg:;						\
@@ -104,7 +108,7 @@ sk_load_##NAME##_slow_neg:;				\
 	jz	bpf_error;						\
 	LOAD	%r14,0(%r2);		/* Get data from pointer */	\
 	xr	%r3,%r3;		/* Set cc to zero */		\
-	br	%r6;			/* Return cc */
+	BR_EX	%r6;			/* Return cc */
 
 sk_negative_common(word, 4, llgf)
 sk_negative_common(half, 2, llgh)
@@ -113,4 +117,4 @@ sk_negative_common(byte, 1, llgc)
 bpf_error:
 # force a return 0 from jit handler
 	ltgr	%r15,%r15	# Set condition code
-	br	%r6
+	BR_EX	%r6
arch/s390/net/bpf_jit_comp.c
@@ -25,6 +25,8 @@
 #include <linux/bpf.h>
 #include <asm/cacheflush.h>
 #include <asm/dis.h>
+#include <asm/facility.h>
+#include <asm/nospec-branch.h>
 #include <asm/set_memory.h>
 #include "bpf_jit.h"
@@ -41,6 +43,8 @@ struct bpf_jit {
 	int base_ip;		/* Base address for literal pool */
 	int ret0_ip;		/* Address of return 0 */
 	int exit_ip;		/* Address of exit */
+	int r1_thunk_ip;	/* Address of expoline thunk for 'br %r1' */
+	int r14_thunk_ip;	/* Address of expoline thunk for 'br %r14' */
 	int tail_call_start;	/* Tail call start offset */
 	int labels[1];		/* Labels for local jumps */
 };
@@ -250,6 +254,19 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
 	REG_SET_SEEN(b2);					\
 })
 
+#define EMIT6_PCREL_RILB(op, b, target)				\
+({								\
+	int rel = (target - jit->prg) / 2;			\
+	_EMIT6(op | reg_high(b) << 16 | rel >> 16, rel & 0xffff);\
+	REG_SET_SEEN(b);					\
+})
+
+#define EMIT6_PCREL_RIL(op, target)				\
+({								\
+	int rel = (target - jit->prg) / 2;			\
+	_EMIT6(op | rel >> 16, rel & 0xffff);			\
+})
+
 #define _EMIT6_IMM(op, imm)					\
 ({								\
 	unsigned int __imm = (imm);				\
@@ -469,8 +486,45 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
 	EMIT4(0xb9040000, REG_2, BPF_REG_0);
 	/* Restore registers */
 	save_restore_regs(jit, REGS_RESTORE, stack_depth);
+	if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
+		jit->r14_thunk_ip = jit->prg;
+		/* Generate __s390_indirect_jump_r14 thunk */
+		if (test_facility(35)) {
+			/* exrl %r0,.+10 */
+			EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
+		} else {
+			/* larl %r1,.+14 */
+			EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
+			/* ex 0,0(%r1) */
+			EMIT4_DISP(0x44000000, REG_0, REG_1, 0);
+		}
+		/* j . */
+		EMIT4_PCREL(0xa7f40000, 0);
+	}
 	/* br %r14 */
 	_EMIT2(0x07fe);
+
+	if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable &&
+	    (jit->seen & SEEN_FUNC)) {
+		jit->r1_thunk_ip = jit->prg;
+		/* Generate __s390_indirect_jump_r1 thunk */
+		if (test_facility(35)) {
+			/* exrl %r0,.+10 */
+			EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
+			/* j . */
+			EMIT4_PCREL(0xa7f40000, 0);
+			/* br %r1 */
+			_EMIT2(0x07f1);
+		} else {
+			/* larl %r1,.+14 */
+			EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
+			/* ex 0,S390_lowcore.br_r1_trampoline */
+			EMIT4_DISP(0x44000000, REG_0, REG_0,
+				   offsetof(struct lowcore, br_r1_trampoline));
+			/* j . */
+			EMIT4_PCREL(0xa7f40000, 0);
+		}
+	}
 }
 
 /*
@@ -966,8 +1020,13 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
 		/* lg %w1,<d(imm)>(%l) */
 		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
 			      EMIT_CONST_U64(func));
-		/* basr %r14,%w1 */
-		EMIT2(0x0d00, REG_14, REG_W1);
+		if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
+			/* brasl %r14,__s390_indirect_jump_r1 */
+			EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
+		} else {
+			/* basr %r14,%w1 */
+			EMIT2(0x0d00, REG_14, REG_W1);
+		}
 		/* lgr %b0,%r2: load return value into %b0 */
 		EMIT4(0xb9040000, BPF_REG_0, REG_2);
 		if ((jit->seen & SEEN_SKB) &&
......
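
On machines without facility 35, the epilogue hunk above falls back to the
older ex instruction. A sketch of the instruction stream the r14 thunk
produces in that case (the label is illustrative; %r1 is free as a scratch
register here, and the trailing "br %r14" is the plain return when
expolines are disabled):

	larl	%r1,0f			# point scratch %r1 at the "br %r14" below
	ex	0,0(%r1)		# execute "br %r14" as an ex target
	j	.			# trap speculative execution
0:	br	%r14			# reached only via the ex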