Commit d63903bb authored by Xi Wang, committed by Catalin Marinas

arm64: bpf: fix endianness conversion bugs

Upper bits should be zeroed in endianness conversion:

- even when there's no need to change endianness (i.e., BPF_FROM_BE
  on big endian or BPF_FROM_LE on little endian);

- after rev16.

This patch fixes such bugs by emitting extra instructions to clear
upper bits.

Cc: Zi Shen Lim <zlim.lnx@gmail.com>
Acked-by: Alexei Starovoitov <ast@plumgrid.com>
Fixes: e54bcde3 ("arm64: eBPF JIT compiler")
Cc: <stable@vger.kernel.org> # 3.18+
Signed-off-by: Xi Wang <xi.wang@gmail.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 8eee539d
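
For context (not part of the commit): on AArch64, rev16 byte-swaps each 16-bit halfword of the register but leaves bits 63:16 populated, so a 16-bit endianness conversion needs an explicit zero-extend afterwards. A minimal host-side C sketch of that behaviour, with illustrative constants:

#include <stdint.h>
#include <stdio.h>

/* Model of the 64-bit REV16 instruction: byte-swap each of the
 * four 16-bit halfwords in place; bits 63:16 remain populated.
 */
static uint64_t rev16(uint64_t x)
{
	return ((x & 0x00ff00ff00ff00ffULL) << 8) |
	       ((x & 0xff00ff00ff00ff00ULL) >> 8);
}

int main(void)
{
	uint64_t dst = 0xdeadbeef00001234ULL;	/* illustrative value */

	/* unfixed JIT: stale bytes survive above bit 15 */
	printf("rev16 only : %#018llx\n", (unsigned long long)rev16(dst));
	/* fixed JIT: a following uxth clears bits 63:16 */
	printf("rev16+uxth : %#018llx\n",
	       (unsigned long long)(rev16(dst) & 0xffffULL));
	return 0;
}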
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -110,6 +110,10 @@
 /* Rd = Rn >> shift; signed */
 #define A64_ASR(sf, Rd, Rn, shift) A64_SBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
 
+/* Zero extend */
+#define A64_UXTH(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 15)
+#define A64_UXTW(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 31)
+
 /* Move wide (immediate) */
 #define A64_MOVEW(sf, Rd, imm16, shift, type) \
 	aarch64_insn_gen_movewide(Rd, imm16, shift, \
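
The new macros are aliases of the unsigned bitfield move: UBFM Rd, Rn, #0, #imms copies bits imms:0 of the source and clears everything above them, so imms = 15 gives UXTH and imms = 31 gives UXTW. A hedged C model of that zero-extension (the helper name is ours, not the kernel's):

#include <stdint.h>

/* Model of UBFM Rd, Rn, #0, #imms (immr == 0): keep bits imms:0,
 * clear bits 63:imms+1. ubfm_zext(x, 15) acts as UXTH and
 * ubfm_zext(x, 31) as UXTW. Helper name is illustrative only.
 */
static inline uint64_t ubfm_zext(uint64_t rn, unsigned int imms)
{
	uint64_t mask = (imms >= 63) ? ~0ULL : ((1ULL << (imms + 1)) - 1);

	return rn & mask;
}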
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -289,23 +289,41 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	case BPF_ALU | BPF_END | BPF_FROM_BE:
 #ifdef CONFIG_CPU_BIG_ENDIAN
 		if (BPF_SRC(code) == BPF_FROM_BE)
-			break;
+			goto emit_bswap_uxt;
 #else /* !CONFIG_CPU_BIG_ENDIAN */
 		if (BPF_SRC(code) == BPF_FROM_LE)
-			break;
+			goto emit_bswap_uxt;
 #endif
 		switch (imm) {
 		case 16:
 			emit(A64_REV16(is64, dst, dst), ctx);
+			/* zero-extend 16 bits into 64 bits */
+			emit(A64_UXTH(is64, dst, dst), ctx);
 			break;
 		case 32:
 			emit(A64_REV32(is64, dst, dst), ctx);
+			/* upper 32 bits already cleared */
 			break;
 		case 64:
 			emit(A64_REV64(dst, dst), ctx);
 			break;
 		}
 		break;
+emit_bswap_uxt:
+		switch (imm) {
+		case 16:
+			/* zero-extend 16 bits into 64 bits */
+			emit(A64_UXTH(is64, dst, dst), ctx);
+			break;
+		case 32:
+			/* zero-extend 32 bits into 64 bits */
+			emit(A64_UXTW(is64, dst, dst), ctx);
+			break;
+		case 64:
+			/* nop */
+			break;
+		}
+		break;
 	/* dst = imm */
 	case BPF_ALU | BPF_MOV | BPF_K:
 	case BPF_ALU64 | BPF_MOV | BPF_K:
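
A minimal, hypothetical eBPF sequence that would exercise the fixed path on a little-endian host, built with the standard instruction macros from include/linux/filter.h (constants are illustrative):

#include <linux/filter.h>

/* After BPF_FROM_BE/16, R0 must hold exactly 0x3412: the JIT has
 * to discard the stale upper bytes that the unfixed code left in
 * bits 63:16.
 */
static struct bpf_insn prog[] = {
	BPF_LD_IMM64(BPF_REG_0, 0xdeadbeef00001234ULL),
	BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 16),
	BPF_EXIT_INSN(),
};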