Commit 55309dd3 authored by Daniel Borkmann, committed by David S. Miller

net: bpf: arm: address randomize and write protect JIT code

This is the ARM variant for 314beb9b ("x86: bpf_jit_comp: secure bpf
jit against spraying attacks").

It is now possible to implement it due to commits 75374ad4 ("ARM: mm:
Define set_memory_* functions for ARM") and dca9aa92 ("ARM: add
DEBUG_SET_MODULE_RONX option to Kconfig") which added infrastructure for
this facility.

Thus, this patch makes sure the BPF-generated JIT code is marked RO, like
other kernel text sections, and also lets the generated JIT code start at
a pseudorandom offset instead of on a page boundary. The holes are filled
with illegal instructions.

JIT tested on armv7hl with BPF test suite.

Reference: http://mainisusuallyafunction.blogspot.com/2012/11/attacking-hardened-linux-systems-with.html

Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Acked-by: Mircea Gherzan <mgherzan@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 738cbe72
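
Before the diff, it may help to see what the generic allocator from the parent
commit 738cbe72 does with the fill callback and the random offset. Below is a
simplified userspace sketch, reconstructed for illustration only: the names
alloc_jit_image, jit_header and fill_with_illegal_insns are invented for this
example, and the real code in kernel/bpf/core.c uses module_alloc() and
prandom_u32() rather than aligned_alloc() and rand().

/* Simplified sketch of the randomized JIT image layout applied by
 * bpf_jit_binary_alloc(); userspace reconstruction, not kernel code.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE       4096U
#define ROUND_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

struct jit_header {
        unsigned int pages;     /* number of pages backing the image */
        uint8_t image[];        /* JIT code starts somewhere in here */
};

static void fill_with_illegal_insns(void *area, unsigned int size)
{
        uint32_t *p = area;

        /* 0xe7ffffff is in ARM's permanently undefined encoding space. */
        while (size >= sizeof(uint32_t)) {
                *p++ = 0xe7ffffff;
                size -= sizeof(uint32_t);
        }
}

static struct jit_header *alloc_jit_image(unsigned int proglen,
                                          uint8_t **image_ptr,
                                          unsigned int alignment)
{
        struct jit_header *hdr;
        unsigned int size, hole, start;

        /* Reserve at least 128 spare bytes so there is always room to
         * shift the program start by a random amount.
         */
        size = ROUND_UP(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
        hdr = aligned_alloc(PAGE_SIZE, size);   /* kernel: module_alloc() */
        if (!hdr)
                return NULL;

        /* Every byte not overwritten by real code stays a trapping insn. */
        fill_with_illegal_insns(hdr, size);

        hdr->pages = size / PAGE_SIZE;
        hole = size - (proglen + sizeof(*hdr));
        if (hole > PAGE_SIZE - sizeof(*hdr))
                hole = PAGE_SIZE - sizeof(*hdr);

        /* Random, alignment-respecting offset for the program start;
         * keeping it inside the first page is what lets bpf_jit_free()
         * recover the header by masking with PAGE_MASK.
         */
        start = ((unsigned int)rand() % hole) & ~(alignment - 1);
        *image_ptr = &hdr->image[start];

        return hdr;
}

int main(void)
{
        uint8_t *image;
        struct jit_header *hdr = alloc_jit_image(512, &image, 4);

        if (!hdr)
                return 1;
        printf("header at %p, code starts at offset %zu\n",
               (void *)hdr, (size_t)(image - (uint8_t *)hdr));
        free(hdr);
        return 0;
}

Filling the whole area before any code is emitted means that every byte the
JIT does not overwrite remains a trapping instruction, both in the random gap
in front of the program and in the slack behind it.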
@@ -12,7 +12,6 @@
 #include <linux/compiler.h>
 #include <linux/errno.h>
 #include <linux/filter.h>
-#include <linux/moduleloader.h>
 #include <linux/netdevice.h>
 #include <linux/string.h>
 #include <linux/slab.h>
@@ -174,6 +173,15 @@ static inline bool is_load_to_a(u16 inst)
         }
 }
 
+static void jit_fill_hole(void *area, unsigned int size)
+{
+        /* Insert illegal UND instructions. */
+        u32 *ptr, fill_ins = 0xe7ffffff;
+        /* We are guaranteed to have aligned memory. */
+        for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
+                *ptr++ = fill_ins;
+}
+
 static void build_prologue(struct jit_ctx *ctx)
 {
         u16 reg_set = saved_regs(ctx);
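
A note on the fill pattern itself: 0xe7ffffff lies in the ARM permanently
undefined (UDF) encoding space (condition code AL, bits[27:20] = 0x7f,
bits[7:4] = 0b1111), so stray execution of the padding immediately raises an
undefined-instruction exception. A tiny host-side check of those bit fields,
for illustration only and not part of the patch:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t fill = 0xe7ffffff;

        assert(((fill >> 28) & 0xf)  == 0xe);   /* cond == AL (1110)     */
        assert(((fill >> 20) & 0xff) == 0x7f);  /* bits[27:20] == 0x7f   */
        assert(((fill >>  4) & 0xf)  == 0xf);   /* bits[7:4]   == 0b1111 */
        return 0;
}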
@@ -859,9 +867,11 @@ static int build_body(struct jit_ctx *ctx)
 
 void bpf_jit_compile(struct bpf_prog *fp)
 {
+        struct bpf_binary_header *header;
         struct jit_ctx ctx;
         unsigned tmp_idx;
         unsigned alloc_size;
+        u8 *target_ptr;
 
         if (!bpf_jit_enable)
                 return;
@@ -897,13 +907,15 @@ void bpf_jit_compile(struct bpf_prog *fp)
         /* there's nothing after the epilogue on ARMv7 */
         build_epilogue(&ctx);
 #endif
         alloc_size = 4 * ctx.idx;
-        ctx.target = module_alloc(alloc_size);
-        if (unlikely(ctx.target == NULL))
+        header = bpf_jit_binary_alloc(alloc_size, &target_ptr,
+                                      4, jit_fill_hole);
+        if (header == NULL)
                 goto out;
 
+        ctx.target = (u32 *) target_ptr;
         ctx.idx = 0;
 
         build_prologue(&ctx);
         build_body(&ctx);
         build_epilogue(&ctx);
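
For context, the header and target_ptr returned by bpf_jit_binary_alloc()
refer to the generic JIT image header from include/linux/filter.h, which at
this point looked roughly like the paraphrase below (shown only to clarify
what header->pages and the image pointer mean; consult the header for the
exact definition):

struct bpf_binary_header {
        unsigned int pages;     /* size of the whole allocation, in pages */
        u8 image[];             /* program begins at a random offset in here */
};

Because the random start offset never leaves the first page of the image,
masking fp->bpf_func with PAGE_MASK, as bpf_jit_free() does below, lands back
on this header.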
@@ -919,6 +931,7 @@ void bpf_jit_compile(struct bpf_prog *fp)
         /* there are 2 passes here */
         bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
 
+        set_memory_ro((unsigned long)header, header->pages);
         fp->bpf_func = (void *)ctx.target;
         fp->jited = 1;
 out:
@@ -928,8 +941,15 @@ void bpf_jit_compile(struct bpf_prog *fp)
 
 void bpf_jit_free(struct bpf_prog *fp)
 {
-        if (fp->jited)
-                module_free(NULL, fp->bpf_func);
+        unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
+        struct bpf_binary_header *header = (void *)addr;
+
+        if (!fp->jited)
+                goto free_filter;
 
+        set_memory_rw(addr, header->pages);
+        bpf_jit_binary_free(header);
+
+free_filter:
         bpf_prog_unlock_free(fp);
 }
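
For completeness, the generic bpf_jit_binary_free() used above was, at this
stage, essentially a thin wrapper around module_free(); the following is a
sketch from memory, with the authoritative version in kernel/bpf/core.c:

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
        /* Hand the whole image back to the module area. The caller has
         * already flipped the pages back to read-write, so memory reused
         * from here does not inherit the read-only protection.
         */
        module_free(NULL, hdr);
}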