Commit 41de23e2 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2019-11-02

The following pull-request contains BPF updates for your *net* tree.

We've added 6 non-merge commits during the last 6 day(s) which contain
a total of 8 files changed, 35 insertions(+), 9 deletions(-).

The main changes are:

1) Fix ppc BPF JIT's tail call implementation by performing a second pass
   to gather a stable JIT context before opcode emission, from Eric Dumazet.

2) Fix build of BPF samples' sys_perf_event_open() usage to compile out the
   unavailable test_attr__{enabled,open} checks. Also fix potential overflows
   in bpf_map_{area_alloc,charge_init} on 32 bit archs, from Björn Töpel.

3) Fix narrow loads of bpf_sysctl context fields with offset > 0 on big endian
   archs like s390x and also improve the test coverage, from Ilya Leoshkevich.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 0763b3e8 7de08690
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3053,6 +3053,7 @@ M: Daniel Borkmann <daniel@iogearbox.net>
 R: Martin KaFai Lau <kafai@fb.com>
 R: Song Liu <songliubraving@fb.com>
 R: Yonghong Song <yhs@fb.com>
+R: Andrii Nakryiko <andriin@fb.com>
 L: netdev@vger.kernel.org
 L: bpf@vger.kernel.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -1141,6 +1141,19 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 		goto out_addrs;
 	}
 
+	/*
+	 * If we have seen a tail call, we need a second pass.
+	 * This is because bpf_jit_emit_common_epilogue() is called
+	 * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
+	 */
+	if (cgctx.seen & SEEN_TAILCALL) {
+		cgctx.idx = 0;
+		if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
+			fp = org_fp;
+			goto out_addrs;
+		}
+	}
+
 	/*
 	 * Pretend to build prologue, given the features we've seen.  This will
 	 * update ctgtx.idx as it pretends to output instructions, then we can
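The hunk above is the whole of point 1: the first body pass both discovers features (recording them in ctx->seen) and, for tail calls, emits an epilogue whose shape depends on those same flags, so the first pass can emit from incomplete information. Rerunning the body pass once ctx->seen has settled makes the output consistent. A minimal sketch of that two-pass pattern, using hypothetical names (struct ctx, build_body) rather than the kernel's actual JIT types:

```c
#include <stdbool.h>

#define SEEN_TAILCALL 0x1		/* feature flag discovered while emitting */

struct ctx {
	unsigned int seen;		/* flags accumulated during emission */
	unsigned int idx;		/* next output slot */
};

/* Stand-in body pass: a real one walks the BPF program, bumps ctx->idx
 * per emitted opcode, ORs feature flags into ctx->seen, and for tail
 * calls emits an epilogue based on the ctx->seen it has so far.
 */
static bool build_body(struct ctx *ctx)
{
	ctx->seen |= SEEN_TAILCALL;
	ctx->idx++;
	return true;
}

static bool jit_compile(struct ctx *ctx)
{
	if (!build_body(ctx))		/* pass 1: ctx->seen still accumulating */
		return false;

	if (ctx->seen & SEEN_TAILCALL) {
		ctx->idx = 0;		/* restart output */
		if (!build_body(ctx))	/* pass 2: ctx->seen is now stable */
			return false;
	}
	return true;
}
```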
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -656,11 +656,11 @@ void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
 void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
-int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size);
+int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size);
 void bpf_map_charge_finish(struct bpf_map_memory *mem);
 void bpf_map_charge_move(struct bpf_map_memory *dst,
 			 struct bpf_map_memory *src);
-void *bpf_map_area_alloc(size_t size, int numa_node);
+void *bpf_map_area_alloc(u64 size, int numa_node);
 void bpf_map_area_free(void *base);
 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -1311,12 +1311,12 @@ static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
 		return false;
 
 	switch (off) {
-	case offsetof(struct bpf_sysctl, write):
+	case bpf_ctx_range(struct bpf_sysctl, write):
 		if (type != BPF_READ)
 			return false;
 		bpf_ctx_record_field_size(info, size_default);
 		return bpf_ctx_narrow_access_ok(off, size, size_default);
-	case offsetof(struct bpf_sysctl, file_pos):
+	case bpf_ctx_range(struct bpf_sysctl, file_pos):
 		if (type == BPF_READ) {
 			bpf_ctx_record_field_size(info, size_default);
 			return bpf_ctx_narrow_access_ok(off, size, size_default);
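Why offsetof() was wrong here: a switch case on offsetof(field) matches only a load starting at the field's first byte, while a narrow (1- or 2-byte) load on a big-endian target starts partway into the field and fell through to the rejecting default. bpf_ctx_range() turns the case into a range over every byte of the field. A simplified stand-in for that macro, using GNU C case ranges and a hypothetical struct layout:

```c
#include <stdbool.h>
#include <stddef.h>

struct sysctl_ctx {			/* hypothetical layout */
	unsigned int write;		/* bytes 0..3 */
	unsigned int file_pos;		/* bytes 4..7 */
};

/* Simplified stand-in for the kernel's bpf_ctx_range(): a GNU C case
 * range spanning every byte of the member, not just its first byte.
 */
#define ctx_range(TYPE, MEMBER)						\
	offsetof(TYPE, MEMBER) ...					\
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER) - 1)

static bool is_valid_access(int off)
{
	switch (off) {
	case ctx_range(struct sysctl_ctx, file_pos):
		return true;	/* off 4, 5, 6 and 7 all match now */
	default:
		return false;	/* with plain offsetof(), off 5..7 landed here */
	}
}
```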
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -126,7 +126,7 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
 	return map;
 }
 
-void *bpf_map_area_alloc(size_t size, int numa_node)
+void *bpf_map_area_alloc(u64 size, int numa_node)
 {
 	/* We really just want to fail instead of triggering OOM killer
 	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
@@ -141,6 +141,9 @@ void *bpf_map_area_alloc(size_t size, int numa_node)
 	const gfp_t flags = __GFP_NOWARN | __GFP_ZERO;
 	void *area;
 
+	if (size >= SIZE_MAX)
+		return NULL;
+
 	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
 		area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags,
 				    numa_node);
@@ -197,7 +200,7 @@ static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
 	atomic_long_sub(pages, &user->locked_vm);
 }
 
-int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size)
+int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
 {
 	u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
 	struct user_struct *user;
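The u64 parameter plus the size >= SIZE_MAX guard close a 32-bit truncation hole: callers compute map sizes in 64 bits, and passing that through a 32-bit size_t silently drops the high bits, so a huge request could look tiny or even zero-sized. A standalone illustration of the truncation, with made-up numbers rather than an actual map layout:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t entries = 1ULL << 30;		/* 2^30 map entries	*/
	uint64_t elem_size = 16;		/* 16 bytes per entry	*/
	uint64_t total = entries * elem_size;	/* 2^34 bytes		*/

	/* On a 32-bit arch, size_t is 32 bits, so converting keeps only
	 * the low half: 2^34 mod 2^32 == 0, and the huge request
	 * degenerates into a zero-byte allocation. There, SIZE_MAX is
	 * 2^32 - 1, so comparing the full 64-bit size against it
	 * rejects oversized requests before any narrowing happens.
	 */
	uint32_t as_32bit_size_t = (uint32_t)total;
	printf("64-bit size: %llu, after truncation: %u\n",
	       (unsigned long long)total, as_32bit_size_t);
	return 0;
}
```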
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -176,6 +176,7 @@ KBUILD_HOSTCFLAGS += -I$(srctree)/tools/lib/bpf/
 KBUILD_HOSTCFLAGS += -I$(srctree)/tools/testing/selftests/bpf/
 KBUILD_HOSTCFLAGS += -I$(srctree)/tools/lib/ -I$(srctree)/tools/include
 KBUILD_HOSTCFLAGS += -I$(srctree)/tools/perf
+KBUILD_HOSTCFLAGS += -DHAVE_ATTR_TEST=0
 
 HOSTCFLAGS_bpf_load.o += -I$(objtree)/usr/include -Wno-unused-variable
--- a/tools/perf/perf-sys.h
+++ b/tools/perf/perf-sys.h
@@ -15,7 +15,9 @@ void test_attr__init(void);
 void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
 		     int fd, int group_fd, unsigned long flags);
 
-#define HAVE_ATTR_TEST
+#ifndef HAVE_ATTR_TEST
+#define HAVE_ATTR_TEST 1
+#endif
 
 static inline int
 sys_perf_event_open(struct perf_event_attr *attr,
@@ -27,7 +29,7 @@ sys_perf_event_open(struct perf_event_attr *attr,
 	fd = syscall(__NR_perf_event_open, attr, pid, cpu,
 		     group_fd, flags);
 
-#ifdef HAVE_ATTR_TEST
+#if HAVE_ATTR_TEST
 	if (unlikely(test_attr__enabled))
 		test_attr__open(attr, pid, cpu, fd, group_fd, flags);
 #endif
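Two details above make the opt-out work: #ifndef lets the Makefile's -DHAVE_ATTR_TEST=0 win over the header's default of 1, and switching #ifdef to #if makes the guard test the macro's value (with #ifdef, even a definition of 0 would have kept the test hooks compiled in, breaking links against builds that lack test_attr__*). The same pattern in isolation, with a hypothetical FEATURE_TRACE flag:

```c
/* Default the feature on, but let the build override it:
 * compiling with  cc -DFEATURE_TRACE=0  drops the hook entirely.
 */
#ifndef FEATURE_TRACE
#define FEATURE_TRACE 1
#endif

#include <stdio.h>

static void trace_hook(void)
{
	puts("trace hook ran");
}

int main(void)
{
#if FEATURE_TRACE	/* tests the value; #ifdef would be true even for 0 */
	trace_hook();
#endif
	return 0;
}
```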
--- a/tools/testing/selftests/bpf/test_sysctl.c
+++ b/tools/testing/selftests/bpf/test_sysctl.c
@@ -161,9 +161,14 @@ static struct sysctl_test tests[] = {
 		.descr = "ctx:file_pos sysctl:read read ok narrow",
 		.insns = {
 			/* If (file_pos == X) */
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_1,
 				    offsetof(struct bpf_sysctl, file_pos)),
-			BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 2),
+#else
+			BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_1,
+				    offsetof(struct bpf_sysctl, file_pos) + 3),
+#endif
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 4, 2),
 
 			/* return ALLOW; */
 			BPF_MOV64_IMM(BPF_REG_0, 1),
@@ -176,6 +181,7 @@ static struct sysctl_test tests[] = {
 		.attach_type = BPF_CGROUP_SYSCTL,
 		.sysctl = "kernel/ostype",
 		.open_flags = O_RDONLY,
+		.seek = 4,
 		.result = SUCCESS,
 	},
 	{
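The #if in the test exists because a one-byte narrow load reads a different byte of the 32-bit file_pos depending on byte order: the low byte sits at offset 0 on little-endian and at offset 3 on big-endian, and .seek = 4 makes that low byte nonzero so a wrongly placed load is actually detected. The same fact demonstrated in plain host C (not BPF):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t file_pos = 4;	/* the value the test seeks to */
	uint8_t bytes[4];

	memcpy(bytes, &file_pos, sizeof(bytes));
	/* Little endian: bytes[0] == 4, bytes[3] == 0.
	 * Big endian:    bytes[0] == 0, bytes[3] == 4.
	 * So the test loads offset 0 on LE and offset 3 on BE
	 * to observe the low byte either way.
	 */
	printf("byte0=%u byte3=%u\n", bytes[0], bytes[3]);
	return 0;
}
```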