Commit b0b0ab6f authored by Jakub Kicinski

Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Alexei Starovoitov says:

====================
pull-request: bpf 2023-07-12

We've added 5 non-merge commits during the last 7 day(s) which contain
a total of 7 files changed, 93 insertions(+), 28 deletions(-).

The main changes are:

1) Fix max stack depth check for async callbacks, from Kumar.

2) Fix inconsistent JIT image generation, from Björn.

3) Use trusted arguments in XDP hints kfuncs, from Larysa.

4) Fix memory leak in cpu_map_update_elem, from Pu.

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  xdp: use trusted arguments in XDP hints kfuncs
  bpf: cpumap: Fix memory leak in cpu_map_update_elem
  riscv, bpf: Fix inconsistent JIT image generation
  selftests/bpf: Add selftest for check_stack_max_depth bug
  bpf: Fix max stack depth check for async callbacks
====================

Link: https://lore.kernel.org/r/20230712223045.40182-1-alexei.starovoitov@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents fec3ebb5 2e06c57d
--- a/arch/riscv/net/bpf_jit.h
+++ b/arch/riscv/net/bpf_jit.h
@@ -69,7 +69,7 @@ struct rv_jit_context {
 	struct bpf_prog *prog;
 	u16 *insns;		/* RV insns */
 	int ninsns;
-	int body_len;
+	int prologue_len;
 	int epilogue_offset;
 	int *offset;		/* BPF to RV */
 	int nexentries;
@@ -216,8 +216,8 @@ static inline int rv_offset(int insn, int off, struct rv_jit_context *ctx)
 	int from, to;
 
 	off++; /* BPF branch is from PC+1, RV is from PC */
-	from = (insn > 0) ? ctx->offset[insn - 1] : 0;
-	to = (insn + off > 0) ? ctx->offset[insn + off - 1] : 0;
+	from = (insn > 0) ? ctx->offset[insn - 1] : ctx->prologue_len;
+	to = (insn + off > 0) ? ctx->offset[insn + off - 1] : ctx->prologue_len;
 	return ninsns_rvoff(to - from);
 }
--- a/arch/riscv/net/bpf_jit_core.c
+++ b/arch/riscv/net/bpf_jit_core.c
@@ -44,7 +44,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	unsigned int prog_size = 0, extable_size = 0;
 	bool tmp_blinded = false, extra_pass = false;
 	struct bpf_prog *tmp, *orig_prog = prog;
-	int pass = 0, prev_ninsns = 0, prologue_len, i;
+	int pass = 0, prev_ninsns = 0, i;
 	struct rv_jit_data *jit_data;
 	struct rv_jit_context *ctx;
 
@@ -83,6 +83,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		prog = orig_prog;
 		goto out_offset;
 	}
+
+	if (build_body(ctx, extra_pass, NULL)) {
+		prog = orig_prog;
+		goto out_offset;
+	}
+
 	for (i = 0; i < prog->len; i++) {
 		prev_ninsns += 32;
 		ctx->offset[i] = prev_ninsns;
@@ -91,12 +97,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	for (i = 0; i < NR_JIT_ITERATIONS; i++) {
 		pass++;
 		ctx->ninsns = 0;
+
+		bpf_jit_build_prologue(ctx);
+		ctx->prologue_len = ctx->ninsns;
+
 		if (build_body(ctx, extra_pass, ctx->offset)) {
 			prog = orig_prog;
 			goto out_offset;
 		}
-		ctx->body_len = ctx->ninsns;
-		bpf_jit_build_prologue(ctx);
+
 		ctx->epilogue_offset = ctx->ninsns;
 		bpf_jit_build_epilogue(ctx);
 
@@ -162,10 +171,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 
 	if (!prog->is_func || extra_pass) {
 		bpf_jit_binary_lock_ro(jit_data->header);
-		prologue_len = ctx->epilogue_offset - ctx->body_len;
 		for (i = 0; i < prog->len; i++)
-			ctx->offset[i] = ninsns_rvoff(prologue_len +
-						      ctx->offset[i]);
+			ctx->offset[i] = ninsns_rvoff(ctx->offset[i]);
 		bpf_prog_fill_jited_linfo(prog, ctx->offset);
 out_offset:
 	kfree(ctx->offset);
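For context, a minimal user-space sketch of the offset logic the RISC-V change fixes; this is illustrative only (the simplified rv_offset() below and all values are made up, and ninsns_rvoff() is omitted). With the prologue emitted first and ctx->offset[] holding absolute offsets, a branch whose source or target lies before BPF instruction 0 resolves to the end of the prologue rather than to image offset 0, so jump distances stay consistent across JIT passes.

/* Standalone illustration, not kernel code. */
#include <stdio.h>

/* Simplified stand-in for the kernel's rv_offset(); distances in RV insns. */
static int rv_offset(int insn, int off, const int *offset, int prologue_len)
{
	int from, to;

	off++; /* BPF branch is from PC+1, RV is from PC */
	from = (insn > 0) ? offset[insn - 1] : prologue_len;
	to = (insn + off > 0) ? offset[insn + off - 1] : prologue_len;
	return to - from;
}

int main(void)
{
	/* offset[i]: RV insns emitted up to and including BPF insn i,
	 * counted from the start of the image, prologue included. */
	int offset[] = { 10, 14, 20 };
	int prologue_len = 8;	/* RV insns in the prologue */

	/* Backward branch from BPF insn 2 to BPF insn 0: the target lands
	 * right after the prologue (8), not at 0, giving 8 - 14 = -6. */
	printf("jump distance: %d RV insns\n",
	       rv_offset(2, -3, offset, prologue_len));
	return 0;
}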
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -122,22 +122,6 @@ static void get_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
 	atomic_inc(&rcpu->refcnt);
 }
 
-/* called from workqueue, to workaround syscall using preempt_disable */
-static void cpu_map_kthread_stop(struct work_struct *work)
-{
-	struct bpf_cpu_map_entry *rcpu;
-
-	rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);
-
-	/* Wait for flush in __cpu_map_entry_free(), via full RCU barrier,
-	 * as it waits until all in-flight call_rcu() callbacks complete.
-	 */
-	rcu_barrier();
-
-	/* kthread_stop will wake_up_process and wait for it to complete */
-	kthread_stop(rcpu->kthread);
-}
-
 static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
 {
 	/* The tear-down procedure should have made sure that queue is
@@ -165,6 +149,30 @@ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
 	}
 }
 
+/* called from workqueue, to workaround syscall using preempt_disable */
+static void cpu_map_kthread_stop(struct work_struct *work)
+{
+	struct bpf_cpu_map_entry *rcpu;
+	int err;
+
+	rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);
+
+	/* Wait for flush in __cpu_map_entry_free(), via full RCU barrier,
+	 * as it waits until all in-flight call_rcu() callbacks complete.
+	 */
+	rcu_barrier();
+
+	/* kthread_stop will wake_up_process and wait for it to complete */
+	err = kthread_stop(rcpu->kthread);
+	if (err) {
+		/* kthread_stop may be called before cpu_map_kthread_run
+		 * is executed, so we need to release the memory related
+		 * to rcpu.
+		 */
+		put_cpu_map_entry(rcpu);
+	}
+}
+
 static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu,
 				     struct list_head *listp,
 				     struct xdp_cpumap_stats *stats)
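A kernel-module-style sketch of the kthread_stop() behaviour this fix relies on; it is illustrative only (worker_fn, demo_create_and_stop and the allocation are made up, not cpumap code). If a kthread was created but never woken, its thread function never runs, kthread_stop() returns -EINTR, and the stopper must release whatever the thread function would normally have released.

/* Illustrative only; mirrors the ownership pattern used above. */
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>

static int worker_fn(void *data)
{
	while (!kthread_should_stop())
		msleep(20);

	kfree(data);		/* normal exit path releases the resource */
	return 0;
}

static void demo_create_and_stop(void)
{
	struct task_struct *t;
	void *data = kmalloc(64, GFP_KERNEL);

	if (!data)
		return;

	t = kthread_create(worker_fn, data, "demo_kthread");
	if (IS_ERR(t)) {
		kfree(data);
		return;
	}

	/* Nobody called wake_up_process(t), so worker_fn() never ran.
	 * kthread_stop() therefore returns -EINTR, and the resource that
	 * worker_fn() would have freed must be freed here, just as
	 * cpu_map_kthread_stop() now drops the rcpu reference with
	 * put_cpu_map_entry(). */
	if (kthread_stop(t))
		kfree(data);
}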
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5642,8 +5642,9 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
 				verbose(env, "verifier bug. subprog has tail_call and async cb\n");
 				return -EFAULT;
 			}
-			/* async callbacks don't increase bpf prog stack size */
-			continue;
+			/* async callbacks don't increase bpf prog stack size unless called directly */
+			if (!bpf_pseudo_call(insn + i))
+				continue;
 		}
 		i = next_insn;
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -741,7 +741,7 @@ __bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash,
 __diag_pop();
 
 BTF_SET8_START(xdp_metadata_kfunc_ids)
-#define XDP_METADATA_KFUNC(_, name) BTF_ID_FLAGS(func, name, 0)
+#define XDP_METADATA_KFUNC(_, name) BTF_ID_FLAGS(func, name, KF_TRUSTED_ARGS)
 XDP_METADATA_KFUNC_xxx
 #undef XDP_METADATA_KFUNC
 BTF_SET8_END(xdp_metadata_kfunc_ids)
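With KF_TRUSTED_ARGS set, the XDP metadata kfuncs only accept trusted pointer arguments, in practice the unmodified ctx pointer the program was invoked with. A minimal BPF-side sketch of a valid call; the program name, section and format string are made up for illustration:

/* Illustrative XDP program: passes the trusted ctx pointer straight through. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
					 __u64 *timestamp) __ksym;

SEC("xdp")
int rx_hints(struct xdp_md *ctx)
{
	__u64 ts;

	/* ctx is a trusted pointer; a pointer derived by arithmetic or taken
	 * from an untrusted source is now rejected by the verifier. */
	if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
		bpf_printk("rx hw timestamp: %llu", ts);

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";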
New file: tools/testing/selftests/bpf/prog_tests/async_stack_depth.c

// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>

#include "async_stack_depth.skel.h"

void test_async_stack_depth(void)
{
	RUN_TESTS(async_stack_depth);
}
New file: tools/testing/selftests/bpf/progs/async_stack_depth.c

// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

#include "bpf_misc.h"

struct hmap_elem {
	struct bpf_timer timer;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 64);
	__type(key, int);
	__type(value, struct hmap_elem);
} hmap SEC(".maps");

__attribute__((noinline))
static int timer_cb(void *map, int *key, struct bpf_timer *timer)
{
	volatile char buf[256] = {};
	return buf[69];
}

SEC("tc")
__failure __msg("combined stack size of 2 calls")
int prog(struct __sk_buff *ctx)
{
	struct hmap_elem *elem;
	volatile char buf[256] = {};

	elem = bpf_map_lookup_elem(&hmap, &(int){0});
	if (!elem)
		return 0;

	timer_cb(NULL, NULL, NULL);

	return bpf_timer_set_callback(&elem->timer, timer_cb) + buf[0];
}

char _license[] SEC("license") = "GPL";