Commit 88575199 authored by Daniel Borkmann's avatar Daniel Borkmann Committed by David S. Miller

bpf: drop unnecessary context cast from BPF_PROG_RUN

For a long time now, bpf_func has not been limited to struct sk_buff *
as its input. Make the context parameter generic as void *, so that
callers no longer need to cast it each time they call BPF_PROG_RUN().
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e3739099
...@@ -1518,7 +1518,7 @@ static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len) ...@@ -1518,7 +1518,7 @@ static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len)
xdp.data = data; xdp.data = data;
xdp.data_end = data + len; xdp.data_end = data + len;
return BPF_PROG_RUN(prog, (void *)&xdp); return BPF_PROG_RUN(prog, &xdp);
} }
/** /**
......
...@@ -408,8 +408,8 @@ struct bpf_prog { ...@@ -408,8 +408,8 @@ struct bpf_prog {
enum bpf_prog_type type; /* Type of BPF program */ enum bpf_prog_type type; /* Type of BPF program */
struct bpf_prog_aux *aux; /* Auxiliary fields */ struct bpf_prog_aux *aux; /* Auxiliary fields */
struct sock_fprog_kern *orig_prog; /* Original BPF program */ struct sock_fprog_kern *orig_prog; /* Original BPF program */
unsigned int (*bpf_func)(const struct sk_buff *skb, unsigned int (*bpf_func)(const void *ctx,
const struct bpf_insn *filter); const struct bpf_insn *insn);
/* Instructions for interpreter */ /* Instructions for interpreter */
union { union {
struct sock_filter insns[0]; struct sock_filter insns[0];
...@@ -504,7 +504,7 @@ static inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, ...@@ -504,7 +504,7 @@ static inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
u32 ret; u32 ret;
rcu_read_lock(); rcu_read_lock();
ret = BPF_PROG_RUN(prog, (void *)xdp); ret = BPF_PROG_RUN(prog, xdp);
rcu_read_unlock(); rcu_read_unlock();
return ret; return ret;
......
...@@ -7726,7 +7726,7 @@ static void bpf_overflow_handler(struct perf_event *event, ...@@ -7726,7 +7726,7 @@ static void bpf_overflow_handler(struct perf_event *event,
if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
goto out; goto out;
rcu_read_lock(); rcu_read_lock();
ret = BPF_PROG_RUN(event->prog, (void *)&ctx); ret = BPF_PROG_RUN(event->prog, &ctx);
rcu_read_unlock(); rcu_read_unlock();
out: out:
__this_cpu_dec(bpf_prog_active); __this_cpu_dec(bpf_prog_active);
......
...@@ -195,7 +195,7 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd) ...@@ -195,7 +195,7 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd)
* value always takes priority (ignoring the DATA). * value always takes priority (ignoring the DATA).
*/ */
for (; f; f = f->prev) { for (; f; f = f->prev) {
u32 cur_ret = BPF_PROG_RUN(f->prog, (void *)sd); u32 cur_ret = BPF_PROG_RUN(f->prog, sd);
if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION)) if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
ret = cur_ret; ret = cur_ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment