Commit 1456ddcc authored by Alexei Starovoitov

Merge branch 'selftests/bpf: make BPF_CFLAGS stricter with -Wall'

Andrii Nakryiko says:

====================

Make BPF-side compiler flags stricter by adding -Wall. Fix tons of small
issues pointed out by compiler immediately after that. That includes newly
added bpf_for(), bpf_for_each(), and bpf_repeat() macros.
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 63d78b7e 3d5a55dd
...@@ -352,12 +352,12 @@ CLANG_TARGET_ARCH = --target=$(notdir $(CROSS_COMPILE:%-=%)) ...@@ -352,12 +352,12 @@ CLANG_TARGET_ARCH = --target=$(notdir $(CROSS_COMPILE:%-=%))
endif endif
CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG),$(CLANG_TARGET_ARCH)) CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG),$(CLANG_TARGET_ARCH))
BPF_CFLAGS = -g -Werror -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \ BPF_CFLAGS = -g -Wall -Werror -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \
-I$(INCLUDE_DIR) -I$(CURDIR) -I$(APIDIR) \ -I$(INCLUDE_DIR) -I$(CURDIR) -I$(APIDIR) \
-I$(abspath $(OUTPUT)/../usr/include) -I$(abspath $(OUTPUT)/../usr/include)
CLANG_CFLAGS = $(CLANG_SYS_INCLUDES) \ CLANG_CFLAGS = $(CLANG_SYS_INCLUDES) \
-Wno-compare-distinct-pointer-types -Wuninitialized -Wno-compare-distinct-pointer-types
$(OUTPUT)/test_l4lb_noinline.o: BPF_CFLAGS += -fno-inline $(OUTPUT)/test_l4lb_noinline.o: BPF_CFLAGS += -fno-inline
$(OUTPUT)/test_xdp_noinline.o: BPF_CFLAGS += -fno-inline $(OUTPUT)/test_xdp_noinline.o: BPF_CFLAGS += -fno-inline
......
...@@ -33,7 +33,6 @@ int dump_ksym(struct bpf_iter__ksym *ctx) ...@@ -33,7 +33,6 @@ int dump_ksym(struct bpf_iter__ksym *ctx)
__u32 seq_num = ctx->meta->seq_num; __u32 seq_num = ctx->meta->seq_num;
unsigned long value; unsigned long value;
char type; char type;
int ret;
if (!iter) if (!iter)
return 0; return 0;
......
...@@ -42,7 +42,6 @@ int change_tcp_cc(struct bpf_iter__tcp *ctx) ...@@ -42,7 +42,6 @@ int change_tcp_cc(struct bpf_iter__tcp *ctx)
char cur_cc[TCP_CA_NAME_MAX]; char cur_cc[TCP_CA_NAME_MAX];
struct tcp_sock *tp; struct tcp_sock *tp;
struct sock *sk; struct sock *sk;
int ret;
if (!bpf_tcp_sk(ctx->sk_common)) if (!bpf_tcp_sk(ctx->sk_common))
return 0; return 0;
......
...@@ -138,8 +138,6 @@ static int callback_set_0f(int i, void *ctx) ...@@ -138,8 +138,6 @@ static int callback_set_0f(int i, void *ctx)
SEC("fentry/" SYS_PREFIX "sys_nanosleep") SEC("fentry/" SYS_PREFIX "sys_nanosleep")
int prog_non_constant_callback(void *ctx) int prog_non_constant_callback(void *ctx)
{ {
struct callback_ctx data = {};
if (bpf_get_current_pid_tgid() >> 32 != pid) if (bpf_get_current_pid_tgid() >> 32 != pid)
return 0; return 0;
......
...@@ -76,6 +76,9 @@ ...@@ -76,6 +76,9 @@
#define FUNC_REG_ARG_CNT 5 #define FUNC_REG_ARG_CNT 5
#endif #endif
/* make it look to compiler like value is read and written */
/* NOTE(review): the "+g" read-write asm constraint marks expr as both an
 * input and an output of an empty asm statement, so the compiler must treat
 * the value as used — presumably to silence set-but-unused warnings now that
 * BPF objects build with -Wall (see commit message); no code is emitted.
 */
#define __sink(expr) asm volatile("" : "+g"(expr))
struct bpf_iter_num; struct bpf_iter_num;
extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __ksym; extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __ksym;
...@@ -115,7 +118,8 @@ extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __ksym; ...@@ -115,7 +118,8 @@ extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __ksym;
struct bpf_iter_##type ___it __attribute__((aligned(8), /* enforce, just in case */, \ struct bpf_iter_##type ___it __attribute__((aligned(8), /* enforce, just in case */, \
cleanup(bpf_iter_##type##_destroy))), \ cleanup(bpf_iter_##type##_destroy))), \
/* ___p pointer is just to call bpf_iter_##type##_new() *once* to init ___it */ \ /* ___p pointer is just to call bpf_iter_##type##_new() *once* to init ___it */ \
*___p = (bpf_iter_##type##_new(&___it, ##args), \ *___p __attribute__((unused)) = ( \
bpf_iter_##type##_new(&___it, ##args), \
/* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \
/* for bpf_iter_##type##_destroy() when used from cleanup() attribute */ \ /* for bpf_iter_##type##_destroy() when used from cleanup() attribute */ \
(void)bpf_iter_##type##_destroy, (void *)0); \ (void)bpf_iter_##type##_destroy, (void *)0); \
...@@ -143,7 +147,8 @@ extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __ksym; ...@@ -143,7 +147,8 @@ extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __ksym;
struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \ struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \
cleanup(bpf_iter_num_destroy))), \ cleanup(bpf_iter_num_destroy))), \
/* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \ /* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \
*___p = (bpf_iter_num_new(&___it, (start), (end)), \ *___p __attribute__((unused)) = ( \
bpf_iter_num_new(&___it, (start), (end)), \
/* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \
/* for bpf_iter_num_destroy() when used from cleanup() attribute */ \ /* for bpf_iter_num_destroy() when used from cleanup() attribute */ \
(void)bpf_iter_num_destroy, (void *)0); \ (void)bpf_iter_num_destroy, (void *)0); \
...@@ -167,7 +172,8 @@ extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __ksym; ...@@ -167,7 +172,8 @@ extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __ksym;
struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \ struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \
cleanup(bpf_iter_num_destroy))), \ cleanup(bpf_iter_num_destroy))), \
/* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \ /* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \
*___p = (bpf_iter_num_new(&___it, 0, (N)), \ *___p __attribute__((unused)) = ( \
bpf_iter_num_new(&___it, 0, (N)), \
/* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \
/* for bpf_iter_num_destroy() when used from cleanup() attribute */ \ /* for bpf_iter_num_destroy() when used from cleanup() attribute */ \
(void)bpf_iter_num_destroy, (void *)0); \ (void)bpf_iter_num_destroy, (void *)0); \
......
...@@ -52,7 +52,6 @@ int leak_prog(void *ctx) ...@@ -52,7 +52,6 @@ int leak_prog(void *ctx)
{ {
struct prog_test_ref_kfunc *p; struct prog_test_ref_kfunc *p;
struct map_value *v; struct map_value *v;
unsigned long sl;
v = bpf_map_lookup_elem(&array_map, &(int){0}); v = bpf_map_lookup_elem(&array_map, &(int){0});
if (!v) if (!v)
......
...@@ -66,7 +66,6 @@ static inline int is_allowed_peer_cg(struct __sk_buff *skb, ...@@ -66,7 +66,6 @@ static inline int is_allowed_peer_cg(struct __sk_buff *skb,
SEC("cgroup_skb/ingress") SEC("cgroup_skb/ingress")
int ingress_lookup(struct __sk_buff *skb) int ingress_lookup(struct __sk_buff *skb)
{ {
__u32 serv_port_key = 0;
struct ipv6hdr ip6h; struct ipv6hdr ip6h;
struct tcphdr tcph; struct tcphdr tcph;
......
...@@ -109,6 +109,7 @@ int BPF_PROG(cgrp_kfunc_acquire_unreleased, struct cgroup *cgrp, const char *pat ...@@ -109,6 +109,7 @@ int BPF_PROG(cgrp_kfunc_acquire_unreleased, struct cgroup *cgrp, const char *pat
acquired = bpf_cgroup_acquire(cgrp); acquired = bpf_cgroup_acquire(cgrp);
/* Acquired cgroup is never released. */ /* Acquired cgroup is never released. */
__sink(acquired);
return 0; return 0;
} }
......
...@@ -84,7 +84,6 @@ int BPF_PROG(update_cookie_tracing, struct socket *sock, ...@@ -84,7 +84,6 @@ int BPF_PROG(update_cookie_tracing, struct socket *sock,
struct sockaddr *uaddr, int addr_len, int flags) struct sockaddr *uaddr, int addr_len, int flags)
{ {
struct socket_cookie *p; struct socket_cookie *p;
struct tcp_sock *tcp_sk;
if (uaddr->sa_family != AF_INET6) if (uaddr->sa_family != AF_INET6)
return 0; return 0;
......
...@@ -24,7 +24,6 @@ void bpf_rcu_read_unlock(void) __ksym; ...@@ -24,7 +24,6 @@ void bpf_rcu_read_unlock(void) __ksym;
SEC("?iter.s/cgroup") SEC("?iter.s/cgroup")
int cgroup_iter(struct bpf_iter__cgroup *ctx) int cgroup_iter(struct bpf_iter__cgroup *ctx)
{ {
struct seq_file *seq = ctx->meta->seq;
struct cgroup *cgrp = ctx->cgroup; struct cgroup *cgrp = ctx->cgroup;
long *ptr; long *ptr;
......
...@@ -77,7 +77,7 @@ int balancer_ingress(struct __sk_buff *ctx) ...@@ -77,7 +77,7 @@ int balancer_ingress(struct __sk_buff *ctx)
void *data_end = (void *)(long)ctx->data_end; void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data; void *data = (void *)(long)ctx->data;
void *ptr; void *ptr;
int ret = 0, nh_off, i = 0; int nh_off, i = 0;
nh_off = 14; nh_off = 14;
......
...@@ -23,6 +23,7 @@ int BPF_PROG(test_alloc_no_release, struct task_struct *task, u64 clone_flags) ...@@ -23,6 +23,7 @@ int BPF_PROG(test_alloc_no_release, struct task_struct *task, u64 clone_flags)
struct bpf_cpumask *cpumask; struct bpf_cpumask *cpumask;
cpumask = create_cpumask(); cpumask = create_cpumask();
__sink(cpumask);
/* cpumask is never released. */ /* cpumask is never released. */
return 0; return 0;
...@@ -51,6 +52,7 @@ int BPF_PROG(test_acquire_wrong_cpumask, struct task_struct *task, u64 clone_fla ...@@ -51,6 +52,7 @@ int BPF_PROG(test_acquire_wrong_cpumask, struct task_struct *task, u64 clone_fla
/* Can't acquire a non-struct bpf_cpumask. */ /* Can't acquire a non-struct bpf_cpumask. */
cpumask = bpf_cpumask_acquire((struct bpf_cpumask *)task->cpus_ptr); cpumask = bpf_cpumask_acquire((struct bpf_cpumask *)task->cpus_ptr);
__sink(cpumask);
return 0; return 0;
} }
...@@ -63,6 +65,7 @@ int BPF_PROG(test_mutate_cpumask, struct task_struct *task, u64 clone_flags) ...@@ -63,6 +65,7 @@ int BPF_PROG(test_mutate_cpumask, struct task_struct *task, u64 clone_flags)
/* Can't set the CPU of a non-struct bpf_cpumask. */ /* Can't set the CPU of a non-struct bpf_cpumask. */
bpf_cpumask_set_cpu(0, (struct bpf_cpumask *)task->cpus_ptr); bpf_cpumask_set_cpu(0, (struct bpf_cpumask *)task->cpus_ptr);
__sink(cpumask);
return 0; return 0;
} }
......
...@@ -353,7 +353,6 @@ SEC("tp_btf/task_newtask") ...@@ -353,7 +353,6 @@ SEC("tp_btf/task_newtask")
int BPF_PROG(test_insert_leave, struct task_struct *task, u64 clone_flags) int BPF_PROG(test_insert_leave, struct task_struct *task, u64 clone_flags)
{ {
struct bpf_cpumask *cpumask; struct bpf_cpumask *cpumask;
struct __cpumask_map_value *v;
cpumask = create_cpumask(); cpumask = create_cpumask();
if (!cpumask) if (!cpumask)
......
...@@ -271,7 +271,7 @@ SEC("?raw_tp") ...@@ -271,7 +271,7 @@ SEC("?raw_tp")
__failure __msg("value is outside of the allowed memory range") __failure __msg("value is outside of the allowed memory range")
int data_slice_out_of_bounds_map_value(void *ctx) int data_slice_out_of_bounds_map_value(void *ctx)
{ {
__u32 key = 0, map_val; __u32 map_val;
struct bpf_dynptr ptr; struct bpf_dynptr ptr;
void *data; void *data;
...@@ -388,7 +388,6 @@ int data_slice_missing_null_check2(void *ctx) ...@@ -388,7 +388,6 @@ int data_slice_missing_null_check2(void *ctx)
/* this should fail */ /* this should fail */
*data2 = 3; *data2 = 3;
done:
bpf_ringbuf_discard_dynptr(&ptr, 0); bpf_ringbuf_discard_dynptr(&ptr, 0);
return 0; return 0;
} }
...@@ -440,6 +439,7 @@ int invalid_write1(void *ctx) ...@@ -440,6 +439,7 @@ int invalid_write1(void *ctx)
/* this should fail */ /* this should fail */
data = bpf_dynptr_data(&ptr, 0, 1); data = bpf_dynptr_data(&ptr, 0, 1);
__sink(data);
return 0; return 0;
} }
...@@ -1374,6 +1374,7 @@ int invalid_slice_rdwr_rdonly(struct __sk_buff *skb) ...@@ -1374,6 +1374,7 @@ int invalid_slice_rdwr_rdonly(struct __sk_buff *skb)
* changing packet data * changing packet data
*/ */
hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer)); hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
__sink(hdr);
return 0; return 0;
} }
...@@ -35,7 +35,7 @@ SEC("?tp/syscalls/sys_enter_nanosleep") ...@@ -35,7 +35,7 @@ SEC("?tp/syscalls/sys_enter_nanosleep")
int test_read_write(void *ctx) int test_read_write(void *ctx)
{ {
char write_data[64] = "hello there, world!!"; char write_data[64] = "hello there, world!!";
char read_data[64] = {}, buf[64] = {}; char read_data[64] = {};
struct bpf_dynptr ptr; struct bpf_dynptr ptr;
int i; int i;
...@@ -170,7 +170,6 @@ int test_skb_readonly(struct __sk_buff *skb) ...@@ -170,7 +170,6 @@ int test_skb_readonly(struct __sk_buff *skb)
{ {
__u8 write_data[2] = {1, 2}; __u8 write_data[2] = {1, 2};
struct bpf_dynptr ptr; struct bpf_dynptr ptr;
__u64 *data;
int ret; int ret;
if (bpf_dynptr_from_skb(skb, 0, &ptr)) { if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
...@@ -191,10 +190,8 @@ int test_skb_readonly(struct __sk_buff *skb) ...@@ -191,10 +190,8 @@ int test_skb_readonly(struct __sk_buff *skb)
SEC("?cgroup_skb/egress") SEC("?cgroup_skb/egress")
int test_dynptr_skb_data(struct __sk_buff *skb) int test_dynptr_skb_data(struct __sk_buff *skb)
{ {
__u8 write_data[2] = {1, 2};
struct bpf_dynptr ptr; struct bpf_dynptr ptr;
__u64 *data; __u64 *data;
int ret;
if (bpf_dynptr_from_skb(skb, 0, &ptr)) { if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
err = 1; err = 1;
......
...@@ -120,8 +120,6 @@ int new_get_skb_ifindex(int val, struct __sk_buff *skb, int var) ...@@ -120,8 +120,6 @@ int new_get_skb_ifindex(int val, struct __sk_buff *skb, int var)
void *data = (void *)(long)skb->data; void *data = (void *)(long)skb->data;
struct ipv6hdr ip6, *ip6p; struct ipv6hdr ip6, *ip6p;
int ifindex = skb->ifindex; int ifindex = skb->ifindex;
__u32 eth_proto;
__u32 nh_off;
/* check that BPF extension can read packet via direct packet access */ /* check that BPF extension can read packet via direct packet access */
if (data + 14 + sizeof(ip6) > data_end) if (data + 14 + sizeof(ip6) > data_end)
......
...@@ -23,7 +23,7 @@ struct { ...@@ -23,7 +23,7 @@ struct {
SEC("freplace/handle_kprobe") SEC("freplace/handle_kprobe")
int new_handle_kprobe(struct pt_regs *ctx) int new_handle_kprobe(struct pt_regs *ctx)
{ {
struct hmap_elem zero = {}, *val; struct hmap_elem *val;
int key = 0; int key = 0;
val = bpf_map_lookup_elem(&hash_map, &key); val = bpf_map_lookup_elem(&hash_map, &key);
......
...@@ -45,7 +45,6 @@ __failure __msg("unbounded memory access") ...@@ -45,7 +45,6 @@ __failure __msg("unbounded memory access")
int iter_err_unsafe_asm_loop(const void *ctx) int iter_err_unsafe_asm_loop(const void *ctx)
{ {
struct bpf_iter_num it; struct bpf_iter_num it;
int *v, i = 0;
MY_PID_GUARD(); MY_PID_GUARD();
...@@ -88,7 +87,7 @@ __success ...@@ -88,7 +87,7 @@ __success
int iter_while_loop(const void *ctx) int iter_while_loop(const void *ctx)
{ {
struct bpf_iter_num it; struct bpf_iter_num it;
int *v, i; int *v;
MY_PID_GUARD(); MY_PID_GUARD();
...@@ -106,7 +105,7 @@ __success ...@@ -106,7 +105,7 @@ __success
int iter_while_loop_auto_cleanup(const void *ctx) int iter_while_loop_auto_cleanup(const void *ctx)
{ {
__attribute__((cleanup(bpf_iter_num_destroy))) struct bpf_iter_num it; __attribute__((cleanup(bpf_iter_num_destroy))) struct bpf_iter_num it;
int *v, i; int *v;
MY_PID_GUARD(); MY_PID_GUARD();
...@@ -124,7 +123,7 @@ __success ...@@ -124,7 +123,7 @@ __success
int iter_for_loop(const void *ctx) int iter_for_loop(const void *ctx)
{ {
struct bpf_iter_num it; struct bpf_iter_num it;
int *v, i; int *v;
MY_PID_GUARD(); MY_PID_GUARD();
...@@ -192,7 +191,7 @@ __success ...@@ -192,7 +191,7 @@ __success
int iter_manual_unroll_loop(const void *ctx) int iter_manual_unroll_loop(const void *ctx)
{ {
struct bpf_iter_num it; struct bpf_iter_num it;
int *v, i; int *v;
MY_PID_GUARD(); MY_PID_GUARD();
...@@ -621,7 +620,7 @@ __success ...@@ -621,7 +620,7 @@ __success
int iter_stack_array_loop(const void *ctx) int iter_stack_array_loop(const void *ctx)
{ {
long arr1[16], arr2[16], sum = 0; long arr1[16], arr2[16], sum = 0;
int *v, i; int i;
MY_PID_GUARD(); MY_PID_GUARD();
......
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include <bpf/bpf_helpers.h> #include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h> #include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h> #include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
/* weak and shared between two files */ /* weak and shared between two files */
const volatile int my_tid __weak; const volatile int my_tid __weak;
...@@ -51,6 +52,7 @@ __weak int set_output_weak(int x) ...@@ -51,6 +52,7 @@ __weak int set_output_weak(int x)
* cause problems for BPF static linker * cause problems for BPF static linker
*/ */
whatever = bpf_core_type_size(struct task_struct); whatever = bpf_core_type_size(struct task_struct);
__sink(whatever);
output_weak1 = x; output_weak1 = x;
return x; return x;
...@@ -71,6 +73,7 @@ int BPF_PROG(handler1, struct pt_regs *regs, long id) ...@@ -71,6 +73,7 @@ int BPF_PROG(handler1, struct pt_regs *regs, long id)
/* make sure we have CO-RE relocations in main program */ /* make sure we have CO-RE relocations in main program */
whatever = bpf_core_type_size(struct task_struct); whatever = bpf_core_type_size(struct task_struct);
__sink(whatever);
set_output_val2(1000); set_output_val2(1000);
set_output_ctx2(ctx); /* ctx definition is hidden in BPF_PROG macro */ set_output_ctx2(ctx); /* ctx definition is hidden in BPF_PROG macro */
......
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include <bpf/bpf_helpers.h> #include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h> #include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h> #include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
/* weak and shared between both files */ /* weak and shared between both files */
const volatile int my_tid __weak; const volatile int my_tid __weak;
...@@ -51,6 +52,7 @@ __weak int set_output_weak(int x) ...@@ -51,6 +52,7 @@ __weak int set_output_weak(int x)
* cause problems for BPF static linker * cause problems for BPF static linker
*/ */
whatever = 2 * bpf_core_type_size(struct task_struct); whatever = 2 * bpf_core_type_size(struct task_struct);
__sink(whatever);
output_weak2 = x; output_weak2 = x;
return 2 * x; return 2 * x;
...@@ -71,6 +73,7 @@ int BPF_PROG(handler2, struct pt_regs *regs, long id) ...@@ -71,6 +73,7 @@ int BPF_PROG(handler2, struct pt_regs *regs, long id)
/* make sure we have CO-RE relocations in main program */ /* make sure we have CO-RE relocations in main program */
whatever = bpf_core_type_size(struct task_struct); whatever = bpf_core_type_size(struct task_struct);
__sink(whatever);
set_output_val1(2000); set_output_val1(2000);
set_output_ctx1(ctx); /* ctx definition is hidden in BPF_PROG macro */ set_output_ctx1(ctx); /* ctx definition is hidden in BPF_PROG macro */
......
...@@ -313,7 +313,6 @@ SEC("tc") ...@@ -313,7 +313,6 @@ SEC("tc")
int map_list_push_pop_multiple(void *ctx) int map_list_push_pop_multiple(void *ctx)
{ {
struct map_value *v; struct map_value *v;
int ret;
v = bpf_map_lookup_elem(&array_map, &(int){0}); v = bpf_map_lookup_elem(&array_map, &(int){0});
if (!v) if (!v)
...@@ -326,7 +325,6 @@ int inner_map_list_push_pop_multiple(void *ctx) ...@@ -326,7 +325,6 @@ int inner_map_list_push_pop_multiple(void *ctx)
{ {
struct map_value *v; struct map_value *v;
void *map; void *map;
int ret;
map = bpf_map_lookup_elem(&map_of_maps, &(int){0}); map = bpf_map_lookup_elem(&map_of_maps, &(int){0});
if (!map) if (!map)
...@@ -352,7 +350,6 @@ SEC("tc") ...@@ -352,7 +350,6 @@ SEC("tc")
int map_list_in_list(void *ctx) int map_list_in_list(void *ctx)
{ {
struct map_value *v; struct map_value *v;
int ret;
v = bpf_map_lookup_elem(&array_map, &(int){0}); v = bpf_map_lookup_elem(&array_map, &(int){0});
if (!v) if (!v)
...@@ -365,7 +362,6 @@ int inner_map_list_in_list(void *ctx) ...@@ -365,7 +362,6 @@ int inner_map_list_in_list(void *ctx)
{ {
struct map_value *v; struct map_value *v;
void *map; void *map;
int ret;
map = bpf_map_lookup_elem(&map_of_maps, &(int){0}); map = bpf_map_lookup_elem(&map_of_maps, &(int){0});
if (!map) if (!map)
......
...@@ -557,7 +557,6 @@ SEC("?tc") ...@@ -557,7 +557,6 @@ SEC("?tc")
int incorrect_head_off2(void *ctx) int incorrect_head_off2(void *ctx)
{ {
struct foo *f; struct foo *f;
struct bar *b;
f = bpf_obj_new(typeof(*f)); f = bpf_obj_new(typeof(*f));
if (!f) if (!f)
......
...@@ -77,7 +77,6 @@ int BPF_PROG(inode_rename, struct inode *old_dir, struct dentry *old_dentry, ...@@ -77,7 +77,6 @@ int BPF_PROG(inode_rename, struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry, struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags) unsigned int flags)
{ {
__u32 pid = bpf_get_current_pid_tgid() >> 32;
struct local_storage *storage; struct local_storage *storage;
int err; int err;
......
...@@ -515,7 +515,6 @@ int test_ls_map_kptr_ref1(void *ctx) ...@@ -515,7 +515,6 @@ int test_ls_map_kptr_ref1(void *ctx)
{ {
struct task_struct *current; struct task_struct *current;
struct map_value *v; struct map_value *v;
int ret;
current = bpf_get_current_task_btf(); current = bpf_get_current_task_btf();
if (!current) if (!current)
...@@ -534,7 +533,6 @@ int test_ls_map_kptr_ref2(void *ctx) ...@@ -534,7 +533,6 @@ int test_ls_map_kptr_ref2(void *ctx)
{ {
struct task_struct *current; struct task_struct *current;
struct map_value *v; struct map_value *v;
int ret;
current = bpf_get_current_task_btf(); current = bpf_get_current_task_btf();
if (!current) if (!current)
...@@ -550,7 +548,6 @@ int test_ls_map_kptr_ref_del(void *ctx) ...@@ -550,7 +548,6 @@ int test_ls_map_kptr_ref_del(void *ctx)
{ {
struct task_struct *current; struct task_struct *current;
struct map_value *v; struct map_value *v;
int ret;
current = bpf_get_current_task_btf(); current = bpf_get_current_task_btf();
if (!current) if (!current)
......
...@@ -26,7 +26,6 @@ SEC("cgroup/skb") ...@@ -26,7 +26,6 @@ SEC("cgroup/skb")
int bpf_nextcnt(struct __sk_buff *skb) int bpf_nextcnt(struct __sk_buff *skb)
{ {
union percpu_net_cnt *percpu_cnt; union percpu_net_cnt *percpu_cnt;
char fmt[] = "%d %llu %llu\n";
union net_cnt *cnt; union net_cnt *cnt;
__u64 ts, dt; __u64 ts, dt;
int ret; int ret;
......
...@@ -53,7 +53,6 @@ static int __strncmp(const void *m1, const void *m2, size_t len) ...@@ -53,7 +53,6 @@ static int __strncmp(const void *m1, const void *m2, size_t len)
do { \ do { \
static const char _expectedval[EXPECTED_STRSIZE] = \ static const char _expectedval[EXPECTED_STRSIZE] = \
_expected; \ _expected; \
static const char _ptrtype[64] = #_type; \
__u64 _hflags = _flags | BTF_F_COMPACT; \ __u64 _hflags = _flags | BTF_F_COMPACT; \
static _type _ptrdata = __VA_ARGS__; \ static _type _ptrdata = __VA_ARGS__; \
static struct btf_ptr _ptr = { }; \ static struct btf_ptr _ptr = { }; \
......
...@@ -22,7 +22,6 @@ long dropped __attribute__((aligned(128))) = 0; ...@@ -22,7 +22,6 @@ long dropped __attribute__((aligned(128))) = 0;
SEC("fentry/" SYS_PREFIX "sys_getpgid") SEC("fentry/" SYS_PREFIX "sys_getpgid")
int bench_perfbuf(void *ctx) int bench_perfbuf(void *ctx)
{ {
__u64 *sample;
int i; int i;
for (i = 0; i < batch_cnt; i++) { for (i = 0; i < batch_cnt; i++) {
......
...@@ -345,7 +345,7 @@ int __on_event(struct bpf_raw_tracepoint_args *ctx) ...@@ -345,7 +345,7 @@ int __on_event(struct bpf_raw_tracepoint_args *ctx)
SEC("raw_tracepoint/kfree_skb") SEC("raw_tracepoint/kfree_skb")
int on_event(struct bpf_raw_tracepoint_args* ctx) int on_event(struct bpf_raw_tracepoint_args* ctx)
{ {
int i, ret = 0; int ret = 0;
ret |= __on_event(ctx); ret |= __on_event(ctx);
ret |= __on_event(ctx); ret |= __on_event(ctx);
ret |= __on_event(ctx); ret |= __on_event(ctx);
......
...@@ -16,17 +16,6 @@ struct node_data { ...@@ -16,17 +16,6 @@ struct node_data {
struct bpf_list_node node; struct bpf_list_node node;
}; };
/* rbtree comparator: returns true when node a's key orders before node b's. */
static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	struct node_data *na, *nb;

	na = container_of(a, struct node_data, node);
	nb = container_of(b, struct node_data, node);

	return na->key < nb->key;
}
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8))) #define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock glock; private(A) struct bpf_spin_lock glock;
private(A) struct bpf_rb_root groot __contains(node_data, node); private(A) struct bpf_rb_root groot __contains(node_data, node);
......
...@@ -105,7 +105,7 @@ long rbtree_api_remove_unadded_node(void *ctx) ...@@ -105,7 +105,7 @@ long rbtree_api_remove_unadded_node(void *ctx)
} }
SEC("?tc") SEC("?tc")
__failure __msg("Unreleased reference id=2 alloc_insn=11") __failure __msg("Unreleased reference id=2 alloc_insn=10")
long rbtree_api_remove_no_drop(void *ctx) long rbtree_api_remove_no_drop(void *ctx)
{ {
struct bpf_rb_node *res; struct bpf_rb_node *res;
...@@ -119,6 +119,7 @@ long rbtree_api_remove_no_drop(void *ctx) ...@@ -119,6 +119,7 @@ long rbtree_api_remove_no_drop(void *ctx)
res = bpf_rbtree_remove(&groot, res); res = bpf_rbtree_remove(&groot, res);
n = container_of(res, struct node_data, node); n = container_of(res, struct node_data, node);
__sink(n);
bpf_spin_unlock(&glock); bpf_spin_unlock(&glock);
/* bpf_obj_drop(n) is missing here */ /* bpf_obj_drop(n) is missing here */
......
...@@ -179,8 +179,6 @@ SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") ...@@ -179,8 +179,6 @@ SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
int miss_lock(void *ctx) int miss_lock(void *ctx)
{ {
struct task_struct *task; struct task_struct *task;
struct css_set *cgroups;
struct cgroup *dfl_cgrp;
/* missing bpf_rcu_read_lock() */ /* missing bpf_rcu_read_lock() */
task = bpf_get_current_task_btf(); task = bpf_get_current_task_btf();
...@@ -195,8 +193,6 @@ SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") ...@@ -195,8 +193,6 @@ SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
int miss_unlock(void *ctx) int miss_unlock(void *ctx)
{ {
struct task_struct *task; struct task_struct *task;
struct css_set *cgroups;
struct cgroup *dfl_cgrp;
/* missing bpf_rcu_read_unlock() */ /* missing bpf_rcu_read_unlock() */
task = bpf_get_current_task_btf(); task = bpf_get_current_task_btf();
......
...@@ -23,7 +23,6 @@ SEC("raw_tp/sys_enter") ...@@ -23,7 +23,6 @@ SEC("raw_tp/sys_enter")
int BPF_PROG(read_bpf_task_storage_busy) int BPF_PROG(read_bpf_task_storage_busy)
{ {
int *value; int *value;
int key;
if (!CONFIG_PREEMPT) if (!CONFIG_PREEMPT)
return 0; return 0;
......
...@@ -17,8 +17,6 @@ SEC("cgroup/recvmsg4") ...@@ -17,8 +17,6 @@ SEC("cgroup/recvmsg4")
int recvmsg4_prog(struct bpf_sock_addr *ctx) int recvmsg4_prog(struct bpf_sock_addr *ctx)
{ {
struct bpf_sock *sk; struct bpf_sock *sk;
__u32 user_ip4;
__u16 user_port;
sk = ctx->sk; sk = ctx->sk;
if (!sk) if (!sk)
......
...@@ -20,8 +20,6 @@ SEC("cgroup/recvmsg6") ...@@ -20,8 +20,6 @@ SEC("cgroup/recvmsg6")
int recvmsg6_prog(struct bpf_sock_addr *ctx) int recvmsg6_prog(struct bpf_sock_addr *ctx)
{ {
struct bpf_sock *sk; struct bpf_sock *sk;
__u32 user_ip4;
__u16 user_port;
sk = ctx->sk; sk = ctx->sk;
if (!sk) if (!sk)
......
...@@ -21,8 +21,6 @@ ...@@ -21,8 +21,6 @@
SEC("cgroup/sendmsg4") SEC("cgroup/sendmsg4")
int sendmsg_v4_prog(struct bpf_sock_addr *ctx) int sendmsg_v4_prog(struct bpf_sock_addr *ctx)
{ {
int prio;
if (ctx->type != SOCK_DGRAM) if (ctx->type != SOCK_DGRAM)
return 0; return 0;
......
#include <linux/bpf.h> #include <linux/bpf.h>
#include <bpf/bpf_helpers.h> #include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h> #include <bpf/bpf_endian.h>
#include "bpf_misc.h"
struct { struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP); __uint(type, BPF_MAP_TYPE_SOCKMAP);
...@@ -40,6 +41,9 @@ int bpf_prog2(struct __sk_buff *skb) ...@@ -40,6 +41,9 @@ int bpf_prog2(struct __sk_buff *skb)
__u8 *d = data; __u8 *d = data;
__u8 sk, map; __u8 sk, map;
__sink(lport);
__sink(rport);
if (data + 8 > data_end) if (data + 8 > data_end)
return SK_DROP; return SK_DROP;
......
...@@ -391,7 +391,6 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg, ...@@ -391,7 +391,6 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg,
struct strobe_map_raw map; struct strobe_map_raw map;
void *location; void *location;
uint64_t len; uint64_t len;
int i;
descr->tag_len = 0; /* presume no tag is set */ descr->tag_len = 0; /* presume no tag is set */
descr->cnt = -1; /* presume no value is set */ descr->cnt = -1; /* presume no value is set */
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
#include <linux/bpf.h> #include <linux/bpf.h>
#include <bpf/bpf_helpers.h> #include <bpf/bpf_helpers.h>
#include "bpf_legacy.h" #include "bpf_legacy.h"
#include "bpf_misc.h"
struct { struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY); __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
...@@ -20,6 +21,8 @@ int subprog_tail2(struct __sk_buff *skb) ...@@ -20,6 +21,8 @@ int subprog_tail2(struct __sk_buff *skb)
else else
bpf_tail_call_static(skb, &jmp_table, 1); bpf_tail_call_static(skb, &jmp_table, 1);
__sink(arr[sizeof(arr) - 1]);
return skb->len; return skb->len;
} }
...@@ -30,6 +33,8 @@ int subprog_tail(struct __sk_buff *skb) ...@@ -30,6 +33,8 @@ int subprog_tail(struct __sk_buff *skb)
bpf_tail_call_static(skb, &jmp_table, 0); bpf_tail_call_static(skb, &jmp_table, 0);
__sink(arr[sizeof(arr) - 1]);
return skb->len * 2; return skb->len * 2;
} }
...@@ -38,6 +43,8 @@ int classifier_0(struct __sk_buff *skb) ...@@ -38,6 +43,8 @@ int classifier_0(struct __sk_buff *skb)
{ {
volatile char arr[128] = {}; volatile char arr[128] = {};
__sink(arr[sizeof(arr) - 1]);
return subprog_tail2(skb); return subprog_tail2(skb);
} }
...@@ -46,6 +53,8 @@ int classifier_1(struct __sk_buff *skb) ...@@ -46,6 +53,8 @@ int classifier_1(struct __sk_buff *skb)
{ {
volatile char arr[128] = {}; volatile char arr[128] = {};
__sink(arr[sizeof(arr) - 1]);
return skb->len * 3; return skb->len * 3;
} }
...@@ -54,6 +63,8 @@ int entry(struct __sk_buff *skb) ...@@ -54,6 +63,8 @@ int entry(struct __sk_buff *skb)
{ {
volatile char arr[128] = {}; volatile char arr[128] = {};
__sink(arr[sizeof(arr) - 1]);
return subprog_tail(skb); return subprog_tail(skb);
} }
......
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h> #include <linux/bpf.h>
#include <bpf/bpf_helpers.h> #include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#define __unused __attribute__((unused)) #define __unused __attribute__((unused))
...@@ -36,6 +37,8 @@ int entry(struct __sk_buff *skb) ...@@ -36,6 +37,8 @@ int entry(struct __sk_buff *skb)
/* Have data on stack which size is not a multiple of 8 */ /* Have data on stack which size is not a multiple of 8 */
volatile char arr[1] = {}; volatile char arr[1] = {};
__sink(arr[0]);
return subprog_tail(skb); return subprog_tail(skb);
} }
......
...@@ -109,6 +109,7 @@ int BPF_PROG(task_kfunc_acquire_unreleased, struct task_struct *task, u64 clone_ ...@@ -109,6 +109,7 @@ int BPF_PROG(task_kfunc_acquire_unreleased, struct task_struct *task, u64 clone_
acquired = bpf_task_acquire(task); acquired = bpf_task_acquire(task);
/* Acquired task is never released. */ /* Acquired task is never released. */
__sink(acquired);
return 0; return 0;
} }
......
...@@ -171,8 +171,6 @@ static void lookup_compare_pid(const struct task_struct *p) ...@@ -171,8 +171,6 @@ static void lookup_compare_pid(const struct task_struct *p)
SEC("tp_btf/task_newtask") SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_from_pid_arg, struct task_struct *task, u64 clone_flags) int BPF_PROG(test_task_from_pid_arg, struct task_struct *task, u64 clone_flags)
{ {
struct task_struct *acquired;
if (!is_test_kfunc_task()) if (!is_test_kfunc_task())
return 0; return 0;
...@@ -183,8 +181,6 @@ int BPF_PROG(test_task_from_pid_arg, struct task_struct *task, u64 clone_flags) ...@@ -183,8 +181,6 @@ int BPF_PROG(test_task_from_pid_arg, struct task_struct *task, u64 clone_flags)
SEC("tp_btf/task_newtask") SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_from_pid_current, struct task_struct *task, u64 clone_flags) int BPF_PROG(test_task_from_pid_current, struct task_struct *task, u64 clone_flags)
{ {
struct task_struct *current, *acquired;
if (!is_test_kfunc_task()) if (!is_test_kfunc_task())
return 0; return 0;
...@@ -208,8 +204,6 @@ static int is_pid_lookup_valid(s32 pid) ...@@ -208,8 +204,6 @@ static int is_pid_lookup_valid(s32 pid)
SEC("tp_btf/task_newtask") SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_from_pid_invalid, struct task_struct *task, u64 clone_flags) int BPF_PROG(test_task_from_pid_invalid, struct task_struct *task, u64 clone_flags)
{ {
struct task_struct *acquired;
if (!is_test_kfunc_task()) if (!is_test_kfunc_task())
return 0; return 0;
......
...@@ -75,7 +75,6 @@ nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32, ...@@ -75,7 +75,6 @@ nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32,
struct bpf_ct_opts___local opts_def = { .l4proto = IPPROTO_TCP, .netns_id = -1 }; struct bpf_ct_opts___local opts_def = { .l4proto = IPPROTO_TCP, .netns_id = -1 };
struct bpf_sock_tuple bpf_tuple; struct bpf_sock_tuple bpf_tuple;
struct nf_conn *ct; struct nf_conn *ct;
int err;
__builtin_memset(&bpf_tuple, 0, sizeof(bpf_tuple.ipv4)); __builtin_memset(&bpf_tuple, 0, sizeof(bpf_tuple.ipv4));
......
...@@ -455,7 +455,6 @@ static ret_t forward_to_next_hop(struct __sk_buff *skb, struct bpf_dynptr *dynpt ...@@ -455,7 +455,6 @@ static ret_t forward_to_next_hop(struct __sk_buff *skb, struct bpf_dynptr *dynpt
static ret_t skip_next_hops(__u64 *offset, int n) static ret_t skip_next_hops(__u64 *offset, int n)
{ {
__u32 res;
switch (n) { switch (n) {
case 1: case 1:
*offset += sizeof(struct in_addr); *offset += sizeof(struct in_addr);
......
...@@ -42,7 +42,6 @@ int test_core_bitfields(void *ctx) ...@@ -42,7 +42,6 @@ int test_core_bitfields(void *ctx)
{ {
struct core_reloc_bitfields *in = (void *)&data.in; struct core_reloc_bitfields *in = (void *)&data.in;
struct core_reloc_bitfields_output *out = (void *)&data.out; struct core_reloc_bitfields_output *out = (void *)&data.out;
uint64_t res;
out->ub1 = BPF_CORE_READ_BITFIELD_PROBED(in, ub1); out->ub1 = BPF_CORE_READ_BITFIELD_PROBED(in, ub1);
out->ub2 = BPF_CORE_READ_BITFIELD_PROBED(in, ub2); out->ub2 = BPF_CORE_READ_BITFIELD_PROBED(in, ub2);
......
...@@ -18,6 +18,8 @@ int f1(struct __sk_buff *skb) ...@@ -18,6 +18,8 @@ int f1(struct __sk_buff *skb)
{ {
volatile char buf[MAX_STACK] = {}; volatile char buf[MAX_STACK] = {};
__sink(buf[MAX_STACK - 1]);
return f0(0, skb) + skb->len; return f0(0, skb) + skb->len;
} }
...@@ -34,6 +36,8 @@ int f3(int val, struct __sk_buff *skb, int var) ...@@ -34,6 +36,8 @@ int f3(int val, struct __sk_buff *skb, int var)
{ {
volatile char buf[MAX_STACK] = {}; volatile char buf[MAX_STACK] = {};
__sink(buf[MAX_STACK - 1]);
return skb->ifindex * val * var; return skb->ifindex * val * var;
} }
......
...@@ -18,6 +18,8 @@ int f1(struct __sk_buff *skb) ...@@ -18,6 +18,8 @@ int f1(struct __sk_buff *skb)
{ {
volatile char buf[MAX_STACK] = {}; volatile char buf[MAX_STACK] = {};
__sink(buf[MAX_STACK - 1]);
return f0(0, skb) + skb->len; return f0(0, skb) + skb->len;
} }
...@@ -34,6 +36,8 @@ int f3(int val, struct __sk_buff *skb, int var) ...@@ -34,6 +36,8 @@ int f3(int val, struct __sk_buff *skb, int var)
{ {
volatile char buf[MAX_STACK] = {}; volatile char buf[MAX_STACK] = {};
__sink(buf[MAX_STACK - 1]);
return skb->ifindex * val * var; return skb->ifindex * val * var;
} }
......
...@@ -28,7 +28,7 @@ struct bigelement { ...@@ -28,7 +28,7 @@ struct bigelement {
SEC("raw_tracepoint/sys_enter") SEC("raw_tracepoint/sys_enter")
int bpf_hash_large_key_test(void *ctx) int bpf_hash_large_key_test(void *ctx)
{ {
int zero = 0, err = 1, value = 42; int zero = 0, value = 42;
struct bigelement *key; struct bigelement *key;
key = bpf_map_lookup_elem(&key_map, &zero); key = bpf_map_lookup_elem(&key_map, &zero);
......
...@@ -35,7 +35,6 @@ SEC("raw_tp/sys_enter") ...@@ -35,7 +35,6 @@ SEC("raw_tp/sys_enter")
int handler2(const void *ctx) int handler2(const void *ctx)
{ {
int *active; int *active;
__u32 cpu;
active = bpf_this_cpu_ptr(&bpf_prog_active); active = bpf_this_cpu_ptr(&bpf_prog_active);
write_active(active); write_active(active);
......
...@@ -56,7 +56,7 @@ int handle_legacy(void *ctx) ...@@ -56,7 +56,7 @@ int handle_legacy(void *ctx)
SEC("tp/raw_syscalls/sys_enter") SEC("tp/raw_syscalls/sys_enter")
int handle_modern(void *ctx) int handle_modern(void *ctx)
{ {
int zero = 0, cur_pid; int cur_pid;
cur_pid = bpf_get_current_pid_tgid() >> 32; cur_pid = bpf_get_current_pid_tgid() >> 32;
if (cur_pid != my_pid_var) if (cur_pid != my_pid_var)
......
...@@ -33,7 +33,7 @@ struct { ...@@ -33,7 +33,7 @@ struct {
SEC("cgroup/skb") SEC("cgroup/skb")
int bpf_map_lock_test(struct __sk_buff *skb) int bpf_map_lock_test(struct __sk_buff *skb)
{ {
struct hmap_elem zero = {}, *val; struct hmap_elem *val;
int rnd = bpf_get_prandom_u32(); int rnd = bpf_get_prandom_u32();
int key = 0, err = 1, i; int key = 0, err = 1, i;
struct array_elem *q; struct array_elem *q;
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
#include <stddef.h> #include <stddef.h>
#include <linux/bpf.h> #include <linux/bpf.h>
#include <bpf/bpf_helpers.h> #include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct { struct {
__uint(type, BPF_MAP_TYPE_ARRAY); __uint(type, BPF_MAP_TYPE_ARRAY);
...@@ -19,6 +20,7 @@ int test_obj_id(void *ctx) ...@@ -19,6 +20,7 @@ int test_obj_id(void *ctx)
__u64 *value; __u64 *value;
value = bpf_map_lookup_elem(&test_map_id, &key); value = bpf_map_lookup_elem(&test_map_id, &key);
__sink(value);
return 0; return 0;
} }
...@@ -87,7 +87,6 @@ int xdp_ingress_v6(struct xdp_md *xdp) ...@@ -87,7 +87,6 @@ int xdp_ingress_v6(struct xdp_md *xdp)
__u8 tcp_hdr_opt_len = 0; __u8 tcp_hdr_opt_len = 0;
struct tcphdr *tcp_hdr; struct tcphdr *tcp_hdr;
__u64 tcp_offset = 0; __u64 tcp_offset = 0;
__u32 off;
int err; int err;
tcp_offset = sizeof(struct ethhdr) + sizeof(struct ipv6hdr); tcp_offset = sizeof(struct ethhdr) + sizeof(struct ipv6hdr);
......
...@@ -30,7 +30,7 @@ __u32 server_id; ...@@ -30,7 +30,7 @@ __u32 server_id;
static int parse_hdr_opt(struct bpf_dynptr *ptr, __u32 *off, __u8 *hdr_bytes_remaining, static int parse_hdr_opt(struct bpf_dynptr *ptr, __u32 *off, __u8 *hdr_bytes_remaining,
__u32 *server_id) __u32 *server_id)
{ {
__u8 *tcp_opt, kind, hdr_len; __u8 kind, hdr_len;
__u8 buffer[sizeof(kind) + sizeof(hdr_len) + sizeof(*server_id)]; __u8 buffer[sizeof(kind) + sizeof(hdr_len) + sizeof(*server_id)];
__u8 *data; __u8 *data;
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include <linux/pkt_cls.h> #include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h> #include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h> #include <bpf/bpf_endian.h>
#include "bpf_misc.h"
/* llvm will optimize both subprograms into exactly the same BPF assembly /* llvm will optimize both subprograms into exactly the same BPF assembly
* *
...@@ -51,6 +52,8 @@ int get_skb_len(struct __sk_buff *skb) ...@@ -51,6 +52,8 @@ int get_skb_len(struct __sk_buff *skb)
{ {
volatile char buf[MAX_STACK] = {}; volatile char buf[MAX_STACK] = {};
__sink(buf[MAX_STACK - 1]);
return skb->len; return skb->len;
} }
...@@ -73,6 +76,8 @@ int get_skb_ifindex(int val, struct __sk_buff *skb, int var) ...@@ -73,6 +76,8 @@ int get_skb_ifindex(int val, struct __sk_buff *skb, int var)
{ {
volatile char buf[MAX_STACK] = {}; volatile char buf[MAX_STACK] = {};
__sink(buf[MAX_STACK - 1]);
return skb->ifindex * val * var; return skb->ifindex * val * var;
} }
......
...@@ -41,7 +41,6 @@ int test_ringbuf(void *ctx) ...@@ -41,7 +41,6 @@ int test_ringbuf(void *ctx)
{ {
int cur_pid = bpf_get_current_pid_tgid() >> 32; int cur_pid = bpf_get_current_pid_tgid() >> 32;
struct sample *sample; struct sample *sample;
int zero = 0;
if (cur_pid != pid) if (cur_pid != pid)
return 0; return 0;
......
...@@ -53,6 +53,7 @@ int test_ringbuf_mem_map_key(void *ctx) ...@@ -53,6 +53,7 @@ int test_ringbuf_mem_map_key(void *ctx)
/* test using 'sample' (PTR_TO_MEM | MEM_ALLOC) as map key arg /* test using 'sample' (PTR_TO_MEM | MEM_ALLOC) as map key arg
*/ */
lookup_val = (int *)bpf_map_lookup_elem(&hash_map, sample); lookup_val = (int *)bpf_map_lookup_elem(&hash_map, sample);
__sink(lookup_val);
/* workaround - memcpy is necessary so that verifier doesn't /* workaround - memcpy is necessary so that verifier doesn't
* complain with: * complain with:
......
...@@ -59,7 +59,6 @@ int test_ringbuf(void *ctx) ...@@ -59,7 +59,6 @@ int test_ringbuf(void *ctx)
int cur_pid = bpf_get_current_pid_tgid() >> 32; int cur_pid = bpf_get_current_pid_tgid() >> 32;
struct sample *sample; struct sample *sample;
void *rb; void *rb;
int zero = 0;
if (cur_pid != pid) if (cur_pid != pid)
return 0; return 0;
......
...@@ -64,7 +64,7 @@ SEC("sk_reuseport") ...@@ -64,7 +64,7 @@ SEC("sk_reuseport")
int _select_by_skb_data(struct sk_reuseport_md *reuse_md) int _select_by_skb_data(struct sk_reuseport_md *reuse_md)
{ {
__u32 linum, index = 0, flags = 0, index_zero = 0; __u32 linum, index = 0, flags = 0, index_zero = 0;
__u32 *result_cnt, *linum_value; __u32 *result_cnt;
struct data_check data_check = {}; struct data_check data_check = {};
struct cmd *cmd, cmd_copy; struct cmd *cmd, cmd_copy;
void *data, *data_end; void *data, *data_end;
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#include <sys/socket.h> #include <sys/socket.h>
#include <bpf/bpf_helpers.h> #include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h> #include <bpf/bpf_endian.h>
#include "bpf_misc.h"
#if defined(IPROUTE2_HAVE_LIBBPF) #if defined(IPROUTE2_HAVE_LIBBPF)
/* Use a new-style map definition. */ /* Use a new-style map definition. */
...@@ -57,7 +58,6 @@ get_tuple(struct __sk_buff *skb, bool *ipv4, bool *tcp) ...@@ -57,7 +58,6 @@ get_tuple(struct __sk_buff *skb, bool *ipv4, bool *tcp)
void *data = (void *)(long)skb->data; void *data = (void *)(long)skb->data;
struct bpf_sock_tuple *result; struct bpf_sock_tuple *result;
struct ethhdr *eth; struct ethhdr *eth;
__u64 tuple_len;
__u8 proto = 0; __u8 proto = 0;
__u64 ihl_len; __u64 ihl_len;
...@@ -94,6 +94,7 @@ get_tuple(struct __sk_buff *skb, bool *ipv4, bool *tcp) ...@@ -94,6 +94,7 @@ get_tuple(struct __sk_buff *skb, bool *ipv4, bool *tcp)
return NULL; return NULL;
*tcp = (proto == IPPROTO_TCP); *tcp = (proto == IPPROTO_TCP);
__sink(ihl_len);
return result; return result;
} }
...@@ -173,7 +174,6 @@ int bpf_sk_assign_test(struct __sk_buff *skb) ...@@ -173,7 +174,6 @@ int bpf_sk_assign_test(struct __sk_buff *skb)
struct bpf_sock_tuple *tuple; struct bpf_sock_tuple *tuple;
bool ipv4 = false; bool ipv4 = false;
bool tcp = false; bool tcp = false;
int tuple_len;
int ret = 0; int ret = 0;
tuple = get_tuple(skb, &ipv4, &tcp); tuple = get_tuple(skb, &ipv4, &tcp);
......
...@@ -391,7 +391,6 @@ SEC("sk_lookup") ...@@ -391,7 +391,6 @@ SEC("sk_lookup")
int ctx_narrow_access(struct bpf_sk_lookup *ctx) int ctx_narrow_access(struct bpf_sk_lookup *ctx)
{ {
struct bpf_sock *sk; struct bpf_sock *sk;
int err, family;
__u32 val_u32; __u32 val_u32;
bool v4; bool v4;
...@@ -645,9 +644,7 @@ static __always_inline int select_server_a(struct bpf_sk_lookup *ctx) ...@@ -645,9 +644,7 @@ static __always_inline int select_server_a(struct bpf_sk_lookup *ctx)
SEC("sk_lookup") SEC("sk_lookup")
int multi_prog_redir1(struct bpf_sk_lookup *ctx) int multi_prog_redir1(struct bpf_sk_lookup *ctx)
{ {
int ret; (void)select_server_a(ctx);
ret = select_server_a(ctx);
bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY); bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
return SK_PASS; return SK_PASS;
} }
...@@ -655,9 +652,7 @@ int multi_prog_redir1(struct bpf_sk_lookup *ctx) ...@@ -655,9 +652,7 @@ int multi_prog_redir1(struct bpf_sk_lookup *ctx)
SEC("sk_lookup") SEC("sk_lookup")
int multi_prog_redir2(struct bpf_sk_lookup *ctx) int multi_prog_redir2(struct bpf_sk_lookup *ctx)
{ {
int ret; (void)select_server_a(ctx);
ret = select_server_a(ctx);
bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY); bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
return SK_PASS; return SK_PASS;
} }
......
...@@ -110,7 +110,6 @@ int err_modify_sk_pointer(struct __sk_buff *skb) ...@@ -110,7 +110,6 @@ int err_modify_sk_pointer(struct __sk_buff *skb)
{ {
struct bpf_sock_tuple tuple = {}; struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk; struct bpf_sock *sk;
__u32 family;
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
if (sk) { if (sk) {
...@@ -125,7 +124,6 @@ int err_modify_sk_or_null_pointer(struct __sk_buff *skb) ...@@ -125,7 +124,6 @@ int err_modify_sk_or_null_pointer(struct __sk_buff *skb)
{ {
struct bpf_sock_tuple tuple = {}; struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk; struct bpf_sock *sk;
__u32 family;
sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
sk += 1; sk += 1;
......
...@@ -121,7 +121,7 @@ static void tpcpy(struct bpf_tcp_sock *dst, ...@@ -121,7 +121,7 @@ static void tpcpy(struct bpf_tcp_sock *dst,
SEC("cgroup_skb/egress") SEC("cgroup_skb/egress")
int egress_read_sock_fields(struct __sk_buff *skb) int egress_read_sock_fields(struct __sk_buff *skb)
{ {
struct bpf_spinlock_cnt cli_cnt_init = { .lock = 0, .cnt = 0xeB9F }; struct bpf_spinlock_cnt cli_cnt_init = { .lock = {}, .cnt = 0xeB9F };
struct bpf_spinlock_cnt *pkt_out_cnt, *pkt_out_cnt10; struct bpf_spinlock_cnt *pkt_out_cnt, *pkt_out_cnt10;
struct bpf_tcp_sock *tp, *tp_ret; struct bpf_tcp_sock *tp, *tp_ret;
struct bpf_sock *sk, *sk_ret; struct bpf_sock *sk, *sk_ret;
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <sys/socket.h> #include <sys/socket.h>
#include <bpf/bpf_helpers.h> #include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h> #include <bpf/bpf_endian.h>
#include "bpf_misc.h"
/* Sockmap sample program connects a client and a backend together /* Sockmap sample program connects a client and a backend together
* using cgroups. * using cgroups.
...@@ -111,12 +112,15 @@ int bpf_prog2(struct __sk_buff *skb) ...@@ -111,12 +112,15 @@ int bpf_prog2(struct __sk_buff *skb)
int len, *f, ret, zero = 0; int len, *f, ret, zero = 0;
__u64 flags = 0; __u64 flags = 0;
__sink(rport);
if (lport == 10000) if (lport == 10000)
ret = 10; ret = 10;
else else
ret = 1; ret = 1;
len = (__u32)skb->data_end - (__u32)skb->data; len = (__u32)skb->data_end - (__u32)skb->data;
__sink(len);
f = bpf_map_lookup_elem(&sock_skb_opts, &zero); f = bpf_map_lookup_elem(&sock_skb_opts, &zero);
if (f && *f) { if (f && *f) {
ret = 3; ret = 3;
...@@ -180,7 +184,6 @@ int bpf_prog3(struct __sk_buff *skb) ...@@ -180,7 +184,6 @@ int bpf_prog3(struct __sk_buff *skb)
if (err) if (err)
return SK_DROP; return SK_DROP;
bpf_write_pass(skb, 13); bpf_write_pass(skb, 13);
tls_out:
return ret; return ret;
} }
...@@ -188,8 +191,7 @@ SEC("sockops") ...@@ -188,8 +191,7 @@ SEC("sockops")
int bpf_sockmap(struct bpf_sock_ops *skops) int bpf_sockmap(struct bpf_sock_ops *skops)
{ {
__u32 lport, rport; __u32 lport, rport;
int op, err = 0, index, key, ret; int op, err, ret;
op = (int) skops->op; op = (int) skops->op;
...@@ -228,6 +230,8 @@ int bpf_sockmap(struct bpf_sock_ops *skops) ...@@ -228,6 +230,8 @@ int bpf_sockmap(struct bpf_sock_ops *skops)
break; break;
} }
__sink(err);
return 0; return 0;
} }
...@@ -321,6 +325,10 @@ int bpf_prog8(struct sk_msg_md *msg) ...@@ -321,6 +325,10 @@ int bpf_prog8(struct sk_msg_md *msg)
} else { } else {
return SK_DROP; return SK_DROP;
} }
__sink(data_end);
__sink(data);
return SK_PASS; return SK_PASS;
} }
SEC("sk_msg4") SEC("sk_msg4")
......
...@@ -3,6 +3,7 @@ ...@@ -3,6 +3,7 @@
#include <linux/bpf.h> #include <linux/bpf.h>
#include <linux/version.h> #include <linux/version.h>
#include <bpf/bpf_helpers.h> #include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
struct hmap_elem { struct hmap_elem {
volatile int cnt; volatile int cnt;
...@@ -89,6 +90,8 @@ int bpf_spin_lock_test(struct __sk_buff *skb) ...@@ -89,6 +90,8 @@ int bpf_spin_lock_test(struct __sk_buff *skb)
credit = q->credit; credit = q->credit;
bpf_spin_unlock(&q->lock); bpf_spin_unlock(&q->lock);
__sink(credit);
/* spin_lock in cgroup local storage */ /* spin_lock in cgroup local storage */
cls = bpf_get_local_storage(&cls_map, 0); cls = bpf_get_local_storage(&cls_map, 0);
bpf_spin_lock(&cls->lock); bpf_spin_lock(&cls->lock);
......
...@@ -163,9 +163,9 @@ static int skb_get_type(struct __sk_buff *skb) ...@@ -163,9 +163,9 @@ static int skb_get_type(struct __sk_buff *skb)
ip6h = data + sizeof(struct ethhdr); ip6h = data + sizeof(struct ethhdr);
if (ip6h + 1 > data_end) if (ip6h + 1 > data_end)
return -1; return -1;
if (v6_equal(ip6h->saddr, (struct in6_addr)ip6_src)) if (v6_equal(ip6h->saddr, (struct in6_addr){{ip6_src}}))
ns = SRC_NS; ns = SRC_NS;
else if (v6_equal(ip6h->saddr, (struct in6_addr)ip6_dst)) else if (v6_equal(ip6h->saddr, (struct in6_addr){{ip6_dst}}))
ns = DST_NS; ns = DST_NS;
inet_proto = ip6h->nexthdr; inet_proto = ip6h->nexthdr;
trans = ip6h + 1; trans = ip6h + 1;
......
...@@ -94,7 +94,7 @@ int tc_dst(struct __sk_buff *skb) ...@@ -94,7 +94,7 @@ int tc_dst(struct __sk_buff *skb)
redirect = is_remote_ep_v4(skb, __bpf_constant_htonl(ip4_src)); redirect = is_remote_ep_v4(skb, __bpf_constant_htonl(ip4_src));
break; break;
case __bpf_constant_htons(ETH_P_IPV6): case __bpf_constant_htons(ETH_P_IPV6):
redirect = is_remote_ep_v6(skb, (struct in6_addr)ip6_src); redirect = is_remote_ep_v6(skb, (struct in6_addr){{ip6_src}});
break; break;
} }
...@@ -119,7 +119,7 @@ int tc_src(struct __sk_buff *skb) ...@@ -119,7 +119,7 @@ int tc_src(struct __sk_buff *skb)
redirect = is_remote_ep_v4(skb, __bpf_constant_htonl(ip4_dst)); redirect = is_remote_ep_v4(skb, __bpf_constant_htonl(ip4_dst));
break; break;
case __bpf_constant_htons(ETH_P_IPV6): case __bpf_constant_htons(ETH_P_IPV6):
redirect = is_remote_ep_v6(skb, (struct in6_addr)ip6_dst); redirect = is_remote_ep_v6(skb, (struct in6_addr){{ip6_dst}});
break; break;
} }
......
...@@ -46,8 +46,6 @@ int bpf_testcb(struct bpf_sock_ops *skops) ...@@ -46,8 +46,6 @@ int bpf_testcb(struct bpf_sock_ops *skops)
struct bpf_sock_ops *reuse = skops; struct bpf_sock_ops *reuse = skops;
struct tcphdr *thdr; struct tcphdr *thdr;
int window_clamp = 9216; int window_clamp = 9216;
int good_call_rv = 0;
int bad_call_rv = 0;
int save_syn = 1; int save_syn = 1;
int rv = -1; int rv = -1;
int v = 0; int v = 0;
......
...@@ -209,7 +209,6 @@ int erspan_get_tunnel(struct __sk_buff *skb) ...@@ -209,7 +209,6 @@ int erspan_get_tunnel(struct __sk_buff *skb)
{ {
struct bpf_tunnel_key key; struct bpf_tunnel_key key;
struct erspan_metadata md; struct erspan_metadata md;
__u32 index;
int ret; int ret;
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
...@@ -289,7 +288,6 @@ int ip4ip6erspan_get_tunnel(struct __sk_buff *skb) ...@@ -289,7 +288,6 @@ int ip4ip6erspan_get_tunnel(struct __sk_buff *skb)
{ {
struct bpf_tunnel_key key; struct bpf_tunnel_key key;
struct erspan_metadata md; struct erspan_metadata md;
__u32 index;
int ret; int ret;
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
...@@ -405,8 +403,6 @@ int vxlan_get_tunnel_src(struct __sk_buff *skb) ...@@ -405,8 +403,6 @@ int vxlan_get_tunnel_src(struct __sk_buff *skb)
int ret; int ret;
struct bpf_tunnel_key key; struct bpf_tunnel_key key;
struct vxlan_metadata md; struct vxlan_metadata md;
__u32 orig_daddr;
__u32 index = 0;
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_FLAGS); BPF_F_TUNINFO_FLAGS);
...@@ -443,9 +439,7 @@ int veth_set_outer_dst(struct __sk_buff *skb) ...@@ -443,9 +439,7 @@ int veth_set_outer_dst(struct __sk_buff *skb)
void *data_end = (void *)(long)skb->data_end; void *data_end = (void *)(long)skb->data_end;
struct udphdr *udph; struct udphdr *udph;
struct iphdr *iph; struct iphdr *iph;
__u32 index = 0;
int ret = 0; int ret = 0;
int shrink;
__s64 csum; __s64 csum;
if ((void *)eth + sizeof(*eth) > data_end) { if ((void *)eth + sizeof(*eth) > data_end) {
......
...@@ -18,8 +18,6 @@ int usdt_100_sum; ...@@ -18,8 +18,6 @@ int usdt_100_sum;
SEC("usdt//proc/self/exe:test:usdt_100") SEC("usdt//proc/self/exe:test:usdt_100")
int BPF_USDT(usdt_100, int x) int BPF_USDT(usdt_100, int x)
{ {
long tmp;
if (my_pid != (bpf_get_current_pid_tgid() >> 32)) if (my_pid != (bpf_get_current_pid_tgid() >> 32))
return 0; return 0;
......
...@@ -11,7 +11,7 @@ int balancer_ingress(struct __sk_buff *ctx) ...@@ -11,7 +11,7 @@ int balancer_ingress(struct __sk_buff *ctx)
void *data_end = (void *)(long)ctx->data_end; void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data; void *data = (void *)(long)ctx->data;
void *ptr; void *ptr;
int ret = 0, nh_off, i = 0; int nh_off, i = 0;
nh_off = 14; nh_off = 14;
......
...@@ -11,7 +11,7 @@ int balancer_ingress(struct __sk_buff *ctx) ...@@ -11,7 +11,7 @@ int balancer_ingress(struct __sk_buff *ctx)
void *data_end = (void *)(long)ctx->data_end; void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data; void *data = (void *)(long)ctx->data;
void *ptr; void *ptr;
int ret = 0, nh_off, i = 0; int nh_off, i = 0;
nh_off = 14; nh_off = 14;
......
...@@ -11,7 +11,7 @@ int balancer_ingress(struct __sk_buff *ctx) ...@@ -11,7 +11,7 @@ int balancer_ingress(struct __sk_buff *ctx)
void *data_end = (void *)(long)ctx->data_end; void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data; void *data = (void *)(long)ctx->data;
void *ptr; void *ptr;
int ret = 0, nh_off, i = 0; int nh_off, i = 0;
nh_off = 32; nh_off = 32;
......
...@@ -5,8 +5,6 @@ ...@@ -5,8 +5,6 @@
SEC("xdp") SEC("xdp")
int _xdp_adjust_tail_grow(struct xdp_md *xdp) int _xdp_adjust_tail_grow(struct xdp_md *xdp)
{ {
void *data_end = (void *)(long)xdp->data_end;
void *data = (void *)(long)xdp->data;
int data_len = bpf_xdp_get_buff_len(xdp); int data_len = bpf_xdp_get_buff_len(xdp);
int offset = 0; int offset = 0;
/* SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */ /* SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */
......
...@@ -45,8 +45,6 @@ SEC("fentry/FUNC") ...@@ -45,8 +45,6 @@ SEC("fentry/FUNC")
int BPF_PROG(trace_on_entry, struct xdp_buff *xdp) int BPF_PROG(trace_on_entry, struct xdp_buff *xdp)
{ {
struct meta meta; struct meta meta;
void *data_end = (void *)(long)xdp->data_end;
void *data = (void *)(long)xdp->data;
meta.ifindex = xdp->rxq->dev->ifindex; meta.ifindex = xdp->rxq->dev->ifindex;
meta.pkt_len = bpf_xdp_get_buff_len((struct xdp_md *)xdp); meta.pkt_len = bpf_xdp_get_buff_len((struct xdp_md *)xdp);
......
...@@ -82,7 +82,6 @@ static __always_inline int handle_ipv4(struct xdp_md *xdp, struct bpf_dynptr *xd ...@@ -82,7 +82,6 @@ static __always_inline int handle_ipv4(struct xdp_md *xdp, struct bpf_dynptr *xd
struct iptnl_info *tnl; struct iptnl_info *tnl;
struct ethhdr *new_eth; struct ethhdr *new_eth;
struct ethhdr *old_eth; struct ethhdr *old_eth;
__u32 transport_hdr_sz;
struct iphdr *iph; struct iphdr *iph;
__u16 *next_iph; __u16 *next_iph;
__u16 payload_len; __u16 payload_len;
...@@ -165,7 +164,6 @@ static __always_inline int handle_ipv6(struct xdp_md *xdp, struct bpf_dynptr *xd ...@@ -165,7 +164,6 @@ static __always_inline int handle_ipv6(struct xdp_md *xdp, struct bpf_dynptr *xd
struct iptnl_info *tnl; struct iptnl_info *tnl;
struct ethhdr *new_eth; struct ethhdr *new_eth;
struct ethhdr *old_eth; struct ethhdr *old_eth;
__u32 transport_hdr_sz;
struct ipv6hdr *ip6h; struct ipv6hdr *ip6h;
__u16 payload_len; __u16 payload_len;
struct vip vip = {}; struct vip vip = {};
......
...@@ -371,45 +371,6 @@ bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval, ...@@ -371,45 +371,6 @@ bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval,
return true; return true;
} }
static __attribute__ ((noinline))
bool decap_v6(struct xdp_md *xdp, void **data, void **data_end, bool inner_v4)
{
struct eth_hdr *new_eth;
struct eth_hdr *old_eth;
old_eth = *data;
new_eth = *data + sizeof(struct ipv6hdr);
memcpy(new_eth->eth_source, old_eth->eth_source, 6);
memcpy(new_eth->eth_dest, old_eth->eth_dest, 6);
if (inner_v4)
new_eth->eth_proto = 8;
else
new_eth->eth_proto = 56710;
if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct ipv6hdr)))
return false;
*data = (void *)(long)xdp->data;
*data_end = (void *)(long)xdp->data_end;
return true;
}
static __attribute__ ((noinline))
bool decap_v4(struct xdp_md *xdp, void **data, void **data_end)
{
struct eth_hdr *new_eth;
struct eth_hdr *old_eth;
old_eth = *data;
new_eth = *data + sizeof(struct iphdr);
memcpy(new_eth->eth_source, old_eth->eth_source, 6);
memcpy(new_eth->eth_dest, old_eth->eth_dest, 6);
new_eth->eth_proto = 8;
if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct iphdr)))
return false;
*data = (void *)(long)xdp->data;
*data_end = (void *)(long)xdp->data_end;
return true;
}
static __attribute__ ((noinline)) static __attribute__ ((noinline))
int swap_mac_and_send(void *data, void *data_end) int swap_mac_and_send(void *data, void *data_end)
{ {
...@@ -430,7 +391,6 @@ int send_icmp_reply(void *data, void *data_end) ...@@ -430,7 +391,6 @@ int send_icmp_reply(void *data, void *data_end)
__u16 *next_iph_u16; __u16 *next_iph_u16;
__u32 tmp_addr = 0; __u32 tmp_addr = 0;
struct iphdr *iph; struct iphdr *iph;
__u32 csum1 = 0;
__u32 csum = 0; __u32 csum = 0;
__u64 off = 0; __u64 off = 0;
...@@ -662,7 +622,6 @@ static int process_l3_headers_v4(struct packet_description *pckt, ...@@ -662,7 +622,6 @@ static int process_l3_headers_v4(struct packet_description *pckt,
void *data_end) void *data_end)
{ {
struct iphdr *iph; struct iphdr *iph;
__u64 iph_len;
int action; int action;
iph = data + off; iph = data + off;
...@@ -696,7 +655,6 @@ static int process_packet(void *data, __u64 off, void *data_end, ...@@ -696,7 +655,6 @@ static int process_packet(void *data, __u64 off, void *data_end,
struct packet_description pckt = { }; struct packet_description pckt = { };
struct vip_definition vip = { }; struct vip_definition vip = { };
struct lb_stats *data_stats; struct lb_stats *data_stats;
struct eth_hdr *eth = data;
void *lru_map = &lru_cache; void *lru_map = &lru_cache;
struct vip_meta *vip_info; struct vip_meta *vip_info;
__u32 lru_stats_key = 513; __u32 lru_stats_key = 513;
...@@ -704,7 +662,6 @@ static int process_packet(void *data, __u64 off, void *data_end, ...@@ -704,7 +662,6 @@ static int process_packet(void *data, __u64 off, void *data_end,
__u32 stats_key = 512; __u32 stats_key = 512;
struct ctl_value *cval; struct ctl_value *cval;
__u16 pkt_bytes; __u16 pkt_bytes;
__u64 iph_len;
__u8 protocol; __u8 protocol;
__u32 vip_num; __u32 vip_num;
int action; int action;
......
...@@ -209,19 +209,6 @@ int xdp_prognum2(struct xdp_md *ctx) ...@@ -209,19 +209,6 @@ int xdp_prognum2(struct xdp_md *ctx)
return XDP_PASS; return XDP_PASS;
} }
static __always_inline
void shift_mac_4bytes_16bit(void *data)
{
__u16 *p = data;
p[7] = p[5]; /* delete p[7] was vlan_hdr->h_vlan_TCI */
p[6] = p[4]; /* delete p[6] was ethhdr->h_proto */
p[5] = p[3];
p[4] = p[2];
p[3] = p[1];
p[2] = p[0];
}
static __always_inline static __always_inline
void shift_mac_4bytes_32bit(void *data) void shift_mac_4bytes_32bit(void *data)
{ {
......
...@@ -63,7 +63,6 @@ SEC("?tp_btf/sys_enter") ...@@ -63,7 +63,6 @@ SEC("?tp_btf/sys_enter")
int BPF_PROG(untrusted_ptr, struct pt_regs *regs, long id) int BPF_PROG(untrusted_ptr, struct pt_regs *regs, long id)
{ {
struct task_struct *task, *task_dup; struct task_struct *task, *task_dup;
long *ptr;
task = bpf_get_current_task_btf(); task = bpf_get_current_task_btf();
task_dup = bpf_rdonly_cast(task, bpf_core_type_id_kernel(struct task_struct)); task_dup = bpf_rdonly_cast(task, bpf_core_type_id_kernel(struct task_struct));
......
...@@ -17,7 +17,6 @@ SEC("cgroup/sock_create") ...@@ -17,7 +17,6 @@ SEC("cgroup/sock_create")
int sock(struct bpf_sock *ctx) int sock(struct bpf_sock *ctx)
{ {
int *sk_storage; int *sk_storage;
__u32 key;
if (ctx->type != SOCK_DGRAM) if (ctx->type != SOCK_DGRAM)
return 1; return 1;
...@@ -46,7 +45,6 @@ SEC("cgroup/sock_release") ...@@ -46,7 +45,6 @@ SEC("cgroup/sock_release")
int sock_release(struct bpf_sock *ctx) int sock_release(struct bpf_sock *ctx)
{ {
int *sk_storage; int *sk_storage;
__u32 key;
if (ctx->type != SOCK_DGRAM) if (ctx->type != SOCK_DGRAM)
return 1; return 1;
......
...@@ -162,8 +162,6 @@ SEC("fentry/" SYS_PREFIX "sys_prctl") ...@@ -162,8 +162,6 @@ SEC("fentry/" SYS_PREFIX "sys_prctl")
int test_user_ringbuf_protocol(void *ctx) int test_user_ringbuf_protocol(void *ctx)
{ {
long status = 0; long status = 0;
struct sample *sample = NULL;
struct bpf_dynptr ptr;
if (!is_test_process()) if (!is_test_process())
return 0; return 0;
...@@ -183,10 +181,6 @@ int test_user_ringbuf_protocol(void *ctx) ...@@ -183,10 +181,6 @@ int test_user_ringbuf_protocol(void *ctx)
SEC("fentry/" SYS_PREFIX "sys_getpgid") SEC("fentry/" SYS_PREFIX "sys_getpgid")
int test_user_ringbuf(void *ctx) int test_user_ringbuf(void *ctx)
{ {
int status = 0;
struct sample *sample = NULL;
struct bpf_dynptr ptr;
if (!is_test_process()) if (!is_test_process())
return 0; return 0;
......
...@@ -70,7 +70,6 @@ xdp_process_echo_packet(struct xdp_md *xdp, bool dut) ...@@ -70,7 +70,6 @@ xdp_process_echo_packet(struct xdp_md *xdp, bool dut)
struct tlv_hdr *tlv; struct tlv_hdr *tlv;
struct udphdr *uh; struct udphdr *uh;
__be16 port; __be16 port;
__u8 *cmd;
if (eh + 1 > (struct ethhdr *)data_end) if (eh + 1 > (struct ethhdr *)data_end)
return -EINVAL; return -EINVAL;
......
...@@ -89,7 +89,6 @@ static __always_inline int icmp_check(struct xdp_md *ctx, int type) ...@@ -89,7 +89,6 @@ static __always_inline int icmp_check(struct xdp_md *ctx, int type)
SEC("xdp") SEC("xdp")
int xdping_client(struct xdp_md *ctx) int xdping_client(struct xdp_md *ctx)
{ {
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data; void *data = (void *)(long)ctx->data;
struct pinginfo *pinginfo = NULL; struct pinginfo *pinginfo = NULL;
struct ethhdr *eth = data; struct ethhdr *eth = data;
...@@ -153,7 +152,6 @@ int xdping_client(struct xdp_md *ctx) ...@@ -153,7 +152,6 @@ int xdping_client(struct xdp_md *ctx)
SEC("xdp") SEC("xdp")
int xdping_server(struct xdp_md *ctx) int xdping_server(struct xdp_md *ctx)
{ {
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data; void *data = (void *)(long)ctx->data;
struct ethhdr *eth = data; struct ethhdr *eth = data;
struct icmphdr *icmph; struct icmphdr *icmph;
......
...@@ -321,7 +321,6 @@ int edgewall(struct xdp_md *ctx) ...@@ -321,7 +321,6 @@ int edgewall(struct xdp_md *ctx)
void *data = (void *)(long)(ctx->data); void *data = (void *)(long)(ctx->data);
struct fw_match_info match_info = {}; struct fw_match_info match_info = {};
struct pkt_info info = {}; struct pkt_info info = {};
__u8 parse_err = NO_ERR;
void *transport_hdr; void *transport_hdr;
struct ethhdr *eth; struct ethhdr *eth;
bool filter_res; bool filter_res;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment