Commit 7d3851a3 authored by Martin KaFai Lau, committed by Alexei Starovoitov

selftests/bpf: Sanitize the SEC and inline usages in the bpf-tcp-cc tests

The BPF_STRUCT_OPS usages need to be removed from the tcp-cc tests
because the macro is defined in bpf_tcp_helpers.h, which is going to be
retired. While at it, this patch consolidates all tcp-cc struct_ops
programs to use SEC("struct_ops") + BPF_PROG().

It also removes the unnecessary __always_inline usages from the
tcp-cc tests.
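
The attribute is unnecessary for these static helpers: they are ordinary
functions that the compiler remains free to inline on its own when
building the BPF object, so dropping the annotation changes nothing
functionally. For example:

	/* Before: inlining was forced. */
	static __always_inline __u64 div64_u64(__u64 dividend, __u64 divisor)
	{
		return dividend / divisor;
	}

	/* After: a plain static function; clang can still inline it
	 * where profitable.
	 */
	static __u64 div64_u64(__u64 dividend, __u64 divisor)
	{
		return dividend / divisor;
	}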
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://lore.kernel.org/r/20240509175026.3423614-5-martin.lau@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent cc5b18ce
@@ -17,10 +17,6 @@
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 
-#define BPF_STRUCT_OPS(name, args...) \
-SEC("struct_ops/"#name) \
-BPF_PROG(name, args)
-
 #define USEC_PER_SEC 1000000UL
 #define TCP_PACING_SS_RATIO (200)
 #define TCP_PACING_CA_RATIO (120)
@@ -114,18 +110,21 @@ static bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 	return flag & FLAG_DATA_ACKED;
 }
 
-void BPF_STRUCT_OPS(bpf_cubic_init, struct sock *sk)
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_init, struct sock *sk)
 {
 	cubictcp_init(sk);
 }
 
-void BPF_STRUCT_OPS(bpf_cubic_cwnd_event, struct sock *sk, enum tcp_ca_event event)
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_cwnd_event, struct sock *sk, enum tcp_ca_event event)
 {
 	cubictcp_cwnd_event(sk, event);
 }
 
-void BPF_STRUCT_OPS(bpf_cubic_cong_control, struct sock *sk, __u32 ack, int flag,
-		    const struct rate_sample *rs)
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_cong_control, struct sock *sk, __u32 ack, int flag,
+	      const struct rate_sample *rs)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -151,23 +150,26 @@ void BPF_STRUCT_OPS(bpf_cubic_cong_control, struct sock *sk, __u32 ack, int flag
 	tcp_update_pacing_rate(sk);
 }
 
-__u32 BPF_STRUCT_OPS(bpf_cubic_recalc_ssthresh, struct sock *sk)
+SEC("struct_ops")
+__u32 BPF_PROG(bpf_cubic_recalc_ssthresh, struct sock *sk)
 {
 	return cubictcp_recalc_ssthresh(sk);
 }
 
-void BPF_STRUCT_OPS(bpf_cubic_state, struct sock *sk, __u8 new_state)
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_state, struct sock *sk, __u8 new_state)
 {
 	cubictcp_state(sk, new_state);
 }
 
-void BPF_STRUCT_OPS(bpf_cubic_acked, struct sock *sk,
-		    const struct ack_sample *sample)
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_acked, struct sock *sk, const struct ack_sample *sample)
 {
 	cubictcp_acked(sk, sample);
 }
 
-__u32 BPF_STRUCT_OPS(bpf_cubic_undo_cwnd, struct sock *sk)
+SEC("struct_ops")
+__u32 BPF_PROG(bpf_cubic_undo_cwnd, struct sock *sk)
 {
 	return tcp_reno_undo_cwnd(sk);
 }
...
@@ -91,7 +91,7 @@ struct bictcp {
 	__u32	curr_rtt;	/* the minimum rtt of current round */
 };
 
-static inline void bictcp_reset(struct bictcp *ca)
+static void bictcp_reset(struct bictcp *ca)
 {
 	ca->cnt = 0;
 	ca->last_max_cwnd = 0;
@@ -112,7 +112,7 @@ extern unsigned long CONFIG_HZ __kconfig;
 #define USEC_PER_SEC 1000000UL
 #define USEC_PER_JIFFY (USEC_PER_SEC / HZ)
 
-static __always_inline __u64 div64_u64(__u64 dividend, __u64 divisor)
+static __u64 div64_u64(__u64 dividend, __u64 divisor)
 {
 	return dividend / divisor;
 }
@@ -120,7 +120,7 @@ static __always_inline __u64 div64_u64(__u64 dividend, __u64 divisor)
 #define div64_ul div64_u64
 
 #define BITS_PER_U64 (sizeof(__u64) * 8)
-static __always_inline int fls64(__u64 x)
+static int fls64(__u64 x)
 {
 	int num = BITS_PER_U64 - 1;
@@ -153,12 +153,12 @@ static __always_inline int fls64(__u64 x)
 	return num + 1;
 }
 
-static __always_inline __u32 bictcp_clock_us(const struct sock *sk)
+static __u32 bictcp_clock_us(const struct sock *sk)
 {
 	return tcp_sk(sk)->tcp_mstamp;
 }
 
-static __always_inline void bictcp_hystart_reset(struct sock *sk)
+static void bictcp_hystart_reset(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
@@ -169,8 +169,7 @@ static __always_inline void bictcp_hystart_reset(struct sock *sk)
 	ca->sample_cnt = 0;
 }
 
-/* "struct_ops/" prefix is a requirement */
-SEC("struct_ops/bpf_cubic_init")
+SEC("struct_ops")
 void BPF_PROG(bpf_cubic_init, struct sock *sk)
 {
 	struct bictcp *ca = inet_csk_ca(sk);
@@ -184,8 +183,7 @@ void BPF_PROG(bpf_cubic_init, struct sock *sk)
 		tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
 }
 
-/* "struct_ops" prefix is a requirement */
-SEC("struct_ops/bpf_cubic_cwnd_event")
+SEC("struct_ops")
 void BPF_PROG(bpf_cubic_cwnd_event, struct sock *sk, enum tcp_ca_event event)
 {
 	if (event == CA_EVENT_TX_START) {
@@ -230,7 +228,7 @@ static const __u8 v[] = {
  * Newton-Raphson iteration.
  * Avg err ~= 0.195%
  */
-static __always_inline __u32 cubic_root(__u64 a)
+static __u32 cubic_root(__u64 a)
 {
 	__u32 x, b, shift;
@@ -263,8 +261,7 @@ static __always_inline __u32 cubic_root(__u64 a)
 /*
  * Compute congestion window to use.
  */
-static __always_inline void bictcp_update(struct bictcp *ca, __u32 cwnd,
-					  __u32 acked)
+static void bictcp_update(struct bictcp *ca, __u32 cwnd, __u32 acked)
 {
 	__u32 delta, bic_target, max_cnt;
 	__u64 offs, t;
@@ -377,8 +374,8 @@ static __always_inline void bictcp_update(struct bictcp *ca, __u32 cwnd,
 		ca->cnt = max(ca->cnt, 2U);
 }
 
-/* Or simply use the BPF_STRUCT_OPS to avoid the SEC boiler plate. */
-void BPF_STRUCT_OPS(bpf_cubic_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
@@ -397,7 +394,8 @@ void BPF_STRUCT_OPS(bpf_cubic_cong_avoid, struct sock *sk, __u32 ack, __u32 acke
 	tcp_cong_avoid_ai(tp, ca->cnt, acked);
 }
 
-__u32 BPF_STRUCT_OPS(bpf_cubic_recalc_ssthresh, struct sock *sk)
+SEC("struct_ops")
+__u32 BPF_PROG(bpf_cubic_recalc_ssthresh, struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
@@ -414,7 +412,8 @@ __u32 BPF_STRUCT_OPS(bpf_cubic_recalc_ssthresh, struct sock *sk)
 	return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
 }
 
-void BPF_STRUCT_OPS(bpf_cubic_state, struct sock *sk, __u8 new_state)
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_state, struct sock *sk, __u8 new_state)
 {
 	if (new_state == TCP_CA_Loss) {
 		bictcp_reset(inet_csk_ca(sk));
@@ -433,7 +432,7 @@ void BPF_STRUCT_OPS(bpf_cubic_state, struct sock *sk, __u8 new_state)
  * We apply another 100% factor because @rate is doubled at this point.
  * We cap the cushion to 1ms.
  */
-static __always_inline __u32 hystart_ack_delay(struct sock *sk)
+static __u32 hystart_ack_delay(struct sock *sk)
 {
 	unsigned long rate;
@@ -444,7 +443,7 @@ static __always_inline __u32 hystart_ack_delay(struct sock *sk)
 		     div64_ul((__u64)GSO_MAX_SIZE * 4 * USEC_PER_SEC, rate));
 }
 
-static __always_inline void hystart_update(struct sock *sk, __u32 delay)
+static void hystart_update(struct sock *sk, __u32 delay)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
@@ -492,8 +491,8 @@ static __always_inline void hystart_update(struct sock *sk, __u32 delay)
 int bpf_cubic_acked_called = 0;
 
-void BPF_STRUCT_OPS(bpf_cubic_acked, struct sock *sk,
-		    const struct ack_sample *sample)
+SEC("struct_ops")
+void BPF_PROG(bpf_cubic_acked, struct sock *sk, const struct ack_sample *sample)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
@@ -524,7 +523,8 @@ void BPF_STRUCT_OPS(bpf_cubic_acked, struct sock *sk,
 
 extern __u32 tcp_reno_undo_cwnd(struct sock *sk) __ksym;
 
-__u32 BPF_STRUCT_OPS(bpf_cubic_undo_cwnd, struct sock *sk)
+SEC("struct_ops")
+__u32 BPF_PROG(bpf_cubic_undo_cwnd, struct sock *sk)
 {
 	return tcp_reno_undo_cwnd(sk);
 }
...
@@ -48,8 +48,7 @@ struct dctcp {
 static unsigned int dctcp_shift_g = 4; /* g = 1/2^4 */
 static unsigned int dctcp_alpha_on_init = DCTCP_MAX_ALPHA;
 
-static __always_inline void dctcp_reset(const struct tcp_sock *tp,
-					struct dctcp *ca)
+static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
 {
 	ca->next_seq = tp->snd_nxt;
@@ -57,7 +56,7 @@ static __always_inline void dctcp_reset(const struct tcp_sock *tp,
 	ca->old_delivered_ce = tp->delivered_ce;
 }
 
-SEC("struct_ops/dctcp_init")
+SEC("struct_ops")
 void BPF_PROG(dctcp_init, struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
@@ -104,7 +103,7 @@ void BPF_PROG(dctcp_init, struct sock *sk)
 	dctcp_reset(tp, ca);
 }
 
-SEC("struct_ops/dctcp_ssthresh")
+SEC("struct_ops")
 __u32 BPF_PROG(dctcp_ssthresh, struct sock *sk)
 {
 	struct dctcp *ca = inet_csk_ca(sk);
@@ -114,7 +113,7 @@ __u32 BPF_PROG(dctcp_ssthresh, struct sock *sk)
 	return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
 }
 
-SEC("struct_ops/dctcp_update_alpha")
+SEC("struct_ops")
 void BPF_PROG(dctcp_update_alpha, struct sock *sk, __u32 flags)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
@@ -144,7 +143,7 @@ void BPF_PROG(dctcp_update_alpha, struct sock *sk, __u32 flags)
 	}
 }
 
-static __always_inline void dctcp_react_to_loss(struct sock *sk)
+static void dctcp_react_to_loss(struct sock *sk)
 {
 	struct dctcp *ca = inet_csk_ca(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -153,7 +152,7 @@ static __always_inline void dctcp_react_to_loss(struct sock *sk)
 	tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
 }
 
-SEC("struct_ops/dctcp_state")
+SEC("struct_ops")
 void BPF_PROG(dctcp_state, struct sock *sk, __u8 new_state)
 {
 	if (new_state == TCP_CA_Recovery &&
@@ -164,7 +163,7 @@ void BPF_PROG(dctcp_state, struct sock *sk, __u8 new_state)
 	 */
 }
 
-static __always_inline void dctcp_ece_ack_cwr(struct sock *sk, __u32 ce_state)
+static void dctcp_ece_ack_cwr(struct sock *sk, __u32 ce_state)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -179,9 +178,8 @@ static __always_inline void dctcp_ece_ack_cwr(struct sock *sk, __u32 ce_state)
  * S:	0 <- last pkt was non-CE
  *	1 <- last pkt was CE
  */
-static __always_inline
-void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
-			  __u32 *prior_rcv_nxt, __u32 *ce_state)
+static void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
+				 __u32 *prior_rcv_nxt, __u32 *ce_state)
 {
 	__u32 new_ce_state = (evt == CA_EVENT_ECN_IS_CE) ? 1 : 0;
@@ -201,7 +199,7 @@ void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
 	dctcp_ece_ack_cwr(sk, new_ce_state);
 }
 
-SEC("struct_ops/dctcp_cwnd_event")
+SEC("struct_ops")
 void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
 {
 	struct dctcp *ca = inet_csk_ca(sk);
@@ -220,7 +218,7 @@ void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
 	}
 }
 
-SEC("struct_ops/dctcp_cwnd_undo")
+SEC("struct_ops")
 __u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk)
 {
 	const struct dctcp *ca = inet_csk_ca(sk);
@@ -230,7 +228,7 @@ __u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk)
 
 extern void tcp_reno_cong_avoid(struct sock *sk, __u32 ack, __u32 acked) __ksym;
 
-SEC("struct_ops/dctcp_reno_cong_avoid")
+SEC("struct_ops")
 void BPF_PROG(dctcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
 {
 	tcp_reno_cong_avoid(sk, ack, acked);
...
@@ -13,7 +13,8 @@
 char _license[] SEC("license") = "GPL";
 const char cubic[] = "cubic";
 
-void BPF_STRUCT_OPS(dctcp_nouse_release, struct sock *sk)
+SEC("struct_ops")
+void BPF_PROG(dctcp_nouse_release, struct sock *sk)
 {
 	bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
 		       (void *)cubic, sizeof(cubic));
...
@@ -8,7 +8,8 @@
 char _license[] SEC("license") = "X";
 
-void BPF_STRUCT_OPS(nogpltcp_init, struct sock *sk)
+SEC("struct_ops")
+void BPF_PROG(nogpltcp_init, struct sock *sk)
 {
 }
...
@@ -6,13 +6,13 @@
 char _license[] SEC("license") = "GPL";
 
-SEC("struct_ops/incompl_cong_ops_ssthresh")
+SEC("struct_ops")
 __u32 BPF_PROG(incompl_cong_ops_ssthresh, struct sock *sk)
 {
 	return tcp_sk(sk)->snd_ssthresh;
 }
 
-SEC("struct_ops/incompl_cong_ops_undo_cwnd")
+SEC("struct_ops")
 __u32 BPF_PROG(incompl_cong_ops_undo_cwnd, struct sock *sk)
 {
 	return tcp_sk(sk)->snd_cwnd;
...
@@ -27,7 +27,7 @@ extern void cubictcp_state(struct sock *sk, u8 new_state) __ksym;
 extern void cubictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event) __ksym;
 extern void cubictcp_acked(struct sock *sk, const struct ack_sample *sample) __ksym;
 
-SEC("struct_ops/init")
+SEC("struct_ops")
 void BPF_PROG(init, struct sock *sk)
 {
 	bbr_init(sk);
@@ -35,38 +35,38 @@ void BPF_PROG(init, struct sock *sk)
 	cubictcp_init(sk);
 }
 
-SEC("struct_ops/in_ack_event")
+SEC("struct_ops")
 void BPF_PROG(in_ack_event, struct sock *sk, u32 flags)
 {
 	dctcp_update_alpha(sk, flags);
 }
 
-SEC("struct_ops/cong_control")
+SEC("struct_ops")
 void BPF_PROG(cong_control, struct sock *sk, u32 ack, int flag, const struct rate_sample *rs)
 {
 	bbr_main(sk, ack, flag, rs);
 }
 
-SEC("struct_ops/cong_avoid")
+SEC("struct_ops")
 void BPF_PROG(cong_avoid, struct sock *sk, u32 ack, u32 acked)
 {
 	cubictcp_cong_avoid(sk, ack, acked);
 }
 
-SEC("struct_ops/sndbuf_expand")
+SEC("struct_ops")
 u32 BPF_PROG(sndbuf_expand, struct sock *sk)
 {
 	return bbr_sndbuf_expand(sk);
 }
 
-SEC("struct_ops/undo_cwnd")
+SEC("struct_ops")
 u32 BPF_PROG(undo_cwnd, struct sock *sk)
 {
 	bbr_undo_cwnd(sk);
 	return dctcp_cwnd_undo(sk);
 }
 
-SEC("struct_ops/cwnd_event")
+SEC("struct_ops")
 void BPF_PROG(cwnd_event, struct sock *sk, enum tcp_ca_event event)
 {
 	bbr_cwnd_event(sk, event);
@@ -74,7 +74,7 @@ void BPF_PROG(cwnd_event, struct sock *sk, enum tcp_ca_event event)
 	cubictcp_cwnd_event(sk, event);
 }
 
-SEC("struct_ops/ssthresh")
+SEC("struct_ops")
 u32 BPF_PROG(ssthresh, struct sock *sk)
 {
 	bbr_ssthresh(sk);
@@ -82,13 +82,13 @@ u32 BPF_PROG(ssthresh, struct sock *sk)
 	return cubictcp_recalc_ssthresh(sk);
 }
 
-SEC("struct_ops/min_tso_segs")
+SEC("struct_ops")
 u32 BPF_PROG(min_tso_segs, struct sock *sk)
 {
 	return bbr_min_tso_segs(sk);
 }
 
-SEC("struct_ops/set_state")
+SEC("struct_ops")
 void BPF_PROG(set_state, struct sock *sk, u8 new_state)
 {
 	bbr_set_state(sk, new_state);
@@ -96,7 +96,7 @@ void BPF_PROG(set_state, struct sock *sk, u8 new_state)
 	cubictcp_state(sk, new_state);
 }
 
-SEC("struct_ops/pkts_acked")
+SEC("struct_ops")
 void BPF_PROG(pkts_acked, struct sock *sk, const struct ack_sample *sample)
 {
 	cubictcp_acked(sk, sample);
...
@@ -7,7 +7,7 @@
 char _license[] SEC("license") = "GPL";
 
-SEC("struct_ops/unsupp_cong_op_get_info")
+SEC("struct_ops")
 size_t BPF_PROG(unsupp_cong_op_get_info, struct sock *sk, u32 ext, int *attr,
 		union tcp_cc_info *info)
 {
...
@@ -9,31 +9,31 @@ char _license[] SEC("license") = "GPL";
 int ca1_cnt = 0;
 int ca2_cnt = 0;
 
-SEC("struct_ops/ca_update_1_init")
+SEC("struct_ops")
 void BPF_PROG(ca_update_1_init, struct sock *sk)
 {
 	ca1_cnt++;
 }
 
-SEC("struct_ops/ca_update_2_init")
+SEC("struct_ops")
 void BPF_PROG(ca_update_2_init, struct sock *sk)
 {
 	ca2_cnt++;
 }
 
-SEC("struct_ops/ca_update_cong_control")
+SEC("struct_ops")
 void BPF_PROG(ca_update_cong_control, struct sock *sk,
 	      const struct rate_sample *rs)
 {
 }
 
-SEC("struct_ops/ca_update_ssthresh")
+SEC("struct_ops")
 __u32 BPF_PROG(ca_update_ssthresh, struct sock *sk)
 {
 	return tcp_sk(sk)->snd_ssthresh;
 }
 
-SEC("struct_ops/ca_update_undo_cwnd")
+SEC("struct_ops")
 __u32 BPF_PROG(ca_update_undo_cwnd, struct sock *sk)
 {
 	return tcp_sk(sk)->snd_cwnd;
...
@@ -10,17 +10,17 @@ char _license[] SEC("license") = "GPL";
 #define min(a, b) ((a) < (b) ? (a) : (b))
 
-static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
+static unsigned int tcp_left_out(const struct tcp_sock *tp)
 {
 	return tp->sacked_out + tp->lost_out;
 }
 
-static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
+static unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
 {
 	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
 }
 
-SEC("struct_ops/write_sk_pacing_init")
+SEC("struct_ops")
 void BPF_PROG(write_sk_pacing_init, struct sock *sk)
 {
 #ifdef ENABLE_ATOMICS_TESTS
@@ -31,7 +31,7 @@ void BPF_PROG(write_sk_pacing_init, struct sock *sk)
 #endif
 }
 
-SEC("struct_ops/write_sk_pacing_cong_control")
+SEC("struct_ops")
 void BPF_PROG(write_sk_pacing_cong_control, struct sock *sk,
 	      const struct rate_sample *rs)
 {
@@ -43,13 +43,13 @@ void BPF_PROG(write_sk_pacing_cong_control, struct sock *sk,
 	tp->app_limited = (tp->delivered + tcp_packets_in_flight(tp)) ?: 1;
 }
 
-SEC("struct_ops/write_sk_pacing_ssthresh")
+SEC("struct_ops")
 __u32 BPF_PROG(write_sk_pacing_ssthresh, struct sock *sk)
 {
 	return tcp_sk(sk)->snd_ssthresh;
 }
 
-SEC("struct_ops/write_sk_pacing_undo_cwnd")
+SEC("struct_ops")
 __u32 BPF_PROG(write_sk_pacing_undo_cwnd, struct sock *sk)
 {
 	return tcp_sk(sk)->snd_cwnd;
...