Commit 0ce089cb authored by Daniel Xu, committed by Alexei Starovoitov

bpf: selftests: Namespace struct_ops callbacks in bpf_dctcp

With generated kfunc prototypes, the existing callback names will
conflict. Fix by namespacing with a bpf_ prefix.
Signed-off-by: Daniel Xu <dxu@dxuuu.xyz>
Link: https://lore.kernel.org/r/efe7aadad8a054e5aeeba94b1d2e4502eee09d7a.1718207789.git.dxu@dxuuu.xyz
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 2b8dd873
...@@ -65,7 +65,7 @@ static void dctcp_reset(const struct tcp_sock *tp, struct bpf_dctcp *ca) ...@@ -65,7 +65,7 @@ static void dctcp_reset(const struct tcp_sock *tp, struct bpf_dctcp *ca)
} }
SEC("struct_ops") SEC("struct_ops")
void BPF_PROG(dctcp_init, struct sock *sk) void BPF_PROG(bpf_dctcp_init, struct sock *sk)
{ {
const struct tcp_sock *tp = tcp_sk(sk); const struct tcp_sock *tp = tcp_sk(sk);
struct bpf_dctcp *ca = inet_csk_ca(sk); struct bpf_dctcp *ca = inet_csk_ca(sk);
...@@ -77,7 +77,7 @@ void BPF_PROG(dctcp_init, struct sock *sk) ...@@ -77,7 +77,7 @@ void BPF_PROG(dctcp_init, struct sock *sk)
(void *)fallback, sizeof(fallback)) == -EBUSY) (void *)fallback, sizeof(fallback)) == -EBUSY)
ebusy_cnt++; ebusy_cnt++;
/* Switch back to myself and the recurred dctcp_init() /* Switch back to myself and the recurred bpf_dctcp_init()
* will get -EBUSY for all bpf_setsockopt(TCP_CONGESTION), * will get -EBUSY for all bpf_setsockopt(TCP_CONGESTION),
* except the last "cdg" one. * except the last "cdg" one.
*/ */
...@@ -112,7 +112,7 @@ void BPF_PROG(dctcp_init, struct sock *sk) ...@@ -112,7 +112,7 @@ void BPF_PROG(dctcp_init, struct sock *sk)
} }
SEC("struct_ops") SEC("struct_ops")
__u32 BPF_PROG(dctcp_ssthresh, struct sock *sk) __u32 BPF_PROG(bpf_dctcp_ssthresh, struct sock *sk)
{ {
struct bpf_dctcp *ca = inet_csk_ca(sk); struct bpf_dctcp *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
...@@ -122,7 +122,7 @@ __u32 BPF_PROG(dctcp_ssthresh, struct sock *sk) ...@@ -122,7 +122,7 @@ __u32 BPF_PROG(dctcp_ssthresh, struct sock *sk)
} }
SEC("struct_ops") SEC("struct_ops")
void BPF_PROG(dctcp_update_alpha, struct sock *sk, __u32 flags) void BPF_PROG(bpf_dctcp_update_alpha, struct sock *sk, __u32 flags)
{ {
const struct tcp_sock *tp = tcp_sk(sk); const struct tcp_sock *tp = tcp_sk(sk);
struct bpf_dctcp *ca = inet_csk_ca(sk); struct bpf_dctcp *ca = inet_csk_ca(sk);
...@@ -161,12 +161,12 @@ static void dctcp_react_to_loss(struct sock *sk) ...@@ -161,12 +161,12 @@ static void dctcp_react_to_loss(struct sock *sk)
} }
SEC("struct_ops") SEC("struct_ops")
void BPF_PROG(dctcp_state, struct sock *sk, __u8 new_state) void BPF_PROG(bpf_dctcp_state, struct sock *sk, __u8 new_state)
{ {
if (new_state == TCP_CA_Recovery && if (new_state == TCP_CA_Recovery &&
new_state != BPF_CORE_READ_BITFIELD(inet_csk(sk), icsk_ca_state)) new_state != BPF_CORE_READ_BITFIELD(inet_csk(sk), icsk_ca_state))
dctcp_react_to_loss(sk); dctcp_react_to_loss(sk);
/* We handle RTO in dctcp_cwnd_event to ensure that we perform only /* We handle RTO in bpf_dctcp_cwnd_event to ensure that we perform only
* one loss-adjustment per RTT. * one loss-adjustment per RTT.
*/ */
} }
...@@ -208,7 +208,7 @@ static void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt, ...@@ -208,7 +208,7 @@ static void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
} }
SEC("struct_ops") SEC("struct_ops")
void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev) void BPF_PROG(bpf_dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
{ {
struct bpf_dctcp *ca = inet_csk_ca(sk); struct bpf_dctcp *ca = inet_csk_ca(sk);
...@@ -227,7 +227,7 @@ void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev) ...@@ -227,7 +227,7 @@ void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
} }
SEC("struct_ops") SEC("struct_ops")
__u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk) __u32 BPF_PROG(bpf_dctcp_cwnd_undo, struct sock *sk)
{ {
const struct bpf_dctcp *ca = inet_csk_ca(sk); const struct bpf_dctcp *ca = inet_csk_ca(sk);
...@@ -237,28 +237,28 @@ __u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk) ...@@ -237,28 +237,28 @@ __u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk)
/* Kernel ksym: reuse the in-kernel Reno cong_avoid implementation. */
extern void tcp_reno_cong_avoid(struct sock *sk, __u32 ack, __u32 acked) __ksym;

/* struct_ops callback for tcp_congestion_ops.cong_avoid.
 *
 * DCTCP uses standard Reno congestion avoidance; delegate directly to the
 * kernel's tcp_reno_cong_avoid() via the ksym above.
 */
SEC("struct_ops")
void BPF_PROG(bpf_dctcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
{
	tcp_reno_cong_avoid(sk, ack, acked);
}
/* Registered but intentionally unused variant ("bpf_dctcp_nouse"); only
 * wires up init/set_state. Callbacks use the bpf_-prefixed names to avoid
 * clashing with generated kfunc prototypes.
 */
SEC(".struct_ops")
struct tcp_congestion_ops dctcp_nouse = {
	.init		= (void *)bpf_dctcp_init,
	.set_state	= (void *)bpf_dctcp_state,
	.flags		= TCP_CONG_NEEDS_ECN,
	.name		= "bpf_dctcp_nouse",
};
/* Full "bpf_dctcp" congestion-control registration. All callbacks carry the
 * bpf_ prefix so the symbol names cannot conflict with generated kfunc
 * prototypes for the native kernel dctcp implementation.
 */
SEC(".struct_ops")
struct tcp_congestion_ops dctcp = {
	.init		= (void *)bpf_dctcp_init,
	.in_ack_event	= (void *)bpf_dctcp_update_alpha,
	.cwnd_event	= (void *)bpf_dctcp_cwnd_event,
	.ssthresh	= (void *)bpf_dctcp_ssthresh,
	.cong_avoid	= (void *)bpf_dctcp_cong_avoid,
	.undo_cwnd	= (void *)bpf_dctcp_cwnd_undo,
	.set_state	= (void *)bpf_dctcp_state,
	.flags		= TCP_CONG_NEEDS_ECN,
	.name		= "bpf_dctcp",
};
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment