Commit 02db34d0 authored by David S. Miller

Merge branch 'bpf-BASE_RTT'

Lawrence Brakmo says:

====================
bpf: add support for BASE_RTT

This patch set adds the following functionality to socket_ops BPF
programs:
1) Add bpf helper function bpf_getsockopt. Currently it only supports
   TCP_CONGESTION.
2) Add the BPF_SOCK_OPS_BASE_RTT op to get the base RTT of the
   connection. In general, the base RTT indicates the threshold such
   that RTTs above it indicate congestion. More details in the
   relevant patches.

Consists of the following patches:

[PATCH net-next 1/5] bpf: add support for BPF_SOCK_OPS_BASE_RTT
[PATCH net-next 2/5] bpf: Adding helper function bpf_getsockops
[PATCH net-next 3/5] bpf: Add BPF_SOCKET_OPS_BASE_RTT support to
[PATCH net-next 4/5] bpf: sample BPF_SOCKET_OPS_BASE_RTT program
[PATCH net-next 5/5] bpf: create samples/bpf/tcp_bpf.readme
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 62d3f60b bfdf7569
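
For orientation before the diffs: a socket_ops program answers the
BPF_SOCK_OPS_BASE_RTT query by writing its reply into skops->reply, and can
use the new bpf_getsockopt helper to check which congestion control is in
use. Here is a minimal sketch, condensed from the full sample added in patch
4/5 (the usual sample includes from samples/bpf are assumed, and the 80us
value is the sample's, purely illustrative):

    SEC("sockops")
    int base_rtt_sketch(struct bpf_sock_ops *skops)
    {
    	char cong[20];
    	int rv = -1;

    	/* Answer BASE_RTT only when TCP-NV is the congestion control;
    	 * bpf_getsockopt returns 0 on success and NUL-terminates cong.
    	 */
    	if (skops->op == BPF_SOCK_OPS_BASE_RTT &&
    	    !bpf_getsockopt(skops, SOL_TCP, TCP_CONGESTION,
    			    cong, sizeof(cong)) &&
    	    !__builtin_memcmp(cong, "nv", 3))
    		rv = 80;	/* base RTT in us, illustrative */

    	skops->reply = rv;
    	return 1;
    }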
@@ -613,12 +613,22 @@ union bpf_attr {
  * int bpf_setsockopt(bpf_socket, level, optname, optval, optlen)
  *     Calls setsockopt. Not all opts are available, only those with
  *     integer optvals plus TCP_CONGESTION.
- *     Supported levels: SOL_SOCKET and IPROTO_TCP
+ *     Supported levels: SOL_SOCKET and IPPROTO_TCP
  *     @bpf_socket: pointer to bpf_socket
- *     @level: SOL_SOCKET or IPROTO_TCP
+ *     @level: SOL_SOCKET or IPPROTO_TCP
  *     @optname: option name
  *     @optval: pointer to option value
- *     @optlen: length of optval in byes
+ *     @optlen: length of optval in bytes
+ *     Return: 0 or negative error
+ *
+ * int bpf_getsockopt(bpf_socket, level, optname, optval, optlen)
+ *     Calls getsockopt. Not all opts are available.
+ *     Supported levels: IPPROTO_TCP
+ *     @bpf_socket: pointer to bpf_socket
+ *     @level: IPPROTO_TCP
+ *     @optname: option name
+ *     @optval: pointer to option value
+ *     @optlen: length of optval in bytes
  *     Return: 0 or negative error
  *
  * int bpf_skb_adjust_room(skb, len_diff, mode, flags)
@@ -721,7 +731,8 @@ union bpf_attr {
 	FN(sock_map_update),		\
 	FN(xdp_adjust_meta),		\
 	FN(perf_event_read_value),	\
-	FN(perf_prog_read_value),
+	FN(perf_prog_read_value),	\
+	FN(getsockopt),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
@@ -955,6 +966,13 @@ enum {
 	BPF_SOCK_OPS_NEEDS_ECN,		/* If connection's congestion control
 					 * needs ECN
 					 */
+	BPF_SOCK_OPS_BASE_RTT,		/* Get base RTT. The correct value is
+					 * based on the path and may be
+					 * dependent on the congestion control
+					 * algorithm. In general it indicates
+					 * a congestion threshold. RTTs above
+					 * this indicate congestion
+					 */
 };
 
 #define TCP_BPF_IW		1001	/* Set TCP initial congestion window */
...
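
The FN() list above is the single source of truth for helper IDs: bpf.h
expands it into the BPF_FUNC_* enum, so appending FN(getsockopt) is all it
takes to mint the BPF_FUNC_getsockopt value that the verifier and loaders
use. For context, the expansion mechanism already present in bpf.h looks
roughly like this (reproduced from memory, not part of this commit's diff):

    #define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
    enum bpf_func_id {
    	__BPF_FUNC_MAPPER(__BPF_ENUM_FN)
    	__BPF_FUNC_MAX_ID,
    };
    #undef __BPF_ENUM_FN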
@@ -3273,7 +3273,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
 
 static const struct bpf_func_proto bpf_setsockopt_proto = {
 	.func		= bpf_setsockopt,
-	.gpl_only	= true,
+	.gpl_only	= false,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_PTR_TO_CTX,
 	.arg2_type	= ARG_ANYTHING,
@@ -3282,6 +3282,48 @@ static const struct bpf_func_proto bpf_setsockopt_proto = {
 	.arg5_type	= ARG_CONST_SIZE,
 };
 
+BPF_CALL_5(bpf_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
+	   int, level, int, optname, char *, optval, int, optlen)
+{
+	struct sock *sk = bpf_sock->sk;
+	int ret = 0;
+
+	if (!sk_fullsock(sk))
+		goto err_clear;
+
+#ifdef CONFIG_INET
+	if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) {
+		if (optname == TCP_CONGESTION) {
+			struct inet_connection_sock *icsk = inet_csk(sk);
+
+			if (!icsk->icsk_ca_ops || optlen <= 1)
+				goto err_clear;
+			strncpy(optval, icsk->icsk_ca_ops->name, optlen);
+			optval[optlen - 1] = 0;
+		} else {
+			goto err_clear;
+		}
+	} else {
+		goto err_clear;
+	}
+	return ret;
+#endif
+
+err_clear:
+	memset(optval, 0, optlen);
+	return -EINVAL;
+}
+
+static const struct bpf_func_proto bpf_getsockopt_proto = {
+	.func		= bpf_getsockopt,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_ANYTHING,
+	.arg3_type	= ARG_ANYTHING,
+	.arg4_type	= ARG_PTR_TO_UNINIT_MEM,
+	.arg5_type	= ARG_CONST_SIZE,
+};
+
 static const struct bpf_func_proto *
 bpf_base_func_proto(enum bpf_func_id func_id)
 {
@@ -3460,6 +3502,8 @@ static const struct bpf_func_proto *
 	switch (func_id) {
 	case BPF_FUNC_setsockopt:
 		return &bpf_setsockopt_proto;
+	case BPF_FUNC_getsockopt:
+		return &bpf_getsockopt_proto;
 	case BPF_FUNC_sock_map_update:
 		return &bpf_sock_map_update_proto;
 	default:
...
@@ -39,7 +39,7 @@
  * nv_cong_dec_mult	Decrease cwnd by X% (30%) of congestion when detected
  * nv_ssthresh_factor	On congestion set ssthresh to this * <desired cwnd> / 8
  * nv_rtt_factor	RTT averaging factor
- * nv_loss_dec_factor	Decrease cwnd by this (50%) when losses occur
+ * nv_loss_dec_factor	Decrease cwnd to this (80%) when losses occur
  * nv_dec_eval_min_calls	Wait this many RTT measurements before dec cwnd
  * nv_inc_eval_min_calls	Wait this many RTT measurements before inc cwnd
  * nv_ssthresh_eval_min_calls	Wait this many RTT measurements before stopping
@@ -61,7 +61,7 @@ static int nv_min_cwnd __read_mostly = 2;
 static int nv_cong_dec_mult __read_mostly = 30 * 128 / 100;	/* = 30% */
 static int nv_ssthresh_factor __read_mostly = 8;	/* = 1 */
 static int nv_rtt_factor __read_mostly = 128;		/* = 1/2*old + 1/2*new */
-static int nv_loss_dec_factor __read_mostly = 512;	/* => 50% */
+static int nv_loss_dec_factor __read_mostly = 819;	/* => 80% */
 static int nv_cwnd_growth_rate_neg __read_mostly = 8;
 static int nv_cwnd_growth_rate_pos __read_mostly;	/* 0 => fixed like Reno */
 static int nv_dec_eval_min_calls __read_mostly = 60;
@@ -101,6 +101,11 @@ struct tcpnv {
 	u32 nv_last_rtt;	/* last rtt */
 	u32 nv_min_rtt;		/* active min rtt. Used to determine slope */
 	u32 nv_min_rtt_new;	/* min rtt for future use */
+	u32 nv_base_rtt;	/* If non-zero it represents the threshold for
+				 * congestion */
+	u32 nv_lower_bound_rtt;	/* Used in conjunction with nv_base_rtt. It is
+				 * set to 80% of nv_base_rtt. It helps reduce
+				 * unfairness between flows */
 	u32 nv_rtt_max_rate;	/* max rate seen during current RTT */
 	u32 nv_rtt_start_seq;	/* current RTT ends when packet arrives
 				 * acking beyond nv_rtt_start_seq */
@@ -132,9 +137,24 @@ static inline void tcpnv_reset(struct tcpnv *ca, struct sock *sk)
 static void tcpnv_init(struct sock *sk)
 {
 	struct tcpnv *ca = inet_csk_ca(sk);
+	int base_rtt;
 
 	tcpnv_reset(ca, sk);
 
+	/* See if base_rtt is available from socket_ops bpf program.
+	 * It is meant to be used in environments, such as communication
+	 * within a datacenter, where we have reasonable estimates of
+	 * RTTs
+	 */
+	base_rtt = tcp_call_bpf(sk, BPF_SOCK_OPS_BASE_RTT);
+	if (base_rtt > 0) {
+		ca->nv_base_rtt = base_rtt;
+		ca->nv_lower_bound_rtt = (base_rtt * 205) >> 8; /* 80% */
+	} else {
+		ca->nv_base_rtt = 0;
+		ca->nv_lower_bound_rtt = 0;
+	}
+
 	ca->nv_allow_cwnd_growth = 1;
 	ca->nv_min_rtt_reset_jiffies = jiffies + 2 * HZ;
 	ca->nv_min_rtt = NV_INIT_RTT;
@@ -144,6 +164,19 @@ static void tcpnv_init(struct sock *sk)
 	ca->cwnd_growth_factor = 0;
 }
 
+/* If provided, apply upper (base_rtt) and lower (lower_bound_rtt)
+ * bounds to RTT.
+ */
+inline u32 nv_get_bounded_rtt(struct tcpnv *ca, u32 val)
+{
+	if (ca->nv_lower_bound_rtt > 0 && val < ca->nv_lower_bound_rtt)
+		return ca->nv_lower_bound_rtt;
+	else if (ca->nv_base_rtt > 0 && val > ca->nv_base_rtt)
+		return ca->nv_base_rtt;
+	else
+		return val;
+}
+
 static void tcpnv_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -265,6 +298,9 @@ static void tcpnv_acked(struct sock *sk, const struct ack_sample *sample)
 		if (ca->nv_eval_call_cnt < 255)
 			ca->nv_eval_call_cnt++;
 
+		/* Apply bounds to rtt. Only used to update min_rtt */
+		avg_rtt = nv_get_bounded_rtt(ca, avg_rtt);
+
 		/* update min rtt if necessary */
 		if (avg_rtt < ca->nv_min_rtt)
 			ca->nv_min_rtt = avg_rtt;
...
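
A quick check of the fixed-point math above: tcpnv_init derives the lower
bound as (base_rtt * 205) >> 8, i.e. multiplication by 205/256 ≈ 0.80, and
nv_get_bounded_rtt then clamps measured RTTs into the range
[nv_lower_bound_rtt, nv_base_rtt]. A standalone sketch (hypothetical
userspace code, using the sample program's 80us base RTT):

    #include <stdio.h>
    #include <stdint.h>

    /* Same fixed-point scaling as tcp_nv: 205/256 is ~80% */
    static uint32_t lower_bound(uint32_t base_rtt)
    {
    	return (base_rtt * 205) >> 8;
    }

    int main(void)
    {
    	/* For an 80us base RTT: 80 * 205 = 16400, and
    	 * 16400 >> 8 = 64, so RTTs are clamped to [64, 80]us.
    	 */
    	printf("%u\n", lower_bound(80));	/* prints 64 */
    	return 0;
    }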
@@ -129,6 +129,7 @@ always += tcp_bufs_kern.o
 always += tcp_cong_kern.o
 always += tcp_iw_kern.o
 always += tcp_clamp_kern.o
+always += tcp_basertt_kern.o
 always += xdp_redirect_kern.o
 always += xdp_redirect_map_kern.o
 always += xdp_redirect_cpu_kern.o
...
/* Copyright (c) 2017 Facebook
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* BPF program to set base_rtt to 80us when host is running TCP-NV and
* both hosts are in the same datacenter (as determined by IPv6 prefix).
*
* Use load_sock_ops to load this BPF program.
*/
#include <uapi/linux/bpf.h>
#include <uapi/linux/tcp.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_packet.h>
#include <uapi/linux/ip.h>
#include <linux/socket.h>
#include "bpf_helpers.h"
#include "bpf_endian.h"
#define DEBUG 1
#define bpf_printk(fmt, ...) \
({ \
char ____fmt[] = fmt; \
bpf_trace_printk(____fmt, sizeof(____fmt), \
##__VA_ARGS__); \
})
SEC("sockops")
int bpf_basertt(struct bpf_sock_ops *skops)
{
char cong[20];
char nv[] = "nv";
int rv = 0, n;
int op;
op = (int) skops->op;
#ifdef DEBUG
bpf_printk("BPF command: %d\n", op);
#endif
/* Check if both hosts are in the same datacenter. For this
* example they are if the 1st 5.5 bytes in the IPv6 address
* are the same.
*/
if (skops->family == AF_INET6 &&
skops->local_ip6[0] == skops->remote_ip6[0] &&
(bpf_ntohl(skops->local_ip6[1]) & 0xfff00000) ==
(bpf_ntohl(skops->remote_ip6[1]) & 0xfff00000)) {
switch (op) {
case BPF_SOCK_OPS_BASE_RTT:
n = bpf_getsockopt(skops, SOL_TCP, TCP_CONGESTION,
cong, sizeof(cong));
			/* sizeof(nv) covers "nv" plus its NUL; comparing
			 * sizeof(nv)+1 bytes would read past the array.
			 */
			if (!n && !__builtin_memcmp(cong, nv, sizeof(nv))) {
/* Set base_rtt to 80us */
rv = 80;
} else if (n) {
rv = n;
} else {
rv = -1;
}
break;
default:
rv = -1;
}
} else {
rv = -1;
}
#ifdef DEBUG
bpf_printk("Returning %d\n", rv);
#endif
skops->reply = rv;
return 1;
}
char _license[] SEC("license") = "GPL";
This file describes how to run the tcp_*_kern.o tcp_bpf (socket_ops)
programs. These programs attach to a cgroupv2. The following commands create
a cgroupv2 and attach a bash shell to the group.

  mkdir -p /tmp/cgroupv2
  mount -t cgroup2 none /tmp/cgroupv2
  mkdir -p /tmp/cgroupv2/foo
  bash
  echo $$ >> /tmp/cgroupv2/foo/cgroup.procs

Anything that runs under this shell belongs to the foo cgroupv2. To load
(attach) one of the tcp_*_kern.o programs:

  ./load_sock_ops -l /tmp/cgroupv2/foo tcp_basertt_kern.o

If the "-l" flag is used, the load_sock_ops program will continue to run,
printing the BPF log buffer. The tcp_*_kern.o programs use special print
functions to print logging information (enabled here by the DEBUG ifdef).

If using netperf/netserver to create traffic, you need to run them under the
cgroupv2 to which the BPF programs are attached (i.e. under the bash shell
attached to the cgroupv2).
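
For example (host name is illustrative; -H and -l are standard netperf
flags), from the bash shell attached above:

  netserver                   # on the remote peer
  netperf -H <server> -l 30   # 30s of traffic from within the cgroup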
To remove (detach) a socket_ops BPF program from a cgroupv2:

  ./load_sock_ops -r /tmp/cgroupv2/foo
@@ -67,6 +67,9 @@ static int (*bpf_xdp_adjust_meta)(void *ctx, int offset) =
 static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval,
 			     int optlen) =
 	(void *) BPF_FUNC_setsockopt;
+static int (*bpf_getsockopt)(void *ctx, int level, int optname, void *optval,
+			     int optlen) =
+	(void *) BPF_FUNC_getsockopt;
 static int (*bpf_sk_redirect_map)(void *map, int key, int flags) =
 	(void *) BPF_FUNC_sk_redirect_map;
 static int (*bpf_sock_map_update)(void *map, void *key, void *value,
...