Commit 636d549f authored by Alexei Starovoitov

Merge branch 'bpf: misc performance improvements for cgroup'

Stanislav Fomichev says:

====================

First patch adds a custom getsockopt handler for TCP_ZEROCOPY_RECEIVE
to remove the kmalloc and lock_sock overhead from the data path.
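For context, the data path in question is the getsockopt() that zerocopy
receivers issue per batch of received data. A minimal userspace sketch,
mirroring the new selftest below (fd is assumed to be a connected TCP
socket and map_addr/map_len a previously mmap()ed region):

	#include <netinet/in.h>
	#include <sys/socket.h>
	#include <linux/tcp.h>	/* struct tcp_zerocopy_receive */

	#ifndef SOL_TCP
	#define SOL_TCP IPPROTO_TCP
	#endif

	static int zc_receive(int fd, void *map_addr, unsigned int map_len)
	{
		struct tcp_zerocopy_receive zc = {
			.address = (__u64)(unsigned long)map_addr,
			.length  = map_len,
		};
		socklen_t zc_len = sizeof(zc);

		/* Hot path: with a cgroup getsockopt program attached,
		 * every call here used to pay for kmalloc and lock_sock
		 * in the generic BPF hook.
		 */
		return getsockopt(fd, SOL_TCP, TCP_ZEROCOPY_RECEIVE,
				  &zc, &zc_len);
	}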

Second patch removes kzalloc/kfree from getsockopt for the common cases.
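A worked example of the common case, using the helpers and constants from
the diff below (the numbers are illustrative): a getsockopt() of a 4-byte
int arrives with *optlen == 4, so the hook now stays entirely on the stack:

	max_optlen = max_t(int, 16, *optlen);	/* -> 16 */
	if (max_optlen <= sizeof(buf->data))	/* 16 <= BPF_SOCKOPT_KERN_BUF_SIZE (32) */
		ctx->optval = buf->data;	/* on-stack buffer, no kzalloc/kfree */
	else
		ctx->optval = kzalloc(max_optlen, GFP_USER); /* large options only */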

Third patch switches cgroup_bpf_enabled to be per-attach-type
so that overhead is added only for the cgroup attach types used
on the system.
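The mechanism, condensed from the diff below: one static key per attach
type, flipped on attach/detach, so every hook's branch stays a patched-out
no-op until a program of that exact type is attached somewhere on the
system:

	DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_BPF_ATTACH_TYPE);
	#define cgroup_bpf_enabled(type) \
		static_branch_unlikely(&cgroup_bpf_enabled_key[type])

	/* attach/detach flip only the key for that attach type */
	static_branch_inc(&cgroup_bpf_enabled_key[type]);
	static_branch_dec(&cgroup_bpf_enabled_key[type]);

	/* e.g. the UDP sendmsg hook no longer fires just because some
	 * unrelated type (say BPF_CGROUP_INET_INGRESS) is in use:
	 */
	if (cgroup_bpf_enabled(BPF_CGROUP_UDP4_SENDMSG) && !connected)
		err = BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk,
				(struct sockaddr *)usin, &ipc.addr);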

No visible user-side changes.

v9:
- include linux/tcp.h instead of netinet/tcp.h in sockopt_sk.c
- note that v9 depends on the commit 4be34f3d ("bpf: Don't leak
  memory in bpf getsockopt when optlen == 0") from bpf tree

v8:
- add tcp.h to tools/include/uapi in the same patch (Martin KaFai Lau)
- kmalloc instead of kzalloc when exporting buffer (Martin KaFai Lau)
- note that v8 depends on the commit 4be34f3d ("bpf: Don't leak
  memory in bpf getsockopt when optlen == 0") from bpf tree

v7:
- add comment about buffer contents for retval != 0 (Martin KaFai Lau)
- export tcp.h into tools/include/uapi (Martin KaFai Lau)
- note that v7 depends on the commit 4be34f3d ("bpf: Don't leak
  memory in bpf getsockopt when optlen == 0") from bpf tree

v6:
- avoid indirect cost for new bpf_bypass_getsockopt (Eric Dumazet)

v5:
- reorder patches to reduce the churn (Martin KaFai Lau)

v4:
- update performance numbers
- bpf_bypass_getsockopt (Martin KaFai Lau)

v3:
- remove extra newline, add comment about sizeof tcp_zerocopy_receive
  (Martin KaFai Lau)
- add another patch to remove lock_sock overhead from
  TCP_ZEROCOPY_RECEIVE; technically, this makes patch #1 obsolete,
  but I'd still prefer to keep it to help with other socket
  options

v2:
- perf numbers for getsockopt kmalloc reduction (Song Liu)
- (sk) in BPF_CGROUP_PRE_CONNECT_ENABLED (Song Liu)
- 128 -> 64 buffer size, BUILD_BUG_ON (Martin KaFai Lau)
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 13ca51d5 a9ed15da
@@ -23,8 +23,8 @@ struct ctl_table_header;
#ifdef CONFIG_CGROUP_BPF
extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
extern struct static_key_false cgroup_bpf_enabled_key[MAX_BPF_ATTACH_TYPE];
#define cgroup_bpf_enabled(type) static_branch_unlikely(&cgroup_bpf_enabled_key[type])
DECLARE_PER_CPU(struct bpf_cgroup_storage*,
bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
@@ -147,6 +147,10 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
int __user *optlen, int max_optlen,
int retval);
int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
int optname, void *optval,
int *optlen, int retval);
static inline enum bpf_cgroup_storage_type cgroup_storage_type(
struct bpf_map *map)
{
@@ -185,7 +189,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled) \
if (cgroup_bpf_enabled(BPF_CGROUP_INET_INGRESS)) \
__ret = __cgroup_bpf_run_filter_skb(sk, skb, \
BPF_CGROUP_INET_INGRESS); \
\
@@ -195,7 +199,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled && sk && sk == skb->sk) { \
if (cgroup_bpf_enabled(BPF_CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \
typeof(sk) __sk = sk_to_full_sk(sk); \
if (sk_fullsock(__sk)) \
__ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
@@ -207,7 +211,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_SK_PROG(sk, type) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled) { \
if (cgroup_bpf_enabled(type)) { \
__ret = __cgroup_bpf_run_filter_sk(sk, type); \
} \
__ret; \
@@ -228,7 +232,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled) \
if (cgroup_bpf_enabled(type)) \
__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
NULL); \
__ret; \
@@ -237,7 +241,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled) { \
if (cgroup_bpf_enabled(type)) { \
lock_sock(sk); \
__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
t_ctx); \
@@ -252,8 +256,10 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr) \
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_BIND, NULL)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
sk->sk_prot->pre_connect)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) \
((cgroup_bpf_enabled(BPF_CGROUP_INET4_CONNECT) || \
cgroup_bpf_enabled(BPF_CGROUP_INET6_CONNECT)) && \
(sk)->sk_prot->pre_connect)
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)
@@ -297,7 +303,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled) \
if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS)) \
__ret = __cgroup_bpf_run_filter_sock_ops(sk, \
sock_ops, \
BPF_CGROUP_SOCK_OPS); \
@@ -307,7 +313,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled && (sock_ops)->sk) { \
if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS) && (sock_ops)->sk) { \
typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
if (__sk && sk_fullsock(__sk)) \
__ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
@@ -320,7 +326,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled) \
if (cgroup_bpf_enabled(BPF_CGROUP_DEVICE)) \
__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
access, \
BPF_CGROUP_DEVICE); \
@@ -332,7 +338,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled) \
if (cgroup_bpf_enabled(BPF_CGROUP_SYSCTL)) \
__ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
buf, count, pos, \
BPF_CGROUP_SYSCTL); \
@@ -343,7 +349,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
kernel_optval) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled) \
if (cgroup_bpf_enabled(BPF_CGROUP_SETSOCKOPT)) \
__ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \
optname, optval, \
optlen, \
......@@ -354,7 +360,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled) \
if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \
get_user(__ret, optlen); \
__ret; \
})
@@ -363,11 +369,24 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
max_optlen, retval) \
({ \
int __ret = retval; \
if (cgroup_bpf_enabled) \
__ret = __cgroup_bpf_run_filter_getsockopt(sock, level, \
optname, optval, \
optlen, max_optlen, \
retval); \
if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \
if (!(sock)->sk_prot->bpf_bypass_getsockopt || \
!INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
tcp_bpf_bypass_getsockopt, \
level, optname)) \
__ret = __cgroup_bpf_run_filter_getsockopt( \
sock, level, optname, optval, optlen, \
max_optlen, retval); \
__ret; \
})
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
optlen, retval) \
({ \
int __ret = retval; \
if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \
__ret = __cgroup_bpf_run_filter_getsockopt_kern( \
sock, level, optname, optval, optlen, retval); \
__ret; \
})
@@ -427,7 +446,7 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
return 0;
}
#define cgroup_bpf_enabled (0)
#define cgroup_bpf_enabled(type) (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
@@ -452,6 +471,8 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
kernel_optval) ({ 0; })
......
@@ -1298,6 +1298,11 @@ struct bpf_sysctl_kern {
u64 tmp_reg;
};
#define BPF_SOCKOPT_KERN_BUF_SIZE 32
struct bpf_sockopt_buf {
u8 data[BPF_SOCKOPT_KERN_BUF_SIZE];
};
struct bpf_sockopt_kern {
struct sock *sk;
u8 *optval;
......
@@ -60,4 +60,10 @@
#define INDIRECT_CALL_INET(f, f2, f1, ...) f(__VA_ARGS__)
#endif
#if IS_ENABLED(CONFIG_INET)
#define INDIRECT_CALL_INET_1(f, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_INET_1(f, f1, ...) f(__VA_ARGS__)
#endif
#endif
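For reference, INDIRECT_CALL_1() from include/linux/indirect_call_wrapper.h
compares the function pointer against its single known candidate and makes
a direct call on a match, so the bypass check costs no retpoline-style
indirect call on the fast path (the v6 concern raised by Eric Dumazet).
Roughly:

	#define INDIRECT_CALL_1(f, f1, ...) \
		(likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__))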
@@ -1174,6 +1174,8 @@ struct proto {
int (*backlog_rcv) (struct sock *sk,
struct sk_buff *skb);
bool (*bpf_bypass_getsockopt)(int level,
int optname);
void (*release_cb)(struct sock *sk);
......
@@ -403,6 +403,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
bool tcp_bpf_bypass_getsockopt(int level, int optname);
int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
unsigned int optlen);
void tcp_set_keepalive(struct sock *sk, int val);
......
@@ -19,7 +19,7 @@
#include "../cgroup/cgroup-internal.h"
DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_BPF_ATTACH_TYPE);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);
void cgroup_bpf_offline(struct cgroup *cgrp)
@@ -128,7 +128,7 @@ static void cgroup_bpf_release(struct work_struct *work)
if (pl->link)
bpf_cgroup_link_auto_detach(pl->link);
kfree(pl);
static_branch_dec(&cgroup_bpf_enabled_key);
static_branch_dec(&cgroup_bpf_enabled_key[type]);
}
old_array = rcu_dereference_protected(
cgrp->bpf.effective[type],
@@ -499,7 +499,7 @@ int __cgroup_bpf_attach(struct cgroup *cgrp,
if (old_prog)
bpf_prog_put(old_prog);
else
static_branch_inc(&cgroup_bpf_enabled_key);
static_branch_inc(&cgroup_bpf_enabled_key[type]);
bpf_cgroup_storages_link(new_storage, cgrp, type);
return 0;
@@ -698,7 +698,7 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
cgrp->bpf.flags[type] = 0;
if (old_prog)
bpf_prog_put(old_prog);
static_branch_dec(&cgroup_bpf_enabled_key);
static_branch_dec(&cgroup_bpf_enabled_key[type]);
return 0;
cleanup:
@@ -1298,7 +1298,8 @@ static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
return empty;
}
static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen,
struct bpf_sockopt_buf *buf)
{
if (unlikely(max_optlen < 0))
return -EINVAL;
@@ -1310,6 +1311,15 @@ static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
max_optlen = PAGE_SIZE;
}
if (max_optlen <= sizeof(buf->data)) {
/* When the optval fits into BPF_SOCKOPT_KERN_BUF_SIZE
* bytes, avoid the cost of kzalloc.
*/
ctx->optval = buf->data;
ctx->optval_end = ctx->optval + max_optlen;
return max_optlen;
}
ctx->optval = kzalloc(max_optlen, GFP_USER);
if (!ctx->optval)
return -ENOMEM;
@@ -1319,16 +1329,26 @@ static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
return max_optlen;
}
static void sockopt_free_buf(struct bpf_sockopt_kern *ctx)
static void sockopt_free_buf(struct bpf_sockopt_kern *ctx,
struct bpf_sockopt_buf *buf)
{
if (ctx->optval == buf->data)
return;
kfree(ctx->optval);
}
static bool sockopt_buf_allocated(struct bpf_sockopt_kern *ctx,
struct bpf_sockopt_buf *buf)
{
return ctx->optval != buf->data;
}
int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
int *optname, char __user *optval,
int *optlen, char **kernel_optval)
{
struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
struct bpf_sockopt_buf buf = {};
struct bpf_sockopt_kern ctx = {
.sk = sk,
.level = *level,
@@ -1340,8 +1360,7 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
* attached to the hook so we don't waste time allocating
* memory and locking the socket.
*/
if (!cgroup_bpf_enabled ||
__cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
if (__cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
return 0;
/* Allocate a bit more than the initial user buffer for
@@ -1350,7 +1369,7 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
*/
max_optlen = max_t(int, 16, *optlen);
max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
if (max_optlen < 0)
return max_optlen;
@@ -1390,14 +1409,31 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
*/
if (ctx.optlen != 0) {
*optlen = ctx.optlen;
*kernel_optval = ctx.optval;
/* We've used bpf_sockopt_kern->buf as an intermediary
* storage, but the BPF program indicates that we need
* to pass this data to the kernel setsockopt handler.
* No way to export on-stack buf, have to allocate a
* new buffer.
*/
if (!sockopt_buf_allocated(&ctx, &buf)) {
void *p = kmalloc(ctx.optlen, GFP_USER);
if (!p) {
ret = -ENOMEM;
goto out;
}
memcpy(p, ctx.optval, ctx.optlen);
*kernel_optval = p;
} else {
*kernel_optval = ctx.optval;
}
/* export and don't free sockopt buf */
return 0;
}
}
out:
sockopt_free_buf(&ctx);
sockopt_free_buf(&ctx, &buf);
return ret;
}
@@ -1407,6 +1443,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
int retval)
{
struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
struct bpf_sockopt_buf buf = {};
struct bpf_sockopt_kern ctx = {
.sk = sk,
.level = level,
@@ -1419,13 +1456,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
* attached to the hook so we don't waste time allocating
* memory and locking the socket.
*/
if (!cgroup_bpf_enabled ||
__cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
if (__cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
return retval;
ctx.optlen = max_optlen;
max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
if (max_optlen < 0)
return max_optlen;
@@ -1483,9 +1519,55 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
ret = ctx.retval;
out:
sockopt_free_buf(&ctx);
sockopt_free_buf(&ctx, &buf);
return ret;
}
int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
int optname, void *optval,
int *optlen, int retval)
{
struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
struct bpf_sockopt_kern ctx = {
.sk = sk,
.level = level,
.optname = optname,
.retval = retval,
.optlen = *optlen,
.optval = optval,
.optval_end = optval + *optlen,
};
int ret;
/* Note that __cgroup_bpf_run_filter_getsockopt doesn't copy
* user data back into the BPF buffer when retval != 0. This is
* done as an optimization to avoid an extra copy, assuming the
* kernel won't populate the data in case of an error.
* Here we always pass the data and memset() should
* be called if that data shouldn't be "exported".
*/
ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT],
&ctx, BPF_PROG_RUN);
if (!ret)
return -EPERM;
if (ctx.optlen > *optlen)
return -EFAULT;
/* BPF programs are only allowed to set retval to 0, not some
* arbitrary value.
*/
if (ctx.retval != 0 && ctx.retval != retval)
return -EFAULT;
/* BPF programs can shrink the buffer, export the modifications.
*/
if (ctx.optlen != 0)
*optlen = ctx.optlen;
return ctx.retval;
}
#endif
static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
......
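To make the contract of __cgroup_bpf_run_filter_getsockopt_kern() concrete,
this is the caller-side shape (do_tcp_getsockopt below is the one real user
in this series; the memset() comment is illustrative, not part of the patch):

	err = tcp_zerocopy_receive(sk, &zc);
	/* &zc reaches the BPF program even when err != 0; a handler that
	 * must not expose stale buffer contents would memset() it first.
	 */
	err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname,
						  &zc, &len, err);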
@@ -777,18 +777,19 @@ int inet_getname(struct socket *sock, struct sockaddr *uaddr,
return -ENOTCONN;
sin->sin_port = inet->inet_dport;
sin->sin_addr.s_addr = inet->inet_daddr;
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, (struct sockaddr *)sin,
BPF_CGROUP_INET4_GETPEERNAME,
NULL);
} else {
__be32 addr = inet->inet_rcv_saddr;
if (!addr)
addr = inet->inet_saddr;
sin->sin_port = inet->inet_sport;
sin->sin_addr.s_addr = addr;
}
if (cgroup_bpf_enabled)
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, (struct sockaddr *)sin,
peer ? BPF_CGROUP_INET4_GETPEERNAME :
BPF_CGROUP_INET4_GETSOCKNAME,
BPF_CGROUP_INET4_GETSOCKNAME,
NULL);
}
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
return sizeof(*sin);
}
......
@@ -4099,6 +4099,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
return -EFAULT;
lock_sock(sk);
err = tcp_zerocopy_receive(sk, &zc);
err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname,
&zc, &len, err);
release_sock(sk);
if (len >= offsetofend(struct tcp_zerocopy_receive, err))
goto zerocopy_rcv_sk_err;
@@ -4133,6 +4135,18 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
return 0;
}
bool tcp_bpf_bypass_getsockopt(int level, int optname)
{
/* TCP's do_tcp_getsockopt has an optimized getsockopt implementation
* that avoids an extra socket lock for TCP_ZEROCOPY_RECEIVE.
*/
if (level == SOL_TCP && optname == TCP_ZEROCOPY_RECEIVE)
return true;
return false;
}
EXPORT_SYMBOL(tcp_bpf_bypass_getsockopt);
int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
int __user *optlen)
{
......
@@ -2793,6 +2793,7 @@ struct proto tcp_prot = {
.shutdown = tcp_shutdown,
.setsockopt = tcp_setsockopt,
.getsockopt = tcp_getsockopt,
.bpf_bypass_getsockopt = tcp_bpf_bypass_getsockopt,
.keepalive = tcp_set_keepalive,
.recvmsg = tcp_recvmsg,
.sendmsg = tcp_sendmsg,
......
@@ -1124,7 +1124,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
rcu_read_unlock();
}
if (cgroup_bpf_enabled && !connected) {
if (cgroup_bpf_enabled(BPF_CGROUP_UDP4_SENDMSG) && !connected) {
err = BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk,
(struct sockaddr *)usin, &ipc.addr);
if (err)
@@ -1858,9 +1858,8 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
*addr_len = sizeof(*sin);
if (cgroup_bpf_enabled)
BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk,
(struct sockaddr *)sin);
BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk,
(struct sockaddr *)sin);
}
if (udp_sk(sk)->gro_enabled)
......
@@ -527,18 +527,19 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
sin->sin6_addr = sk->sk_v6_daddr;
if (np->sndflow)
sin->sin6_flowinfo = np->flow_label;
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, (struct sockaddr *)sin,
BPF_CGROUP_INET6_GETPEERNAME,
NULL);
} else {
if (ipv6_addr_any(&sk->sk_v6_rcv_saddr))
sin->sin6_addr = np->saddr;
else
sin->sin6_addr = sk->sk_v6_rcv_saddr;
sin->sin6_port = inet->inet_sport;
}
if (cgroup_bpf_enabled)
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, (struct sockaddr *)sin,
peer ? BPF_CGROUP_INET6_GETPEERNAME :
BPF_CGROUP_INET6_GETSOCKNAME,
BPF_CGROUP_INET6_GETSOCKNAME,
NULL);
}
sin->sin6_scope_id = ipv6_iface_scope_id(&sin->sin6_addr,
sk->sk_bound_dev_if);
return sizeof(*sin);
......
@@ -2121,6 +2121,7 @@ struct proto tcpv6_prot = {
.shutdown = tcp_shutdown,
.setsockopt = tcp_setsockopt,
.getsockopt = tcp_getsockopt,
.bpf_bypass_getsockopt = tcp_bpf_bypass_getsockopt,
.keepalive = tcp_set_keepalive,
.recvmsg = tcp_recvmsg,
.sendmsg = tcp_sendmsg,
......
@@ -409,9 +409,8 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
}
*addr_len = sizeof(*sin6);
if (cgroup_bpf_enabled)
BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
(struct sockaddr *)sin6);
BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
(struct sockaddr *)sin6);
}
if (udp_sk(sk)->gro_enabled)
@@ -1462,7 +1461,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
fl6.saddr = np->saddr;
fl6.fl6_sport = inet->inet_sport;
if (cgroup_bpf_enabled && !connected) {
if (cgroup_bpf_enabled(BPF_CGROUP_UDP6_SENDMSG) && !connected) {
err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
(struct sockaddr *)sin6, &fl6.saddr);
if (err)
......
@@ -2126,6 +2126,9 @@ SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname,
return __sys_setsockopt(fd, level, optname, optval, optlen);
}
INDIRECT_CALLABLE_DECLARE(bool tcp_bpf_bypass_getsockopt(int level,
int optname));
/*
* Get a socket option. Because we don't know the option lengths we have
* to pass a user mode parameter for the protocols to sort out.
......
This diff is collapsed.
@@ -2,6 +2,7 @@
/* Copyright (c) 2019 Facebook */
#include <linux/err.h>
#include <netinet/tcp.h>
#include <test_progs.h>
#include "bpf_dctcp.skel.h"
#include "bpf_cubic.skel.h"
......
@@ -7,6 +7,7 @@
#include <string.h>
#include <linux/pkt_cls.h>
#include <netinet/tcp.h>
#include <test_progs.h>
......
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Cloudflare
#include <error.h>
#include <netinet/tcp.h>
#include "test_progs.h"
#include "test_skmsg_load_helpers.skel.h"
......
@@ -2,6 +2,12 @@
#include <test_progs.h>
#include "cgroup_helpers.h"
#include <linux/tcp.h>
#ifndef SOL_TCP
#define SOL_TCP IPPROTO_TCP
#endif
#define SOL_CUSTOM 0xdeadbeef
static int getsetsockopt(void)
@@ -11,6 +17,7 @@ static int getsetsockopt(void)
char u8[4];
__u32 u32;
char cc[16]; /* TCP_CA_NAME_MAX */
struct tcp_zerocopy_receive zc;
} buf = {};
socklen_t optlen;
char *big_buf = NULL;
@@ -154,6 +161,27 @@ static int getsetsockopt(void)
goto err;
}
/* TCP_ZEROCOPY_RECEIVE triggers */
memset(&buf, 0, sizeof(buf));
optlen = sizeof(buf.zc);
err = getsockopt(fd, SOL_TCP, TCP_ZEROCOPY_RECEIVE, &buf, &optlen);
if (err) {
log_err("Unexpected getsockopt(TCP_ZEROCOPY_RECEIVE) err=%d errno=%d",
err, errno);
goto err;
}
memset(&buf, 0, sizeof(buf));
buf.zc.address = 12345; /* rejected by BPF */
optlen = sizeof(buf.zc);
errno = 0;
err = getsockopt(fd, SOL_TCP, TCP_ZEROCOPY_RECEIVE, &buf, &optlen);
if (errno != EPERM) {
log_err("Unexpected getsockopt(TCP_ZEROCOPY_RECEIVE) err=%d errno=%d",
err, errno);
goto err;
}
free(big_buf);
close(fd);
return 0;
......
// SPDX-License-Identifier: GPL-2.0
#include <string.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <linux/tcp.h>
#include <linux/bpf.h>
#include <netinet/in.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
@@ -12,6 +12,10 @@ __u32 _version SEC("version") = 1;
#define PAGE_SIZE 4096
#endif
#ifndef SOL_TCP
#define SOL_TCP IPPROTO_TCP
#endif
#define SOL_CUSTOM 0xdeadbeef
struct sockopt_sk {
@@ -57,6 +61,21 @@ int _getsockopt(struct bpf_sockopt *ctx)
return 1;
}
if (ctx->level == SOL_TCP && ctx->optname == TCP_ZEROCOPY_RECEIVE) {
/* Verify that TCP_ZEROCOPY_RECEIVE triggers.
* It has a custom implementation for performance
* reasons.
*/
if (optval + sizeof(struct tcp_zerocopy_receive) > optval_end)
return 0; /* EPERM, bounds check */
if (((struct tcp_zerocopy_receive *)optval)->address != 0)
return 0; /* EPERM, unexpected data */
return 1;
}
if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) {
if (optval + 1 > optval_end)
return 0; /* EPERM, bounds check */
......
@@ -16,7 +16,6 @@ typedef __u16 __sum16;
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <netinet/tcp.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <linux/socket.h>
......