Commit 20a6d915 authored by Alexei Starovoitov

Merge branch 'sockmap/sk_skb program memory acct fixes'

John Fastabend says:

====================

Users of sockmap and skmsg trying to build proxies and other tools
have pointed out to me that the error handling can be problematic. If
the proxy is under-provisioned and/or the BPF admin does not have
the ability to update/modify memory provisions on the sockets, it is
possible data may be dropped. For some things we have retries so
everything works out OK, but for most things this is likely not
great, and things go bad.

The original design dropped memory accounting on the receive
socket as early as possible. We did this early in sk_skb
handling and then charged it to the redirect socket immediately
after running the BPF program.

But this design caused a fundamental problem: what should we do if we
redirect to a socket that has already reached its socket memory
limits? For proxy use cases the network admin can tune memory limits,
but in general we punted on this problem and told folks to simply
make their memory limits high enough to handle their workload. This
is not a really good answer. When deploying into environments where
we expect this to be transparent, it no longer is, because we need to
tune parameters. In fact it is really only viable in cases where we
have fine-grained control over the application, for example a proxy
redirecting from an ingress socket to an egress socket. The result is
I get bug reports, because it is surprising for one, but more
importantly it also breaks some use cases. So let's fix it.

This series cleans up the different cases so that in many common
modes, such as passing the packet up to the receive socket, we can
simply rely on the fact that the TCP stack has already done the
memory accounting.

Next, instead of trying to do memory accounting against the socket
we plan to redirect into, we keep memory accounting on the receive
socket until the skb can be put on the redirect socket. This means
if we do an egress redirect to a socket and sock_writeable() returns
EAGAIN, we can requeue the skb on the workqueue and try again. The
same scenario plays out for ingress. If the skb cannot be put on
the receive queue of the redirect socket, then we simply requeue and
retry. In both cases memory is still accounted for against the
receiving socket.

This also handles head-of-line blocking. With the above scheme the
skb is on a queue associated with the socket it will be sent/recv'd
on, but the memory accounting is against the receive socket. This
means the receive socket can advance to the next skb and avoid
head-of-line blocking, at least until the receive memory on that
socket runs out. This puts a maximum size on the amount of data any
socket can enqueue, giving us bounds on the skb lists so they can't
grow indefinitely.
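
To make the scheme concrete, here is a small stand-alone user-space
sketch of the idea; the names (pkt, rx_budget, try_egress, requeue)
are made up for illustration and are not the kernel API. The receive
side keeps the memory charge, an egress attempt that would block
reports -EAGAIN, the packet is requeued for a later retry, and the
receive-side budget is what bounds the backlog:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct pkt {
	size_t len;
	struct pkt *next;
};

/* Memory for queued packets stays charged to the receive side. */
static size_t rx_charged;
static const size_t rx_budget = 4096;	/* stand-in for sk_rcvbuf */

static struct pkt *backlog_head, **backlog_tail = &backlog_head;

/* Stand-in for the egress attempt; fails with -EAGAIN when the target
 * socket is not writable, mirroring the sock_writeable() check.
 */
static int try_egress(struct pkt *p, int writable)
{
	if (!writable)
		return -EAGAIN;
	rx_charged -= p->len;	/* uncharge only once the packet moves on */
	free(p);
	return 0;
}

static void requeue(struct pkt *p)
{
	p->next = NULL;
	*backlog_tail = p;
	backlog_tail = &p->next;
	/* in the kernel the workqueue backlog retries this later */
}

int main(void)
{
	struct pkt *p = calloc(1, sizeof(*p));

	if (!p)
		return 1;
	p->len = 1500;
	if (rx_charged + p->len > rx_budget) {	/* receive budget bounds the list */
		free(p);
		return 1;
	}
	rx_charged += p->len;
	if (try_egress(p, /* writable */ 0) == -EAGAIN)
		requeue(p);	/* keep the receive-side charge, retry later */
	printf("still charged to receiver: %zu bytes\n", rx_charged);
	return 0;
}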

Overall I think this is a win. Tested with test_sockmap.

These are fixes, but I tagged it for bpf-next considering we are
at -rc8.

v1->v2: Fix uninitialized/unused variables (kernel test robot)
v2->v3: Fix typo in patch 2: err = 0 needs to be < 0, so use err = -EIO
---
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents ebb034b1 0b17ad25
@@ -433,10 +433,12 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
 static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
 			       u32 off, u32 len, bool ingress)
 {
-	if (ingress)
-		return sk_psock_skb_ingress(psock, skb);
-	else
-		return skb_send_sock_locked(psock->sk, skb, off, len);
+	if (!ingress) {
+		if (!sock_writeable(psock->sk))
+			return -EAGAIN;
+		return skb_send_sock_locked(psock->sk, skb, off, len);
+	}
+	return sk_psock_skb_ingress(psock, skb);
 }
 
 static void sk_psock_backlog(struct work_struct *work)
@@ -682,19 +684,8 @@ EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
 static int sk_psock_bpf_run(struct sk_psock *psock, struct bpf_prog *prog,
 			    struct sk_buff *skb)
 {
-	int ret;
-
-	skb->sk = psock->sk;
 	bpf_compute_data_end_sk_skb(skb);
-	ret = bpf_prog_run_pin_on_cpu(prog, skb);
-	/* strparser clones the skb before handing it to a upper layer,
-	 * meaning skb_orphan has been called. We NULL sk on the way out
-	 * to ensure we don't trigger a BUG_ON() in skb/sk operations
-	 * later and because we are not charging the memory of this skb
-	 * to any socket yet.
-	 */
-	skb->sk = NULL;
-	return ret;
+	return bpf_prog_run_pin_on_cpu(prog, skb);
 }
 
 static struct sk_psock *sk_psock_from_strp(struct strparser *strp)
@@ -709,38 +700,35 @@ static void sk_psock_skb_redirect(struct sk_buff *skb)
 {
 	struct sk_psock *psock_other;
 	struct sock *sk_other;
-	bool ingress;
 
 	sk_other = tcp_skb_bpf_redirect_fetch(skb);
+	/* This error is a buggy BPF program, it returned a redirect
+	 * return code, but then didn't set a redirect interface.
+	 */
 	if (unlikely(!sk_other)) {
 		kfree_skb(skb);
 		return;
 	}
 	psock_other = sk_psock(sk_other);
+	/* This error indicates the socket is being torn down or had another
+	 * error that caused the pipe to break. We can't send a packet on
+	 * a socket that is in this state so we drop the skb.
+	 */
 	if (!psock_other || sock_flag(sk_other, SOCK_DEAD) ||
 	    !sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
 		kfree_skb(skb);
 		return;
 	}
 
-	ingress = tcp_skb_bpf_ingress(skb);
-	if ((!ingress && sock_writeable(sk_other)) ||
-	    (ingress &&
-	     atomic_read(&sk_other->sk_rmem_alloc) <=
-	     sk_other->sk_rcvbuf)) {
-		if (!ingress)
-			skb_set_owner_w(skb, sk_other);
-		skb_queue_tail(&psock_other->ingress_skb, skb);
-		schedule_work(&psock_other->work);
-	} else {
-		kfree_skb(skb);
-	}
+	skb_queue_tail(&psock_other->ingress_skb, skb);
+	schedule_work(&psock_other->work);
 }
 
-static void sk_psock_tls_verdict_apply(struct sk_buff *skb, int verdict)
+static void sk_psock_tls_verdict_apply(struct sk_buff *skb, struct sock *sk, int verdict)
 {
 	switch (verdict) {
 	case __SK_REDIRECT:
+		skb_set_owner_r(skb, sk);
 		sk_psock_skb_redirect(skb);
 		break;
 	case __SK_PASS:
@@ -758,11 +746,17 @@ int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
 	rcu_read_lock();
 	prog = READ_ONCE(psock->progs.skb_verdict);
 	if (likely(prog)) {
+		/* We skip full set_owner_r here because if we do a SK_PASS
+		 * or SK_DROP we can skip skb memory accounting and use the
+		 * TLS context.
+		 */
+		skb->sk = psock->sk;
 		tcp_skb_bpf_redirect_clear(skb);
 		ret = sk_psock_bpf_run(psock, prog, skb);
 		ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
+		skb->sk = NULL;
 	}
-	sk_psock_tls_verdict_apply(skb, ret);
+	sk_psock_tls_verdict_apply(skb, psock->sk, ret);
 	rcu_read_unlock();
 	return ret;
 }
@@ -771,7 +765,9 @@ EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
 static void sk_psock_verdict_apply(struct sk_psock *psock,
 				   struct sk_buff *skb, int verdict)
 {
+	struct tcp_skb_cb *tcp;
 	struct sock *sk_other;
+	int err = -EIO;
 
 	switch (verdict) {
 	case __SK_PASS:
@@ -780,16 +776,24 @@ static void sk_psock_verdict_apply(struct sk_psock *psock,
 		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
 			goto out_free;
 		}
-		if (atomic_read(&sk_other->sk_rmem_alloc) <=
-		    sk_other->sk_rcvbuf) {
-			struct tcp_skb_cb *tcp = TCP_SKB_CB(skb);
 
-			tcp->bpf.flags |= BPF_F_INGRESS;
+		tcp = TCP_SKB_CB(skb);
+		tcp->bpf.flags |= BPF_F_INGRESS;
+
+		/* If the queue is empty then we can submit directly
+		 * into the msg queue. If its not empty we have to
+		 * queue work otherwise we may get OOO data. Otherwise,
+		 * if sk_psock_skb_ingress errors will be handled by
+		 * retrying later from workqueue.
+		 */
+		if (skb_queue_empty(&psock->ingress_skb)) {
+			err = sk_psock_skb_ingress(psock, skb);
+		}
+		if (err < 0) {
 			skb_queue_tail(&psock->ingress_skb, skb);
 			schedule_work(&psock->work);
-			break;
 		}
-		goto out_free;
+		break;
 	case __SK_REDIRECT:
 		sk_psock_skb_redirect(skb);
 		break;
@@ -814,9 +818,9 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
 		kfree_skb(skb);
 		goto out;
 	}
+	skb_set_owner_r(skb, sk);
 	prog = READ_ONCE(psock->progs.skb_verdict);
 	if (likely(prog)) {
-		skb_orphan(skb);
 		tcp_skb_bpf_redirect_clear(skb);
 		ret = sk_psock_bpf_run(psock, prog, skb);
 		ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
@@ -839,8 +843,11 @@ static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
 	rcu_read_lock();
 	prog = READ_ONCE(psock->progs.skb_parser);
-	if (likely(prog))
+	if (likely(prog)) {
+		skb->sk = psock->sk;
 		ret = sk_psock_bpf_run(psock, prog, skb);
+		skb->sk = NULL;
+	}
 	rcu_read_unlock();
 	return ret;
 }
...