Commit 4ff3dfc9 authored by Paolo Abeni

Merge branch 'splice-net-handle-msg_splice_pages-in-chelsio-tls'

David Howells says:

====================
splice, net: Handle MSG_SPLICE_PAGES in Chelsio-TLS

Here are patches to make Chelsio-TLS handle the MSG_SPLICE_PAGES internal
sendmsg flag.  MSG_SPLICE_PAGES is an internal hint that tells the protocol
that it should splice the pages supplied if it can.  Its sendpage
implementation is then turned into a wrapper around that.
====================

Link: https://lore.kernel.org/r/20230531110008.642903-1-dhowells@redhat.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents 735c9ee9 26acc982
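
For context, the conversion described in the cover letter follows one wrapper shape: the page handed to sendpage is described by a single-element bio_vec, attached to the msghdr as an ITER_SOURCE iterator, and passed to the protocol's sendmsg with MSG_SPLICE_PAGES set so the page can be spliced rather than copied. A minimal sketch of that pattern is shown below; foo_sendpage()/foo_sendmsg() are placeholder names (the usual <linux/bvec.h>, <linux/uio.h>, <linux/socket.h> and <net/sock.h> includes are assumed), and the real chtls conversion is in the diff that follows.

/*
 * Illustrative sketch only: "foo" stands in for a protocol; the actual
 * change in this series is to chtls_sendpage()/chtls_sendmsg() below.
 */
static int foo_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags)
{
	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };
	struct bio_vec bvec;

	/* Preserve the "more data follows" hint from the sendpage API. */
	if (flags & MSG_SENDPAGE_NOTLAST)
		msg.msg_flags |= MSG_MORE;

	/* Describe the single page as a BVEC-backed source iterator... */
	bvec_set_page(&bvec, page, size, offset);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);

	/* ...and let the protocol's sendmsg splice it in. */
	return foo_sendmsg(sk, &msg, size);
}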
@@ -1092,7 +1092,17 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 		if (copy > size)
 			copy = size;
 
-		if (skb_tailroom(skb) > 0) {
+		if (msg->msg_flags & MSG_SPLICE_PAGES) {
+			err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
+						   sk->sk_allocation);
+			if (err < 0) {
+				if (err == -EMSGSIZE)
+					goto new_buf;
+				goto do_fault;
+			}
+			copy = err;
+			sk_wmem_queued_add(sk, copy);
+		} else if (skb_tailroom(skb) > 0) {
 			copy = min(copy, skb_tailroom(skb));
 			if (is_tls_tx(csk))
 				copy = min_t(int, copy, csk->tlshws.txleft);
@@ -1230,110 +1240,15 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 int chtls_sendpage(struct sock *sk, struct page *page,
 		   int offset, size_t size, int flags)
 {
-	struct chtls_sock *csk;
-	struct chtls_dev *cdev;
-	int mss, err, copied;
-	struct tcp_sock *tp;
-	long timeo;
-
-	tp = tcp_sk(sk);
-	copied = 0;
-	csk = rcu_dereference_sk_user_data(sk);
-	cdev = csk->cdev;
-	lock_sock(sk);
-	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
-
-	err = sk_stream_wait_connect(sk, &timeo);
-	if (!sk_in_state(sk, TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
-	    err != 0)
-		goto out_err;
-
-	mss = csk->mss;
-	csk_set_flag(csk, CSK_TX_MORE_DATA);
-
-	while (size > 0) {
-		struct sk_buff *skb = skb_peek_tail(&csk->txq);
-		int copy, i;
+	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };
+	struct bio_vec bvec;
 
-		if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
-		    (copy = mss - skb->len) <= 0) {
-new_buf:
-			if (!csk_mem_free(cdev, sk))
-				goto wait_for_sndbuf;
-
-			if (is_tls_tx(csk)) {
-				skb = get_record_skb(sk,
-						     select_size(sk, size,
-								 flags,
-								 TX_TLSHDR_LEN),
-						     true);
-			} else {
-				skb = get_tx_skb(sk, 0);
-			}
-			if (!skb)
-				goto wait_for_memory;
-
-			copy = mss;
-		}
-		if (copy > size)
-			copy = size;
+	if (flags & MSG_SENDPAGE_NOTLAST)
+		msg.msg_flags |= MSG_MORE;
 
-		i = skb_shinfo(skb)->nr_frags;
-		if (skb_can_coalesce(skb, i, page, offset)) {
-			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
-		} else if (i < MAX_SKB_FRAGS) {
-			get_page(page);
-			skb_fill_page_desc(skb, i, page, offset, copy);
-		} else {
-			tx_skb_finalize(skb);
-			push_frames_if_head(sk);
-			goto new_buf;
-		}
-
-		skb->len += copy;
-		if (skb->len == mss)
-			tx_skb_finalize(skb);
-		skb->data_len += copy;
-		skb->truesize += copy;
-		sk->sk_wmem_queued += copy;
-		tp->write_seq += copy;
-		copied += copy;
-		offset += copy;
-		size -= copy;
-
-		if (corked(tp, flags) &&
-		    (sk_stream_wspace(sk) < sk_stream_min_wspace(sk)))
-			ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_NO_APPEND;
-
-		if (!size)
-			break;
-
-		if (unlikely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND))
-			push_frames_if_head(sk);
-		continue;
-wait_for_sndbuf:
-		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-wait_for_memory:
-		err = csk_wait_memory(cdev, sk, &timeo);
-		if (err)
-			goto do_error;
-	}
-out:
-	csk_reset_flag(csk, CSK_TX_MORE_DATA);
-	if (copied)
-		chtls_tcp_push(sk, flags);
-done:
-	release_sock(sk);
-	return copied;
-do_error:
-	if (copied)
-		goto out;
-
-out_err:
-	if (csk_conn_inline(csk))
-		csk_reset_flag(csk, CSK_TX_MORE_DATA);
-	copied = sk_stream_error(sk, flags, err);
-	goto done;
+	bvec_set_page(&bvec, page, size, offset);
+	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
+	return chtls_sendmsg(sk, &msg, size);
 }
 
 static void chtls_select_window(struct sock *sk)