Commit c729ed6f, authored by David Howells, committed by Jakub Kicinski

net: Use sendmsg(MSG_SPLICE_PAGES) not sendpage in skb_send_sock()

Use sendmsg() with MSG_SPLICE_PAGES rather than sendpage in
skb_send_sock().  This causes pages to be spliced from the source iterator
if possible.

This allows ->sendpage() to be replaced by something that can handle
multiple multipage folios in a single transaction.

Note that this could perhaps be improved to fill out a bvec array with all
the frags and then make a single sendmsg call, possibly sticking the header
on the front also.
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Jens Axboe <axboe@kernel.dk>
cc: Matthew Wilcox <willy@infradead.org>
Link: https://lore.kernel.org/r/20230623225513.2732256-3-dhowells@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent f8dd95b2
...@@ -2989,32 +2989,32 @@ int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, ...@@ -2989,32 +2989,32 @@ int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
} }
EXPORT_SYMBOL_GPL(skb_splice_bits); EXPORT_SYMBOL_GPL(skb_splice_bits);
static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg, static int sendmsg_locked(struct sock *sk, struct msghdr *msg)
struct kvec *vec, size_t num, size_t size)
{ {
struct socket *sock = sk->sk_socket; struct socket *sock = sk->sk_socket;
size_t size = msg_data_left(msg);
if (!sock) if (!sock)
return -EINVAL; return -EINVAL;
return kernel_sendmsg(sock, msg, vec, num, size);
if (!sock->ops->sendmsg_locked)
return sock_no_sendmsg_locked(sk, msg, size);
return sock->ops->sendmsg_locked(sk, msg, size);
} }
static int sendpage_unlocked(struct sock *sk, struct page *page, int offset, static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg)
size_t size, int flags)
{ {
struct socket *sock = sk->sk_socket; struct socket *sock = sk->sk_socket;
if (!sock) if (!sock)
return -EINVAL; return -EINVAL;
return kernel_sendpage(sock, page, offset, size, flags); return sock_sendmsg(sock, msg);
} }
/* Transmit callback used by __skb_send_sock(); either sendmsg_locked or
 * sendmsg_unlocked.  With MSG_SPLICE_PAGES there is no longer a separate
 * sendpage callback: page frags are sent through sendmsg as well.
 */
typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg);
static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset,
int len, sendmsg_func sendmsg, sendpage_func sendpage) int len, sendmsg_func sendmsg)
{ {
unsigned int orig_len = len; unsigned int orig_len = len;
struct sk_buff *head = skb; struct sk_buff *head = skb;
...@@ -3034,8 +3034,9 @@ static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, ...@@ -3034,8 +3034,9 @@ static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset,
memset(&msg, 0, sizeof(msg)); memset(&msg, 0, sizeof(msg));
msg.msg_flags = MSG_DONTWAIT; msg.msg_flags = MSG_DONTWAIT;
ret = INDIRECT_CALL_2(sendmsg, kernel_sendmsg_locked, iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &kv, 1, slen);
sendmsg_unlocked, sk, &msg, &kv, 1, slen); ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked,
sendmsg_unlocked, sk, &msg);
if (ret <= 0) if (ret <= 0)
goto error; goto error;
...@@ -3066,11 +3067,18 @@ static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, ...@@ -3066,11 +3067,18 @@ static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset,
slen = min_t(size_t, len, skb_frag_size(frag) - offset); slen = min_t(size_t, len, skb_frag_size(frag) - offset);
while (slen) { while (slen) {
ret = INDIRECT_CALL_2(sendpage, kernel_sendpage_locked, struct bio_vec bvec;
sendpage_unlocked, sk, struct msghdr msg = {
skb_frag_page(frag), .msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT,
skb_frag_off(frag) + offset, };
slen, MSG_DONTWAIT);
bvec_set_page(&bvec, skb_frag_page(frag), slen,
skb_frag_off(frag) + offset);
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1,
slen);
ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked,
sendmsg_unlocked, sk, &msg);
if (ret <= 0) if (ret <= 0)
goto error; goto error;
...@@ -3107,16 +3115,14 @@ static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, ...@@ -3107,16 +3115,14 @@ static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset,
int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
int len) int len)
{ {
return __skb_send_sock(sk, skb, offset, len, kernel_sendmsg_locked, return __skb_send_sock(sk, skb, offset, len, sendmsg_locked);
kernel_sendpage_locked);
} }
EXPORT_SYMBOL_GPL(skb_send_sock_locked); EXPORT_SYMBOL_GPL(skb_send_sock_locked);
/* Send skb data on a socket. Socket must be unlocked. */ /* Send skb data on a socket. Socket must be unlocked. */
int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len) int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len)
{ {
return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked, return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked);
sendpage_unlocked);
} }
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment