Commit 77698878 authored by David Howells, committed by Jakub Kicinski

nvme-tcp: Use sendmsg(MSG_SPLICE_PAGES) rather than sendpage

When transmitting data, call down into TCP using a sendmsg with
MSG_SPLICE_PAGES instead of sendpage.
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Sagi Grimberg <sagi@grimberg.me>
Acked-by: Willem de Bruijn <willemb@google.com>
cc: Keith Busch <kbusch@kernel.org>
cc: Jens Axboe <axboe@fb.com>
cc: Christoph Hellwig <hch@lst.de>
cc: Chaitanya Kulkarni <kch@nvidia.com>
cc: Jens Axboe <axboe@kernel.dk>
cc: Matthew Wilcox <willy@infradead.org>
cc: linux-nvme@lists.infradead.org
Link: https://lore.kernel.org/r/20230623225513.2732256-8-dhowells@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent a1a5e875
...@@ -997,25 +997,28 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req) ...@@ -997,25 +997,28 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
u32 h2cdata_left = req->h2cdata_left; u32 h2cdata_left = req->h2cdata_left;
while (true) { while (true) {
struct bio_vec bvec;
struct msghdr msg = {
.msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
};
struct page *page = nvme_tcp_req_cur_page(req); struct page *page = nvme_tcp_req_cur_page(req);
size_t offset = nvme_tcp_req_cur_offset(req); size_t offset = nvme_tcp_req_cur_offset(req);
size_t len = nvme_tcp_req_cur_length(req); size_t len = nvme_tcp_req_cur_length(req);
bool last = nvme_tcp_pdu_last_send(req, len); bool last = nvme_tcp_pdu_last_send(req, len);
int req_data_sent = req->data_sent; int req_data_sent = req->data_sent;
int ret, flags = MSG_DONTWAIT; int ret;
if (last && !queue->data_digest && !nvme_tcp_queue_more(queue)) if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
flags |= MSG_EOR; msg.msg_flags |= MSG_EOR;
else else
flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST; msg.msg_flags |= MSG_MORE;
if (sendpage_ok(page)) { if (!sendpage_ok(page))
ret = kernel_sendpage(queue->sock, page, offset, len, msg.msg_flags &= ~MSG_SPLICE_PAGES,
flags);
} else { bvec_set_page(&bvec, page, len, offset);
ret = sock_no_sendpage(queue->sock, page, offset, len, iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
flags); ret = sock_sendmsg(queue->sock, &msg);
}
if (ret <= 0) if (ret <= 0)
return ret; return ret;
...@@ -1054,22 +1057,24 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req) ...@@ -1054,22 +1057,24 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
{ {
struct nvme_tcp_queue *queue = req->queue; struct nvme_tcp_queue *queue = req->queue;
struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req); struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
struct bio_vec bvec;
struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
bool inline_data = nvme_tcp_has_inline_data(req); bool inline_data = nvme_tcp_has_inline_data(req);
u8 hdgst = nvme_tcp_hdgst_len(queue); u8 hdgst = nvme_tcp_hdgst_len(queue);
int len = sizeof(*pdu) + hdgst - req->offset; int len = sizeof(*pdu) + hdgst - req->offset;
int flags = MSG_DONTWAIT;
int ret; int ret;
if (inline_data || nvme_tcp_queue_more(queue)) if (inline_data || nvme_tcp_queue_more(queue))
flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST; msg.msg_flags |= MSG_MORE;
else else
flags |= MSG_EOR; msg.msg_flags |= MSG_EOR;
if (queue->hdr_digest && !req->offset) if (queue->hdr_digest && !req->offset)
nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
ret = kernel_sendpage(queue->sock, virt_to_page(pdu), bvec_set_virt(&bvec, (void *)pdu + req->offset, len);
offset_in_page(pdu) + req->offset, len, flags); iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
ret = sock_sendmsg(queue->sock, &msg);
if (unlikely(ret <= 0)) if (unlikely(ret <= 0))
return ret; return ret;
...@@ -1093,6 +1098,8 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req) ...@@ -1093,6 +1098,8 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
{ {
struct nvme_tcp_queue *queue = req->queue; struct nvme_tcp_queue *queue = req->queue;
struct nvme_tcp_data_pdu *pdu = nvme_tcp_req_data_pdu(req); struct nvme_tcp_data_pdu *pdu = nvme_tcp_req_data_pdu(req);
struct bio_vec bvec;
struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_MORE, };
u8 hdgst = nvme_tcp_hdgst_len(queue); u8 hdgst = nvme_tcp_hdgst_len(queue);
int len = sizeof(*pdu) - req->offset + hdgst; int len = sizeof(*pdu) - req->offset + hdgst;
int ret; int ret;
...@@ -1101,13 +1108,11 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req) ...@@ -1101,13 +1108,11 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
if (!req->h2cdata_left) if (!req->h2cdata_left)
ret = kernel_sendpage(queue->sock, virt_to_page(pdu), msg.msg_flags |= MSG_SPLICE_PAGES;
offset_in_page(pdu) + req->offset, len,
MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST); bvec_set_virt(&bvec, (void *)pdu + req->offset, len);
else iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
ret = sock_no_sendpage(queue->sock, virt_to_page(pdu), ret = sock_sendmsg(queue->sock, &msg);
offset_in_page(pdu) + req->offset, len,
MSG_DONTWAIT | MSG_MORE);
if (unlikely(ret <= 0)) if (unlikely(ret <= 0))
return ret; return ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment