Commit 54cdcca0 authored by Jens Axboe

io_uring/net: switch io_send() and io_send_zc() to using io_async_msghdr

No functional changes in this patch, just in preparation for carrying
more state than what is being done now, if necessary. While unifying
some of this code, add a generic send setup prep handler that they can
both use.

This gets rid of some manual msghdr and sockaddr on the stack, and makes
it look a bit more like the sendmsg/recvmsg variants. Going forward, more
can get unified on top.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 0ae9b9a1
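
As a rough orientation before the diff, the sketch below is a userspace-style analogue of the pattern both send paths move toward: one shared setup helper fills a reusable message-state struct (address, iovec, msghdr), and the callers then only hand that state to sendmsg(). This is an illustration only; the names msg_state, send_setup and do_send are invented here and are not the kernel APIs touched in the patch.

/*
 * Illustrative userspace analogue only -- not the kernel code below.
 * Idea: keep the msghdr and destination address in one reusable state
 * struct, filled once by a shared setup helper, instead of rebuilding
 * them on the stack in every send path.
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

struct msg_state {                      /* hypothetical analogue of a cached msghdr state */
        struct msghdr msg;              /* carried across retries */
        struct sockaddr_storage addr;   /* destination, if any */
        struct iovec iov;               /* single user buffer */
};

/* Shared setup used by both the plain and the zero-copy send path. */
static int send_setup(struct msg_state *st, const struct sockaddr *dst,
                      socklen_t dst_len, void *buf, size_t len)
{
        memset(&st->msg, 0, sizeof(st->msg));
        if (dst) {
                if (dst_len > sizeof(st->addr))
                        return -1;
                memcpy(&st->addr, dst, dst_len);
                st->msg.msg_name = &st->addr;
                st->msg.msg_namelen = dst_len;
        }
        st->iov.iov_base = buf;
        st->iov.iov_len = len;
        st->msg.msg_iov = &st->iov;
        st->msg.msg_iovlen = 1;
        return 0;
}

/* Send path: all per-request state lives in *st, so a retry can reuse it. */
static ssize_t do_send(int fd, struct msg_state *st, int flags)
{
        return sendmsg(fd, &st->msg, flags);
}

In the kernel patch itself the reusable state is struct io_async_msghdr, the shared helper is the new io_send_setup(), and retries go through io_setup_async_msg() with the same kmsg instead of copying a sockaddr_storage off the stack.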
@@ -322,36 +322,25 @@ static int io_sendmsg_copy_hdr(struct io_kiocb *req,

 int io_send_prep_async(struct io_kiocb *req)
 {
-        struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
+        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
         struct io_async_msghdr *io;
         int ret;

         if (req_has_async_data(req))
                 return 0;
-        zc->done_io = 0;
-        if (!zc->addr)
+        sr->done_io = 0;
+        if (!sr->addr)
                 return 0;
         io = io_msg_alloc_async_prep(req);
         if (!io)
                 return -ENOMEM;
-        ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
-        return ret;
-}
-
-static int io_setup_async_addr(struct io_kiocb *req,
-                               struct sockaddr_storage *addr_storage,
-                               unsigned int issue_flags)
-{
-        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-        struct io_async_msghdr *io;
-
-        if (!sr->addr || req_has_async_data(req))
-                return -EAGAIN;
-        io = io_msg_alloc_async(req, issue_flags);
-        if (!io)
-                return -ENOMEM;
-        memcpy(&io->addr, addr_storage, sizeof(io->addr));
-        return -EAGAIN;
+        memset(&io->msg, 0, sizeof(io->msg));
+        ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &io->msg.msg_iter);
+        if (unlikely(ret))
+                return ret;
+        io->msg.msg_name = &io->addr;
+        io->msg.msg_namelen = sr->addr_len;
+        return move_addr_to_kernel(sr->addr, sr->addr_len, &io->addr);
 }

 int io_sendmsg_prep_async(struct io_kiocb *req)
@@ -475,45 +464,66 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
         return IOU_OK;
 }

-int io_send(struct io_kiocb *req, unsigned int issue_flags)
+static struct io_async_msghdr *io_send_setup(struct io_kiocb *req,
+                                             struct io_async_msghdr *stack_msg,
+                                             unsigned int issue_flags)
 {
-        struct sockaddr_storage __address;
         struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-        struct msghdr msg;
-        struct socket *sock;
-        unsigned flags;
-        int min_ret = 0;
+        struct io_async_msghdr *kmsg;
         int ret;

-        msg.msg_name = NULL;
-        msg.msg_control = NULL;
-        msg.msg_controllen = 0;
-        msg.msg_namelen = 0;
-        msg.msg_ubuf = NULL;
-
-        if (sr->addr) {
-                if (req_has_async_data(req)) {
-                        struct io_async_msghdr *io = req->async_data;
-
-                        msg.msg_name = &io->addr;
-                } else {
-                        ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
+        if (req_has_async_data(req)) {
+                kmsg = req->async_data;
+        } else {
+                kmsg = stack_msg;
+                kmsg->free_iov = NULL;
+                kmsg->msg.msg_name = NULL;
+                kmsg->msg.msg_namelen = 0;
+                kmsg->msg.msg_control = NULL;
+                kmsg->msg.msg_controllen = 0;
+                kmsg->msg.msg_ubuf = NULL;
+
+                if (sr->addr) {
+                        ret = move_addr_to_kernel(sr->addr, sr->addr_len,
+                                                  &kmsg->addr);
                         if (unlikely(ret < 0))
-                                return ret;
-                        msg.msg_name = (struct sockaddr *)&__address;
+                                return ERR_PTR(ret);
+                        kmsg->msg.msg_name = &kmsg->addr;
+                        kmsg->msg.msg_namelen = sr->addr_len;
                 }
-                msg.msg_namelen = sr->addr_len;
+
+                ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
+                                  &kmsg->msg.msg_iter);
+                if (unlikely(ret))
+                        return ERR_PTR(ret);
         }

         if (!(req->flags & REQ_F_POLLED) &&
             (sr->flags & IORING_RECVSEND_POLL_FIRST))
-                return io_setup_async_addr(req, &__address, issue_flags);
+                return ERR_PTR(io_setup_async_msg(req, kmsg, issue_flags));
+
+        return kmsg;
+}
+
+int io_send(struct io_kiocb *req, unsigned int issue_flags)
+{
+        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+        struct io_async_msghdr iomsg, *kmsg;
+        size_t len = sr->len;
+        struct socket *sock;
+        unsigned flags;
+        int min_ret = 0;
+        int ret;

         sock = sock_from_file(req->file);
         if (unlikely(!sock))
                 return -ENOTSOCK;

-        ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &msg.msg_iter);
+        kmsg = io_send_setup(req, &iomsg, issue_flags);
+        if (IS_ERR(kmsg))
+                return PTR_ERR(kmsg);
+
+        ret = import_ubuf(ITER_SOURCE, sr->buf, len, &kmsg->msg.msg_iter);
         if (unlikely(ret))
                 return ret;
@@ -521,21 +531,21 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
         if (issue_flags & IO_URING_F_NONBLOCK)
                 flags |= MSG_DONTWAIT;
         if (flags & MSG_WAITALL)
-                min_ret = iov_iter_count(&msg.msg_iter);
+                min_ret = iov_iter_count(&kmsg->msg.msg_iter);

         flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
-        msg.msg_flags = flags;
-        ret = sock_sendmsg(sock, &msg);
+        kmsg->msg.msg_flags = flags;
+        ret = sock_sendmsg(sock, &kmsg->msg);
         if (ret < min_ret) {
                 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
-                        return io_setup_async_addr(req, &__address, issue_flags);
+                        return io_setup_async_msg(req, kmsg, issue_flags);

                 if (ret > 0 && io_net_retry(sock, flags)) {
                         sr->len -= ret;
                         sr->buf += ret;
                         sr->done_io += ret;
                         req->flags |= REQ_F_BL_NO_RECYCLE;
-                        return io_setup_async_addr(req, &__address, issue_flags);
+                        return io_setup_async_msg(req, kmsg, issue_flags);
                 }
                 if (ret == -ERESTARTSYS)
                         ret = -EINTR;
@@ -545,6 +555,7 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
                 ret += sr->done_io;
         else if (sr->done_io)
                 ret = sr->done_io;
+        io_req_msg_cleanup(req, kmsg, issue_flags);
         io_req_set_res(req, ret, 0);
         return IOU_OK;
 }
@@ -1158,11 +1169,35 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
         return ret;
 }

+static int io_send_zc_import(struct io_kiocb *req, struct io_async_msghdr *kmsg)
+{
+        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+        int ret;
+
+        if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
+                ret = io_import_fixed(ITER_SOURCE, &kmsg->msg.msg_iter, req->imu,
+                                      (u64)(uintptr_t)sr->buf, sr->len);
+                if (unlikely(ret))
+                        return ret;
+                kmsg->msg.sg_from_iter = io_sg_from_iter;
+        } else {
+                io_notif_set_extended(sr->notif);
+                ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
+                if (unlikely(ret))
+                        return ret;
+                ret = io_notif_account_mem(sr->notif, sr->len);
+                if (unlikely(ret))
+                        return ret;
+                kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
+        }
+
+        return ret;
+}
+
 int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 {
-        struct sockaddr_storage __address;
         struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
-        struct msghdr msg;
+        struct io_async_msghdr iomsg, *kmsg;
         struct socket *sock;
         unsigned msg_flags;
         int ret, min_ret = 0;
@@ -1173,67 +1208,37 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
         if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
                 return -EOPNOTSUPP;

-        msg.msg_name = NULL;
-        msg.msg_control = NULL;
-        msg.msg_controllen = 0;
-        msg.msg_namelen = 0;
-
-        if (zc->addr) {
-                if (req_has_async_data(req)) {
-                        struct io_async_msghdr *io = req->async_data;
-
-                        msg.msg_name = &io->addr;
-                } else {
-                        ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
-                        if (unlikely(ret < 0))
-                                return ret;
-                        msg.msg_name = (struct sockaddr *)&__address;
-                }
-                msg.msg_namelen = zc->addr_len;
-        }
-
-        if (!(req->flags & REQ_F_POLLED) &&
-            (zc->flags & IORING_RECVSEND_POLL_FIRST))
-                return io_setup_async_addr(req, &__address, issue_flags);
+        kmsg = io_send_setup(req, &iomsg, issue_flags);
+        if (IS_ERR(kmsg))
+                return PTR_ERR(kmsg);

-        if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
-                ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
-                                      (u64)(uintptr_t)zc->buf, zc->len);
-                if (unlikely(ret))
-                        return ret;
-                msg.sg_from_iter = io_sg_from_iter;
-        } else {
-                io_notif_set_extended(zc->notif);
-                ret = import_ubuf(ITER_SOURCE, zc->buf, zc->len, &msg.msg_iter);
+        if (!zc->done_io) {
+                ret = io_send_zc_import(req, kmsg);
                 if (unlikely(ret))
                         return ret;
-                ret = io_notif_account_mem(zc->notif, zc->len);
-                if (unlikely(ret))
-                        return ret;
-                msg.sg_from_iter = io_sg_from_iter_iovec;
         }

         msg_flags = zc->msg_flags | MSG_ZEROCOPY;
         if (issue_flags & IO_URING_F_NONBLOCK)
                 msg_flags |= MSG_DONTWAIT;
         if (msg_flags & MSG_WAITALL)
-                min_ret = iov_iter_count(&msg.msg_iter);
+                min_ret = iov_iter_count(&kmsg->msg.msg_iter);
         msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;

-        msg.msg_flags = msg_flags;
-        msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
-        ret = sock_sendmsg(sock, &msg);
+        kmsg->msg.msg_flags = msg_flags;
+        kmsg->msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
+        ret = sock_sendmsg(sock, &kmsg->msg);

         if (unlikely(ret < min_ret)) {
                 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
-                        return io_setup_async_addr(req, &__address, issue_flags);
+                        return io_setup_async_msg(req, kmsg, issue_flags);

-                if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
+                if (ret > 0 && io_net_retry(sock, kmsg->msg.msg_flags)) {
                         zc->len -= ret;
                         zc->buf += ret;
                         zc->done_io += ret;
                         req->flags |= REQ_F_BL_NO_RECYCLE;
-                        return io_setup_async_addr(req, &__address, issue_flags);
+                        return io_setup_async_msg(req, kmsg, issue_flags);
                 }
                 if (ret == -ERESTARTSYS)
                         ret = -EINTR;
@@ -1251,6 +1256,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
          */
         if (!(issue_flags & IO_URING_F_UNLOCKED)) {
                 io_notif_flush(zc->notif);
+                io_netmsg_recycle(req, issue_flags);
                 req->flags &= ~REQ_F_NEED_CLEANUP;
         }
         io_req_set_res(req, ret, IORING_CQE_F_MORE);
@@ -602,6 +602,7 @@ const struct io_cold_def io_cold_defs[] = {
                 .name                   = "SEND",
 #if defined(CONFIG_NET)
                 .async_size             = sizeof(struct io_async_msghdr),
+                .cleanup                = io_sendmsg_recvmsg_cleanup,
                 .fail                   = io_sendrecv_fail,
                 .prep_async             = io_send_prep_async,
 #endif