Commit 2f9c9515 authored by Jens Axboe

io_uring/net: support bundles for recv

If IORING_OP_RECV is used with provided buffers, the caller may also set
IORING_RECVSEND_BUNDLE to turn it into a multi-buffer recv. This grabs as
many buffers as are available and receives into them, posting a single
completion for all of it.

This can be used with or without multishot receive.

Now that both send and receive support bundles, add a feature flag for it
as well. If IORING_FEAT_RECVSEND_BUNDLE is set after the ring has been set
up, then the kernel supports bundles for recv and send.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent a05d1f62
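
For context, here is roughly what a bundled recv looks like from userspace.
This is not part of the commit; it is a minimal sketch using liburing's
buffer-ring helpers, and NR_BUFS, BUF_SIZE, and BGID are placeholder values
chosen for illustration:

#include <liburing.h>
#include <stdlib.h>

#define NR_BUFS  8
#define BUF_SIZE 4096
#define BGID     0

/* Receive into a bundle of provided buffers with one SQE/CQE pair. */
static int recv_bundle(struct io_uring *ring, int sockfd)
{
        struct io_uring_buf_ring *br;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int i, ret;

        /* Register a provided-buffer ring and fill it with buffers. */
        br = io_uring_setup_buf_ring(ring, NR_BUFS, BGID, 0, &ret);
        if (!br)
                return ret;
        for (i = 0; i < NR_BUFS; i++)
                io_uring_buf_ring_add(br, malloc(BUF_SIZE), BUF_SIZE, i,
                                      io_uring_buf_ring_mask(NR_BUFS), i);
        io_uring_buf_ring_advance(br, NR_BUFS);

        /* addr/len are NULL/0: the kernel picks buffers from group BGID. */
        sqe = io_uring_get_sqe(ring);
        io_uring_prep_recv(sqe, sockfd, NULL, 0, 0);
        sqe->flags |= IOSQE_BUFFER_SELECT;
        sqe->buf_group = BGID;
        sqe->ioprio = IORING_RECVSEND_BUNDLE;

        io_uring_submit(ring);
        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret < 0)
                return ret;
        /*
         * cqe->res is the total byte count across all buffers used; the
         * starting buffer ID is in cqe->flags, as usual for provided buffers.
         */
        ret = cqe->res;
        io_uring_cqe_seen(ring, cqe);
        return ret;
}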
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -352,13 +352,13 @@ enum io_uring_op {
  *                              IORING_NOTIF_USAGE_ZC_COPIED if data was copied
  *                              (at least partially).
  *
- * IORING_RECVSEND_BUNDLE       Used with IOSQE_BUFFER_SELECT. If set, send will
- *                              grab as many buffers from the buffer group ID
- *                              given and send them all. The completion result
- *                              will be the number of buffers send, with the
- *                              starting buffer ID in cqe->flags as per usual
- *                              for provided buffer usage. The buffers will be
- *                              contigious from the starting buffer ID.
+ * IORING_RECVSEND_BUNDLE       Used with IOSQE_BUFFER_SELECT. If set, send or
+ *                              recv will grab as many buffers from the buffer
+ *                              group ID given and send them all. The completion
+ *                              result will be the number of buffers sent, with
+ *                              the starting buffer ID in cqe->flags as per
+ *                              usual for provided buffer usage. The buffers
+ *                              will be contiguous from the starting buffer ID.
  */
 #define IORING_RECVSEND_POLL_FIRST      (1U << 0)
 #define IORING_RECV_MULTISHOT           (1U << 1)
@@ -529,6 +529,7 @@ struct io_uring_params {
 #define IORING_FEAT_CQE_SKIP            (1U << 11)
 #define IORING_FEAT_LINKED_FILE         (1U << 12)
 #define IORING_FEAT_REG_REG_RING        (1U << 13)
+#define IORING_FEAT_RECVSEND_BUNDLE     (1U << 14)

 /*
  * io_uring_register(2) opcodes and arguments
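
On the recv side, the CQE for a bundle carries the total bytes received, and
the buffers are consumed contiguously upward from the reported starting ID. A
hedged sketch of unwinding such a completion follows; buf_addr() and consume()
are hypothetical application helpers, and it assumes every buffer in the group
is BUF_SIZE bytes:

#include <liburing.h>

#define BUF_SIZE 4096

extern void *buf_addr(unsigned int bid);        /* hypothetical lookup */
extern void consume(void *buf, int len);        /* hypothetical handler */

static void handle_bundle_cqe(struct io_uring_cqe *cqe)
{
        unsigned int bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
        int left = cqe->res;

        /* Buffers are used contiguously from the starting buffer ID. */
        while (left > 0) {
                int len = left < BUF_SIZE ? left : BUF_SIZE;

                consume(buf_addr(bid++), len);
                left -= len;
        }
}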
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -3583,7 +3583,8 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
                        IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
                        IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
                        IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
-                       IORING_FEAT_LINKED_FILE | IORING_FEAT_REG_REG_RING;
+                       IORING_FEAT_LINKED_FILE | IORING_FEAT_REG_REG_RING |
+                       IORING_FEAT_RECVSEND_BUNDLE;

        if (copy_to_user(params, p, sizeof(*p))) {
                ret = -EFAULT;
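
Since the features mask is filled in at setup time, userspace can probe for
the new bit before using bundles. Not part of the commit; a minimal check
assuming headers new enough to define IORING_FEAT_RECVSEND_BUNDLE:

#include <liburing.h>
#include <stdbool.h>

/* Returns true if this kernel supports send/recv bundles. */
static bool have_recvsend_bundle(void)
{
        struct io_uring_params p = { };
        struct io_uring ring;
        bool ok;

        if (io_uring_queue_init_params(1, &ring, &p) < 0)
                return false;
        ok = p.features & IORING_FEAT_RECVSEND_BUNDLE;
        io_uring_queue_exit(&ring);
        return ok;
}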
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -747,7 +747,8 @@ static int io_recvmsg_prep_setup(struct io_kiocb *req)
        return ret;
 }

-#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)
+#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT | \
+                       IORING_RECVSEND_BUNDLE)

 int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
@@ -761,21 +762,14 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
        sr->len = READ_ONCE(sqe->len);
        sr->flags = READ_ONCE(sqe->ioprio);
-       if (sr->flags & ~(RECVMSG_FLAGS))
+       if (sr->flags & ~RECVMSG_FLAGS)
                return -EINVAL;
        sr->msg_flags = READ_ONCE(sqe->msg_flags);
        if (sr->msg_flags & MSG_DONTWAIT)
                req->flags |= REQ_F_NOWAIT;
        if (sr->msg_flags & MSG_ERRQUEUE)
                req->flags |= REQ_F_CLEAR_POLLIN;
-       if (sr->flags & IORING_RECV_MULTISHOT) {
-               if (!(req->flags & REQ_F_BUFFER_SELECT))
-                       return -EINVAL;
-               if (sr->msg_flags & MSG_WAITALL)
-                       return -EINVAL;
-               if (req->opcode == IORING_OP_RECV && sr->len)
-                       return -EINVAL;
-               req->flags |= REQ_F_APOLL_MULTISHOT;
+       if (req->flags & REQ_F_BUFFER_SELECT) {
                /*
                 * Store the buffer group for this multishot receive separately,
                 * as if we end up doing an io-wq based issue that selects a
@@ -785,6 +779,20 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                 * restore it.
                 */
                sr->buf_group = req->buf_index;
+               req->buf_list = NULL;
+       }
+       if (sr->flags & IORING_RECV_MULTISHOT) {
+               if (!(req->flags & REQ_F_BUFFER_SELECT))
+                       return -EINVAL;
+               if (sr->msg_flags & MSG_WAITALL)
+                       return -EINVAL;
+               if (req->opcode == IORING_OP_RECV && sr->len)
+                       return -EINVAL;
+               req->flags |= REQ_F_APOLL_MULTISHOT;
+       }
+       if (sr->flags & IORING_RECVSEND_BUNDLE) {
+               if (req->opcode == IORING_OP_RECVMSG)
+                       return -EINVAL;
        }

 #ifdef CONFIG_COMPAT
@@ -805,19 +813,28 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
                                  struct io_async_msghdr *kmsg,
                                  bool mshot_finished, unsigned issue_flags)
 {
+       struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
        unsigned int cflags;

-       cflags = io_put_kbuf(req, issue_flags);
+       if (sr->flags & IORING_RECVSEND_BUNDLE)
+               cflags = io_put_kbufs(req, io_bundle_nbufs(kmsg, *ret),
+                                     issue_flags);
+       else
+               cflags = io_put_kbuf(req, issue_flags);
+
        if (kmsg->msg.msg_inq > 0)
                cflags |= IORING_CQE_F_SOCK_NONEMPTY;

+       /* bundle with no more immediate buffers, we're done */
+       if (sr->flags & IORING_RECVSEND_BUNDLE && req->flags & REQ_F_BL_EMPTY)
+               goto finish;
+
        /*
         * Fill CQE for this receive and see if we should keep trying to
         * receive from this socket.
         */
        if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished &&
            io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) {
-               struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
                int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;

                io_mshot_prep_retry(req, kmsg);
@@ -837,6 +854,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
        }

        /* Finish the request / stop multishot. */
+finish:
        io_req_set_res(req, *ret, cflags);
        if (issue_flags & IO_URING_F_MULTISHOT)
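
io_put_kbufs() needs to know how many buffers the receive actually consumed.
io_bundle_nbufs(), added with the send-side bundle support and not shown in
this diff, derives that from the iterator; conceptually it is a walk over the
selected iovecs, along these lines. A sketch only, not the kernel
implementation, and the one-buffer floor for short transfers is an assumption:

#include <stddef.h>
#include <sys/uio.h>

/* Sketch: count how many iovecs a transfer of 'ret' bytes spanned. */
static int bundle_nbufs_sketch(const struct iovec *iov, int nr_iovs, size_t ret)
{
        int nbufs = 0;

        while (ret && nbufs < nr_iovs) {
                size_t len = iov[nbufs].iov_len;

                ret -= len < ret ? len : ret;
                nbufs++;
        }
        /* assumption: even a short/zero transfer releases one buffer */
        return nbufs ? nbufs : 1;
}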
@@ -1020,6 +1038,69 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
        return ret;
 }

+static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg,
+                             size_t *len, unsigned int issue_flags)
+{
+       struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+       int ret;
+
+       /*
+        * If the ring isn't locked, then don't use the peek interface
+        * to grab multiple buffers as we will lock/unlock between
+        * this selection and posting the buffers.
+        */
+       if (!(issue_flags & IO_URING_F_UNLOCKED) &&
+           sr->flags & IORING_RECVSEND_BUNDLE) {
+               struct buf_sel_arg arg = {
+                       .iovs = &kmsg->fast_iov,
+                       .nr_iovs = 1,
+                       .mode = KBUF_MODE_EXPAND,
+               };
+
+               if (kmsg->free_iov) {
+                       arg.nr_iovs = kmsg->free_iov_nr;
+                       arg.iovs = kmsg->free_iov;
+                       arg.mode |= KBUF_MODE_FREE;
+               }
+
+               if (kmsg->msg.msg_inq > 0)
+                       arg.max_len = min_not_zero(sr->len, kmsg->msg.msg_inq);
+
+               ret = io_buffers_peek(req, &arg);
+               if (unlikely(ret < 0))
+                       return ret;
+
+               /* special case 1 vec, can be a fast path */
+               if (ret == 1) {
+                       sr->buf = arg.iovs[0].iov_base;
+                       sr->len = arg.iovs[0].iov_len;
+                       goto map_ubuf;
+               }
+               iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, arg.iovs, ret,
+                               arg.out_len);
+               if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
+                       kmsg->free_iov_nr = ret;
+                       kmsg->free_iov = arg.iovs;
+               }
+       } else {
+               void __user *buf;
+
+               *len = sr->len;
+               buf = io_buffer_select(req, len, issue_flags);
+               if (!buf)
+                       return -ENOBUFS;
+               sr->buf = buf;
+               sr->len = *len;
+map_ubuf:
+               ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
+                                 &kmsg->msg.msg_iter);
+               if (unlikely(ret))
+                       return ret;
+       }
+
+       return 0;
+}
+
 int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
@@ -1044,17 +1125,10 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 retry_multishot:
        if (io_do_buffer_select(req)) {
-               void __user *buf;
-
-               buf = io_buffer_select(req, &len, issue_flags);
-               if (!buf)
-                       return -ENOBUFS;
-               sr->buf = buf;
-               sr->len = len;
-               ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
-                                 &kmsg->msg.msg_iter);
+               ret = io_recv_buf_select(req, kmsg, &len, issue_flags);
                if (unlikely(ret))
                        goto out_free;
+               sr->buf = NULL;
        }

        kmsg->msg.msg_inq = -1;