Commit d10f19df authored by Jens Axboe

io_uring/uring_cmd: switch to always allocating async data

Basic conversion ensuring async_data is allocated off the prep path. Adds
a basic alloc cache as well, as passthrough IO can be quite high in rate.
Tested-by: Anuj Gupta <anuj20.g@samsung.com>
Reviewed-by: Anuj Gupta <anuj20.g@samsung.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent e2ea5a70
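
The reuse pattern added below is a small free list in front of the allocator: prep obtains a struct uring_cache from the per-ring cache via io_uring_async_get() (falling back to io_alloc_async_data()), and io_req_uring_cleanup() pushes it back on completion unless the request completes unlocked. As a rough illustration only, here is a minimal userspace sketch of that get/put idea; it is not kernel code, and all identifiers in it (demo_cache, demo_entry, DEMO_CACHE_MAX) are made-up stand-ins, not io_uring APIs.

/*
 * Minimal sketch of an alloc cache: consult a free list first,
 * fall back to malloc(), and recycle entries on "completion"
 * instead of freeing them. Illustrative names only.
 */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_CACHE_MAX	32	/* cap on cached entries, in the spirit of IO_ALLOC_CACHE_MAX */

struct demo_entry {
	struct demo_entry *next;	/* free-list linkage while the entry is cached */
	char payload[128];		/* stands in for the cached SQE data */
};

struct demo_cache {
	struct demo_entry *head;
	unsigned int nr;
};

/* Get an entry: reuse a cached one if available, otherwise allocate. */
static struct demo_entry *demo_cache_get(struct demo_cache *c)
{
	if (c->head) {
		struct demo_entry *e = c->head;

		c->head = e->next;
		c->nr--;
		return e;
	}
	return malloc(sizeof(struct demo_entry));
}

/* Put an entry back; only free it if the cache is already full. */
static void demo_cache_put(struct demo_cache *c, struct demo_entry *e)
{
	if (c->nr >= DEMO_CACHE_MAX) {
		free(e);
		return;
	}
	e->next = c->head;
	c->head = e;
	c->nr++;
}

int main(void)
{
	struct demo_cache cache = { 0 };
	struct demo_entry *e, *e2;

	/* First get falls back to malloc(), the put caches the entry... */
	e = demo_cache_get(&cache);
	demo_cache_put(&cache, e);
	/* ...so the next get is served from the cache without allocating. */
	e2 = demo_cache_get(&cache);
	printf("reused cached entry: %s\n", e2 == e ? "yes" : "no");
	free(e2);
	return 0;
}

In the actual patch the cached object is struct uring_cache, whose union overlays the free-list linkage (struct io_cache_entry) on top of the two cached SQEs, so entries parked in the cache need no extra storage for the list.
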
include/linux/io_uring_types.h
@@ -301,6 +301,7 @@ struct io_ring_ctx {
 		struct io_alloc_cache	apoll_cache;
 		struct io_alloc_cache	netmsg_cache;
 		struct io_alloc_cache	rw_cache;
+		struct io_alloc_cache	uring_cache;
 
 		/*
 		 * Any cancelable uring_cmd is added to this list in
io_uring/io_uring.c
@@ -313,6 +313,8 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 			    sizeof(struct io_async_msghdr));
 	io_alloc_cache_init(&ctx->rw_cache, IO_ALLOC_CACHE_MAX,
 			    sizeof(struct io_async_rw));
+	io_alloc_cache_init(&ctx->uring_cache, IO_ALLOC_CACHE_MAX,
+			    sizeof(struct uring_cache));
 	io_futex_cache_init(ctx);
 	init_completion(&ctx->ref_comp);
 	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
@@ -2826,6 +2828,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free);
 	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
 	io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
+	io_alloc_cache_free(&ctx->uring_cache, io_uring_cache_free);
 	io_futex_cache_free(ctx);
 	io_destroy_buffers(ctx);
 	mutex_unlock(&ctx->uring_lock);
io_uring/opdef.c
@@ -677,7 +677,6 @@ const struct io_cold_def io_cold_defs[] = {
 	[IORING_OP_URING_CMD] = {
 		.name			= "URING_CMD",
 		.async_size		= 2 * sizeof(struct io_uring_sqe),
-		.prep_async		= io_uring_cmd_prep_async,
 	},
 	[IORING_OP_SEND_ZC] = {
 		.name			= "SEND_ZC",
io_uring/uring_cmd.c
@@ -14,6 +14,38 @@
 #include "rsrc.h"
 #include "uring_cmd.h"
 
+static struct uring_cache *io_uring_async_get(struct io_kiocb *req)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+	struct io_cache_entry *entry;
+	struct uring_cache *cache;
+
+	entry = io_alloc_cache_get(&ctx->uring_cache);
+	if (entry) {
+		cache = container_of(entry, struct uring_cache, cache);
+		req->flags |= REQ_F_ASYNC_DATA;
+		req->async_data = cache;
+		return cache;
+	}
+	if (!io_alloc_async_data(req))
+		return req->async_data;
+	return NULL;
+}
+
+static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
+{
+	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
+	struct uring_cache *cache = req->async_data;
+
+	if (issue_flags & IO_URING_F_UNLOCKED)
+		return;
+	if (io_alloc_cache_put(&req->ctx->uring_cache, &cache->cache)) {
+		ioucmd->sqe = NULL;
+		req->async_data = NULL;
+		req->flags &= ~REQ_F_ASYNC_DATA;
+	}
+}
+
 bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
 				   struct task_struct *task, bool cancel_all)
 {
@@ -128,6 +160,7 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
 		io_req_set_res(req, ret, 0);
 		if (req->ctx->flags & IORING_SETUP_CQE32)
 			io_req_set_cqe32_extra(req, res2, 0);
+		io_req_uring_cleanup(req, issue_flags);
 		if (req->ctx->flags & IORING_SETUP_IOPOLL) {
 			/* order with io_iopoll_req_issued() checking ->iopoll_complete */
 			smp_store_release(&req->iopoll_completed, 1);
@@ -142,13 +175,19 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_done);
 
-int io_uring_cmd_prep_async(struct io_kiocb *req)
+static int io_uring_cmd_prep_setup(struct io_kiocb *req,
+				   const struct io_uring_sqe *sqe)
 {
 	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
+	struct uring_cache *cache;
 
-	memcpy(req->async_data, ioucmd->sqe, uring_sqe_size(req->ctx));
-	ioucmd->sqe = req->async_data;
-	return 0;
+	cache = io_uring_async_get(req);
+	if (cache) {
+		memcpy(cache->sqes, sqe, uring_sqe_size(req->ctx));
+		ioucmd->sqe = req->async_data;
+		return 0;
+	}
+	return -ENOMEM;
 }
 
 int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -173,9 +212,9 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		req->imu = ctx->user_bufs[index];
 		io_req_set_rsrc_node(req, ctx, 0);
 	}
-	ioucmd->sqe = sqe;
 	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
-	return 0;
+
+	return io_uring_cmd_prep_setup(req, sqe);
 }
 
 int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
@@ -206,23 +245,14 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
 	}
 
 	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
-	if (ret == -EAGAIN) {
-		if (!req_has_async_data(req)) {
-			if (io_alloc_async_data(req))
-				return -ENOMEM;
-			io_uring_cmd_prep_async(req);
-		}
-		return -EAGAIN;
-	}
-
-	if (ret != -EIOCBQUEUED) {
-		if (ret < 0)
-			req_set_fail(req);
-		io_req_set_res(req, ret, 0);
+	if (ret == -EAGAIN || ret == -EIOCBQUEUED)
 		return ret;
-	}
 
-	return IOU_ISSUE_SKIP_COMPLETE;
+	if (ret < 0)
+		req_set_fail(req);
+	io_req_uring_cleanup(req, issue_flags);
+	io_req_set_res(req, ret, 0);
+	return ret;
 }
 
 int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
@@ -311,3 +341,8 @@ int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_sock);
 #endif
+
+void io_uring_cache_free(struct io_cache_entry *entry)
+{
+	kfree(container_of(entry, struct uring_cache, cache));
+}
io_uring/uring_cmd.h
 // SPDX-License-Identifier: GPL-2.0
 
+struct uring_cache {
+	union {
+		struct io_cache_entry cache;
+		struct io_uring_sqe sqes[2];
+	};
+};
+
 int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags);
 int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_uring_cmd_prep_async(struct io_kiocb *req);
+void io_uring_cache_free(struct io_cache_entry *entry);
 
 bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
 				   struct task_struct *task, bool cancel_all);
\ No newline at end of file