Commit 65453d1e authored by Jens Axboe

io_uring: enable req cache for task_work items

task_work is run without utilizing the req alloc cache, so any deferred
items don't get to take advantage of either the alloc or free side of it.
With task_work now being wrapped by io_uring, we can use the ctx
completion state to take advantage of both the req cache and the
completion flush batching.

With this, the only request type that cannot take advantage of the req
cache is IRQ driven IO for regular files / block devices. Anything else,
including IOPOLL polled IO to those same types, will take advantage of it.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 7cbf1722
@@ -1051,6 +1051,8 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
 			     const struct iovec *fast_iov,
 			     struct iov_iter *iter, bool force);
 static void io_req_task_queue(struct io_kiocb *req);
+static void io_submit_flush_completions(struct io_comp_state *cs,
+					struct io_ring_ctx *ctx);
 
 static struct kmem_cache *req_cachep;
@@ -2139,6 +2141,7 @@ static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
 static bool __tctx_task_work(struct io_uring_task *tctx)
 {
+	struct io_ring_ctx *ctx = NULL;
 	struct io_wq_work_list list;
 	struct io_wq_work_node *node;
@@ -2153,11 +2156,28 @@ static bool __tctx_task_work(struct io_uring_task *tctx)
 	node = list.first;
 	while (node) {
 		struct io_wq_work_node *next = node->next;
+		struct io_ring_ctx *this_ctx;
 		struct io_kiocb *req;
 
 		req = container_of(node, struct io_kiocb, io_task_work.node);
+		this_ctx = req->ctx;
 		req->task_work.func(&req->task_work);
 		node = next;
+
+		if (!ctx) {
+			ctx = this_ctx;
+		} else if (ctx != this_ctx) {
+			mutex_lock(&ctx->uring_lock);
+			io_submit_flush_completions(&ctx->submit_state.comp, ctx);
+			mutex_unlock(&ctx->uring_lock);
+			ctx = this_ctx;
+		}
+	}
+
+	if (ctx && ctx->submit_state.comp.nr) {
+		mutex_lock(&ctx->uring_lock);
+		io_submit_flush_completions(&ctx->submit_state.comp, ctx);
+		mutex_unlock(&ctx->uring_lock);
 	}
 
 	return list.first != NULL;
@@ -2280,7 +2300,7 @@ static void __io_req_task_submit(struct io_kiocb *req)
 	if (!ctx->sqo_dead &&
 	    !__io_sq_thread_acquire_mm(ctx) &&
 	    !__io_sq_thread_acquire_files(ctx))
-		__io_queue_sqe(req, NULL);
+		__io_queue_sqe(req, &ctx->submit_state.comp);
 	else
 		__io_req_task_cancel(req, -EFAULT);
 	mutex_unlock(&ctx->uring_lock);
...
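
The key hunk above is __tctx_task_work(): it runs each queued task_work item, remembers which ring (ctx) the item belonged to, and flushes the batched completions whenever the ring changes, plus once more at the end. For readers outside the kernel tree, here is a minimal user-space C sketch of that batching pattern, assuming a flush simply means "post everything pending for one ring"; all names here (struct ring, struct work_item, flush_completions()) are illustrative stand-ins, not io_uring API:

#include <stdio.h>

/* Stand-in for io_ring_ctx: one ring with a count of batched completions. */
struct ring {
	const char *name;
	int nr_pending;		/* mirrors ctx->submit_state.comp.nr */
};

/* Stand-in for a queued task_work item belonging to one ring. */
struct work_item {
	struct ring *ring;
	struct work_item *next;
};

/* Stand-in for io_submit_flush_completions(): post all completions
 * batched for one ring in a single step. */
static void flush_completions(struct ring *ring)
{
	printf("flushing %d completions for %s\n", ring->nr_pending, ring->name);
	ring->nr_pending = 0;
}

/* The batching loop from __tctx_task_work(), reduced to its shape:
 * run each item, flush whenever the ring changes or the list ends. */
static void run_task_work(struct work_item *node)
{
	struct ring *ctx = NULL;

	while (node) {
		struct work_item *next = node->next;
		struct ring *this_ctx = node->ring;

		this_ctx->nr_pending++;	/* the handler queues a completion */
		node = next;

		if (!ctx) {
			ctx = this_ctx;
		} else if (ctx != this_ctx) {
			flush_completions(ctx);	/* ring changed: flush the batch */
			ctx = this_ctx;
		}
	}
	if (ctx && ctx->nr_pending)
		flush_completions(ctx);	/* final flush, as at the end of the hunk */
}

int main(void)
{
	struct ring a = { "ring A", 0 }, b = { "ring B", 0 };
	struct work_item w3 = { &b, NULL };
	struct work_item w2 = { &a, &w3 };
	struct work_item w1 = { &a, &w2 };

	run_task_work(&w1);	/* prints: 2 completions for ring A, then 1 for ring B */
	return 0;
}

The real code additionally takes ctx->uring_lock around each flush and only flushes when ctx->submit_state.comp.nr is non-zero, which the sketch models with the nr_pending counter.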