Commit 360cd42c authored by Pavel Begunkov, committed by Jens Axboe

io_uring: optimise io_req_local_work_add

Chains of memory accesses are never good for performance.
The req->task->io_uring->in_cancel check in io_req_local_work_add() is
there so that when a task is exiting via io_uring_try_cancel_requests()
and starts waiting for completions, it gets woken up by every new
task_work item queued.

Instead, do a little trick: announce that we are waiting in
io_uring_try_cancel_requests() itself, so io_req_local_work_add() will
wake us up. We also need to check for deferred tw items after
prepare_to_wait(TASK_INTERRUPTIBLE).
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/fb11597e9bbcb365901824f8c5c2cf0d6ee100d0.1680782017.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent c66ae3ec
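
The diff below implements the trick described above with ctx->cq_wait_nr and an extra work_llist scan after prepare_to_wait(). As a standalone illustration of the same announce/barrier/re-check pattern, here is a minimal userspace C sketch; all names in it (waiters, work_pending, producer, wait_for_work) are made up for the example and are not io_uring symbols, and seq_cst C11 atomics stand in for atomic_set() plus smp_mb().

/* Minimal userspace sketch of the pattern used by this patch (illustrative only). */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static atomic_int waiters;		/* stands in for ctx->cq_wait_nr */
static atomic_bool work_pending;	/* stands in for a non-empty ctx->work_llist */

/* Producer side: publish work first, then wake only if someone announced waiting. */
static void *producer(void *arg)
{
	(void)arg;
	atomic_store(&work_pending, true);	/* like queueing a task_work item */
	if (atomic_load(&waiters)) {		/* like reading cq_wait_nr */
		pthread_mutex_lock(&lock);
		pthread_cond_signal(&cond);
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

/* Consumer side: announce waiting, then re-check for work before sleeping. */
static void wait_for_work(void)
{
	/*
	 * Announce waiting; a seq_cst store here plays the role of
	 * atomic_set(&ctx->cq_wait_nr, 1) followed by smp_mb().
	 */
	atomic_store(&waiters, 1);

	pthread_mutex_lock(&lock);
	/*
	 * Re-check after announcing, mirroring the work_llist scan done
	 * after prepare_to_wait(); without it a wakeup could be missed.
	 */
	while (!atomic_load(&work_pending))
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	atomic_store(&waiters, 0);
}

int main(void)
{
	pthread_t thr;

	pthread_create(&thr, NULL, producer, NULL);
	wait_for_work();
	pthread_join(&thr, NULL);
	printf("woken up, work was pending\n");
	return 0;
}

Either ordering works: if the producer publishes work before the consumer announces itself, the consumer's re-check sees the work and never sleeps; if the consumer announces first, the producer sees waiters set and signals.
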
@@ -1335,10 +1335,6 @@ static void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
 			      &req->io_task_work.node));
 
 	if (!first) {
-		if (unlikely(atomic_read(&req->task->io_uring->in_cancel))) {
-			io_move_task_work_from_local(ctx);
-			return;
-		}
 		if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
 			atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
 		if (ctx->has_evfd)
@@ -3205,6 +3201,12 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
 	enum io_wq_cancel cret;
 	bool ret = false;
 
+	/* set it so io_req_local_work_add() would wake us up */
+	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
+		atomic_set(&ctx->cq_wait_nr, 1);
+		smp_mb();
+	}
+
 	/* failed during ring init, it couldn't have issued any requests */
 	if (!ctx->rings)
 		return false;
@@ -3259,6 +3261,8 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
 {
 	struct io_uring_task *tctx = current->io_uring;
 	struct io_ring_ctx *ctx;
+	struct io_tctx_node *node;
+	unsigned long index;
 	s64 inflight;
 	DEFINE_WAIT(wait);
@@ -3280,9 +3284,6 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
 			break;
 
 		if (!sqd) {
-			struct io_tctx_node *node;
-			unsigned long index;
-
 			xa_for_each(&tctx->xa, index, node) {
 				/* sqpoll task will cancel all its requests */
 				if (node->ctx->sq_data)
@@ -3305,7 +3306,13 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
 		prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
 		io_run_task_work();
 		io_uring_drop_tctx_refs(current);
+		xa_for_each(&tctx->xa, index, node) {
+			if (!llist_empty(&node->ctx->work_llist)) {
+				WARN_ON_ONCE(node->ctx->submitter_task &&
+					     node->ctx->submitter_task != current);
+				goto end_wait;
+			}
+		}
 		/*
		 * If we've seen completions, retry without waiting. This
		 * avoids a race where a completion comes in before we did
@@ -3313,6 +3320,7 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
 		 */
 		if (inflight == tctx_inflight(tctx, !cancel_all))
 			schedule();
+end_wait:
 		finish_wait(&tctx->wait, &wait);
 	} while (1);