Commit d7593606 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: split tw fallback into a function

When the target process is dying and so task_work_add() is not allowed,
we push all task_work items to the fallback workqueue. Move the part
responsible for moving tw items out of __io_req_task_work_add() into
a separate function. This makes it a bit cleaner and gives the compiler
a bit of extra info.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/e503dab9d7af95470ca6b214c6de17715ae4e748.1668162751.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent e52d2e58
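
The fallback path is built on two lock-free llist primitives: llist_del_all() atomically detaches the entire pending list, and llist_add() returns true when it inserts into an empty list, which is what gates the schedule_delayed_work() call so the fallback work is only scheduled once per batch. Below is a minimal userspace sketch of the same drain pattern; every demo_* name is an illustrative stand-in, not a kernel API, and plain pointer operations stand in for the kernel's atomic llist ones.

/* Userspace sketch of the drain pattern used by io_fallback_tw():
 * detach the whole pending list in one step, then walk it with a
 * container_of()-style macro, saving node->next before each re-queue
 * because re-queueing overwrites the node's next pointer.
 */
#include <stddef.h>
#include <stdio.h>

struct demo_node { struct demo_node *next; };
struct demo_req  { int id; struct demo_node work; };

#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-ins for the kernel's lock-free llist_add()/llist_del_all(). */
static int demo_add(struct demo_node *n, struct demo_node **head)
{
	int was_empty = (*head == NULL);

	n->next = *head;
	*head = n;
	return was_empty;	/* like llist_add(): true if list was empty */
}

static struct demo_node *demo_del_all(struct demo_node **head)
{
	struct demo_node *node = *head;

	*head = NULL;
	return node;
}

int main(void)
{
	struct demo_node *task_list = NULL, *fallback_list = NULL;
	struct demo_req reqs[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
	struct demo_node *node;
	int i;

	for (i = 0; i < 3; i++)
		demo_add(&reqs[i].work, &task_list);

	/* Mirrors the loop shape of io_fallback_tw(). */
	node = demo_del_all(&task_list);
	while (node) {
		struct demo_req *req =
			demo_container_of(node, struct demo_req, work);

		node = node->next;	/* save: the add below clobbers it */
		if (demo_add(&req->work, &fallback_list))
			printf("req %d was first in: schedule fallback work\n",
			       req->id);
	}
	return 0;
}

Note the node = node->next before the re-queue: adding the node to the fallback list reuses its next pointer, so it must be saved first, exactly as io_fallback_tw() does.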
@@ -1105,6 +1105,20 @@ void tctx_task_work(struct callback_head *cb)
 	trace_io_uring_task_work_run(tctx, count, loops);
 }
 
+static __cold void io_fallback_tw(struct io_uring_task *tctx)
+{
+	struct llist_node *node = llist_del_all(&tctx->task_list);
+	struct io_kiocb *req;
+
+	while (node) {
+		req = container_of(node, struct io_kiocb, io_task_work.node);
+		node = node->next;
+		if (llist_add(&req->io_task_work.node,
+			      &req->ctx->fallback_llist))
+			schedule_delayed_work(&req->ctx->fallback_work, 1);
+	}
+}
+
 static void io_req_local_work_add(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
@@ -1131,7 +1145,6 @@ void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
 {
 	struct io_uring_task *tctx = req->task->io_uring;
 	struct io_ring_ctx *ctx = req->ctx;
-	struct llist_node *node;
 
 	if (allow_local && ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
 		io_req_local_work_add(req);
@@ -1148,15 +1161,7 @@ void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
 	if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
 		return;
 
-	node = llist_del_all(&tctx->task_list);
-
-	while (node) {
-		req = container_of(node, struct io_kiocb, io_task_work.node);
-		node = node->next;
-		if (llist_add(&req->io_task_work.node,
-			      &req->ctx->fallback_llist))
-			schedule_delayed_work(&req->ctx->fallback_work, 1);
-	}
+	io_fallback_tw(tctx);
 }
 
 static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
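
A note on the __cold annotation on the new helper, which is presumably part of the "extra info" for the compiler that the commit message mentions: in the kernel, __cold expands to the compiler attribute shown below, marking the function as rarely executed so that GCC/Clang optimize it for size and keep it out of the hot path. The following is an illustrative standalone sketch, not kernel source; the macro is defined by hand here because only the kernel's headers provide it.

/* Illustrative definition; in-kernel this comes from the compiler
 * attribute headers. */
#define __cold __attribute__((__cold__))

#include <stdio.h>

/* Rarely taken fallback path: the cold attribute asks the compiler to
 * optimize this function for size and place it away from hot code; GCC
 * also treats calls to cold functions as unlikely branches. */
static __cold void fallback_path(void)
{
	puts("slow path taken");
}

int main(void)
{
	int failed = 0;	/* pretend the fast path succeeded */

	if (failed)
		fallback_path();
	return 0;
}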