Commit dfbe5561 authored by Jens Axboe

io_uring: flush offloaded and delayed task_work on exit

io_uring offloads task_work for cancelation purposes when the task is
exiting. This is conceptually fine, but we should be nicer and actually
wait for that work to complete before returning.

Add an argument to io_fallback_tw() telling it to flush the deferred
work once it has all been queued up, and have it flush the ctx left
behind whenever the ctx changes.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 10e1c0d5
@@ -1237,18 +1237,32 @@ static inline struct llist_node *io_llist_cmpxchg(struct llist_head *head,
 	return cmpxchg(&head->first, old, new);
 }
 
-static __cold void io_fallback_tw(struct io_uring_task *tctx)
+static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
 {
 	struct llist_node *node = llist_del_all(&tctx->task_list);
+	struct io_ring_ctx *last_ctx = NULL;
 	struct io_kiocb *req;
 
 	while (node) {
 		req = container_of(node, struct io_kiocb, io_task_work.node);
 		node = node->next;
+		if (sync && last_ctx != req->ctx) {
+			if (last_ctx) {
+				flush_delayed_work(&last_ctx->fallback_work);
+				percpu_ref_put(&last_ctx->refs);
+			}
+			last_ctx = req->ctx;
+			percpu_ref_get(&last_ctx->refs);
+		}
 		if (llist_add(&req->io_task_work.node,
 			      &req->ctx->fallback_llist))
 			schedule_delayed_work(&req->ctx->fallback_work, 1);
 	}
+
+	if (last_ctx) {
+		flush_delayed_work(&last_ctx->fallback_work);
+		percpu_ref_put(&last_ctx->refs);
+	}
 }
 
 void tctx_task_work(struct callback_head *cb)
@@ -1263,7 +1277,7 @@ void tctx_task_work(struct callback_head *cb)
 	unsigned int count = 0;
 
 	if (unlikely(current->flags & PF_EXITING)) {
-		io_fallback_tw(tctx);
+		io_fallback_tw(tctx, true);
 		return;
 	}
 
@@ -1358,7 +1372,7 @@ static void io_req_normal_work_add(struct io_kiocb *req)
 	if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
 		return;
 
-	io_fallback_tw(tctx);
+	io_fallback_tw(tctx, false);
 }
 
 void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
@@ -3108,6 +3122,8 @@ static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 	if (ctx->rings)
 		io_kill_timeouts(ctx, NULL, true);
 
+	flush_delayed_work(&ctx->fallback_work);
+
 	INIT_WORK(&ctx->exit_work, io_ring_exit_work);
 	/*
 	 * Use system_unbound_wq to avoid spawning tons of event kworkers
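The core of the change is the grouping loop in io_fallback_tw(): each request is queued to its ctx's fallback work, and in sync mode the ctx just left behind is flushed whenever the ctx changes, with one final flush after the loop. The standalone C sketch below only illustrates that "flush the trailing group on change" pattern; fake_ctx, fake_req, flush_ctx and fallback_tw_sketch are made-up names for illustration, not kernel APIs.

#include <stdio.h>

/* Illustrative stand-in for struct io_ring_ctx and its fallback_work. */
struct fake_ctx {
	int id;
};

struct fake_req {
	struct fake_ctx *ctx;
};

/* Stand-in for flush_delayed_work(&ctx->fallback_work) followed by
 * percpu_ref_put(&ctx->refs) in the real patch. */
static void flush_ctx(struct fake_ctx *ctx)
{
	printf("flush fallback work for ctx %d\n", ctx->id);
}

static void fallback_tw_sketch(struct fake_req *reqs, int nr, int sync)
{
	struct fake_ctx *last_ctx = NULL;
	int i;

	for (i = 0; i < nr; i++) {
		/* On a ctx change, flush the ctx we are leaving behind. */
		if (sync && last_ctx != reqs[i].ctx) {
			if (last_ctx)
				flush_ctx(last_ctx);
			last_ctx = reqs[i].ctx;
		}
		/* ...queue reqs[i] to its ctx's fallback work here... */
	}

	/* Flush the final ctx once everything has been queued. */
	if (last_ctx)
		flush_ctx(last_ctx);
}

int main(void)
{
	struct fake_ctx a = { 1 }, b = { 2 };
	struct fake_req reqs[] = { { &a }, { &a }, { &b } };

	fallback_tw_sketch(reqs, 3, 1);
	return 0;
}

In the real patch the flush is flush_delayed_work(&ctx->fallback_work), and the surrounding percpu_ref_get()/percpu_ref_put() pair pins the ctx so it cannot go away while the flush waits for the fallback work to run.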