Commit 9936c7c2 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: deduplicate core cancellations sequence

Files and task cancellations go over the same steps trying to cancel
requests in io-wq, poll, etc. Deduplicate them with a helper.

note: the new io_uring_try_cancel_requests() is the former
__io_uring_cancel_task_requests() with files passed as an argument,
and it also flushes overflowed requests.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 57cd657b
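
For illustration only: below is a minimal standalone userspace sketch (not kernel code; all names are hypothetical stand-ins for the io_uring internals) of the pattern this commit applies — two near-identical cancellation loops collapsed into one helper that takes the "files-scoped?" discriminator as a parameter and retries until a full pass makes no progress.

#include <stdbool.h>
#include <stdio.h>

struct ctx {
	int wq_reqs;		/* stands in for io-wq work items */
	int poll_reqs;		/* stands in for poll/timeout requests */
	int iopoll_reqs;	/* stands in for the iopoll_list */
};

/* One cancellation pass; returns true if it made any progress. */
static bool try_cancel_once(struct ctx *c, bool files_scoped)
{
	bool ret = false;

	if (c->wq_reqs)   { c->wq_reqs--;   ret = true; }
	if (c->poll_reqs) { c->poll_reqs--; ret = true; }
	/* mirrors the "!files" condition in the real helper:
	 * only task-wide cancellation reaps iopoll events */
	if (!files_scoped && c->iopoll_reqs) { c->iopoll_reqs--; ret = true; }
	return ret;
}

/* The deduplicated helper: loop until a pass changes nothing,
 * analogous in shape to io_uring_try_cancel_requests(). */
static void try_cancel_requests(struct ctx *c, bool files_scoped)
{
	while (try_cancel_once(c, files_scoped))
		;	/* the kernel version also cond_resched()es here */
}

int main(void)
{
	struct ctx c = { .wq_reqs = 2, .poll_reqs = 1, .iopoll_reqs = 3 };

	try_cancel_requests(&c, true);	/* file-scoped pass */
	try_cancel_requests(&c, false);	/* task-wide pass */
	printf("left: wq=%d poll=%d iopoll=%d\n",
	       c.wq_reqs, c.poll_reqs, c.iopoll_reqs);
	return 0;
}

Both former call sites then reduce to a single call with a different argument, which is exactly the shape of the real diff below.
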
@@ -1003,9 +1003,9 @@ enum io_mem_account {
 	ACCT_PINNED,
 };
 
-static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
-			struct task_struct *task);
-
+static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
+					 struct task_struct *task,
+					 struct files_struct *files);
 static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node);
 static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
 			struct io_ring_ctx *ctx);
@@ -8817,7 +8817,7 @@ static void io_ring_exit_work(struct work_struct *work)
 	 * as nobody else will be looking for them.
	 */
	do {
-		__io_uring_cancel_task_requests(ctx, NULL);
+		io_uring_try_cancel_requests(ctx, NULL, NULL);
	} while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
	io_ring_ctx_free(ctx);
 }
@@ -8931,6 +8931,40 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
 	}
 }
 
+static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
+					 struct task_struct *task,
+					 struct files_struct *files)
+{
+	struct io_task_cancel cancel = { .task = task, .files = files, };
+
+	while (1) {
+		enum io_wq_cancel cret;
+		bool ret = false;
+
+		if (ctx->io_wq) {
+			cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb,
+					       &cancel, true);
+			ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
+		}
+
+		/* SQPOLL thread does its own polling */
+		if (!(ctx->flags & IORING_SETUP_SQPOLL) && !files) {
+			while (!list_empty_careful(&ctx->iopoll_list)) {
+				io_iopoll_try_reap_events(ctx);
+				ret = true;
+			}
+		}
+
+		ret |= io_poll_remove_all(ctx, task, files);
+		ret |= io_kill_timeouts(ctx, task, files);
+		ret |= io_run_task_work();
+		io_cqring_overflow_flush(ctx, true, task, files);
+		if (!ret)
+			break;
+		cond_resched();
+	}
+}
+
 static int io_uring_count_inflight(struct io_ring_ctx *ctx,
				   struct task_struct *task,
				   struct files_struct *files)
@@ -8950,7 +8984,6 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
				  struct files_struct *files)
 {
	while (!list_empty_careful(&ctx->inflight_list)) {
-		struct io_task_cancel cancel = { .task = task, .files = files };
		DEFINE_WAIT(wait);
		int inflight;
@@ -8958,13 +8991,7 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
		if (!inflight)
			break;
 
-		io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
-		io_poll_remove_all(ctx, task, files);
-		io_kill_timeouts(ctx, task, files);
-		io_cqring_overflow_flush(ctx, true, task, files);
-		/* cancellations _may_ trigger task work */
-		io_run_task_work();
+		io_uring_try_cancel_requests(ctx, task, files);
		prepare_to_wait(&task->io_uring->wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (inflight == io_uring_count_inflight(ctx, task, files))
@@ -8973,37 +9000,6 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
	}
 }
 
-static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
-					    struct task_struct *task)
-{
-	while (1) {
-		struct io_task_cancel cancel = { .task = task, .files = NULL, };
-		enum io_wq_cancel cret;
-		bool ret = false;
-
-		if (ctx->io_wq) {
-			cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb,
-					       &cancel, true);
-			ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
-		}
-
-		/* SQPOLL thread does its own polling */
-		if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
-			while (!list_empty_careful(&ctx->iopoll_list)) {
-				io_iopoll_try_reap_events(ctx);
-				ret = true;
-			}
-		}
-
-		ret |= io_poll_remove_all(ctx, task, NULL);
-		ret |= io_kill_timeouts(ctx, task, NULL);
-		ret |= io_run_task_work();
-		if (!ret)
-			break;
-		cond_resched();
-	}
-}
-
 static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
 {
	mutex_lock(&ctx->uring_lock);
@@ -9033,11 +9029,10 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
	}
 
	io_cancel_defer_files(ctx, task, files);
-	io_cqring_overflow_flush(ctx, true, task, files);
	io_uring_cancel_files(ctx, task, files);
	if (!files)
-		__io_uring_cancel_task_requests(ctx, task);
+		io_uring_try_cancel_requests(ctx, task, NULL);
 
	if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
		atomic_dec(&task->io_uring->in_idle);