Commit 3dd0c97a authored by Pavel Begunkov, committed by Jens Axboe

io_uring: get rid of files in exit cancel

We don't match against files on cancellation anymore, so there is no
need to drag files_struct around; just pass a flag telling whether only
inflight or all requests should be killed.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/7bfc5409a78f8e2d6b27dec3293ec2d248677348.1621201931.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent acfb381d
...@@ -1036,7 +1036,7 @@ static bool io_disarm_next(struct io_kiocb *req); ...@@ -1036,7 +1036,7 @@ static bool io_disarm_next(struct io_kiocb *req);
static void io_uring_del_task_file(unsigned long index); static void io_uring_del_task_file(unsigned long index);
static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx, static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
struct task_struct *task, struct task_struct *task,
struct files_struct *files); bool cancel_all);
static void io_uring_cancel_sqpoll(struct io_sq_data *sqd); static void io_uring_cancel_sqpoll(struct io_sq_data *sqd);
static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx); static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx);
...@@ -1105,15 +1105,14 @@ static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl) ...@@ -1105,15 +1105,14 @@ static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
percpu_ref_put(ref); percpu_ref_put(ref);
} }
static bool io_match_task(struct io_kiocb *head, static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
struct task_struct *task, bool cancel_all)
struct files_struct *files)
{ {
struct io_kiocb *req; struct io_kiocb *req;
if (task && head->task != task) if (task && head->task != task)
return false; return false;
if (!files) if (cancel_all)
return true; return true;
io_for_each_link(req, head) { io_for_each_link(req, head) {
...@@ -5256,7 +5255,7 @@ static bool io_poll_remove_one(struct io_kiocb *req) ...@@ -5256,7 +5255,7 @@ static bool io_poll_remove_one(struct io_kiocb *req)
* Returns true if we found and killed one or more poll requests * Returns true if we found and killed one or more poll requests
*/ */
static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk, static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
struct files_struct *files) bool cancel_all)
{ {
struct hlist_node *tmp; struct hlist_node *tmp;
struct io_kiocb *req; struct io_kiocb *req;
...@@ -5268,7 +5267,7 @@ static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk, ...@@ -5268,7 +5267,7 @@ static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
list = &ctx->cancel_hash[i]; list = &ctx->cancel_hash[i];
hlist_for_each_entry_safe(req, tmp, list, hash_node) { hlist_for_each_entry_safe(req, tmp, list, hash_node) {
if (io_match_task(req, tsk, files)) if (io_match_task(req, tsk, cancel_all))
posted += io_poll_remove_one(req); posted += io_poll_remove_one(req);
} }
} }
...@@ -8742,7 +8741,7 @@ static void io_ring_exit_work(struct work_struct *work) ...@@ -8742,7 +8741,7 @@ static void io_ring_exit_work(struct work_struct *work)
* as nobody else will be looking for them. * as nobody else will be looking for them.
*/ */
do { do {
io_uring_try_cancel_requests(ctx, NULL, NULL); io_uring_try_cancel_requests(ctx, NULL, true);
if (ctx->sq_data) { if (ctx->sq_data) {
struct io_sq_data *sqd = ctx->sq_data; struct io_sq_data *sqd = ctx->sq_data;
struct task_struct *tsk; struct task_struct *tsk;
...@@ -8793,14 +8792,14 @@ static void io_ring_exit_work(struct work_struct *work) ...@@ -8793,14 +8792,14 @@ static void io_ring_exit_work(struct work_struct *work)
/* Returns true if we found and killed one or more timeouts */ /* Returns true if we found and killed one or more timeouts */
static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk, static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
struct files_struct *files) bool cancel_all)
{ {
struct io_kiocb *req, *tmp; struct io_kiocb *req, *tmp;
int canceled = 0; int canceled = 0;
spin_lock_irq(&ctx->completion_lock); spin_lock_irq(&ctx->completion_lock);
list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) { list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
if (io_match_task(req, tsk, files)) { if (io_match_task(req, tsk, cancel_all)) {
io_kill_timeout(req, -ECANCELED); io_kill_timeout(req, -ECANCELED);
canceled++; canceled++;
} }
...@@ -8826,8 +8825,8 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) ...@@ -8826,8 +8825,8 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
io_unregister_personality(ctx, index); io_unregister_personality(ctx, index);
mutex_unlock(&ctx->uring_lock); mutex_unlock(&ctx->uring_lock);
io_kill_timeouts(ctx, NULL, NULL); io_kill_timeouts(ctx, NULL, true);
io_poll_remove_all(ctx, NULL, NULL); io_poll_remove_all(ctx, NULL, true);
/* if we failed setting up the ctx, we might not have any rings */ /* if we failed setting up the ctx, we might not have any rings */
io_iopoll_try_reap_events(ctx); io_iopoll_try_reap_events(ctx);
...@@ -8853,7 +8852,7 @@ static int io_uring_release(struct inode *inode, struct file *file) ...@@ -8853,7 +8852,7 @@ static int io_uring_release(struct inode *inode, struct file *file)
struct io_task_cancel { struct io_task_cancel {
struct task_struct *task; struct task_struct *task;
struct files_struct *files; bool all;
}; };
static bool io_cancel_task_cb(struct io_wq_work *work, void *data) static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
...@@ -8862,30 +8861,29 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data) ...@@ -8862,30 +8861,29 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
struct io_task_cancel *cancel = data; struct io_task_cancel *cancel = data;
bool ret; bool ret;
if (cancel->files && (req->flags & REQ_F_LINK_TIMEOUT)) { if (!cancel->all && (req->flags & REQ_F_LINK_TIMEOUT)) {
unsigned long flags; unsigned long flags;
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
/* protect against races with linked timeouts */ /* protect against races with linked timeouts */
spin_lock_irqsave(&ctx->completion_lock, flags); spin_lock_irqsave(&ctx->completion_lock, flags);
ret = io_match_task(req, cancel->task, cancel->files); ret = io_match_task(req, cancel->task, cancel->all);
spin_unlock_irqrestore(&ctx->completion_lock, flags); spin_unlock_irqrestore(&ctx->completion_lock, flags);
} else { } else {
ret = io_match_task(req, cancel->task, cancel->files); ret = io_match_task(req, cancel->task, cancel->all);
} }
return ret; return ret;
} }
static bool io_cancel_defer_files(struct io_ring_ctx *ctx, static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
struct task_struct *task, struct task_struct *task, bool cancel_all)
struct files_struct *files)
{ {
struct io_defer_entry *de; struct io_defer_entry *de;
LIST_HEAD(list); LIST_HEAD(list);
spin_lock_irq(&ctx->completion_lock); spin_lock_irq(&ctx->completion_lock);
list_for_each_entry_reverse(de, &ctx->defer_list, list) { list_for_each_entry_reverse(de, &ctx->defer_list, list) {
if (io_match_task(de->req, task, files)) { if (io_match_task(de->req, task, cancel_all)) {
list_cut_position(&list, &ctx->defer_list, &de->list); list_cut_position(&list, &ctx->defer_list, &de->list);
break; break;
} }
...@@ -8929,9 +8927,9 @@ static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx) ...@@ -8929,9 +8927,9 @@ static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx, static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
struct task_struct *task, struct task_struct *task,
struct files_struct *files) bool cancel_all)
{ {
struct io_task_cancel cancel = { .task = task, .files = files, }; struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
struct io_uring_task *tctx = task ? task->io_uring : NULL; struct io_uring_task *tctx = task ? task->io_uring : NULL;
while (1) { while (1) {
...@@ -8951,7 +8949,7 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx, ...@@ -8951,7 +8949,7 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
} }
/* SQPOLL thread does its own polling */ /* SQPOLL thread does its own polling */
if ((!(ctx->flags & IORING_SETUP_SQPOLL) && !files) || if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
(ctx->sq_data && ctx->sq_data->thread == current)) { (ctx->sq_data && ctx->sq_data->thread == current)) {
while (!list_empty_careful(&ctx->iopoll_list)) { while (!list_empty_careful(&ctx->iopoll_list)) {
io_iopoll_try_reap_events(ctx); io_iopoll_try_reap_events(ctx);
...@@ -8959,9 +8957,9 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx, ...@@ -8959,9 +8957,9 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
} }
} }
ret |= io_cancel_defer_files(ctx, task, files); ret |= io_cancel_defer_files(ctx, task, cancel_all);
ret |= io_poll_remove_all(ctx, task, files); ret |= io_poll_remove_all(ctx, task, cancel_all);
ret |= io_kill_timeouts(ctx, task, files); ret |= io_kill_timeouts(ctx, task, cancel_all);
ret |= io_run_task_work(); ret |= io_run_task_work();
ret |= io_run_ctx_fallback(ctx); ret |= io_run_ctx_fallback(ctx);
if (!ret) if (!ret)
...@@ -9067,7 +9065,7 @@ static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked) ...@@ -9067,7 +9065,7 @@ static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
return percpu_counter_sum(&tctx->inflight); return percpu_counter_sum(&tctx->inflight);
} }
static void io_uring_try_cancel(struct files_struct *files) static void io_uring_try_cancel(bool cancel_all)
{ {
struct io_uring_task *tctx = current->io_uring; struct io_uring_task *tctx = current->io_uring;
struct io_tctx_node *node; struct io_tctx_node *node;
...@@ -9078,7 +9076,7 @@ static void io_uring_try_cancel(struct files_struct *files) ...@@ -9078,7 +9076,7 @@ static void io_uring_try_cancel(struct files_struct *files)
/* sqpoll task will cancel all its requests */ /* sqpoll task will cancel all its requests */
if (!ctx->sq_data) if (!ctx->sq_data)
io_uring_try_cancel_requests(ctx, current, files); io_uring_try_cancel_requests(ctx, current, cancel_all);
} }
} }
...@@ -9104,7 +9102,7 @@ static void io_uring_cancel_sqpoll(struct io_sq_data *sqd) ...@@ -9104,7 +9102,7 @@ static void io_uring_cancel_sqpoll(struct io_sq_data *sqd)
if (!inflight) if (!inflight)
break; break;
list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
io_uring_try_cancel_requests(ctx, current, NULL); io_uring_try_cancel_requests(ctx, current, true);
prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE); prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
/* /*
...@@ -9128,6 +9126,7 @@ void __io_uring_cancel(struct files_struct *files) ...@@ -9128,6 +9126,7 @@ void __io_uring_cancel(struct files_struct *files)
struct io_uring_task *tctx = current->io_uring; struct io_uring_task *tctx = current->io_uring;
DEFINE_WAIT(wait); DEFINE_WAIT(wait);
s64 inflight; s64 inflight;
bool cancel_all = !files;
if (tctx->io_wq) if (tctx->io_wq)
io_wq_exit_start(tctx->io_wq); io_wq_exit_start(tctx->io_wq);
...@@ -9136,10 +9135,10 @@ void __io_uring_cancel(struct files_struct *files) ...@@ -9136,10 +9135,10 @@ void __io_uring_cancel(struct files_struct *files)
atomic_inc(&tctx->in_idle); atomic_inc(&tctx->in_idle);
do { do {
/* read completions before cancelations */ /* read completions before cancelations */
inflight = tctx_inflight(tctx, !!files); inflight = tctx_inflight(tctx, !cancel_all);
if (!inflight) if (!inflight)
break; break;
io_uring_try_cancel(files); io_uring_try_cancel(cancel_all);
prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE); prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
/* /*
...@@ -9147,14 +9146,14 @@ void __io_uring_cancel(struct files_struct *files) ...@@ -9147,14 +9146,14 @@ void __io_uring_cancel(struct files_struct *files)
* avoids a race where a completion comes in before we did * avoids a race where a completion comes in before we did
* prepare_to_wait(). * prepare_to_wait().
*/ */
if (inflight == tctx_inflight(tctx, !!files)) if (inflight == tctx_inflight(tctx, !cancel_all))
schedule(); schedule();
finish_wait(&tctx->wait, &wait); finish_wait(&tctx->wait, &wait);
} while (1); } while (1);
atomic_dec(&tctx->in_idle); atomic_dec(&tctx->in_idle);
io_uring_clean_tctx(tctx); io_uring_clean_tctx(tctx);
if (!files) { if (cancel_all) {
/* for exec all current's requests should be gone, kill tctx */ /* for exec all current's requests should be gone, kill tctx */
__io_uring_free(current); __io_uring_free(current);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment