Commit 634578f8 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: fix racy IOPOLL flush overflow

It's not safe to call io_cqring_overflow_flush() for IOPOLL mode without
holding uring_lock, because it does synchronisation differently. Make
sure we have it.

As for io_ring_exit_work(), we don't even need the flush there because
io_ring_ctx_wait_and_kill() already sets the force flag, making all
overflowed requests be dropped.

Cc: <stable@vger.kernel.org> # 5.5+
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 31bff9a5
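
For context on the locking pattern used below: io_ring_submit_lock() and
io_ring_submit_unlock() take uring_lock only when the caller asks for it,
so passing (ctx->flags & IORING_SETUP_IOPOLL) means the flush runs under
uring_lock exactly for IOPOLL rings, whose completion side is synchronised
by that mutex. A minimal sketch of the helpers, assuming the fs/io_uring.c
of this era (the bodies are an illustration, not the verbatim source):

static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
{
	/* sketch: callers that already hold uring_lock pass
	 * needs_lock == false; here the flush sites pass
	 * (ctx->flags & IORING_SETUP_IOPOLL) */
	if (needs_lock)
		mutex_lock(&ctx->uring_lock);
}

static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
{
	if (needs_lock)
		mutex_unlock(&ctx->uring_lock);
}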
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -8654,8 +8654,6 @@ static void io_ring_exit_work(struct work_struct *work)
 	 * as nobody else will be looking for them.
 	 */
 	do {
-		if (ctx->rings)
-			io_cqring_overflow_flush(ctx, true, NULL, NULL);
 		io_iopoll_try_reap_events(ctx);
 	} while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
 	io_ring_ctx_free(ctx);
@@ -8665,6 +8663,8 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 {
 	mutex_lock(&ctx->uring_lock);
 	percpu_ref_kill(&ctx->refs);
+	if (ctx->rings)
+		io_cqring_overflow_flush(ctx, true, NULL, NULL);
 	mutex_unlock(&ctx->uring_lock);
 
 	io_kill_timeouts(ctx, NULL, NULL);
@@ -8674,8 +8674,6 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 		io_wq_cancel_all(ctx->io_wq);
 
 	/* if we failed setting up the ctx, we might not have any rings */
-	if (ctx->rings)
-		io_cqring_overflow_flush(ctx, true, NULL, NULL);
 	io_iopoll_try_reap_events(ctx);
 	idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
 
@@ -8840,7 +8838,9 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 	}
 
 	io_cancel_defer_files(ctx, task, files);
+	io_ring_submit_lock(ctx, (ctx->flags & IORING_SETUP_IOPOLL));
 	io_cqring_overflow_flush(ctx, true, task, files);
+	io_ring_submit_unlock(ctx, (ctx->flags & IORING_SETUP_IOPOLL));
 
 	if (!files)
 		__io_uring_cancel_task_requests(ctx, task);
@@ -9172,8 +9172,10 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 	 */
 	ret = 0;
 	if (ctx->flags & IORING_SETUP_SQPOLL) {
+		io_ring_submit_lock(ctx, (ctx->flags & IORING_SETUP_IOPOLL));
 		if (!list_empty_careful(&ctx->cq_overflow_list))
 			io_cqring_overflow_flush(ctx, false, NULL, NULL);
+		io_ring_submit_unlock(ctx, (ctx->flags & IORING_SETUP_IOPOLL));
 		if (flags & IORING_ENTER_SQ_WAKEUP)
 			wake_up(&ctx->sq_data->wait);
 		if (flags & IORING_ENTER_SQ_WAIT)
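
On the force flag mentioned in the commit message: with force == true,
io_cqring_overflow_flush() does not stop when the CQ ring is full;
overflowed CQEs that no longer fit are dropped and accounted in the
cq_overflow counter so userspace can still detect the loss. A simplified
sketch of that core loop, assuming the fs/io_uring.c of this era (locking,
the task/files filtering, and the final ring commit are omitted;
io_get_cqring(), io_put_req(), and the req->compl fields follow the
kernel source of this period, but treat this as an illustration, not the
verbatim function):

static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
				     struct task_struct *tsk,
				     struct files_struct *files)
{
	struct io_kiocb *req, *tmp;
	struct io_uring_cqe *cqe;

	list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) {
		cqe = io_get_cqring(ctx);	/* NULL when the CQ ring is full */
		if (!cqe && !force)
			break;			/* non-forced: keep the rest queued */
		list_del(&req->compl.list);
		if (cqe) {
			/* a slot is free: post the overflowed completion */
			WRITE_ONCE(cqe->user_data, req->user_data);
			WRITE_ONCE(cqe->res, req->result);
			WRITE_ONCE(cqe->flags, req->compl.cflags);
		} else {
			/* forced: drop the CQE, but bump cq_overflow so
			 * userspace can see that completions were lost */
			ctx->cached_cq_overflow++;
			WRITE_ONCE(ctx->rings->cq_overflow,
				   ctx->cached_cq_overflow);
		}
		io_put_req(req);
	}
	return true;
}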