Commit 1b346e4a authored by Pavel Begunkov, committed by Jens Axboe

io_uring: don't check overflow flush failures

The only way the overflowed-CQE flush can fail is for the CQ to be fully
packed. There is one place that checks for flush failures, io_cqring_wait(),
but the number of events to wait for is limited by the CQ size, so a
failure automatically means that we're done waiting.

Don't check for failures; although rare, they can spuriously fail CQ
waiting with -EBUSY.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/6b720a45c03345655517f8202cbd0bece2848fb2.1670384893.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent a85381d8
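
The invariant the commit message relies on can be modelled with a small standalone sketch (plain userspace C, not kernel code; fake_ring, flush_overflow and wait_satisfied are illustrative names, not io_uring symbols): a flush can only "fail" when the CQ already holds cq_entries CQEs, and the wait target is clamped to the CQ size, so a full CQ always satisfies the waiter.

/*
 * Standalone model of the argument above. A "flush failure" means the CQ
 * is already packed, and since the wait target is clamped to cq_entries,
 * that same condition satisfies any waiter.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_ring {
        unsigned cq_entries;    /* CQ capacity */
        unsigned cqes;          /* CQEs currently visible in the CQ */
        unsigned overflowed;    /* backlogged (overflowed) CQEs */
};

/* Mirrors the shape of the kernel flush: a packed CQ makes no progress */
static void flush_overflow(struct fake_ring *r)
{
        while (r->overflowed && r->cqes < r->cq_entries) {
                r->cqes++;
                r->overflowed--;
        }
}

/* The wait target is clamped to the CQ size, as the commit message notes */
static bool wait_satisfied(const struct fake_ring *r, unsigned min_events)
{
        unsigned target = min_events < r->cq_entries ? min_events : r->cq_entries;

        return r->cqes >= target;
}

int main(void)
{
        struct fake_ring r = { .cq_entries = 8, .cqes = 8, .overflowed = 3 };

        flush_overflow(&r);             /* CQ is packed: nothing can be flushed */
        assert(r.overflowed == 3);      /* the "flush failure" case */
        assert(wait_satisfied(&r, 16)); /* ...but the waiter is already done */
        printf("a full CQ satisfies any clamped wait\n");
        return 0;
}

In other words, whenever the flush would have reported failure, the waiter's exit condition is already met, so io_cqring_wait() gains nothing from the -EBUSY branch removed below.
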
@@ -618,13 +618,12 @@ static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
 }
 
 /* Returns true if there are no backlogged entries after the flush */
-static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
+static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
 {
-	bool all_flushed;
 	size_t cqe_size = sizeof(struct io_uring_cqe);
 
 	if (__io_cqring_events(ctx) == ctx->cq_entries)
-		return false;
+		return;
 
 	if (ctx->flags & IORING_SETUP_CQE32)
 		cqe_size <<= 1;
@@ -643,30 +642,23 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
 		kfree(ocqe);
 	}
 
-	all_flushed = list_empty(&ctx->cq_overflow_list);
-	if (all_flushed) {
+	if (list_empty(&ctx->cq_overflow_list)) {
 		clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
 		atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
 	}
-
 	io_cq_unlock_post(ctx);
-	return all_flushed;
 }
 
-static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
+static void io_cqring_overflow_flush(struct io_ring_ctx *ctx)
 {
-	bool ret = true;
-
 	if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
 		/* iopoll syncs against uring_lock, not completion_lock */
 		if (ctx->flags & IORING_SETUP_IOPOLL)
 			mutex_lock(&ctx->uring_lock);
-		ret = __io_cqring_overflow_flush(ctx);
+		__io_cqring_overflow_flush(ctx);
 		if (ctx->flags & IORING_SETUP_IOPOLL)
 			mutex_unlock(&ctx->uring_lock);
 	}
-
-	return ret;
 }
 
 void __io_put_task(struct task_struct *task, int nr)
@@ -2494,11 +2486,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 	trace_io_uring_cqring_wait(ctx, min_events);
 
 	do {
-		/* if we can't even flush overflow, don't wait for more */
-		if (!io_cqring_overflow_flush(ctx)) {
-			ret = -EBUSY;
-			break;
-		}
+		io_cqring_overflow_flush(ctx);
 		prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
 						TASK_INTERRUPTIBLE);
 		ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
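
For completeness, the failure mode being removed was visible from userspace: the wait path could, rarely, surface the flush failure as -EBUSY from io_uring_enter(2), which liburing passes back from io_uring_wait_cqe(). A hedged sketch of the defensive retry a program might have carried (liburing public API; the -EBUSY retry is illustrative and unnecessary on kernels with this patch):

/*
 * Illustrative only: tolerate a spurious -EBUSY from the CQ-wait path.
 * Build with: gcc wait_busy.c -o wait_busy -luring
 */
#include <liburing.h>
#include <stdio.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int ret;

        if (io_uring_queue_init(8, &ring, 0) < 0)
                return 1;

        sqe = io_uring_get_sqe(&ring);
        if (!sqe)
                return 1;
        io_uring_prep_nop(sqe);        /* submit a no-op just to get a CQE */
        io_uring_submit(&ring);

        /*
         * Pre-patch kernels could, in rare cases, fail the wait with -EBUSY
         * even though waiting longer was pointless; retrying defensively was
         * one way to cope. After this patch the wait only fails for real
         * reasons (e.g. -EINTR).
         */
        do {
                ret = io_uring_wait_cqe(&ring, &cqe);
        } while (ret == -EBUSY);

        if (!ret) {
                printf("nop completed, res=%d\n", cqe->res);
                io_uring_cqe_seen(&ring, cqe);
        }
        io_uring_queue_exit(&ring);
        return ret ? 1 : 0;
}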