Commit 90f67366 authored by Pavel Begunkov's avatar Pavel Begunkov Committed by Jens Axboe

io_uring: remove extra argument for overflow flush

Unlike __io_cqring_overflow_flush(), nobody does forced flushing with
io_cqring_overflow_flush(), so remove the argument from it.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/7594f869ca41b7cfb5a35a3c7c2d402242834e9e.1628536684.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent cd0ca2e0
...@@ -1523,7 +1523,7 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force) ...@@ -1523,7 +1523,7 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
return all_flushed; return all_flushed;
} }
static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force) static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
{ {
bool ret = true; bool ret = true;
...@@ -1531,7 +1531,7 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force) ...@@ -1531,7 +1531,7 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
/* iopoll syncs against uring_lock, not completion_lock */ /* iopoll syncs against uring_lock, not completion_lock */
if (ctx->flags & IORING_SETUP_IOPOLL) if (ctx->flags & IORING_SETUP_IOPOLL)
mutex_lock(&ctx->uring_lock); mutex_lock(&ctx->uring_lock);
ret = __io_cqring_overflow_flush(ctx, force); ret = __io_cqring_overflow_flush(ctx, false);
if (ctx->flags & IORING_SETUP_IOPOLL) if (ctx->flags & IORING_SETUP_IOPOLL)
mutex_unlock(&ctx->uring_lock); mutex_unlock(&ctx->uring_lock);
} }
...@@ -7058,7 +7058,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, ...@@ -7058,7 +7058,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
int ret; int ret;
do { do {
io_cqring_overflow_flush(ctx, false); io_cqring_overflow_flush(ctx);
if (io_cqring_events(ctx) >= min_events) if (io_cqring_events(ctx) >= min_events)
return 0; return 0;
if (!io_run_task_work()) if (!io_run_task_work())
...@@ -7096,7 +7096,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, ...@@ -7096,7 +7096,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
trace_io_uring_cqring_wait(ctx, min_events); trace_io_uring_cqring_wait(ctx, min_events);
do { do {
/* if we can't even flush overflow, don't wait for more */ /* if we can't even flush overflow, don't wait for more */
if (!io_cqring_overflow_flush(ctx, false)) { if (!io_cqring_overflow_flush(ctx)) {
ret = -EBUSY; ret = -EBUSY;
break; break;
} }
...@@ -9365,7 +9365,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, ...@@ -9365,7 +9365,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
*/ */
ret = 0; ret = 0;
if (ctx->flags & IORING_SETUP_SQPOLL) { if (ctx->flags & IORING_SETUP_SQPOLL) {
io_cqring_overflow_flush(ctx, false); io_cqring_overflow_flush(ctx);
if (unlikely(ctx->sq_data->thread == NULL)) { if (unlikely(ctx->sq_data->thread == NULL)) {
ret = -EOWNERDEAD; ret = -EOWNERDEAD;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment