Commit 6b231248 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: consolidate overflow flushing

Consolidate __io_cqring_overflow_flush() and io_cqring_overflow_kill()
into a single function, as it once was; it's easier to work with it this
way.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/986b42c35e76a6be7aa0cdcda0a236a2222da3a7.1712708261.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 8d09a88e
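
The shape of the change: instead of two near-duplicate loops, the kill path becomes a thin wrapper around the flush loop, with a "dying" flag deciding whether each overflowed entry is published or simply freed. A minimal userspace sketch of that pattern (toy names and a plain linked list, not the kernel's actual types, locking, or CQ-ring handling):

	/*
	 * Toy model of the refactor (not kernel code): one worker keyed
	 * by a "dying" flag replaces two near-duplicate functions. On a
	 * normal flush the entry is published; on teardown it is dropped.
	 */
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct ocqe {
		int res;		/* stand-in for the overflowed CQE payload */
		struct ocqe *next;
	};

	static struct ocqe *overflow_list;

	static void flush_overflow(bool dying)
	{
		while (overflow_list) {
			struct ocqe *o = overflow_list;

			if (!dying)	/* normal flush: publish the entry */
				printf("posted CQE res=%d\n", o->res);
			/* dying: nowhere to post, just drop the entry */
			overflow_list = o->next;
			free(o);
		}
	}

	/* thin wrappers replace two separate loops */
	static void overflow_kill(void)  { flush_overflow(true); }
	static void overflow_flush(void) { flush_overflow(false); }

	int main(void)
	{
		for (int i = 0; i < 2; i++) {
			struct ocqe *o = malloc(sizeof(*o));

			if (!o)
				return 1;
			o->res = i;
			o->next = overflow_list;
			overflow_list = o;
		}
		overflow_flush();	/* posts both entries */
		overflow_kill();	/* no-op here: list already empty */
		return 0;
	}

In the patch below, the same split shows up as the (!dying) branch inside the consolidated loop, plus a new io_cqring_overflow_kill() wrapper that passes dying == true.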
io_uring/io_uring.c

@@ -668,26 +668,7 @@ static void io_cq_unlock_post(struct io_ring_ctx *ctx)
 	io_commit_cqring_flush(ctx);
 }
 
-static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
-{
-	struct io_overflow_cqe *ocqe;
-	LIST_HEAD(list);
-
-	lockdep_assert_held(&ctx->uring_lock);
-
-	spin_lock(&ctx->completion_lock);
-	list_splice_init(&ctx->cq_overflow_list, &list);
-	clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
-	spin_unlock(&ctx->completion_lock);
-
-	while (!list_empty(&list)) {
-		ocqe = list_first_entry(&list, struct io_overflow_cqe, list);
-		list_del(&ocqe->list);
-		kfree(ocqe);
-	}
-}
-
-static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
+static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool dying)
 {
 	size_t cqe_size = sizeof(struct io_uring_cqe);
 
@@ -704,11 +685,14 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
 		struct io_uring_cqe *cqe;
 		struct io_overflow_cqe *ocqe;
 
-		if (!io_get_cqe_overflow(ctx, &cqe, true))
-			break;
 		ocqe = list_first_entry(&ctx->cq_overflow_list,
					struct io_overflow_cqe, list);
-		memcpy(cqe, &ocqe->cqe, cqe_size);
+
+		if (!dying) {
+			if (!io_get_cqe_overflow(ctx, &cqe, true))
+				break;
+			memcpy(cqe, &ocqe->cqe, cqe_size);
+		}
 		list_del(&ocqe->list);
 		kfree(ocqe);
 	}
@@ -720,10 +704,16 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
 	io_cq_unlock_post(ctx);
 }
 
+static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
+{
+	if (ctx->rings)
+		__io_cqring_overflow_flush(ctx, true);
+}
+
 static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
 {
 	mutex_lock(&ctx->uring_lock);
-	__io_cqring_overflow_flush(ctx);
+	__io_cqring_overflow_flush(ctx, false);
 	mutex_unlock(&ctx->uring_lock);
 }
 
@@ -1531,7 +1521,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
 	check_cq = READ_ONCE(ctx->check_cq);
 	if (unlikely(check_cq)) {
 		if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
-			__io_cqring_overflow_flush(ctx);
+			__io_cqring_overflow_flush(ctx, false);
 		/*
 		 * Similarly do not spin if we have not informed the user of any
 		 * dropped CQE.