Commit 5d772916 authored by Pavel Begunkov's avatar Pavel Begunkov Committed by Jens Axboe

io_uring: keep unlock_post inlined in hot path

This partially reverts

6c16fe3c ("io_uring: kill io_cqring_ev_posted() and __io_cq_unlock_post()")

The redundancy of __io_cq_unlock_post() was always to keep it inlined
into __io_submit_flush_completions(). Inline it back and rename with
hope of clarifying the intention behind it.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/372a16c485fca44c069be2e92fc5e7332a1d7fd7.1669310258.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent c3b49093
...@@ -582,7 +582,8 @@ void __io_commit_cqring_flush(struct io_ring_ctx *ctx) ...@@ -582,7 +582,8 @@ void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
io_eventfd_flush_signal(ctx); io_eventfd_flush_signal(ctx);
} }
void io_cq_unlock_post(struct io_ring_ctx *ctx) /* keep it inlined for io_submit_flush_completions() */
static inline void io_cq_unlock_post_inline(struct io_ring_ctx *ctx)
__releases(ctx->completion_lock) __releases(ctx->completion_lock)
{ {
io_commit_cqring(ctx); io_commit_cqring(ctx);
...@@ -592,6 +593,12 @@ void io_cq_unlock_post(struct io_ring_ctx *ctx) ...@@ -592,6 +593,12 @@ void io_cq_unlock_post(struct io_ring_ctx *ctx)
io_cqring_wake(ctx); io_cqring_wake(ctx);
} }
/*
 * Out-of-line entry point for posting completions: delegates to
 * io_cq_unlock_post_inline(), which commits the CQ ring, releases
 * ->completion_lock and wakes CQ waiters.  The submission hot path
 * (__io_submit_flush_completions()) calls the inline helper directly
 * to keep it inlined; this wrapper exists for all other callers.
 */
void io_cq_unlock_post(struct io_ring_ctx *ctx)
	__releases(ctx->completion_lock)
{
	io_cq_unlock_post_inline(ctx);
}
/* Returns true if there are no backlogged entries after the flush */ /* Returns true if there are no backlogged entries after the flush */
static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force) static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
{ {
...@@ -1389,7 +1396,7 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx) ...@@ -1389,7 +1396,7 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
if (!(req->flags & REQ_F_CQE_SKIP)) if (!(req->flags & REQ_F_CQE_SKIP))
__io_fill_cqe_req(ctx, req); __io_fill_cqe_req(ctx, req);
} }
io_cq_unlock_post(ctx); io_cq_unlock_post_inline(ctx);
if (!wq_list_empty(&ctx->submit_state.compl_reqs)) { if (!wq_list_empty(&ctx->submit_state.compl_reqs)) {
io_free_batch_list(ctx, state->compl_reqs.first); io_free_batch_list(ctx, state->compl_reqs.first);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment