Commit 88ab95be authored by Pavel Begunkov, committed by Jens Axboe

io_uring: helper for empty req cache checks

Add io_req_cache_empty(), which checks whether there are any requests in the
inline req cache. It'll be needed in the future, but it also nicely cleans up
a few spots that poke into ->free_list directly.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/b18662389f3fb483d0bd07906647f65f6037475a.1649771823.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 23a5c43b
@@ -2224,6 +2224,11 @@ static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
         spin_unlock(&ctx->completion_lock);
 }
 
+static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
+{
+        return !ctx->submit_state.free_list.next;
+}
+
 /*
  * A request might get retired back into the request caches even before opcode
  * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
@@ -2245,7 +2250,7 @@ static __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
          */
         if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH) {
                 io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
-                if (state->free_list.next)
+                if (!io_req_cache_empty(ctx))
                         return true;
         }
@@ -2274,7 +2279,7 @@ static __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
 static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
 {
-        if (unlikely(!ctx->submit_state.free_list.next))
+        if (unlikely(io_req_cache_empty(ctx)))
                 return __io_alloc_req_refill(ctx);
         return true;
 }
@@ -9809,7 +9814,7 @@ static void io_req_caches_free(struct io_ring_ctx *ctx)
         mutex_lock(&ctx->uring_lock);
         io_flush_cached_locked_reqs(ctx, state);
 
-        while (state->free_list.next) {
+        while (!io_req_cache_empty(ctx)) {
                 struct io_wq_work_node *node;
                 struct io_kiocb *req;
...
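
For readers who want to see the idea behind the helper in isolation: the sketch below models the submit-state free list as an intrusive singly-linked stack whose head's next pointer is NULL when the cache is empty, which is the condition io_req_cache_empty() tests. It is a minimal, standalone illustration; the struct and function names (work_node, submit_state, req_cache_*) are invented for the example and are not the kernel's own types.

/*
 * Standalone sketch (not kernel code): a simplified model of the
 * submit-side request cache as an intrusive singly-linked stack.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct work_node {
        struct work_node *next;
};

struct submit_state {
        /* Stack head; head.next == NULL means the cache holds no requests. */
        struct work_node free_list;
};

/* Mirrors the idea behind io_req_cache_empty(): test only the head pointer. */
static inline bool req_cache_empty(struct submit_state *state)
{
        return !state->free_list.next;
}

static inline void req_cache_push(struct submit_state *state,
                                  struct work_node *node)
{
        node->next = state->free_list.next;
        state->free_list.next = node;
}

static inline struct work_node *req_cache_pop(struct submit_state *state)
{
        struct work_node *node = state->free_list.next;

        if (node)
                state->free_list.next = node->next;
        return node;
}

int main(void)
{
        struct submit_state state = { .free_list = { .next = NULL } };
        struct work_node a, b;

        printf("empty at start: %d\n", req_cache_empty(&state));  /* 1 */
        req_cache_push(&state, &a);
        req_cache_push(&state, &b);
        printf("empty after push: %d\n", req_cache_empty(&state)); /* 0 */
        req_cache_pop(&state);
        req_cache_pop(&state);
        printf("empty after pop: %d\n", req_cache_empty(&state));  /* 1 */
        return 0;
}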