Commit fa05457a authored by Pavel Begunkov, committed by Jens Axboe

io_uring: add helper to return req to cache list

Don't hand-code wq_stack_add_head() onto ->free_list, which serves for
recycling io_kiocb; add a helper that does it for us.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/f206f575486a8dd3d52f074ab37ed146b2d215b7.1649771823.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 88ab95be
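For context, the new helper wraps the intrusive singly linked stack that backs the per-ring request cache: freed io_kiocbs are pushed onto ctx->submit_state.free_list and popped again on allocation. Below is a minimal, self-contained userspace sketch of that mechanism; the demo_* types and node_to_req() are illustrative stand-ins rather than kernel definitions, and the push/pop bodies mirror what the kernel's wq_stack_add_head()/wq_stack_extract() do.

#include <stdio.h>
#include <stddef.h>

/* Reduced stand-ins for the kernel types involved (illustrative only). */
struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct demo_kiocb {
	int id;
	struct io_wq_work_node comp_list;	/* intrusive link into the cache */
};

struct demo_ctx {
	struct {
		struct io_wq_work_node free_list;	/* stack head */
	} submit_state;
};

/* Push a node on top of the stack, as wq_stack_add_head() does. */
static void wq_stack_add_head(struct io_wq_work_node *node,
			      struct io_wq_work_node *stack)
{
	node->next = stack->next;
	stack->next = node;
}

/* Pop the top node, as the kernel's wq_stack_extract() does. */
static struct io_wq_work_node *wq_stack_extract(struct io_wq_work_node *stack)
{
	struct io_wq_work_node *node = stack->next;

	stack->next = node->next;
	return node;
}

/* The new helper: return a request to the per-ring cache. */
static void io_req_add_to_cache(struct demo_kiocb *req, struct demo_ctx *ctx)
{
	wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
}

/* Recover the request from its embedded node (container_of in the kernel). */
#define node_to_req(n) \
	((struct demo_kiocb *)((char *)(n) - offsetof(struct demo_kiocb, comp_list)))

int main(void)
{
	struct demo_ctx ctx = { .submit_state = { .free_list = { .next = NULL } } };
	struct demo_kiocb reqs[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };

	for (int i = 0; i < 3; i++)
		io_req_add_to_cache(&reqs[i], &ctx);

	/* Allocation pops the most recently freed request: LIFO, cache-warm. */
	struct demo_kiocb *req =
		node_to_req(wq_stack_extract(&ctx.submit_state.free_list));
	printf("reused req %d\n", req->id);	/* prints 2 */

	for (struct io_wq_work_node *n = ctx.submit_state.free_list.next; n; n = n->next)
		printf("still cached: req %d\n", node_to_req(n)->id);	/* 1, then 0 */
	return 0;
}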
@@ -1508,6 +1508,11 @@ static inline void req_fail_link_node(struct io_kiocb *req, int res)
 	req->cqe.res = res;
 }
 
+static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
+{
+	wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
+}
+
 static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
 {
 	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
@@ -2238,7 +2243,6 @@ static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
 static __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
 	__must_hold(&ctx->uring_lock)
 {
-	struct io_submit_state *state = &ctx->submit_state;
 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
 	void *reqs[IO_REQ_ALLOC_BATCH];
 	int ret, i;
@@ -2272,7 +2276,7 @@ static __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
 		struct io_kiocb *req = reqs[i];
 
 		io_preinit_req(req, ctx);
-		wq_stack_add_head(&req->comp_list, &state->free_list);
+		io_req_add_to_cache(req, ctx);
 	}
 	return true;
 }
@@ -2715,7 +2719,7 @@ static void io_free_batch_list(struct io_ring_ctx *ctx,
 		}
 		task_refs++;
 		node = req->comp_list.next;
-		wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
+		io_req_add_to_cache(req, ctx);
 	} while (node);
 
 	if (task)
@@ -7872,7 +7876,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 		req = io_alloc_req(ctx);
 		sqe = io_get_sqe(ctx);
 		if (unlikely(!sqe)) {
-			wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
+			io_req_add_to_cache(req, ctx);
 			break;
 		}
 		/* will complete beyond this point, count as submitted */