Commit c3524383 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: batch-free linked requests as well

There is no reason not to batch deallocation of linked requests. Take
away a request's next req first and handle it like everything else in
io_req_multi_free().
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 2757a23e
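To make the batching idea concrete, here is a small stand-alone user-space sketch of the pattern this patch extends: completed requests are collected in a fixed-size array and released in one pass, and a linked request now simply hands off its next request before joining the same batch. All of the types and helpers below (struct request, struct req_batch, queue_next(), req_multi_free(), free_req_many()) are simplified stand-ins for the io_uring internals touched by the diff, not the actual kernel code.

#include <stdio.h>
#include <stdlib.h>

#define BATCH_SIZE 8

/* simplified stand-in for an io_uring request */
struct request {
	int id;
	struct request *next;		/* next request in a link chain, if any */
};

/* simplified stand-in for struct req_batch */
struct req_batch {
	struct request *reqs[BATCH_SIZE];
	int to_free;
};

/* stand-in for io_queue_next(): hand the linked request off for execution */
static void queue_next(struct request *req)
{
	struct request *nxt = req->next;

	req->next = NULL;
	if (nxt)
		printf("queueing linked request %d\n", nxt->id);
}

/* stand-in for io_free_req_many(): release the whole batch in one pass */
static void free_req_many(struct req_batch *rb)
{
	for (int i = 0; i < rb->to_free; i++)
		free(rb->reqs[i]);
	rb->to_free = 0;
}

/*
 * Stand-in for io_req_multi_free(): detach/queue the next link first,
 * then add the request to the batch instead of freeing it immediately.
 */
static void req_multi_free(struct req_batch *rb, struct request *req)
{
	queue_next(req);
	rb->reqs[rb->to_free++] = req;
	if (rb->to_free == BATCH_SIZE)
		free_req_many(rb);
}

int main(void)
{
	struct req_batch rb = { .to_free = 0 };

	for (int i = 0; i < 20; i++) {
		struct request *req = calloc(1, sizeof(*req));

		req->id = i;
		req_multi_free(&rb, req);
	}
	free_req_many(&rb);		/* flush whatever is left in the batch */
	return 0;
}

In the real patch the effect is that only the rare fallback request still takes the slow per-request io_free_req() path; requests with REQ_F_LINK_HEAD no longer force it, since their next link is queued via io_queue_next() before batching.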
@@ -1728,17 +1728,21 @@ static void io_req_task_queue(struct io_kiocb *req)
 		wake_up_process(tsk);
 }
 
-static void io_free_req(struct io_kiocb *req)
+static void io_queue_next(struct io_kiocb *req)
 {
 	struct io_kiocb *nxt = NULL;
 
 	io_req_find_next(req, &nxt);
-	__io_free_req(req);
-
 	if (nxt)
 		io_req_task_queue(nxt);
 }
 
+static void io_free_req(struct io_kiocb *req)
+{
+	io_queue_next(req);
+	__io_free_req(req);
+}
+
 /*
  * Drop reference to request, return next in chain (if there is one) if this
  * was the last reference to this request.
@@ -1835,16 +1839,19 @@ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
 	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
 }
 
-static inline bool io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
+static inline void io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
 {
-	if ((req->flags & REQ_F_LINK_HEAD) || io_is_fallback_req(req))
-		return false;
+	if (unlikely(io_is_fallback_req(req))) {
+		io_free_req(req);
+		return;
+	}
+	if (req->flags & REQ_F_LINK_HEAD)
+		io_queue_next(req);
 
 	io_dismantle_req(req);
 	rb->reqs[rb->to_free++] = req;
 	if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
 		io_free_req_many(req->ctx, rb);
-	return true;
 }
 
 static int io_put_kbuf(struct io_kiocb *req)
@@ -1910,9 +1917,8 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		__io_cqring_fill_event(req, req->result, cflags);
 		(*nr_events)++;
 
-		if (refcount_dec_and_test(&req->refs) &&
-		    !io_req_multi_free(&rb, req))
-			io_free_req(req);
+		if (refcount_dec_and_test(&req->refs))
+			io_req_multi_free(&rb, req);
 	}
 
 	io_commit_cqring(ctx);