Commit d81499bf authored by Pavel Begunkov, committed by Jens Axboe

io_uring: inline linked part of io_req_find_next

Inline the part of __io_req_find_next() that returns a request but
doesn't need io_disarm_next(). It's only two call sites, but it makes
links a bit faster.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/4126d13f23d0e91b39b3558e16bd86cafa7fcef2.1631115443.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 6b639522
fs/io_uring.c
@@ -2074,17 +2074,8 @@ static bool io_disarm_next(struct io_kiocb *req)
 	return posted;
 }
 
-static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
+static void __io_req_find_next_prep(struct io_kiocb *req)
 {
-	struct io_kiocb *nxt;
-
-	/*
-	 * If LINK is set, we have dependent requests in this chain. If we
-	 * didn't fail this request, queue the first one up, moving any other
-	 * dependencies to the next request. In case of failure, fail the rest
-	 * of the chain.
-	 */
-	if (req->flags & IO_DISARM_MASK) {
 	struct io_ring_ctx *ctx = req->ctx;
 	bool posted;
@@ -2095,17 +2086,25 @@ static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
 	spin_unlock(&ctx->completion_lock);
 	if (posted)
 		io_cqring_ev_posted(ctx);
-	}
-	nxt = req->link;
-	req->link = NULL;
-	return nxt;
 }
 
 static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
 {
+	struct io_kiocb *nxt;
+
 	if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
 		return NULL;
-	return __io_req_find_next(req);
+	/*
+	 * If LINK is set, we have dependent requests in this chain. If we
+	 * didn't fail this request, queue the first one up, moving any other
+	 * dependencies to the next request. In case of failure, fail the rest
+	 * of the chain.
+	 */
+	if (unlikely(req->flags & IO_DISARM_MASK))
+		__io_req_find_next_prep(req);
+	nxt = req->link;
+	req->link = NULL;
+	return nxt;
 }
 
 static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked)
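For readers skimming the change: this is the classic hot/cold split, where the one-branch fast path gets inlined into callers and the lock-taking disarm path stays out of line. Below is a minimal, self-contained sketch of that pattern. The types and names here (struct request, find_next, __find_next_prep, and REQ_F_FAIL standing in for IO_DISARM_MASK) are simplified stand-ins invented for illustration, not the kernel's code.

/*
 * Sketch of the fast-path/slow-path split applied by this commit,
 * using simplified stand-in types (not the kernel's io_kiocb).
 */
#include <stdio.h>

#define REQ_F_LINK	(1u << 0)
#define REQ_F_HARDLINK	(1u << 1)
#define REQ_F_FAIL	(1u << 2)	/* stand-in for IO_DISARM_MASK bits */

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

struct request {
	unsigned int flags;
	struct request *link;
};

/* Cold path, kept out of line: in the kernel this is where the
 * completion lock is taken and the link chain is disarmed. Here it
 * just marks the rest of the chain as failed. */
static void __find_next_prep(struct request *req)
{
	for (struct request *r = req->link; r; r = r->link)
		r->flags |= REQ_F_FAIL;
}

/* Hot path, inlined into every caller: the common case is a single
 * flags test, and the link handoff no longer pays for a function call. */
static inline struct request *find_next(struct request *req)
{
	struct request *nxt;

	if (likely(!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))))
		return NULL;
	if (unlikely(req->flags & REQ_F_FAIL))
		__find_next_prep(req);
	nxt = req->link;
	req->link = NULL;
	return nxt;
}

int main(void)
{
	/* Normal link: fast path only, hands the next request back. */
	struct request b = { .flags = 0, .link = NULL };
	struct request a = { .flags = REQ_F_LINK, .link = &b };
	printf("next of a: %p (expect %p)\n", (void *)find_next(&a), (void *)&b);
	printf("next of b: %p (expect NULL)\n", (void *)find_next(&b));

	/* Failed request: the cold path fails the rest of the chain. */
	struct request d = { .flags = 0, .link = NULL };
	struct request c = { .flags = REQ_F_LINK | REQ_F_FAIL, .link = &d };
	find_next(&c);
	printf("d failed: %s\n", (d.flags & REQ_F_FAIL) ? "yes" : "no");
	return 0;
}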