Commit a38d68db authored by Pavel Begunkov, committed by Jens Axboe

io_uring: help inlining of io_req_complete()

__io_req_complete() inlining is a bit weird: some compilers don't
optimise out the non-NULL branch of it even when it is called as
io_req_complete(). Help it a bit by extracting the state and stateless
helpers out of __io_req_complete().
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 8662daec
@@ -1886,7 +1886,8 @@ static void io_cqring_fill_event(struct io_kiocb *req, long res)
 	__io_cqring_fill_event(req, res, 0);
 }
 
-static void io_cqring_add_event(struct io_kiocb *req, long res, long cflags)
+static void io_req_complete_nostate(struct io_kiocb *req, long res,
+				    unsigned int cflags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	unsigned long flags;
@@ -1897,6 +1898,7 @@ static void io_cqring_add_event(struct io_kiocb *req, long res, long cflags)
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
 	io_cqring_ev_posted(ctx);
+	io_put_req(req);
 }
 
 static void io_submit_flush_completions(struct io_comp_state *cs)
@@ -1932,23 +1934,27 @@ static void io_submit_flush_completions(struct io_comp_state *cs)
 	cs->nr = 0;
 }
 
-static void __io_req_complete(struct io_kiocb *req, long res, unsigned cflags,
-			      struct io_comp_state *cs)
+static void io_req_complete_state(struct io_kiocb *req, long res,
+				  unsigned int cflags, struct io_comp_state *cs)
 {
-	if (!cs) {
-		io_cqring_add_event(req, res, cflags);
-		io_put_req(req);
-	} else {
-		io_clean_op(req);
-		req->result = res;
-		req->compl.cflags = cflags;
-		list_add_tail(&req->compl.list, &cs->list);
-		if (++cs->nr >= 32)
-			io_submit_flush_completions(cs);
-	}
+	io_clean_op(req);
+	req->result = res;
+	req->compl.cflags = cflags;
+	list_add_tail(&req->compl.list, &cs->list);
+	if (++cs->nr >= 32)
+		io_submit_flush_completions(cs);
+}
+
+static inline void __io_req_complete(struct io_kiocb *req, long res,
+				     unsigned cflags, struct io_comp_state *cs)
+{
+	if (!cs)
+		io_req_complete_nostate(req, res, cflags);
+	else
+		io_req_complete_state(req, res, cflags, cs);
+}
+
+static inline void io_req_complete(struct io_kiocb *req, long res)
 {
 	__io_req_complete(req, res, 0, NULL);
 }
...
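
For illustration only, here is a minimal, self-contained userspace sketch of
the pattern this commit applies (all names below are hypothetical, not kernel
code): with each branch body extracted into its own out-of-line helper, the
dispatcher stays small enough to inline, so a call site that passes a
constant NULL state can fold down to a direct call to the stateless helper.

#include <stdio.h>

/* Hypothetical stand-in for a per-submission completion batch. */
struct state { int nr; };

/* Stateless (slow) path: complete one request immediately. */
static void complete_nostate(long res)
{
	printf("immediate completion: %ld\n", res);
}

/* Stateful path: queue the completion into the batch. */
static void complete_state(long res, struct state *s)
{
	s->nr++;
	printf("batched completion %d: %ld\n", s->nr, res);
}

/* Small dispatcher: once inlined, the !s test folds away wherever
 * the caller passes a compile-time NULL. */
static inline void __complete(long res, struct state *s)
{
	if (!s)
		complete_nostate(res);
	else
		complete_state(res, s);
}

/* Mirrors io_req_complete(): s is a constant NULL here, so after
 * inlining this reduces to a plain complete_nostate(res) call. */
static inline void complete_now(long res)
{
	__complete(res, NULL);
}

int main(void)
{
	struct state s = { 0 };

	complete_now(1);	/* stateless path */
	__complete(2, &s);	/* stateful path */
	return 0;
}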