Commit 7c660731 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: consolidate putting reqs task

We grab a task reference for each request, and when putting it we also have to do
extra work such as inflight accounting and waking up that task. This
sequence is duplicated in several places, so it's a good time to add a helper.
On top of that, the helper generates better code due to better locality,
so it doesn't defeat alias analysis.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent ecfc8492
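As a rough illustration of the pattern being consolidated, here is a minimal, self-contained userspace sketch. Everything in it (struct req_task, wake_waiters(), put_task(), the plain C11 atomics) is an illustrative stand-in for the kernel's task_struct, io_uring_task, percpu_counter and wait-queue machinery, not real kernel API; the actual change is in the diff that follows.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the per-task io_uring state (tctx) plus the task refcount. */
struct req_task {
	atomic_int  refs;      /* like the task_struct usage count       */
	atomic_int  inflight;  /* like tctx->inflight (a percpu_counter) */
	atomic_bool in_idle;   /* like tctx->in_idle                     */
};

/* Placeholder for wake_up(&tctx->wait). */
static void wake_waiters(struct req_task *t)
{
	(void)t;
	printf("waking task waiting on its inflight count\n");
}

/*
 * The consolidated helper: one place that does the inflight accounting,
 * the idle wake-up and the (possibly batched) reference drop, instead of
 * open-coding the same three steps at every call site.
 */
static inline void put_task(struct req_task *t, int nr)
{
	atomic_fetch_sub(&t->inflight, nr);
	if (atomic_load(&t->in_idle))
		wake_waiters(t);
	atomic_fetch_sub(&t->refs, nr);
}

int main(void)
{
	struct req_task t = { .refs = 8, .inflight = 8, .in_idle = false };

	put_task(&t, 1);                 /* single-request free path            */
	atomic_store(&t.in_idle, true);
	put_task(&t, 7);                 /* batched path: drop nr refs at once  */

	printf("refs=%d inflight=%d\n",
	       atomic_load(&t.refs), atomic_load(&t.inflight));
	return 0;
}

The nr argument is what lets the batched free path below drop all references accumulated for one task (rb->task_refs) in a single call.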
fs/io_uring.c

@@ -2023,17 +2023,22 @@ static void io_dismantle_req(struct io_kiocb *req)
 	io_req_clean_work(req);
 }
 
+static inline void io_put_task(struct task_struct *task, int nr)
+{
+	struct io_uring_task *tctx = task->io_uring;
+
+	percpu_counter_sub(&tctx->inflight, nr);
+	if (unlikely(atomic_read(&tctx->in_idle)))
+		wake_up(&tctx->wait);
+	put_task_struct_many(task, nr);
+}
+
 static void __io_free_req(struct io_kiocb *req)
 {
-	struct io_uring_task *tctx = req->task->io_uring;
 	struct io_ring_ctx *ctx = req->ctx;
 
 	io_dismantle_req(req);
-
-	percpu_counter_dec(&tctx->inflight);
-	if (atomic_read(&tctx->in_idle))
-		wake_up(&tctx->wait);
-	put_task_struct(req->task);
+	io_put_task(req->task, 1);
 
 	if (likely(!io_is_fallback_req(req)))
 		kmem_cache_free(req_cachep, req);
@@ -2287,12 +2292,7 @@ static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
 	if (rb->to_free)
 		__io_req_free_batch_flush(ctx, rb);
 	if (rb->task) {
-		struct io_uring_task *tctx = rb->task->io_uring;
-
-		percpu_counter_sub(&tctx->inflight, rb->task_refs);
-		if (atomic_read(&tctx->in_idle))
-			wake_up(&tctx->wait);
-		put_task_struct_many(rb->task, rb->task_refs);
+		io_put_task(rb->task, rb->task_refs);
 		rb->task = NULL;
 	}
 }
@@ -2306,14 +2306,8 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
 	io_queue_next(req);
 
 	if (req->task != rb->task) {
-		if (rb->task) {
-			struct io_uring_task *tctx = rb->task->io_uring;
-
-			percpu_counter_sub(&tctx->inflight, rb->task_refs);
-			if (atomic_read(&tctx->in_idle))
-				wake_up(&tctx->wait);
-			put_task_struct_many(rb->task, rb->task_refs);
-		}
+		if (rb->task)
+			io_put_task(rb->task, rb->task_refs);
 		rb->task = req->task;
 		rb->task_refs = 0;
 	}