Commit 5af1d13e authored by Pavel Begunkov, committed by Jens Axboe

io_uring: batch put_task_struct()

As every iopoll request has a task ref, it becomes expensive to put
them one by one; instead, we can put several at once by integrating
that into io_req_free_batch().
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent dd6f843a
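For context (not part of the commit), here is a minimal userspace C sketch of the batching idea: instead of dropping one task reference per request, references to the same task are accumulated and released with a single subtraction, as put_task_struct_many() does in the kernel. The names task_ref, put_task_ref_many, batch and batch_put_task below are illustrative only.

#include <stdio.h>

/* Toy stand-in for the task refcount (the kernel uses refcount_t in task_struct). */
struct task_ref {
        int usage;
};

/* One subtraction replaces 'nr' individual decrements (cf. put_task_struct_many()). */
static void put_task_ref_many(struct task_ref *t, int nr)
{
        t->usage -= nr;
        if (t->usage == 0)
                printf("last ref dropped, task would be freed\n");
}

/* Mirrors the task/task_refs fields this commit adds to struct req_batch. */
struct batch {
        struct task_ref *task;
        int task_refs;
};

/* Per-request step: accumulate the ref instead of dropping it immediately. */
static void batch_put_task(struct batch *rb, struct task_ref *req_task)
{
        if (req_task != rb->task) {
                /* Request belongs to a different task: flush the pending batch. */
                if (rb->task)
                        put_task_ref_many(rb->task, rb->task_refs);
                rb->task = req_task;
                rb->task_refs = 0;
        }
        rb->task_refs++;
}

int main(void)
{
        struct task_ref t = { .usage = 3 };     /* three requests hold a ref each */
        struct batch rb = { .task = NULL, .task_refs = 0 };

        for (int i = 0; i < 3; i++)
                batch_put_task(&rb, &t);        /* no per-request put */

        if (rb.task)                            /* final flush, as in batch_finish */
                put_task_ref_many(rb.task, rb.task_refs);
        return 0;
}

When a request from a different task shows up, the pending batch is flushed first, so the batch only ever tracks one task at a time, which is the common case for iopoll completions.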
fs/io_uring.c
@@ -1544,7 +1544,6 @@ static void io_dismantle_req(struct io_kiocb *req)
 	kfree(req->io);
 	if (req->file)
 		io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
-	__io_put_req_task(req);
 	io_req_clean_work(req);
 
 	if (req->flags & REQ_F_INFLIGHT) {
@@ -1564,6 +1563,7 @@ static void __io_free_req(struct io_kiocb *req)
 	struct io_ring_ctx *ctx;
 
 	io_dismantle_req(req);
+	__io_put_req_task(req);
 	ctx = req->ctx;
 	if (likely(!io_is_fallback_req(req)))
 		kmem_cache_free(req_cachep, req);
@@ -1807,8 +1807,18 @@ static void io_free_req(struct io_kiocb *req)
 struct req_batch {
 	void *reqs[IO_IOPOLL_BATCH];
 	int to_free;
+
+	struct task_struct *task;
+	int task_refs;
 };
 
+static inline void io_init_req_batch(struct req_batch *rb)
+{
+	rb->to_free = 0;
+	rb->task_refs = 0;
+	rb->task = NULL;
+}
+
 static void __io_req_free_batch_flush(struct io_ring_ctx *ctx,
 				      struct req_batch *rb)
 {
@@ -1822,6 +1832,10 @@ static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
 {
 	if (rb->to_free)
 		__io_req_free_batch_flush(ctx, rb);
+	if (rb->task) {
+		put_task_struct_many(rb->task, rb->task_refs);
+		rb->task = NULL;
+	}
 }
 
 static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
@@ -1833,6 +1847,17 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
 	if (req->flags & REQ_F_LINK_HEAD)
 		io_queue_next(req);
 
+	if (req->flags & REQ_F_TASK_PINNED) {
+		if (req->task != rb->task) {
+			if (rb->task)
+				put_task_struct_many(rb->task, rb->task_refs);
+			rb->task = req->task;
+			rb->task_refs = 0;
+		}
+		rb->task_refs++;
+		req->flags &= ~REQ_F_TASK_PINNED;
+	}
+
 	io_dismantle_req(req);
 	rb->reqs[rb->to_free++] = req;
 	if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
@@ -1978,7 +2003,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 	/* order with ->result store in io_complete_rw_iopoll() */
 	smp_rmb();
 
-	rb.to_free = 0;
+	io_init_req_batch(&rb);
 	while (!list_empty(done)) {
 		int cflags = 0;
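Taken together, the call-site flow in io_iopoll_complete() after this patch is roughly the following (condensed from the hunks above; the completion details inside the loop are elided):

	struct req_batch rb;

	io_init_req_batch(&rb);
	while (!list_empty(done)) {
		req = list_first_entry(done, struct io_kiocb, list);
		/* ... post the CQE and drop the request's own reference ... */
		io_req_free_batch(&rb, req);	/* accumulates a ref on req->task */
	}
	/* ... */
	io_req_free_batch_finish(ctx, &rb);	/* one batched drop per tracked task */

The effect is that a run of iopoll completions from the same task costs a single batched put via put_task_struct_many() instead of one put_task_struct() per request.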