Commit 8bab4c09 authored by Jens Axboe

io_uring: allow conditional reschedule for intensive iterators

If we have a lot of threads and rings, the tctx list can get quite big.
This is especially true if we keep creating new threads and rings.
Likewise for the provided buffers list. Be nice and insert a conditional
reschedule point while iterating the nodes for deletion.

Link: https://lore.kernel.org/io-uring/00000000000064b6b405ccb41113@google.com/
Reported-by: syzbot+111d2a03f51f5ae73775@syzkaller.appspotmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 5b7aa38d
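
For context, the pattern this patch applies can be sketched in isolation. The snippet below is a hypothetical illustration, not code from the patch: the xarray, the entry type, and the my_free_entry() helper are made up, and only the xa_for_each()/cond_resched() structure mirrors the change.

#include <linux/sched.h>	/* cond_resched() */
#include <linux/xarray.h>	/* struct xarray, xa_for_each() */

struct my_entry;				/* hypothetical entry type */
void my_free_entry(struct my_entry *entry);	/* hypothetical teardown helper */

static void my_destroy_all(struct xarray *xa)
{
	struct my_entry *entry;
	unsigned long index;

	/*
	 * Tearing down a large xarray in one uninterrupted loop can
	 * monopolize the CPU. A cond_resched() per iteration inserts a
	 * voluntary preemption point: it lets the scheduler run other
	 * tasks if needed and is cheap when nothing else is runnable.
	 */
	xa_for_each(xa, index, entry) {
		my_free_entry(entry);
		cond_resched();
	}
}

The patch itself applies exactly this shape to the two deletion loops below.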
@@ -9173,8 +9173,10 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx)
 	struct io_buffer *buf;
 	unsigned long index;
 
-	xa_for_each(&ctx->io_buffers, index, buf)
+	xa_for_each(&ctx->io_buffers, index, buf) {
 		__io_remove_buffers(ctx, buf, index, -1U);
+		cond_resched();
+	}
 }
 
 static void io_req_cache_free(struct list_head *list)
@@ -9672,8 +9674,10 @@ static void io_uring_clean_tctx(struct io_uring_task *tctx)
 	struct io_tctx_node *node;
 	unsigned long index;
 
-	xa_for_each(&tctx->xa, index, node)
+	xa_for_each(&tctx->xa, index, node) {
 		io_uring_del_tctx_node(index);
+		cond_resched();
+	}
 	if (wq) {
 		/*
 		 * Must be after io_uring_del_task_file() (removes nodes under
...