Commit 0b222eeb authored by Pavel Begunkov, committed by Jens Axboe

io_uring/rsrc: remove rsrc_data refs

Instead of waiting for rsrc_data->refs to drop to zero, check whether
there are rsrc nodes queued for completion; that's easier than
maintaining references.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/8e33fd143d83e11af3e386aea28eb6d6c6a1be10.1681395792.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 7d481e03
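The scheme in miniature: every dying rsrc node sits on ctx->rsrc_ref_list until its puts complete, so "all pending puts are done" is exactly "the list is empty", and a quiescer can sleep on a wait queue for that condition instead of accounting references per io_rsrc_data. Below is a minimal userspace sketch of that pattern; pthread primitives stand in for the kernel wait queue, and all names here (rnode, node_queue(), node_complete(), quiesce()) are invented for illustration, not kernel API.

#include <pthread.h>
#include <stddef.h>

struct rnode {
	struct rnode *next;
};

static struct rnode *pending;             /* head of the pending list      */
static struct rnode **pending_tail = &pending;
static unsigned quiescers;                /* models ctx->rsrc_quiesce      */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t quiesce_wq = PTHREAD_COND_INITIALIZER; /* ~rsrc_quiesce_wq */

/* producer: queue a node for completion (cf. io_rsrc_node_switch()) */
void node_queue(struct rnode *n)
{
	pthread_mutex_lock(&lock);
	n->next = NULL;
	*pending_tail = n;
	pending_tail = &n->next;
	pthread_mutex_unlock(&lock);
}

/* completion side: retire the oldest node; wake waiters only when the
 * list drains and somebody is quiescing (cf. io_rsrc_node_ref_zero()) */
void node_complete(void)
{
	pthread_mutex_lock(&lock);
	if (pending) {
		pending = pending->next;
		if (!pending)
			pending_tail = &pending;
	}
	if (!pending && quiescers)
		pthread_cond_broadcast(&quiesce_wq);
	pthread_mutex_unlock(&lock);
}

/* quiesce side: no per-data refcount to drop, just wait until nothing
 * is queued (cf. the list_empty() loop in io_rsrc_ref_quiesce()) */
void quiesce(void)
{
	pthread_mutex_lock(&lock);
	quiescers++;
	while (pending)
		pthread_cond_wait(&quiesce_wq, &lock);
	quiescers--;
	pthread_mutex_unlock(&lock);
}

As in the patch, the completion side broadcasts only when the list actually drains while somebody is quiescing (quiescers mirrors ctx->rsrc_quiesce), so the common path where nobody waits costs a single predictable branch.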
include/linux/io_uring_types.h
@@ -334,6 +334,7 @@ struct io_ring_ctx {
 	struct list_head		rsrc_ref_list;
 	struct io_alloc_cache		rsrc_node_cache;
 	struct wait_queue_head		rsrc_quiesce_wq;
+	unsigned			rsrc_quiesce;

 	struct list_head		io_buffers_pages;
...
io_uring/io_uring.c
@@ -2831,8 +2831,8 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 {
 	io_sq_thread_finish(ctx);
 	/* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
-	io_wait_rsrc_data(ctx->buf_data);
-	io_wait_rsrc_data(ctx->file_data);
+	if (WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list)))
+		return;

 	mutex_lock(&ctx->uring_lock);
 	if (ctx->buf_data)
...
io_uring/rsrc.c
@@ -31,11 +31,6 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
 #define IORING_MAX_FIXED_FILES	(1U << 20)
 #define IORING_MAX_REG_BUFFERS	(1U << 14)

-static inline bool io_put_rsrc_data_ref(struct io_rsrc_data *rsrc_data)
-{
-	return !--rsrc_data->refs;
-}
-
 int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
 {
 	unsigned long page_limit, cur_pages, new_pages;
@@ -158,7 +153,6 @@ static void io_rsrc_put_work_one(struct io_rsrc_data *rsrc_data,
 static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
 {
 	struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
-	struct io_ring_ctx *ctx = rsrc_data->ctx;
 	struct io_rsrc_put *prsrc, *tmp;

 	if (ref_node->inline_items)
@@ -171,14 +165,6 @@ static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
 	}

 	io_rsrc_node_destroy(rsrc_data->ctx, ref_node);
-	if (io_put_rsrc_data_ref(rsrc_data))
-		wake_up_all(&ctx->rsrc_quiesce_wq);
-}
-
-void io_wait_rsrc_data(struct io_rsrc_data *data)
-{
-	if (data)
-		WARN_ON_ONCE(!io_put_rsrc_data_ref(data));
 }

 void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
@@ -201,6 +187,8 @@ void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
 		list_del(&node->node);
 		__io_rsrc_put_work(node);
 	}
+	if (list_empty(&ctx->rsrc_ref_list) && unlikely(ctx->rsrc_quiesce))
+		wake_up_all(&ctx->rsrc_quiesce_wq);
 }

 struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
@@ -235,7 +223,6 @@ void io_rsrc_node_switch(struct io_ring_ctx *ctx,
 	if (WARN_ON_ONCE(!backup))
 		return;

-	data_to_kill->refs++;
 	node->rsrc_data = data_to_kill;
 	list_add_tail(&node->node, &ctx->rsrc_ref_list);
 	/* put master ref */
@@ -269,8 +256,7 @@ __cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
 		return ret;
 	io_rsrc_node_switch(ctx, data);

-	/* kill initial ref */
-	if (io_put_rsrc_data_ref(data))
+	if (list_empty(&ctx->rsrc_ref_list))
 		return 0;

 	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
@@ -278,6 +264,7 @@ __cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
 		smp_mb();
 	}

+	ctx->rsrc_quiesce++;
 	data->quiesce = true;
 	do {
 		prepare_to_wait(&ctx->rsrc_quiesce_wq, &we, TASK_INTERRUPTIBLE);
@@ -286,12 +273,8 @@ __cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
 		ret = io_run_task_work_sig(ctx);
 		if (ret < 0) {
 			mutex_lock(&ctx->uring_lock);
-			if (!data->refs) {
+			if (list_empty(&ctx->rsrc_ref_list))
 				ret = 0;
-			} else {
-				/* restore the master reference */
-				data->refs++;
-			}
 			break;
 		}
@@ -299,10 +282,12 @@ __cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
 		__set_current_state(TASK_RUNNING);
 		mutex_lock(&ctx->uring_lock);
 		ret = 0;
-	} while (data->refs);
+	} while (!list_empty(&ctx->rsrc_ref_list));

 	finish_wait(&ctx->rsrc_quiesce_wq, &we);
 	data->quiesce = false;
+	ctx->rsrc_quiesce--;

 	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
 		atomic_set(&ctx->cq_wait_nr, 0);
 		smp_mb();
@@ -371,7 +356,6 @@ __cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx,
 	data->nr = nr;
 	data->ctx = ctx;
 	data->do_put = do_put;
-	data->refs = 1;
 	if (utags) {
 		ret = -EFAULT;
 		for (i = 0; i < nr; i++) {
...
io_uring/rsrc.h
@@ -35,7 +35,6 @@ struct io_rsrc_data {
 	u64				**tags;
 	unsigned int			nr;
 	rsrc_put_fn			*do_put;
-	int				refs;
 	bool				quiesce;
 };
@@ -69,7 +68,6 @@ struct io_mapped_ubuf {
 void io_rsrc_put_tw(struct callback_head *cb);
 void io_rsrc_node_ref_zero(struct io_rsrc_node *node);
 void io_rsrc_put_work(struct work_struct *work);
-void io_wait_rsrc_data(struct io_rsrc_data *data);
 void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *ref_node);
 int io_rsrc_node_switch_start(struct io_ring_ctx *ctx);
 struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx);
...