Commit 8e15c0e7 authored by Pavel Begunkov, committed by Jens Axboe

io_uring/rsrc: keep cached refs per node

We cache refs of the current node (i.e. ctx->rsrc_node) in
ctx->rsrc_cached_refs. We'll be moving away from atomics, so move the
cached refs into struct io_rsrc_node for now. It's a prep patch and
shouldn't change anything in practice.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/9edc3669c1d71b06c2dca78b2b2b8bb9292738b9.1680576071.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b8fb5b4f
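
For readers skimming the diff below: the mechanism being moved is a batched refcount cache. Rather than hitting the shared atomic refcount once per request, the code takes IO_RSRC_REF_BATCH atomic refs up front and hands them out from a plain int that is only touched under ctx->uring_lock; this patch relocates that int from the ctx into the node it counts for. A minimal standalone C sketch of the pattern (illustrative names only, not the kernel code):

#include <stdatomic.h>

#define REF_BATCH 100			/* stand-in for IO_RSRC_REF_BATCH */

struct node {
	atomic_int refs;		/* shared refcount, atomic ops */
	int cached_refs;		/* cache of already-taken refs, lock-protected */
};

/* Take a whole batch of atomic refs and credit them to the cache. */
static void refs_refill(struct node *node)
{
	node->cached_refs += REF_BATCH;
	atomic_fetch_add(&node->refs, REF_BATCH);
}

/* Per-request charge: a plain decrement; refill only when the cache runs dry. */
static void charge_ref(struct node *node)
{
	node->cached_refs--;
	if (node->cached_refs < 0)
		refs_refill(node);
}

/* Hand unused cached refs back in one atomic op (cf. io_rsrc_refs_drop()). */
static void refs_drop(struct node *node)
{
	if (node->cached_refs) {
		atomic_fetch_sub(&node->refs, node->cached_refs);
		node->cached_refs = 0;
	}
}

The point of the pattern is that the hot per-request path is a plain decrement under the already-held lock; the atomic is touched only once per batch.
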
include/linux/io_uring_types.h
@@ -240,7 +240,6 @@ struct io_ring_ctx {
 	 * uring_lock, and updated through io_uring_register(2)
 	 */
 	struct io_rsrc_node		*rsrc_node;
-	int				rsrc_cached_refs;
 	atomic_t			cancel_seq;
 	struct io_file_table		file_table;
 	unsigned			nr_user_files;
io_uring/rsrc.c
@@ -36,9 +36,11 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
 void io_rsrc_refs_drop(struct io_ring_ctx *ctx)
 	__must_hold(&ctx->uring_lock)
 {
-	if (ctx->rsrc_cached_refs) {
-		io_rsrc_put_node(ctx->rsrc_node, ctx->rsrc_cached_refs);
-		ctx->rsrc_cached_refs = 0;
+	struct io_rsrc_node *node = ctx->rsrc_node;
+
+	if (node && node->cached_refs) {
+		io_rsrc_put_node(node, node->cached_refs);
+		node->cached_refs = 0;
 	}
 }
@@ -151,11 +153,11 @@ static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
 	*slot = NULL;
 }
 
-void io_rsrc_refs_refill(struct io_ring_ctx *ctx)
+void io_rsrc_refs_refill(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
 	__must_hold(&ctx->uring_lock)
 {
-	ctx->rsrc_cached_refs += IO_RSRC_REF_BATCH;
-	refcount_add(IO_RSRC_REF_BATCH, &ctx->rsrc_node->refs);
+	node->cached_refs += IO_RSRC_REF_BATCH;
+	refcount_add(IO_RSRC_REF_BATCH, &node->refs);
 }
 
 static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
@@ -300,6 +302,7 @@ void io_rsrc_node_switch(struct io_ring_ctx *ctx,
 	if (!ctx->rsrc_node) {
 		ctx->rsrc_node = ctx->rsrc_backup_node;
 		ctx->rsrc_backup_node = NULL;
+		ctx->rsrc_node->cached_refs = 0;
 	}
 }
io_uring/rsrc.h
@@ -43,6 +43,7 @@ struct io_rsrc_node {
 	struct io_rsrc_data		*rsrc_data;
 	struct llist_node		llist;
 	bool				done;
+	int				cached_refs;
 };
 
 struct io_mapped_ubuf {
@@ -56,7 +57,7 @@ struct io_mapped_ubuf {
 void io_rsrc_put_tw(struct callback_head *cb);
 void io_rsrc_node_ref_zero(struct io_rsrc_node *node);
 void io_rsrc_put_work(struct work_struct *work);
-void io_rsrc_refs_refill(struct io_ring_ctx *ctx);
+void io_rsrc_refs_refill(struct io_ring_ctx *ctx, struct io_rsrc_node *node);
 void io_wait_rsrc_data(struct io_rsrc_data *data);
 void io_rsrc_node_destroy(struct io_rsrc_node *ref_node);
 void io_rsrc_refs_drop(struct io_ring_ctx *ctx);
@@ -128,17 +129,18 @@ static inline void io_req_put_rsrc_locked(struct io_kiocb *req,
 	if (node) {
 		if (node == ctx->rsrc_node)
-			ctx->rsrc_cached_refs++;
+			node->cached_refs++;
 		else
 			io_rsrc_put_node(node, 1);
 	}
 }
 
-static inline void io_charge_rsrc_node(struct io_ring_ctx *ctx)
+static inline void io_charge_rsrc_node(struct io_ring_ctx *ctx,
+				       struct io_rsrc_node *node)
 {
-	ctx->rsrc_cached_refs--;
-	if (unlikely(ctx->rsrc_cached_refs < 0))
-		io_rsrc_refs_refill(ctx);
+	node->cached_refs--;
+	if (unlikely(node->cached_refs < 0))
+		io_rsrc_refs_refill(ctx, node);
 }
 
 static inline void io_req_set_rsrc_node(struct io_kiocb *req,
@@ -151,7 +153,7 @@ static inline void io_req_set_rsrc_node(struct io_kiocb *req,
 		lockdep_assert_held(&ctx->uring_lock);
 
 		req->rsrc_node = ctx->rsrc_node;
-		io_charge_rsrc_node(ctx);
+		io_charge_rsrc_node(ctx, ctx->rsrc_node);
 		io_ring_submit_unlock(ctx, issue_flags);
 	}
 }