Commit 528407b1 authored by Pavel Begunkov, committed by Jens Axboe

io_uring/rsrc: consolidate node caching

We store one pre-allocated rsrc node in ->rsrc_backup_node; merge it
with ->rsrc_node_cache.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/6d5410e51ccd29be7a716be045b51d6b371baef6.1681210788.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 786788a8
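
The patch replaces the dedicated ->rsrc_backup_node pointer with the ring's
generic allocation cache: io_rsrc_node_switch_start() now pre-fills
->rsrc_node_cache instead of a one-slot backup, and the switch path pops a
node from that cache. As a rough, self-contained sketch of the pattern
(simplified stand-in types, not the kernel's io_alloc_cache or
io_cache_entry definitions):

    /*
     * Minimal sketch of the consolidation, not the kernel code itself:
     * a singly-linked free list stands in for io_alloc_cache, and the
     * switch path pops from it instead of a dedicated backup pointer.
     */
    #include <stdbool.h>
    #include <stdlib.h>

    struct cache_entry {
            struct cache_entry *next;
    };

    struct alloc_cache {
            struct cache_entry *head;       /* NULL <=> cache is empty */
    };

    static bool cache_empty(struct alloc_cache *c)
    {
            return !c->head;                /* mirrors !cache->list.next */
    }

    static void cache_put(struct alloc_cache *c, struct cache_entry *e)
    {
            e->next = c->head;
            c->head = e;
    }

    static struct cache_entry *cache_get(struct alloc_cache *c)
    {
            struct cache_entry *e = c->head;

            if (e)
                    c->head = e->next;
            return e;
    }

    /*
     * The only fallible step: top up the cache so that a later
     * "switch" can take a node without being able to fail.
     */
    static int rsrc_switch_start(struct alloc_cache *c)
    {
            struct cache_entry *e;

            if (!cache_empty(c))
                    return 0;
            e = calloc(1, sizeof(*e));
            if (!e)
                    return -1;              /* -ENOMEM in the kernel */
            cache_put(c, e);
            return 0;
    }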
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -326,7 +326,6 @@ struct io_ring_ctx {
 	struct io_restriction		restrictions;
 
 	/* slow path rsrc auxilary data, used by update/register */
-	struct io_rsrc_node		*rsrc_backup_node;
 	struct io_mapped_ubuf		*dummy_ubuf;
 	struct io_rsrc_data		*file_data;
 	struct io_rsrc_data		*buf_data;
--- a/io_uring/alloc_cache.h
+++ b/io_uring/alloc_cache.h
@@ -23,6 +23,11 @@ static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
 	return false;
 }
 
+static inline bool io_alloc_cache_empty(struct io_alloc_cache *cache)
+{
+	return !cache->list.next;
+}
+
 static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
 {
 	if (cache->list.next) {
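
(For context: io_alloc_cache keeps its entries on an intrusive singly-linked
list headed at cache->list, so the new io_alloc_cache_empty() mirrors the
test io_alloc_cache_get() already performs; a NULL ->next means nothing is
cached.)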
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2852,8 +2852,6 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	/* there are no registered resources left, nobody uses it */
 	if (ctx->rsrc_node)
 		io_rsrc_node_destroy(ctx, ctx->rsrc_node);
-	if (ctx->rsrc_backup_node)
-		io_rsrc_node_destroy(ctx, ctx->rsrc_backup_node);
 
 	WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -230,7 +230,7 @@ void io_rsrc_node_switch(struct io_ring_ctx *ctx,
 			struct io_rsrc_data *data_to_kill)
 	__must_hold(&ctx->uring_lock)
 {
-	WARN_ON_ONCE(!ctx->rsrc_backup_node);
+	WARN_ON_ONCE(io_alloc_cache_empty(&ctx->rsrc_node_cache));
 	WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);
 
 	if (data_to_kill) {
@@ -245,18 +245,20 @@ void io_rsrc_node_switch(struct io_ring_ctx *ctx,
 		ctx->rsrc_node = NULL;
 	}
 
-	if (!ctx->rsrc_node) {
-		ctx->rsrc_node = ctx->rsrc_backup_node;
-		ctx->rsrc_backup_node = NULL;
-	}
+	if (!ctx->rsrc_node)
+		ctx->rsrc_node = io_rsrc_node_alloc(ctx);
 }
 
 int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
 {
-	if (ctx->rsrc_backup_node)
-		return 0;
-	ctx->rsrc_backup_node = io_rsrc_node_alloc(ctx);
-	return ctx->rsrc_backup_node ? 0 : -ENOMEM;
+	if (io_alloc_cache_empty(&ctx->rsrc_node_cache)) {
+		struct io_rsrc_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
+
+		if (!node)
+			return -ENOMEM;
+		io_alloc_cache_put(&ctx->rsrc_node_cache, &node->cache);
+	}
+	return 0;
 }
 
 __cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
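
One consequence of the consolidation: io_rsrc_node_switch_start() remains
the only fallible step, so callers can surface -ENOMEM before tearing
anything down, while io_rsrc_node_switch() itself cannot fail (the
WARN_ON_ONCE above asserts that callers respected this ordering). A
hypothetical caller flow, continuing the earlier sketch (names are
illustrative, not the kernel API):

    /* Reserve a node up front so the later switch cannot fail mid-update. */
    int update_resources(struct alloc_cache *c)
    {
            struct cache_entry *node;

            if (rsrc_switch_start(c))
                    return -1;              /* -ENOMEM; nothing changed yet */

            /* ... retire the old node, queue it for freeing ... */

            node = cache_get(c);            /* guaranteed non-NULL after start */
            /* ... install node as the current rsrc node ... */
            free(node);                     /* sketch only: don't leak the node */
            return 0;
    }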