Commit f32f004c authored by Matthew Wilcox

ida: Convert to XArray

Use the XA_TRACK_FREE ability to track which entries have a free bit,
similarly to how it uses the radix tree's IDR_FREE tag.  This eliminates
the per-cpu ida_bitmap preload, and fixes the memory consumption
regression I introduced when making the IDR able to store any pointer.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent 371c752d
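
The user-facing IDA API is unchanged by this conversion; only the backing store moves from the radix tree to the XArray. As a reminder of what that API looks like, here is a minimal sketch of typical usage (ida_alloc(), ida_free() and DEFINE_IDA() are the real interfaces; the example_* names are hypothetical):

	#include <linux/idr.h>

	static DEFINE_IDA(example_ida);		/* hypothetical instance */

	/* Allocate the lowest free ID; may sleep under GFP_KERNEL. */
	static int example_get_id(void)
	{
		return ida_alloc(&example_ida, GFP_KERNEL);
	}

	/* Return an ID to the pool once its user is gone. */
	static void example_put_id(int id)
	{
		ida_free(&example_ida, id);
	}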
@@ -214,8 +214,7 @@ static inline void idr_preload_end(void)
 		++id, (entry) = idr_get_next((idr), &(id)))
 
 /*
- * IDA - IDR based id allocator, use when translation from id to
- * pointer isn't necessary.
+ * IDA - ID Allocator, use when translation from id to pointer isn't necessary.
  */
 #define IDA_CHUNK_SIZE		128	/* 128 bytes per chunk */
 #define IDA_BITMAP_LONGS	(IDA_CHUNK_SIZE / sizeof(long))
@@ -225,14 +224,14 @@ struct ida_bitmap {
 	unsigned long		bitmap[IDA_BITMAP_LONGS];
 };
 
-DECLARE_PER_CPU(struct ida_bitmap *, ida_bitmap);
-
 struct ida {
-	struct radix_tree_root	ida_rt;
+	struct xarray		xa;
 };
 
+#define IDA_INIT_FLAGS	(XA_FLAGS_LOCK_IRQ | XA_FLAGS_ALLOC)
+
 #define IDA_INIT(name)	{					\
-	.ida_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER | GFP_NOWAIT),	\
+	.xa = XARRAY_INIT(name, IDA_INIT_FLAGS)			\
 }
 #define DEFINE_IDA(name)	struct ida name = IDA_INIT(name)
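
IDA_INIT_FLAGS is new here: XA_FLAGS_LOCK_IRQ records that the xa_lock may be taken from interrupt context, and XA_FLAGS_ALLOC makes the XArray track which entries are free, which is what replaces the radix tree's IDR_FREE tag. Both initialisation styles remain available; a short sketch with illustrative names:

	/* Compile-time initialisation: */
	static DEFINE_IDA(static_ida);

	/* Run-time initialisation, e.g. in a driver's init path: */
	struct ida runtime_ida;

	ida_init(&runtime_ida);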
@@ -292,7 +291,7 @@ static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
 
 static inline void ida_init(struct ida *ida)
 {
-	INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT);
+	xa_init_flags(&ida->xa, IDA_INIT_FLAGS);
 }
 
 #define ida_simple_get(ida, start, end, gfp)	\
@@ -301,9 +300,6 @@ static inline void ida_init(struct ida *ida)
 static inline bool ida_is_empty(const struct ida *ida)
 {
-	return radix_tree_empty(&ida->ida_rt);
+	return xa_empty(&ida->xa);
 }
 
-/* in lib/radix-tree.c */
-int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
-
 #endif /* __IDR_H__ */
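
With ida_pre_get() gone from the header, nothing is preallocated on the caller's behalf any more; the gfp mask passed to the ida_alloc() family is used at allocation time instead. A sketch of the resulting calling convention, reusing the hypothetical example_ida from above:

	int id;

	/* Constrain IDs to [0, 1023]; returns -ENOMEM or -ENOSPC on failure. */
	id = ida_alloc_max(&example_ida, 1023, GFP_KERNEL);
	if (id < 0)
		return id;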
[The diff for one file is collapsed and not shown.]
@@ -255,54 +255,6 @@ static unsigned long next_index(unsigned long index,
 	return (index & ~node_maxindex(node)) + (offset << node->shift);
 }
 
-#ifndef __KERNEL__
-static void dump_ida_node(void *entry, unsigned long index)
-{
-	unsigned long i;
-
-	if (!entry)
-		return;
-
-	if (radix_tree_is_internal_node(entry)) {
-		struct radix_tree_node *node = entry_to_node(entry);
-
-		pr_debug("ida node: %p offset %d indices %lu-%lu parent %p free %lx shift %d count %d\n",
-			node, node->offset, index * IDA_BITMAP_BITS,
-			((index | node_maxindex(node)) + 1) *
-				IDA_BITMAP_BITS - 1,
-			node->parent, node->tags[0][0], node->shift,
-			node->count);
-		for (i = 0; i < RADIX_TREE_MAP_SIZE; i++)
-			dump_ida_node(node->slots[i],
-					index | (i << node->shift));
-	} else if (xa_is_value(entry)) {
-		pr_debug("ida excp: %p offset %d indices %lu-%lu data %lx\n",
-				entry, (int)(index & RADIX_TREE_MAP_MASK),
-				index * IDA_BITMAP_BITS,
-				index * IDA_BITMAP_BITS + BITS_PER_XA_VALUE,
-				xa_to_value(entry));
-	} else {
-		struct ida_bitmap *bitmap = entry;
-
-		pr_debug("ida btmp: %p offset %d indices %lu-%lu data", bitmap,
-				(int)(index & RADIX_TREE_MAP_MASK),
-				index * IDA_BITMAP_BITS,
-				(index + 1) * IDA_BITMAP_BITS - 1);
-		for (i = 0; i < IDA_BITMAP_LONGS; i++)
-			pr_cont(" %lx", bitmap->bitmap[i]);
-		pr_cont("\n");
-	}
-}
-
-static void ida_dump(struct ida *ida)
-{
-	struct radix_tree_root *root = &ida->ida_rt;
-
-	pr_debug("ida: %p node %p free %d\n", ida, root->xa_head,
-				root->xa_flags >> ROOT_TAG_SHIFT);
-	dump_ida_node(root->xa_head, 0);
-}
-#endif
-
 /*
  * This assumes that the caller has performed appropriate preallocation, and
  * that the caller has pinned this thread of control to the current CPU.
@@ -2039,27 +1991,6 @@ void idr_preload(gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(idr_preload);
 
-int ida_pre_get(struct ida *ida, gfp_t gfp)
-{
-	/*
-	 * The IDA API has no preload_end() equivalent. Instead,
-	 * ida_get_new() can return -EAGAIN, prompting the caller
-	 * to return to the ida_pre_get() step.
-	 */
-	if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE))
-		preempt_enable();
-
-	if (!this_cpu_read(ida_bitmap)) {
-		struct ida_bitmap *bitmap = kzalloc(sizeof(*bitmap), gfp);
-
-		if (!bitmap)
-			return 0;
-		if (this_cpu_cmpxchg(ida_bitmap, NULL, bitmap))
-			kfree(bitmap);
-	}
-
-	return 1;
-}
-
 void __rcu **idr_get_free(struct radix_tree_root *root,
 			struct radix_tree_iter *iter, gfp_t gfp,
 			unsigned long max)
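
The comment deleted above documents the contract this commit retires: preload with ida_pre_get(), then retry the allocation whenever it reports -EAGAIN. Sketched out, the legacy loop looked roughly like this (ida_get_new() is the old allocation call the comment names; the loop shape is reconstructed for illustration, not taken from a caller in this diff):

	int id, ret;

	do {
		if (!ida_pre_get(&example_ida, GFP_KERNEL))
			return -ENOMEM;		/* preallocation failed */
		ret = ida_get_new(&example_ida, &id);
	} while (ret == -EAGAIN);

With the XArray backing store, the bitmap and node allocations happen inside ida_alloc() itself, under the caller's gfp mask, so both the retry loop and the per-cpu ida_bitmap disappear.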
@@ -2201,8 +2132,6 @@ static int radix_tree_cpu_dead(unsigned int cpu)
 		kmem_cache_free(radix_tree_node_cachep, node);
 		rtp->nr--;
 	}
-	kfree(per_cpu(ida_bitmap, cpu));
-	per_cpu(ida_bitmap, cpu) = NULL;
 	return 0;
 }
@@ -402,16 +402,15 @@ void ida_check_nomem(void)
  */
 void ida_check_conv_user(void)
 {
-#if 0
 	DEFINE_IDA(ida);
 	unsigned long i;
 
-	radix_tree_cpu_dead(1);
 	for (i = 0; i < 1000000; i++) {
 		int id = ida_alloc(&ida, GFP_NOWAIT);
 		if (id == -ENOMEM) {
-			IDA_BUG_ON(&ida, (i % IDA_BITMAP_BITS) !=
-					BITS_PER_XA_VALUE);
+			IDA_BUG_ON(&ida, ((i % IDA_BITMAP_BITS) !=
+					  BITS_PER_XA_VALUE) &&
+					 ((i % IDA_BITMAP_BITS) != 0));
 			id = ida_alloc(&ida, GFP_KERNEL);
 		} else {
 			IDA_BUG_ON(&ida, (i % IDA_BITMAP_BITS) ==
@@ -420,7 +419,6 @@ void ida_check_conv_user(void)
 		IDA_BUG_ON(&ida, id != i);
 	}
 	ida_destroy(&ida);
-#endif
 }
 
 void ida_check_random(void)
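
The loosened assertion reflects where ida_alloc(GFP_NOWAIT) may now need memory: at the start of each 1024-bit chunk (i % IDA_BITMAP_BITS == 0, where a new XArray node may be required now that nothing is preallocated) as well as at the point where the inline value entry overflows into a struct ida_bitmap (i % IDA_BITMAP_BITS == BITS_PER_XA_VALUE). A small user-space sketch of the arithmetic, assuming a 64-bit build:

	#include <stdio.h>

	#define BITS_PER_LONG		64		/* assumption: 64-bit build */
	#define BITS_PER_XA_VALUE	(BITS_PER_LONG - 1)
	#define IDA_BITMAP_BITS		(128 * 8)	/* IDA_CHUNK_SIZE bytes * 8 */

	int main(void)
	{
		/* IDs at which the test now tolerates a GFP_NOWAIT failure:
		 * 0, 63, 1024, 1087, 2048, 2111, ... */
		for (unsigned long i = 0; i < 2500; i++)
			if ((i % IDA_BITMAP_BITS) == 0 ||
			    (i % IDA_BITMAP_BITS) == BITS_PER_XA_VALUE)
				printf("%lu\n", i);
		return 0;
	}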