Commit 3095dd99 authored by Linus Torvalds

Merge tag 'xarray-6.6' of git://git.infradead.org/users/willy/xarray

Pull xarray fixes from Matthew Wilcox:

 - Fix a bug encountered by people using BitTorrent, where they'd get
   NULL pointer dereferences on page cache lookups when using XFS

 - Two documentation fixes

* tag 'xarray-6.6' of git://git.infradead.org/users/willy/xarray:
  idr: fix param name in idr_alloc_cyclic() doc
  xarray: Document necessary flag in alloc functions
  XArray: Do not return sibling entries from xa_load()
parents 7402e635 2a15de80
@@ -856,6 +856,9 @@ static inline int __must_check xa_insert_irq(struct xarray *xa,
  * stores the index into the @id pointer, then stores the entry at
  * that index. A concurrent lookup will not see an uninitialised @id.
  *
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
  * Context: Any context. Takes and releases the xa_lock. May sleep if
  * the @gfp flags permit.
  * Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -886,6 +889,9 @@ static inline __must_check int xa_alloc(struct xarray *xa, u32 *id,
  * stores the index into the @id pointer, then stores the entry at
  * that index. A concurrent lookup will not see an uninitialised @id.
  *
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
  * Context: Any context. Takes and releases the xa_lock while
  * disabling softirqs. May sleep if the @gfp flags permit.
  * Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -916,6 +922,9 @@ static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id,
  * stores the index into the @id pointer, then stores the entry at
  * that index. A concurrent lookup will not see an uninitialised @id.
  *
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
  * Context: Process context. Takes and releases the xa_lock while
  * disabling interrupts. May sleep if the @gfp flags permit.
  * Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -949,6 +958,9 @@ static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
  * The search for an empty entry will start at @next and will wrap
  * around if necessary.
  *
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
  * Context: Any context. Takes and releases the xa_lock. May sleep if
  * the @gfp flags permit.
  * Return: 0 if the allocation succeeded without wrapping. 1 if the
@@ -983,6 +995,9 @@ static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
  * The search for an empty entry will start at @next and will wrap
  * around if necessary.
  *
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
  * Context: Any context. Takes and releases the xa_lock while
  * disabling softirqs. May sleep if the @gfp flags permit.
  * Return: 0 if the allocation succeeded without wrapping. 1 if the
@@ -1017,6 +1032,9 @@ static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
  * The search for an empty entry will start at @next and will wrap
  * around if necessary.
  *
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
  * Context: Process context. Takes and releases the xa_lock while
  * disabling interrupts. May sleep if the @gfp flags permit.
  * Return: 0 if the allocation succeeded without wrapping. 1 if the
......
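The requirement spelled out by the added kernel-doc lines can be illustrated with a short sketch (editorially added for context; `my_xa`, `stash_entry` and `my_entry` are hypothetical names, not part of this patch): an allocating XArray must be initialized with XA_FLAGS_ALLOC before any of the xa_alloc() family is called on it.

```c
#include <linux/xarray.h>

/* DEFINE_XARRAY_ALLOC initializes the array with XA_FLAGS_ALLOC;
 * the runtime equivalent is xa_init_flags(&my_xa, XA_FLAGS_ALLOC).
 */
static DEFINE_XARRAY_ALLOC(my_xa);

static int stash_entry(void *my_entry)
{
	u32 id;

	/* Calling xa_alloc() on an array lacking XA_FLAGS_ALLOC is a bug,
	 * which is exactly what the new doc lines warn about.
	 */
	return xa_alloc(&my_xa, &id, my_entry, xa_limit_32b, GFP_KERNEL);
}
```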
@@ -100,7 +100,7 @@ EXPORT_SYMBOL_GPL(idr_alloc);
  * @end: The maximum ID (exclusive).
  * @gfp: Memory allocation flags.
  *
- * Allocates an unused ID in the range specified by @nextid and @end. If
+ * Allocates an unused ID in the range specified by @start and @end. If
  * @end is <= 0, it is treated as one larger than %INT_MAX. This allows
  * callers to use @start + N as @end as long as N is within integer range.
  * The search for an unused ID will start at the last ID allocated and will
......
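For reference, a hedged usage sketch of idr_alloc_cyclic() matching the corrected parameter names (`my_idr`, `assign_id` and the ID range are illustrative, not from this patch):

```c
#include <linux/idr.h>

static DEFINE_IDR(my_idr);

/* Allocates IDs cyclically in [1, 1000): the search resumes after the
 * last ID handed out and wraps around, per the corrected kernel-doc.
 * Returns the new ID on success or a negative errno.
 */
static int assign_id(void *my_obj)
{
	return idr_alloc_cyclic(&my_idr, my_obj, 1, 1000, GFP_KERNEL);
}
```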
@@ -206,7 +206,7 @@ static void *xas_descend(struct xa_state *xas, struct xa_node *node)
 	void *entry = xa_entry(xas->xa, node, offset);
 
 	xas->xa_node = node;
-	if (xa_is_sibling(entry)) {
+	while (xa_is_sibling(entry)) {
 		offset = xa_to_sibling(entry);
 		entry = xa_entry(xas->xa, node, offset);
 		if (node->shift && xa_is_node(entry))
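Why a `while` rather than an `if`: with a racing multi-order store, the slot a sibling entry points at may itself momentarily hold another sibling entry, so resolution must loop until a non-internal entry is reached. A self-contained userspace sketch of the idea (this is not the XArray's actual sibling encoding; the tagged-pointer scheme below is illustrative only):

```c
#include <stdio.h>

/* Illustrative tagged-pointer encoding: low bit set means "sibling of
 * the slot at the encoded offset". Not the kernel's real representation.
 */
#define SIBLING(off)		((void *)(((unsigned long)(off) << 1) | 1))
#define IS_SIBLING(p)		((unsigned long)(p) & 1)
#define SIBLING_OFFSET(p)	((unsigned long)(p) >> 1)

int main(void)
{
	int value = 42;
	/* Transient chain as seen mid-store: slot 2 -> slot 1 -> slot 0 */
	void *slots[3] = { &value, SIBLING(0), SIBLING(1) };
	void *entry = slots[2];

	/* An 'if' would stop after one hop and hand back SIBLING(0), an
	 * internal entry; the loop keeps resolving until a real entry.
	 */
	while (IS_SIBLING(entry))
		entry = slots[SIBLING_OFFSET(entry)];

	printf("resolved entry: %d\n", *(int *)entry);
	return 0;
}
```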
@@ -1802,6 +1802,9 @@ EXPORT_SYMBOL(xa_get_order);
  * stores the index into the @id pointer, then stores the entry at
  * that index. A concurrent lookup will not see an uninitialised @id.
  *
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
  * Context: Any context. Expects xa_lock to be held on entry. May
  * release and reacquire xa_lock if @gfp flags permit.
  * Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -1850,6 +1853,9 @@ EXPORT_SYMBOL(__xa_alloc);
  * The search for an empty entry will start at @next and will wrap
  * around if necessary.
  *
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
  * Context: Any context. Expects xa_lock to be held on entry. May
  * release and reacquire xa_lock if @gfp flags permit.
  * Return: 0 if the allocation succeeded without wrapping. 1 if the
......
@@ -159,7 +159,7 @@ void multiorder_tagged_iteration(struct xarray *xa)
 	item_kill_tree(xa);
 }
 
-bool stop_iteration = false;
+bool stop_iteration;
 
 static void *creator_func(void *ptr)
 {
@@ -201,6 +201,7 @@ static void multiorder_iteration_race(struct xarray *xa)
 	pthread_t worker_thread[num_threads];
 	int i;
 
+	stop_iteration = false;
 	pthread_create(&worker_thread[0], NULL, &creator_func, xa);
 	for (i = 1; i < num_threads; i++)
 		pthread_create(&worker_thread[i], NULL, &iterator_func, xa);
@@ -211,6 +212,61 @@ static void multiorder_iteration_race(struct xarray *xa)
 	item_kill_tree(xa);
 }
 
+static void *load_creator(void *ptr)
+{
+	/* 'order' is set up to ensure we have sibling entries */
+	unsigned int order;
+	struct radix_tree_root *tree = ptr;
+	int i;
+
+	rcu_register_thread();
+	item_insert_order(tree, 3 << RADIX_TREE_MAP_SHIFT, 0);
+	item_insert_order(tree, 2 << RADIX_TREE_MAP_SHIFT, 0);
+	for (i = 0; i < 10000; i++) {
+		for (order = 1; order < RADIX_TREE_MAP_SHIFT; order++) {
+			unsigned long index = (3 << RADIX_TREE_MAP_SHIFT) -
+						(1 << order);
+			item_insert_order(tree, index, order);
+			item_delete_rcu(tree, index);
+		}
+	}
+	rcu_unregister_thread();
+
+	stop_iteration = true;
+	return NULL;
+}
+
+static void *load_worker(void *ptr)
+{
+	unsigned long index = (3 << RADIX_TREE_MAP_SHIFT) - 1;
+
+	rcu_register_thread();
+	while (!stop_iteration) {
+		struct item *item = xa_load(ptr, index);
+		assert(!xa_is_internal(item));
+	}
+	rcu_unregister_thread();
+
+	return NULL;
+}
+
+static void load_race(struct xarray *xa)
+{
+	const int num_threads = sysconf(_SC_NPROCESSORS_ONLN) * 4;
+	pthread_t worker_thread[num_threads];
+	int i;
+
+	stop_iteration = false;
+	pthread_create(&worker_thread[0], NULL, &load_creator, xa);
+	for (i = 1; i < num_threads; i++)
+		pthread_create(&worker_thread[i], NULL, &load_worker, xa);
+
+	for (i = 0; i < num_threads; i++)
+		pthread_join(worker_thread[i], NULL);
+
+	item_kill_tree(xa);
+}
+
 static DEFINE_XARRAY(array);
 
 void multiorder_checks(void)
@@ -218,12 +274,20 @@ void multiorder_checks(void)
 	multiorder_iteration(&array);
 	multiorder_tagged_iteration(&array);
 	multiorder_iteration_race(&array);
+	load_race(&array);
 
 	radix_tree_cpu_dead(0);
 }
 
-int __weak main(void)
+int __weak main(int argc, char **argv)
 {
+	int opt;
+
+	while ((opt = getopt(argc, argv, "ls:v")) != -1) {
+		if (opt == 'v')
+			test_verbose++;
+	}
+
 	rcu_register_thread();
 	radix_tree_init();
 	multiorder_checks();
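With this option parsing in place, the test binary accepts `-v` to increment test_verbose, e.g. `./multiorder -v` (invocation assumed from the surrounding radix-tree test suite's conventions; the build targets are not shown in this diff).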
......