Commit 6f70eb2b authored by Linus Torvalds

Merge branch 'idr-2018-02-06' of git://git.infradead.org/users/willy/linux-dax

Pull idr fixes from Matthew Wilcox:
 "One test-suite build fix for you and one run-time regression fix.

  The regression fix includes new tests to make sure they don't pop back
  up."

* 'idr-2018-02-06' of git://git.infradead.org/users/willy/linux-dax:
  idr: Fix handling of IDs above INT_MAX
  radix tree test suite: Fix build
parents 4c3579f6 4b0ad076
......@@ -36,8 +36,8 @@ int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
{
struct radix_tree_iter iter;
void __rcu **slot;
int base = idr->idr_base;
int id = *nextid;
unsigned int base = idr->idr_base;
unsigned int id = *nextid;
if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
return -EINVAL;
......@@ -204,10 +204,11 @@ int idr_for_each(const struct idr *idr,
radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
int ret;
unsigned long id = iter.index + base;
if (WARN_ON_ONCE(iter.index > INT_MAX))
if (WARN_ON_ONCE(id > INT_MAX))
break;
ret = fn(iter.index + base, rcu_dereference_raw(*slot), data);
ret = fn(id, rcu_dereference_raw(*slot), data);
if (ret)
return ret;
}
......@@ -230,8 +231,8 @@ void *idr_get_next(struct idr *idr, int *nextid)
{
struct radix_tree_iter iter;
void __rcu **slot;
int base = idr->idr_base;
int id = *nextid;
unsigned long base = idr->idr_base;
unsigned long id = *nextid;
id = (id < base) ? 0 : id - base;
slot = radix_tree_iter_find(&idr->idr_rt, &iter, id);
......
......@@ -178,6 +178,55 @@ void idr_get_next_test(int base)
idr_destroy(&idr);
}
/*
 * Callback for idr_for_each(): every ID the iterator hands back must be
 * representable as a non-negative int, and the stored pointer must be
 * the dummy marker the tests inserted.  Returns 0 so iteration never
 * stops early.
 */
int idr_u32_cb(int id, void *ptr, void *data)
{
	(void)data;	/* unused context argument */

	BUG_ON(id < 0);
	BUG_ON(ptr != DUMMY_PTR);

	return 0;
}
/*
 * Exercise the u32 IDR interface with a single handle.
 *
 * The handle must be allocatable exactly once via idr_alloc_u32(), and a
 * second allocation of the same slot must fail with -ENOSPC.  Handles
 * above INT_MAX are valid through the u32 API but must remain invisible
 * to the int-based helpers: idr_get_next() returns NULL for them and
 * idr_for_each() warns and bails out rather than truncating the ID
 * (those WARNs are expected, hence the "Ignore these warnings" banner,
 * printed only once per run).  Finally the entry is removed and the IDR
 * must be empty again.
 */
void idr_u32_test1(struct idr *idr, u32 handle)
{
	static bool printed = false;
	u32 id = handle;
	int next = 0;
	bool huge = handle > INT_MAX;
	void *entry;

	/* First allocation succeeds and returns the exact handle... */
	BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL));
	BUG_ON(id != handle);
	/* ...a second allocation of the same slot must not. */
	BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL) != -ENOSPC);
	BUG_ON(id != handle);

	if (!printed && huge)
		printk("vvv Ignore these warnings\n");

	/* int-based lookup must skip IDs that do not fit in an int. */
	entry = idr_get_next(idr, &next);
	if (huge) {
		BUG_ON(entry != NULL);
		BUG_ON(next != 0);
	} else {
		BUG_ON(entry != DUMMY_PTR);
		BUG_ON(next != id);
	}

	idr_for_each(idr, idr_u32_cb, NULL);

	if (!printed && huge) {
		printk("^^^ Warnings over\n");
		printed = true;
	}

	BUG_ON(idr_remove(idr, id) != DUMMY_PTR);
	BUG_ON(!idr_is_empty(idr));
}
void idr_u32_test(int base)
{
DEFINE_IDR(idr);
idr_init_base(&idr, base);
idr_u32_test1(&idr, 10);
idr_u32_test1(&idr, 0x7fffffff);
idr_u32_test1(&idr, 0x80000000);
idr_u32_test1(&idr, 0x80000001);
idr_u32_test1(&idr, 0xffe00000);
idr_u32_test1(&idr, 0xffffffff);
}
void idr_checks(void)
{
unsigned long i;
......@@ -248,6 +297,9 @@ void idr_checks(void)
idr_get_next_test(0);
idr_get_next_test(1);
idr_get_next_test(4);
idr_u32_test(4);
idr_u32_test(1);
idr_u32_test(0);
}
/*
......
......@@ -29,7 +29,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
{
struct radix_tree_node *node;
if (flags & __GFP_NOWARN)
if (!(flags & __GFP_DIRECT_RECLAIM))
return NULL;
pthread_mutex_lock(&cachep->lock);
......@@ -73,10 +73,17 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
/*
 * Test-suite stand-in for the kernel's kmalloc().
 *
 * Allocations lacking __GFP_DIRECT_RECLAIM (i.e. non-sleeping/atomic
 * requests) are forced to fail by returning NULL, so the tests can
 * exercise callers' error paths deterministically.  __GFP_ZERO zeroes
 * the buffer, which kzalloc() relies on.  nr_allocated counts
 * outstanding allocations for the suite's leak checks.
 *
 * NOTE: the diffed text carried both the old initialised declaration
 * and the new bare one ("void *ret = malloc(size);" alongside
 * "void *ret;"), which would not compile; this is the merged
 * post-commit form with the allocation deferred until after the
 * reclaim-flag check.
 */
void *kmalloc(size_t size, gfp_t gfp)
{
	void *ret;

	/* Simulate atomic-context allocation failure. */
	if (!(gfp & __GFP_DIRECT_RECLAIM))
		return NULL;

	ret = malloc(size);
	uatomic_inc(&nr_allocated);
	if (kmalloc_verbose)
		printf("Allocating %p from malloc\n", ret);
	if (gfp & __GFP_ZERO)
		memset(ret, 0, size);
	return ret;
}
......
......@@ -11,6 +11,7 @@
#define __GFP_IO 0x40u
#define __GFP_FS 0x80u
#define __GFP_NOWARN 0x200u
#define __GFP_ZERO 0x8000u
#define __GFP_ATOMIC 0x80000u
#define __GFP_ACCOUNT 0x100000u
#define __GFP_DIRECT_RECLAIM 0x400000u
......
......@@ -3,6 +3,7 @@
#define SLAB_H
#include <linux/types.h>
#include <linux/gfp.h>
#define SLAB_HWCACHE_ALIGN 1
#define SLAB_PANIC 2
......@@ -11,6 +12,11 @@
void *kmalloc(size_t size, gfp_t);
void kfree(void *);
/* kzalloc() - zero-initialised allocation; test-suite shim over kmalloc(). */
static inline void *kzalloc(size_t size, gfp_t gfp)
{
	gfp_t flags = gfp | __GFP_ZERO;

	return kmalloc(size, flags);
}
void *kmem_cache_alloc(struct kmem_cache *cachep, int flags);
void kmem_cache_free(struct kmem_cache *cachep, void *objp);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment