Commit 7672dac3 authored by Bob Pearson, committed by Jason Gunthorpe

RDMA/rxe: Address an issue with hardened user copy

Change rxe pools to use kzalloc instead of kmem_cache to allocate memory
for rxe objects. The pools are not really necessary, and they trigger
hardened usercopy warnings because the ioctl framework copies the QP
number directly to userspace.
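
In essence, the patch swaps each per-type slab cache for a plain
kzalloc()/kfree() of the same object size; hardened usercopy validates
kmalloc'ed objects against their allocation size, so no per-cache usercopy
whitelist is needed. A minimal before/after sketch of the allocation site,
using only names that appear in the diff below:

	/* Before: the object comes from a per-type kmem_cache. Copying a
	 * field of it to userspace trips CONFIG_HARDENED_USERCOPY unless
	 * the cache was created with a usercopy whitelist.
	 */
	elem = kmem_cache_zalloc(pool_cache(pool),
				 (pool->flags & RXE_POOL_ATOMIC) ?
				 GFP_ATOMIC : GFP_KERNEL);

	/* After: the object comes from the generic kmalloc caches, which
	 * the hardened usercopy checks validate by size, no whitelist
	 * required.
	 */
	elem = kzalloc(rxe_type_info[pool->type].size,
		       (pool->flags & RXE_POOL_ATOMIC) ?
		       GFP_ATOMIC : GFP_KERNEL);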

The ongoing project to move object allocation to the core code will
eventually remove these pools anyway.

Link: https://lore.kernel.org/r/20200827163535.2632-1-rpearson@hpe.com
Signed-off-by: Bob Pearson <rpearson@hpe.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 63fa15db
@@ -282,13 +282,6 @@ static int __init rxe_module_init(void)
 {
 	int err;
 
-	/* initialize slab caches for managed objects */
-	err = rxe_cache_init();
-	if (err) {
-		pr_err("unable to init object pools\n");
-		return err;
-	}
-
 	err = rxe_net_init();
 	if (err)
 		return err;
@@ -303,7 +296,6 @@ static void __exit rxe_module_exit(void)
 	rdma_link_unregister(&rxe_link_ops);
 	ib_unregister_driver(RDMA_DRIVER_RXE);
 	rxe_net_exit();
-	rxe_cache_exit();
 
 	pr_info("unloaded\n");
 }
...
@@ -83,62 +83,6 @@ static inline const char *pool_name(struct rxe_pool *pool)
 	return rxe_type_info[pool->type].name;
 }
 
-static inline struct kmem_cache *pool_cache(struct rxe_pool *pool)
-{
-	return rxe_type_info[pool->type].cache;
-}
-
-static void rxe_cache_clean(size_t cnt)
-{
-	int i;
-	struct rxe_type_info *type;
-
-	for (i = 0; i < cnt; i++) {
-		type = &rxe_type_info[i];
-		if (!(type->flags & RXE_POOL_NO_ALLOC)) {
-			kmem_cache_destroy(type->cache);
-			type->cache = NULL;
-		}
-	}
-}
-
-int rxe_cache_init(void)
-{
-	int err;
-	int i;
-	size_t size;
-	struct rxe_type_info *type;
-
-	for (i = 0; i < RXE_NUM_TYPES; i++) {
-		type = &rxe_type_info[i];
-		size = ALIGN(type->size, RXE_POOL_ALIGN);
-		if (!(type->flags & RXE_POOL_NO_ALLOC)) {
-			type->cache =
-				kmem_cache_create(type->name, size,
-						  RXE_POOL_ALIGN,
-						  RXE_POOL_CACHE_FLAGS, NULL);
-			if (!type->cache) {
-				pr_err("Unable to init kmem cache for %s\n",
-				       type->name);
-				err = -ENOMEM;
-				goto err1;
-			}
-		}
-	}
-
-	return 0;
-
-err1:
-	rxe_cache_clean(i);
-
-	return err;
-}
-
-void rxe_cache_exit(void)
-{
-	rxe_cache_clean(RXE_NUM_TYPES);
-}
-
 static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
 {
 	int err = 0;
@@ -379,7 +323,7 @@ void *rxe_alloc(struct rxe_pool *pool)
 	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
 		goto out_cnt;
 
-	elem = kmem_cache_zalloc(pool_cache(pool),
+	elem = kzalloc(rxe_type_info[pool->type].size,
 			 (pool->flags & RXE_POOL_ATOMIC) ?
 			 GFP_ATOMIC : GFP_KERNEL);
 	if (!elem)
@@ -441,7 +385,7 @@ void rxe_elem_release(struct kref *kref)
 		pool->cleanup(elem);
 
 	if (!(pool->flags & RXE_POOL_NO_ALLOC))
-		kmem_cache_free(pool_cache(pool), elem);
+		kfree(elem);
 	atomic_dec(&pool->num_elem);
 	ib_device_put(&pool->rxe->ib_dev);
 	rxe_pool_put(pool);
...
@@ -42,7 +42,6 @@ struct rxe_type_info {
 	u32			min_index;
 	size_t			key_offset;
 	size_t			key_size;
-	struct kmem_cache	*cache;
 };
 
 extern struct rxe_type_info rxe_type_info[];
@@ -86,12 +85,6 @@ struct rxe_pool {
 	size_t			key_size;
 };
 
-/* initialize slab caches for managed objects */
-int rxe_cache_init(void);
-
-/* cleanup slab caches for managed objects */
-void rxe_cache_exit(void);
-
 /* initialize a pool of objects with given limit on
  * number of elements. gets parameters from rxe_type_info
  * pool elements will be allocated out of a slab cache