Commit 2622aa71 authored by Bob Pearson, committed by Jason Gunthorpe

RDMA/rxe: Make pool lookup and alloc APIs type safe

The allocate, lookup-by-index, lookup-by-key and cleanup routines in rxe_pool.c
are currently not type safe against relocating the pelem field within the
objects. Planned changes to move allocation of objects into rdma-core make
addressing this a requirement.

Use the elem_offset field in rxe_type_info to make these APIs safe against
moving the pelem field.

Link: https://lore.kernel.org/r/20201216231550.27224-5-rpearson@hpe.com
Signed-off-by: Bob Pearson <rpearson@hpe.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent b994d49e
@@ -315,7 +315,9 @@ void rxe_drop_index(void *arg)
 
 void *rxe_alloc(struct rxe_pool *pool)
 {
+	struct rxe_type_info *info = &rxe_type_info[pool->type];
 	struct rxe_pool_entry *elem;
+	u8 *obj;
 	unsigned long flags;
 
 	might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
@@ -334,16 +336,17 @@ void *rxe_alloc(struct rxe_pool *pool)
 	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
 		goto out_cnt;
 
-	elem = kzalloc(rxe_type_info[pool->type].size,
-			 (pool->flags & RXE_POOL_ATOMIC) ?
-			 GFP_ATOMIC : GFP_KERNEL);
-	if (!elem)
+	obj = kzalloc(info->size, (pool->flags & RXE_POOL_ATOMIC) ?
+		      GFP_ATOMIC : GFP_KERNEL);
+	if (!obj)
 		goto out_cnt;
 
+	elem = (struct rxe_pool_entry *)(obj + info->elem_offset);
+
 	elem->pool = pool;
 	kref_init(&elem->ref_cnt);
 
-	return elem;
+	return obj;
 
 out_cnt:
 	atomic_dec(&pool->num_elem);
@@ -391,12 +394,17 @@ void rxe_elem_release(struct kref *kref)
 	struct rxe_pool_entry *elem =
 		container_of(kref, struct rxe_pool_entry, ref_cnt);
 	struct rxe_pool *pool = elem->pool;
+	struct rxe_type_info *info = &rxe_type_info[pool->type];
+	u8 *obj;
 
 	if (pool->cleanup)
 		pool->cleanup(elem);
 
-	if (!(pool->flags & RXE_POOL_NO_ALLOC))
-		kfree(elem);
+	if (!(pool->flags & RXE_POOL_NO_ALLOC)) {
+		obj = (u8 *)elem - info->elem_offset;
+		kfree(obj);
+	}
+
 	atomic_dec(&pool->num_elem);
 	ib_device_put(&pool->rxe->ib_dev);
 	rxe_pool_put(pool);
@@ -404,8 +412,10 @@ void rxe_elem_release(struct kref *kref)
 
 void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
 {
-	struct rb_node *node = NULL;
-	struct rxe_pool_entry *elem = NULL;
+	struct rxe_type_info *info = &rxe_type_info[pool->type];
+	struct rb_node *node;
+	struct rxe_pool_entry *elem;
+	u8 *obj = NULL;
 	unsigned long flags;
 
 	read_lock_irqsave(&pool->pool_lock, flags);
@@ -422,21 +432,28 @@ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
 		if (elem->index > index)
 			node = node->rb_left;
 		else if (elem->index < index)
 			node = node->rb_right;
-		else {
-			kref_get(&elem->ref_cnt);
+		else
 			break;
-		}
 	}
 
+	if (node) {
+		kref_get(&elem->ref_cnt);
+		obj = (u8 *)elem - info->elem_offset;
+	} else {
+		obj = NULL;
+	}
+
 out:
 	read_unlock_irqrestore(&pool->pool_lock, flags);
-	return node ? elem : NULL;
+	return obj;
 }
 
 void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
 {
-	struct rb_node *node = NULL;
-	struct rxe_pool_entry *elem = NULL;
+	struct rxe_type_info *info = &rxe_type_info[pool->type];
+	struct rb_node *node;
+	struct rxe_pool_entry *elem;
+	u8 *obj = NULL;
 	int cmp;
 	unsigned long flags;
@@ -461,10 +478,14 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
 			break;
 	}
 
-	if (node)
+	if (node) {
 		kref_get(&elem->ref_cnt);
+		obj = (u8 *)elem - info->elem_offset;
+	} else {
+		obj = NULL;
+	}
 
 out:
 	read_unlock_irqrestore(&pool->pool_lock, flags);
-	return node ? elem : NULL;
+	return obj;
 }
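
For readers unfamiliar with the pattern, the conversion this patch relies on can be illustrated outside the kernel. The program below is only a minimal standalone sketch of the same idea, not the rxe code: struct demo_qp, struct demo_entry, demo_type_info and the helper names are invented for illustration, and plain calloc/offsetof stand in for kzalloc and the rxe_type_info table. The point it demonstrates is that the pool code only needs the entry's offset within the containing object (elem_offset) to convert between the object pointer and the embedded entry pointer in either direction, so the entry field no longer has to be the first member.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for struct rxe_pool_entry: the bookkeeping struct the pool sees. */
struct demo_entry {
	int index;
};

/* A hypothetical pool object; the entry is deliberately NOT the first field. */
struct demo_qp {
	char name[16];
	struct demo_entry pelem;
	int qpn;
};

/* Stand-in for rxe_type_info: per-type object size and offset of the entry. */
struct demo_type_info {
	size_t size;
	size_t elem_offset;
};

static const struct demo_type_info qp_info = {
	.size        = sizeof(struct demo_qp),
	.elem_offset = offsetof(struct demo_qp, pelem),
};

/* Like rxe_alloc(): allocate the whole object, initialize the embedded entry
 * found at elem_offset, and return the object pointer. */
static void *demo_alloc(const struct demo_type_info *info)
{
	unsigned char *obj = calloc(1, info->size);
	struct demo_entry *elem;

	if (!obj)
		return NULL;

	elem = (struct demo_entry *)(obj + info->elem_offset);
	elem->index = 42;	/* pretend the pool assigned an index */

	return obj;
}

/* Like the reworked lookup paths: given only the entry pointer, recover the
 * containing object by subtracting elem_offset. */
static void *demo_entry_to_obj(const struct demo_type_info *info,
			       struct demo_entry *elem)
{
	return (unsigned char *)elem - info->elem_offset;
}

int main(void)
{
	struct demo_qp *qp = demo_alloc(&qp_info);

	if (!qp)
		return 1;

	strcpy(qp->name, "qp0");

	/* Round-trip: object -> entry -> object yields the same pointer. */
	struct demo_entry *elem =
		(struct demo_entry *)((unsigned char *)qp + qp_info.elem_offset);
	struct demo_qp *back = demo_entry_to_obj(&qp_info, elem);

	printf("%s index=%d same=%d\n", back->name, elem->index, back == qp);

	free(qp);
	return 0;
}

This mirrors why the patched rxe_alloc(), rxe_pool_get_index() and rxe_pool_get_key() return obj rather than elem: callers always receive the object pointer, wherever pelem ends up inside the object.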