Commit 6003cb46 authored by Tomer Tayar, committed by Oded Gabbay

habanalabs: don't allow user to destroy CB handle more than once

The refcount of a CB buffer is initialized when a user allocates a CB,
and is decreased when the user destroys the CB handle.

If this refcount is also increased from the kernel side and the user sends
more than one destroy request for the handle, the buffer will be
released/freed prematurely and later accessed when the kernel-side
reference is put.

To avoid this, prevent the user from destroying the handle more than once.
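
As an illustration only (not part of the patch), below is a minimal standalone
sketch of the guard the patch adds in hl_cb_destroy(), using C11 atomics in
place of the kernel's atomic_cmpxchg(): the compare-and-exchange returns
whether the old value was still 0, so only the first destroy request flips the
flag and proceeds, while any repeated request is rejected. The names
destroy_handle_once() and struct cb here are hypothetical, not driver API.

/*
 * Userspace sketch of the "destroy the handle only once" guard.
 * Build with: cc -std=c11 sketch.c
 */
#include <stdatomic.h>
#include <stdio.h>

struct cb {
	atomic_int is_handle_destroyed;	/* 0 = live, 1 = handle already destroyed */
};

/* Returns 0 for the first destroy request, -1 for any duplicate. */
static int destroy_handle_once(struct cb *cb)
{
	int expected = 0;

	/* Only the caller that flips 0 -> 1 is allowed to drop the reference. */
	if (!atomic_compare_exchange_strong(&cb->is_handle_destroyed, &expected, 1))
		return -1;	/* flag was already set: reject the duplicate */

	return 0;
}

int main(void)
{
	struct cb cb = { .is_handle_destroyed = 0 };

	printf("first destroy:  %d\n", destroy_handle_once(&cb));	/* prints 0  */
	printf("second destroy: %d\n", destroy_handle_once(&cb));	/* prints -1 */
	return 0;
}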
Signed-off-by: Tomer Tayar <ttayar@habana.ai>
Reviewed-by: Oded Gabbay <ogabbay@kernel.org>
Signed-off-by: Oded Gabbay <ogabbay@kernel.org>
parent 6710444c
@@ -298,9 +298,31 @@ int hl_cb_create(struct hl_device *hdev, struct hl_mem_mgr *mmg,
 
 int hl_cb_destroy(struct hl_mem_mgr *mmg, u64 cb_handle)
 {
+	struct hl_cb *cb;
 	int rc;
 
+	/* Make sure that a CB handle isn't destroyed by user more than once */
+	if (!mmg->is_kernel_mem_mgr) {
+		cb = hl_cb_get(mmg, cb_handle);
+		if (!cb) {
+			dev_dbg(mmg->dev, "CB destroy failed, no CB was found for handle %#llx\n",
+				cb_handle);
+			rc = -EINVAL;
+			goto out;
+		}
+
+		rc = atomic_cmpxchg(&cb->is_handle_destroyed, 0, 1);
+		hl_cb_put(cb);
+		if (rc) {
+			dev_dbg(mmg->dev, "CB destroy failed, handle %#llx was already destroyed\n",
+				cb_handle);
+			rc = -EINVAL;
+			goto out;
+		}
+	}
+
 	rc = hl_mmap_mem_buf_put_handle(mmg, cb_handle);
+out:
 	if (rc < 0)
 		return rc; /* Invalid handle */
...
@@ -853,7 +853,7 @@ static int device_early_init(struct hl_device *hdev)
 	if (rc)
 		goto free_chip_info;
 
-	hl_mem_mgr_init(hdev->dev, &hdev->kernel_mem_mgr);
+	hl_mem_mgr_init(hdev->dev, &hdev->kernel_mem_mgr, 1);
 
 	hdev->reset_wq = create_singlethread_workqueue("hl_device_reset");
 	if (!hdev->reset_wq) {
...
@@ -872,11 +872,13 @@ struct hl_mmap_mem_buf;
  * @dev: back pointer to the owning device
  * @lock: protects handles
  * @handles: an idr holding all active handles to the memory buffers in the system.
+ * @is_kernel_mem_mgr: indicate whether the memory manager is the per-device kernel memory manager
  */
 struct hl_mem_mgr {
 	struct device *dev;
 	spinlock_t lock;
 	struct idr handles;
+	u8 is_kernel_mem_mgr;
 };
 
 /**
@@ -935,6 +937,7 @@ struct hl_mmap_mem_buf {
  * @size: holds the CB's size.
  * @roundup_size: holds the cb size after roundup to page size.
  * @cs_cnt: holds number of CS that this CB participates in.
+ * @is_handle_destroyed: atomic boolean indicating whether or not the CB handle was destroyed.
  * @is_pool: true if CB was acquired from the pool, false otherwise.
  * @is_internal: internally allocated
  * @is_mmu_mapped: true if the CB is mapped to the device's MMU.
@@ -951,6 +954,7 @@ struct hl_cb {
 	u32 size;
 	u32 roundup_size;
 	atomic_t cs_cnt;
+	atomic_t is_handle_destroyed;
 	u8 is_pool;
 	u8 is_internal;
 	u8 is_mmu_mapped;
@@ -3805,7 +3809,7 @@ __printf(4, 5) int hl_snprintf_resize(char **buf, size_t *size, size_t *offset,
 char *hl_format_as_binary(char *buf, size_t buf_len, u32 n);
 const char *hl_sync_engine_to_string(enum hl_sync_engine_type engine_type);
-void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg);
+void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg, u8 is_kernel_mem_mgr);
 void hl_mem_mgr_fini(struct hl_mem_mgr *mmg);
 int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
 		    void *args);
...
@@ -164,7 +164,7 @@ int hl_device_open(struct inode *inode, struct file *filp)
 	nonseekable_open(inode, filp);
 
 	hl_ctx_mgr_init(&hpriv->ctx_mgr);
-	hl_mem_mgr_init(hpriv->hdev->dev, &hpriv->mem_mgr);
+	hl_mem_mgr_init(hpriv->hdev->dev, &hpriv->mem_mgr, 0);
 
 	hpriv->taskpid = get_task_pid(current, PIDTYPE_PID);
...
@@ -309,14 +309,16 @@ int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
  *
  * @dev: owner device pointer
  * @mmg: structure to initialize
+ * @is_kernel_mem_mgr: indicate whether the memory manager is the per-device kernel memory manager
  *
  * Initialize an instance of unified memory manager
  */
-void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg)
+void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg, u8 is_kernel_mem_mgr)
 {
 	mmg->dev = dev;
 	spin_lock_init(&mmg->lock);
 	idr_init(&mmg->handles);
+	mmg->is_kernel_mem_mgr = is_kernel_mem_mgr;
 }
 
 /**
...