Commit f1cc6ace authored by Jacek Lawrynowicz

accel/ivpu: Fix dev open/close races with unbind

  - Add context_list_lock to synchronize user context addition/removal
  - Use drm_dev_enter() to prevent unbinding the device during ivpu_open()
    and vpu address allocation
Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Reviewed-by: Wachowski, Karol <karol.wachowski@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240122120945.1150728-2-jacek.lawrynowicz@linux.intel.com
parent d1b163aa
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#include <linux/firmware.h> #include <linux/firmware.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_accel.h> #include <drm/drm_accel.h>
#include <drm/drm_file.h> #include <drm/drm_file.h>
...@@ -66,22 +67,20 @@ struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv) ...@@ -66,22 +67,20 @@ struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
return file_priv; return file_priv;
} }
struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id) static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv)
{ {
struct ivpu_file_priv *file_priv; mutex_lock(&file_priv->lock);
if (file_priv->bound) {
xa_lock_irq(&vdev->context_xa); ivpu_dbg(vdev, FILE, "file_priv unbind: ctx %u\n", file_priv->ctx.id);
file_priv = xa_load(&vdev->context_xa, id);
/* file_priv may still be in context_xa during file_priv_release() */ ivpu_cmdq_release_all_locked(file_priv);
if (file_priv && !kref_get_unless_zero(&file_priv->ref)) ivpu_jsm_context_release(vdev, file_priv->ctx.id);
file_priv = NULL; ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
xa_unlock_irq(&vdev->context_xa); ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
file_priv->bound = false;
if (file_priv) drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
ivpu_dbg(vdev, KREF, "file_priv get by id: ctx %u refcount %u\n", }
file_priv->ctx.id, kref_read(&file_priv->ref)); mutex_unlock(&file_priv->lock);
return file_priv;
} }
static void file_priv_release(struct kref *ref) static void file_priv_release(struct kref *ref)
...@@ -89,13 +88,15 @@ static void file_priv_release(struct kref *ref) ...@@ -89,13 +88,15 @@ static void file_priv_release(struct kref *ref)
struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref); struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
struct ivpu_device *vdev = file_priv->vdev; struct ivpu_device *vdev = file_priv->vdev;
ivpu_dbg(vdev, FILE, "file_priv release: ctx %u\n", file_priv->ctx.id); ivpu_dbg(vdev, FILE, "file_priv release: ctx %u bound %d\n",
file_priv->ctx.id, (bool)file_priv->bound);
pm_runtime_get_sync(vdev->drm.dev);
mutex_lock(&vdev->context_list_lock);
file_priv_unbind(vdev, file_priv);
mutex_unlock(&vdev->context_list_lock);
pm_runtime_put_autosuspend(vdev->drm.dev);
ivpu_cmdq_release_all(file_priv);
ivpu_jsm_context_release(vdev, file_priv->ctx.id);
ivpu_bo_remove_all_bos_from_context(vdev, &file_priv->ctx);
ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
drm_WARN_ON(&vdev->drm, xa_erase_irq(&vdev->context_xa, file_priv->ctx.id) != file_priv);
mutex_destroy(&file_priv->lock); mutex_destroy(&file_priv->lock);
kfree(file_priv); kfree(file_priv);
} }
...@@ -232,49 +233,53 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file) ...@@ -232,49 +233,53 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
struct ivpu_device *vdev = to_ivpu_device(dev); struct ivpu_device *vdev = to_ivpu_device(dev);
struct ivpu_file_priv *file_priv; struct ivpu_file_priv *file_priv;
u32 ctx_id; u32 ctx_id;
void *old; int idx, ret;
int ret;
ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, NULL, vdev->context_xa_limit, GFP_KERNEL); if (!drm_dev_enter(dev, &idx))
if (ret) { return -ENODEV;
ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
return ret;
}
file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
if (!file_priv) { if (!file_priv) {
ret = -ENOMEM; ret = -ENOMEM;
goto err_xa_erase; goto err_dev_exit;
} }
file_priv->vdev = vdev; file_priv->vdev = vdev;
file_priv->bound = true;
kref_init(&file_priv->ref); kref_init(&file_priv->ref);
mutex_init(&file_priv->lock); mutex_init(&file_priv->lock);
mutex_lock(&vdev->context_list_lock);
ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, file_priv,
vdev->context_xa_limit, GFP_KERNEL);
if (ret) {
ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
goto err_unlock;
}
ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id); ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
if (ret) if (ret)
goto err_mutex_destroy; goto err_xa_erase;
old = xa_store_irq(&vdev->context_xa, ctx_id, file_priv, GFP_KERNEL); mutex_unlock(&vdev->context_list_lock);
if (xa_is_err(old)) { drm_dev_exit(idx);
ret = xa_err(old);
ivpu_err(vdev, "Failed to store context %u: %d\n", ctx_id, ret); file->driver_priv = file_priv;
goto err_ctx_fini;
}
ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n", ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
ctx_id, current->comm, task_pid_nr(current)); ctx_id, current->comm, task_pid_nr(current));
file->driver_priv = file_priv;
return 0; return 0;
err_ctx_fini:
ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
err_mutex_destroy:
mutex_destroy(&file_priv->lock);
kfree(file_priv);
err_xa_erase: err_xa_erase:
xa_erase_irq(&vdev->context_xa, ctx_id); xa_erase_irq(&vdev->context_xa, ctx_id);
err_unlock:
mutex_unlock(&vdev->context_list_lock);
mutex_destroy(&file_priv->lock);
kfree(file_priv);
err_dev_exit:
drm_dev_exit(idx);
return ret; return ret;
} }
...@@ -531,6 +536,10 @@ static int ivpu_dev_init(struct ivpu_device *vdev) ...@@ -531,6 +536,10 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key); lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
INIT_LIST_HEAD(&vdev->bo_list); INIT_LIST_HEAD(&vdev->bo_list);
ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
if (ret)
goto err_xa_destroy;
ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock); ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
if (ret) if (ret)
goto err_xa_destroy; goto err_xa_destroy;
...@@ -602,14 +611,30 @@ static int ivpu_dev_init(struct ivpu_device *vdev) ...@@ -602,14 +611,30 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
return ret; return ret;
} }
static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
{
struct ivpu_file_priv *file_priv;
unsigned long ctx_id;
mutex_lock(&vdev->context_list_lock);
xa_for_each(&vdev->context_xa, ctx_id, file_priv)
file_priv_unbind(vdev, file_priv);
mutex_unlock(&vdev->context_list_lock);
}
static void ivpu_dev_fini(struct ivpu_device *vdev) static void ivpu_dev_fini(struct ivpu_device *vdev)
{ {
ivpu_pm_disable(vdev); ivpu_pm_disable(vdev);
ivpu_shutdown(vdev); ivpu_shutdown(vdev);
if (IVPU_WA(d3hot_after_power_off)) if (IVPU_WA(d3hot_after_power_off))
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot); pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
ivpu_jobs_abort_all(vdev);
ivpu_job_done_consumer_fini(vdev); ivpu_job_done_consumer_fini(vdev);
ivpu_pm_cancel_recovery(vdev); ivpu_pm_cancel_recovery(vdev);
ivpu_bo_unbind_all_user_contexts(vdev);
ivpu_ipc_fini(vdev); ivpu_ipc_fini(vdev);
ivpu_fw_fini(vdev); ivpu_fw_fini(vdev);
......
...@@ -115,6 +115,7 @@ struct ivpu_device { ...@@ -115,6 +115,7 @@ struct ivpu_device {
struct ivpu_mmu_context gctx; struct ivpu_mmu_context gctx;
struct ivpu_mmu_context rctx; struct ivpu_mmu_context rctx;
struct mutex context_list_lock; /* Protects user context addition/removal */
struct xarray context_xa; struct xarray context_xa;
struct xa_limit context_xa_limit; struct xa_limit context_xa_limit;
...@@ -147,6 +148,7 @@ struct ivpu_file_priv { ...@@ -147,6 +148,7 @@ struct ivpu_file_priv {
struct ivpu_cmdq *cmdq[IVPU_NUM_ENGINES]; struct ivpu_cmdq *cmdq[IVPU_NUM_ENGINES];
struct ivpu_mmu_context ctx; struct ivpu_mmu_context ctx;
bool has_mmu_faults; bool has_mmu_faults;
bool bound;
}; };
extern int ivpu_dbg_mask; extern int ivpu_dbg_mask;
...@@ -162,7 +164,6 @@ extern bool ivpu_disable_mmu_cont_pages; ...@@ -162,7 +164,6 @@ extern bool ivpu_disable_mmu_cont_pages;
extern int ivpu_test_mode; extern int ivpu_test_mode;
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv); struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv);
struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id);
void ivpu_file_priv_put(struct ivpu_file_priv **link); void ivpu_file_priv_put(struct ivpu_file_priv **link);
int ivpu_boot(struct ivpu_device *vdev); int ivpu_boot(struct ivpu_device *vdev);
......
...@@ -77,7 +77,10 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx, ...@@ -77,7 +77,10 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
const struct ivpu_addr_range *range) const struct ivpu_addr_range *range)
{ {
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
int ret; int idx, ret;
if (!drm_dev_enter(&vdev->drm, &idx))
return -ENODEV;
mutex_lock(&bo->lock); mutex_lock(&bo->lock);
...@@ -93,6 +96,8 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx, ...@@ -93,6 +96,8 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
mutex_unlock(&bo->lock); mutex_unlock(&bo->lock);
drm_dev_exit(idx);
return ret; return ret;
} }
...@@ -128,14 +133,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo) ...@@ -128,14 +133,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
dma_resv_unlock(bo->base.base.resv); dma_resv_unlock(bo->base.base.resv);
} }
static void ivpu_bo_unbind(struct ivpu_bo *bo) void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
mutex_lock(&bo->lock);
ivpu_bo_unbind_locked(bo);
mutex_unlock(&bo->lock);
}
void ivpu_bo_remove_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{ {
struct ivpu_bo *bo; struct ivpu_bo *bo;
...@@ -239,7 +237,7 @@ static void ivpu_bo_free(struct drm_gem_object *obj) ...@@ -239,7 +237,7 @@ static void ivpu_bo_free(struct drm_gem_object *obj)
drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ)); drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
ivpu_bo_unbind(bo); ivpu_bo_unbind_locked(bo);
mutex_destroy(&bo->lock); mutex_destroy(&bo->lock);
drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1); drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
......
...@@ -25,7 +25,7 @@ struct ivpu_bo { ...@@ -25,7 +25,7 @@ struct ivpu_bo {
}; };
int ivpu_bo_pin(struct ivpu_bo *bo); int ivpu_bo_pin(struct ivpu_bo *bo);
void ivpu_bo_remove_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx); void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size); struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size);
struct ivpu_bo *ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags); struct ivpu_bo *ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags);
......
...@@ -112,16 +112,14 @@ static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engin ...@@ -112,16 +112,14 @@ static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engin
} }
} }
void ivpu_cmdq_release_all(struct ivpu_file_priv *file_priv) void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
{ {
int i; int i;
mutex_lock(&file_priv->lock); lockdep_assert_held(&file_priv->lock);
for (i = 0; i < IVPU_NUM_ENGINES; i++) for (i = 0; i < IVPU_NUM_ENGINES; i++)
ivpu_cmdq_release_locked(file_priv, i); ivpu_cmdq_release_locked(file_priv, i);
mutex_unlock(&file_priv->lock);
} }
/* /*
...@@ -161,15 +159,13 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev) ...@@ -161,15 +159,13 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
struct ivpu_file_priv *file_priv; struct ivpu_file_priv *file_priv;
unsigned long ctx_id; unsigned long ctx_id;
xa_for_each(&vdev->context_xa, ctx_id, file_priv) { mutex_lock(&vdev->context_list_lock);
file_priv = ivpu_file_priv_get_by_ctx_id(vdev, ctx_id);
if (!file_priv)
continue;
xa_for_each(&vdev->context_xa, ctx_id, file_priv)
ivpu_cmdq_reset_all(file_priv); ivpu_cmdq_reset_all(file_priv);
ivpu_file_priv_put(&file_priv); mutex_unlock(&vdev->context_list_lock);
}
} }
static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job) static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
......
...@@ -56,7 +56,7 @@ struct ivpu_job { ...@@ -56,7 +56,7 @@ struct ivpu_job {
int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
void ivpu_cmdq_release_all(struct ivpu_file_priv *file_priv); void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv);
void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev); void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev);
void ivpu_job_done_consumer_init(struct ivpu_device *vdev); void ivpu_job_done_consumer_init(struct ivpu_device *vdev);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment