Commit cb1e3818 authored by Rob Clark

drm/msm: fix locking inconsistency for gpu->hw_init()

Most, but not all, paths were calling hw_init() with struct_mutex held.  The
fast path in msm_gem_get_iova() (plus some sub-code-paths that only run
the first time) was masking this issue.
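
To illustrate the masking: msm_gem_get_iova() only takes struct_mutex the
first time a buffer is mapped, so a caller missing the lock still appeared
to work whenever the iova already existed.  A simplified sketch of that
shape (condensed from msm_gem.c below; error handling trimmed):

	int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
	{
		struct msm_gem_object *msm_obj = to_msm_bo(obj);
		int ret;

		/* Fast path: the iova is kept until the bo is deleted, so a
		 * repeat lookup needs no locking -- which is exactly what hid
		 * the callers that forgot struct_mutex. */
		if (msm_obj->domain[id].iova) {
			*iova = msm_obj->domain[id].iova;
			return 0;
		}

		/* Slow path, first call only: here the lock actually matters. */
		mutex_lock(&obj->dev->struct_mutex);
		ret = msm_gem_get_iova_locked(obj, id, iova);
		mutex_unlock(&obj->dev->struct_mutex);
		return ret;
	}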

So let's just always hold struct_mutex for hw_init().  And sprinkle some
WARN_ON()s and might_lock()s to avoid this sort of problem in the
future.
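
Concretely, the contract after this patch (a condensed sketch of the hunks
below, not new API): the caller of msm_gpu_hw_init() holds struct_mutex, the
callee asserts it, and might_lock() in the msm_gem_get_iova() fast path tells
lockdep "this path may take the lock", so lock-ordering problems are still
reported on runs that never leave the fast path:

	/* caller side (adreno_load_gpu): */
	mutex_lock(&dev->struct_mutex);
	ret = msm_gpu_hw_init(gpu);
	mutex_unlock(&dev->struct_mutex);

	/* callee side: a missed lock now fails loudly instead of silently */
	WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));

	/* fast path: no real acquisition, but lockdep validates as if */
	might_lock(&obj->dev->struct_mutex);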
Signed-off-by: Rob Clark <robdclark@gmail.com>
parent 42a105e9
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -297,31 +297,28 @@ static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
 	struct drm_gem_object *bo;
 	void *ptr;
 
-	mutex_lock(&drm->struct_mutex);
 	bo = msm_gem_new(drm, fw->size - 4, MSM_BO_UNCACHED);
-	mutex_unlock(&drm->struct_mutex);
-
 	if (IS_ERR(bo))
 		return bo;
 
-	ptr = msm_gem_get_vaddr(bo);
+	ptr = msm_gem_get_vaddr_locked(bo);
 	if (!ptr) {
-		drm_gem_object_unreference_unlocked(bo);
+		drm_gem_object_unreference(bo);
 		return ERR_PTR(-ENOMEM);
 	}
 
 	if (iova) {
-		int ret = msm_gem_get_iova(bo, gpu->id, iova);
+		int ret = msm_gem_get_iova_locked(bo, gpu->id, iova);
 
 		if (ret) {
-			drm_gem_object_unreference_unlocked(bo);
+			drm_gem_object_unreference(bo);
 			return ERR_PTR(ret);
 		}
 	}
 
 	memcpy(ptr, &fw->data[4], fw->size - 4);
 
-	msm_gem_put_vaddr(bo);
+	msm_gem_put_vaddr_locked(bo);
 
 	return bo;
 }
drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -294,17 +294,14 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
 	 */
 	bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
 
-	mutex_lock(&drm->struct_mutex);
 	a5xx_gpu->gpmu_bo = msm_gem_new(drm, bosize, MSM_BO_UNCACHED);
-	mutex_unlock(&drm->struct_mutex);
-
 	if (IS_ERR(a5xx_gpu->gpmu_bo))
 		goto err;
 
-	if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->id, &a5xx_gpu->gpmu_iova))
+	if (msm_gem_get_iova_locked(a5xx_gpu->gpmu_bo, gpu->id, &a5xx_gpu->gpmu_iova))
 		goto err;
 
-	ptr = msm_gem_get_vaddr(a5xx_gpu->gpmu_bo);
+	ptr = msm_gem_get_vaddr_locked(a5xx_gpu->gpmu_bo);
 	if (!ptr)
 		goto err;
 
@@ -323,7 +320,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
 		cmds_size -= _size;
 	}
 
-	msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
+	msm_gem_put_vaddr_locked(a5xx_gpu->gpmu_bo);
 	a5xx_gpu->gpmu_dwords = dwords;
 
 	goto out;
@@ -332,7 +329,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
 	if (a5xx_gpu->gpmu_iova)
 		msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
 	if (a5xx_gpu->gpmu_bo)
-		drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
+		drm_gem_object_unreference(a5xx_gpu->gpmu_bo);
 
 	a5xx_gpu->gpmu_bo = NULL;
 	a5xx_gpu->gpmu_iova = 0;
drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -159,7 +159,9 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
 		int ret;
 
 		pm_runtime_get_sync(&pdev->dev);
+		mutex_lock(&dev->struct_mutex);
 		ret = msm_gpu_hw_init(gpu);
+		mutex_unlock(&dev->struct_mutex);
 		pm_runtime_put_sync(&pdev->dev);
 		if (ret) {
 			dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -64,7 +64,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
 	DBG("%s", gpu->name);
 
-	ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
+	ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
 	if (ret) {
 		gpu->rb_iova = 0;
 		dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
drivers/gpu/drm/msm/msm_gem.c
@@ -314,6 +314,8 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int ret = 0;
 
+	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+
 	if (!msm_obj->domain[id].iova) {
 		struct msm_drm_private *priv = obj->dev->dev_private;
 		struct page **pages = get_pages(obj);
@@ -345,6 +347,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
 	 * bo is deleted:
 	 */
 	if (msm_obj->domain[id].iova) {
+		might_lock(&obj->dev->struct_mutex);
 		*iova = msm_obj->domain[id].iova;
 		return 0;
 	}
drivers/gpu/drm/msm/msm_gpu.c
@@ -203,6 +203,8 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
 {
 	int ret;
 
+	WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));
+
 	if (!gpu->needs_hw_init)
 		return 0;