Commit dd80d9c8 authored by Christian König, committed by Alex Deucher

drm/amdgpu: revert "partial revert "remove ctx->lock" v2"

This reverts commit 94f4c496.

We found that the bo_list is missing a protection for its list entries.
Since that is fixed now this workaround can be removed again.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 736ec9fa
...@@ -128,8 +128,6 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs ...@@ -128,8 +128,6 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
goto free_chunk; goto free_chunk;
} }
mutex_lock(&p->ctx->lock);
/* skip guilty context job */ /* skip guilty context job */
if (atomic_read(&p->ctx->guilty) == 1) { if (atomic_read(&p->ctx->guilty) == 1) {
ret = -ECANCELED; ret = -ECANCELED;
...@@ -691,7 +689,6 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, ...@@ -691,7 +689,6 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
dma_fence_put(parser->fence); dma_fence_put(parser->fence);
if (parser->ctx) { if (parser->ctx) {
mutex_unlock(&parser->ctx->lock);
amdgpu_ctx_put(parser->ctx); amdgpu_ctx_put(parser->ctx);
} }
if (parser->bo_list) if (parser->bo_list)
...@@ -1138,9 +1135,6 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, ...@@ -1138,9 +1135,6 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
{ {
int i, r; int i, r;
/* TODO: Investigate why we still need the context lock */
mutex_unlock(&p->ctx->lock);
for (i = 0; i < p->nchunks; ++i) { for (i = 0; i < p->nchunks; ++i) {
struct amdgpu_cs_chunk *chunk; struct amdgpu_cs_chunk *chunk;
...@@ -1151,34 +1145,32 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, ...@@ -1151,34 +1145,32 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES: case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
r = amdgpu_cs_process_fence_dep(p, chunk); r = amdgpu_cs_process_fence_dep(p, chunk);
if (r) if (r)
goto out; return r;
break; break;
case AMDGPU_CHUNK_ID_SYNCOBJ_IN: case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
r = amdgpu_cs_process_syncobj_in_dep(p, chunk); r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
if (r) if (r)
goto out; return r;
break; break;
case AMDGPU_CHUNK_ID_SYNCOBJ_OUT: case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
r = amdgpu_cs_process_syncobj_out_dep(p, chunk); r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
if (r) if (r)
goto out; return r;
break; break;
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT: case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk); r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
if (r) if (r)
goto out; return r;
break; break;
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL: case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk); r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
if (r) if (r)
goto out; return r;
break; break;
} }
} }
out: return 0;
mutex_lock(&p->ctx->lock);
return r;
} }
static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p) static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
...@@ -1340,7 +1332,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) ...@@ -1340,7 +1332,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
goto out; goto out;
r = amdgpu_cs_submit(&parser, cs); r = amdgpu_cs_submit(&parser, cs);
out: out:
amdgpu_cs_parser_fini(&parser, r, reserved_buffers); amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
......
...@@ -315,7 +315,6 @@ static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority, ...@@ -315,7 +315,6 @@ static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority,
kref_init(&ctx->refcount); kref_init(&ctx->refcount);
ctx->mgr = mgr; ctx->mgr = mgr;
spin_lock_init(&ctx->ring_lock); spin_lock_init(&ctx->ring_lock);
mutex_init(&ctx->lock);
ctx->reset_counter = atomic_read(&mgr->adev->gpu_reset_counter); ctx->reset_counter = atomic_read(&mgr->adev->gpu_reset_counter);
ctx->reset_counter_query = ctx->reset_counter; ctx->reset_counter_query = ctx->reset_counter;
...@@ -407,7 +406,6 @@ static void amdgpu_ctx_fini(struct kref *ref) ...@@ -407,7 +406,6 @@ static void amdgpu_ctx_fini(struct kref *ref)
drm_dev_exit(idx); drm_dev_exit(idx);
} }
mutex_destroy(&ctx->lock);
kfree(ctx); kfree(ctx);
} }
......
...@@ -53,7 +53,6 @@ struct amdgpu_ctx { ...@@ -53,7 +53,6 @@ struct amdgpu_ctx {
bool preamble_presented; bool preamble_presented;
int32_t init_priority; int32_t init_priority;
int32_t override_priority; int32_t override_priority;
struct mutex lock;
atomic_t guilty; atomic_t guilty;
unsigned long ras_counter_ce; unsigned long ras_counter_ce;
unsigned long ras_counter_ue; unsigned long ras_counter_ue;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment