Commit 63e3ab9a authored by Nirmoy Das, committed by Alex Deucher

drm/amdgpu: individualize fence allocation per entity

Allocate fences for each entity and remove the ctx->fences reference, as
fences should be bound to amdgpu_ctx_entity instead of amdgpu_ctx.
Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 7db1d560
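To illustrate the shape of the change before reading the diff, here is a minimal userspace sketch of the allocation pattern the patch moves to: each entity owns its own fence array instead of borrowing a slice of one context-wide allocation. The struct names, calloc/free calls, and return codes below are simplified stand-ins for the kernel's amdgpu_ctx_entity, kcalloc/kfree, and ENOMEM handling, not the driver code itself.

/*
 * Simplified userspace sketch of per-entity fence allocation.
 * "struct fence" and "struct entity" are reduced stand-ins for the
 * kernel's dma_fence and amdgpu_ctx_entity; calloc/free stand in for
 * kcalloc/kfree, and the error path mirrors error_cleanup_memory.
 */
#include <stdlib.h>

#define SCHED_JOBS 4            /* stand-in for amdgpu_sched_jobs */

struct fence;                   /* opaque stand-in for struct dma_fence */

struct entity {
	unsigned sequence;
	struct fence **fences;  /* now owned by each entity, not by the ctx */
};

static int ctx_init(struct entity **out, unsigned num_entities)
{
	struct entity *entities;
	unsigned i;

	entities = calloc(num_entities, sizeof(*entities));
	if (!entities)
		return -1;

	for (i = 0; i < num_entities; ++i) {
		entities[i].sequence = 1;
		/* one fence ring per entity instead of one shared array */
		entities[i].fences = calloc(SCHED_JOBS, sizeof(struct fence *));
		if (!entities[i].fences)
			goto error_cleanup_memory;
	}

	*out = entities;
	return 0;

error_cleanup_memory:
	/* unallocated slots are NULL from calloc, so free() is safe on all */
	for (i = 0; i < num_entities; ++i)
		free(entities[i].fences);
	free(entities);
	return -1;
}

static void ctx_fini(struct entity *entities, unsigned num_entities)
{
	unsigned i;

	/* each entity frees its own fence array, mirroring amdgpu_ctx_fini */
	for (i = 0; i < num_entities; ++i)
		free(entities[i].fences);
	free(entities);
}

int main(void)
{
	struct entity *entities;

	if (ctx_init(&entities, 8))
		return 1;
	ctx_fini(entities, 8);
	return 0;
}

Keeping the fence storage inside each entity lets the error and teardown paths free exactly what was allocated per entity, which is what the new error_cleanup_memory label and the reworked amdgpu_ctx_fini loop in the diff below do.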
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -87,24 +87,24 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 	memset(ctx, 0, sizeof(*ctx));
 	ctx->adev = adev;
 
-	ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
-			      sizeof(struct dma_fence*), GFP_KERNEL);
-	if (!ctx->fences)
-		return -ENOMEM;
-
 	ctx->entities[0] = kcalloc(num_entities,
 				   sizeof(struct amdgpu_ctx_entity),
 				   GFP_KERNEL);
-	if (!ctx->entities[0]) {
-		r = -ENOMEM;
-		goto error_free_fences;
-	}
+	if (!ctx->entities[0])
+		return -ENOMEM;
 
 	for (i = 0; i < num_entities; ++i) {
 		struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];
 
 		entity->sequence = 1;
-		entity->fences = &ctx->fences[amdgpu_sched_jobs * i];
+		entity->fences = kcalloc(amdgpu_sched_jobs,
+					 sizeof(struct dma_fence*), GFP_KERNEL);
+		if (!entity->fences) {
+			r = -ENOMEM;
+			goto error_cleanup_memory;
+		}
 	}
 	for (i = 1; i < AMDGPU_HW_IP_NUM; ++i)
 		ctx->entities[i] = ctx->entities[i - 1] +
@@ -181,11 +181,17 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 error_cleanup_entities:
 	for (i = 0; i < num_entities; ++i)
 		drm_sched_entity_destroy(&ctx->entities[0][i].entity);
-	kfree(ctx->entities[0]);
 
-error_free_fences:
-	kfree(ctx->fences);
-	ctx->fences = NULL;
+error_cleanup_memory:
+	for (i = 0; i < num_entities; ++i) {
+		struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];
+
+		kfree(entity->fences);
+		entity->fences = NULL;
+	}
+
+	kfree(ctx->entities[0]);
+	ctx->entities[0] = NULL;
 	return r;
 }
@@ -199,12 +205,16 @@ static void amdgpu_ctx_fini(struct kref *ref)
 	if (!adev)
 		return;
 
-	for (i = 0; i < num_entities; ++i)
+	for (i = 0; i < num_entities; ++i) {
+		struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];
+
 		for (j = 0; j < amdgpu_sched_jobs; ++j)
-			dma_fence_put(ctx->entities[0][i].fences[j]);
-	kfree(ctx->fences);
-	kfree(ctx->entities[0]);
+			dma_fence_put(entity->fences[j]);
+
+		kfree(entity->fences);
+	}
+
+	kfree(ctx->entities[0]);
 
 	mutex_destroy(&ctx->lock);
 	kfree(ctx);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -42,7 +42,6 @@ struct amdgpu_ctx {
 	unsigned			reset_counter_query;
 	uint32_t			vram_lost_counter;
 	spinlock_t			ring_lock;
-	struct dma_fence		**fences;
 	struct amdgpu_ctx_entity	*entities[AMDGPU_HW_IP_NUM];
 	bool				preamble_presented;
 	enum drm_sched_priority		init_priority;