Commit 5c675bf2 authored by Christian König, committed by Alex Deucher

drm/amdgpu: clean up UVD instance handling v2

The whole handle, filp and entity handling is superfluous here.

We should have reviewed that more thoughtfully. It looks like somebody
just made the code instance aware without knowing the background.

v2: fix one more missed case in amdgpu_uvd_suspend
Reviewed-by: Leo Liu <leo.liu@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 58c24b7c
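In short: the handles[], filp[] and scheduler-entity bookkeeping describes per-device session state, not per-hardware-instance state, so the patch hoists it out of struct amdgpu_uvd_inst and into the shared struct amdgpu_uvd. An abridged sketch of the resulting layout (most fields omitted; the amdgpu_uvd.h hunk at the end of the diff below is the authoritative definition):

/* Abridged sketch only -- see the header hunk below for the real fields. */
struct amdgpu_uvd_inst {
    struct amdgpu_ring      ring;       /* per-instance ring state stays here */
    struct amdgpu_ring      ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
    struct amdgpu_irq_src   irq;
    /* handles[], filp[] and the drm_sched_entity no longer live here */
};

struct amdgpu_uvd {
    struct amdgpu_uvd_inst  inst[AMDGPU_MAX_UVD_INSTANCES];
    struct drm_file         *filp[AMDGPU_MAX_UVD_HANDLES];  /* moved up from inst */
    atomic_t                handles[AMDGPU_MAX_UVD_HANDLES]; /* moved up from inst */
    struct drm_sched_entity entity;                          /* one entity for the whole block */
    /* ... */
};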
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -263,21 +263,20 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
             dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
             return r;
         }
+    }
 
-        ring = &adev->uvd.inst[j].ring;
-        rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-        r = drm_sched_entity_init(&adev->uvd.inst[j].entity, &rq,
-                                  1, NULL);
-        if (r != 0) {
-            DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
-            return r;
-        }
-
-        for (i = 0; i < adev->uvd.max_handles; ++i) {
-            atomic_set(&adev->uvd.inst[j].handles[i], 0);
-            adev->uvd.inst[j].filp[i] = NULL;
-        }
+    ring = &adev->uvd.inst[0].ring;
+    rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+    r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
+    if (r) {
+        DRM_ERROR("Failed setting up UVD kernel entity.\n");
+        return r;
+    }
+
+    for (i = 0; i < adev->uvd.max_handles; ++i) {
+        atomic_set(&adev->uvd.handles[i], 0);
+        adev->uvd.filp[i] = NULL;
     }
 
     /* from uvd v5.0 HW addressing capacity increased to 64 bits */
     if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
         adev->uvd.address_64_bit = true;
@@ -306,11 +305,12 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
     int i, j;
 
+    drm_sched_entity_destroy(&adev->uvd.inst->ring.sched,
+                             &adev->uvd.entity);
+
     for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
         kfree(adev->uvd.inst[j].saved_bo);
 
-        drm_sched_entity_destroy(&adev->uvd.inst[j].ring.sched,
-                                 &adev->uvd.inst[j].entity);
-
         amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
                               &adev->uvd.inst[j].gpu_addr,
                               (void **)&adev->uvd.inst[j].cpu_addr);
@@ -333,20 +333,20 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 
     cancel_delayed_work_sync(&adev->uvd.idle_work);
 
+    /* only valid for physical mode */
+    if (adev->asic_type < CHIP_POLARIS10) {
+        for (i = 0; i < adev->uvd.max_handles; ++i)
+            if (atomic_read(&adev->uvd.handles[i]))
+                break;
+
+        if (i == adev->uvd.max_handles)
+            return 0;
+    }
+
     for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
         if (adev->uvd.inst[j].vcpu_bo == NULL)
             continue;
 
-        /* only valid for physical mode */
-        if (adev->asic_type < CHIP_POLARIS10) {
-            for (i = 0; i < adev->uvd.max_handles; ++i)
-                if (atomic_read(&adev->uvd.inst[j].handles[i]))
-                    break;
-
-            if (i == adev->uvd.max_handles)
-                continue;
-        }
-
         size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
         ptr = adev->uvd.inst[j].cpu_addr;
@@ -398,30 +398,27 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 
 void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 {
-    struct amdgpu_ring *ring;
-    int i, j, r;
-
-    for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
-        ring = &adev->uvd.inst[j].ring;
-
-        for (i = 0; i < adev->uvd.max_handles; ++i) {
-            uint32_t handle = atomic_read(&adev->uvd.inst[j].handles[i]);
-
-            if (handle != 0 && adev->uvd.inst[j].filp[i] == filp) {
-                struct dma_fence *fence;
-
-                r = amdgpu_uvd_get_destroy_msg(ring, handle,
-                                               false, &fence);
-                if (r) {
-                    DRM_ERROR("Error destroying UVD(%d) %d!\n", j, r);
-                    continue;
-                }
-
-                dma_fence_wait(fence, false);
-                dma_fence_put(fence);
-
-                adev->uvd.inst[j].filp[i] = NULL;
-                atomic_set(&adev->uvd.inst[j].handles[i], 0);
-            }
+    struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;
+    int i, r;
+
+    for (i = 0; i < adev->uvd.max_handles; ++i) {
+        uint32_t handle = atomic_read(&adev->uvd.handles[i]);
+
+        if (handle != 0 && adev->uvd.filp[i] == filp) {
+            struct dma_fence *fence;
+
+            r = amdgpu_uvd_get_destroy_msg(ring, handle, false,
+                                           &fence);
+            if (r) {
+                DRM_ERROR("Error destroying UVD %d!\n", r);
+                continue;
+            }
+
+            dma_fence_wait(fence, false);
+            dma_fence_put(fence);
+
+            adev->uvd.filp[i] = NULL;
+            atomic_set(&adev->uvd.handles[i], 0);
         }
     }
 }
@@ -692,20 +689,19 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
                              struct amdgpu_bo *bo, unsigned offset)
 {
     struct amdgpu_device *adev = ctx->parser->adev;
-    uint32_t ip_instance = ctx->parser->ring->me;
     int32_t *msg, msg_type, handle;
     void *ptr;
     long r;
     int i;
 
     if (offset & 0x3F) {
-        DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance);
+        DRM_ERROR("UVD messages must be 64 byte aligned!\n");
         return -EINVAL;
     }
 
     r = amdgpu_bo_kmap(bo, &ptr);
     if (r) {
-        DRM_ERROR("Failed mapping the UVD(%d) message (%ld)!\n", ip_instance, r);
+        DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
         return r;
     }
@@ -715,7 +711,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 
     handle = msg[2];
     if (handle == 0) {
-        DRM_ERROR("Invalid UVD(%d) handle!\n", ip_instance);
+        DRM_ERROR("Invalid UVD handle!\n");
         return -EINVAL;
     }
@@ -726,18 +722,19 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 
         /* try to alloc a new handle */
         for (i = 0; i < adev->uvd.max_handles; ++i) {
-            if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
-                DRM_ERROR("(%d)Handle 0x%x already in use!\n", ip_instance, handle);
+            if (atomic_read(&adev->uvd.handles[i]) == handle) {
+                DRM_ERROR("Handle 0x%x already in use!\n",
+                          handle);
                 return -EINVAL;
             }
 
-            if (!atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], 0, handle)) {
-                adev->uvd.inst[ip_instance].filp[i] = ctx->parser->filp;
+            if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
+                adev->uvd.filp[i] = ctx->parser->filp;
                 return 0;
             }
         }
 
-        DRM_ERROR("No more free UVD(%d) handles!\n", ip_instance);
+        DRM_ERROR("No more free UVD handles!\n");
         return -ENOSPC;
 
     case 1:
@@ -749,27 +746,27 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 
         /* validate the handle */
         for (i = 0; i < adev->uvd.max_handles; ++i) {
-            if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
-                if (adev->uvd.inst[ip_instance].filp[i] != ctx->parser->filp) {
-                    DRM_ERROR("UVD(%d) handle collision detected!\n", ip_instance);
+            if (atomic_read(&adev->uvd.handles[i]) == handle) {
+                if (adev->uvd.filp[i] != ctx->parser->filp) {
+                    DRM_ERROR("UVD handle collision detected!\n");
                     return -EINVAL;
                 }
                 return 0;
             }
         }
 
-        DRM_ERROR("Invalid UVD(%d) handle 0x%x!\n", ip_instance, handle);
+        DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
         return -ENOENT;
 
     case 2:
         /* it's a destroy msg, free the handle */
         for (i = 0; i < adev->uvd.max_handles; ++i)
-            atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], handle, 0);
+            atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
 
         amdgpu_bo_kunmap(bo);
         return 0;
 
     default:
-        DRM_ERROR("Illegal UVD(%d) message type (%d)!\n", ip_instance, msg_type);
+        DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
         return -EINVAL;
     }
 
     BUG();
@@ -1071,7 +1068,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
     if (r)
         goto err_free;
 
-    r = amdgpu_job_submit(job, &adev->uvd.inst[ring->me].entity,
+    r = amdgpu_job_submit(job, &adev->uvd.entity,
                           AMDGPU_FENCE_OWNER_UNDEFINED, &f);
     if (r)
         goto err_free;
@@ -1273,7 +1270,7 @@ uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
          * necessarily linear. So we need to count
          * all non-zero handles.
          */
-        if (atomic_read(&adev->uvd.inst->handles[i]))
+        if (atomic_read(&adev->uvd.handles[i]))
             used_handles++;
     }
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -42,12 +42,9 @@ struct amdgpu_uvd_inst {
     void                    *cpu_addr;
     uint64_t                gpu_addr;
     void                    *saved_bo;
-    atomic_t                handles[AMDGPU_MAX_UVD_HANDLES];
-    struct drm_file         *filp[AMDGPU_MAX_UVD_HANDLES];
     struct amdgpu_ring      ring;
     struct amdgpu_ring      ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
     struct amdgpu_irq_src   irq;
-    struct drm_sched_entity entity;
     uint32_t                srbm_soft_reset;
 };
@@ -56,10 +53,13 @@ struct amdgpu_uvd {
     unsigned                fw_version;
     unsigned                max_handles;
     unsigned                num_enc_rings;
     uint8_t                 num_uvd_inst;
     bool                    address_64_bit;
     bool                    use_ctx_buf;
     struct amdgpu_uvd_inst  inst[AMDGPU_MAX_UVD_INSTANCES];
+    struct drm_file         *filp[AMDGPU_MAX_UVD_HANDLES];
+    atomic_t                handles[AMDGPU_MAX_UVD_HANDLES];
+    struct drm_sched_entity entity;
     struct delayed_work     idle_work;
 };