Commit 5c53d19b authored by James Zhu's avatar James Zhu Committed by Alex Deucher

drm/amdgpu: All UVD instances share one idle_work handle

All UVD instances have only one dpm control, so it is better
to share one idle_work handle.
Signed-off-by: default avatarJames Zhu <James.Zhu@amd.com>
Reviewed-by: default avatarAlex Deucher <alexander.deucher@amd.com>
Reviewed-by: default avatarChristian König <christian.koenig@amd.com>
Tested-by: default avatarStefan Agner <stefan@agner.ch>
Signed-off-by: default avatarAlex Deucher <alexander.deucher@amd.com>
parent d9fda248
...@@ -130,7 +130,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) ...@@ -130,7 +130,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
unsigned version_major, version_minor, family_id; unsigned version_major, version_minor, family_id;
int i, j, r; int i, j, r;
INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler); INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
switch (adev->asic_type) { switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK #ifdef CONFIG_DRM_AMDGPU_CIK
...@@ -314,12 +314,12 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) ...@@ -314,12 +314,12 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
void *ptr; void *ptr;
int i, j; int i, j;
cancel_delayed_work_sync(&adev->uvd.idle_work);
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
if (adev->uvd.inst[j].vcpu_bo == NULL) if (adev->uvd.inst[j].vcpu_bo == NULL)
continue; continue;
cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
/* only valid for physical mode */ /* only valid for physical mode */
if (adev->asic_type < CHIP_POLARIS10) { if (adev->asic_type < CHIP_POLARIS10) {
for (i = 0; i < adev->uvd.max_handles; ++i) for (i = 0; i < adev->uvd.max_handles; ++i)
...@@ -1145,7 +1145,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, ...@@ -1145,7 +1145,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
static void amdgpu_uvd_idle_work_handler(struct work_struct *work) static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{ {
struct amdgpu_device *adev = struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, uvd.inst->idle_work.work); container_of(work, struct amdgpu_device, uvd.idle_work.work);
unsigned fences = 0, i, j; unsigned fences = 0, i, j;
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
...@@ -1167,7 +1167,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) ...@@ -1167,7 +1167,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
AMD_CG_STATE_GATE); AMD_CG_STATE_GATE);
} }
} else { } else {
schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT); schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
} }
} }
...@@ -1179,7 +1179,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring) ...@@ -1179,7 +1179,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
if (amdgpu_sriov_vf(adev)) if (amdgpu_sriov_vf(adev))
return; return;
set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work); set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
if (set_clocks) { if (set_clocks) {
if (adev->pm.dpm_enabled) { if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_uvd(adev, true); amdgpu_dpm_enable_uvd(adev, true);
...@@ -1196,7 +1196,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring) ...@@ -1196,7 +1196,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring) void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
{ {
if (!amdgpu_sriov_vf(ring->adev)) if (!amdgpu_sriov_vf(ring->adev))
schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT); schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
} }
/** /**
......
...@@ -44,7 +44,6 @@ struct amdgpu_uvd_inst { ...@@ -44,7 +44,6 @@ struct amdgpu_uvd_inst {
void *saved_bo; void *saved_bo;
atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES]; struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
struct delayed_work idle_work;
struct amdgpu_ring ring; struct amdgpu_ring ring;
struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS]; struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
struct amdgpu_irq_src irq; struct amdgpu_irq_src irq;
...@@ -62,6 +61,7 @@ struct amdgpu_uvd { ...@@ -62,6 +61,7 @@ struct amdgpu_uvd {
bool address_64_bit; bool address_64_bit;
bool use_ctx_buf; bool use_ctx_buf;
struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES]; struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES];
struct delayed_work idle_work;
}; };
int amdgpu_uvd_sw_init(struct amdgpu_device *adev); int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment