Commit c58a863b authored by Guchun Chen, committed by Alex Deucher

drm/amdgpu: use adev_to_drm for consistency when accessing drm_device

adev_to_drm() is used everywhere else, so update the recently added code that
accesses the drm_device pointer from amdgpu_device to use it as well.
Signed-off-by: Guchun Chen <guchun.chen@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent ec6abe83
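
For context, the conversion is purely mechanical: adev_to_drm() already wraps the drm_device embedded in amdgpu_device, so every open-coded &adev->ddev is replaced by the helper. Below is a minimal sketch of the accessor and of the drm_dev_enter()/drm_dev_exit() hotplug guard most of the converted call sites follow; the helper body reflects how adev_to_drm() is understood to be defined in amdgpu.h around this kernel version (treat the exact form as an assumption), and example_hw_access() is a hypothetical function used only for illustration.

#include <drm/drm_drv.h>	/* drm_dev_enter(), drm_dev_exit() */
/* struct amdgpu_device comes from the driver's amdgpu.h; not repeated here. */

/*
 * Sketch of the accessor this commit standardizes on: drm_device is
 * embedded in amdgpu_device, so the helper just returns its address.
 */
static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
{
	return &adev->ddev;
}

/* Hypothetical caller showing the hotplug guard touched throughout this diff. */
static void example_hw_access(struct amdgpu_device *adev)
{
	int idx;

	/* drm_dev_enter() returns false once the device is unplugged. */
	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return;

	/* ... register or VRAM access goes here ... */

	drm_dev_exit(idx);
}

The control flow at each call site is unchanged; the diff below only swaps how the drm_device pointer is obtained.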
@@ -306,7 +306,7 @@ void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
 uint64_t last;
 int idx;
-if (!drm_dev_enter(&adev->ddev, &idx))
+if (!drm_dev_enter(adev_to_drm(adev), &idx))
 return;
 BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
......
@@ -550,7 +550,7 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
 drm_sched_stop(&ring->sched, NULL);
 /* You can't wait for HW to signal if it's gone */
-if (!drm_dev_is_unplugged(&adev->ddev))
+if (!drm_dev_is_unplugged(adev_to_drm(adev)))
 r = amdgpu_fence_wait_empty(ring);
 else
 r = -ENODEV;
......
@@ -238,7 +238,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 return -EINVAL;
 }
-if (!drm_dev_enter(&adev->ddev, &idx))
+if (!drm_dev_enter(adev_to_drm(adev), &idx))
 return 0;
 t = offset / AMDGPU_GPU_PAGE_SIZE;
@@ -289,7 +289,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
 return -EINVAL;
 }
-if (!drm_dev_enter(&adev->ddev, &idx))
+if (!drm_dev_enter(adev_to_drm(adev), &idx))
 return 0;
 t = offset / AMDGPU_GPU_PAGE_SIZE;
......
@@ -745,7 +745,7 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
 u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo);
 int idx;
-if (!drm_dev_enter(&adev->ddev, &idx))
+if (!drm_dev_enter(adev_to_drm(adev), &idx))
 return;
 flags |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
......
@@ -38,7 +38,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
 struct amdgpu_device *adev = ring->adev;
 int idx;
-if (!drm_dev_enter(&adev->ddev, &idx)) {
+if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
 DRM_INFO("%s - device unplugged skipping recovery on scheduler:%s",
 __func__, s_job->sched->name);
......
@@ -440,7 +440,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
 if (psp->adev->no_hw_access)
 return 0;
-if (!drm_dev_enter(&psp->adev->ddev, &idx))
+if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
 return 0;
 memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
@@ -3272,7 +3272,7 @@ void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size
 {
 int idx;
-if (!drm_dev_enter(&psp->adev->ddev, &idx))
+if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
 return;
 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
......
@@ -454,7 +454,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 if (!adev->uvd.inst[j].saved_bo)
 return -ENOMEM;
-if (drm_dev_enter(&adev->ddev, &idx)) {
+if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 /* re-write 0 since err_event_athub will corrupt VCPU buffer */
 if (in_ras_intr)
 memset(adev->uvd.inst[j].saved_bo, 0, size);
@@ -487,7 +487,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 ptr = adev->uvd.inst[i].cpu_addr;
 if (adev->uvd.inst[i].saved_bo != NULL) {
-if (drm_dev_enter(&adev->ddev, &idx)) {
+if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
 drm_dev_exit(idx);
 }
@@ -500,7 +500,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
 offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
-if (drm_dev_enter(&adev->ddev, &idx)) {
+if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset,
 le32_to_cpu(hdr->ucode_size_bytes));
 drm_dev_exit(idx);
......
@@ -313,7 +313,7 @@ int amdgpu_vce_resume(struct amdgpu_device *adev)
 hdr = (const struct common_firmware_header *)adev->vce.fw->data;
 offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
-if (drm_dev_enter(&adev->ddev, &idx)) {
+if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
 adev->vce.fw->size - offset);
 drm_dev_exit(idx);
......
@@ -325,7 +325,7 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
 if (!adev->vcn.inst[i].saved_bo)
 return -ENOMEM;
-if (drm_dev_enter(&adev->ddev, &idx)) {
+if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
 drm_dev_exit(idx);
 }
@@ -349,7 +349,7 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
 ptr = adev->vcn.inst[i].cpu_addr;
 if (adev->vcn.inst[i].saved_bo != NULL) {
-if (drm_dev_enter(&adev->ddev, &idx)) {
+if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
 drm_dev_exit(idx);
 }
@@ -362,7 +362,7 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
 hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
 offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
-if (drm_dev_enter(&adev->ddev, &idx)) {
+if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
 le32_to_cpu(hdr->ucode_size_bytes));
 drm_dev_exit(idx);
......
@@ -845,7 +845,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 return r;
 }
-if (!drm_dev_enter(&adev->ddev, &idx))
+if (!drm_dev_enter(adev_to_drm(adev), &idx))
 return -ENODEV;
 r = vm->update_funcs->map_table(vmbo);
@@ -1395,7 +1395,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
 if (list_empty(&vm->relocated))
 return 0;
-if (!drm_dev_enter(&adev->ddev, &idx))
+if (!drm_dev_enter(adev_to_drm(adev), &idx))
 return -ENODEV;
 memset(&params, 0, sizeof(params));
@@ -1718,7 +1718,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 enum amdgpu_sync_mode sync_mode;
 int r, idx;
-if (!drm_dev_enter(&adev->ddev, &idx))
+if (!drm_dev_enter(adev_to_drm(adev), &idx))
 return -ENODEV;
 memset(&params, 0, sizeof(params));
......
@@ -698,7 +698,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
 return -ENOMEM;
 }
-if (drm_dev_enter(&adev->ddev, &idx)) {
+if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 memcpy_fromio(buf, adev->mman.aper_base_kaddr, sz);
 ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN);
 if (ret) {
......
@@ -565,7 +565,7 @@ static int vce_v4_0_suspend(void *handle)
 if (adev->vce.vcpu_bo == NULL)
 return 0;
-if (drm_dev_enter(&adev->ddev, &idx)) {
+if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
 void *ptr = adev->vce.cpu_addr;
@@ -615,7 +615,7 @@ static int vce_v4_0_resume(void *handle)
 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
-if (drm_dev_enter(&adev->ddev, &idx)) {
+if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
 void *ptr = adev->vce.cpu_addr;
......
@@ -198,7 +198,7 @@ static int vcn_v2_0_sw_fini(void *handle)
 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
-if (drm_dev_enter(&adev->ddev, &idx)) {
+if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 fw_shared->present_flag_0 = 0;
 drm_dev_exit(idx);
 }
......
@@ -240,7 +240,7 @@ static int vcn_v2_5_sw_fini(void *handle)
 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 volatile struct amdgpu_fw_shared *fw_shared;
-if (drm_dev_enter(&adev->ddev, &idx)) {
+if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
 if (adev->vcn.harvest_config & (1 << i))
 continue;
......
@@ -263,7 +263,7 @@ static int vcn_v3_0_sw_fini(void *handle)
 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 int i, r, idx;
-if (drm_dev_enter(&adev->ddev, &idx)) {
+if (drm_dev_enter(adev_to_drm(adev), &idx)) {
 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
 volatile struct amdgpu_fw_shared *fw_shared;
......