Commit 13f62f54 authored by Dave Airlie

Merge branch 'drm-next-4.11' of git://people.freedesktop.org/~agd5f/linux into drm-next

Some additional fixes for 4.11.  Delayed a bit due to Chinese New Year. Highlights:
- Powerplay fixes
- VCE and UVD powergating fixes
- Clean up amdgpu SI gfx code to match CI and VI
- Misc bug fixes

* 'drm-next-4.11' of git://people.freedesktop.org/~agd5f/linux: (30 commits)
  drm/amdgpu: report the number of bytes moved at buffer creation
  drm/amdgpu: fix a potential deadlock in amdgpu_bo_create_restricted()
  drm/amdgpu: add support for new smc firmware on polaris
  drm/amd/powerplay: refine code to avoid potential bug that the memory not cleared.
  drm/amdgpu: shut up #warning for compile testing
  drm/amdgpu/virt: fix double kfree on bo_va
  drm/radeon: remove some dead code
  drm/radeon: avoid kernel segfault in vce when gpu fails to resume
  drm/amd/powerplay: set fan speed to max in profile peak mode only.
  drm/amd/gfx6: update gb_addr_config
  drm/amdgpu: update HAINAN_GB_ADDR_CONFIG_GOLDEN
  drm/amdgpu: update VERDE_GB_ADDR_CONFIG_GOLDEN
  drm/amdgpu: refine si_read_register
  drm/amdgpu/gfx6: clean up spi configuration
  drm/amdgpu/gfx6: clean up cu configuration
  drm/amdgpu/gfx6: clean up rb configuration
  drm/amdgpu: refine vce3.0 code and related powerplay pg code.
  drm/amdgpu: move subfunctions to the front of vce_v2_0.c.
  drm/amdgpu: enable vce pg feature on Kv.
  drm/amdgpu: refine code for VCE2.0 and related dpm code.
  ...
parents f320d357 fad06127
@@ -1709,6 +1709,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
                        u32 ip_instance, u32 ring,
                        struct amdgpu_ring **out_ring);
+void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes);
 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
......
@@ -850,16 +850,37 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
            strcpy(fw_name, "amdgpu/fiji_smc.bin");
            break;
        case CHIP_POLARIS11:
-           if (type == CGS_UCODE_ID_SMU)
-               strcpy(fw_name, "amdgpu/polaris11_smc.bin");
-           else if (type == CGS_UCODE_ID_SMU_SK)
+           if (type == CGS_UCODE_ID_SMU) {
+               if (((adev->pdev->device == 0x67ef) &&
+                    ((adev->pdev->revision == 0xe0) ||
+                     (adev->pdev->revision == 0xe2) ||
+                     (adev->pdev->revision == 0xe5))) ||
+                   ((adev->pdev->device == 0x67ff) &&
+                    ((adev->pdev->revision == 0xcf) ||
+                     (adev->pdev->revision == 0xef) ||
+                     (adev->pdev->revision == 0xff))))
+                   strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
+               else
+                   strcpy(fw_name, "amdgpu/polaris11_smc.bin");
+           } else if (type == CGS_UCODE_ID_SMU_SK) {
                strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
+           }
            break;
        case CHIP_POLARIS10:
-           if (type == CGS_UCODE_ID_SMU)
-               strcpy(fw_name, "amdgpu/polaris10_smc.bin");
-           else if (type == CGS_UCODE_ID_SMU_SK)
+           if (type == CGS_UCODE_ID_SMU) {
+               if ((adev->pdev->device == 0x67df) &&
+                   ((adev->pdev->revision == 0xe0) ||
+                    (adev->pdev->revision == 0xe3) ||
+                    (adev->pdev->revision == 0xe4) ||
+                    (adev->pdev->revision == 0xe5) ||
+                    (adev->pdev->revision == 0xe7) ||
+                    (adev->pdev->revision == 0xef)))
+                   strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
+               else
+                   strcpy(fw_name, "amdgpu/polaris10_smc.bin");
+           } else if (type == CGS_UCODE_ID_SMU_SK) {
                strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
+           }
            break;
        case CHIP_POLARIS12:
            strcpy(fw_name, "amdgpu/polaris12_smc.bin");
......
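For readers skimming the firmware hunk above: boards whose PCI device/revision IDs match the lists added there get the "_k" ("kicker") SMC image instead of the default one. A condensed, hypothetical helper restating that selection for Polaris10 (the helper name and standalone form are illustrative only; the device and revision values are taken from the hunk):

/* Hypothetical sketch of the Polaris10 SMC image pick shown above. */
static const char *polaris10_smc_image(u16 device, u8 revision)
{
    if (device == 0x67df &&
        (revision == 0xe0 || revision == 0xe3 || revision == 0xe4 ||
         revision == 0xe5 || revision == 0xe7 || revision == 0xef))
        return "amdgpu/polaris10_k_smc.bin"; /* kicker parts */
    return "amdgpu/polaris10_smc.bin";       /* default image */
}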
@@ -344,8 +344,7 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
  * submission. This can result in a debt that can stop buffer migrations
  * temporarily.
  */
-static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
-                                         u64 num_bytes)
+void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes)
 {
    spin_lock(&adev->mm_stats.lock);
    adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
......
@@ -487,67 +487,50 @@ static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
  *
  * @adev: amdgpu_device pointer
  * @bo_va: bo_va to update
+ * @list: validation list
+ * @operation: map or unmap
  *
- * Update the bo_va directly after setting it's address. Errors are not
+ * Update the bo_va directly after setting its address. Errors are not
  * vital here, so they are not reported back to userspace.
  */
 static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
                                     struct amdgpu_bo_va *bo_va,
+                                    struct list_head *list,
                                     uint32_t operation)
 {
-   struct ttm_validate_buffer tv, *entry;
-   struct amdgpu_bo_list_entry vm_pd;
-   struct ww_acquire_ctx ticket;
-   struct list_head list, duplicates;
-   int r;
-
-   INIT_LIST_HEAD(&list);
-   INIT_LIST_HEAD(&duplicates);
-
-   tv.bo = &bo_va->bo->tbo;
-   tv.shared = true;
-   list_add(&tv.head, &list);
-
-   amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);
-
-   /* Provide duplicates to avoid -EALREADY */
-   r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
-   if (r)
-       goto error_print;
+   struct ttm_validate_buffer *entry;
+   int r = -ERESTARTSYS;
 
-   list_for_each_entry(entry, &list, head) {
+   list_for_each_entry(entry, list, head) {
        struct amdgpu_bo *bo =
            container_of(entry->bo, struct amdgpu_bo, tbo);
 
        /* if anything is swapped out don't swap it in here,
           just abort and wait for the next CS */
        if (!amdgpu_bo_gpu_accessible(bo))
-           goto error_unreserve;
+           goto error;
 
        if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
-           goto error_unreserve;
+           goto error;
    }
 
    r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check,
                                  NULL);
    if (r)
-       goto error_unreserve;
+       goto error;
 
    r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
    if (r)
-       goto error_unreserve;
+       goto error;
 
    r = amdgpu_vm_clear_freed(adev, bo_va->vm);
    if (r)
-       goto error_unreserve;
+       goto error;
 
    if (operation == AMDGPU_VA_OP_MAP)
        r = amdgpu_vm_bo_update(adev, bo_va, false);
 
-error_unreserve:
-   ttm_eu_backoff_reservation(&ticket, &list);
-
-error_print:
+error:
    if (r && r != -ERESTARTSYS)
        DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
 }
@@ -564,7 +547,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
    struct amdgpu_bo_list_entry vm_pd;
    struct ttm_validate_buffer tv;
    struct ww_acquire_ctx ticket;
-   struct list_head list, duplicates;
+   struct list_head list;
    uint32_t invalid_flags, va_flags = 0;
    int r = 0;
@@ -602,14 +585,13 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
        return -ENOENT;
    abo = gem_to_amdgpu_bo(gobj);
    INIT_LIST_HEAD(&list);
-   INIT_LIST_HEAD(&duplicates);
    tv.bo = &abo->tbo;
-   tv.shared = true;
+   tv.shared = false;
    list_add(&tv.head, &list);
 
    amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
 
-   r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
+   r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
    if (r) {
        drm_gem_object_unreference_unlocked(gobj);
        return r;
@@ -640,10 +622,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
    default:
        break;
    }
-   ttm_eu_backoff_reservation(&ticket, &list);
    if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
        !amdgpu_vm_debug)
-       amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
+       amdgpu_gem_va_update_vm(adev, bo_va, &list, args->operation);
+   ttm_eu_backoff_reservation(&ticket, &list);
 
    drm_gem_object_unreference_unlocked(gobj);
    return r;
......
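The net effect of the amdgpu_gem.c hunks above: the VA ioctl now reserves the BO and the page directory once, keeps them reserved across the VM update, and passes its validation list into amdgpu_gem_va_update_vm() instead of letting that helper re-reserve everything itself. A condensed ordering sketch of the resulting ioctl flow (fragment assembled from the hunks above, error handling trimmed):

    /* reserve the BO and the page directory together */
    INIT_LIST_HEAD(&list);
    tv.bo = &abo->tbo;
    tv.shared = false;
    list_add(&tv.head, &list);
    amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
    r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);

    /* ... perform the requested map/unmap operation ... */

    if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
        amdgpu_gem_va_update_vm(adev, bo_va, &list, args->operation);
    ttm_eu_backoff_reservation(&ticket, &list); /* unreserve only after the update */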
@@ -323,6 +323,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
    struct amdgpu_bo *bo;
    enum ttm_bo_type type;
    unsigned long page_align;
+   u64 initial_bytes_moved;
    size_t acc_size;
    int r;
@@ -374,8 +375,10 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
     * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
     */
+#ifndef CONFIG_COMPILE_TEST
 #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
         thanks to write-combining
+#endif
 
    if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
        DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
@@ -399,12 +402,20 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
        locked = ww_mutex_trylock(&bo->tbo.ttm_resv.lock);
        WARN_ON(!locked);
    }
+
+   initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
    r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
                    &bo->placement, page_align, !kernel, NULL,
                    acc_size, sg, resv ? resv : &bo->tbo.ttm_resv,
                    &amdgpu_ttm_bo_destroy);
-   if (unlikely(r != 0))
+   amdgpu_cs_report_moved_bytes(adev,
+       atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved);
+
+   if (unlikely(r != 0)) {
+       if (!resv)
+           ww_mutex_unlock(&bo->tbo.resv->lock);
        return r;
+   }
 
    bo->tbo.priority = ilog2(bo->tbo.num_pages);
    if (kernel)
......
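The buffer-creation hunk above pairs the newly exported amdgpu_cs_report_moved_bytes() with ttm_bo_init(): the global move counter is sampled before creation and only the delta is charged against the move-throttling budget. The pattern, as a fragment of the code above:

    u64 initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);

    r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
                    &bo->placement, page_align, !kernel, NULL,
                    acc_size, sg, resv ? resv : &bo->tbo.ttm_resv,
                    &amdgpu_ttm_bo_destroy);

    /* charge only the bytes this creation actually moved */
    amdgpu_cs_report_moved_bytes(adev,
        atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved);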
@@ -1142,13 +1142,23 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
            /* XXX select vce level based on ring/task */
            adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
            mutex_unlock(&adev->pm.mutex);
+
+           amdgpu_pm_compute_clocks(adev);
+           amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+                                        AMD_PG_STATE_UNGATE);
+           amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+                                        AMD_CG_STATE_UNGATE);
        } else {
+           amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+                                        AMD_PG_STATE_GATE);
+           amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+                                        AMD_CG_STATE_GATE);
            mutex_lock(&adev->pm.mutex);
            adev->pm.dpm.vce_active = false;
            mutex_unlock(&adev->pm.mutex);
-       }
 
-       amdgpu_pm_compute_clocks(adev);
+           amdgpu_pm_compute_clocks(adev);
+       }
    }
 }
 
 void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
......
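The gating hunks above (and the UVD/VCE idle and begin_use hunks that follow) all apply the same idea on the non-powerplay path: ungate power and clocks before the block is used, and gate them again once it goes idle. Condensed to its core for VCE (sketch only, assembled from the hunks above; the exact PG/CG ordering varies slightly between the call sites):

    if (enable) {
        /* bring the block up before work is submitted */
        amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                     AMD_PG_STATE_UNGATE);
        amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                     AMD_CG_STATE_UNGATE);
    } else {
        /* block is idle again: gate power and clocks */
        amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                     AMD_PG_STATE_GATE);
        amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                     AMD_CG_STATE_GATE);
    }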
@@ -1113,6 +1113,11 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
            amdgpu_dpm_enable_uvd(adev, false);
        } else {
            amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+           /* shutdown the UVD block */
+           amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+                                        AMD_PG_STATE_GATE);
+           amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+                                        AMD_CG_STATE_GATE);
        }
    } else {
        schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
@@ -1129,6 +1134,10 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
            amdgpu_dpm_enable_uvd(adev, true);
        } else {
            amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
+           amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+                                        AMD_CG_STATE_UNGATE);
+           amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+                                        AMD_PG_STATE_UNGATE);
        }
    }
 }
......
@@ -321,6 +321,10 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
            amdgpu_dpm_enable_vce(adev, false);
        } else {
            amdgpu_asic_set_vce_clocks(adev, 0, 0);
+           amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+                                        AMD_PG_STATE_GATE);
+           amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+                                        AMD_CG_STATE_GATE);
        }
    } else {
        schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
@@ -346,6 +350,11 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
            amdgpu_dpm_enable_vce(adev, true);
        } else {
            amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
+           amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+                                        AMD_CG_STATE_UNGATE);
+           amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+                                        AMD_PG_STATE_UNGATE);
        }
    }
    mutex_unlock(&adev->vce.idle_mutex);
......
@@ -83,7 +83,6 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
        amdgpu_vm_bo_rmv(adev, bo_va);
        ttm_eu_backoff_reservation(&ticket, &list);
-       kfree(bo_va);
        return r;
    }
......
@@ -2210,7 +2210,6 @@ static void ci_clear_vc(struct amdgpu_device *adev)
 
 static int ci_upload_firmware(struct amdgpu_device *adev)
 {
-   struct ci_power_info *pi = ci_get_pi(adev);
    int i, ret;
 
    if (amdgpu_ci_is_smc_running(adev)) {
@@ -2227,7 +2226,7 @@ static int ci_upload_firmware(struct amdgpu_device *adev)
    amdgpu_ci_stop_smc_clock(adev);
    amdgpu_ci_reset_smc(adev);
 
-   ret = amdgpu_ci_load_smc_ucode(adev, pi->sram_end);
+   ret = amdgpu_ci_load_smc_ucode(adev, SMC_RAM_END);
 
    return ret;
@@ -4257,12 +4256,6 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev,
    if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
        if (amdgpu_new_state->evclk) {
-           /* turn the clocks on when encoding */
-           ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-                                              AMD_CG_STATE_UNGATE);
-           if (ret)
-               return ret;
-
            pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
            tmp = RREG32_SMC(ixDPM_TABLE_475);
            tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
@@ -4274,9 +4267,6 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev,
            ret = ci_enable_vce_dpm(adev, false);
            if (ret)
                return ret;
-           /* turn the clocks off when not encoding */
-           ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-                                              AMD_CG_STATE_GATE);
        }
    }
    return ret;
@@ -6278,13 +6268,13 @@ static int ci_dpm_sw_init(void *handle)
    adev->pm.current_mclk = adev->clock.default_mclk;
    adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
 
-   if (amdgpu_dpm == 0)
-       return 0;
-
    ret = ci_dpm_init_microcode(adev);
    if (ret)
        return ret;
 
+   if (amdgpu_dpm == 0)
+       return 0;
+
    INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
    mutex_lock(&adev->pm.mutex);
    ret = ci_dpm_init(adev);
@@ -6328,8 +6318,15 @@ static int ci_dpm_hw_init(void *handle)
 
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-   if (!amdgpu_dpm)
+   if (!amdgpu_dpm) {
+       ret = ci_upload_firmware(adev);
+       if (ret) {
+           DRM_ERROR("ci_upload_firmware failed\n");
+           return ret;
+       }
+       ci_dpm_start_smc(adev);
        return 0;
+   }
 
    mutex_lock(&adev->pm.mutex);
    ci_dpm_setup_asic(adev);
@@ -6351,6 +6348,8 @@ static int ci_dpm_hw_fini(void *handle)
        mutex_lock(&adev->pm.mutex);
        ci_dpm_disable(adev);
        mutex_unlock(&adev->pm.mutex);
+   } else {
+       ci_dpm_stop_smc(adev);
    }
 
    return 0;
......
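With the ci_dpm changes above, booting with dpm disabled (amdgpu_dpm=0) no longer skips the SMC entirely: hw_init still uploads the SMC firmware and starts the SMC, and hw_fini stops it, so later manual clock and gating requests keep working. The early-out path, as in the hunk above:

    if (!amdgpu_dpm) {
        ret = ci_upload_firmware(adev);
        if (ret) {
            DRM_ERROR("ci_upload_firmware failed\n");
            return ret;
        }
        ci_dpm_start_smc(adev); /* the SMC runs even with dpm disabled */
        return 0;
    }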
@@ -1722,8 +1722,8 @@ static int cik_common_early_init(void *handle)
            /*AMD_PG_SUPPORT_GFX_SMG |
            AMD_PG_SUPPORT_GFX_DMG |*/
            AMD_PG_SUPPORT_UVD |
-           /*AMD_PG_SUPPORT_VCE |
-           AMD_PG_SUPPORT_CP |
+           AMD_PG_SUPPORT_VCE |
+           /* AMD_PG_SUPPORT_CP |
            AMD_PG_SUPPORT_GDS |
            AMD_PG_SUPPORT_RLC_SMU_HS |
            AMD_PG_SUPPORT_ACP |
......
[Diff for one file is collapsed and not shown.]
@@ -1550,11 +1550,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
    if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
        kv_dpm_powergate_vce(adev, false);
-       /* turn the clocks on when encoding */
-       ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-                                          AMD_CG_STATE_UNGATE);
-       if (ret)
-           return ret;
        if (pi->caps_stable_p_state)
            pi->vce_boot_level = table->count - 1;
        else
@@ -1573,15 +1568,9 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
        amdgpu_kv_send_msg_to_smc_with_parameter(adev,
                                                 PPSMC_MSG_VCEDPM_SetEnabledMask,
                                                 (1 << pi->vce_boot_level));
        kv_enable_vce_dpm(adev, true);
    } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
        kv_enable_vce_dpm(adev, false);
-       /* turn the clocks off when not encoding */
-       ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-                                          AMD_CG_STATE_GATE);
-       if (ret)
-           return ret;
        kv_dpm_powergate_vce(adev, true);
    }
@@ -1688,70 +1677,44 @@ static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
    struct kv_power_info *pi = kv_get_pi(adev);
    int ret;
 
-   if (pi->uvd_power_gated == gate)
-       return;
-
    pi->uvd_power_gated = gate;
 
    if (gate) {
-       if (pi->caps_uvd_pg) {
-           /* disable clockgating so we can properly shut down the block */
-           ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
-                                              AMD_CG_STATE_UNGATE);
-           /* shutdown the UVD block */
-           ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
-                                              AMD_PG_STATE_GATE);
-           /* XXX: check for errors */
-       }
+       /* stop the UVD block */
+       ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+                                          AMD_PG_STATE_GATE);
        kv_update_uvd_dpm(adev, gate);
        if (pi->caps_uvd_pg)
            /* power off the UVD block */
            amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF);
    } else {
-       if (pi->caps_uvd_pg) {
+       if (pi->caps_uvd_pg)
            /* power on the UVD block */
            amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
-           /* re-init the UVD block */
-           ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
-                                              AMD_PG_STATE_UNGATE);
-           /* enable clockgating. hw will dynamically gate/ungate clocks on the fly */
-           ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
-                                              AMD_CG_STATE_GATE);
-           /* XXX: check for errors */
-       }
-       kv_update_uvd_dpm(adev, gate);
+       /* re-init the UVD block */
+       kv_update_uvd_dpm(adev, gate);
+       ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+                                          AMD_PG_STATE_UNGATE);
    }
 }
 
 static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
 {
    struct kv_power_info *pi = kv_get_pi(adev);
-   int ret;
 
    if (pi->vce_power_gated == gate)
        return;
 
    pi->vce_power_gated = gate;
 
-   if (gate) {
-       if (pi->caps_vce_pg) {
-           /* shutdown the VCE block */
-           ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-                                              AMD_PG_STATE_GATE);
-           /* XXX: check for errors */
-           /* power off the VCE block */
-           amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
-       }
-   } else {
-       if (pi->caps_vce_pg) {
-           /* power on the VCE block */
-           amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
-           /* re-init the VCE block */
-           ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-                                              AMD_PG_STATE_UNGATE);
-           /* XXX: check for errors */
-       }
-   }
+   if (!pi->caps_vce_pg)
+       return;
+
+   if (gate)
+       /* power off the VCE block */
+       amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
+   else
+       /* power on the VCE block */
+       amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
 }
 
 static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
@@ -3009,8 +2972,7 @@ static int kv_dpm_late_init(void *handle)
    kv_dpm_powergate_acp(adev, true);
    kv_dpm_powergate_samu(adev, true);
-   kv_dpm_powergate_vce(adev, true);
-   kv_dpm_powergate_uvd(adev, true);
 
    return 0;
 }
......
@@ -1010,11 +1010,23 @@ static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
    {PA_SC_RASTER_CONFIG, false, true},
 };
 
-static uint32_t si_read_indexed_register(struct amdgpu_device *adev,
-                                         u32 se_num, u32 sh_num,
-                                         u32 reg_offset)
+static uint32_t si_get_register_value(struct amdgpu_device *adev,
+                                      bool indexed, u32 se_num,
+                                      u32 sh_num, u32 reg_offset)
 {
+   if (indexed) {
    uint32_t val;
+   unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
+   unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
+
+   switch (reg_offset) {
+   case mmCC_RB_BACKEND_DISABLE:
+       return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
+   case mmGC_USER_RB_BACKEND_DISABLE:
+       return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
+   case mmPA_SC_RASTER_CONFIG:
+       return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
+   }
 
    mutex_lock(&adev->grbm_idx_mutex);
    if (se_num != 0xffffffff || sh_num != 0xffffffff)
@@ -1026,8 +1038,53 @@ static uint32_t si_read_indexed_register(struct amdgpu_device *adev,
    amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
    mutex_unlock(&adev->grbm_idx_mutex);
    return val;
+   } else {
+   unsigned idx;
+
+   switch (reg_offset) {
+   case mmGB_ADDR_CONFIG:
+       return adev->gfx.config.gb_addr_config;
+   case mmMC_ARB_RAMCFG:
+       return adev->gfx.config.mc_arb_ramcfg;
+   case mmGB_TILE_MODE0:
+   case mmGB_TILE_MODE1:
+   case mmGB_TILE_MODE2:
+   case mmGB_TILE_MODE3:
+   case mmGB_TILE_MODE4:
+   case mmGB_TILE_MODE5:
+   case mmGB_TILE_MODE6:
+   case mmGB_TILE_MODE7:
+   case mmGB_TILE_MODE8:
+   case mmGB_TILE_MODE9:
+   case mmGB_TILE_MODE10:
+   case mmGB_TILE_MODE11:
+   case mmGB_TILE_MODE12:
+   case mmGB_TILE_MODE13:
+   case mmGB_TILE_MODE14:
+   case mmGB_TILE_MODE15:
+   case mmGB_TILE_MODE16:
+   case mmGB_TILE_MODE17:
+   case mmGB_TILE_MODE18:
+   case mmGB_TILE_MODE19:
+   case mmGB_TILE_MODE20:
+   case mmGB_TILE_MODE21:
+   case mmGB_TILE_MODE22:
+   case mmGB_TILE_MODE23:
+   case mmGB_TILE_MODE24:
+   case mmGB_TILE_MODE25:
+   case mmGB_TILE_MODE26:
+   case mmGB_TILE_MODE27:
+   case mmGB_TILE_MODE28:
+   case mmGB_TILE_MODE29:
+   case mmGB_TILE_MODE30:
+   case mmGB_TILE_MODE31:
+       idx = (reg_offset - mmGB_TILE_MODE0);
+       return adev->gfx.config.tile_mode_array[idx];
+   default:
+       return RREG32(reg_offset);
+   }
+   }
 }
 
 static int si_read_register(struct amdgpu_device *adev, u32 se_num,
                             u32 sh_num, u32 reg_offset, u32 *value)
 {
@@ -1039,10 +1096,9 @@ static int si_read_register(struct amdgpu_device *adev, u32 se_num,
            continue;
 
        if (!si_allowed_read_registers[i].untouched)
-           *value = si_allowed_read_registers[i].grbm_indexed ?
-                    si_read_indexed_register(adev, se_num,
-                                              sh_num, reg_offset) :
-                    RREG32(reg_offset);
+           *value = si_get_register_value(adev,
+                                          si_allowed_read_registers[i].grbm_indexed,
+                                          se_num, sh_num, reg_offset);
 
        return 0;
    }
    return -EINVAL;
......
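After the si.c refactor above, reads of registers whose values the driver already cached at init (RB/raster config, tile modes, GB_ADDR_CONFIG, MC_ARB_RAMCFG) are answered from adev->gfx.config instead of touching the hardware; only unknown offsets fall back to a real register read. A simplified sketch of the non-indexed lookup (the range-case is a GCC extension used here for brevity only; the function above spells out every tile-mode case):

    switch (reg_offset) {
    case mmGB_ADDR_CONFIG:
        return adev->gfx.config.gb_addr_config;
    case mmMC_ARB_RAMCFG:
        return adev->gfx.config.mc_arb_ramcfg;
    case mmGB_TILE_MODE0 ... mmGB_TILE_MODE31:
        return adev->gfx.config.tile_mode_array[reg_offset - mmGB_TILE_MODE0];
    default:
        return RREG32(reg_offset); /* not cached: do a real read */
    }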
@@ -143,8 +143,8 @@
 #define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
 
 #define TAHITI_GB_ADDR_CONFIG_GOLDEN        0x12011003
-#define VERDE_GB_ADDR_CONFIG_GOLDEN         0x12010002
-#define HAINAN_GB_ADDR_CONFIG_GOLDEN        0x02010001
+#define VERDE_GB_ADDR_CONFIG_GOLDEN         0x02010002
+#define HAINAN_GB_ADDR_CONFIG_GOLDEN        0x02011003
 
 #define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
                         (((op) & 0xFF) << 8) | \
......
@@ -159,9 +159,6 @@ static int uvd_v4_2_hw_init(void *handle)
    uvd_v4_2_enable_mgcg(adev, true);
    amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
 
-   r = uvd_v4_2_start(adev);
-   if (r)
-       goto done;
-
    ring->ready = true;
    r = amdgpu_ring_test_ring(ring);
@@ -198,7 +195,6 @@ static int uvd_v4_2_hw_init(void *handle)
    amdgpu_ring_commit(ring);
 
 done:
-
    if (!r)
        DRM_INFO("UVD initialized successfully.\n");
@@ -217,7 +213,9 @@ static int uvd_v4_2_hw_fini(void *handle)
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    struct amdgpu_ring *ring = &adev->uvd.ring;
 
-   uvd_v4_2_stop(adev);
+   if (RREG32(mmUVD_STATUS) != 0)
+       uvd_v4_2_stop(adev);
 
    ring->ready = false;
    return 0;
@@ -267,37 +265,26 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
    struct amdgpu_ring *ring = &adev->uvd.ring;
    uint32_t rb_bufsz;
    int i, j, r;
+   u32 tmp;
    /* disable byte swapping */
    u32 lmi_swap_cntl = 0;
    u32 mp_swap_cntl = 0;
 
-   WREG32(mmUVD_CGC_GATE, 0);
-   uvd_v4_2_set_dcm(adev, true);
-
-   uvd_v4_2_mc_resume(adev);
+   /* set uvd busy */
+   WREG32_P(mmUVD_STATUS, 1<<2, ~(1<<2));
 
-   /* disable interupt */
-   WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
-
-   /* Stall UMC and register bus before resetting VCPU */
-   WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
-   mdelay(1);
-
-   /* put LMI, VCPU, RBC etc... into reset */
-   WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
-       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
-       UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
-       UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
-       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
-   mdelay(5);
+   uvd_v4_2_set_dcm(adev, true);
+   WREG32(mmUVD_CGC_GATE, 0);
 
    /* take UVD block out of reset */
    WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
    mdelay(5);
 
-   /* initialize UVD memory controller */
-   WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
-                          (1 << 21) | (1 << 9) | (1 << 20));
+   /* enable VCPU clock */
+   WREG32(mmUVD_VCPU_CNTL, 1 << 9);
+
+   /* disable interupt */
+   WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
 
 #ifdef __BIG_ENDIAN
    /* swap (8 in 32) RB and IB */
@@ -306,6 +293,11 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
 #endif
    WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
    WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
+   /* initialize UVD memory controller */
+   WREG32(mmUVD_LMI_CTRL, 0x203108);
+
+   tmp = RREG32(mmUVD_MPC_CNTL);
+   WREG32(mmUVD_MPC_CNTL, tmp | 0x10);
 
    WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
    WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
@@ -314,18 +306,20 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
    WREG32(mmUVD_MPC_SET_ALU, 0);
    WREG32(mmUVD_MPC_SET_MUX, 0x88);
 
-   /* take all subblocks out of reset, except VCPU */
-   WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
-   mdelay(5);
+   uvd_v4_2_mc_resume(adev);
 
-   /* enable VCPU clock */
-   WREG32(mmUVD_VCPU_CNTL, 1 << 9);
+   tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL);
+   WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10));
 
    /* enable UMC */
    WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
 
-   /* boot up the VCPU */
-   WREG32(mmUVD_SOFT_RESET, 0);
+   WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);
+
+   WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
+
+   WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+
    mdelay(10);
 
    for (i = 0; i < 10; ++i) {
@@ -357,6 +351,8 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
    /* enable interupt */
    WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1));
 
+   WREG32_P(mmUVD_STATUS, 0, ~(1<<2));
+
    /* force RBC into idle state */
    WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
@@ -393,22 +389,54 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
  */
 static void uvd_v4_2_stop(struct amdgpu_device *adev)
 {
-   /* force RBC into idle state */
+   uint32_t i, j;
+   uint32_t status;
+
    WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
 
+   for (i = 0; i < 10; ++i) {
+       for (j = 0; j < 100; ++j) {
+           status = RREG32(mmUVD_STATUS);
+           if (status & 2)
+               break;
+           mdelay(1);
+       }
+       break;
+   }
+
+   for (i = 0; i < 10; ++i) {
+       for (j = 0; j < 100; ++j) {
+           status = RREG32(mmUVD_LMI_STATUS);
+           if (status & 0xf)
+               break;
+           mdelay(1);
+       }
+       break;
+   }
+
    /* Stall UMC and register bus before resetting VCPU */
    WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
-   mdelay(1);
 
-   /* put VCPU into reset */
-   WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
-   mdelay(5);
+   for (i = 0; i < 10; ++i) {
+       for (j = 0; j < 100; ++j) {
+           status = RREG32(mmUVD_LMI_STATUS);
+           if (status & 0x240)
+               break;
+           mdelay(1);
+       }
+       break;
+   }
 
-   /* disable VCPU clock */
-   WREG32(mmUVD_VCPU_CNTL, 0x0);
+   WREG32_P(0x3D49, 0, ~(1 << 2));
 
-   /* Unstall UMC and register bus */
-   WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+   WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9));
+
+   /* put LMI, VCPU, RBC etc... into reset */
+   WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
+       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
+       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
+
+   WREG32(mmUVD_STATUS, 0);
 
    uvd_v4_2_set_dcm(adev, false);
 }
@@ -694,8 +722,24 @@ static int uvd_v4_2_set_powergating_state(void *handle,
 
    if (state == AMD_PG_STATE_GATE) {
        uvd_v4_2_stop(adev);
+       if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) {
+           if (!(RREG32_SMC(ixCURRENT_PG_STATUS) & 0x4)) {
+               WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
+                   UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK |
+                   UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
+               mdelay(20);
+           }
+       }
        return 0;
    } else {
+       if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) {
+           if (RREG32_SMC(ixCURRENT_PG_STATUS) & 0x4) {
+               WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
+                   UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK |
+                   UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
+               mdelay(30);
+           }
+       }
        return uvd_v4_2_start(adev);
    }
 }
......
@@ -152,9 +152,9 @@ static int uvd_v5_0_hw_init(void *handle)
    uint32_t tmp;
    int r;
 
-   r = uvd_v5_0_start(adev);
-   if (r)
-       goto done;
+   amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
+   uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
+   uvd_v5_0_enable_mgcg(adev, true);
 
    ring->ready = true;
    r = amdgpu_ring_test_ring(ring);
@@ -189,11 +189,13 @@ static int uvd_v5_0_hw_init(void *handle)
    amdgpu_ring_write(ring, 3);
    amdgpu_ring_commit(ring);
 
 done:
    if (!r)
        DRM_INFO("UVD initialized successfully.\n");
 
    return r;
 }
 
 /**
@@ -208,7 +210,9 @@ static int uvd_v5_0_hw_fini(void *handle)
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    struct amdgpu_ring *ring = &adev->uvd.ring;
 
-   uvd_v5_0_stop(adev);
+   if (RREG32(mmUVD_STATUS) != 0)
+       uvd_v5_0_stop(adev);
 
    ring->ready = false;
    return 0;
@@ -310,10 +314,6 @@ static int uvd_v5_0_start(struct amdgpu_device *adev)
 
    uvd_v5_0_mc_resume(adev);
 
-   amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
-   uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
-   uvd_v5_0_enable_mgcg(adev, true);
-
    /* disable interupt */
    WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
@@ -456,6 +456,8 @@ static void uvd_v5_0_stop(struct amdgpu_device *adev)
 
    /* Unstall UMC and register bus */
    WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+
+   WREG32(mmUVD_STATUS, 0);
 }
 
 /**
@@ -792,9 +794,6 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
 
-   if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
-       return 0;
-
    if (enable) {
        /* wait for STATUS to clear */
        if (uvd_v5_0_wait_for_idle(handle))
@@ -824,9 +823,6 @@ static int uvd_v5_0_set_powergating_state(void *handle,
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    int ret = 0;
 
-   if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
-       return 0;
-
    if (state == AMD_PG_STATE_GATE) {
        uvd_v5_0_stop(adev);
        adev->uvd.is_powergated = true;
......
@@ -155,9 +155,9 @@ static int uvd_v6_0_hw_init(void *handle)
    uint32_t tmp;
    int r;
 
-   r = uvd_v6_0_start(adev);
-   if (r)
-       goto done;
+   amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
+   uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
+   uvd_v6_0_enable_mgcg(adev, true);
 
    ring->ready = true;
    r = amdgpu_ring_test_ring(ring);
@@ -212,7 +212,9 @@ static int uvd_v6_0_hw_fini(void *handle)
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    struct amdgpu_ring *ring = &adev->uvd.ring;
 
-   uvd_v6_0_stop(adev);
+   if (RREG32(mmUVD_STATUS) != 0)
+       uvd_v6_0_stop(adev);
 
    ring->ready = false;
    return 0;
@@ -397,9 +399,6 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
    lmi_swap_cntl = 0;
    mp_swap_cntl = 0;
 
-   amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
-   uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
-   uvd_v6_0_enable_mgcg(adev, true);
-
    uvd_v6_0_mc_resume(adev);
 
    /* disable interupt */
@@ -554,6 +553,8 @@ static void uvd_v6_0_stop(struct amdgpu_device *adev)
 
    /* Unstall UMC and register bus */
    WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+
+   WREG32(mmUVD_STATUS, 0);
 }
 
 /**
@@ -1018,9 +1019,6 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
 
-   if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
-       return 0;
-
    if (enable) {
        /* wait for STATUS to clear */
        if (uvd_v6_0_wait_for_idle(handle))
@@ -1049,9 +1047,6 @@ static int uvd_v6_0_set_powergating_state(void *handle,
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    int ret = 0;
 
-   if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
-       return 0;
-
    WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
 
    if (state == AMD_PG_STATE_GATE) {
......
[Diff for one file is collapsed and not shown.]
@@ -230,10 +230,6 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
    struct amdgpu_ring *ring;
    int idx, r;
 
-   vce_v3_0_override_vce_clock_gating(adev, true);
-   if (!(adev->flags & AMD_IS_APU))
-       amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
-
    ring = &adev->vce.ring[0];
    WREG32(mmVCE_RB_RPTR, ring->wptr);
    WREG32(mmVCE_RB_WPTR, ring->wptr);
@@ -436,9 +432,9 @@ static int vce_v3_0_hw_init(void *handle)
    int r, i;
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-   r = vce_v3_0_start(adev);
-   if (r)
-       return r;
+   vce_v3_0_override_vce_clock_gating(adev, true);
+   if (!(adev->flags & AMD_IS_APU))
+       amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
 
    for (i = 0; i < adev->vce.num_rings; i++)
        adev->vce.ring[i].ready = false;
@@ -766,12 +762,11 @@ static int vce_v3_0_set_powergating_state(void *handle,
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    int ret = 0;
 
-   if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
-       return 0;
-
    if (state == AMD_PG_STATE_GATE) {
+       ret = vce_v3_0_stop(adev);
+       if (ret)
+           goto out;
        adev->vce.is_powergated = true;
-       /* XXX do we need a vce_v3_0_stop()? */
    } else {
        ret = vce_v3_0_start(adev);
        if (ret)
......
@@ -1310,5 +1310,6 @@
 #define ixROM_SW_DATA_62                    0xc060012c
 #define ixROM_SW_DATA_63                    0xc0600130
 #define ixROM_SW_DATA_64                    0xc0600134
+#define ixCURRENT_PG_STATUS                 0xc020029c
 
 #endif /* SMU_7_0_1_D_H */
@@ -161,28 +161,25 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
 {
    struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
 
-   if (cz_hwmgr->uvd_power_gated == bgate)
-       return 0;
-
    cz_hwmgr->uvd_power_gated = bgate;
 
    if (bgate) {
+       cgs_set_clockgating_state(hwmgr->device,
+                                 AMD_IP_BLOCK_TYPE_UVD,
+                                 AMD_CG_STATE_GATE);
        cgs_set_powergating_state(hwmgr->device,
                                  AMD_IP_BLOCK_TYPE_UVD,
                                  AMD_PG_STATE_GATE);
-       cgs_set_clockgating_state(hwmgr->device,
-                                 AMD_IP_BLOCK_TYPE_UVD,
-                                 AMD_CG_STATE_GATE);
        cz_dpm_update_uvd_dpm(hwmgr, true);
        cz_dpm_powerdown_uvd(hwmgr);
    } else {
        cz_dpm_powerup_uvd(hwmgr);
+       cgs_set_powergating_state(hwmgr->device,
+                                 AMD_IP_BLOCK_TYPE_UVD,
+                                 AMD_CG_STATE_UNGATE);
        cgs_set_clockgating_state(hwmgr->device,
                                  AMD_IP_BLOCK_TYPE_UVD,
-                                 AMD_PG_STATE_UNGATE);
-       cgs_set_powergating_state(hwmgr->device,
-                                 AMD_IP_BLOCK_TYPE_UVD,
                                  AMD_CG_STATE_UNGATE);
        cz_dpm_update_uvd_dpm(hwmgr, false);
    }
@@ -193,46 +190,33 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 {
    struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
 
-   if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_VCEPowerGating)) {
-       if (cz_hwmgr->vce_power_gated != bgate) {
-           if (bgate) {
-               cgs_set_powergating_state(
-                           hwmgr->device,
-                           AMD_IP_BLOCK_TYPE_VCE,
-                           AMD_PG_STATE_GATE);
-               cgs_set_clockgating_state(
-                           hwmgr->device,
-                           AMD_IP_BLOCK_TYPE_VCE,
-                           AMD_CG_STATE_GATE);
-               cz_enable_disable_vce_dpm(hwmgr, false);
-               cz_dpm_powerdown_vce(hwmgr);
-               cz_hwmgr->vce_power_gated = true;
-           } else {
-               cz_dpm_powerup_vce(hwmgr);
-               cz_hwmgr->vce_power_gated = false;
-               cgs_set_clockgating_state(
-                           hwmgr->device,
-                           AMD_IP_BLOCK_TYPE_VCE,
-                           AMD_PG_STATE_UNGATE);
-               cgs_set_powergating_state(
-                           hwmgr->device,
-                           AMD_IP_BLOCK_TYPE_VCE,
-                           AMD_CG_STATE_UNGATE);
-               cz_dpm_update_vce_dpm(hwmgr);
-               cz_enable_disable_vce_dpm(hwmgr, true);
-               return 0;
-           }
-       }
-   } else {
-       cz_hwmgr->vce_power_gated = bgate;
-       cz_dpm_update_vce_dpm(hwmgr);
-       cz_enable_disable_vce_dpm(hwmgr, !bgate);
-       return 0;
-   }
+   if (bgate) {
+       cgs_set_clockgating_state(hwmgr->device,
+                                 AMD_IP_BLOCK_TYPE_VCE,
+                                 AMD_CG_STATE_GATE);
+       cgs_set_powergating_state(hwmgr->device,
+                                 AMD_IP_BLOCK_TYPE_VCE,
+                                 AMD_PG_STATE_GATE);
+       cz_enable_disable_vce_dpm(hwmgr, false);
+       cz_dpm_powerdown_vce(hwmgr);
+       cz_hwmgr->vce_power_gated = true;
+   } else {
+       cz_dpm_powerup_vce(hwmgr);
+       cz_hwmgr->vce_power_gated = false;
+       cgs_set_powergating_state(hwmgr->device,
+                                 AMD_IP_BLOCK_TYPE_VCE,
+                                 AMD_CG_STATE_UNGATE);
+       cgs_set_clockgating_state(hwmgr->device,
+                                 AMD_IP_BLOCK_TYPE_VCE,
+                                 AMD_CG_STATE_UNGATE);
+       cz_dpm_update_vce_dpm(hwmgr);
+       cz_enable_disable_vce_dpm(hwmgr, true);
+       return 0;
+   }
+
+   if (!cz_hwmgr->vce_power_gated)
+       cz_dpm_update_vce_dpm(hwmgr);
 
    return 0;
 }
......
@@ -147,20 +147,20 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
    data->uvd_power_gated = bgate;
 
    if (bgate) {
+       cgs_set_clockgating_state(hwmgr->device,
+                                 AMD_IP_BLOCK_TYPE_UVD,
+                                 AMD_CG_STATE_GATE);
        cgs_set_powergating_state(hwmgr->device,
                                  AMD_IP_BLOCK_TYPE_UVD,
                                  AMD_PG_STATE_GATE);
-       cgs_set_clockgating_state(hwmgr->device,
-                                 AMD_IP_BLOCK_TYPE_UVD,
-                                 AMD_CG_STATE_GATE);
        smu7_update_uvd_dpm(hwmgr, true);
        smu7_powerdown_uvd(hwmgr);
    } else {
        smu7_powerup_uvd(hwmgr);
-       cgs_set_powergating_state(hwmgr->device,
+       cgs_set_clockgating_state(hwmgr->device,
                                  AMD_IP_BLOCK_TYPE_UVD,
                                  AMD_CG_STATE_UNGATE);
-       cgs_set_clockgating_state(hwmgr->device,
+       cgs_set_powergating_state(hwmgr->device,
                                  AMD_IP_BLOCK_TYPE_UVD,
                                  AMD_CG_STATE_UNGATE);
        smu7_update_uvd_dpm(hwmgr, false);
@@ -173,12 +173,12 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 {
    struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
-   if (data->vce_power_gated == bgate)
-       return 0;
-
    data->vce_power_gated = bgate;
 
    if (bgate) {
+       cgs_set_powergating_state(hwmgr->device,
+                                 AMD_IP_BLOCK_TYPE_VCE,
+                                 AMD_PG_STATE_UNGATE);
        cgs_set_clockgating_state(hwmgr->device,
                                  AMD_IP_BLOCK_TYPE_VCE,
                                  AMD_CG_STATE_GATE);
@@ -186,10 +186,13 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
        smu7_powerdown_vce(hwmgr);
    } else {
        smu7_powerup_vce(hwmgr);
+       smu7_update_vce_dpm(hwmgr, false);
        cgs_set_clockgating_state(hwmgr->device,
                                  AMD_IP_BLOCK_TYPE_VCE,
                                  AMD_CG_STATE_UNGATE);
-       smu7_update_vce_dpm(hwmgr, false);
+       cgs_set_powergating_state(hwmgr->device,
+                                 AMD_IP_BLOCK_TYPE_VCE,
+                                 AMD_PG_STATE_UNGATE);
    }
    return 0;
 }
......
@@ -2624,6 +2624,7 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
        smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
        smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
        smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
+       break;
    case AMD_DPM_FORCED_LEVEL_MANUAL:
        hwmgr->dpm_level = level;
@@ -2633,9 +2634,9 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
        break;
    }
 
-   if (level & (AMD_DPM_FORCED_LEVEL_PROFILE_PEAK | AMD_DPM_FORCED_LEVEL_HIGH))
+   if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
        smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
-   else
+   else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
        smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
 
    return 0;
@@ -4397,16 +4398,14 @@ static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
        if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
            return -EINVAL;
        dep_sclk_table = table_info->vdd_dep_on_sclk;
-       for (i = 0; i < dep_sclk_table->count; i++) {
+       for (i = 0; i < dep_sclk_table->count; i++)
            clocks->clock[i] = dep_sclk_table->entries[i].clk;
-           clocks->count++;
-       }
+       clocks->count = dep_sclk_table->count;
    } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
        sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
-       for (i = 0; i < sclk_table->count; i++) {
+       for (i = 0; i < sclk_table->count; i++)
            clocks->clock[i] = sclk_table->entries[i].clk;
-           clocks->count++;
-       }
+       clocks->count = sclk_table->count;
    }
 
    return 0;
@@ -4440,14 +4439,13 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
            clocks->clock[i] = dep_mclk_table->entries[i].clk;
            clocks->latency[i] = smu7_get_mem_latency(hwmgr,
                        dep_mclk_table->entries[i].clk);
-           clocks->count++;
        }
+       clocks->count = dep_mclk_table->count;
    } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
        mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
-       for (i = 0; i < mclk_table->count; i++) {
+       for (i = 0; i < mclk_table->count; i++)
            clocks->clock[i] = mclk_table->entries[i].clk;
-           clocks->count++;
-       }
+       clocks->count = mclk_table->count;
    }
    return 0;
 }
......
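Two small behaviour changes sit in the smu7_hwmgr.c hunks above: the fan is forced to 100% only on the transition into profile-peak mode (and restored only when leaving it), and the clock-table helpers now set clocks->count once from the table size instead of incrementing it per entry. The fan transition check, as a fragment of the hunk above:

    if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK &&
        hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
        smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);    /* entering peak */
    else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK &&
             hwmgr->saved_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
        smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);    /* leaving peak */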
@@ -37,8 +37,10 @@ MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin");
 MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
......
@@ -638,10 +638,8 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
                          vhdr->ImageLength,
                          GFP_KERNEL);
 
-   if (!rdev->bios) {
-       kfree(rdev->bios);
+   if (!rdev->bios)
        return false;
-   }
 
    return true;
 }
......
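The radeon_bios.c hunk above is a dead-code removal: rdev->bios is only NULL on that path because the preceding kmemdup() failed, so there is nothing to kfree(). The resulting pattern, as a hedged sketch (the source-pointer argument is abbreviated here; only ImageLength and GFP_KERNEL appear in the hunk):

    rdev->bios = kmemdup(src, vhdr->ImageLength, GFP_KERNEL); /* src: the VBIOS image */
    if (!rdev->bios)
        return false; /* allocation failed, nothing was allocated to free */
    return true;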
@@ -196,7 +196,7 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
    memset(&data[5], 0, 44);
    memcpy(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign));
 
-   data += le32_to_cpu(data[4]) / 4;
+   data += (le32_to_cpu(sign->len) + 64) / 4;
    data[0] = sign->val[i].sigval[0];
    data[1] = sign->val[i].sigval[1];
    data[2] = sign->val[i].sigval[2];
......