Commit f0511e66 authored by Dave Airlie

Merge branch 'drm-fixes-4.5' of git://people.freedesktop.org/~agd5f/linux into drm-fixes

Fixes for radeon and amdgpu:
- Fix GPUVM flushing on CI and VI
- Misc DPM and Powerplay fixes
- VCE DPM fixes for CZ/ST
- DP hotplug fix

* 'drm-fixes-4.5' of git://people.freedesktop.org/~agd5f/linux:
  drm/amdgpu: return from atombios_dp_get_dpcd only when error
  drm/amdgpu/cz: remove commented out call to enable vce pg
  drm/amdgpu/powerplay/cz: enable/disable vce dpm independent of vce pg
  drm/amdgpu/cz: enable/disable vce dpm even if vce pg is disabled
  drm/amdgpu/gfx8: specify which engine to wait before vm flush
  drm/amdgpu: apply gfx_v8 fixes to gfx_v7 as well
  drm/amd/powerplay: send event to notify powerplay all modules are initialized.
  drm/amd/powerplay: export AMD_PP_EVENT_COMPLETE_INIT task to amdgpu.
  drm/radeon/pm: update current crtc info after setting the powerstate
  drm/amdgpu/pm: update current crtc info after setting the powerstate
parents 2d02b8bd 0b39c531
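
The GPUVM flush fix for CI and VI boils down to one idea: before the VM flush is emitted, the ring must wait on its own last fence, and the WAIT_REG_MEM packet has to name which engine does the waiting (PFP for GFX rings, ME otherwise). A minimal sketch of that packet sequence follows; it reuses the helper and packet names that appear in the gfx_v7/gfx_v8 hunks further down, and the wrapper function name is made up for illustration only, it is not part of the kernel.

/* Sketch only: mirrors the fence-wait sequence added before the VM flush
 * in the gfx_v7/gfx_v8 hunks below.  The wrapper name is hypothetical. */
static void emit_fence_wait_before_vm_flush(struct amdgpu_ring *ring)
{
	/* GFX rings poll from the PFP, compute rings from the ME */
	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;   /* last fence emitted on this ring */
	uint64_t addr = ring->fence_drv.gpu_addr;  /* where that fence value is written */

	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) |    /* poll memory */
				 WAIT_REG_MEM_FUNCTION(3) |     /* until value == ref */
				 WAIT_REG_MEM_ENGINE(usepfp))); /* the fix: pfp or me */
	amdgpu_ring_write(ring, addr & 0xfffffffc);                /* fence address, low */
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); /* fence address, high */
	amdgpu_ring_write(ring, seq);        /* reference value */
	amdgpu_ring_write(ring, 0xffffffff); /* compare mask */
	amdgpu_ring_write(ring, 4);          /* poll interval */
}

On VI (gfx_v8) the wait already existed, so that hunk only adds the WAIT_REG_MEM_ENGINE(usepfp) bit; on CI (gfx_v7) the whole sequence is new.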
@@ -77,7 +77,7 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
 	} else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
 		/* Don't try to start link training before we
 		 * have the dpcd */
-		if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+		if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
 			return;
 		/* set it to OFF so that drm_helper_connector_dpms()
...
@@ -649,9 +649,6 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 	/* update display watermarks based on new power state */
 	amdgpu_display_bandwidth_update(adev);
-	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
-	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
 	/* wait for the rings to drain */
 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];
@@ -670,6 +667,9 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 	/* update displays */
 	amdgpu_dpm_display_configuration_changed(adev);
+	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
+	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
 	if (adev->pm.funcs->force_performance_level) {
 		if (adev->pm.dpm.thermal_active) {
 			enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
...
@@ -143,8 +143,10 @@ static int amdgpu_pp_late_init(void *handle)
 					adev->powerplay.pp_handle);
 #ifdef CONFIG_DRM_AMD_POWERPLAY
-	if (adev->pp_enabled)
+	if (adev->pp_enabled) {
 		amdgpu_pm_sysfs_init(adev);
+		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
+	}
 #endif
 	return ret;
 }
...
@@ -2202,8 +2202,7 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
 						    AMD_PG_STATE_GATE);
 			cz_enable_vce_dpm(adev, false);
-			/* TODO: to figure out why vce can't be poweroff. */
-			/* cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF); */
+			cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF);
 			pi->vce_power_gated = true;
 		} else {
 			cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerON);
@@ -2226,10 +2225,8 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
 		}
 	} else { /*pi->caps_vce_pg*/
 		cz_update_vce_dpm(adev);
-		cz_enable_vce_dpm(adev, true);
+		cz_enable_vce_dpm(adev, !gate);
 	}
-
-	return;
 }

 const struct amd_ip_funcs cz_dpm_ip_funcs = {
...
@@ -3628,6 +3628,19 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					unsigned vm_id, uint64_t pd_addr)
 {
 	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+	uint32_t seq = ring->fence_drv.sync_seq;
+	uint64_t addr = ring->fence_drv.gpu_addr;
+
+	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
+				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
+				 WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
+	amdgpu_ring_write(ring, addr & 0xfffffffc);
+	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+	amdgpu_ring_write(ring, seq);
+	amdgpu_ring_write(ring, 0xffffffff);
+	amdgpu_ring_write(ring, 4); /* poll interval */
+
 	if (usepfp) {
 		/* synce CE with ME to prevent CE fetch CEIB before context switch done */
 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
...
@@ -4809,7 +4809,8 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
 	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
-				 WAIT_REG_MEM_FUNCTION(3))); /* equal */
+				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
+				 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
 	amdgpu_ring_write(ring, addr & 0xfffffffc);
 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
 	amdgpu_ring_write(ring, seq);
...
@@ -402,8 +402,11 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
 		data.requested_ui_label = power_state_convert(ps);
 		ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
+		break;
 	}
-	break;
+	case AMD_PP_EVENT_COMPLETE_INIT:
+		ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
+		break;
 	default:
 		break;
 	}
...
@@ -165,6 +165,7 @@ const struct action_chain resume_action_chain = {
 };

 static const pem_event_action *complete_init_event[] = {
+	unblock_adjust_power_state_tasks,
 	adjust_power_state_tasks,
 	enable_gfx_clock_gating_tasks,
 	enable_gfx_voltage_island_power_gating_tasks,
...
@@ -226,7 +226,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 		}
 	} else {
 		cz_dpm_update_vce_dpm(hwmgr);
-		cz_enable_disable_vce_dpm(hwmgr, true);
+		cz_enable_disable_vce_dpm(hwmgr, !bgate);
 		return 0;
 	}
...
@@ -1080,10 +1080,6 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
 	/* update display watermarks based on new power state */
 	radeon_bandwidth_update(rdev);
-	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
-	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
-	rdev->pm.dpm.single_display = single_display;
 	/* wait for the rings to drain */
 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
 		struct radeon_ring *ring = &rdev->ring[i];
@@ -1102,6 +1098,10 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
 	/* update displays */
 	radeon_dpm_display_configuration_changed(rdev);
+	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
+	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+	rdev->pm.dpm.single_display = single_display;
 	if (rdev->asic->dpm.force_performance_level) {
 		if (rdev->pm.dpm.thermal_active) {
 			enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
...