Commit 61d7d0d5 authored by Evan Quan, committed by Alex Deucher

drm/amd/pm: revise the performance level setting APIs

Avoid cross calls, which make it impossible to enforce lock protection
on amdgpu_dpm_force_performance_level().
Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 3bce90bf
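
The diff below drops the per-IP enable_umd_pstate callback: the powerplay backend (pp_dpm_en_umd_pstate) and the swsmu backend (smu_enable_umd_pstate) no longer reach back into the GFX clock/power gating helpers, and the sysfs handler amdgpu_set_power_dpm_force_performance_level() now makes that gating decision itself before calling into the DPM code. As a reading aid, here is a minimal standalone C sketch of that caller-side decision; the enum values, the helper names (set_gfx_powergating/set_gfx_clockgating) and the printed output are illustrative stand-ins, not the driver's real API.

#include <stdbool.h>
#include <stdio.h>

enum dpm_level {
    LEVEL_AUTO             = 1 << 0,
    LEVEL_PROFILE_STANDARD = 1 << 1,
    LEVEL_PROFILE_MIN_SCLK = 1 << 2,
    LEVEL_PROFILE_MIN_MCLK = 1 << 3,
    LEVEL_PROFILE_PEAK     = 1 << 4,
    LEVEL_PROFILE_EXIT     = 1 << 5,
};

/* Union of the profiling levels, mirroring profile_mode_mask in the patch. */
static const unsigned int profile_mode_mask =
    LEVEL_PROFILE_STANDARD | LEVEL_PROFILE_MIN_SCLK |
    LEVEL_PROFILE_MIN_MCLK | LEVEL_PROFILE_PEAK;

/* Stand-ins for the GFX clock/power gating helpers (illustrative only). */
static void set_gfx_powergating(bool gate)
{
    printf("GFX powergating: %s\n", gate ? "gate" : "ungate");
}

static void set_gfx_clockgating(bool gate)
{
    printf("GFX clockgating: %s\n", gate ? "gate" : "ungate");
}

/*
 * Caller-side gating decision, modeled on the sysfs handler after this
 * patch: ungate when moving from a non-profiling level into a profiling
 * one, re-gate when leaving profiling mode, and otherwise do nothing
 * before the (now lockable) performance-level call.
 */
static void handle_level_change(unsigned int current_level, unsigned int new_level)
{
    if (!(current_level & profile_mode_mask) &&
        (new_level & profile_mode_mask)) {
        /* enter UMD pstate: ungate power first, then clocks */
        set_gfx_powergating(false);
        set_gfx_clockgating(false);
    } else if ((current_level & profile_mode_mask) &&
               !(new_level & profile_mode_mask)) {
        /* exit UMD pstate: reverse order, clocks first, then power */
        set_gfx_clockgating(true);
        set_gfx_powergating(true);
    }
    /* a single call into the DPM backend would follow here */
}

int main(void)
{
    handle_level_change(LEVEL_AUTO, LEVEL_PROFILE_PEAK); /* enter profiling */
    handle_level_change(LEVEL_PROFILE_PEAK, LEVEL_AUTO); /* leave profiling */
    return 0;
}

The actual patch performs the same membership test with the AMD_DPM_FORCED_LEVEL_* flags and calls amdgpu_device_ip_set_powergating_state()/amdgpu_device_ip_set_clockgating_state() directly, as shown in the amdgpu_pm.c hunks below.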
@@ -268,7 +268,6 @@ enum amd_dpm_forced_level;
  * @set_clockgating_state: enable/disable cg for the IP block
  * @set_powergating_state: enable/disable pg for the IP block
  * @get_clockgating_state: get current clockgating status
- * @enable_umd_pstate: enable UMD powerstate
  *
  * These hooks provide an interface for controlling the operational state
  * of IP blocks. After acquiring a list of IP blocks for the GPU in use,
@@ -299,7 +298,6 @@ struct amd_ip_funcs {
     int (*set_powergating_state)(void *handle,
                                  enum amd_powergating_state state);
     void (*get_clockgating_state)(void *handle, u32 *flags);
-    int (*enable_umd_pstate)(void *handle, enum amd_dpm_forced_level *level);
 };
...
@@ -300,6 +300,10 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
     struct amdgpu_device *adev = drm_to_adev(ddev);
     enum amd_dpm_forced_level level;
     enum amd_dpm_forced_level current_level;
+    uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+                                 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+                                 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+                                 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
     int ret = 0;
     if (amdgpu_in_reset(adev))
@@ -354,10 +358,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
     }
     /* profile_exit setting is valid only when current mode is in profile mode */
-    if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
-                           AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
-                           AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
-                           AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
+    if (!(current_level & profile_mode_mask) &&
         (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
         pr_err("Currently not in any profile mode!\n");
         pm_runtime_mark_last_busy(ddev->dev);
@@ -365,6 +366,26 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
         return -EINVAL;
     }
+    if (!(current_level & profile_mode_mask) &&
+        (level & profile_mode_mask)) {
+        /* enter UMD Pstate */
+        amdgpu_device_ip_set_powergating_state(adev,
+                                               AMD_IP_BLOCK_TYPE_GFX,
+                                               AMD_PG_STATE_UNGATE);
+        amdgpu_device_ip_set_clockgating_state(adev,
+                                               AMD_IP_BLOCK_TYPE_GFX,
+                                               AMD_CG_STATE_UNGATE);
+    } else if ((current_level & profile_mode_mask) &&
+               !(level & profile_mode_mask)) {
+        /* exit UMD Pstate */
+        amdgpu_device_ip_set_clockgating_state(adev,
+                                               AMD_IP_BLOCK_TYPE_GFX,
+                                               AMD_CG_STATE_GATE);
+        amdgpu_device_ip_set_powergating_state(adev,
+                                               AMD_IP_BLOCK_TYPE_GFX,
+                                               AMD_PG_STATE_GATE);
+    }
     if (amdgpu_dpm_force_performance_level(adev, level)) {
         pm_runtime_mark_last_busy(ddev->dev);
         pm_runtime_put_autosuspend(ddev->dev);
...
@@ -953,6 +953,7 @@ static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
 static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 {
+    const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
     struct amdgpu_ps *ps;
     enum amd_pm_state_type dpm_state;
     int ret;
@@ -976,7 +977,7 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
     else
         return -EINVAL;
-    if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
+    if (amdgpu_dpm == 1 && pp_funcs->print_power_state) {
         printk("switching from power state:\n");
         amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
         printk("switching to power state:\n");
@@ -985,14 +986,14 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
     /* update whether vce is active */
     ps->vce_active = adev->pm.dpm.vce_active;
-    if (adev->powerplay.pp_funcs->display_configuration_changed)
+    if (pp_funcs->display_configuration_changed)
         amdgpu_dpm_display_configuration_changed(adev);
     ret = amdgpu_dpm_pre_set_power_state(adev);
     if (ret)
         return ret;
-    if (adev->powerplay.pp_funcs->check_state_equal) {
+    if (pp_funcs->check_state_equal) {
         if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
             equal = false;
     }
@@ -1000,24 +1001,24 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
     if (equal)
         return 0;
-    if (adev->powerplay.pp_funcs->set_power_state)
-        adev->powerplay.pp_funcs->set_power_state(adev->powerplay.pp_handle);
+    if (pp_funcs->set_power_state)
+        pp_funcs->set_power_state(adev->powerplay.pp_handle);
     amdgpu_dpm_post_set_power_state(adev);
     adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
     adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
-    if (adev->powerplay.pp_funcs->force_performance_level) {
+    if (pp_funcs->force_performance_level) {
         if (adev->pm.dpm.thermal_active) {
             enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
             /* force low perf level for thermal */
-            amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
+            pp_funcs->force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
             /* save the user's level */
             adev->pm.dpm.forced_level = level;
         } else {
             /* otherwise, user selected level */
-            amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
+            pp_funcs->force_performance_level(adev, adev->pm.dpm.forced_level);
         }
     }
...
@@ -323,12 +323,6 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
         if (*level & profile_mode_mask) {
             hwmgr->saved_dpm_level = hwmgr->dpm_level;
             hwmgr->en_umd_pstate = true;
-            amdgpu_device_ip_set_powergating_state(hwmgr->adev,
-                    AMD_IP_BLOCK_TYPE_GFX,
-                    AMD_PG_STATE_UNGATE);
-            amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
-                    AMD_IP_BLOCK_TYPE_GFX,
-                    AMD_CG_STATE_UNGATE);
         }
     } else {
         /* exit umd pstate, restore level, enable gfx cg*/
@@ -336,12 +330,6 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
             if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
                 *level = hwmgr->saved_dpm_level;
             hwmgr->en_umd_pstate = false;
-            amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
-                    AMD_IP_BLOCK_TYPE_GFX,
-                    AMD_CG_STATE_GATE);
-            amdgpu_device_ip_set_powergating_state(hwmgr->adev,
-                    AMD_IP_BLOCK_TYPE_GFX,
-                    AMD_PG_STATE_GATE);
         }
     }
 }
...
@@ -1677,14 +1677,7 @@ static int smu_enable_umd_pstate(void *handle,
         /* enter umd pstate, save current level, disable gfx cg*/
         if (*level & profile_mode_mask) {
             smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
-            smu_dpm_ctx->enable_umd_pstate = true;
             smu_gpo_control(smu, false);
-            amdgpu_device_ip_set_powergating_state(smu->adev,
-                                                   AMD_IP_BLOCK_TYPE_GFX,
-                                                   AMD_PG_STATE_UNGATE);
-            amdgpu_device_ip_set_clockgating_state(smu->adev,
-                                                   AMD_IP_BLOCK_TYPE_GFX,
-                                                   AMD_CG_STATE_UNGATE);
             smu_gfx_ulv_control(smu, false);
             smu_deep_sleep_control(smu, false);
             amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
@@ -1694,16 +1687,9 @@ static int smu_enable_umd_pstate(void *handle,
         if (!(*level & profile_mode_mask)) {
             if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
                 *level = smu_dpm_ctx->saved_dpm_level;
-            smu_dpm_ctx->enable_umd_pstate = false;
             amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
             smu_deep_sleep_control(smu, true);
             smu_gfx_ulv_control(smu, true);
-            amdgpu_device_ip_set_clockgating_state(smu->adev,
-                                                   AMD_IP_BLOCK_TYPE_GFX,
-                                                   AMD_CG_STATE_GATE);
-            amdgpu_device_ip_set_powergating_state(smu->adev,
-                                                   AMD_IP_BLOCK_TYPE_GFX,
-                                                   AMD_PG_STATE_GATE);
             smu_gpo_control(smu, true);
         }
     }
@@ -2149,7 +2135,6 @@ const struct amd_ip_funcs smu_ip_funcs = {
     .soft_reset = NULL,
     .set_clockgating_state = smu_set_clockgating_state,
     .set_powergating_state = smu_set_powergating_state,
-    .enable_umd_pstate = smu_enable_umd_pstate,
 };
 const struct amdgpu_ip_block_version smu_v11_0_ip_block =
...
@@ -363,7 +363,6 @@ struct smu_dpm_context {
     uint32_t dpm_context_size;
     void *dpm_context;
     void *golden_dpm_context;
-    bool enable_umd_pstate;
     enum amd_dpm_forced_level dpm_level;
     enum amd_dpm_forced_level saved_dpm_level;
     enum amd_dpm_forced_level requested_dpm_level;
...