Commit 348e1bc5 authored by Stanley Chu, committed by Martin K. Petersen

scsi: ufs: Clean up and refactor clk-scaling feature

Manipulate clock-scaling state only if the host capability supports the
clock-scaling feature, to avoid redundant code execution.

Link: https://lore.kernel.org/r/20210120150142.5049-4-stanley.chu@mediatek.com
Reviewed-by: Can Guo <cang@codeaurora.org>
Signed-off-by: Stanley Chu <stanley.chu@mediatek.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent b058fa86
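For context, every check this patch adds goes through the driver's capability
predicate. A minimal sketch of that helper, assuming it reads the
UFSHCD_CAP_CLK_SCALING bit from hba->caps as in the mainline ufshcd.h of this
era:

/* Illustrative sketch, not part of this patch: the real helper lives
 * in drivers/scsi/ufs/ufshcd.h. */
static inline bool ufshcd_is_clkscaling_supported(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_CLK_SCALING;
}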
@@ -1500,9 +1500,6 @@ static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
 	unsigned long flags;
 	bool suspend = false;
 
-	if (!ufshcd_is_clkscaling_supported(hba))
-		return;
-
 	cancel_work_sync(&hba->clk_scaling.suspend_work);
 	cancel_work_sync(&hba->clk_scaling.resume_work);
@@ -1522,9 +1519,6 @@ static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
 	unsigned long flags;
 	bool resume = false;
 
-	if (!ufshcd_is_clkscaling_supported(hba))
-		return;
-
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	if (hba->clk_scaling.is_suspended) {
 		resume = true;
@@ -5758,6 +5752,26 @@ static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
 	}
 }
 
+static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
+{
+	down_write(&hba->clk_scaling_lock);
+	hba->clk_scaling.is_allowed = allow;
+	up_write(&hba->clk_scaling_lock);
+}
+
+static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
+{
+	if (suspend) {
+		if (hba->clk_scaling.is_enabled)
+			ufshcd_suspend_clkscaling(hba);
+		ufshcd_clk_scaling_allow(hba, false);
+	} else {
+		ufshcd_clk_scaling_allow(hba, true);
+		if (hba->clk_scaling.is_enabled)
+			ufshcd_resume_clkscaling(hba);
+	}
+}
+
 static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
 {
 	pm_runtime_get_sync(hba->dev);
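The two new helpers order their steps deliberately: suspending quiesces the
scaling work before clearing is_allowed, while resuming sets is_allowed before
restarting the work. A hypothetical call pairing (not taken from this patch)
to illustrate:

/* Hypothetical usage, for illustration only. */
ufshcd_clk_scaling_suspend(hba, true);	/* suspend work, then disallow */
/* ... error handling or a PM transition runs with scaling quiesced ... */
ufshcd_clk_scaling_suspend(hba, false);	/* allow, then resume work */

This is the pattern the error-handling and suspend/resume paths below now
consume.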
@@ -5782,22 +5796,18 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
 		ufshcd_vops_resume(hba, pm_op);
 	} else {
 		ufshcd_hold(hba, false);
-		if (hba->clk_scaling.is_enabled)
+		if (ufshcd_is_clkscaling_supported(hba) &&
+		    hba->clk_scaling.is_enabled)
 			ufshcd_suspend_clkscaling(hba);
-		down_write(&hba->clk_scaling_lock);
-		hba->clk_scaling.is_allowed = false;
-		up_write(&hba->clk_scaling_lock);
+		ufshcd_clk_scaling_allow(hba, false);
 	}
 }
 
 static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
 {
 	ufshcd_release(hba);
-	down_write(&hba->clk_scaling_lock);
-	hba->clk_scaling.is_allowed = true;
-	up_write(&hba->clk_scaling_lock);
-	if (hba->clk_scaling.is_enabled)
-		ufshcd_resume_clkscaling(hba);
+	if (ufshcd_is_clkscaling_supported(hba))
+		ufshcd_clk_scaling_suspend(hba, false);
 	pm_runtime_put(hba->dev);
 }
@@ -8694,12 +8704,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	ufshcd_hold(hba, false);
 	hba->clk_gating.is_suspended = true;
 
-	if (hba->clk_scaling.is_enabled)
-		ufshcd_suspend_clkscaling(hba);
-	down_write(&hba->clk_scaling_lock);
-	hba->clk_scaling.is_allowed = false;
-	up_write(&hba->clk_scaling_lock);
+	if (ufshcd_is_clkscaling_supported(hba))
+		ufshcd_clk_scaling_suspend(hba, true);
 
 	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
 	    req_link_state == UIC_LINK_ACTIVE_STATE) {
@@ -8819,11 +8825,9 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
 		ufshcd_disable_auto_bkops(hba);
 enable_gating:
-	down_write(&hba->clk_scaling_lock);
-	hba->clk_scaling.is_allowed = true;
-	up_write(&hba->clk_scaling_lock);
-	if (hba->clk_scaling.is_enabled)
-		ufshcd_resume_clkscaling(hba);
+	if (ufshcd_is_clkscaling_supported(hba))
+		ufshcd_clk_scaling_suspend(hba, false);
+
 	hba->clk_gating.is_suspended = false;
 	hba->dev_info.b_rpm_dev_flush_capable = false;
 	ufshcd_release(hba);
@@ -8925,11 +8929,8 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 
 	hba->clk_gating.is_suspended = false;
 
-	down_write(&hba->clk_scaling_lock);
-	hba->clk_scaling.is_allowed = true;
-	up_write(&hba->clk_scaling_lock);
-	if (hba->clk_scaling.is_enabled)
-		ufshcd_resume_clkscaling(hba);
+	if (ufshcd_is_clkscaling_supported(hba))
+		ufshcd_clk_scaling_suspend(hba, false);
 
 	/* Enable Auto-Hibernate if configured */
 	ufshcd_auto_hibern8_enable(hba);