Commit 078f4f4b authored by Bart Van Assche, committed by Martin K. Petersen

scsi: ufs: Ungate the clock synchronously

Ungating the clock asynchronously causes ufshcd_queuecommand() to return
SCSI_MLQUEUE_HOST_BUSY and hence causes commands to be requeued.  This is
suboptimal. Allow ufshcd_queuecommand() to sleep such that clock ungating
does not trigger command requeuing. Remove the ufshcd_scsi_block_requests()
and ufshcd_scsi_unblock_requests() calls because these are no longer
needed. The flush_work(&hba->clk_gating.ungate_work) call is sufficient to
make the SCSI core wait for clock ungating to complete.
Acked-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20230529202640.11883-6-bvanassche@acm.org
Reviewed-by: Bean Huo <beanhuo@micron.com>
Reviewed-by: Bao D. Nguyen <quic_nguyenb@quicinc.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 4b68b7f9
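For context, a minimal sketch (not part of this patch; the surrounding caller code is hypothetical) contrasting the old and new ufshcd_hold() calling conventions on the I/O fast path:

	/* Before: asynchronous ungating could fail and force a requeue. */
	err = ufshcd_hold(hba, true /* async */);
	if (err)
		return SCSI_MLQUEUE_HOST_BUSY;	/* command is requeued */

	/* After: ufshcd_hold() may sleep in flush_work() until the clocks are ungated. */
	ufshcd_hold(hba);

With the synchronous variant the caller simply waits for the ungate work to finish instead of bouncing the command back to the SCSI core, which is what the hunks below implement.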
@@ -168,7 +168,7 @@ static ssize_t auto_hibern8_show(struct device *dev,
 }
 pm_runtime_get_sync(hba->dev);
-ufshcd_hold(hba, false);
+ufshcd_hold(hba);
 ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
 ufshcd_release(hba);
 pm_runtime_put_sync(hba->dev);
@@ -24,7 +24,7 @@ static int ufshcd_program_key(struct ufs_hba *hba,
 u32 slot_offset = hba->crypto_cfg_register + slot * sizeof(*cfg);
 int err = 0;
-ufshcd_hold(hba, false);
+ufshcd_hold(hba);
 if (hba->vops && hba->vops->program_key) {
 err = hba->vops->program_key(hba, cfg, slot);
@@ -1205,7 +1205,7 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
 bool timeout = false, do_last_check = false;
 ktime_t start;
-ufshcd_hold(hba, false);
+ufshcd_hold(hba);
 spin_lock_irqsave(hba->host->host_lock, flags);
 /*
  * Wait for all the outstanding tasks/transfer requests.
@@ -1326,7 +1326,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
 }
 /* let's not get into low power until clock scaling is completed */
-ufshcd_hold(hba, false);
+ufshcd_hold(hba);
 out:
 return ret;
@@ -1656,7 +1656,7 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
 goto out;
 ufshcd_rpm_get_sync(hba);
-ufshcd_hold(hba, false);
+ufshcd_hold(hba);
 hba->clk_scaling.is_enabled = value;
@@ -1739,7 +1739,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
 spin_lock_irqsave(hba->host->host_lock, flags);
 if (hba->clk_gating.state == CLKS_ON) {
 spin_unlock_irqrestore(hba->host->host_lock, flags);
-goto unblock_reqs;
+return;
 }
 spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1762,25 +1762,21 @@ static void ufshcd_ungate_work(struct work_struct *work)
 }
 hba->clk_gating.is_suspended = false;
 }
-unblock_reqs:
-ufshcd_scsi_unblock_requests(hba);
 }
 /**
  * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
  * Also, exit from hibern8 mode and set the link as active.
  * @hba: per adapter instance
- * @async: This indicates whether caller should ungate clocks asynchronously.
  */
-int ufshcd_hold(struct ufs_hba *hba, bool async)
+void ufshcd_hold(struct ufs_hba *hba)
 {
-int rc = 0;
 bool flush_result;
 unsigned long flags;
 if (!ufshcd_is_clkgating_allowed(hba) ||
 !hba->clk_gating.is_initialized)
-goto out;
+return;
 spin_lock_irqsave(hba->host->host_lock, flags);
 hba->clk_gating.active_reqs++;
@@ -1797,15 +1793,10 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
 */
 if (ufshcd_can_hibern8_during_gating(hba) &&
 ufshcd_is_link_hibern8(hba)) {
-if (async) {
-rc = -EAGAIN;
-hba->clk_gating.active_reqs--;
-break;
-}
 spin_unlock_irqrestore(hba->host->host_lock, flags);
 flush_result = flush_work(&hba->clk_gating.ungate_work);
 if (hba->clk_gating.is_suspended && !flush_result)
-goto out;
+return;
 spin_lock_irqsave(hba->host->host_lock, flags);
 goto start;
 }
@@ -1827,21 +1818,14 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
 hba->clk_gating.state = REQ_CLKS_ON;
 trace_ufshcd_clk_gating(dev_name(hba->dev),
 hba->clk_gating.state);
-if (queue_work(hba->clk_gating.clk_gating_workq,
-&hba->clk_gating.ungate_work))
-ufshcd_scsi_block_requests(hba);
+queue_work(hba->clk_gating.clk_gating_workq,
+&hba->clk_gating.ungate_work);
 /*
  * fall through to check if we should wait for this
  * work to be done or not.
  */
 fallthrough;
 case REQ_CLKS_ON:
-if (async) {
-rc = -EAGAIN;
-hba->clk_gating.active_reqs--;
-break;
-}
 spin_unlock_irqrestore(hba->host->host_lock, flags);
 flush_work(&hba->clk_gating.ungate_work);
 /* Make sure state is CLKS_ON before returning */
@@ -1853,8 +1837,6 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
 break;
 }
 spin_unlock_irqrestore(hba->host->host_lock, flags);
-out:
-return rc;
 }
 EXPORT_SYMBOL_GPL(ufshcd_hold);
@@ -2086,7 +2068,7 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
 ufshcd_remove_clk_gating_sysfs(hba);
 /* Ungate the clock if necessary. */
-ufshcd_hold(hba, false);
+ufshcd_hold(hba);
 hba->clk_gating.is_initialized = false;
 ufshcd_release(hba);
@@ -2482,7 +2464,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
 return 0;
-ufshcd_hold(hba, false);
+ufshcd_hold(hba);
 mutex_lock(&hba->uic_cmd_mutex);
 ufshcd_add_delay_before_dme_cmd(hba);
@@ -2885,12 +2867,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);
-/*
- * Allows the UFS error handler to wait for prior ufshcd_queuecommand()
- * calls.
- */
-rcu_read_lock();
 switch (hba->ufshcd_state) {
 case UFSHCD_STATE_OPERATIONAL:
 break;
@@ -2936,13 +2912,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 hba->req_abort_count = 0;
-err = ufshcd_hold(hba, true);
-if (err) {
-err = SCSI_MLQUEUE_HOST_BUSY;
-goto out;
-}
-WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
-(hba->clk_gating.state != CLKS_ON));
+ufshcd_hold(hba);
 lrbp = &hba->lrb[tag];
 WARN_ON(lrbp->cmd);
@@ -2972,8 +2942,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 ufshcd_send_command(hba, tag, hwq);
 out:
-rcu_read_unlock();
 if (ufs_trigger_eh()) {
 unsigned long flags;
@@ -3267,7 +3235,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 BUG_ON(!hba);
-ufshcd_hold(hba, false);
+ufshcd_hold(hba);
 mutex_lock(&hba->dev_cmd.lock);
 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
 selector);
@@ -3341,7 +3309,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 return -EINVAL;
 }
-ufshcd_hold(hba, false);
+ufshcd_hold(hba);
 mutex_lock(&hba->dev_cmd.lock);
 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
@@ -3437,7 +3405,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
 return -EINVAL;
 }
-ufshcd_hold(hba, false);
+ufshcd_hold(hba);
 mutex_lock(&hba->dev_cmd.lock);
 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
@@ -4255,7 +4223,7 @@ int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
 uic_cmd.command = UIC_CMD_DME_SET;
 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
 uic_cmd.argument3 = mode;
-ufshcd_hold(hba, false);
+ufshcd_hold(hba);
 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
 ufshcd_release(hba);
@@ -4362,7 +4330,7 @@ void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
 if (update &&
 !pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
 ufshcd_rpm_get_sync(hba);
-ufshcd_hold(hba, false);
+ufshcd_hold(hba);
 ufshcd_auto_hibern8_enable(hba);
 ufshcd_release(hba);
 ufshcd_rpm_put_sync(hba);
@@ -4955,7 +4923,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
 int err = 0;
 int retries;
-ufshcd_hold(hba, false);
+ufshcd_hold(hba);
 mutex_lock(&hba->dev_cmd.lock);
 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
@@ -6241,22 +6209,22 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
 ufshcd_setup_vreg(hba, true);
 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
-ufshcd_hold(hba, false);
+ufshcd_hold(hba);
 if (!ufshcd_is_clkgating_allowed(hba))
 ufshcd_setup_clocks(hba, true);
 ufshcd_release(hba);
 pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
 ufshcd_vops_resume(hba, pm_op);
 } else {
-ufshcd_hold(hba, false);
+ufshcd_hold(hba);
 if (ufshcd_is_clkscaling_supported(hba) &&
 hba->clk_scaling.is_enabled)
 ufshcd_suspend_clkscaling(hba);
 ufshcd_clk_scaling_allow(hba, false);
 }
 ufshcd_scsi_block_requests(hba);
-/* Drain ufshcd_queuecommand() */
-synchronize_rcu();
+/* Wait for ongoing ufshcd_queuecommand() calls to finish. */
+blk_mq_wait_quiesce_done(&hba->host->tag_set);
 cancel_work_sync(&hba->eeh_work);
 }
@@ -6901,7 +6869,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
 return PTR_ERR(req);
 req->end_io_data = &wait;
-ufshcd_hold(hba, false);
+ufshcd_hold(hba);
 spin_lock_irqsave(host->host_lock, flags);
@@ -7138,7 +7106,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
 cmd_type = DEV_CMD_TYPE_NOP;
 fallthrough;
 case UPIU_TRANSACTION_QUERY_REQ:
-ufshcd_hold(hba, false);
+ufshcd_hold(hba);
 mutex_lock(&hba->dev_cmd.lock);
 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
 desc_buff, buff_len,
@@ -7204,7 +7172,7 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
 u16 ehs_len;
 /* Protects use of hba->reserved_slot. */
-ufshcd_hold(hba, false);
+ufshcd_hold(hba);
 mutex_lock(&hba->dev_cmd.lock);
 down_read(&hba->clk_scaling_lock);
@@ -7439,7 +7407,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
 WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
-ufshcd_hold(hba, false);
+ufshcd_hold(hba);
 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 /* If command is already aborted/completed, return FAILED. */
 if (!(test_bit(tag, &hba->outstanding_reqs))) {
@@ -9430,7 +9398,7 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 * If we can't transition into any of the low power modes
 * just gate the clocks.
 */
-ufshcd_hold(hba, false);
+ufshcd_hold(hba);
 hba->clk_gating.is_suspended = true;
 if (ufshcd_is_clkscaling_supported(hba))
@@ -1358,7 +1358,7 @@ void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
 u8 **buf, bool ascii);
-int ufshcd_hold(struct ufs_hba *hba, bool async);
+void ufshcd_hold(struct ufs_hba *hba);
 void ufshcd_release(struct ufs_hba *hba);
 void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value);