Commit e30618a4 authored by Martin K. Petersen

Merge patch series "UFS patches for kernel 6.11"

Bart Van Assche <bvanassche@acm.org> says:

Hi Martin,

Please consider this series of UFS driver patches for the next merge window.

Thank you,

Bart.

Link: https://lore.kernel.org/r/20240708211716.2827751-1-bvanassche@acm.org
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parents 5e9a522b af568c7e
@@ -137,7 +137,6 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_queue_cfg_addr);
  *
  * MAC - Max. Active Command of the Host Controller (HC)
  * HC wouldn't send more than this commands to the device.
- * It is mandatory to implement get_hba_mac() to enable MCQ mode.
  * Calculates and adjusts the queue depth based on the depth
  * supported by the HC and ufs device.
  */
@@ -145,12 +144,21 @@ int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
 {
        int mac;
 
-       /* Mandatory to implement get_hba_mac() */
-       mac = ufshcd_mcq_vops_get_hba_mac(hba);
-       if (mac < 0) {
-               dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
-               return mac;
+       if (!hba->vops || !hba->vops->get_hba_mac) {
+               /*
+                * Extract the maximum number of active transfer tasks value
+                * from the host controller capabilities register. This value is
+                * 0-based.
+                */
+               hba->capabilities =
+                       ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
+               mac = hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_MCQ;
+               mac++;
+       } else {
+               mac = hba->vops->get_hba_mac(hba);
        }
+       if (mac < 0)
+               goto err;
 
        WARN_ON_ONCE(!hba->dev_info.bqueuedepth);
        /*
@@ -159,6 +167,10 @@ int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
         * shared queuing architecture is enabled.
         */
        return min_t(int, mac, hba->dev_info.bqueuedepth);
+
+err:
+       dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
+       return mac;
 }
 
 static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
@@ -415,9 +427,16 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi);
 void ufshcd_mcq_enable(struct ufs_hba *hba)
 {
        ufshcd_rmwl(hba, MCQ_MODE_SELECT, MCQ_MODE_SELECT, REG_UFS_MEM_CFG);
+       hba->mcq_enabled = true;
 }
 EXPORT_SYMBOL_GPL(ufshcd_mcq_enable);
 
+void ufshcd_mcq_disable(struct ufs_hba *hba)
+{
+       ufshcd_rmwl(hba, MCQ_MODE_SELECT, 0, REG_UFS_MEM_CFG);
+       hba->mcq_enabled = false;
+}
+
 void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg)
 {
        ufshcd_writel(hba, msg->address_lo, REG_UFS_ESILBA);
...
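A note on the ufshcd_mcq_decide_queue_depth() change above: when a host driver
supplies no get_hba_mac() vop, the MAC value is now taken directly from the
controller capabilities register. A minimal sketch of that fallback path in
isolation (the helper name is invented for illustration; the register and mask
names are the ones used in the hunk above):

        /* Read the 0-based MAC field (bits 7:0 of the capabilities register
         * in MCQ mode) and convert it to a 1-based count of active commands.
         */
        static int example_mac_from_capabilities(struct ufs_hba *hba)
        {
                u32 caps = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

                return (caps & MASK_TRANSFER_REQUESTS_SLOTS_MCQ) + 1;
        }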
@@ -64,16 +64,11 @@ void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
 void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
                          struct cq_entry *cqe);
 int ufshcd_mcq_init(struct ufs_hba *hba);
+void ufshcd_mcq_disable(struct ufs_hba *hba);
 int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba);
 int ufshcd_mcq_memory_alloc(struct ufs_hba *hba);
-void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba);
-void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
-u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
-void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
 struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
                                           struct request *req);
-unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
-                                      struct ufs_hw_queue *hwq);
 void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
                                    struct ufs_hw_queue *hwq);
 bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd);
@@ -255,14 +250,6 @@ static inline int ufshcd_vops_mcq_config_resource(struct ufs_hba *hba)
        return -EOPNOTSUPP;
 }
 
-static inline int ufshcd_mcq_vops_get_hba_mac(struct ufs_hba *hba)
-{
-       if (hba->vops && hba->vops->get_hba_mac)
-               return hba->vops->get_hba_mac(hba);
-
-       return -EOPNOTSUPP;
-}
-
 static inline int ufshcd_mcq_vops_op_runtime_config(struct ufs_hba *hba)
 {
        if (hba->vops && hba->vops->op_runtime_config)
...
@@ -164,8 +164,6 @@ EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
 enum {
        UFSHCD_MAX_CHANNEL = 0,
        UFSHCD_MAX_ID = 1,
-       UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED,
-       UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED,
 };
 
 static const char *const ufshcd_state_name[] = {
@@ -455,7 +453,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
        intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 
-       if (is_mcq_enabled(hba)) {
+       if (hba->mcq_enabled) {
                struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);
 
                hwq_id = hwq->id;
@@ -2304,7 +2302,7 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
        if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
                ufshcd_start_monitor(hba, lrbp);
 
-       if (is_mcq_enabled(hba)) {
+       if (hba->mcq_enabled) {
                int utrd_size = sizeof(struct utp_transfer_req_desc);
                struct utp_transfer_req_desc *src = lrbp->utr_descriptor_ptr;
                struct utp_transfer_req_desc *dest;
@@ -2404,7 +2402,7 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
                hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT;
 
        /* nutrs and nutmrs are 0 based values */
-       hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
+       hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_SDB) + 1;
        hba->nutmrs =
        ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
        hba->reserved_slot = hba->nutrs - 1;
@@ -3003,7 +3001,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
                goto out;
        }
 
-       if (is_mcq_enabled(hba))
+       if (hba->mcq_enabled)
                hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
 
        ufshcd_send_command(hba, tag, hwq);
@@ -3062,7 +3060,7 @@ static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
        unsigned long flags;
        int err;
 
-       if (is_mcq_enabled(hba)) {
+       if (hba->mcq_enabled) {
                /*
                 * MCQ mode. Clean up the MCQ resources similar to
                 * what the ufshcd_utrl_clear() does for SDB mode.
@@ -3172,7 +3170,7 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
                        __func__, lrbp->task_tag);
 
                /* MCQ mode */
-               if (is_mcq_enabled(hba)) {
+               if (hba->mcq_enabled) {
                        /* successfully cleared the command, retry if needed */
                        if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0)
                                err = -EAGAIN;
@@ -3994,11 +3992,11 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
  */
 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
 {
-       struct uic_command uic_cmd = {0};
+       struct uic_command uic_cmd = {
+               .command = UIC_CMD_DME_LINK_STARTUP,
+       };
        int ret;
 
-       uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
-
        ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
        if (ret)
                dev_dbg(hba->dev,
@@ -4016,11 +4014,11 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
  */
 static int ufshcd_dme_reset(struct ufs_hba *hba)
 {
-       struct uic_command uic_cmd = {0};
+       struct uic_command uic_cmd = {
+               .command = UIC_CMD_DME_RESET,
+       };
        int ret;
 
-       uic_cmd.command = UIC_CMD_DME_RESET;
-
        ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
        if (ret)
                dev_err(hba->dev,
@@ -4055,11 +4053,11 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
  */
 static int ufshcd_dme_enable(struct ufs_hba *hba)
 {
-       struct uic_command uic_cmd = {0};
+       struct uic_command uic_cmd = {
+               .command = UIC_CMD_DME_ENABLE,
+       };
        int ret;
 
-       uic_cmd.command = UIC_CMD_DME_ENABLE;
-
        ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
        if (ret)
                dev_err(hba->dev,
@@ -4112,7 +4110,12 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
                        u8 attr_set, u32 mib_val, u8 peer)
 {
-       struct uic_command uic_cmd = {0};
+       struct uic_command uic_cmd = {
+               .command = peer ? UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET,
+               .argument1 = attr_sel,
+               .argument2 = UIC_ARG_ATTR_TYPE(attr_set),
+               .argument3 = mib_val,
+       };
        static const char *const action[] = {
                "dme-set",
                "dme-peer-set"
@@ -4121,12 +4124,6 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
        int ret;
        int retries = UFS_UIC_COMMAND_RETRIES;
 
-       uic_cmd.command = peer ?
-               UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
-       uic_cmd.argument1 = attr_sel;
-       uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
-       uic_cmd.argument3 = mib_val;
-
        do {
                /* for peer attributes we retry upon failure */
                ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
@@ -4156,7 +4153,10 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
                        u32 *mib_val, u8 peer)
 {
-       struct uic_command uic_cmd = {0};
+       struct uic_command uic_cmd = {
+               .command = peer ? UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET,
+               .argument1 = attr_sel,
+       };
        static const char *const action[] = {
                "dme-get",
                "dme-peer-get"
@@ -4190,10 +4190,6 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
                }
        }
 
-       uic_cmd.command = peer ?
-               UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
-       uic_cmd.argument1 = attr_sel;
-
        do {
                /* for peer attributes we retry upon failure */
                ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
@@ -4326,7 +4322,11 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
  */
 int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
 {
-       struct uic_command uic_cmd = {0};
+       struct uic_command uic_cmd = {
+               .command = UIC_CMD_DME_SET,
+               .argument1 = UIC_ARG_MIB(PA_PWRMODE),
+               .argument3 = mode,
+       };
        int ret;
 
        if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
@@ -4339,9 +4339,6 @@ int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
                }
        }
 
-       uic_cmd.command = UIC_CMD_DME_SET;
-       uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
-       uic_cmd.argument3 = mode;
-
        ufshcd_hold(hba);
        ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
        ufshcd_release(hba);
@@ -4382,13 +4379,14 @@ EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
 int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
 {
-       int ret;
-       struct uic_command uic_cmd = {0};
+       struct uic_command uic_cmd = {
+               .command = UIC_CMD_DME_HIBER_ENTER,
+       };
        ktime_t start = ktime_get();
+       int ret;
 
        ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
 
-       uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
        ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
        trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
                             ktime_to_us(ktime_sub(ktime_get(), start)), ret);
@@ -4406,13 +4404,14 @@ EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
 {
-       struct uic_command uic_cmd = {0};
+       struct uic_command uic_cmd = {
+               .command = UIC_CMD_DME_HIBER_EXIT,
+       };
        int ret;
        ktime_t start = ktime_get();
 
        ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
 
-       uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
        ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
        trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
                             ktime_to_us(ktime_sub(ktime_get(), start)), ret);
@@ -5562,7 +5561,7 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
        u32 tr_doorbell;
        struct ufs_hw_queue *hwq;
 
-       if (is_mcq_enabled(hba)) {
+       if (hba->mcq_enabled) {
                hwq = &hba->uhq[queue_num];
 
                return ufshcd_mcq_poll_cqe_lock(hba, hwq);
@@ -6203,7 +6202,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
 /* Complete requests that have door-bell cleared */
 static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl)
 {
-       if (is_mcq_enabled(hba))
+       if (hba->mcq_enabled)
                ufshcd_mcq_compl_pending_transfer(hba, force_compl);
        else
                ufshcd_transfer_req_compl(hba);
@@ -6460,7 +6459,7 @@ static bool ufshcd_abort_one(struct request *rq, void *priv)
                *ret ? "failed" : "succeeded");
 
        /* Release cmd in MCQ mode if abort succeeds */
-       if (is_mcq_enabled(hba) && (*ret == 0)) {
+       if (hba->mcq_enabled && (*ret == 0)) {
                hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
                if (!hwq)
                        return 0;
@@ -7393,7 +7392,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
                goto out;
        }
 
-       if (is_mcq_enabled(hba)) {
+       if (hba->mcq_enabled) {
                for (pos = 0; pos < hba->nutrs; pos++) {
                        lrbp = &hba->lrb[pos];
                        if (ufshcd_cmd_inflight(lrbp->cmd) &&
@@ -7489,7 +7488,7 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
                 */
                dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
                        __func__, tag);
-               if (is_mcq_enabled(hba)) {
+               if (hba->mcq_enabled) {
                        /* MCQ mode */
                        if (ufshcd_cmd_inflight(lrbp->cmd)) {
                                /* sleep for max. 200us same delay as in SDB mode */
@@ -7567,7 +7566,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
 
        ufshcd_hold(hba);
 
-       if (!is_mcq_enabled(hba)) {
+       if (!hba->mcq_enabled) {
                reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
                if (!test_bit(tag, &hba->outstanding_reqs)) {
                        /* If command is already aborted/completed, return FAILED. */
@@ -7600,7 +7599,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
        }
 
        hba->req_abort_count++;
-       if (!is_mcq_enabled(hba) && !(reg & (1 << tag))) {
+       if (!hba->mcq_enabled && !(reg & (1 << tag))) {
                /* only execute this code in single doorbell mode */
                dev_err(hba->dev,
                        "%s: cmd was completed, but without a notifying intr, tag = %d",
@@ -7627,7 +7626,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
                goto release;
        }
 
-       if (is_mcq_enabled(hba)) {
+       if (hba->mcq_enabled) {
                /* MCQ mode. Branch off to handle abort for mcq mode */
                err = ufshcd_mcq_abort(cmd);
                goto release;
@@ -8682,6 +8681,9 @@ static int ufshcd_alloc_mcq(struct ufs_hba *hba)
        if (ret)
                goto err;
 
+       hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
+       hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;
+
        return 0;
 err:
        hba->nutrs = old_nutrs;
@@ -8703,12 +8705,6 @@ static void ufshcd_config_mcq(struct ufs_hba *hba)
        ufshcd_mcq_make_queues_operational(hba);
        ufshcd_mcq_config_mac(hba, hba->nutrs);
 
-       hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
-       hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;
-
-       ufshcd_mcq_enable(hba);
-       hba->mcq_enabled = true;
-
        dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n",
                 hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT],
                 hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL],
@@ -8736,8 +8732,10 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
                ufshcd_set_link_active(hba);
 
                /* Reconfigure MCQ upon reset */
-               if (is_mcq_enabled(hba) && !init_dev_params)
+               if (hba->mcq_enabled && !init_dev_params) {
                        ufshcd_config_mcq(hba);
+                       ufshcd_mcq_enable(hba);
+               }
        }
 
        /* Verify device initialization by sending NOP OUT UPIU */
        ret = ufshcd_verify_dev_init(hba);
@@ -8758,11 +8756,13 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
                if (ret)
                        return ret;
                if (is_mcq_supported(hba) && !hba->scsi_host_added) {
+                       ufshcd_mcq_enable(hba);
                        ret = ufshcd_alloc_mcq(hba);
                        if (!ret) {
                                ufshcd_config_mcq(hba);
                        } else {
                                /* Continue with SDB mode */
+                               ufshcd_mcq_disable(hba);
                                use_mcq_mode = false;
                                dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
                                        ret);
@@ -8776,6 +8776,7 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
                } else if (is_mcq_supported(hba)) {
                        /* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */
                        ufshcd_config_mcq(hba);
+                       ufshcd_mcq_enable(hba);
                }
        }
 
@@ -8961,8 +8962,6 @@ static const struct scsi_host_template ufshcd_driver_template = {
        .eh_timed_out = ufshcd_eh_timed_out,
        .this_id = -1,
        .sg_tablesize = SG_ALL,
-       .cmd_per_lun = UFSHCD_CMD_PER_LUN,
-       .can_queue = UFSHCD_CAN_QUEUE,
        .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
        .max_sectors = SZ_1M / SECTOR_SIZE,
        .max_host_blocked = 1,
...
@@ -693,7 +693,7 @@ static void ufs_mtk_mcq_disable_irq(struct ufs_hba *hba)
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        u32 irq, i;
 
-       if (!is_mcq_enabled(hba))
+       if (!hba->mcq_enabled)
                return;
 
        if (host->mcq_nr_intr == 0)
@@ -711,7 +711,7 @@ static void ufs_mtk_mcq_enable_irq(struct ufs_hba *hba)
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        u32 irq, i;
 
-       if (!is_mcq_enabled(hba))
+       if (!hba->mcq_enabled)
                return;
 
        if (host->mcq_nr_intr == 0)
@@ -1308,7 +1308,7 @@ static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
        if (err)
                return err;
 
-       if (is_mcq_enabled(hba)) {
+       if (hba->mcq_enabled) {
                ufs_mtk_config_mcq(hba, false);
                ufshcd_mcq_make_queues_operational(hba);
                ufshcd_mcq_config_mac(hba, hba->nutrs);
...
@@ -73,8 +73,8 @@ enum ufs_event_type {
  * @done: UIC command completion
  */
 struct uic_command {
-       u32 command;
-       u32 argument1;
+       const u32 command;
+       const u32 argument1;
        u32 argument2;
        u32 argument3;
        int cmd_active;
@@ -325,7 +325,9 @@ struct ufs_pwr_mode_info {
  * @event_notify: called to notify important events
  * @reinit_notify: called to notify reinit of UFSHCD during max gear switch
  * @mcq_config_resource: called to configure MCQ platform resources
- * @get_hba_mac: called to get vendor specific mac value, mandatory for mcq mode
+ * @get_hba_mac: reports maximum number of outstanding commands supported by
+ *     the controller. Should be implemented for UFSHCI 4.0 or later
+ *     controllers that are not compliant with the UFSHCI 4.0 specification.
  * @op_runtime_config: called to config Operation and runtime regs Pointers
  * @get_outstanding_cqs: called to get outstanding completion queues
  * @config_esi: called to config Event Specific Interrupt
@@ -1133,11 +1135,6 @@ struct ufs_hw_queue {
 
 #define MCQ_QCFG_SIZE 0x40
 
-static inline bool is_mcq_enabled(struct ufs_hba *hba)
-{
-       return hba->mcq_enabled;
-}
-
 static inline unsigned int ufshcd_mcq_opr_offset(struct ufs_hba *hba,
                                                 enum ufshcd_mcq_opr opr, int idx)
 {
...
@@ -67,7 +67,8 @@ enum {
 
 /* Controller capability masks */
 enum {
-       MASK_TRANSFER_REQUESTS_SLOTS = 0x0000001F,
+       MASK_TRANSFER_REQUESTS_SLOTS_SDB = 0x0000001F,
+       MASK_TRANSFER_REQUESTS_SLOTS_MCQ = 0x000000FF,
        MASK_NUMBER_OUTSTANDING_RTT = 0x0000FF00,
        MASK_TASK_MANAGEMENT_REQUEST_SLOTS = 0x00070000,
        MASK_EHSLUTRD_SUPPORTED = 0x00400000,
...
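With get_hba_mac() now optional, only host controllers whose capabilities
register does not report a usable MAC value still need to supply the vop. A
hypothetical host driver hook could look as follows (the driver name, function
name, and the value 64 are invented for illustration; the vop signature
follows the @get_hba_mac documentation above):

        /* Report a fixed MAC for a controller whose capabilities register is
         * known to be unreliable; the core then uses this value instead of
         * reading REG_CONTROLLER_CAPABILITIES.
         */
        static int example_get_hba_mac(struct ufs_hba *hba)
        {
                return 64;
        }

        static const struct ufs_hba_variant_ops example_hba_vops = {
                .name = "example",
                .get_hba_mac = example_get_hba_mac,
        };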