Commit 895a1067 authored by Linus Torvalds

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull more SCSI updates from James Bottomley:
 "The only new stuff which missed the first pull request is an update to
  the UFS driver.

  The rest is an assortment of bug fixes and minor tweaks which appeared
  recently (some are fixes for recent code and some are stuff spotted
  recently by the checkers or the new gcc-6 compiler [most of Arnd's
  stuff])"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (32 commits)
  scsi_common: do not clobber fixed sense information
  scsi: ufs: select CONFIG_NLS
  scsi: fc: use get/put_unaligned64 for wwn access
  fnic: move printk()s outside of the critical code section.
  qla2xxx: avoid maybe_uninitialized warning
  megaraid_sas: add missing curly braces in ioctl handler
  lpfc: fix misleading indentation
  scsi_transport_sas: add 'scsi_target_id' sysfs attribute
  scsi_dh_alua: uninitialized variable in alua_check_vpd()
  scsi: ufs-qcom: add printouts of testbus debug registers
  scsi: ufs-qcom: enable/disable the device ref clock
  scsi: ufs-qcom: set PA_Local_TX_LCC_Enable before link startup
  scsi: ufs: add device quirk delay before putting UFS rails in LPM
  scsi: ufs: fix leakage during link off state
  scsi: ufs: tune UniPro parameters to optimize hibern8 exit time
  scsi: ufs: handle non spec compliant bkops behaviour by device
  scsi: ufs: add retry for query descriptors
  scsi: ufs: add error recovery after DL NAC error
  scsi: ufs: make error handling bit faster
  scsi: ufs: disable vccq if it's not needed by UFS device
  ...
parents 02fc59a0 c6986be4
@@ -38,6 +38,9 @@ Optional properties:
                          defined or a value in the array is "0" then it is assumed
                          that the frequency is set by the parent clock or a
                          fixed rate clock source.
+- lanes-per-direction   : number of lanes available per direction - either 1 or 2.
+                          Note that it is assumed the same number of lanes is used in
+                          both directions at once. If not specified, default is 2 lanes per direction.

 Note: If above properties are not defined it can be assumed that the supply
 regulators or clocks are always on.
......
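For context, a minimal sketch of a device-tree node using the new property (the node name, address, and other properties below are illustrative only, not taken from this patch):

    ufshc@fd800000 {
        /* hypothetical node - only lanes-per-direction comes from this patch */
        compatible = "qcom,ufshc";
        reg = <0xfd800000 0x1000>;
        lanes-per-direction = <1>;  /* single-lane link; omit to get the default of 2 */
    };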
@@ -332,7 +332,7 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
 {
     int rel_port = -1, group_id;
     struct alua_port_group *pg, *old_pg = NULL;
-    bool pg_updated;
+    bool pg_updated = false;
     unsigned long flags;

     group_id = scsi_vpd_tpg_id(sdev, &rel_port);
......
@@ -958,23 +958,22 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
     case FCPIO_INVALID_PARAM:   /* some parameter in request invalid */
     case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
     default:
-        shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
-                 fnic_fcpio_status_to_str(hdr_status));
         sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
         break;
     }

-    if (hdr_status != FCPIO_SUCCESS) {
-        atomic64_inc(&fnic_stats->io_stats.io_failures);
-        shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
-                 fnic_fcpio_status_to_str(hdr_status));
-    }
-
     /* Break link with the SCSI command */
     CMD_SP(sc) = NULL;
     CMD_FLAGS(sc) |= FNIC_IO_DONE;

     spin_unlock_irqrestore(io_lock, flags);

+    if (hdr_status != FCPIO_SUCCESS) {
+        atomic64_inc(&fnic_stats->io_stats.io_failures);
+        shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
+                 fnic_fcpio_status_to_str(hdr_status));
+    }
+
     fnic_release_ioreq_buf(fnic, io_req, sc);
     mempool_free(io_req, fnic->io_req_pool);
......
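The fnic change above is the classic fix for console logging inside a spinlock-protected section: gather what you need under the lock, release it, then print. A minimal sketch of the pattern (function and variable names here are illustrative, not from the driver):

    #include <linux/spinlock.h>
    #include <linux/printk.h>

    static DEFINE_SPINLOCK(io_lock);

    static void complete_io(int hdr_status)
    {
        unsigned long flags;
        bool log_failure;

        spin_lock_irqsave(&io_lock, flags);
        /* update per-I/O state under the lock; only remember what to report */
        log_failure = (hdr_status != 0);
        spin_unlock_irqrestore(&io_lock, flags);

        /* console output can take milliseconds; never do it with the lock held */
        if (log_failure)
            pr_err("I/O failed, hdr status = %d\n", hdr_status);
    }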
@@ -2860,7 +2860,7 @@ lpfc_online(struct lpfc_hba *phba)
     }

     vports = lpfc_create_vport_work_array(phba);
-    if (vports != NULL)
+    if (vports != NULL) {
         for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
             struct Scsi_Host *shost;
             shost = lpfc_shost_from_vport(vports[i]);
@@ -2877,7 +2877,8 @@ lpfc_online(struct lpfc_hba *phba)
             }
             spin_unlock_irq(shost->host_lock);
         }
-    lpfc_destroy_vport_work_array(phba, vports);
+    }
+    lpfc_destroy_vport_work_array(phba, vports);

     lpfc_unblock_mgmt_io(phba);
     return 0;
......
@@ -2097,7 +2097,7 @@ struct megasas_instance {
     u8 UnevenSpanSupport;

     u8 supportmax256vd;
-    u8 allow_fw_scan;
+    u8 pd_list_not_supported;
     u16 fw_supported_vd_count;
     u16 fw_supported_pd_count;
......
@@ -1838,7 +1838,7 @@ static int megasas_slave_configure(struct scsi_device *sdev)
     struct megasas_instance *instance;

     instance = megasas_lookup_instance(sdev->host->host_no);
-    if (instance->allow_fw_scan) {
+    if (instance->pd_list_not_supported) {
         if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
             sdev->type == TYPE_DISK) {
             pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
@@ -1874,7 +1874,8 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
         pd_index =
             (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
             sdev->id;
-        if ((instance->allow_fw_scan || instance->pd_list[pd_index].driveState ==
+        if ((instance->pd_list_not_supported ||
+            instance->pd_list[pd_index].driveState ==
             MR_PD_STATE_SYSTEM)) {
             goto scan_target;
         }
@@ -4087,7 +4088,13 @@ megasas_get_pd_list(struct megasas_instance *instance)
     switch (ret) {
     case DCMD_FAILED:
-        megaraid_sas_kill_hba(instance);
+        dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
+            "failed/not supported by firmware\n");
+
+        if (instance->ctrl_context)
+            megaraid_sas_kill_hba(instance);
+        else
+            instance->pd_list_not_supported = 1;
         break;
     case DCMD_TIMEOUT:
@@ -5034,7 +5041,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
     case PCI_DEVICE_ID_DELL_PERC5:
     default:
         instance->instancet = &megasas_instance_template_xscale;
-        instance->allow_fw_scan = 1;
         break;
     }
@@ -6650,12 +6656,13 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
     }

     for (i = 0; i < ioc->sge_count; i++) {
-        if (kbuff_arr[i])
+        if (kbuff_arr[i]) {
             dma_free_coherent(&instance->pdev->dev,
                       le32_to_cpu(kern_sge32[i].length),
                       kbuff_arr[i],
                       le32_to_cpu(kern_sge32[i].phys_addr));
             kbuff_arr[i] = NULL;
+        }
     }

     megasas_return_cmd(instance, cmd);
......
@@ -1881,15 +1881,17 @@ static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
         else
             vha->req->cnt = vha->req->length -
                 (vha->req->ring_index - cnt);
-    }

-    if (unlikely(vha->req->cnt < (req_cnt + 2))) {
-        ql_dbg(ql_dbg_io, vha, 0x305a,
-            "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n",
-            vha->vp_idx, vha->req->ring_index,
-            vha->req->cnt, req_cnt, cnt, cnt_in, vha->req->length);
-        return -EAGAIN;
+        if (unlikely(vha->req->cnt < (req_cnt + 2))) {
+            ql_dbg(ql_dbg_io, vha, 0x305a,
+                "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n",
+                vha->vp_idx, vha->req->ring_index,
+                vha->req->cnt, req_cnt, cnt, cnt_in,
+                vha->req->length);
+            return -EAGAIN;
+        }
     }

     vha->req->cnt -= req_cnt;
     return 0;
......
@@ -278,8 +278,16 @@ int scsi_set_sense_information(u8 *buf, int buf_len, u64 info)
         ucp[3] = 0;
         put_unaligned_be64(info, &ucp[4]);
     } else if ((buf[0] & 0x7f) == 0x70) {
-        buf[0] |= 0x80;
-        put_unaligned_be64(info, &buf[3]);
+        /*
+         * Only set the 'VALID' bit if we can represent the value
+         * correctly; otherwise just fill out the lower bytes and
+         * clear the 'VALID' flag.
+         */
+        if (info <= 0xffffffffUL)
+            buf[0] |= 0x80;
+        else
+            buf[0] &= 0x7f;
+        put_unaligned_be32((u32)info, &buf[3]);
     }

     return 0;
......
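Why the guard matters: descriptor-format sense data (response code 0x72) has an 8-byte INFORMATION field, but fixed-format sense (0x70/0x71) only has bytes 3-6, so the old unconditional put_unaligned_be64() wrote past the field and clobbered the sense bytes that follow. A standalone toy version of the new rule (plain userspace C, not the kernel helper):

    #include <stdint.h>
    #include <stdio.h>

    /* Fixed-format sense: INFORMATION is bytes 3..6, only 32 bits wide. */
    static void set_fixed_sense_info(uint8_t *buf, uint64_t info)
    {
        int i;

        if (info <= 0xffffffffULL)
            buf[0] |= 0x80;     /* VALID: the field really holds 'info' */
        else
            buf[0] &= 0x7f;     /* value does not fit: clear VALID */

        for (i = 0; i < 4; i++) /* store only the low 32 bits, big-endian */
            buf[3 + i] = (uint8_t)(info >> (8 * (3 - i)));
    }

    int main(void)
    {
        uint8_t sense[18] = { 0x70 };

        set_fixed_sense_info(sense, 0x12345678);
        printf("VALID=%d\n", sense[0] >> 7);    /* prints VALID=1 */
        return 0;
    }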
@@ -4,7 +4,7 @@
 #define SAS_HOST_ATTRS 0
 #define SAS_PHY_ATTRS 17
 #define SAS_PORT_ATTRS 1
-#define SAS_RPORT_ATTRS 7
+#define SAS_RPORT_ATTRS 8
 #define SAS_END_DEV_ATTRS 5
 #define SAS_EXPANDER_ATTRS 7
......
@@ -1105,7 +1105,7 @@ static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj,
     if (attr == &dev_attr_vpd_pg80 && !sdev->vpd_pg80)
         return 0;

-    if (attr == &dev_attr_vpd_pg83 && sdev->vpd_pg83)
+    if (attr == &dev_attr_vpd_pg83 && !sdev->vpd_pg83)
         return 0;

     return S_IRUGO;
......
@@ -1286,6 +1286,7 @@ sas_rphy_protocol_attr(identify.target_port_protocols, target_port_protocols);
 sas_rphy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n",
         unsigned long long);
 sas_rphy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
+sas_rphy_simple_attr(scsi_target_id, scsi_target_id, "%d\n", u32);

 /* only need 8 bytes of data plus header (4 or 8) */
 #define BUF_SIZE 64
@@ -1886,6 +1887,7 @@ sas_attach_transport(struct sas_function_template *ft)
     SETUP_RPORT_ATTRIBUTE(rphy_device_type);
     SETUP_RPORT_ATTRIBUTE(rphy_sas_address);
     SETUP_RPORT_ATTRIBUTE(rphy_phy_identifier);
+    SETUP_RPORT_ATTRIBUTE(rphy_scsi_target_id);
     SETUP_OPTIONAL_RPORT_ATTRIBUTE(rphy_enclosure_identifier,
                        get_enclosure_identifier);
     SETUP_OPTIONAL_RPORT_ATTRIBUTE(rphy_bay_identifier,
......
@@ -37,6 +37,7 @@ config SCSI_UFSHCD
     depends on SCSI && SCSI_DMA
     select PM_DEVFREQ
     select DEVFREQ_GOV_SIMPLE_ONDEMAND
+    select NLS
     ---help---
     This selects the support for UFS devices in Linux, say Y and make
     sure that you know the name of your UFS host adapter (the card
......
 /*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,8 +16,8 @@
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/phy/phy.h>
 #include <linux/phy/phy-qcom-ufs.h>
 #include "ufshcd.h"
 #include "ufshcd-pltfrm.h"
 #include "unipro.h"
@@ -58,6 +58,12 @@ static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
             len * 4, false);
 }

+static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
+        char *prefix, void *priv)
+{
+    ufs_qcom_dump_regs(hba, offset, len, prefix);
+}
+
 static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
 {
     int err = 0;
@@ -106,9 +112,11 @@ static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
     if (!host->is_lane_clks_enabled)
         return;

-    clk_disable_unprepare(host->tx_l1_sync_clk);
+    if (host->hba->lanes_per_direction > 1)
+        clk_disable_unprepare(host->tx_l1_sync_clk);
     clk_disable_unprepare(host->tx_l0_sync_clk);
-    clk_disable_unprepare(host->rx_l1_sync_clk);
+    if (host->hba->lanes_per_direction > 1)
+        clk_disable_unprepare(host->rx_l1_sync_clk);
     clk_disable_unprepare(host->rx_l0_sync_clk);

     host->is_lane_clks_enabled = false;
@@ -132,21 +140,24 @@ static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
     if (err)
         goto disable_rx_l0;

-    err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
-        host->rx_l1_sync_clk);
-    if (err)
-        goto disable_tx_l0;
+    if (host->hba->lanes_per_direction > 1) {
+        err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
+            host->rx_l1_sync_clk);
+        if (err)
+            goto disable_tx_l0;

-    err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
-        host->tx_l1_sync_clk);
-    if (err)
-        goto disable_rx_l1;
+        err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
+            host->tx_l1_sync_clk);
+        if (err)
+            goto disable_rx_l1;
+    }

     host->is_lane_clks_enabled = true;
     goto out;

 disable_rx_l1:
-    clk_disable_unprepare(host->rx_l1_sync_clk);
+    if (host->hba->lanes_per_direction > 1)
+        clk_disable_unprepare(host->rx_l1_sync_clk);
 disable_tx_l0:
     clk_disable_unprepare(host->tx_l0_sync_clk);
 disable_rx_l0:
@@ -170,14 +181,16 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
     if (err)
         goto out;

-    err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
-        &host->rx_l1_sync_clk);
-    if (err)
-        goto out;
-
-    err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
-        &host->tx_l1_sync_clk);
+    /* In case of single lane per direction, don't read lane1 clocks */
+    if (host->hba->lanes_per_direction > 1) {
+        err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
+            &host->rx_l1_sync_clk);
+        if (err)
+            goto out;

+        err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
+            &host->tx_l1_sync_clk);
+    }
 out:
     return err;
 }
@@ -267,9 +280,8 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
     ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);

     if (ret) {
-        dev_err(hba->dev,
-            "%s: ufs_qcom_phy_calibrate_phy()failed, ret = %d\n",
-            __func__, ret);
+        dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
+            __func__, ret);
         goto out;
     }
@@ -519,6 +531,18 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
             err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
                                   150);

+        /*
+         * Some UFS devices (and may be host) have issues if LCC is
+         * enabled. So we are setting PA_Local_TX_LCC_Enable to 0
+         * before link startup which will make sure that both host
+         * and device TX LCC are disabled once link startup is
+         * completed.
+         */
+        if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
+            err = ufshcd_dme_set(hba,
+                    UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
+                    0);
+
         break;
     case POST_CHANGE:
         ufs_qcom_link_startup_post_change(hba);
@@ -962,6 +986,10 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
             goto out;
         }

+        /* enable the device ref clock before changing to HS mode */
+        if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
+            ufshcd_is_hs_mode(dev_req_params))
+            ufs_qcom_dev_ref_clk_ctrl(host, true);
         break;
     case POST_CHANGE:
         if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
@@ -989,6 +1017,11 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
         memcpy(&host->dev_req_params,
                 dev_req_params, sizeof(*dev_req_params));
         ufs_qcom_update_bus_bw_vote(host);
+
+        /* disable the device ref clock if entered PWM mode */
+        if (ufshcd_is_hs_mode(&hba->pwr_info) &&
+            !ufshcd_is_hs_mode(dev_req_params))
+            ufs_qcom_dev_ref_clk_ctrl(host, false);
         break;
     default:
         ret = -EINVAL;
@@ -1090,6 +1123,9 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
             ufs_qcom_phy_disable_iface_clk(host->generic_phy);
             goto out;
         }
+        /* enable the device ref clock for HS mode*/
+        if (ufshcd_is_hs_mode(&hba->pwr_info))
+            ufs_qcom_dev_ref_clk_ctrl(host, true);
         vote = host->bus_vote.saved_vote;
         if (vote == host->bus_vote.min_bw_vote)
             ufs_qcom_update_bus_bw_vote(host);
@@ -1367,6 +1403,74 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
     return err;
 }

+static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
+        void *priv, void (*print_fn)(struct ufs_hba *hba,
+        int offset, int num_regs, char *str, void *priv))
+{
+    u32 reg;
+    struct ufs_qcom_host *host;
+
+    if (unlikely(!hba)) {
+        pr_err("%s: hba is NULL\n", __func__);
+        return;
+    }
+    if (unlikely(!print_fn)) {
+        dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
+        return;
+    }
+
+    host = ufshcd_get_variant(hba);
+    if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
+        return;
+
+    reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
+    print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
+
+    reg = ufshcd_readl(hba, REG_UFS_CFG1);
+    reg |= UFS_BIT(17);
+    ufshcd_writel(hba, reg, REG_UFS_CFG1);
+
+    reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
+    print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);
+
+    reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
+    print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);
+
+    reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
+    print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
+
+    ufshcd_writel(hba, (reg & ~UFS_BIT(17)), REG_UFS_CFG1);
+
+    reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
+    print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
+
+    reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
+    print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);
+
+    reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
+    print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);
+
+    reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
+    print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);
+
+    reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
+    print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);
+
+    reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
+    print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);
+
+    reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
+    print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
+}
+
+static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
+{
+    if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
+        ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
+    else
+        ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
+}
+
 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
 {
     /* provide a legal default configuration */
@@ -1475,6 +1579,7 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
     ufshcd_rmwl(host->hba, mask,
             (u32)host->testbus.select_minor << offset,
             reg);
+    ufs_qcom_enable_test_bus(host);
     ufshcd_release(host->hba);
     pm_runtime_put_sync(host->hba->dev);
@@ -1491,8 +1596,10 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
     ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
             "HCI Vendor Specific Registers ");

+    ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
+
     ufs_qcom_testbus_read(hba);
 }

 /**
  * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
  *
@@ -1537,7 +1644,7 @@ static int ufs_qcom_probe(struct platform_device *pdev)
  * ufs_qcom_remove - set driver_data of the device to NULL
  * @pdev: pointer to platform device handle
  *
- * Always return 0
+ * Always returns 0
  */
 static int ufs_qcom_remove(struct platform_device *pdev)
 {
......
@@ -241,6 +241,15 @@ struct ufs_qcom_host {
     struct ufs_qcom_testbus testbus;
 };

+static inline u32
+ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg)
+{
+    if (host->hw_ver.major <= 0x02)
+        return UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(reg);
+
+    return UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(reg);
+};
+
 #define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba)
 #define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba)
 #define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba)
......
@@ -43,6 +43,7 @@
 #define GENERAL_UPIU_REQUEST_SIZE 32
 #define QUERY_DESC_MAX_SIZE       255
 #define QUERY_DESC_MIN_SIZE       2
+#define QUERY_DESC_HDR_SIZE       2
 #define QUERY_OSF_SIZE            (GENERAL_UPIU_REQUEST_SIZE - \
                     (sizeof(struct utp_upiu_header)))

@@ -195,6 +196,37 @@ enum unit_desc_param {
     UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1  = 0x22,
 };

+/* Device descriptor parameters offsets in bytes*/
+enum device_desc_param {
+    DEVICE_DESC_PARAM_LEN           = 0x0,
+    DEVICE_DESC_PARAM_TYPE          = 0x1,
+    DEVICE_DESC_PARAM_DEVICE_TYPE       = 0x2,
+    DEVICE_DESC_PARAM_DEVICE_CLASS      = 0x3,
+    DEVICE_DESC_PARAM_DEVICE_SUB_CLASS  = 0x4,
+    DEVICE_DESC_PARAM_PRTCL         = 0x5,
+    DEVICE_DESC_PARAM_NUM_LU        = 0x6,
+    DEVICE_DESC_PARAM_NUM_WLU       = 0x7,
+    DEVICE_DESC_PARAM_BOOT_ENBL     = 0x8,
+    DEVICE_DESC_PARAM_DESC_ACCSS_ENBL   = 0x9,
+    DEVICE_DESC_PARAM_INIT_PWR_MODE     = 0xA,
+    DEVICE_DESC_PARAM_HIGH_PR_LUN       = 0xB,
+    DEVICE_DESC_PARAM_SEC_RMV_TYPE      = 0xC,
+    DEVICE_DESC_PARAM_SEC_LU        = 0xD,
+    DEVICE_DESC_PARAM_BKOP_TERM_LT      = 0xE,
+    DEVICE_DESC_PARAM_ACTVE_ICC_LVL     = 0xF,
+    DEVICE_DESC_PARAM_SPEC_VER      = 0x10,
+    DEVICE_DESC_PARAM_MANF_DATE     = 0x12,
+    DEVICE_DESC_PARAM_MANF_NAME     = 0x14,
+    DEVICE_DESC_PARAM_PRDCT_NAME        = 0x15,
+    DEVICE_DESC_PARAM_SN           = 0x16,
+    DEVICE_DESC_PARAM_OEM_ID        = 0x17,
+    DEVICE_DESC_PARAM_MANF_ID       = 0x18,
+    DEVICE_DESC_PARAM_UD_OFFSET     = 0x1A,
+    DEVICE_DESC_PARAM_UD_LEN        = 0x1B,
+    DEVICE_DESC_PARAM_RTT_CAP       = 0x1C,
+    DEVICE_DESC_PARAM_FRQ_RTC       = 0x1D,
+};
+
 /*
  * Logical Unit Write Protect
  * 00h: LU not write protected
@@ -469,6 +501,7 @@ struct ufs_vreg {
     struct regulator *reg;
     const char *name;
     bool enabled;
+    bool unused;
     int min_uV;
     int max_uV;
     int min_uA;
......
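The new device_desc_param offsets pair with the descriptor-read helpers added to ufshcd.c later in this series. As a rough sketch of how a driver-side helper might pull the manufacturer ID out of the device descriptor (hedged: error handling trimmed, and the buffer size is simply assumed large enough for the device descriptor):

    /* Sketch only: mirrors the kind of lookup done by ufs_advertise_fixup_device() */
    static int example_read_manufacturer_id(struct ufs_hba *hba, u16 *wmid)
    {
        u8 desc_buf[QUERY_DESC_MAX_SIZE];   /* assumed to cover the device descriptor */
        int err;

        err = ufshcd_read_device_desc(hba, desc_buf, sizeof(desc_buf));
        if (err)
            return err;

        /* multi-byte descriptor fields are big-endian on the wire */
        *wmid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
            desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
        return 0;
    }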
/*
* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _UFS_QUIRKS_H_
#define _UFS_QUIRKS_H_
/* return true if s1 is a prefix of s2 */
#define STR_PRFX_EQUAL(s1, s2) !strncmp(s1, s2, strlen(s1))
#define UFS_ANY_VENDOR 0xFFFF
#define UFS_ANY_MODEL "ANY_MODEL"
#define MAX_MODEL_LEN 16
#define UFS_VENDOR_TOSHIBA 0x198
#define UFS_VENDOR_SAMSUNG 0x1CE
/**
* ufs_device_info - ufs device details
* @wmanufacturerid: card details
* @model: card model
*/
struct ufs_device_info {
u16 wmanufacturerid;
char model[MAX_MODEL_LEN + 1];
};
/**
* ufs_dev_fix - ufs device quirk info
* @card: ufs card details
* @quirk: device quirk
*/
struct ufs_dev_fix {
struct ufs_device_info card;
unsigned int quirk;
};
#define END_FIX { { 0 }, 0 }
/* add specific device quirk */
#define UFS_FIX(_vendor, _model, _quirk) \
{ \
.card.wmanufacturerid = (_vendor),\
.card.model = (_model), \
.quirk = (_quirk), \
}
/*
* If UFS device is having issue in processing LCC (Line Control
* Command) coming from UFS host controller then enable this quirk.
* When this quirk is enabled, host controller driver should disable
* the LCC transmission on UFS host controller (by clearing
* TX_LCC_ENABLE attribute of host to 0).
*/
#define UFS_DEVICE_QUIRK_BROKEN_LCC (1 << 0)
/*
* Some UFS devices don't need VCCQ rail for device operations. Enabling this
* quirk for such devices will make sure that VCCQ rail is not voted.
*/
#define UFS_DEVICE_NO_VCCQ (1 << 1)
/*
* Some vendor's UFS device sends back to back NACs for the DL data frames
* causing the host controller to raise the DFES error status. Sometimes
* such UFS devices send back to back NAC without waiting for new
* retransmitted DL frame from the host and in such cases it might be possible
* the Host UniPro goes into bad state without raising the DFES error
* interrupt. If this happens then all the pending commands would timeout
* only after respective SW command (which is generally too large).
*
* We can workaround such device behaviour like this:
* - As soon as SW sees the DL NAC error, it should schedule the error handler
* - Error handler would sleep for 50ms to see if there are any fatal errors
* raised by UFS controller.
* - If there are fatal errors then SW does normal error recovery.
* - If there are no fatal errors then SW sends the NOP command to device
* to check if link is alive.
* - If NOP command times out, SW does normal error recovery
* - If NOP command succeed, skip the error handling.
*
* If DL NAC error is seen multiple times with some vendor's UFS devices then
* enable this quirk to initiate quick error recovery and also silence related
* error logs to reduce spamming of kernel logs.
*/
#define UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS (1 << 2)
/*
* Some UFS devices may not work properly after resume if the link was kept
* in off state during suspend. Enabling this quirk will not allow the
* link to be kept in off state during suspend.
*/
#define UFS_DEVICE_QUIRK_NO_LINK_OFF (1 << 3)
/*
* Few Toshiba UFS device models advertise RX_MIN_ACTIVATETIME_CAPABILITY as
* 600us which may not be enough for reliable hibern8 exit hardware sequence
* from UFS device.
* To workaround this issue, host should set its PA_TACTIVATE time to 1ms even
* if device advertises RX_MIN_ACTIVATETIME_CAPABILITY less than 1ms.
*/
#define UFS_DEVICE_QUIRK_PA_TACTIVATE (1 << 4)
/*
* Some UFS memory devices may have really low read/write throughput in
* FAST AUTO mode, enable this quirk to make sure that FAST AUTO mode is
* never enabled for such devices.
*/
#define UFS_DEVICE_NO_FASTAUTO (1 << 5)
/*
* It seems some UFS devices may keep drawing more than sleep current
* (atleast for 500us) from UFS rails (especially from VCCQ rail).
* To avoid this situation, add 2ms delay before putting these UFS
* rails in LPM mode.
*/
#define UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM (1 << 6)
struct ufs_hba;
void ufs_advertise_fixup_device(struct ufs_hba *hba);
static struct ufs_dev_fix ufs_fixups[] = {
/* UFS cards deviations table */
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
UFS_DEVICE_NO_FASTAUTO),
UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
UFS_DEVICE_QUIRK_PA_TACTIVATE),
UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
UFS_DEVICE_QUIRK_PA_TACTIVATE),
END_FIX
};
#endif /* UFS_QUIRKS_H_ */
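Given the table above, applying quirks at probe time reduces to a scan with a manufacturer-ID match plus a model-prefix match, with UFS_ANY_VENDOR/UFS_ANY_MODEL acting as wildcards. A hedged sketch of that loop (the real ufs_advertise_fixup_device() in ufshcd.c may differ in detail):

    /* Sketch: walk ufs_fixups and OR matching quirks into hba->dev_quirks */
    static void example_apply_quirks(struct ufs_hba *hba, struct ufs_device_info *card)
    {
        struct ufs_dev_fix *f;

        /* END_FIX terminates the table with quirk == 0 */
        for (f = ufs_fixups; f->quirk; f++) {
            if ((f->card.wmanufacturerid == card->wmanufacturerid ||
                 f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
                (STR_PRFX_EQUAL(f->card.model, card->model) ||
                 !strcmp(f->card.model, UFS_ANY_MODEL)))
                hba->dev_quirks |= f->quirk;
        }
    }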
@@ -40,6 +40,8 @@
 #include "ufshcd.h"
 #include "ufshcd-pltfrm.h"

+#define UFSHCD_DEFAULT_LANES_PER_DIRECTION  2
+
 static int ufshcd_parse_clock_info(struct ufs_hba *hba)
 {
     int ret = 0;
@@ -277,6 +279,21 @@ void ufshcd_pltfrm_shutdown(struct platform_device *pdev)
 }
 EXPORT_SYMBOL_GPL(ufshcd_pltfrm_shutdown);

+static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
+{
+    struct device *dev = hba->dev;
+    int ret;
+
+    ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
+        &hba->lanes_per_direction);
+    if (ret) {
+        dev_dbg(hba->dev,
+            "%s: failed to read lanes-per-direction, ret=%d\n",
+            __func__, ret);
+        hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
+    }
+}
+
 /**
  * ufshcd_pltfrm_init - probe routine of the driver
  * @pdev: pointer to Platform device handle
@@ -331,6 +348,8 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
     pm_runtime_set_active(&pdev->dev);
     pm_runtime_enable(&pdev->dev);

+    ufshcd_init_lanes_per_dir(hba);
+
     err = ufshcd_init(hba, mmio_base, irq);
     if (err) {
         dev_err(dev, "Initialization failed\n");
......
@@ -39,8 +39,10 @@
 #include <linux/async.h>
 #include <linux/devfreq.h>
+#include <linux/nls.h>
+#include <linux/of.h>

 #include "ufshcd.h"
+#include "ufs_quirks.h"
 #include "unipro.h"

 #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
@@ -131,9 +133,11 @@ enum {
 /* UFSHCD UIC layer error flags */
 enum {
     UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
-    UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */
-    UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */
-    UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */
+    UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
+    UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
+    UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
+    UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
+    UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
 };

 /* Interrupt configuration options */
@@ -193,6 +197,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba);
 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
                  bool skip_ref_clk);
 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
+static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
@@ -231,6 +236,16 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
     }
 }

+/* replace non-printable or non-ASCII characters with spaces */
+static inline void ufshcd_remove_non_printable(char *val)
+{
+    if (!val)
+        return;
+
+    if (*val < 0x20 || *val > 0x7e)
+        *val = ' ';
+}
+
 /*
  * ufshcd_wait_for_register - wait for register value to change
  * @hba - per-adapter interface
@@ -239,11 +254,13 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
  * @val - wait condition
  * @interval_us - polling interval in microsecs
  * @timeout_ms - timeout in millisecs
+ * @can_sleep - perform sleep or just spin
  *
  * Returns -ETIMEDOUT on error, zero on success
  */
-static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
-        u32 val, unsigned long interval_us, unsigned long timeout_ms)
+int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
+                u32 val, unsigned long interval_us,
+                unsigned long timeout_ms, bool can_sleep)
 {
     int err = 0;
     unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
@@ -252,9 +269,10 @@ static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
     val = val & mask;

     while ((ufshcd_readl(hba, reg) & mask) != val) {
-        /* wakeup within 50us of expiry */
-        usleep_range(interval_us, interval_us + 50);
-
+        if (can_sleep)
+            usleep_range(interval_us, interval_us + 50);
+        else
+            udelay(interval_us);
         if (time_after(jiffies, timeout)) {
             if ((ufshcd_readl(hba, reg) & mask) != val)
                 err = -ETIMEDOUT;
@@ -552,6 +570,34 @@ static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
     return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
 }

+u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
+{
+    /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
+    if ((hba->ufs_version == UFSHCI_VERSION_10) ||
+        (hba->ufs_version == UFSHCI_VERSION_11))
+        return UFS_UNIPRO_VER_1_41;
+    else
+        return UFS_UNIPRO_VER_1_6;
+}
+EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
+
+static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
+{
+    /*
+     * If both host and device support UniPro ver1.6 or later, PA layer
+     * parameters tuning happens during link startup itself.
+     *
+     * We can manually tune PA layer parameters if either host or device
+     * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
+     * logic simple, we will only do manual tuning if local unipro version
+     * doesn't support ver1.6 or later.
+     */
+    if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
+        return true;
+    else
+        return false;
+}
+
 static void ufshcd_ungate_work(struct work_struct *work)
 {
     int ret;
@@ -1458,7 +1504,7 @@ ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
      */
     err = ufshcd_wait_for_register(hba,
             REG_UTP_TRANSFER_REQ_DOOR_BELL,
-            mask, ~mask, 1000, 1000);
+            mask, ~mask, 1000, 1000, true);

     return err;
 }
@@ -1857,21 +1903,7 @@ static int ufshcd_query_attr_retry(struct ufs_hba *hba,
     return ret;
 }

-/**
- * ufshcd_query_descriptor - API function for sending descriptor requests
- * hba: per-adapter instance
- * opcode: attribute opcode
- * idn: attribute idn to access
- * index: index field
- * selector: selector field
- * desc_buf: the buffer that contains the descriptor
- * buf_len: length parameter passed to the device
- *
- * Returns 0 for success, non-zero in case of failure.
- * The buf_len parameter will contain, on return, the length parameter
- * received on the response.
- */
-static int ufshcd_query_descriptor(struct ufs_hba *hba,
+static int __ufshcd_query_descriptor(struct ufs_hba *hba,
             enum query_opcode opcode, enum desc_idn idn, u8 index,
             u8 selector, u8 *desc_buf, int *buf_len)
 {
@@ -1935,6 +1967,39 @@ static int ufshcd_query_descriptor(struct ufs_hba *hba,
     return err;
 }

+/**
+ * ufshcd_query_descriptor_retry - API function for sending descriptor
+ * requests
+ * hba: per-adapter instance
+ * opcode: attribute opcode
+ * idn: attribute idn to access
+ * index: index field
+ * selector: selector field
+ * desc_buf: the buffer that contains the descriptor
+ * buf_len: length parameter passed to the device
+ *
+ * Returns 0 for success, non-zero in case of failure.
+ * The buf_len parameter will contain, on return, the length parameter
+ * received on the response.
+ */
+int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
+            enum query_opcode opcode, enum desc_idn idn, u8 index,
+            u8 selector, u8 *desc_buf, int *buf_len)
+{
+    int err;
+    int retries;
+
+    for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+        err = __ufshcd_query_descriptor(hba, opcode, idn, index,
+                        selector, desc_buf, buf_len);
+        if (!err || err == -EINVAL)
+            break;
+    }
+
+    return err;
+}
+EXPORT_SYMBOL(ufshcd_query_descriptor_retry);
+
 /**
  * ufshcd_read_desc_param - read the specified descriptor parameter
  * @hba: Pointer to adapter instance
@@ -1977,9 +2042,9 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba,
             return -ENOMEM;
     }

-    ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
-                      desc_id, desc_index, 0, desc_buf,
-                      &buff_len);
+    ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
+                    desc_id, desc_index, 0, desc_buf,
+                    &buff_len);

     if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
         (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
@@ -2017,6 +2082,82 @@ static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
     return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
 }

+int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
+{
+    return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
+}
+EXPORT_SYMBOL(ufshcd_read_device_desc);
+
+/**
+ * ufshcd_read_string_desc - read string descriptor
+ * @hba: pointer to adapter instance
+ * @desc_index: descriptor index
+ * @buf: pointer to buffer where descriptor would be read
+ * @size: size of buf
+ * @ascii: if true convert from unicode to ascii characters
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
+                u32 size, bool ascii)
+{
+    int err = 0;
+
+    err = ufshcd_read_desc(hba,
+                QUERY_DESC_IDN_STRING, desc_index, buf, size);
+
+    if (err) {
+        dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
+            __func__, QUERY_REQ_RETRIES, err);
+        goto out;
+    }
+
+    if (ascii) {
+        int desc_len;
+        int ascii_len;
+        int i;
+        char *buff_ascii;
+
+        desc_len = buf[0];
+        /* remove header and divide by 2 to move from UTF16 to UTF8 */
+        ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
+        if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
+            dev_err(hba->dev, "%s: buffer allocated size is too small\n",
+                __func__);
+            err = -ENOMEM;
+            goto out;
+        }
+
+        buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
+        if (!buff_ascii) {
+            err = -ENOMEM;
+            goto out_free_buff;
+        }
+
+        /*
+         * the descriptor contains string in UTF16 format
+         * we need to convert to utf-8 so it can be displayed
+         */
+        utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
+                desc_len - QUERY_DESC_HDR_SIZE,
+                UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
+
+        /* replace non-printable or non-ASCII characters with spaces */
+        for (i = 0; i < ascii_len; i++)
+            ufshcd_remove_non_printable(&buff_ascii[i]);
+
+        memset(buf + QUERY_DESC_HDR_SIZE, 0,
+               size - QUERY_DESC_HDR_SIZE);
+        memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
+        buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
+out_free_buff:
+        kfree(buff_ascii);
+    }
+out:
+    return err;
+}
+EXPORT_SYMBOL(ufshcd_read_string_desc);
+
 /**
  * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
  * @hba: Pointer to adapter instance
@@ -2813,6 +2954,23 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
     return err;
 }

+/**
+ * ufshcd_hba_stop - Send controller to reset state
+ * @hba: per adapter instance
+ * @can_sleep: perform sleep or just spin
+ */
+static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
+{
+    int err;
+
+    ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
+    err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
+                    CONTROLLER_ENABLE, CONTROLLER_DISABLE,
+                    10, 1, can_sleep);
+    if (err)
+        dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
+}
+
 /**
  * ufshcd_hba_enable - initialize the controller
  * @hba: per adapter instance
@@ -2833,18 +2991,9 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
      * development and testing of this driver. msleep can be changed to
      * mdelay and retry count can be reduced based on the controller.
      */
-    if (!ufshcd_is_hba_active(hba)) {
+    if (!ufshcd_is_hba_active(hba))
         /* change controller state to "reset state" */
-        ufshcd_hba_stop(hba);
-
-        /*
-         * This delay is based on the testing done with UFS host
-         * controller FPGA. The delay can be changed based on the
-         * host controller used.
-         */
-        msleep(5);
-    }
+        ufshcd_hba_stop(hba, true);

     /* UniPro link is disabled at this point */
     ufshcd_set_link_off(hba);
@@ -3365,31 +3514,18 @@ static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
 }

 /**
- * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * __ufshcd_transfer_req_compl - handle SCSI and query command completion
  * @hba: per adapter instance
+ * @completed_reqs: requests to complete
  */
-static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
+                    unsigned long completed_reqs)
 {
     struct ufshcd_lrb *lrbp;
     struct scsi_cmnd *cmd;
-    unsigned long completed_reqs;
-    u32 tr_doorbell;
     int result;
     int index;

-    /* Resetting interrupt aggregation counters first and reading the
-     * DOOR_BELL afterward allows us to handle all the completed requests.
-     * In order to prevent other interrupts starvation the DB is read once
-     * after reset. The down side of this solution is the possibility of
-     * false interrupt if device completes another request after resetting
-     * aggregation and before reading the DB.
-     */
-    if (ufshcd_is_intr_aggr_allowed(hba))
-        ufshcd_reset_intr_aggr(hba);
-
-    tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
-    completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
-
     for_each_set_bit(index, &completed_reqs, hba->nutrs) {
         lrbp = &hba->lrb[index];
         cmd = lrbp->cmd;
@@ -3418,6 +3554,31 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
     wake_up(&hba->dev_cmd.tag_wq);
 }

+/**
+ * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+ */
+static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+{
+    unsigned long completed_reqs;
+    u32 tr_doorbell;
+
+    /* Resetting interrupt aggregation counters first and reading the
+     * DOOR_BELL afterward allows us to handle all the completed requests.
+     * In order to prevent other interrupts starvation the DB is read once
+     * after reset. The down side of this solution is the possibility of
+     * false interrupt if device completes another request after resetting
+     * aggregation and before reading the DB.
+     */
+    if (ufshcd_is_intr_aggr_allowed(hba))
+        ufshcd_reset_intr_aggr(hba);
+
+    tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+    completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
+
+    __ufshcd_transfer_req_compl(hba, completed_reqs);
+}
+
 /**
  * ufshcd_disable_ee - disable exception event
  * @hba: per-adapter instance
@@ -3630,7 +3791,7 @@ static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
  */
 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
 {
-    return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
+    return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
 }

 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
@@ -3639,6 +3800,43 @@ static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
             QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
 }

+static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
+{
+    int err;
+    u32 curr_status = 0;
+
+    if (hba->is_urgent_bkops_lvl_checked)
+        goto enable_auto_bkops;
+
+    err = ufshcd_get_bkops_status(hba, &curr_status);
+    if (err) {
+        dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
+                __func__, err);
+        goto out;
+    }
+
+    /*
+     * We are seeing that some devices are raising the urgent bkops
+     * exception events even when BKOPS status doesn't indicate performace
+     * impacted or critical. Handle these device by determining their urgent
+     * bkops status at runtime.
+     */
+    if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
+        dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
+                __func__, curr_status);
+        /* update the current status as the urgent bkops level */
+        hba->urgent_bkops_lvl = curr_status;
+        hba->is_urgent_bkops_lvl_checked = true;
+    }
+
+enable_auto_bkops:
+    err = ufshcd_enable_auto_bkops(hba);
+out:
+    if (err < 0)
+        dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
+                __func__, err);
+}
+
 /**
  * ufshcd_exception_event_handler - handle exceptions raised by device
  * @work: pointer to work data
@@ -3662,17 +3860,95 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
     }

     status &= hba->ee_ctrl_mask;
-    if (status & MASK_EE_URGENT_BKOPS) {
-        err = ufshcd_urgent_bkops(hba);
-        if (err < 0)
-            dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
-                    __func__, err);
-    }
+
+    if (status & MASK_EE_URGENT_BKOPS)
+        ufshcd_bkops_exception_event_handler(hba);
+
 out:
     pm_runtime_put_sync(hba->dev);
     return;
 }

+/* Complete requests that have door-bell cleared */
+static void ufshcd_complete_requests(struct ufs_hba *hba)
+{
+    ufshcd_transfer_req_compl(hba);
+    ufshcd_tmc_handler(hba);
+}
+
+/**
+ * ufshcd_quirk_dl_nac_errors - This function checks if error handling is
+ *              to recover from the DL NAC errors or not.
+ * @hba: per-adapter instance
+ *
+ * Returns true if error handling is required, false otherwise
+ */
+static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
+{
+    unsigned long flags;
+    bool err_handling = true;
+
+    spin_lock_irqsave(hba->host->host_lock, flags);
+    /*
+     * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only workaround the
+     * device fatal error and/or DL NAC & REPLAY timeout errors.
+     */
+    if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
+        goto out;
+
+    if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
+        ((hba->saved_err & UIC_ERROR) &&
+         (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
+        goto out;
+
+    if ((hba->saved_err & UIC_ERROR) &&
+        (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
+        int err;
+        /*
+         * wait for 50ms to see if we can get any other errors or not.
+         */
+        spin_unlock_irqrestore(hba->host->host_lock, flags);
+        msleep(50);
+        spin_lock_irqsave(hba->host->host_lock, flags);
+
+        /*
+         * now check if we have got any other severe errors other than
+         * DL NAC error?
+         */
+        if ((hba->saved_err & INT_FATAL_ERRORS) ||
+            ((hba->saved_err & UIC_ERROR) &&
+            (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
+            goto out;
+
+        /*
+         * As DL NAC is the only error received so far, send out NOP
+         * command to confirm if link is still active or not.
+         *   - If we don't get any response then do error recovery.
+         *   - If we get response then clear the DL NAC error bit.
+         */
+        spin_unlock_irqrestore(hba->host->host_lock, flags);
+        err = ufshcd_verify_dev_init(hba);
+        spin_lock_irqsave(hba->host->host_lock, flags);
+
+        if (err)
+            goto out;
+
+        /* Link seems to be alive hence ignore the DL NAC errors */
+        if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
+            hba->saved_err &= ~UIC_ERROR;
+        /* clear NAC error */
+        hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+        if (!hba->saved_uic_err) {
+            err_handling = false;
+            goto out;
+        }
+    }
+out:
+    spin_unlock_irqrestore(hba->host->host_lock, flags);
+    return err_handling;
+}
+
 /**
  * ufshcd_err_handler - handle UFS errors that require s/w attention
  * @work: pointer to work structure
@@ -3685,6 +3961,7 @@ static void ufshcd_err_handler(struct work_struct *work)
     u32 err_tm = 0;
     int err = 0;
     int tag;
+    bool needs_reset = false;

     hba = container_of(work, struct ufs_hba, eh_work);
...@@ -3692,40 +3969,86 @@ static void ufshcd_err_handler(struct work_struct *work) ...@@ -3692,40 +3969,86 @@ static void ufshcd_err_handler(struct work_struct *work)
ufshcd_hold(hba, false); ufshcd_hold(hba, false);
spin_lock_irqsave(hba->host->host_lock, flags); spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->ufshcd_state == UFSHCD_STATE_RESET) { if (hba->ufshcd_state == UFSHCD_STATE_RESET)
spin_unlock_irqrestore(hba->host->host_lock, flags);
goto out; goto out;
}
hba->ufshcd_state = UFSHCD_STATE_RESET; hba->ufshcd_state = UFSHCD_STATE_RESET;
ufshcd_set_eh_in_progress(hba); ufshcd_set_eh_in_progress(hba);
/* Complete requests that have door-bell cleared by h/w */ /* Complete requests that have door-bell cleared by h/w */
ufshcd_transfer_req_compl(hba); ufshcd_complete_requests(hba);
ufshcd_tmc_handler(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags); if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
bool ret;
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
ret = ufshcd_quirk_dl_nac_errors(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
if (!ret)
goto skip_err_handling;
}
if ((hba->saved_err & INT_FATAL_ERRORS) ||
((hba->saved_err & UIC_ERROR) &&
(hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
needs_reset = true;
/*
* If a host reset is required, skip forcefully clearing the pending
* transfers; they will automatically get cleared after link startup.
*/
if (needs_reset)
goto skip_pending_xfer_clear;
/* release lock as clear command might sleep */
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* Clear pending transfer requests */
for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
if (ufshcd_clear_cmd(hba, tag)) {
err_xfer = true;
goto lock_skip_pending_xfer_clear;
}
}
/* Clear pending task management requests */
for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
if (ufshcd_clear_tm_cmd(hba, tag)) {
err_tm = true;
goto lock_skip_pending_xfer_clear;
}
}
lock_skip_pending_xfer_clear:
spin_lock_irqsave(hba->host->host_lock, flags);
/* Complete the requests that are cleared by s/w */
ufshcd_complete_requests(hba);
if (err_xfer || err_tm)
needs_reset = true;
skip_pending_xfer_clear:
/* Fatal errors need reset */
if (needs_reset) {
unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
/*
* ufshcd_reset_and_restore() does the link reinitialization,
* which needs at least one empty doorbell slot to send the
* device management commands (NOP and query commands).
* If no slot is empty at this moment, free up the last
* slot forcefully.
*/
if (hba->outstanding_reqs == max_doorbells)
__ufshcd_transfer_req_compl(hba,
(1UL << (hba->nutrs - 1)));
spin_unlock_irqrestore(hba->host->host_lock, flags);
err = ufshcd_reset_and_restore(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
if (err) {
dev_err(hba->dev, "%s: reset and restore failed\n",
__func__);
...@@ -3739,9 +4062,19 @@ static void ufshcd_err_handler(struct work_struct *work)
hba->saved_err = 0;
hba->saved_uic_err = 0;
}
skip_err_handling:
if (!needs_reset) {
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
if (hba->saved_err || hba->saved_uic_err)
dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
__func__, hba->saved_err, hba->saved_uic_err);
}
ufshcd_clear_eh_in_progress(hba);
out:
spin_unlock_irqrestore(hba->host->host_lock, flags);
scsi_unblock_requests(hba->host);
ufshcd_release(hba);
pm_runtime_put_sync(hba->dev);
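To make the doorbell arithmetic above concrete, a worked example assuming the usual 32 transfer request slots on a 64-bit build:

/*
 * Worked example (64-bit build, hba->nutrs == 32):
 *
 *   max_doorbells         = (1UL << 32) - 1 = 0xFFFFFFFF
 *   hba->outstanding_reqs == 0xFFFFFFFF    -> every slot busy
 *   completion mask       = 1UL << (32 - 1) = 0x80000000
 *
 * __ufshcd_transfer_req_compl() then forcibly completes slot 31 so
 * that ufshcd_reset_and_restore() has a free slot for the NOP and
 * query device management commands.
 */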
...@@ -3759,6 +4092,14 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
else if (hba->dev_quirks &
UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
hba->uic_error |=
UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
}
/* UIC NL/TL/DME errors need software retry */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
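A minimal sketch of how a UECDL value decomposes under the checks above; the helper name is illustrative and the mask values come from the ufshci.h hunk further down:

/* Illustrative decode of a UECDL (data link layer error) value */
static void example_decode_uecdl(u32 reg)
{
	if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)	/* 0x2000 */
		pr_debug("PA_INIT error -> always treated as fatal\n");
	else if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)	/* 0x0001 */
		pr_debug("NAC received -> recoverable on quirky devices\n");
	else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT) /* 0x0002 */
		pr_debug("TCx replay timeout -> needs reset\n");
}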
...@@ -3796,15 +4137,18 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
}
if (queue_eh_work) {
/*
* Update the transfer error masks to sticky bits; do this
* irrespective of the current ufshcd_state.
*/
hba->saved_err |= hba->errors;
hba->saved_uic_err |= hba->uic_error;
/* handle fatal errors only when link is functional */
if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
/* block commands from scsi mid-layer */
scsi_block_requests(hba->host);
hba->ufshcd_state = UFSHCD_STATE_ERROR;
schedule_work(&hba->eh_work);
}
...@@ -3897,7 +4241,7 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
/* poll for max. 1 sec to clear door bell register by h/w */
err = ufshcd_wait_for_register(hba,
REG_UTP_TASK_REQ_DOOR_BELL,
mask, 0, 1000, 1000, true);
out:
return err;
}
...@@ -4179,7 +4523,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
/* Reset the host controller */
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_hba_stop(hba, false);
spin_unlock_irqrestore(hba->host->host_lock, flags);
err = ufshcd_hba_enable(hba);
...@@ -4466,6 +4810,164 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
return ret;
}
static int ufs_get_device_info(struct ufs_hba *hba,
struct ufs_device_info *card_data)
{
int err;
u8 model_index;
u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1] = {0};
u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
err = ufshcd_read_device_desc(hba, desc_buf,
QUERY_DESC_DEVICE_MAX_SIZE);
if (err) {
dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
__func__, err);
goto out;
}
/*
* Get the vendor ID (wManufacturerID); it is stored in big-endian
* byte order in the device descriptor.
*/
card_data->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
if (err) {
dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
__func__, err);
goto out;
}
str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
strlcpy(card_data->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
MAX_MODEL_LEN));
/* Null terminate the model string */
card_data->model[MAX_MODEL_LEN] = '\0';
out:
return err;
}
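As an aside, the same two-byte big-endian load could be written with the kernel's unaligned accessors, matching the wwn_to_u64() cleanup later in this series; a minimal equivalent sketch:

#include <asm/unaligned.h>

/* Equivalent to the shift-and-or above */
card_data->wmanufacturerid =
	get_unaligned_be16(&desc_buf[DEVICE_DESC_PARAM_MANF_ID]);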
void ufs_advertise_fixup_device(struct ufs_hba *hba)
{
int err;
struct ufs_dev_fix *f;
struct ufs_device_info card_data;
card_data.wmanufacturerid = 0;
err = ufs_get_device_info(hba, &card_data);
if (err) {
dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
__func__, err);
return;
}
for (f = ufs_fixups; f->quirk; f++) {
if (((f->card.wmanufacturerid == card_data.wmanufacturerid) ||
(f->card.wmanufacturerid == UFS_ANY_VENDOR)) &&
(STR_PRFX_EQUAL(f->card.model, card_data.model) ||
!strcmp(f->card.model, UFS_ANY_MODEL)))
hba->dev_quirks |= f->quirk;
}
}
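ufs_fixups is the quirk table this loop walks. A hedged sketch of what an entry might look like; the UFS_FIX()/END_FIX macro names and the vendor constant are assumptions for illustration, not part of this hunk:

/* Sketch of the device quirk table matched above (names assumed) */
static struct ufs_dev_fix ufs_fixups[] = {
	/* UFS cards deviations table */
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_ANY_VENDOR, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),

	END_FIX
};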
/**
* ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
* @hba: per-adapter instance
*
* PA_TActivate parameter can be tuned manually if UniPro version is less than
* 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
* RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
* the hibern8 exit latency.
*
* Returns zero on success, non-zero error value on failure.
*/
static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
{
int ret = 0;
u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
ret = ufshcd_dme_peer_get(hba,
UIC_ARG_MIB_SEL(
RX_MIN_ACTIVATETIME_CAPABILITY,
UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
&peer_rx_min_activatetime);
if (ret)
goto out;
/* make sure proper unit conversion is applied */
tuned_pa_tactivate =
((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
/ PA_TACTIVATE_TIME_UNIT_US);
ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
tuned_pa_tactivate);
out:
return ret;
}
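The conversion above scales from RX_MIN_ACTIVATETIME_CAPABILITY units (steps of RX_MIN_ACTIVATETIME_UNIT_US = 100us) down to PA_TActivate units (PA_TACTIVATE_TIME_UNIT_US = 10us). A worked example with an assumed peer value:

/*
 * Example: peer_rx_min_activatetime == 8 (8 * 100us = 800us)
 *
 *   tuned_pa_tactivate = (8 * 100) / 10 = 80
 *
 * so PA_TACTIVATE is programmed to 80 units of 10us = 800us,
 * matching the peer's minimum activate-time requirement.
 */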
/**
* ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
* @hba: per-adapter instance
*
* PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
* 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
* TX_HIBERN8TIME_CAPABILITY and the peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
* This optimal value can help reduce the hibern8 exit latency.
*
* Returns zero on success, non-zero error value on failure.
*/
static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
{
int ret = 0;
u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
u32 max_hibern8_time, tuned_pa_hibern8time;
ret = ufshcd_dme_get(hba,
UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
&local_tx_hibern8_time_cap);
if (ret)
goto out;
ret = ufshcd_dme_peer_get(hba,
UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
&peer_rx_hibern8_time_cap);
if (ret)
goto out;
max_hibern8_time = max(local_tx_hibern8_time_cap,
peer_rx_hibern8_time_cap);
/* make sure proper unit conversion is applied */
tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
/ PA_HIBERN8_TIME_UNIT_US);
ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
tuned_pa_hibern8time);
out:
return ret;
}
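Here both capabilities are expressed in HIBERN8TIME_UNIT_US (100us) steps and PA_Hibern8Time uses PA_HIBERN8_TIME_UNIT_US (also 100us), so the conversion is effectively a pass-through of the larger value. A worked example with assumed capabilities:

/*
 * Example: local_tx_hibern8_time_cap == 3, peer_rx_hibern8_time_cap == 5
 *
 *   max_hibern8_time     = max(3, 5) = 5
 *   tuned_pa_hibern8time = (5 * 100) / 100 = 5
 *
 * so PA_HIBERN8TIME ends up as 5 units of 100us = 500us, the larger
 * of the two capabilities.
 */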
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
ufshcd_tune_pa_tactivate(hba);
ufshcd_tune_pa_hibern8time(hba);
}
if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
/* set 1ms timeout for PA_TACTIVATE */
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
}
/**
* ufshcd_probe_hba - probe hba to detect device and initialize
* @hba: per-adapter instance
...@@ -4482,6 +4984,10 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
ufshcd_init_pwr_info(hba);
/* set the default level for urgent bkops */
hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
hba->is_urgent_bkops_lvl_checked = false;
/* UniPro link is active now */
ufshcd_set_link_active(hba);
...@@ -4493,6 +4999,14 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
if (ret)
goto out;
ufs_advertise_fixup_device(hba);
ufshcd_tune_unipro_params(hba);
ret = ufshcd_set_vccq_rail_unused(hba,
(hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
if (ret)
goto out;
/* UFS device is also active now */
ufshcd_set_ufs_dev_active(hba);
ufshcd_force_reset_auto_bkops(hba);
...@@ -4567,6 +5081,41 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
ufshcd_probe_hba(hba);
}
static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
{
unsigned long flags;
struct Scsi_Host *host;
struct ufs_hba *hba;
int index;
bool found = false;
if (!scmd || !scmd->device || !scmd->device->host)
return BLK_EH_NOT_HANDLED;
host = scmd->device->host;
hba = shost_priv(host);
if (!hba)
return BLK_EH_NOT_HANDLED;
spin_lock_irqsave(host->host_lock, flags);
for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
if (hba->lrb[index].cmd == scmd) {
found = true;
break;
}
}
spin_unlock_irqrestore(host->host_lock, flags);
/*
* Bypass SCSI error handling and reset the block layer timer if this
* SCSI command was not actually dispatched to UFS driver, otherwise
* let SCSI layer handle the error as usual.
*/
return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
}
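For reference, the decision the new timeout hook encodes:

/*
 * Timeout decision, as implemented above:
 *
 *   command found in hba->lrb[] (still outstanding in the doorbell)
 *       -> BLK_EH_NOT_HANDLED: let the SCSI error handler run
 *   command not found (it never made it to the hardware)
 *       -> BLK_EH_RESET_TIMER: rearm the block layer timer and wait
 */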
static struct scsi_host_template ufshcd_driver_template = {
.module = THIS_MODULE,
.name = UFSHCD,
...@@ -4579,6 +5128,7 @@ static struct scsi_host_template ufshcd_driver_template = {
.eh_abort_handler = ufshcd_abort,
.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
.eh_host_reset_handler = ufshcd_eh_host_reset_handler,
.eh_timed_out = ufshcd_eh_timed_out,
.this_id = -1,
.sg_tablesize = SG_ALL,
.cmd_per_lun = UFSHCD_CMD_PER_LUN,
...@@ -4607,13 +5157,24 @@ static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
struct ufs_vreg *vreg)
{
if (!vreg)
return 0;
else if (vreg->unused)
return 0;
else
return ufshcd_config_vreg_load(hba->dev, vreg,
UFS_VREG_LPM_LOAD_UA);
}
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
struct ufs_vreg *vreg)
{
if (!vreg)
return 0;
else if (vreg->unused)
return 0;
else
return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
}
static int ufshcd_config_vreg(struct device *dev,
...@@ -4648,7 +5209,9 @@ static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
int ret = 0;
if (!vreg)
goto out;
else if (vreg->enabled || vreg->unused)
goto out;
ret = ufshcd_config_vreg(dev, vreg, true);
...@@ -4668,7 +5231,9 @@ static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
int ret = 0;
if (!vreg)
goto out;
else if (!vreg->enabled || vreg->unused)
goto out;
ret = regulator_disable(vreg->reg);
...@@ -4774,6 +5339,36 @@ static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
return 0;
}
static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
{
int ret = 0;
struct ufs_vreg_info *info = &hba->vreg_info;
if (!info)
goto out;
else if (!info->vccq)
goto out;
if (unused) {
/* shut off the rail here */
ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
/*
* Mark this rail as no longer used, so it doesn't get enabled
* later by mistake
*/
if (!ret)
info->vccq->unused = true;
} else {
/*
* The rail should already be enabled here; just make sure
* the unused flag is cleared.
*/
info->vccq->unused = false;
}
out:
return ret;
}
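Once a rail has been marked unused, the regulator helpers earlier in this hunk all short-circuit on the flag; summarizing the resulting behaviour:

/*
 * After ufshcd_set_vccq_rail_unused(hba, true):
 *
 *   info->vccq->unused == true, so
 *   ufshcd_enable_vreg()     -> returns 0 without touching the regulator
 *   ufshcd_disable_vreg()    -> returns 0 without touching the regulator
 *   ufshcd_config_vreg_lpm() -> returns 0 (no load reconfiguration)
 *   ufshcd_config_vreg_hpm() -> returns 0 (no load reconfiguration)
 */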
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
bool skip_ref_clk)
{
...@@ -5092,11 +5687,21 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
else if ((req_link_state == UIC_LINK_OFF_STATE) &&
(!check_for_bkops || (check_for_bkops &&
!hba->auto_bkops_enabled))) {
/*
* Make sure the link is in low power mode; currently this is done
* by putting the link in Hibern8. Another way to put the link in
* low power mode is to send a DME end point reset to the device
* followed by a DME reset to the local UniPro, but entering
* Hibern8 is much faster.
*/
ret = ufshcd_uic_hibern8_enter(hba);
if (ret)
goto out;
/*
* Change controller state to "reset state" which
* should also put the link in off/reset state
*/
ufshcd_hba_stop(hba, true);
/*
* TODO: Check if we need any delay to make sure that
* controller is reset
...@@ -5110,6 +5715,16 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
{
/*
* Some UFS devices may keep drawing more than the sleep current
* (at least for 500us) from the UFS rails (especially from the VCCQ
* rail). To avoid this, add a 2ms delay before putting these UFS
* rails into LPM mode.
*/
if (!ufshcd_is_link_active(hba) &&
hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
usleep_range(2000, 2100);
/*
* If the UFS device is in UFS_Sleep, turn off the VCC rail to save
* some power.
...@@ -5572,7 +6187,7 @@ void ufshcd_remove(struct ufs_hba *hba)
scsi_remove_host(hba->host);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
ufshcd_hba_stop(hba, true);
scsi_host_put(hba->host);
...@@ -5836,6 +6451,21 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
init_waitqueue_head(&hba->dev_cmd.tag_wq);
ufshcd_init_clk_gating(hba);
/*
* In order to avoid any spurious interrupt immediately after
* registering UFS controller interrupt handler, clear any pending UFS
* interrupt status and disable all the UFS interrupts.
*/
ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
REG_INTERRUPT_STATUS);
ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
/*
* Make sure that UFS interrupts are disabled and any pending interrupt
* status is cleared before registering UFS interrupt handler.
*/
mb();
/* IRQ registration */
err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
if (err) {
......
...@@ -54,6 +54,7 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/regulator/consumer.h>
#include "unipro.h"
#include <asm/irq.h>
#include <asm/byteorder.h>
...@@ -383,6 +384,9 @@ struct ufs_init_prefetch {
* @clk_list_head: UFS host controller clocks list node head
* @pwr_info: holds current power mode
* @max_pwr_info: keeps the device max valid pwm
* @urgent_bkops_lvl: keeps track of urgent bkops level for device
* @is_urgent_bkops_lvl_checked: tracks whether the urgent bkops level for the
* device is known or not
*/
struct ufs_hba {
void __iomem *mmio_base;
...@@ -470,6 +474,9 @@ struct ufs_hba {
unsigned int quirks; /* Deviations from standard UFSHCI spec. */
/* Device deviations from standard UFS device spec. */
unsigned int dev_quirks;
wait_queue_head_t tm_wq;
wait_queue_head_t tm_tag_wq;
unsigned long tm_condition;
...@@ -509,6 +516,8 @@ struct ufs_hba {
bool wlun_dev_clr_ua;
/* Number of lanes available (1 or 2) for Rx/Tx */
u32 lanes_per_direction;
struct ufs_pa_layer_attr pwr_info;
struct ufs_pwr_mode_info max_pwr_info;
...@@ -533,6 +542,9 @@ struct ufs_hba {
struct devfreq *devfreq;
struct ufs_clk_scaling clk_scaling;
bool is_sys_suspended;
enum bkops_status urgent_bkops_lvl;
bool is_urgent_bkops_lvl_checked;
};
/* Returns true if clocks can be gated. Otherwise false */
...@@ -588,15 +600,9 @@ int ufshcd_alloc_host(struct device *, struct ufs_hba **);
void ufshcd_dealloc_host(struct ufs_hba *);
int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
void ufshcd_remove(struct ufs_hba *);
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
u32 val, unsigned long interval_us,
unsigned long timeout_ms, bool can_sleep);
static inline void check_upiu_size(void)
{
...@@ -682,11 +688,27 @@ static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
}
int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size);
static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
{
return (pwr_info->pwr_rx == FAST_MODE ||
pwr_info->pwr_rx == FASTAUTO_MODE) &&
(pwr_info->pwr_tx == FAST_MODE ||
pwr_info->pwr_tx == FASTAUTO_MODE);
}
#define ASCII_STD true
int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
u32 size, bool ascii);
/* Expose Query-Request API */
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, bool *flag_res);
int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
/* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
......
...@@ -92,6 +92,7 @@ enum {
UFSHCI_VERSION_10 = 0x00010000, /* 1.0 */
UFSHCI_VERSION_11 = 0x00010100, /* 1.1 */
UFSHCI_VERSION_20 = 0x00000200, /* 2.0 */
UFSHCI_VERSION_21 = 0x00000210, /* 2.1 */
};
/*
...@@ -170,6 +171,8 @@ enum {
#define UIC_DATA_LINK_LAYER_ERROR UFS_BIT(31)
#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF
#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000
#define UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED 0x0001
#define UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT 0x0002
/* UECN - Host UIC Error Code Network Layer 40h */
#define UIC_NETWORK_LAYER_ERROR UFS_BIT(31)
...@@ -209,6 +212,7 @@ enum {
/* GenSelectorIndex calculation macros for M-PHY attributes */
#define UIC_ARG_MPHY_TX_GEN_SEL_INDEX(lane) (lane)
#define UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane) (PA_MAXDATALANES + (lane))
#define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) |\
((sel) & 0xFFFF))
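Since PA_MAXDATALANES is 4 (see the unipro.h hunk below), the new RX macro places RX lanes after the four TX lane indices in the GenSelectorIndex space. A worked example for RX lane 0:

/*
 * UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0) = PA_MAXDATALANES + 0 = 4
 *
 * so the attribute/selector word for RX_MIN_ACTIVATETIME_CAPABILITY
 * (0x008F) on RX lane 0 becomes:
 *
 *   UIC_ARG_MIB_SEL(0x008F, 4) = (0x008F << 16) | 4 = 0x008F0004
 */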
......
...@@ -15,6 +15,7 @@
/*
* M-TX Configuration Attributes
*/
#define TX_HIBERN8TIME_CAPABILITY 0x000F
#define TX_MODE 0x0021
#define TX_HSRATE_SERIES 0x0022
#define TX_HSGEAR 0x0023
...@@ -48,8 +49,12 @@
#define RX_ENTER_HIBERN8 0x00A7
#define RX_BYPASS_8B10B_ENABLE 0x00A8
#define RX_TERMINATION_FORCE_ENABLE 0x0089
#define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F
#define RX_HIBERN8TIME_CAPABILITY 0x0092
#define is_mphy_tx_attr(attr) (attr < RX_MODE)
#define RX_MIN_ACTIVATETIME_UNIT_US 100
#define HIBERN8TIME_UNIT_US 100
/*
* PHY Adapter attributes
*/
...@@ -70,6 +75,7 @@
#define PA_MAXRXSPEEDFAST 0x1541
#define PA_MAXRXSPEEDSLOW 0x1542
#define PA_TXLINKSTARTUPHS 0x1544
#define PA_LOCAL_TX_LCC_ENABLE 0x155E
#define PA_TXSPEEDFAST 0x1565
#define PA_TXSPEEDSLOW 0x1566
#define PA_REMOTEVERINFO 0x15A0
...@@ -110,6 +116,12 @@
#define PA_STALLNOCONFIGTIME 0x15A3
#define PA_SAVECONFIGTIME 0x15A4
#define PA_TACTIVATE_TIME_UNIT_US 10
#define PA_HIBERN8_TIME_UNIT_US 100
/* PHY Adapter Protocol Constants */
#define PA_MAXDATALANES 4
/* PA power modes */
enum {
FAST_MODE = 1,
...@@ -143,6 +155,16 @@ enum ufs_hs_gear_tag {
UFS_HS_G3, /* HS Gear 3 */
};
enum ufs_unipro_ver {
UFS_UNIPRO_VER_RESERVED = 0,
UFS_UNIPRO_VER_1_40 = 1, /* UniPro version 1.40 */
UFS_UNIPRO_VER_1_41 = 2, /* UniPro version 1.41 */
UFS_UNIPRO_VER_1_6 = 3, /* UniPro version 1.6 */
UFS_UNIPRO_VER_MAX = 4, /* UniPro unsupported version */
/* UniPro version field mask in PA_LOCALVERINFO */
UFS_UNIPRO_VER_MASK = 0xF,
};
/*
* Data Link Layer Attributes
*/
......
...@@ -28,6 +28,7 @@
#define SCSI_TRANSPORT_FC_H
#include <linux/sched.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_netlink.h>
...@@ -797,22 +798,12 @@ fc_remote_port_chkready(struct fc_rport *rport)
static inline u64 wwn_to_u64(u8 *wwn)
{
return get_unaligned_be64(wwn);
}
static inline void u64_to_wwn(u64 inm, u8 *wwn)
{
put_unaligned_be64(inm, wwn);
}
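A small usage sketch of the simplified helpers (illustrative values): the unaligned accessors keep the same big-endian byte order as the open-coded shifts they replace, so converting back and forth is lossless.

/* Round-trip sketch: wwn[] holds a big-endian world wide name */
u8 wwn[8] = { 0x21, 0x00, 0x00, 0x24, 0xff, 0x3f, 0x1c, 0xa2 };
u64 v = wwn_to_u64(wwn);	/* 0x21000024ff3f1ca2 */
u8 out[8];

u64_to_wwn(v, out);		/* out[] holds the same 8 bytes as wwn[] */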
/**
......