Commit 938a2fbe authored by Martin K. Petersen

Merge branch '5.11/scsi-fixes' into 5.12/scsi-queue

Pull in the 5.11 SCSI fixes branch to provide an updated baseline for
megaraid and hisi_sas. Both drivers received core changes in
v5.11-rc3.
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parents 7f3a79a7 be255335
......@@ -14,6 +14,7 @@
#include <linux/debugfs.h>
#include <linux/dmapool.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/lcm.h>
#include <linux/libata.h>
#include <linux/mfd/syscon.h>
......@@ -294,6 +295,7 @@ enum {
struct hisi_sas_hw {
int (*hw_init)(struct hisi_hba *hisi_hba);
int (*interrupt_preinit)(struct hisi_hba *hisi_hba);
void (*setup_itct)(struct hisi_hba *hisi_hba,
struct hisi_sas_device *device);
int (*slot_index_alloc)(struct hisi_hba *hisi_hba,
......@@ -393,6 +395,8 @@ struct hisi_hba {
u32 refclk_frequency_mhz;
u8 sas_addr[SAS_ADDR_SIZE];
int *irq_map; /* v2 hw */
int n_phy;
spinlock_t lock;
struct semaphore sem;
......
......@@ -2614,6 +2614,13 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
return NULL;
}
static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba)
{
if (hisi_hba->hw->interrupt_preinit)
return hisi_hba->hw->interrupt_preinit(hisi_hba);
return 0;
}
int hisi_sas_probe(struct platform_device *pdev,
const struct hisi_sas_hw *hw)
{
......@@ -2671,6 +2678,10 @@ int hisi_sas_probe(struct platform_device *pdev,
sha->sas_port[i] = &hisi_hba->port[i].sas_port;
}
rc = hisi_sas_interrupt_preinit(hisi_hba);
if (rc)
goto err_out_ha;
rc = scsi_add_host(shost, &pdev->dev);
if (rc)
goto err_out_ha;
......
......@@ -3302,6 +3302,28 @@ static irq_handler_t fatal_interrupts[HISI_SAS_FATAL_INT_NR] = {
fatal_axi_int_v2_hw
};
#define CQ0_IRQ_INDEX (96)
static int hisi_sas_v2_interrupt_preinit(struct hisi_hba *hisi_hba)
{
struct platform_device *pdev = hisi_hba->platform_dev;
struct Scsi_Host *shost = hisi_hba->shost;
struct irq_affinity desc = {
.pre_vectors = CQ0_IRQ_INDEX,
.post_vectors = 16,
};
int resv = desc.pre_vectors + desc.post_vectors, minvec = resv + 1, nvec;
nvec = devm_platform_get_irqs_affinity(pdev, &desc, minvec, 128,
&hisi_hba->irq_map);
if (nvec < 0)
return nvec;
shost->nr_hw_queues = hisi_hba->cq_nvecs = nvec - resv;
return 0;
}
/*
* There is a limitation in the hip06 chipset that we need
* to map in all mbigen interrupts, even if they are not used.
......@@ -3310,14 +3332,11 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
{
struct platform_device *pdev = hisi_hba->platform_dev;
struct device *dev = &pdev->dev;
int irq, rc = 0, irq_map[128];
int irq, rc = 0;
int i, phy_no, fatal_no, queue_no;
for (i = 0; i < 128; i++)
irq_map[i] = platform_get_irq(pdev, i);
for (i = 0; i < HISI_SAS_PHY_INT_NR; i++) {
irq = irq_map[i + 1]; /* Phy up/down is irq1 */
irq = hisi_hba->irq_map[i + 1]; /* Phy up/down is irq1 */
rc = devm_request_irq(dev, irq, phy_interrupts[i], 0,
DRV_NAME " phy", hisi_hba);
if (rc) {
......@@ -3331,7 +3350,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
irq = irq_map[phy_no + 72];
irq = hisi_hba->irq_map[phy_no + 72];
rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0,
DRV_NAME " sata", phy);
if (rc) {
......@@ -3343,7 +3362,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
}
for (fatal_no = 0; fatal_no < HISI_SAS_FATAL_INT_NR; fatal_no++) {
irq = irq_map[fatal_no + 81];
irq = hisi_hba->irq_map[fatal_no + 81];
rc = devm_request_irq(dev, irq, fatal_interrupts[fatal_no], 0,
DRV_NAME " fatal", hisi_hba);
if (rc) {
......@@ -3354,24 +3373,22 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
}
}
for (queue_no = 0; queue_no < hisi_hba->queue_count; queue_no++) {
for (queue_no = 0; queue_no < hisi_hba->cq_nvecs; queue_no++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[queue_no];
cq->irq_no = irq_map[queue_no + 96];
cq->irq_no = hisi_hba->irq_map[queue_no + 96];
rc = devm_request_threaded_irq(dev, cq->irq_no,
cq_interrupt_v2_hw,
cq_thread_v2_hw, IRQF_ONESHOT,
DRV_NAME " cq", cq);
if (rc) {
dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n",
irq, rc);
cq->irq_no, rc);
rc = -ENOENT;
goto err_out;
}
cq->irq_mask = irq_get_affinity_mask(cq->irq_no);
}
hisi_hba->cq_nvecs = hisi_hba->queue_count;
err_out:
return rc;
}
......@@ -3529,6 +3546,26 @@ static struct device_attribute *host_attrs_v2_hw[] = {
NULL
};
static int map_queues_v2_hw(struct Scsi_Host *shost)
{
struct hisi_hba *hisi_hba = shost_priv(shost);
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
const struct cpumask *mask;
unsigned int queue, cpu;
for (queue = 0; queue < qmap->nr_queues; queue++) {
mask = irq_get_affinity_mask(hisi_hba->irq_map[96 + queue]);
if (!mask)
continue;
for_each_cpu(cpu, mask)
qmap->mq_map[cpu] = qmap->queue_offset + queue;
}
return 0;
}
static struct scsi_host_template sht_v2_hw = {
.name = DRV_NAME,
.proc_name = DRV_NAME,
......@@ -3553,10 +3590,13 @@ static struct scsi_host_template sht_v2_hw = {
#endif
.shost_attrs = host_attrs_v2_hw,
.host_reset = hisi_sas_host_reset,
.map_queues = map_queues_v2_hw,
.host_tagset = 1,
};
static const struct hisi_sas_hw hisi_sas_v2_hw = {
.hw_init = hisi_sas_v2_init,
.interrupt_preinit = hisi_sas_v2_interrupt_preinit,
.setup_itct = setup_itct_v2_hw,
.slot_index_alloc = slot_index_alloc_quirk_v2_hw,
.alloc_dev = alloc_dev_quirk_v2_hw,
......
......@@ -37,6 +37,7 @@
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/irq_poll.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
......@@ -113,6 +114,10 @@ unsigned int enable_sdev_max_qd;
module_param(enable_sdev_max_qd, int, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
int host_tagset_enable = 1;
module_param(host_tagset_enable, int, 0444);
MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)");
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
......@@ -3119,6 +3124,19 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
return 0;
}
static int megasas_map_queues(struct Scsi_Host *shost)
{
struct megasas_instance *instance;
instance = (struct megasas_instance *)shost->hostdata;
if (shost->nr_hw_queues == 1)
return 0;
return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
instance->pdev, instance->low_latency_index_start);
}
static void megasas_aen_polling(struct work_struct *work);
/**
......@@ -3427,6 +3445,7 @@ static struct scsi_host_template megasas_template = {
.eh_timed_out = megasas_reset_timer,
.shost_attrs = megaraid_host_attrs,
.bios_param = megasas_bios_param,
.map_queues = megasas_map_queues,
.change_queue_depth = scsi_change_queue_depth,
.max_segment_size = 0xffffffff,
};
......@@ -6808,6 +6827,26 @@ static int megasas_io_attach(struct megasas_instance *instance)
host->max_lun = MEGASAS_MAX_LUN;
host->max_cmd_len = 16;
/* Use shared host tagset only for fusion adaptors
* if there are managed interrupts (smp affinity enabled case).
* Single msix_vectors in kdump, so shared host tag is also disabled.
*/
host->host_tagset = 0;
host->nr_hw_queues = 1;
if ((instance->adapter_type != MFI_SERIES) &&
(instance->msix_vectors > instance->low_latency_index_start) &&
host_tagset_enable &&
instance->smp_affinity_enable) {
host->host_tagset = 1;
host->nr_hw_queues = instance->msix_vectors -
instance->low_latency_index_start;
}
dev_info(&instance->pdev->dev,
"Max firmware commands: %d shared with nr_hw_queues = %d\n",
instance->max_fw_cmds, host->nr_hw_queues);
/*
* Notify the mid-layer about the new controller
*/
......
......@@ -359,24 +359,29 @@ megasas_get_msix_index(struct megasas_instance *instance,
{
int sdev_busy;
/* nr_hw_queue = 1 for MegaRAID */
struct blk_mq_hw_ctx *hctx =
scmd->device->request_queue->queue_hw_ctx[0];
sdev_busy = atomic_read(&hctx->nr_active);
/* TBD - if sml remove device_busy in future, driver
* should track counter in internal structure.
*/
sdev_busy = atomic_read(&scmd->device->device_busy);
if (instance->perf_mode == MR_BALANCED_PERF_MODE &&
sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))
sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH)) {
cmd->request_desc->SCSIIO.MSIxIndex =
mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
else if (instance->msix_load_balance)
} else if (instance->msix_load_balance) {
cmd->request_desc->SCSIIO.MSIxIndex =
(mega_mod64(atomic64_add_return(1, &instance->total_io_count),
instance->msix_vectors));
else
} else if (instance->host->nr_hw_queues > 1) {
u32 tag = blk_mq_unique_tag(scmd->request);
cmd->request_desc->SCSIIO.MSIxIndex = blk_mq_unique_tag_to_hwq(tag) +
instance->low_latency_index_start;
} else {
cmd->request_desc->SCSIIO.MSIxIndex =
instance->reply_map[raw_smp_processor_id()];
}
}
/**
......@@ -956,9 +961,6 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
if (megasas_alloc_cmdlist_fusion(instance))
goto fail_exit;
dev_info(&instance->pdev->dev, "Configured max firmware commands: %d\n",
instance->max_fw_cmds);
/* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
......@@ -1102,8 +1104,9 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
MR_HIGH_IOPS_QUEUE_COUNT) && cur_intr_coalescing)
instance->perf_mode = MR_BALANCED_PERF_MODE;
dev_info(&instance->pdev->dev, "Performance mode :%s\n",
MEGASAS_PERF_MODE_2STR(instance->perf_mode));
dev_info(&instance->pdev->dev, "Performance mode :%s (latency index = %d)\n",
MEGASAS_PERF_MODE_2STR(instance->perf_mode),
instance->low_latency_index_start);
instance->fw_sync_cache_support = (scratch_pad_1 &
MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
......
......@@ -79,5 +79,5 @@ config SCSI_MPT2SAS
select SCSI_MPT3SAS
depends on PCI && SCSI
help
Dummy config option for backwards compatiblity: configure the MPT3SAS
Dummy config option for backwards compatibility: configure the MPT3SAS
driver instead.
......@@ -2245,7 +2245,7 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
chap_name);
break;
case ISCSI_BOOT_TGT_CHAP_SECRET:
rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN,
chap_secret);
break;
case ISCSI_BOOT_TGT_REV_CHAP_NAME:
......@@ -2253,7 +2253,7 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
mchap_name);
break;
case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN,
mchap_secret);
break;
case ISCSI_BOOT_TGT_FLAGS:
......
......@@ -6740,7 +6740,7 @@ static int __init scsi_debug_init(void)
k = sdeb_zbc_model_str(sdeb_zbc_model_s);
if (k < 0) {
ret = k;
goto free_vm;
goto free_q_arr;
}
sdeb_zbc_model = k;
switch (sdeb_zbc_model) {
......@@ -6753,7 +6753,8 @@ static int __init scsi_debug_init(void)
break;
default:
pr_err("Invalid ZBC model\n");
return -EINVAL;
ret = -EINVAL;
goto free_q_arr;
}
}
if (sdeb_zbc_model != BLK_ZONED_NONE) {
......
......@@ -984,8 +984,10 @@ static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
}
}
if (sdp->no_write_same)
if (sdp->no_write_same) {
rq->rq_flags |= RQF_QUIET;
return BLK_STS_TARGET;
}
if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff)
return sd_setup_write_same16_cmnd(cmd, false);
......@@ -3510,10 +3512,8 @@ static int sd_probe(struct device *dev)
static int sd_remove(struct device *dev)
{
struct scsi_disk *sdkp;
dev_t devt;
sdkp = dev_get_drvdata(dev);
devt = disk_devt(sdkp->disk);
scsi_autopm_get_device(sdkp->device);
async_synchronize_full_domain(&scsi_sd_pm_domain);
......
......@@ -292,7 +292,8 @@ static inline void ufshcd_wb_config(struct ufs_hba *hba)
if (ret)
dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n",
__func__, ret);
ufshcd_wb_toggle_flush(hba, true);
if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
ufshcd_wb_toggle_flush(hba, true);
}
static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
......@@ -5463,9 +5464,6 @@ static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
{
if (hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL)
return;
if (enable)
ufshcd_wb_buf_flush_enable(hba);
else
......@@ -6689,19 +6687,16 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host;
struct ufs_hba *hba;
unsigned int tag;
u32 pos;
int err;
u8 resp = 0xF;
struct ufshcd_lrb *lrbp;
u8 resp = 0xF, lun;
unsigned long flags;
host = cmd->device->host;
hba = shost_priv(host);
tag = cmd->request->tag;
lrbp = &hba->lrb[tag];
err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
if (!err)
err = resp;
......@@ -6710,7 +6705,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
/* clear the commands that were pending for corresponding LUN */
for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
if (hba->lrb[pos].lun == lrbp->lun) {
if (hba->lrb[pos].lun == lun) {
err = ufshcd_clear_cmd(hba, pos);
if (err)
break;
......@@ -8729,6 +8724,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufshcd_wb_need_flush(hba));
}
flush_work(&hba->eeh_work);
if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
if (!ufshcd_is_runtime_pm(pm_op))
/* ensure that bkops is disabled */
......@@ -8741,8 +8738,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
}
}
flush_work(&hba->eeh_work);
/*
* In the case of DeepSleep, the device is expected to remain powered
* with the link off, so do not check for bkops.
......@@ -8975,7 +8970,8 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
hba->curr_dev_pwr_mode) &&
(ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
hba->uic_link_state))
hba->uic_link_state) &&
!hba->dev_info.b_rpm_dev_flush_capable)
goto out;
if (pm_runtime_suspended(hba->dev)) {
......