Commit 7d34ddbe authored by Linus Torvalds

Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
 "Thirteen small fixes: The hopefully final effort to get the lpfc nvme
  kconfig problems sorted, there's one important sg fix (user can induce
  read after end of buffer) and one minor enhancement (adding an extra
  PCI ID to qedi). The rest are a set of minor fixes, which mostly occur
  as user visible in error legs or on specific devices"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  scsi: ufs: remove the duplicated checking for supporting clkscaling
  scsi: lpfc: fix building without debugfs support
  scsi: lpfc: Fix PT2PT PRLI reject
  scsi: hpsa: fix volume offline state
  scsi: libsas: fix ata xfer length
  scsi: scsi_dh_alua: Warn if the first argument of alua_rtpg_queue() is NULL
  scsi: scsi_dh_alua: Ensure that alua_activate() calls the completion function
  scsi: scsi_dh_alua: Check scsi_device_get() return value
  scsi: sg: check length passed to SG_NEXT_CMD_LEN
  scsi: ufshcd-platform: remove the useless cast in ERR_PTR/IS_ERR
  scsi: qedi: Add PCI device-ID for QL41xxx adapters.
  scsi: aacraid: Fix potential null access
  scsi: qla2xxx: Fix crash in qla2xxx_eh_abort on bad ptr
parents 978e0f92 0917ac4f
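
The sg fix listed above closes a hole where a user-supplied command length could trigger a read past the end of a buffer. As a rough userspace illustration of the interface involved (not part of this series; the /dev/sg0 node and the oversized length below are assumptions chosen for the sketch), the SG_NEXT_CMD_LEN ioctl is driven like this:

/*
 * Illustrative only: exercises the SG_NEXT_CMD_LEN ioctl that the sg
 * patch below bounds-checks.  /dev/sg0 and the length value are
 * assumptions for this sketch, not taken from the patch.
 */
#include <fcntl.h>
#include <stdio.h>
#include <scsi/sg.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/sg0", O_RDWR | O_NONBLOCK);
	if (fd < 0) {
		perror("open /dev/sg0");
		return 1;
	}

	/* Far larger than any valid SCSI CDB; a patched kernel refuses
	 * to store it as the next command length. */
	int len = 4096;
	if (ioctl(fd, SG_NEXT_CMD_LEN, &len) < 0)
		perror("SG_NEXT_CMD_LEN");

	close(fd);
	return 0;
}

With the sg hunk below applied, any value above SG_MAX_CDB_SIZE is rejected with -ENOMEM before it can be stored as the next command length.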
@@ -2056,7 +2056,6 @@ static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
 {
 	struct hw_fib **hw_fib_p;
 	struct fib **fib_p;
-	int rcode = 1;
 
 	hw_fib_p = hw_fib_pool;
 	fib_p = fib_pool;
@@ -2074,11 +2073,11 @@ static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
 		}
 	}
 
+	/*
+	 * Get the actual number of allocated fibs
+	 */
 	num = hw_fib_p - hw_fib_pool;
-	if (!num)
-		rcode = 0;
-
-	return rcode;
+	return num;
 }
 
 static void wakeup_fibctx_threads(struct aac_dev *dev,
@@ -2186,7 +2185,6 @@ static void aac_process_events(struct aac_dev *dev)
 	struct fib *fib;
 	unsigned long flags;
 	spinlock_t *t_lock;
-	unsigned int rcode;
 
 	t_lock = dev->queues->queue[HostNormCmdQueue].lock;
 	spin_lock_irqsave(t_lock, flags);
@@ -2269,8 +2267,8 @@ static void aac_process_events(struct aac_dev *dev)
 		 * Fill up fib pointer pools with actual fibs
 		 * and hw_fibs
 		 */
-		rcode = fillup_pools(dev, hw_fib_pool, fib_pool, num);
-		if (!rcode)
+		num = fillup_pools(dev, hw_fib_pool, fib_pool, num);
+		if (!num)
 			goto free_mem;
 
 		/*
...
@@ -113,7 +113,7 @@ struct alua_queue_data {
 #define ALUA_POLICY_SWITCH_ALL	1
 
 static void alua_rtpg_work(struct work_struct *work);
-static void alua_rtpg_queue(struct alua_port_group *pg,
+static bool alua_rtpg_queue(struct alua_port_group *pg,
 			    struct scsi_device *sdev,
 			    struct alua_queue_data *qdata, bool force);
 static void alua_check(struct scsi_device *sdev, bool force);
@@ -862,7 +862,13 @@ static void alua_rtpg_work(struct work_struct *work)
 	kref_put(&pg->kref, release_port_group);
 }
 
-static void alua_rtpg_queue(struct alua_port_group *pg,
+/**
+ * alua_rtpg_queue() - cause RTPG to be submitted asynchronously
+ *
+ * Returns true if and only if alua_rtpg_work() will be called asynchronously.
+ * That function is responsible for calling @qdata->fn().
+ */
+static bool alua_rtpg_queue(struct alua_port_group *pg,
 			    struct scsi_device *sdev,
 			    struct alua_queue_data *qdata, bool force)
 {
@@ -870,8 +876,8 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
 	unsigned long flags;
 	struct workqueue_struct *alua_wq = kaluad_wq;
 
-	if (!pg)
-		return;
+	if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev))
+		return false;
 
 	spin_lock_irqsave(&pg->lock, flags);
 	if (qdata) {
@@ -884,14 +890,12 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
 		pg->flags |= ALUA_PG_RUN_RTPG;
 		kref_get(&pg->kref);
 		pg->rtpg_sdev = sdev;
-		scsi_device_get(sdev);
 		start_queue = 1;
 	} else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) {
 		pg->flags |= ALUA_PG_RUN_RTPG;
 		/* Do not queue if the worker is already running */
 		if (!(pg->flags & ALUA_PG_RUNNING)) {
 			kref_get(&pg->kref);
-			sdev = NULL;
 			start_queue = 1;
 		}
 	}
@@ -900,13 +904,17 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
 		alua_wq = kaluad_sync_wq;
 	spin_unlock_irqrestore(&pg->lock, flags);
 
-	if (start_queue &&
-	    !queue_delayed_work(alua_wq, &pg->rtpg_work,
-				msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) {
-		if (sdev)
-			scsi_device_put(sdev);
-		kref_put(&pg->kref, release_port_group);
+	if (start_queue) {
+		if (queue_delayed_work(alua_wq, &pg->rtpg_work,
+				msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS)))
+			sdev = NULL;
+		else
+			kref_put(&pg->kref, release_port_group);
 	}
+	if (sdev)
+		scsi_device_put(sdev);
+
+	return true;
 }
 
 /*
@@ -1007,11 +1015,13 @@ static int alua_activate(struct scsi_device *sdev,
 		mutex_unlock(&h->init_mutex);
 		goto out;
 	}
-	fn = NULL;
 	rcu_read_unlock();
 	mutex_unlock(&h->init_mutex);
 
-	alua_rtpg_queue(pg, sdev, qdata, true);
+	if (alua_rtpg_queue(pg, sdev, qdata, true))
+		fn = NULL;
+	else
+		err = SCSI_DH_DEV_OFFLINED;
 	kref_put(&pg->kref, release_port_group);
 out:
 	if (fn)
...
@@ -3885,6 +3885,7 @@ static int hpsa_update_device_info(struct ctlr_info *h,
 		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
 			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
 		volume_offline = hpsa_volume_offline(h, scsi3addr);
+		this_device->volume_offline = volume_offline;
 		if (volume_offline == HPSA_LV_FAILED) {
 			rc = HPSA_LV_FAILED;
 			dev_err(&h->pdev->dev,
...
@@ -221,7 +221,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
 		task->num_scatter = qc->n_elem;
 	} else {
 		for_each_sg(qc->sg, sg, qc->n_elem, si)
-			xfer += sg->length;
+			xfer += sg_dma_len(sg);
 
 		task->total_xfer_len = xfer;
 		task->num_scatter = si;
...
@@ -44,14 +44,6 @@
 /* hbqinfo output buffer size */
 #define LPFC_HBQINFO_SIZE 8192
 
-enum {
-	DUMP_FCP,
-	DUMP_NVME,
-	DUMP_MBX,
-	DUMP_ELS,
-	DUMP_NVMELS,
-};
-
 /* nvmestat output buffer size */
 #define LPFC_NVMESTAT_SIZE 8192
 #define LPFC_NVMEKTIME_SIZE 8192
@@ -283,8 +275,22 @@ struct lpfc_idiag {
 	struct lpfc_idiag_offset offset;
 	void *ptr_private;
 };
 
+#else
+
+#define lpfc_nvmeio_data(phba, fmt, arg...) \
+	no_printk(fmt, ##arg)
+
 #endif
 
+enum {
+	DUMP_FCP,
+	DUMP_NVME,
+	DUMP_MBX,
+	DUMP_ELS,
+	DUMP_NVMELS,
+};
+
 /* Mask for discovery_trace */
 #define LPFC_DISC_TRC_ELS_CMD	0x1	/* Trace ELS commands */
 #define LPFC_DISC_TRC_ELS_RSP	0x2	/* Trace ELS response */
...
@@ -7968,7 +7968,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			did, vport->port_state, ndlp->nlp_flag);
 
 		phba->fc_stat.elsRcvPRLI++;
-		if (vport->port_state < LPFC_DISC_AUTH) {
+		if ((vport->port_state < LPFC_DISC_AUTH) &&
+		    (vport->fc_flag & FC_FABRIC)) {
 			rjt_err = LSRJT_UNABLE_TPC;
 			rjt_exp = LSEXP_NOTHING_MORE;
 			break;
...
@@ -520,7 +520,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
 	struct lpfc_hba *phba = ctxp->phba;
 	struct lpfc_iocbq *nvmewqeq;
 	unsigned long iflags;
-	int rc, id;
+	int rc;
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	if (phba->ktime_on) {
@@ -530,7 +530,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
 		ctxp->ts_nvme_data = ktime_get_ns();
 	}
 	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
-		id = smp_processor_id();
+		int id = smp_processor_id();
 		ctxp->cpu = id;
 		if (id < LPFC_CHECK_CPU_CNT)
 			phba->cpucheck_xmt_io[id]++;
...
@@ -2007,6 +2007,7 @@ static void qedi_remove(struct pci_dev *pdev)
 static struct pci_device_id qedi_pci_tbl[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8084) },
 	{ 0 },
 };
 MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
...
@@ -1651,7 +1651,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
 				/* Don't abort commands in adapter during EEH
 				 * recovery as it's not accessible/responding.
 				 */
-				if (GET_CMD_SP(sp) && !ha->flags.eeh_busy) {
+				if (GET_CMD_SP(sp) && !ha->flags.eeh_busy &&
+				    (sp->type == SRB_SCSI_CMD)) {
 					/* Get a reference to the sp and drop the lock.
 					 * The reference ensures this sp->done() call
 					 * - and not the call in qla2xxx_eh_abort() -
...
@@ -996,6 +996,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
 		result = get_user(val, ip);
 		if (result)
 			return result;
+		if (val > SG_MAX_CDB_SIZE)
+			return -ENOMEM;
 		sfp->next_cmd_len = (val > 0) ? val : 0;
 		return 0;
 	case SG_GET_VERSION_NUM:
...
@@ -309,8 +309,8 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
 	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	mmio_base = devm_ioremap_resource(dev, mem_res);
-	if (IS_ERR(*(void **)&mmio_base)) {
-		err = PTR_ERR(*(void **)&mmio_base);
+	if (IS_ERR(mmio_base)) {
+		err = PTR_ERR(mmio_base);
 		goto out;
 	}
...
@@ -4662,8 +4662,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 		}
 		if (ufshcd_is_clkscaling_supported(hba))
 			hba->clk_scaling.active_reqs--;
-		if (ufshcd_is_clkscaling_supported(hba))
-			hba->clk_scaling.active_reqs--;
 	}
 
 	/* clear corresponding bits of completed commands */
...