Commit 403a39f8 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
 "These are seven small fixes, all for user-visible issues that
  fortunately occur only in rare circumstances.

  The most serious is the sr one in which QEMU can cause us to read
  beyond the end of a buffer (I don't think it's exploitable, but just
  in case).

   The next is the sd capacity fix, without which all drives with a
   non-512-byte logical sector size and a capacity greater than 2TB are
   sized incorrectly.

  The rest are either in new drivers (qedf) or on error legs"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  scsi: ipr: do not set DID_PASSTHROUGH on CHECK CONDITION
  scsi: aacraid: fix PCI error recovery path
  scsi: sd: Fix capacity calculation with 32-bit sector_t
  scsi: qla2xxx: Add fix to read correct register value for ISP82xx.
  scsi: qedf: Fix crash due to unsolicited FIP VLAN response.
  scsi: sr: Sanity check returned mode data
  scsi: sd: Consider max_xfer_blocks if opt_xfer_blocks is unusable
parents be84a46c 0e1bfea9
...@@ -1690,9 +1690,6 @@ struct aac_dev ...@@ -1690,9 +1690,6 @@ struct aac_dev
#define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \ #define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \
(dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) (dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4)
#define aac_adapter_check_health(dev) \
(dev)->a_ops.adapter_check_health(dev)
#define aac_adapter_restart(dev, bled, reset_type) \ #define aac_adapter_restart(dev, bled, reset_type) \
((dev)->a_ops.adapter_restart(dev, bled, reset_type)) ((dev)->a_ops.adapter_restart(dev, bled, reset_type))
...@@ -2615,6 +2612,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor) ...@@ -2615,6 +2612,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
return capacity; return capacity;
} }
/*
 * Check adapter health, short-circuiting to "dead" when the PCI channel
 * is offline (e.g. during EEH/AER error recovery) so that the driver's
 * health path does not touch the hardware via a_ops while recovery is
 * in progress.
 *
 * Returns -1 if the PCI channel is offline; otherwise returns whatever
 * the adapter-specific a_ops.adapter_check_health() callback reports.
 */
static inline int aac_adapter_check_health(struct aac_dev *dev)
{
	/* PCI error recovery in flight: report adapter dead without MMIO. */
	if (unlikely(pci_channel_offline(dev->pdev)))
		return -1;
	return (dev)->a_ops.adapter_check_health(dev);
}
/* SCp.phase values */ /* SCp.phase values */
#define AAC_OWNER_MIDLEVEL 0x101 #define AAC_OWNER_MIDLEVEL 0x101
#define AAC_OWNER_LOWLEVEL 0x102 #define AAC_OWNER_LOWLEVEL 0x102
......
...@@ -1873,7 +1873,8 @@ int aac_check_health(struct aac_dev * aac) ...@@ -1873,7 +1873,8 @@ int aac_check_health(struct aac_dev * aac)
spin_unlock_irqrestore(&aac->fib_lock, flagv); spin_unlock_irqrestore(&aac->fib_lock, flagv);
if (BlinkLED < 0) { if (BlinkLED < 0) {
printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED); printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n",
aac->name, BlinkLED);
goto out; goto out;
} }
......
...@@ -6293,7 +6293,12 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, ...@@ -6293,7 +6293,12 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
break; break;
case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */ case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
case IPR_IOASA_IR_DUAL_IOA_DISABLED: case IPR_IOASA_IR_DUAL_IOA_DISABLED:
scsi_cmd->result |= (DID_PASSTHROUGH << 16); /*
* exception: do not set DID_PASSTHROUGH on CHECK CONDITION
* so SCSI mid-layer and upper layers handle it accordingly.
*/
if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
scsi_cmd->result |= (DID_PASSTHROUGH << 16);
break; break;
case IPR_IOASC_BUS_WAS_RESET: case IPR_IOASC_BUS_WAS_RESET:
case IPR_IOASC_BUS_WAS_RESET_BY_OTHER: case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
......
...@@ -99,7 +99,8 @@ static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf, ...@@ -99,7 +99,8 @@ static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf,
qedf_set_vlan_id(qedf, vid); qedf_set_vlan_id(qedf, vid);
/* Inform waiter that it's ok to call fcoe_ctlr_link up() */ /* Inform waiter that it's ok to call fcoe_ctlr_link up() */
complete(&qedf->fipvlan_compl); if (!completion_done(&qedf->fipvlan_compl))
complete(&qedf->fipvlan_compl);
} }
} }
......
...@@ -2803,6 +2803,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) ...@@ -2803,6 +2803,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
atomic_set(&qedf->num_offloads, 0); atomic_set(&qedf->num_offloads, 0);
qedf->stop_io_on_error = false; qedf->stop_io_on_error = false;
pci_set_drvdata(pdev, qedf); pci_set_drvdata(pdev, qedf);
init_completion(&qedf->fipvlan_compl);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
"QLogic FastLinQ FCoE Module qedf %s, " "QLogic FastLinQ FCoE Module qedf %s, "
......
...@@ -1160,8 +1160,13 @@ static inline ...@@ -1160,8 +1160,13 @@ static inline
uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha) uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
{ {
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
return ((RD_REG_DWORD(&reg->host_status)) == ISP_REG_DISCONNECT); if (IS_P3P_TYPE(ha))
return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT);
else
return ((RD_REG_DWORD(&reg->host_status)) ==
ISP_REG_DISCONNECT);
} }
/************************************************************************** /**************************************************************************
......
...@@ -2102,6 +2102,22 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp, ...@@ -2102,6 +2102,22 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
#define READ_CAPACITY_RETRIES_ON_RESET 10 #define READ_CAPACITY_RETRIES_ON_RESET 10
/*
 * Guard against sector_t overflow when CONFIG_LBDAF is disabled (32-bit
 * sector_t) and the device reports logical blocks larger than 512 bytes.
 * last_sector is a u64, so logical_to_sectors() is not applicable here.
 *
 * Returns true when the device's last 512-byte sector fits in sector_t.
 */
static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
{
	/* Last addressable 512-byte sector for a capacity of lba+1 blocks. */
	u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);

	return !(sizeof(sector_t) == 4 && last_sector > U32_MAX);
}
static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
unsigned char *buffer) unsigned char *buffer)
{ {
...@@ -2167,7 +2183,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, ...@@ -2167,7 +2183,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
return -ENODEV; return -ENODEV;
} }
if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) { if (!sd_addressable_capacity(lba, sector_size)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a " sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block " "kernel compiled with support for large block "
"devices.\n"); "devices.\n");
...@@ -2256,7 +2272,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp, ...@@ -2256,7 +2272,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
return sector_size; return sector_size;
} }
if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) { if (!sd_addressable_capacity(lba, sector_size)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a " sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block " "kernel compiled with support for large block "
"devices.\n"); "devices.\n");
...@@ -2956,7 +2972,8 @@ static int sd_revalidate_disk(struct gendisk *disk) ...@@ -2956,7 +2972,8 @@ static int sd_revalidate_disk(struct gendisk *disk)
q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks); rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
} else } else
rw_max = BLK_DEF_MAX_SECTORS; rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
(sector_t)BLK_DEF_MAX_SECTORS);
/* Combine with controller limits */ /* Combine with controller limits */
q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q)); q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
......
...@@ -836,6 +836,7 @@ static void get_capabilities(struct scsi_cd *cd) ...@@ -836,6 +836,7 @@ static void get_capabilities(struct scsi_cd *cd)
unsigned char *buffer; unsigned char *buffer;
struct scsi_mode_data data; struct scsi_mode_data data;
struct scsi_sense_hdr sshdr; struct scsi_sense_hdr sshdr;
unsigned int ms_len = 128;
int rc, n; int rc, n;
static const char *loadmech[] = static const char *loadmech[] =
...@@ -862,10 +863,11 @@ static void get_capabilities(struct scsi_cd *cd) ...@@ -862,10 +863,11 @@ static void get_capabilities(struct scsi_cd *cd)
scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr); scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
/* ask for mode page 0x2a */ /* ask for mode page 0x2a */
rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128, rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len,
SR_TIMEOUT, 3, &data, NULL); SR_TIMEOUT, 3, &data, NULL);
if (!scsi_status_is_good(rc)) { if (!scsi_status_is_good(rc) || data.length > ms_len ||
data.header_length + data.block_descriptor_length > data.length) {
/* failed, drive doesn't have capabilities mode page */ /* failed, drive doesn't have capabilities mode page */
cd->cdi.speed = 1; cd->cdi.speed = 1;
cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R | cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment