Commit fcaec17b authored by Linus Torvalds

Merge tag 'for-5.16/scsi-ma-2021-10-29' of git://git.kernel.dk/linux-block

Pull SCSI multi-actuator support from Jens Axboe:
 "This adds SCSI support for the recently merged block multi-actuator
  support. Since this was sitting on top of the block tree, the SCSI
  side asked me to queue it up."

* tag 'for-5.16/scsi-ma-2021-10-29' of git://git.kernel.dk/linux-block:
  doc: Fix typo in request queue sysfs documentation
  doc: document sysfs queue/independent_access_ranges attributes
  libata: support concurrent positioning ranges log
  scsi: sd: add concurrent positioning ranges support
parents 3f01727f 9d824642
@@ -4,7 +4,7 @@ Queue sysfs files
This text file will detail the queue files that are located in the sysfs tree
for each block device. Note that stacked devices typically do not export
-any settings, since their queue merely functions are a remapping target.
+any settings, since their queue merely functions as a remapping target.
These files are the ones found in the /sys/block/xxx/queue/ directory.
Files denoted with a RO postfix are readonly and the RW postfix means
@@ -286,4 +286,35 @@ sequential zones of zoned block devices (devices with a zoned attributed
that reports "host-managed" or "host-aware"). This value is always 0 for
regular block devices.
independent_access_ranges (RO)
------------------------------
The presence of this sub-directory of the /sys/block/xxx/queue/ directory
indicates that the device is capable of executing requests targeting
different sector ranges in parallel. For instance, single LUN multi-actuator
hard-disks will have an independent_access_ranges directory if the device
correctly advertizes the sector ranges of its actuators.
The independent_access_ranges directory contains one directory per access
range, with each range described using the sector (RO) attribute file to
indicate the first sector of the range and the nr_sectors (RO) attribute file
to indicate the total number of sectors in the range starting from the first
sector of the range. For example, a dual-actuator hard-disk will have the
following independent_access_ranges entries.::
$ tree /sys/block/<device>/queue/independent_access_ranges/
/sys/block/<device>/queue/independent_access_ranges/
|-- 0
| |-- nr_sectors
| `-- sector
`-- 1
|-- nr_sectors
`-- sector
The sector and nr_sectors attributes use 512B sector unit, regardless of
the actual block size of the device. Independent access ranges do not
overlap and include all sectors within the device capacity. The access
ranges are numbered in increasing order of the range start sector,
that is, the sector attribute of range 0 always has the value 0.
Jens Axboe <jens.axboe@oracle.com>, February 2009
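As a quick illustration of the sysfs layout documented above, here is a minimal user-space sketch that walks queue/independent_access_ranges/ and prints each range. This is not part of the patches; the device name, buffer sizes, and error handling are placeholders.

```c
/* Illustration only: read the independent_access_ranges sysfs entries. */
#include <dirent.h>
#include <stdio.h>

static unsigned long long read_attr(const char *dir, const char *range,
				    const char *attr)
{
	char path[512];
	unsigned long long val = 0;
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s/%s", dir, range, attr);
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%llu", &val) != 1)
			val = 0;
		fclose(f);
	}
	return val;
}

int main(int argc, char **argv)
{
	char dir[256];
	struct dirent *e;
	DIR *d;

	/* Device name is a placeholder: pass e.g. "sda" as argv[1]. */
	snprintf(dir, sizeof(dir),
		 "/sys/block/%s/queue/independent_access_ranges",
		 argc > 1 ? argv[1] : "sda");
	d = opendir(dir);
	if (!d) {
		/* Directory absent: no independent access ranges reported. */
		perror(dir);
		return 1;
	}
	while ((e = readdir(d)) != NULL) {
		if (e->d_name[0] == '.')
			continue;
		/* Both attributes are in 512B sector units. */
		printf("range %s: sector %llu, nr_sectors %llu\n", e->d_name,
		       read_attr(dir, e->d_name, "sector"),
		       read_attr(dir, e->d_name, "nr_sectors"));
	}
	closedir(d);
	return 0;
}
```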
@@ -2459,18 +2459,70 @@ static void ata_dev_config_devslp(struct ata_device *dev)
}
}
static void ata_dev_config_cpr(struct ata_device *dev)
{
unsigned int err_mask;
size_t buf_len;
int i, nr_cpr = 0;
struct ata_cpr_log *cpr_log = NULL;
u8 *desc, *buf = NULL;
if (!ata_identify_page_supported(dev,
ATA_LOG_CONCURRENT_POSITIONING_RANGES))
goto out;
/*
* Read IDENTIFY DEVICE data log, page 0x47
* (concurrent positioning ranges). We can have at most 255 32B range
* descriptors plus a 64B header.
*/
buf_len = (64 + 255 * 32 + 511) & ~511;
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
goto out;
err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
ATA_LOG_CONCURRENT_POSITIONING_RANGES,
buf, buf_len >> 9);
if (err_mask)
goto out;
nr_cpr = buf[0];
if (!nr_cpr)
goto out;
cpr_log = kzalloc(struct_size(cpr_log, cpr, nr_cpr), GFP_KERNEL);
if (!cpr_log)
goto out;
cpr_log->nr_cpr = nr_cpr;
desc = &buf[64];
for (i = 0; i < nr_cpr; i++, desc += 32) {
cpr_log->cpr[i].num = desc[0];
cpr_log->cpr[i].num_storage_elements = desc[1];
cpr_log->cpr[i].start_lba = get_unaligned_le64(&desc[8]);
cpr_log->cpr[i].num_lbas = get_unaligned_le64(&desc[16]);
}
out:
swap(dev->cpr_log, cpr_log);
kfree(cpr_log);
kfree(buf);
}
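A side note on the buffer sizing in ata_dev_config_cpr() above: the log page is read in whole 512B sectors, so the worst-case size (64B header plus 255 descriptors of 32B each) is rounded up to the next sector boundary. A minimal sketch of that arithmetic, for illustration only:

```c
/* Illustration only: the rounding used for the CPR log read buffer. */
#include <assert.h>
#include <stddef.h>

int main(void)
{
	/* 64B header + 255 * 32B descriptors = 8224 bytes. */
	size_t buf_len = (64 + 255 * 32 + 511) & ~511;

	assert(buf_len == 8704);      /* rounded up to a 512B multiple */
	assert(buf_len >> 9 == 17);   /* sector count passed to ata_read_log_page() */
	return 0;
}
```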
static void ata_dev_print_features(struct ata_device *dev)
{
if (!(dev->flags & ATA_DFLAG_FEATURES_MASK))
return;
ata_dev_info(dev,
-"Features:%s%s%s%s%s\n",
+"Features:%s%s%s%s%s%s\n",
dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "",
dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "",
dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "",
dev->flags & ATA_DFLAG_NCQ_SEND_RECV ? " NCQ-sndrcv" : "",
-dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "");
+dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "",
+dev->cpr_log ? " CPR" : "");
}
/**
@@ -2634,6 +2686,7 @@ int ata_dev_configure(struct ata_device *dev)
ata_dev_config_sense_reporting(dev);
ata_dev_config_zac(dev);
ata_dev_config_trusted(dev);
ata_dev_config_cpr(dev);
dev->cdb_len = 32;
if (ata_msg_drv(ap) && print_info)
@@ -1895,7 +1895,7 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
*/
static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
{
-int num_pages;
+int i, num_pages = 0;
static const u8 pages[] = {
0x00, /* page 0x00, this page */
0x80, /* page 0x80, unit serial no page */
@@ -1905,13 +1905,17 @@ static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
0xb1, /* page 0xb1, block device characteristics page */
0xb2, /* page 0xb2, thin provisioning page */
0xb6, /* page 0xb6, zoned block device characteristics */
0xb9, /* page 0xb9, concurrent positioning ranges */
};
-num_pages = sizeof(pages);
-if (!(args->dev->flags & ATA_DFLAG_ZAC))
-num_pages--;
+for (i = 0; i < sizeof(pages); i++) {
+if (pages[i] == 0xb6 &&
+!(args->dev->flags & ATA_DFLAG_ZAC))
+continue;
+rbuf[num_pages + 4] = pages[i];
+num_pages++;
+}
rbuf[3] = num_pages; /* number of supported VPD pages */
-memcpy(rbuf + 4, pages, num_pages);
return 0;
}
@@ -2121,6 +2125,26 @@ static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf)
return 0;
}
static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf)
{
struct ata_cpr_log *cpr_log = args->dev->cpr_log;
u8 *desc = &rbuf[64];
int i;
/* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */
rbuf[1] = 0xb9;
put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[3]);
for (i = 0; i < cpr_log->nr_cpr; i++, desc += 32) {
desc[0] = cpr_log->cpr[i].num;
desc[1] = cpr_log->cpr[i].num_storage_elements;
put_unaligned_be64(cpr_log->cpr[i].start_lba, &desc[8]);
put_unaligned_be64(cpr_log->cpr[i].num_lbas, &desc[16]);
}
return 0;
}
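For readers unfamiliar with the page built by ata_scsiop_inq_b9() above, the sketch below approximates the 32-byte descriptor emitted for each range (descriptors start at byte 64 of the page). The struct and field names are my own shorthand derived from the code above, not kernel or SBC-5 definitions.

```c
/*
 * Illustration only: the 32-byte range descriptor written for each entry
 * by ata_scsiop_inq_b9() above. Multi-byte fields are big-endian on the
 * wire, so the uint64_t members stand in for raw 8-byte big-endian values.
 */
#include <stdint.h>

struct cpr_vpd_range_descriptor {
	uint8_t  range_number;          /* desc[0]: cpr_log->cpr[i].num */
	uint8_t  num_storage_elements;  /* desc[1] */
	uint8_t  reserved1[6];          /* desc[2..7] */
	uint64_t start_lba;             /* desc[8..15], big-endian */
	uint64_t num_lbas;              /* desc[16..23], big-endian */
	uint8_t  reserved2[8];          /* desc[24..31] */
};

/* Holds on common ABIs: the 8-byte fields are naturally aligned, no padding. */
_Static_assert(sizeof(struct cpr_vpd_range_descriptor) == 32,
	       "descriptor must be 32 bytes");
```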
/**
* modecpy - Prepare response for MODE SENSE
* @dest: output buffer
@@ -4120,11 +4144,17 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2);
break;
case 0xb6:
-if (dev->flags & ATA_DFLAG_ZAC) {
+if (dev->flags & ATA_DFLAG_ZAC)
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b6);
+else
+ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+break;
+case 0xb9:
+if (dev->cpr_log)
+ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b9);
+else
+ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
break;
-}
-fallthrough;
default:
ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
break;
@@ -3088,6 +3088,86 @@ static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer)
sdkp->security = 1;
}
static inline sector_t sd64_to_sectors(struct scsi_disk *sdkp, u8 *buf)
{
return logical_to_sectors(sdkp->device, get_unaligned_be64(buf));
}
/**
* sd_read_cpr - Query concurrent positioning ranges
* @sdkp: disk to query
*/
static void sd_read_cpr(struct scsi_disk *sdkp)
{
struct blk_independent_access_ranges *iars = NULL;
unsigned char *buffer = NULL;
unsigned int nr_cpr = 0;
int i, vpd_len, buf_len = SD_BUF_SIZE;
u8 *desc;
/*
* We need to have the capacity set first for the block layer to be
* able to check the ranges.
*/
if (sdkp->first_scan)
return;
if (!sdkp->capacity)
goto out;
/*
* Concurrent Positioning Ranges VPD: there can be at most 256 ranges,
* leading to a maximum page size of 64 + 256*32 bytes.
*/
buf_len = 64 + 256*32;
buffer = kmalloc(buf_len, GFP_KERNEL);
if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb9, buffer, buf_len))
goto out;
/* We must have at least a 64B header and one 32B range descriptor */
vpd_len = get_unaligned_be16(&buffer[2]) + 3;
if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) {
sd_printk(KERN_ERR, sdkp,
"Invalid Concurrent Positioning Ranges VPD page\n");
goto out;
}
nr_cpr = (vpd_len - 64) / 32;
if (nr_cpr == 1) {
nr_cpr = 0;
goto out;
}
iars = disk_alloc_independent_access_ranges(sdkp->disk, nr_cpr);
if (!iars) {
nr_cpr = 0;
goto out;
}
desc = &buffer[64];
for (i = 0; i < nr_cpr; i++, desc += 32) {
if (desc[0] != i) {
sd_printk(KERN_ERR, sdkp,
"Invalid Concurrent Positioning Range number\n");
nr_cpr = 0;
break;
}
iars->ia_range[i].sector = sd64_to_sectors(sdkp, desc + 8);
iars->ia_range[i].nr_sectors = sd64_to_sectors(sdkp, desc + 16);
}
out:
disk_set_independent_access_ranges(sdkp->disk, iars);
if (nr_cpr && sdkp->nr_actuators != nr_cpr) {
sd_printk(KERN_NOTICE, sdkp,
"%u concurrent positioning ranges\n", nr_cpr);
sdkp->nr_actuators = nr_cpr;
}
kfree(buffer);
}
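One detail worth spelling out from sd64_to_sectors() above: the VPD descriptors report values in logical blocks, while the block layer's independent access ranges use 512B sectors, so logical_to_sectors() scales the values by the logical block size. A tiny sketch of that conversion, assuming a hypothetical 4096B logical block size:

```c
/* Illustration only: logical-block to 512B-sector scaling, as performed
 * for sd64_to_sectors() above via logical_to_sectors(). */
#include <assert.h>
#include <stdint.h>

static uint64_t lb_to_sectors(uint64_t lba, unsigned int logical_block_size)
{
	/* logical_block_size is a power of two >= 512, e.g. 512 or 4096. */
	return lba * (logical_block_size / 512);
}

int main(void)
{
	/* Hypothetical range of 1953125000 blocks of 4096B each. */
	assert(lb_to_sectors(1953125000ULL, 4096) == 15625000000ULL);
	assert(lb_to_sectors(1000, 512) == 1000);
	return 0;
}
```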
/*
* Determine the device's preferred I/O size for reads and writes
* unless the reported value is unreasonably small, large, not a
@@ -3203,6 +3283,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_app_tag_own(sdkp, buffer);
sd_read_write_same(sdkp, buffer);
sd_read_security(sdkp, buffer);
sd_read_cpr(sdkp);
}
/*
@@ -106,6 +106,7 @@ struct scsi_disk {
u8 protection_type;/* Data Integrity Field */
u8 provisioning_mode;
u8 zeroing_mode;
u8 nr_actuators; /* Number of actuators */
unsigned ATO : 1; /* state of disk ATO bit */
unsigned cache_override : 1; /* temp override of WCE,RCD */
unsigned WCE : 1; /* state of disk WCE bit */
@@ -329,6 +329,7 @@ enum {
ATA_LOG_SECURITY = 0x06,
ATA_LOG_SATA_SETTINGS = 0x08,
ATA_LOG_ZONED_INFORMATION = 0x09,
ATA_LOG_CONCURRENT_POSITIONING_RANGES = 0x47,
/* Identify device SATA settings log:*/
ATA_LOG_DEVSLP_OFFSET = 0x30,
@@ -676,6 +676,18 @@ struct ata_ering {
struct ata_ering_entry ring[ATA_ERING_SIZE];
};
struct ata_cpr {
u8 num;
u8 num_storage_elements;
u64 start_lba;
u64 num_lbas;
};
struct ata_cpr_log {
u8 nr_cpr;
struct ata_cpr cpr[];
};
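The cpr array in struct ata_cpr_log above is a flexible array member, which is why ata_dev_config_cpr() sizes its allocation with struct_size(). A standalone sketch of the equivalent arithmetic (plain C, without the kernel helper's overflow checking), using stand-in struct names:

```c
/* Illustration only: sizing a struct with a flexible array member, the
 * way struct_size(cpr_log, cpr, nr_cpr) is used in ata_dev_config_cpr()
 * above (the kernel helper additionally saturates on overflow). */
#include <stdint.h>
#include <stdlib.h>

struct cpr {
	uint8_t  num;
	uint8_t  num_storage_elements;
	uint64_t start_lba;
	uint64_t num_lbas;
};

struct cpr_log {
	uint8_t nr_cpr;
	struct cpr cpr[];	/* flexible array member */
};

static struct cpr_log *cpr_log_alloc(unsigned int nr)
{
	/* Fixed header size plus one element per range descriptor. */
	return calloc(1, sizeof(struct cpr_log) + nr * sizeof(struct cpr));
}
```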
struct ata_device {
struct ata_link *link;
unsigned int devno; /* 0 or 1 */
@@ -735,6 +747,9 @@ struct ata_device {
u32 zac_zones_optimal_nonseq;
u32 zac_zones_max_open;
/* Concurrent positioning ranges */
struct ata_cpr_log *cpr_log;
/* error history */
int spdn_cnt;
/* ering is CLEAR_END, read comment above CLEAR_END */