Commit 5d8fbce0 authored by Mike McGowen, committed by Martin K. Petersen

scsi: smartpqi: Speed up RAID 10 sequential reads

Use all data disks for sequential read operations.

Testing discovered inconsistent performance on RAID 10 volumes when
performing 256K sequential reads. The driver was using only a single
tracker to determine which physical drive to send AIO requests to.

Change the single tracker (next_bypass_group) to an array of trackers based
on the number of data disks in a row of the RAID map.
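
To make the idea concrete, below is a minimal standalone C sketch (illustrative
only, not driver code; struct volume, DATA_DISKS_PER_ROW, LAYOUT_MAP_COUNT and
pick_group are made-up names): each data disk gets its own tracker, so read
streams that land on different data disks rotate through the mirror copies
independently.

#include <stdio.h>

#define DATA_DISKS_PER_ROW	4	/* data disks in one row of the RAID map */
#define LAYOUT_MAP_COUNT	2	/* mirror copies in a RAID 10 volume */

struct volume {
	/* one round-robin tracker per data disk, not a single shared one */
	unsigned int next_bypass_group[DATA_DISKS_PER_ROW];
};

/* Pick the mirror copy for a read that maps to data disk 'map_index'. */
static unsigned int pick_group(struct volume *v, unsigned int map_index)
{
	unsigned int group = v->next_bypass_group[map_index];

	/* advance only this disk's tracker, wrapping at the copy count */
	v->next_bypass_group[map_index] = (group + 1) % LAYOUT_MAP_COUNT;
	return group;
}

int main(void)
{
	struct volume v = { { 0 } };
	unsigned int g1, g2;

	/* two sequential reads on the same data disk alternate mirror copies */
	g1 = pick_group(&v, 0);
	g2 = pick_group(&v, 0);
	printf("disk 0: group %u then %u\n", g1, g2);	/* 0 then 1 */

	/* a read on another data disk starts from its own tracker, unaffected */
	printf("disk 1: group %u\n", pick_group(&v, 1));	/* 0 */
	return 0;
}

With a single shared tracker, the read on disk 1 above would have advanced the
same counter that disk 0's stream was alternating on, which is the interference
the commit removes.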

Link: https://lore.kernel.org/r/164375212842.440833.6733971458765002128.stgit@brunhilda.pdev.net
Reviewed-by: Kevin Barnett <kevin.barnett@microchip.com>
Reviewed-by: Mike McGowen <mike.mcgowen@microchip.com>
Reviewed-by: Scott Benesh <scott.benesh@microchip.com>
Reviewed-by: Scott Teel <scott.teel@microchip.com>
Signed-off-by: Mike McGowen <Mike.McGowen@microchip.com>
Signed-off-by: Don Brace <don.brace@microchip.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 3ada501d
@@ -918,7 +918,8 @@ union pqi_reset_register {
 #define PQI_MAX_TRANSFER_SIZE		(1024U * 1024U)
 #define PQI_MAX_TRANSFER_SIZE_KDUMP	(512 * 1024U)
-#define RAID_MAP_MAX_ENTRIES		1024
+#define RAID_MAP_MAX_ENTRIES			1024
+#define RAID_MAP_MAX_DATA_DISKS_PER_ROW	128
 #define PQI_PHYSICAL_DEVICE_BUS	0
 #define PQI_RAID_VOLUME_BUS		1
@@ -1125,7 +1126,7 @@ struct pqi_scsi_dev {
 	u8	ncq_prio_support;
 	bool	raid_bypass_configured;	/* RAID bypass configured */
 	bool	raid_bypass_enabled;	/* RAID bypass enabled */
-	u32	next_bypass_group;
+	u32	next_bypass_group[RAID_MAP_MAX_DATA_DISKS_PER_ROW];
 	struct raid_map *raid_map;	/* RAID bypass map */
 	u32	max_transfer_encrypted;
@@ -2058,7 +2058,7 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
 		sizeof(existing_device->box));
 	memcpy(existing_device->phys_connector, new_device->phys_connector,
 		sizeof(existing_device->phys_connector));
-	existing_device->next_bypass_group = 0;
+	memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
 	kfree(existing_device->raid_map);
 	existing_device->raid_map = new_device->raid_map;
 	existing_device->raid_bypass_configured =
@@ -2963,11 +2963,11 @@ static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
 		if (rmd.is_write) {
 			pqi_calc_aio_r1_nexus(raid_map, &rmd);
 		} else {
-			group = device->next_bypass_group;
+			group = device->next_bypass_group[rmd.map_index];
 			next_bypass_group = group + 1;
 			if (next_bypass_group >= rmd.layout_map_count)
 				next_bypass_group = 0;
-			device->next_bypass_group = next_bypass_group;
+			device->next_bypass_group[rmd.map_index] = next_bypass_group;
 			rmd.map_index += group * rmd.data_disks_per_row;
 		}
 	} else if ((device->raid_level == SA_RAID_5 ||