Commit b27ac2fa authored by Don Brace, committed by Martin K. Petersen

scsi: smartpqi: Convert to host_tagset

Add support for host_tagset.

Also move the reserved command slots to the end of the pool to eliminate an
addition operation for every SCSI request.

This patch was originally authored by Hannes Reinecke here:

Link: https://lore.kernel.org/linux-block/20191126131009.71726-8-hare@suse.de/

That patch was NAK'd at the time because we wanted to fully test multipath
failover operations first.
Suggested-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Scott Benesh <scott.benesh@microchip.com>
Reviewed-by: Scott Teel <scott.teel@microchip.com>
Reviewed-by: Mahesh Rajashekhara <Mahesh.Rajashekhara@microchip.com>
Reviewed-by: Mike McGowen <Mike.McGowen@microchip.com>
Reviewed-by: Kevin Barnett <kevin.barnett@microchip.com>
Signed-off-by: Don Brace <don.brace@microchip.com>
Link: https://lore.kernel.org/r/166793529811.322537.3294617845448383948.stgit@brunhilda
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 729c287e
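
For reference, the host_tagset conversion boils down to a single Scsi_Host flag plus exposing the controller's queue groups as blk-mq hardware queues. Below is a minimal sketch of how a SCSI low-level driver opts in; the helper name and queue depth are hypothetical placeholders, not smartpqi's actual probe code, while the Scsi_Host fields and scsi_add_host() are the upstream midlayer API:

#include <scsi/scsi_host.h>

/* Hypothetical registration helper, for illustration only. */
static int example_register_scsi(struct Scsi_Host *shost, unsigned int num_queue_groups)
{
	shost->nr_hw_queues = num_queue_groups;	/* one blk-mq hw queue per queue group */
	shost->host_tagset = 1;			/* single tag space shared by all hw queues */
	shost->can_queue = 1024;		/* placeholder; sized to the driver's I/O request pool */

	return scsi_add_host(shost, NULL);	/* NULL parent device for brevity */
}
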
@@ -1307,7 +1307,6 @@ struct pqi_ctrl_info {
 	dma_addr_t error_buffer_dma_handle;
 	size_t sg_chain_buffer_length;
 	unsigned int num_queue_groups;
-	u16 max_hw_queue_index;
 	u16 num_elements_per_iq;
 	u16 num_elements_per_oq;
 	u16 max_inbound_iu_length_per_firmware;
@@ -1369,8 +1368,6 @@ struct pqi_ctrl_info {
 	u64 sas_address;

 	struct pqi_io_request *io_request_pool;
-	u16 next_io_request_slot;
-
 	struct pqi_event events[PQI_NUM_SUPPORTED_EVENTS];
 	struct work_struct event_work;
@@ -678,22 +678,35 @@ static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
 	io_request->raid_bypass = false;
 }

-static struct pqi_io_request *pqi_alloc_io_request(
-	struct pqi_ctrl_info *ctrl_info)
+static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
 {
 	struct pqi_io_request *io_request;
-	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */
-
-	while (1) {
-		io_request = &ctrl_info->io_request_pool[i];
-		if (atomic_inc_return(&io_request->refcount) == 1)
-			break;
-		atomic_dec(&io_request->refcount);
-		i = (i + 1) % ctrl_info->max_io_slots;
+	u16 i;
+
+	if (scmd) { /* SML I/O request */
+		u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+
+		i = blk_mq_unique_tag_to_tag(blk_tag);
+		io_request = &ctrl_info->io_request_pool[i];
+		if (atomic_inc_return(&io_request->refcount) > 1) {
+			atomic_dec(&io_request->refcount);
+			return NULL;
+		}
+	} else { /* IOCTL or driver internal request */
+		/*
+		 * benignly racy - may have to wait for an open slot.
+		 * command slot range is scsi_ml_can_queue -
+		 *   [scsi_ml_can_queue + (PQI_RESERVED_IO_SLOTS - 1)]
+		 */
+		i = 0;
+		while (1) {
+			io_request = &ctrl_info->io_request_pool[ctrl_info->scsi_ml_can_queue + i];
+			if (atomic_inc_return(&io_request->refcount) == 1)
+				break;
+			atomic_dec(&io_request->refcount);
+			i = (i + 1) % PQI_RESERVED_IO_SLOTS;
+		}
 	}

-	/* benignly racy */
-	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
-
 	pqi_reinit_io_request(io_request);
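
With host_tagset enabled, the tag the block layer assigns is already unique across the whole host, which is what allows pqi_alloc_io_request() above to index the shared pool directly instead of scanning for a free slot. A minimal sketch of the tag decoding, assuming a hypothetical helper name; blk_mq_unique_tag() and its accessors are the upstream block-layer API:

#include <linux/blk-mq.h>
#include <linux/printk.h>
#include <scsi/scsi_cmnd.h>

/* Hypothetical helper: recover the hw queue and per-host tag for a SCSI command. */
static u16 example_pool_index(struct scsi_cmnd *scmd)
{
	u32 unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));	/* packs (hwq << 16) | tag */

	pr_debug("hwq=%u tag=%u\n",
		 blk_mq_unique_tag_to_hwq(unique_tag),
		 blk_mq_unique_tag_to_tag(unique_tag));

	return blk_mq_unique_tag_to_tag(unique_tag);	/* direct index into the shared pool */
}
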
@@ -4586,7 +4599,7 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
 		goto out;
 	}

-	io_request = pqi_alloc_io_request(ctrl_info);
+	io_request = pqi_alloc_io_request(ctrl_info, NULL);

 	put_unaligned_le16(io_request->index,
 		&(((struct pqi_raid_path_request *)request)->request_id));
@@ -5233,7 +5246,6 @@ static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
 	}

 	ctrl_info->num_queue_groups = num_queue_groups;
-	ctrl_info->max_hw_queue_index = num_queue_groups - 1;

 	/*
 	 * Make sure that the max. inbound IU length is an even multiple
@@ -5567,7 +5579,9 @@ static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
 {
 	struct pqi_io_request *io_request;

-	io_request = pqi_alloc_io_request(ctrl_info);
+	io_request = pqi_alloc_io_request(ctrl_info, scmd);
+	if (!io_request)
+		return SCSI_MLQUEUE_HOST_BUSY;

 	return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
 		device, scmd, queue_group);
@@ -5671,7 +5685,9 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_scsi_dev *device;

 	device = scmd->device->hostdata;
-	io_request = pqi_alloc_io_request(ctrl_info);
+	io_request = pqi_alloc_io_request(ctrl_info, scmd);
+	if (!io_request)
+		return SCSI_MLQUEUE_HOST_BUSY;
 	io_request->io_complete_callback = pqi_aio_io_complete;
 	io_request->scmd = scmd;
 	io_request->raid_bypass = raid_bypass;
@@ -5743,7 +5759,10 @@ static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_io_request *io_request;
 	struct pqi_aio_r1_path_request *r1_request;

-	io_request = pqi_alloc_io_request(ctrl_info);
+	io_request = pqi_alloc_io_request(ctrl_info, scmd);
+	if (!io_request)
+		return SCSI_MLQUEUE_HOST_BUSY;
+
 	io_request->io_complete_callback = pqi_aio_io_complete;
 	io_request->scmd = scmd;
 	io_request->raid_bypass = true;
@@ -5801,7 +5820,9 @@ static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_io_request *io_request;
 	struct pqi_aio_r56_path_request *r56_request;

-	io_request = pqi_alloc_io_request(ctrl_info);
+	io_request = pqi_alloc_io_request(ctrl_info, scmd);
+	if (!io_request)
+		return SCSI_MLQUEUE_HOST_BUSY;
 	io_request->io_complete_callback = pqi_aio_io_complete;
 	io_request->scmd = scmd;
 	io_request->raid_bypass = true;
@@ -5860,13 +5881,10 @@ static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
 	struct scsi_cmnd *scmd)
 {
-	u16 hw_queue;
-
-	hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
-	if (hw_queue > ctrl_info->max_hw_queue_index)
-		hw_queue = 0;
-
-	return hw_queue;
+	/*
+	 * We are setting host_tagset = 1 during init.
+	 */
+	return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
 }

 static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
@@ -6268,7 +6286,7 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
 	struct pqi_scsi_dev *device;

 	device = scmd->device->hostdata;
-	io_request = pqi_alloc_io_request(ctrl_info);
+	io_request = pqi_alloc_io_request(ctrl_info, NULL);
 	io_request->io_complete_callback = pqi_lun_reset_complete;
 	io_request->context = &wait;