Commit d285203c authored by Christoph Hellwig

scsi: add support for a blk-mq based I/O path.

This patch adds support for an alternate I/O path in the scsi midlayer
which uses the blk-mq infrastructure instead of the legacy request code.

Use of blk-mq is fully transparent to drivers, although for now a host
template field is provided to opt out of blk-mq usage in case any unforeseen
incompatibilities arise.
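
As an illustration, here is a minimal sketch of how a low-level driver could
use the opt-out flag.  Only the disable_blk_mq field comes from this patch
(see the scsi_host_template hunk below); the driver name and the remaining
template values are hypothetical placeholders:

    #include <linux/module.h>
    #include <scsi/scsi_host.h>

    /* Hypothetical host template for an "example" LLDD; only .disable_blk_mq
     * is taken from this patch, the other values are illustrative. */
    static struct scsi_host_template example_sht = {
            .module          = THIS_MODULE,
            .name            = "example",
            .this_id         = -1,
            .can_queue       = 64,
            .cmd_per_lun     = 8,
            /* force the legacy request path even if use_blk_mq is enabled */
            .disable_blk_mq  = true,
    };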

In general, replacing the legacy request code with blk-mq is a simple and
mostly mechanical transformation.  The biggest exception is the new code
that deals with the fact that I/O submissions in blk-mq must happen from
process context, which slightly complicates the I/O completion handler.
The second biggest difference is that blk-mq is built around the concept
of preallocated requests that also include driver-specific data, which
in the SCSI context means the scsi_cmnd structure.  This completely avoids
dynamic memory allocations on the fast path for I/O submission.

Due to the preallocated requests, the MQ code path exclusively uses the
host-wide shared tag allocator instead of a per-LUN one.  This only
affects drivers that actually use the block layer provided tag allocator
instead of their own.  Unlike the old path, blk-mq always provides a tag,
although drivers don't have to use it.
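
For example (a sketch, not part of this patch): on the blk-mq path a driver
can simply rely on the block-layer tag attached to each command's request,
whether or not it ever called scsi_init_shared_tag_map().  The helper below
is hypothetical and only illustrates where the tag lives:

    #include <linux/blkdev.h>
    #include <scsi/scsi_cmnd.h>

    /* Hypothetical helper: fetch the block-layer tag for a command.  On the
     * blk-mq path this is always valid; on the legacy path it is only valid
     * when block-layer tagging is enabled for the device. */
    static int example_cmd_tag(struct scsi_cmnd *cmd)
    {
            return cmd->request->tag;
    }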

For now the blk-mq path is disabled by default and must be enabled using
the "use_blk_mq" module parameter.  Once the remaining work in the block
layer to make blk-mq more suitable for slow devices is complete, I hope
to make it the default and eventually even remove the old code path.
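
For example, with a modular SCSI core this would typically be enabled with
"modprobe scsi_mod use_blk_mq=Y", or with "scsi_mod.use_blk_mq=Y" on the
kernel command line when the core is built in; whether a given host actually
uses blk-mq can then be read back through the use_blk_mq sysfs attribute
added below.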

Based on the earlier scsi-mq prototype by Nicholas Bellinger.

Thanks to Bart Van Assche and Robert Elliott for testing, benchmarking and
various suggestions and code contributions.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Webb Scales <webbnh@hp.com>
Acked-by: Jens Axboe <axboe@kernel.dk>
Tested-by: Bart Van Assche <bvanassche@acm.org>
Tested-by: Robert Elliott <elliott@hp.com>
parent c53c6d6a
@@ -213,9 +213,24 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
                 goto fail;
         }
 
+        if (shost_use_blk_mq(shost)) {
+                error = scsi_mq_setup_tags(shost);
+                if (error)
+                        goto fail;
+        }
+
+        /*
+         * Note that we allocate the freelist even for the MQ case for now,
+         * as we need a command set aside for scsi_reset_provider.  Having
+         * the full host freelist and one command available for that is a
+         * little heavy-handed, but avoids introducing a special allocator
+         * just for this.  Eventually the structure of scsi_reset_provider
+         * will need a major overhaul.
+         */
         error = scsi_setup_command_freelist(shost);
         if (error)
-                goto fail;
+                goto out_destroy_tags;
 
         if (!shost->shost_gendev.parent)
                 shost->shost_gendev.parent = dev ? dev : &platform_bus;
@@ -226,7 +241,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
         error = device_add(&shost->shost_gendev);
         if (error)
-                goto out;
+                goto out_destroy_freelist;
 
         pm_runtime_set_active(&shost->shost_gendev);
         pm_runtime_enable(&shost->shost_gendev);
@@ -279,8 +294,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
         device_del(&shost->shost_dev);
  out_del_gendev:
         device_del(&shost->shost_gendev);
- out:
+ out_destroy_freelist:
         scsi_destroy_command_freelist(shost);
+ out_destroy_tags:
+        if (shost_use_blk_mq(shost))
+                scsi_mq_destroy_tags(shost);
  fail:
         return error;
 }
@@ -309,8 +327,13 @@ static void scsi_host_dev_release(struct device *dev)
         }
 
         scsi_destroy_command_freelist(shost);
-        if (shost->bqt)
-                blk_free_tags(shost->bqt);
+        if (shost_use_blk_mq(shost)) {
+                if (shost->tag_set.tags)
+                        scsi_mq_destroy_tags(shost);
+        } else {
+                if (shost->bqt)
+                        blk_free_tags(shost->bqt);
+        }
 
         kfree(shost->shost_data);
@@ -436,6 +459,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
         else
                 shost->dma_boundary = 0xffffffff;
 
+        shost->use_blk_mq = scsi_use_blk_mq && !shost->hostt->disable_blk_mq;
+
         device_initialize(&shost->shost_gendev);
         dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
         shost->shost_gendev.bus = &scsi_bus_type;
......
@@ -805,7 +805,7 @@ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
          * is more IO than the LLD's can_queue (so there are not enuogh
          * tags) request_fn's host queue ready check will handle it.
          */
-        if (!sdev->host->bqt) {
+        if (!shost_use_blk_mq(sdev->host) && !sdev->host->bqt) {
                 if (blk_queue_tagged(sdev->request_queue) &&
                     blk_queue_resize_tags(sdev->request_queue, tags) != 0)
                         goto out;
@@ -1361,6 +1361,9 @@ MODULE_LICENSE("GPL");
 module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
 
+bool scsi_use_blk_mq = false;
+module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
+
 static int __init init_scsi(void)
 {
         int error;
......
This diff is collapsed.
@@ -88,6 +88,9 @@ extern void scsi_next_command(struct scsi_cmnd *cmd);
 extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
 extern void scsi_run_host_queues(struct Scsi_Host *shost);
 extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev);
+extern struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev);
+extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
+extern void scsi_mq_destroy_tags(struct Scsi_Host *shost);
 extern int scsi_init_queue(void);
 extern void scsi_exit_queue(void);
 struct request_queue;
......
@@ -273,7 +273,10 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
          */
         sdev->borken = 1;
 
-        sdev->request_queue = scsi_alloc_queue(sdev);
+        if (shost_use_blk_mq(shost))
+                sdev->request_queue = scsi_mq_alloc_queue(sdev);
+        else
+                sdev->request_queue = scsi_alloc_queue(sdev);
         if (!sdev->request_queue) {
                 /* release fn is set up in scsi_sysfs_device_initialise, so
                  * have to free and put manually here */
......
@@ -333,6 +333,7 @@ store_shost_eh_deadline(struct device *dev, struct device_attribute *attr,
 
 static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline);
 
+shost_rd_attr(use_blk_mq, "%d\n");
 shost_rd_attr(unique_id, "%u\n");
 shost_rd_attr(cmd_per_lun, "%hd\n");
 shost_rd_attr(can_queue, "%hd\n");
@@ -352,6 +353,7 @@ show_host_busy(struct device *dev, struct device_attribute *attr, char *buf)
 static DEVICE_ATTR(host_busy, S_IRUGO, show_host_busy, NULL);
 
 static struct attribute *scsi_sysfs_shost_attrs[] = {
+        &dev_attr_use_blk_mq.attr,
         &dev_attr_unique_id.attr,
         &dev_attr_host_busy.attr,
         &dev_attr_cmd_per_lun.attr,
......
@@ -7,6 +7,7 @@
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
 #include <linux/seq_file.h>
+#include <linux/blk-mq.h>
 #include <scsi/scsi.h>
 
 struct request_queue;
@@ -510,6 +511,9 @@ struct scsi_host_template {
          */
         unsigned int cmd_size;
         struct scsi_host_cmd_pool *cmd_pool;
+
+        /* temporary flag to disable blk-mq I/O path */
+        bool disable_blk_mq;
 };
 
 /*
@@ -580,7 +584,10 @@ struct Scsi_Host {
          * Area to keep a shared tag map (if needed, will be
          * NULL if not).
          */
-        struct blk_queue_tag *bqt;
+        union {
+                struct blk_queue_tag *bqt;
+                struct blk_mq_tag_set tag_set;
+        };
 
         atomic_t host_busy;                /* commands actually active on low-level */
         atomic_t host_blocked;
@@ -672,6 +679,8 @@ struct Scsi_Host {
         /* The controller does not support WRITE SAME */
         unsigned no_write_same:1;
 
+        unsigned use_blk_mq:1;
+
         /*
          * Optional work queue to be utilized by the transport
          */
@@ -772,6 +781,13 @@ static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
                 shost->tmf_in_progress;
 }
 
+extern bool scsi_use_blk_mq;
+
+static inline bool shost_use_blk_mq(struct Scsi_Host *shost)
+{
+        return shost->use_blk_mq;
+}
+
 extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
 extern void scsi_flush_work(struct Scsi_Host *);
......
@@ -67,7 +67,8 @@ static inline void scsi_activate_tcq(struct scsi_device *sdev, int depth)
         if (!sdev->tagged_supported)
                 return;
 
-        if (!blk_queue_tagged(sdev->request_queue))
+        if (!shost_use_blk_mq(sdev->host) &&
+            blk_queue_tagged(sdev->request_queue))
                 blk_queue_init_tags(sdev->request_queue, depth,
                                     sdev->host->bqt);
@@ -80,7 +81,8 @@ static inline void scsi_activate_tcq(struct scsi_device *sdev, int depth)
  **/
 static inline void scsi_deactivate_tcq(struct scsi_device *sdev, int depth)
 {
-        if (blk_queue_tagged(sdev->request_queue))
+        if (!shost_use_blk_mq(sdev->host) &&
+            blk_queue_tagged(sdev->request_queue))
                 blk_queue_free_tags(sdev->request_queue);
         scsi_adjust_queue_depth(sdev, 0, depth);
 }
@@ -108,6 +110,15 @@ static inline int scsi_populate_tag_msg(struct scsi_cmnd *cmd, char *msg)
         return 0;
 }
 
+static inline struct scsi_cmnd *scsi_mq_find_tag(struct Scsi_Host *shost,
+                                                 unsigned int hw_ctx, int tag)
+{
+        struct request *req;
+
+        req = blk_mq_tag_to_rq(shost->tag_set.tags[hw_ctx], tag);
+        return req ? (struct scsi_cmnd *)req->special : NULL;
+}
+
 /**
  * scsi_find_tag - find a tagged command by device
  * @SDpnt:      pointer to the ScSI device
@@ -118,10 +129,12 @@ static inline int scsi_populate_tag_msg(struct scsi_cmnd *cmd, char *msg)
  **/
 static inline struct scsi_cmnd *scsi_find_tag(struct scsi_device *sdev, int tag)
 {
         struct request *req;
 
         if (tag != SCSI_NO_TAG) {
+                if (shost_use_blk_mq(sdev->host))
+                        return scsi_mq_find_tag(sdev->host, 0, tag);
                 req = blk_queue_find_tag(sdev->request_queue, tag);
                 return req ? (struct scsi_cmnd *)req->special : NULL;
         }
@@ -130,6 +143,7 @@ static inline struct scsi_cmnd *scsi_find_tag(struct scsi_device *sdev, int tag)
         return sdev->current_cmnd;
 }
 
+
 /**
  * scsi_init_shared_tag_map - create a shared tag map
  * @shost:      the host to share the tag map among all devices
@@ -137,6 +151,12 @@ static inline struct scsi_cmnd *scsi_find_tag(struct scsi_device *sdev, int tag)
  */
 static inline int scsi_init_shared_tag_map(struct Scsi_Host *shost, int depth)
 {
+        /*
+         * We always have a shared tag map around when using blk-mq.
+         */
+        if (shost_use_blk_mq(shost))
+                return 0;
+
         /*
          * If the shared tag map isn't already initialized, do it now.
          * This saves callers from having to check ->bqt when setting up
@@ -165,6 +185,8 @@ static inline struct scsi_cmnd *scsi_host_find_tag(struct Scsi_Host *shost,
         struct request *req;
 
         if (tag != SCSI_NO_TAG) {
+                if (shost_use_blk_mq(shost))
+                        return scsi_mq_find_tag(shost, 0, tag);
                 req = blk_map_queue_find_tag(shost->bqt, tag);
                 return req ? (struct scsi_cmnd *)req->special : NULL;
         }
......