Commit 6a23e05c authored by Jens Axboe, committed by Mike Snitzer

dm: remove legacy request-based IO path

dm supports both the legacy request_fn and the blk-mq request-based I/O
paths, and since the legacy path is being killed off in the block layer
in general, get rid of it in dm as well.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 118aa47c
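
With the legacy path gone, both request-based table types are served by blk-mq alone. For orientation, the dm.h helper that classifies table types looked roughly like this around this kernel version (a sketch for context, not part of this diff):

        /* dm.h (sketch): after this commit, any request-based type implies blk-mq */
        static inline bool __table_type_request_based(enum dm_queue_mode table_type)
        {
                return table_type == DM_TYPE_REQUEST_BASED ||
                       table_type == DM_TYPE_MQ_REQUEST_BASED;
        }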
drivers/md/Kconfig
@@ -215,17 +215,6 @@ config BLK_DEV_DM
           If unsure, say N.
 
-config DM_MQ_DEFAULT
-        bool "request-based DM: use blk-mq I/O path by default"
-        depends on BLK_DEV_DM
-        ---help---
-          This option enables the blk-mq based I/O path for request-based
-          DM devices by default.  With the option the dm_mod.use_blk_mq
-          module/boot option defaults to Y, without it to N, but it can
-          still be overridden either way.
-
-          If unsure, say N.
-
 config DM_DEBUG
         bool "Device mapper debugging support"
         depends on BLK_DEV_DM
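
The removed Kconfig option only chose the default of the dm_mod.use_blk_mq module parameter. Before this commit that parameter was wired up in dm-rq.c roughly as follows (a sketch of the pre-change code, all of which this commit deletes; the dm-rq.c diff itself is collapsed below):

        static bool use_blk_mq = IS_ENABLED(CONFIG_DM_MQ_DEFAULT);

        bool dm_use_blk_mq_default(void)
        {
                return use_blk_mq;
        }

        /* dm_mod.use_blk_mq, overridable at module load or on the kernel cmdline */
        module_param(use_blk_mq, bool, 0644);
        MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");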
drivers/md/dm-core.h
@@ -112,18 +112,8 @@ struct mapped_device {
         struct dm_stats stats;
 
-        struct kthread_worker kworker;
-        struct task_struct *kworker_task;
-
-        /* for request-based merge heuristic in dm_request_fn() */
-        unsigned seq_rq_merge_deadline_usecs;
-        int last_rq_rw;
-        sector_t last_rq_pos;
-        ktime_t last_rq_start_time;
-
         /* for blk-mq request-based DM support */
         struct blk_mq_tag_set *tag_set;
-        bool use_blk_mq:1;
         bool init_tio_pdu:1;
 
         struct srcu_struct io_barrier;
drivers/md/dm-mpath.c
@@ -203,14 +203,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
 static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
 {
         if (m->queue_mode == DM_TYPE_NONE) {
-                /*
-                 * Default to request-based.
-                 */
-                if (dm_use_blk_mq(dm_table_get_md(ti->table)))
-                        m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
-                else
-                        m->queue_mode = DM_TYPE_REQUEST_BASED;
-
+                m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
         } else if (m->queue_mode == DM_TYPE_BIO_BASED) {
                 INIT_WORK(&m->process_queued_bios, process_queued_bios);
 
                 /*
@@ -537,10 +530,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
                  * get the queue busy feedback (via BLK_STS_RESOURCE),
                  * otherwise I/O merging can suffer.
                  */
-                if (q->mq_ops)
-                        return DM_MAPIO_REQUEUE;
-                else
-                        return DM_MAPIO_DELAY_REQUEUE;
+                return DM_MAPIO_REQUEUE;
         }
         clone->bio = clone->biotail = NULL;
         clone->rq_disk = bdev->bd_disk;
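
The comment kept above is the reasoning: under blk-mq, DM_MAPIO_REQUEUE reaches the block layer as BLK_STS_RESOURCE, the busy feedback that makes blk-mq back off and keep merging instead of hammering a busy path. On the dispatch side in dm-rq.c this looks roughly like the following (a paraphrased sketch; cleanup helpers abbreviated, since that diff is collapsed below):

        /* in dm_mq_queue_rq() (sketch) */
        if (map_request(tio) == DM_MAPIO_REQUEUE) {
                /* undo dm_start_request() before asking blk-mq to retry */
                rq_end_stats(md, rq);
                return BLK_STS_RESOURCE;        /* busy feedback to blk-mq */
        }
        return BLK_STS_OK;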
drivers/md/dm-rq.c
This diff is collapsed.
drivers/md/dm-rq.h
@@ -46,10 +46,6 @@ struct dm_rq_clone_bio_info {
         struct bio clone;
 };
 
-bool dm_use_blk_mq_default(void);
-bool dm_use_blk_mq(struct mapped_device *md);
-
-int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t);
 int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t);
 void dm_mq_cleanup_mapped_device(struct mapped_device *md);
drivers/md/dm-sysfs.c
@@ -92,7 +92,8 @@ static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf)
 
 static ssize_t dm_attr_use_blk_mq_show(struct mapped_device *md, char *buf)
 {
-        sprintf(buf, "%d\n", dm_use_blk_mq(md));
+        /* Purely for userspace compatibility */
+        sprintf(buf, "%d\n", true);
 
         return strlen(buf);
 }
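
Userspace that reads the attribute keeps working; it simply always sees 1 now. A hypothetical check (the dm-0 device name is only an example):

        #include <stdio.h>

        int main(void)
        {
                char buf[8] = "";
                FILE *f = fopen("/sys/block/dm-0/dm/use_blk_mq", "r");

                if (f) {
                        if (fgets(buf, sizeof(buf), f))
                                printf("use_blk_mq: %s", buf);  /* "1" after this commit */
                        fclose(f);
                }
                return 0;
        }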
drivers/md/dm-table.c
@@ -47,7 +47,6 @@ struct dm_table {
         bool integrity_supported:1;
         bool singleton:1;
-        bool all_blk_mq:1;
         unsigned integrity_added:1;
 
         /*
@@ -910,21 +909,10 @@ static bool dm_table_supports_dax(struct dm_table *t)
 
 static bool dm_table_does_not_support_partial_completion(struct dm_table *t);
 
-struct verify_rq_based_data {
-        unsigned sq_count;
-        unsigned mq_count;
-};
-
 static int device_is_rq_based(struct dm_target *ti, struct dm_dev *dev,
                               sector_t start, sector_t len, void *data)
 {
         struct request_queue *q = bdev_get_queue(dev->bdev);
-        struct verify_rq_based_data *v = data;
-
-        if (q->mq_ops)
-                v->mq_count++;
-        else
-                v->sq_count++;
 
         return queue_is_rq_based(q);
 }
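
With the sq/mq tally gone, device_is_rq_based() is a plain wrapper around the block layer's predicate. For reference, queue_is_rq_based() looked roughly like this in include/linux/blkdev.h at the time (context only, not part of this diff):

        /*
         * A queue can have requests stacked onto it if it is driven by a
         * legacy ->request_fn or by blk-mq ->mq_ops; bio-only queues have
         * neither.
         */
        static inline bool queue_is_rq_based(struct request_queue *q)
        {
                return q->request_fn || q->mq_ops;
        }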
@@ -933,7 +921,6 @@ static int dm_table_determine_type(struct dm_table *t)
 {
         unsigned i;
         unsigned bio_based = 0, request_based = 0, hybrid = 0;
-        struct verify_rq_based_data v = {.sq_count = 0, .mq_count = 0};
         struct dm_target *tgt;
         struct list_head *devices = dm_table_get_devices(t);
         enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
@@ -1022,11 +1009,9 @@ static int dm_table_determine_type(struct dm_table *t)
                 int srcu_idx;
                 struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
 
-                /* inherit live table's type and all_blk_mq */
-                if (live_table) {
+                /* inherit live table's type */
+                if (live_table)
                         t->type = live_table->type;
-                        t->all_blk_mq = live_table->all_blk_mq;
-                }
                 dm_put_live_table(t->md, srcu_idx);
                 return 0;
         }
@@ -1042,21 +1027,10 @@ static int dm_table_determine_type(struct dm_table *t)
 
         /* Non-request-stackable devices can't be used for request-based dm */
         if (!tgt->type->iterate_devices ||
-            !tgt->type->iterate_devices(tgt, device_is_rq_based, &v)) {
+            !tgt->type->iterate_devices(tgt, device_is_rq_based, NULL)) {
                 DMERR("table load rejected: including non-request-stackable devices");
                 return -EINVAL;
         }
-        if (v.sq_count && v.mq_count) {
-                DMERR("table load rejected: not all devices are blk-mq request-stackable");
-                return -EINVAL;
-        }
-        t->all_blk_mq = v.mq_count > 0;
-
-        if (!t->all_blk_mq &&
-            (t->type == DM_TYPE_MQ_REQUEST_BASED || t->type == DM_TYPE_NVME_BIO_BASED)) {
-                DMERR("table load rejected: all devices are not blk-mq request-stackable");
-                return -EINVAL;
-        }
 
         return 0;
 }
@@ -1105,11 +1079,6 @@ bool dm_table_request_based(struct dm_table *t)
         return __table_type_request_based(dm_table_get_type(t));
 }
 
-bool dm_table_all_blk_mq_devices(struct dm_table *t)
-{
-        return t->all_blk_mq;
-}
-
 static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
 {
         enum dm_queue_mode type = dm_table_get_type(t);
@@ -2083,22 +2052,14 @@ void dm_table_run_md_queue_async(struct dm_table *t)
 {
         struct mapped_device *md;
         struct request_queue *queue;
-        unsigned long flags;
 
         if (!dm_table_request_based(t))
                 return;
 
         md = dm_table_get_md(t);
         queue = dm_get_md_queue(md);
-        if (queue) {
-                if (queue->mq_ops)
-                        blk_mq_run_hw_queues(queue, true);
-                else {
-                        spin_lock_irqsave(queue->queue_lock, flags);
-                        blk_run_queue_async(queue);
-                        spin_unlock_irqrestore(queue->queue_lock, flags);
-                }
-        }
+        if (queue)
+                blk_mq_run_hw_queues(queue, true);
 }
 EXPORT_SYMBOL(dm_table_run_md_queue_async);
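
With only blk-mq left there is no queue_lock/blk_run_queue_async() case to handle, so kicking the queues is a single call. For reference, the block-layer helper used above has this shape (signature as of this era):

        /* run all hardware queues of @q; with @async true the work is
         * deferred to kblockd rather than run in the caller's context */
        void blk_mq_run_hw_queues(struct request_queue *q, bool async);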
drivers/md/dm.c
@@ -1808,8 +1808,6 @@ static void dm_wq_work(struct work_struct *work);
 
 static void dm_init_normal_md_queue(struct mapped_device *md)
 {
-        md->use_blk_mq = false;
-
         /*
          * Initialize aspects of queue that aren't relevant for blk-mq
          */
@@ -1820,8 +1818,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
 {
         if (md->wq)
                 destroy_workqueue(md->wq);
-        if (md->kworker_task)
-                kthread_stop(md->kworker_task);
         bioset_exit(&md->bs);
         bioset_exit(&md->io_bs);
@@ -1888,7 +1884,6 @@ static struct mapped_device *alloc_dev(int minor)
                 goto bad_io_barrier;
 
         md->numa_node_id = numa_node_id;
-        md->use_blk_mq = dm_use_blk_mq_default();
         md->init_tio_pdu = false;
         md->type = DM_TYPE_NONE;
         mutex_init(&md->suspend_lock);
@@ -1919,7 +1914,6 @@ static struct mapped_device *alloc_dev(int minor)
         INIT_WORK(&md->work, dm_wq_work);
         init_waitqueue_head(&md->eventq);
         init_completion(&md->kobj_holder.completion);
-        md->kworker_task = NULL;
 
         md->disk->major = _major;
         md->disk->first_minor = minor;
@@ -2219,13 +2213,6 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 
         switch (type) {
         case DM_TYPE_REQUEST_BASED:
-                dm_init_normal_md_queue(md);
-                r = dm_old_init_request_queue(md, t);
-                if (r) {
-                        DMERR("Cannot initialize queue for request-based mapped device");
-                        return r;
-                }
-                break;
         case DM_TYPE_MQ_REQUEST_BASED:
                 r = dm_mq_init_request_queue(md, t);
                 if (r) {
@@ -2331,9 +2318,6 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
 
         blk_set_queue_dying(md->queue);
 
-        if (dm_request_based(md) && md->kworker_task)
-                kthread_flush_worker(&md->kworker);
-
         /*
          * Take suspend_lock so that presuspend and postsuspend methods
          * do not race with internal suspend.
@@ -2586,11 +2570,8 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
          * Stop md->queue before flushing md->wq in case request-based
          * dm defers requests to md->wq from md->queue.
          */
-        if (dm_request_based(md)) {
+        if (dm_request_based(md))
                 dm_stop_queue(md->queue);
-                if (md->kworker_task)
-                        kthread_flush_worker(&md->kworker);
-        }
 
         flush_workqueue(md->wq);
drivers/md/dm.h
@@ -70,7 +70,6 @@ struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
 struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
 bool dm_table_bio_based(struct dm_table *t);
 bool dm_table_request_based(struct dm_table *t);
-bool dm_table_all_blk_mq_devices(struct dm_table *t);
 void dm_table_free_md_mempools(struct dm_table *t);
 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);