Commit 974f51e8 authored by Hou Tao's avatar Hou Tao Committed by Mike Snitzer

dm: fix congested_fn for request-based device

We neither assign congested_fn for request-based blk-mq devices nor
implement it correctly. So fix both.

Also, remove incorrect comment from dm_init_normal_md_queue and rename
it to dm_init_congested_fn.

Fixes: 4aa9c692 ("bdi: separate out congested state into a separate struct")
Cc: stable@vger.kernel.org
Signed-off-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 248aa264
...@@ -1788,7 +1788,8 @@ static int dm_any_congested(void *congested_data, int bdi_bits) ...@@ -1788,7 +1788,8 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
* With request-based DM we only need to check the * With request-based DM we only need to check the
* top-level queue for congestion. * top-level queue for congestion.
*/ */
r = md->queue->backing_dev_info->wb.state & bdi_bits; struct backing_dev_info *bdi = md->queue->backing_dev_info;
r = bdi->wb.congested->state & bdi_bits;
} else { } else {
map = dm_get_live_table_fast(md); map = dm_get_live_table_fast(md);
if (map) if (map)
...@@ -1854,15 +1855,6 @@ static const struct dax_operations dm_dax_ops; ...@@ -1854,15 +1855,6 @@ static const struct dax_operations dm_dax_ops;
static void dm_wq_work(struct work_struct *work); static void dm_wq_work(struct work_struct *work);
static void dm_init_normal_md_queue(struct mapped_device *md)
{
	/*
	 * Register dm_any_congested() as the queue's congestion callback;
	 * congested_data is handed back to that callback so it can reach
	 * the mapped_device.
	 *
	 * NOTE(review): the original comment here claimed this setup is
	 * "not relevant for blk-mq", but request-based blk-mq devices also
	 * need a congested_fn — confirm against dm_setup_md_queue(), which
	 * now calls the renamed dm_init_congested_fn() for every queue type.
	 */
	md->queue->backing_dev_info->congested_data = md;
	md->queue->backing_dev_info->congested_fn = dm_any_congested;
}
static void cleanup_mapped_device(struct mapped_device *md) static void cleanup_mapped_device(struct mapped_device *md)
{ {
if (md->wq) if (md->wq)
...@@ -2249,6 +2241,12 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md) ...@@ -2249,6 +2241,12 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
} }
EXPORT_SYMBOL_GPL(dm_get_queue_limits); EXPORT_SYMBOL_GPL(dm_get_queue_limits);
static void dm_init_congested_fn(struct mapped_device *md)
{
md->queue->backing_dev_info->congested_data = md;
md->queue->backing_dev_info->congested_fn = dm_any_congested;
}
/* /*
* Setup the DM device's queue based on md's type * Setup the DM device's queue based on md's type
*/ */
...@@ -2265,11 +2263,12 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) ...@@ -2265,11 +2263,12 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
DMERR("Cannot initialize queue for request-based dm-mq mapped device"); DMERR("Cannot initialize queue for request-based dm-mq mapped device");
return r; return r;
} }
dm_init_congested_fn(md);
break; break;
case DM_TYPE_BIO_BASED: case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED: case DM_TYPE_DAX_BIO_BASED:
case DM_TYPE_NVME_BIO_BASED: case DM_TYPE_NVME_BIO_BASED:
dm_init_normal_md_queue(md); dm_init_congested_fn(md);
break; break;
case DM_TYPE_NONE: case DM_TYPE_NONE:
WARN_ON_ONCE(true); WARN_ON_ONCE(true);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment