Commit 9974fa2c authored by Mike Snitzer's avatar Mike Snitzer

dm table: add dm_table_run_md_queue_async

Introduce dm_table_run_md_queue_async() to run the request_queue of the
mapped_device associated with a request-based DM table.

Also add dm_get_md_queue() wrapper to extract the request_queue from a
mapped_device.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
parent 17f4ff45
...@@ -1618,6 +1618,25 @@ struct mapped_device *dm_table_get_md(struct dm_table *t) ...@@ -1618,6 +1618,25 @@ struct mapped_device *dm_table_get_md(struct dm_table *t)
} }
EXPORT_SYMBOL(dm_table_get_md); EXPORT_SYMBOL(dm_table_get_md);
/*
 * Kick the request_queue of the mapped_device that owns this table.
 * Only meaningful for request-based tables; bio-based tables are a no-op.
 */
void dm_table_run_md_queue_async(struct dm_table *t)
{
	struct request_queue *q;
	unsigned long flags;

	if (!dm_table_request_based(t))
		return;

	q = dm_get_md_queue(dm_table_get_md(t));
	if (!q)
		return;

	/* blk_run_queue_async() must be called with the queue_lock held */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_run_queue_async(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(dm_table_run_md_queue_async);
static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev, static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data) sector_t start, sector_t len, void *data)
{ {
......
...@@ -468,6 +468,11 @@ sector_t dm_get_size(struct mapped_device *md) ...@@ -468,6 +468,11 @@ sector_t dm_get_size(struct mapped_device *md)
return get_capacity(md->disk); return get_capacity(md->disk);
} }
/*
 * Accessor: return the request_queue embedded in the mapped_device.
 * Caller must hold a reference on @md for the returned queue to stay valid.
 */
struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}
struct dm_stats *dm_get_stats(struct mapped_device *md) struct dm_stats *dm_get_stats(struct mapped_device *md)
{ {
return &md->stats; return &md->stats;
......
...@@ -188,6 +188,7 @@ int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only ...@@ -188,6 +188,7 @@ int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only
int dm_cancel_deferred_remove(struct mapped_device *md); int dm_cancel_deferred_remove(struct mapped_device *md);
int dm_request_based(struct mapped_device *md); int dm_request_based(struct mapped_device *md);
sector_t dm_get_size(struct mapped_device *md); sector_t dm_get_size(struct mapped_device *md);
struct request_queue *dm_get_md_queue(struct mapped_device *md);
struct dm_stats *dm_get_stats(struct mapped_device *md); struct dm_stats *dm_get_stats(struct mapped_device *md);
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
......
...@@ -462,6 +462,11 @@ struct mapped_device *dm_table_get_md(struct dm_table *t); ...@@ -462,6 +462,11 @@ struct mapped_device *dm_table_get_md(struct dm_table *t);
*/ */
void dm_table_event(struct dm_table *t); void dm_table_event(struct dm_table *t);
/*
* Run the queue for request-based targets.
*/
void dm_table_run_md_queue_async(struct dm_table *t);
/* /*
* The device must be suspended before calling this method. * The device must be suspended before calling this method.
* Returns the previous table, which the caller must destroy. * Returns the previous table, which the caller must destroy.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment