Commit 754c5fc7 authored by Mike Snitzer, committed by Alasdair G Kergon

dm: calculate queue limits during resume not load

Currently, device-mapper maintains a separate instance of 'struct
queue_limits' for each table of each device.  When the configuration of
a device is to be changed, first its table is loaded and this structure
is populated, then the device is 'resumed' and the calculated
queue_limits are applied.
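
Schematically, the pre-patch split between the two phases looks like this (the function names are taken from the diff below; the exact call chain is an assumption drawn from this description):

/*
 * Pre-patch flow (schematic, assumed from the description above):
 *
 *   table 'load' (DM_TABLE_LOAD ioctl):
 *       dm_table_get_device() fills each target's ti->limits while the
 *       table is built -- the queue_limits are frozen at load time.
 *
 *   device 'resume' (DM_DEV_SUSPEND ioctl):
 *       __bind() -> dm_table_set_restrictions(t, q) applies the limits
 *       that were pre-calculated at load time to the request_queue.
 */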

This places restrictions on how userspace may process related devices,
where it is often advantageous to 'load' tables for several devices
at once before 'resuming' them together.  As the new queue_limits
only take effect after the 'resume', if they are changing and one
device uses another, the latter must be 'resumed' before the former
may be 'loaded'.
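
To make the constraint concrete, here is a userspace sketch using libdevmapper (the library behind dmsetup). The dm_simple_task() helper, device names, sizes and table parameters are hypothetical, but dm_task_create(), dm_task_set_name(), dm_task_add_target(), dm_task_run() and dm_task_destroy() are the library's real entry points; both devices are assumed to already exist:

#include <stdint.h>
#include <libdevmapper.h>

/* Hypothetical helper: run one DM_DEVICE_RELOAD (table load) or
 * DM_DEVICE_RESUME ioctl against an existing mapped device. */
static int dm_simple_task(int task_type, const char *name,
			  uint64_t len, const char *target,
			  const char *params)
{
	struct dm_task *dmt = dm_task_create(task_type);
	int ok = 0;

	if (!dmt)
		return -1;
	if (!dm_task_set_name(dmt, name))
		goto out;
	if (task_type == DM_DEVICE_RELOAD &&
	    !dm_task_add_target(dmt, 0, len, target, params))
		goto out;
	ok = dm_task_run(dmt);
out:
	dm_task_destroy(dmt);
	return ok ? 0 : -1;
}

int main(void)
{
	/* After this patch, both tables may be loaded up front... */
	dm_simple_task(DM_DEVICE_RELOAD, "lower", 8192, "linear",
		       "8:16 0");              /* maps a real block device */
	dm_simple_task(DM_DEVICE_RELOAD, "upper", 8192, "linear",
		       "/dev/mapper/lower 0"); /* stacks on "lower" */

	/* ...and the devices resumed together, bottom-up.  Before the
	 * patch, "lower" had to be resumed before "upper" could even be
	 * loaded, so that "upper" would snapshot up-to-date limits. */
	dm_simple_task(DM_DEVICE_RESUME, "lower", 0, NULL, NULL);
	dm_simple_task(DM_DEVICE_RESUME, "upper", 0, NULL, NULL);
	return 0;
}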

This patch moves the calculation of these queue_limits out of
the 'load' operation into 'resume'.  Since we are no longer
pre-calculating this struct, we no longer need to maintain copies
within our dm structs.
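
The calculation itself lands in the collapsed portion of the diff. A minimal sketch of its likely shape, assuming the iterate_devices() target hook introduced earlier in this patch series, and with the body illustrative rather than the actual collapsed code:

/*
 * Sketch only -- the real implementation is in the collapsed diff.
 * Stack the limits of every device used by every target into a single
 * queue_limits for the whole table.
 */
int dm_calculate_queue_limits(struct dm_table *table,
			      struct queue_limits *limits)
{
	struct queue_limits ti_limits;
	unsigned i = 0;

	blk_set_default_limits(limits);

	while (i < dm_table_get_num_targets(table)) {
		struct dm_target *ti = dm_table_get_target(table, i++);

		blk_set_default_limits(&ti_limits);

		/* Have the target visit each device it maps to;
		 * dm_set_device_limits() (below) folds each device's
		 * limits into ti_limits. */
		if (ti->type->iterate_devices)
			ti->type->iterate_devices(ti, dm_set_device_limits,
						  &ti_limits);

		/* Merge this target's combined limits into the
		 * table-wide result. */
		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
			DMWARN("table: target device alignment conflict");
	}

	return 0;
}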

dm_set_device_limits() now passes the 'start' of the device's
data area (aka pe_start) as the 'offset' to blk_stack_limits().
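
With its new signature (visible in the include/linux/device-mapper.h hunk below), dm_set_device_limits() is usable directly as an iterate_devices() callback. Again a sketch under the same caveat -- the real body is in the collapsed diff:

/*
 * Sketch: 'data' is the queue_limits being accumulated for the target;
 * 'start' is the offset of the device's data area (pe_start), passed
 * straight through as blk_stack_limits()'s 'offset' so alignment is
 * computed relative to where the data actually begins.
 */
int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
			 sector_t start, void *data)
{
	struct queue_limits *limits = data;
	struct request_queue *q = bdev_get_queue(dev->bdev);

	if (!q) {
		DMWARN("device %s has no queue", dev->name);
		return 0;
	}

	if (blk_stack_limits(limits, &q->limits, start) < 0)
		DMWARN("device %s is misaligned", dev->name);

	return 0;
}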

init_valid_queue_limits() is replaced by blk_set_default_limits().

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: martin.petersen@oracle.com
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
parent 18d8594d
[The first file's diff is collapsed and not shown.]
@@ -1313,7 +1313,8 @@ static void __set_size(struct mapped_device *md, sector_t size)
 	mutex_unlock(&md->bdev->bd_inode->i_mutex);
 }
 
-static int __bind(struct mapped_device *md, struct dm_table *t)
+static int __bind(struct mapped_device *md, struct dm_table *t,
+		  struct queue_limits *limits)
 {
 	struct request_queue *q = md->queue;
 	sector_t size;
@@ -1337,7 +1338,7 @@ static int __bind(struct mapped_device *md, struct dm_table *t)
 
 	write_lock(&md->map_lock);
 	md->map = t;
-	dm_table_set_restrictions(t, q);
+	dm_table_set_restrictions(t, q, limits);
 	write_unlock(&md->map_lock);
 
 	return 0;
@@ -1562,6 +1563,7 @@ static void dm_queue_flush(struct mapped_device *md)
  */
 int dm_swap_table(struct mapped_device *md, struct dm_table *table)
 {
+	struct queue_limits limits;
 	int r = -EINVAL;
 
 	mutex_lock(&md->suspend_lock);
@@ -1570,8 +1572,12 @@ int dm_swap_table(struct mapped_device *md, struct dm_table *table)
 	if (!dm_suspended(md))
 		goto out;
 
+	r = dm_calculate_queue_limits(table, &limits);
+	if (r)
+		goto out;
+
 	__unbind(md);
-	r = __bind(md, table);
+	r = __bind(md, table, &limits);
 
 out:
 	mutex_unlock(&md->suspend_lock);
...
@@ -41,7 +41,10 @@ void dm_table_event_callback(struct dm_table *t,
 			     void (*fn)(void *), void *context);
 struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
-void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q);
+int dm_calculate_queue_limits(struct dm_table *table,
+			      struct queue_limits *limits);
+void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+			       struct queue_limits *limits);
 struct list_head *dm_table_get_devices(struct dm_table *t);
 void dm_table_presuspend_targets(struct dm_table *t);
 void dm_table_postsuspend_targets(struct dm_table *t);
...
@@ -103,7 +103,8 @@ void dm_error(const char *message);
 /*
  * Combine device limits.
  */
-void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev);
+int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
+			 sector_t start, void *data);
 
 struct dm_dev {
 	struct block_device *bdev;
@@ -163,7 +164,6 @@ struct dm_target {
 	sector_t begin;
 	sector_t len;
 
-	/* FIXME: turn this into a mask, and merge with queue_limits */
 	/* Always a power of 2 */
 	sector_t split_io;
 
@@ -177,12 +177,6 @@ struct dm_target {
 	 */
 	unsigned num_flush_requests;
 
-	/*
-	 * These are automatically filled in by
-	 * dm_table_get_device.
-	 */
-	struct queue_limits limits;
-
 	/* target specific data */
 	void *private;
...