Commit 5ec140e6 authored by Vasily Averin, committed by Jens Axboe

dm: bounce_pfn limit added

Device mapper uses its own bounce_pfn, which may differ from the one on the
underlying device. As a result, dm can build incorrect requests containing sg
elements larger than the underlying device is able to handle.

This causes slab corruption in the i2o layer, observed on the i386 arch when
very long direct I/O requests are issued to a dm-over-i2o device.
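
The combining rule the patch relies on is min_not_zero(): a zero limit means
"not set", and two set limits combine to the stricter (smaller) one. Below is
a minimal userspace sketch of that semantics; the macro body mirrors the
kernel's, and the sample limit value is hypothetical.

	#include <stdio.h>

	/* Same idea as the kernel's min_not_zero(): 0 means "no limit set". */
	#define min_not_zero(l, r) \
		((l) == 0 ? (r) : ((r) == 0 ? (l) : ((l) < (r) ? (l) : (r))))

	int main(void)
	{
		unsigned long dm_pfn = 0;          /* dm table: limit not set yet  */
		unsigned long i2o_pfn = 0xfffffUL; /* hypothetical i2o queue limit */

		/* Combining keeps the stricter non-zero limit. */
		printf("combined bounce_pfn: %#lx\n", min_not_zero(dm_pfn, i2o_pfn));
		return 0;
	}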
Signed-off-by: Vasily Averin <vvs@sw.ru>
Cc: <stable@kernel.org>
Cc: Alasdair G Kergon <agk@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent 6f5d8aa6
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -102,6 +102,8 @@ static void combine_restrictions_low(struct io_restrictions *lhs,
 	lhs->seg_boundary_mask =
 		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);
 
+	lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn);
+
 	lhs->no_cluster |= rhs->no_cluster;
 }
@@ -566,6 +568,8 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 		min_not_zero(rs->seg_boundary_mask,
 			     q->seg_boundary_mask);
 
+	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);
+
 	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 }
 EXPORT_SYMBOL_GPL(dm_set_device_limits);
@@ -707,6 +711,8 @@ static void check_for_valid_limits(struct io_restrictions *rs)
 		rs->max_segment_size = MAX_SEGMENT_SIZE;
 	if (!rs->seg_boundary_mask)
 		rs->seg_boundary_mask = -1;
+	if (!rs->bounce_pfn)
+		rs->bounce_pfn = -1;
 }
 
 int dm_table_add_target(struct dm_table *t, const char *type,
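
A note on the -1 default in check_for_valid_limits(): bounce_pfn is an
unsigned long, so assigning -1 wraps to ULONG_MAX, i.e. every page frame
number falls below the limit and nothing is bounced. An illustrative
one-liner, not part of the patch:

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long bounce_pfn = -1;  /* wraps to ULONG_MAX */

		/* Any pfn satisfies pfn <= bounce_pfn, so no page is bounced. */
		printf("%d\n", bounce_pfn == ULONG_MAX);  /* prints 1 */
		return 0;
	}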
@@ -891,6 +897,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	q->hardsect_size = t->limits.hardsect_size;
 	q->max_segment_size = t->limits.max_segment_size;
 	q->seg_boundary_mask = t->limits.seg_boundary_mask;
+	q->bounce_pfn = t->limits.bounce_pfn;
 	if (t->limits.no_cluster)
 		q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER);
 	else
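
Taken together, the dm-table.c hunks make the limit flow in three steps:
dm_set_device_limits() folds each underlying queue's bounce_pfn into the
table's restrictions, check_for_valid_limits() defaults an unset value to -1
("bounce nothing"), and dm_table_set_restrictions() publishes the combined
value to the dm queue, where the block layer's bouncing logic can see it.
Before the patch that last step was missing, so the dm queue kept the
default and could accept pages above what an underlying i2o controller is
able to address.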
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -116,6 +116,7 @@ struct io_restrictions {
 	unsigned short hardsect_size;
 	unsigned int max_segment_size;
 	unsigned long seg_boundary_mask;
+	unsigned long bounce_pfn;
 	unsigned char no_cluster; /* inverted so that 0 is default */
 };