Commit 0c01b452 authored by Linus Torvalds

Merge tag 'dm-3.14-fixes-4' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device-mapper fixes from Mike Snitzer:
 "Two small fixes for the DM cache target:

   - fix corruption with >2TB fast device due to truncation bug
   - fix access beyond end of origin device due to a partial block"

* tag 'dm-3.14-fixes-4' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm cache: fix access beyond end of origin device
  dm cache: fix truncation bug when copying a block to/from >2TB fast device
parents c60f7d5a e893fba9
@@ -979,12 +979,13 @@ static void issue_copy_real(struct dm_cache_migration *mg)
 	int r;
 	struct dm_io_region o_region, c_region;
 	struct cache *cache = mg->cache;
+	sector_t cblock = from_cblock(mg->cblock);
 
 	o_region.bdev = cache->origin_dev->bdev;
 	o_region.count = cache->sectors_per_block;
 
 	c_region.bdev = cache->cache_dev->bdev;
-	c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block;
+	c_region.sector = cblock * cache->sectors_per_block;
 	c_region.count = cache->sectors_per_block;
 
 	if (mg->writeback || mg->demote) {
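The hunk above is the >2TB truncation fix: from_cblock() and cache->sectors_per_block are both 32-bit, so the old expression multiplied in 32-bit arithmetic and the high bits were already lost by the time the result was widened into the 64-bit c_region.sector. Widening one operand first, as the patch does, keeps the multiply in 64 bits. A minimal userspace sketch of the same arithmetic (the block numbers here are made up, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;	/* stand-in for the kernel's 64-bit sector_t */

int main(void)
{
	uint32_t cblock = 8388608;		/* hypothetical cache block index (2^23) */
	uint32_t sectors_per_block = 1024;	/* 512KB blocks */

	/* Buggy: both operands are 32-bit, so the product (2^23 * 2^10 = 2^33
	 * sectors) wraps to 0 before the assignment widens it. */
	sector_t bad = cblock * sectors_per_block;

	/* Fixed: widen one operand first, mirroring the patch's
	 * "sector_t cblock = from_cblock(mg->cblock);". */
	sector_t good = (sector_t)cblock * sectors_per_block;

	printf("truncated: %llu, correct: %llu\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}

This prints "truncated: 0, correct: 8589934592"; any cache block whose offset reaches 2^32 sectors (2TB of 512-byte sectors) loses its high bits the same way.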
@@ -2464,20 +2465,18 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 	bool discarded_block;
 	struct dm_bio_prison_cell *cell;
 	struct policy_result lookup_result;
-	struct per_bio_data *pb;
+	struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
 
-	if (from_oblock(block) > from_oblock(cache->origin_blocks)) {
+	if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
 		/*
 		 * This can only occur if the io goes to a partial block at
 		 * the end of the origin device. We don't cache these.
 		 * Just remap to the origin and carry on.
 		 */
-		remap_to_origin_clear_discard(cache, bio, block);
+		remap_to_origin(cache, bio);
 		return DM_MAPIO_REMAPPED;
 	}
 
-	pb = init_per_bio_data(bio, pb_data_size);
-
 	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
 		defer_bio(cache, bio);
 		return DM_MAPIO_SUBMITTED;
...
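This hunk is the partial-block fix: cache->origin_blocks counts whole blocks, so valid indices run from 0 through origin_blocks - 1, and a bio aimed at a trailing partial block computes to an index equal to origin_blocks. The old strict ">" test let that index through to the caching path, which could then access beyond the end of the origin device; ">=" catches it and remaps straight to the origin (plain remap_to_origin() rather than remap_to_origin_clear_discard(), presumably because an out-of-range block must not be used to index per-block state such as the discard bitset). A small sketch of the boundary arithmetic, with made-up device geometry:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;	/* stand-in for the kernel's sector_t */

int main(void)
{
	sector_t origin_sectors = 10000;	/* hypothetical origin size */
	sector_t sectors_per_block = 1024;
	sector_t origin_blocks = origin_sectors / sectors_per_block;	/* 9 whole blocks */

	sector_t bio_sector = 9500;		/* lands in the trailing partial block */
	sector_t block = bio_sector / sectors_per_block;	/* computes to index 9 */

	/* Cacheable indices are 0..origin_blocks-1 (here 0..8), so the old
	 * test misses the partial block and the new test catches it. */
	printf("old test (>)  remaps: %d\n", block > origin_blocks ? 1 : 0);	/* prints 0 */
	printf("new test (>=) remaps: %d\n", block >= origin_blocks ? 1 : 0);	/* prints 1 */
	return 0;
}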