Commit 85672639 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-6.10/dm-fixes' of...

Merge tag 'for-6.10/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - Fix DM discard regressions due to DM core switching over to using
   queue_limits_set() without DM core and targets first being updated to
   set (and stack) discard limits in terms of max_hw_discard_sectors and
   not max_discard_sectors

 - Fix stable@ DM integrity discard support to set device's
   discard_granularity limit to the device's logical block size

* tag 'for-6.10/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm: always manage discard support in terms of max_hw_discard_sectors
  dm-integrity: set discard_granularity to logical block size
parents 98f312bc 825d8bbd
...@@ -3390,7 +3390,7 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits) ...@@ -3390,7 +3390,7 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
if (!cache->features.discard_passdown) { if (!cache->features.discard_passdown) {
/* No passdown is done so setting own virtual limits */ /* No passdown is done so setting own virtual limits */
limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024, limits->max_hw_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
cache->origin_sectors); cache->origin_sectors);
limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT; limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
return; return;
...@@ -3400,7 +3400,6 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits) ...@@ -3400,7 +3400,6 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
* cache_iterate_devices() is stacking both origin and fast device limits * cache_iterate_devices() is stacking both origin and fast device limits
* but discards aren't passed to fast device, so inherit origin's limits. * but discards aren't passed to fast device, so inherit origin's limits.
*/ */
limits->max_discard_sectors = origin_limits->max_discard_sectors;
limits->max_hw_discard_sectors = origin_limits->max_hw_discard_sectors; limits->max_hw_discard_sectors = origin_limits->max_hw_discard_sectors;
limits->discard_granularity = origin_limits->discard_granularity; limits->discard_granularity = origin_limits->discard_granularity;
limits->discard_alignment = origin_limits->discard_alignment; limits->discard_alignment = origin_limits->discard_alignment;
......
...@@ -2046,7 +2046,8 @@ static void set_discard_limits(struct clone *clone, struct queue_limits *limits) ...@@ -2046,7 +2046,8 @@ static void set_discard_limits(struct clone *clone, struct queue_limits *limits)
if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags)) { if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags)) {
/* No passdown is done so we set our own virtual limits */ /* No passdown is done so we set our own virtual limits */
limits->discard_granularity = clone->region_size << SECTOR_SHIFT; limits->discard_granularity = clone->region_size << SECTOR_SHIFT;
limits->max_discard_sectors = round_down(UINT_MAX >> SECTOR_SHIFT, clone->region_size); limits->max_hw_discard_sectors = round_down(UINT_MAX >> SECTOR_SHIFT,
clone->region_size);
return; return;
} }
...@@ -2055,7 +2056,6 @@ static void set_discard_limits(struct clone *clone, struct queue_limits *limits) ...@@ -2055,7 +2056,6 @@ static void set_discard_limits(struct clone *clone, struct queue_limits *limits)
* device limits but discards aren't passed to the source device, so * device limits but discards aren't passed to the source device, so
* inherit destination's limits. * inherit destination's limits.
*/ */
limits->max_discard_sectors = dest_limits->max_discard_sectors;
limits->max_hw_discard_sectors = dest_limits->max_hw_discard_sectors; limits->max_hw_discard_sectors = dest_limits->max_hw_discard_sectors;
limits->discard_granularity = dest_limits->discard_granularity; limits->discard_granularity = dest_limits->discard_granularity;
limits->discard_alignment = dest_limits->discard_alignment; limits->discard_alignment = dest_limits->discard_alignment;
......
...@@ -3492,6 +3492,7 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *lim ...@@ -3492,6 +3492,7 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *lim
limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT; limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT); blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
limits->dma_alignment = limits->logical_block_size - 1; limits->dma_alignment = limits->logical_block_size - 1;
limits->discard_granularity = ic->sectors_per_block << SECTOR_SHIFT;
} }
limits->max_integrity_segments = USHRT_MAX; limits->max_integrity_segments = USHRT_MAX;
} }
......
...@@ -871,7 +871,7 @@ static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limit ...@@ -871,7 +871,7 @@ static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limit
if (!bdev_max_discard_sectors(lc->dev->bdev)) { if (!bdev_max_discard_sectors(lc->dev->bdev)) {
lc->device_supports_discard = false; lc->device_supports_discard = false;
limits->discard_granularity = lc->sectorsize; limits->discard_granularity = lc->sectorsize;
limits->max_discard_sectors = (UINT_MAX >> SECTOR_SHIFT); limits->max_hw_discard_sectors = (UINT_MAX >> SECTOR_SHIFT);
} }
limits->logical_block_size = bdev_logical_block_size(lc->dev->bdev); limits->logical_block_size = bdev_logical_block_size(lc->dev->bdev);
limits->physical_block_size = bdev_physical_block_size(lc->dev->bdev); limits->physical_block_size = bdev_physical_block_size(lc->dev->bdev);
......
...@@ -2410,7 +2410,7 @@ static void snapshot_io_hints(struct dm_target *ti, struct queue_limits *limits) ...@@ -2410,7 +2410,7 @@ static void snapshot_io_hints(struct dm_target *ti, struct queue_limits *limits)
/* All discards are split on chunk_size boundary */ /* All discards are split on chunk_size boundary */
limits->discard_granularity = snap->store->chunk_size; limits->discard_granularity = snap->store->chunk_size;
limits->max_discard_sectors = snap->store->chunk_size; limits->max_hw_discard_sectors = snap->store->chunk_size;
up_read(&_origins_lock); up_read(&_origins_lock);
} }
......
...@@ -249,7 +249,6 @@ static int io_err_iterate_devices(struct dm_target *ti, ...@@ -249,7 +249,6 @@ static int io_err_iterate_devices(struct dm_target *ti,
static void io_err_io_hints(struct dm_target *ti, struct queue_limits *limits) static void io_err_io_hints(struct dm_target *ti, struct queue_limits *limits)
{ {
limits->max_discard_sectors = UINT_MAX;
limits->max_hw_discard_sectors = UINT_MAX; limits->max_hw_discard_sectors = UINT_MAX;
limits->discard_granularity = 512; limits->discard_granularity = 512;
} }
......
...@@ -4094,7 +4094,7 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits) ...@@ -4094,7 +4094,7 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
if (pt->adjusted_pf.discard_enabled) { if (pt->adjusted_pf.discard_enabled) {
disable_discard_passdown_if_not_supported(pt); disable_discard_passdown_if_not_supported(pt);
if (!pt->adjusted_pf.discard_passdown) if (!pt->adjusted_pf.discard_passdown)
limits->max_discard_sectors = 0; limits->max_hw_discard_sectors = 0;
/* /*
* The pool uses the same discard limits as the underlying data * The pool uses the same discard limits as the underlying data
* device. DM core has already set this up. * device. DM core has already set this up.
...@@ -4491,7 +4491,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits) ...@@ -4491,7 +4491,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
if (pool->pf.discard_enabled) { if (pool->pf.discard_enabled) {
limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
limits->max_discard_sectors = pool->sectors_per_block * BIO_PRISON_MAX_RANGE; limits->max_hw_discard_sectors = pool->sectors_per_block * BIO_PRISON_MAX_RANGE;
} }
} }
......
...@@ -61,7 +61,6 @@ static int zero_map(struct dm_target *ti, struct bio *bio) ...@@ -61,7 +61,6 @@ static int zero_map(struct dm_target *ti, struct bio *bio)
static void zero_io_hints(struct dm_target *ti, struct queue_limits *limits) static void zero_io_hints(struct dm_target *ti, struct queue_limits *limits)
{ {
limits->max_discard_sectors = UINT_MAX;
limits->max_hw_discard_sectors = UINT_MAX; limits->max_hw_discard_sectors = UINT_MAX;
limits->discard_granularity = 512; limits->discard_granularity = 512;
} }
......
...@@ -1001,7 +1001,6 @@ static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits) ...@@ -1001,7 +1001,6 @@ static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
limits->discard_alignment = 0; limits->discard_alignment = 0;
limits->discard_granularity = DMZ_BLOCK_SIZE; limits->discard_granularity = DMZ_BLOCK_SIZE;
limits->max_discard_sectors = chunk_sectors;
limits->max_hw_discard_sectors = chunk_sectors; limits->max_hw_discard_sectors = chunk_sectors;
limits->max_write_zeroes_sectors = chunk_sectors; limits->max_write_zeroes_sectors = chunk_sectors;
......
...@@ -1086,7 +1086,7 @@ void disable_discard(struct mapped_device *md) ...@@ -1086,7 +1086,7 @@ void disable_discard(struct mapped_device *md)
struct queue_limits *limits = dm_get_queue_limits(md); struct queue_limits *limits = dm_get_queue_limits(md);
/* device doesn't really support DISCARD, disable it */ /* device doesn't really support DISCARD, disable it */
limits->max_discard_sectors = 0; limits->max_hw_discard_sectors = 0;
} }
void disable_write_zeroes(struct mapped_device *md) void disable_write_zeroes(struct mapped_device *md)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment