Commit 7d4dec52 authored by Christoph Hellwig, committed by Jens Axboe

block: move the raid_partial_stripes_expensive flag into the features field

Move the raid_partial_stripes_expensive flags into the features field to
reclaim a little bit of space.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Link: https://lore.kernel.org/r/20240619154623.450048-7-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 4cac3d3a
@@ -556,10 +556,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		ret = -1;
 	}
 
-	t->raid_partial_stripes_expensive =
-		max(t->raid_partial_stripes_expensive,
-		    b->raid_partial_stripes_expensive);
-
 	/* Find lowest common alignment_offset */
 	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
 		% max(t->physical_block_size, t->io_min);
@@ -1416,8 +1416,8 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
 	}
 
 	if (bdev_io_opt(dc->bdev))
-		dc->partial_stripes_expensive =
-			q->limits.raid_partial_stripes_expensive;
+		dc->partial_stripes_expensive = q->limits.features &
+			BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE;
 
 	ret = bcache_device_init(&dc->disk, block_size,
 			bdev_nr_sectors(dc->bdev) - dc->sb.data_offset,
@@ -7707,7 +7707,7 @@ static int raid5_set_limits(struct mddev *mddev)
 	blk_set_stacking_limits(&lim);
 	lim.io_min = mddev->chunk_sectors << 9;
 	lim.io_opt = lim.io_min * (conf->raid_disks - conf->max_degraded);
-	lim.raid_partial_stripes_expensive = 1;
+	lim.features |= BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE;
 	lim.discard_granularity = stripe;
 	lim.max_write_zeroes_sectors = 0;
 	mddev_stack_rdev_limits(mddev, &lim, 0);
@@ -328,6 +328,9 @@ enum {
 	/* bounce all highmem pages */
 	BLK_FEAT_BOUNCE_HIGH			= (1u << 14),
+
+	/* undocumented magic for bcache */
+	BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE	= (1u << 15),
 };
@@ -335,7 +338,8 @@ enum {
  */
 #define BLK_FEAT_INHERIT_MASK \
-	(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \
-	 BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | BLK_FEAT_BOUNCE_HIGH)
+	(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \
+	 BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | BLK_FEAT_BOUNCE_HIGH | \
+	 BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE)
 
 /* internal flags in queue_limits.flags */
 enum {
@@ -377,7 +381,6 @@ struct queue_limits {
 	unsigned short		max_integrity_segments;
 	unsigned short		max_discard_segments;
 
-	unsigned char		raid_partial_stripes_expensive;
 	unsigned int		max_open_zones;
 	unsigned int		max_active_zones;
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment