Commit 9d8f0363 authored by Andre Noll, committed by NeilBrown

md: Make mddev->chunk_size sector-based.

This patch renames the chunk_size field to chunk_sectors with the
implied change of semantics.  Since

	is_power_of_2(chunk_size) = is_power_of_2(chunk_sectors << 9)
				  = is_power_of_2(chunk_sectors)

these bits don't need an adjustment for the shift.
Signed-off-by: Andre Noll <maan@systemlinux.org>
Signed-off-by: NeilBrown <neilb@suse.de>
parent fbb704ef
...@@ -305,7 +305,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio) ...@@ -305,7 +305,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
static void linear_status (struct seq_file *seq, mddev_t *mddev) static void linear_status (struct seq_file *seq, mddev_t *mddev)
{ {
seq_printf(seq, " %dk rounding", mddev->chunk_size/1024); seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
} }
......
...@@ -869,7 +869,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) ...@@ -869,7 +869,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->minor_version = sb->minor_version; mddev->minor_version = sb->minor_version;
mddev->patch_version = sb->patch_version; mddev->patch_version = sb->patch_version;
mddev->external = 0; mddev->external = 0;
mddev->chunk_size = sb->chunk_size; mddev->chunk_sectors = sb->chunk_size >> 9;
mddev->ctime = sb->ctime; mddev->ctime = sb->ctime;
mddev->utime = sb->utime; mddev->utime = sb->utime;
mddev->level = sb->level; mddev->level = sb->level;
...@@ -892,7 +892,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) ...@@ -892,7 +892,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->delta_disks = 0; mddev->delta_disks = 0;
mddev->new_level = mddev->level; mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout; mddev->new_layout = mddev->layout;
mddev->new_chunk = mddev->chunk_size; mddev->new_chunk = mddev->chunk_sectors << 9;
} }
if (sb->state & (1<<MD_SB_CLEAN)) if (sb->state & (1<<MD_SB_CLEAN))
...@@ -1021,7 +1021,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) ...@@ -1021,7 +1021,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->recovery_cp = 0; sb->recovery_cp = 0;
sb->layout = mddev->layout; sb->layout = mddev->layout;
sb->chunk_size = mddev->chunk_size; sb->chunk_size = mddev->chunk_sectors << 9;
if (mddev->bitmap && mddev->bitmap_file == NULL) if (mddev->bitmap && mddev->bitmap_file == NULL)
sb->state |= (1<<MD_SB_BITMAP_PRESENT); sb->state |= (1<<MD_SB_BITMAP_PRESENT);
...@@ -1278,7 +1278,7 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) ...@@ -1278,7 +1278,7 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->major_version = 1; mddev->major_version = 1;
mddev->patch_version = 0; mddev->patch_version = 0;
mddev->external = 0; mddev->external = 0;
mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9; mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1); mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1); mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
mddev->level = le32_to_cpu(sb->level); mddev->level = le32_to_cpu(sb->level);
...@@ -1310,7 +1310,7 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) ...@@ -1310,7 +1310,7 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->delta_disks = 0; mddev->delta_disks = 0;
mddev->new_level = mddev->level; mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout; mddev->new_layout = mddev->layout;
mddev->new_chunk = mddev->chunk_size; mddev->new_chunk = mddev->chunk_sectors << 9;
} }
} else if (mddev->pers == NULL) { } else if (mddev->pers == NULL) {
...@@ -1382,7 +1382,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) ...@@ -1382,7 +1382,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->raid_disks = cpu_to_le32(mddev->raid_disks); sb->raid_disks = cpu_to_le32(mddev->raid_disks);
sb->size = cpu_to_le64(mddev->dev_sectors); sb->size = cpu_to_le64(mddev->dev_sectors);
sb->chunksize = cpu_to_le32(mddev->chunk_size >> 9); sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
sb->level = cpu_to_le32(mddev->level); sb->level = cpu_to_le32(mddev->level);
sb->layout = cpu_to_le32(mddev->layout); sb->layout = cpu_to_le32(mddev->layout);
...@@ -2753,7 +2753,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len) ...@@ -2753,7 +2753,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
if (IS_ERR(priv)) { if (IS_ERR(priv)) {
mddev->new_level = mddev->level; mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout; mddev->new_layout = mddev->layout;
mddev->new_chunk = mddev->chunk_size; mddev->new_chunk = mddev->chunk_sectors << 9;
mddev->raid_disks -= mddev->delta_disks; mddev->raid_disks -= mddev->delta_disks;
mddev->delta_disks = 0; mddev->delta_disks = 0;
module_put(pers->owner); module_put(pers->owner);
...@@ -2771,7 +2771,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len) ...@@ -2771,7 +2771,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
mddev->level = mddev->new_level; mddev->level = mddev->new_level;
mddev->layout = mddev->new_layout; mddev->layout = mddev->new_layout;
mddev->chunk_size = mddev->new_chunk; mddev->chunk_sectors = mddev->new_chunk >> 9;
mddev->delta_disks = 0; mddev->delta_disks = 0;
pers->run(mddev); pers->run(mddev);
mddev_resume(mddev); mddev_resume(mddev);
...@@ -2864,10 +2864,10 @@ static ssize_t ...@@ -2864,10 +2864,10 @@ static ssize_t
chunk_size_show(mddev_t *mddev, char *page) chunk_size_show(mddev_t *mddev, char *page)
{ {
if (mddev->reshape_position != MaxSector && if (mddev->reshape_position != MaxSector &&
mddev->chunk_size != mddev->new_chunk) mddev->chunk_sectors << 9 != mddev->new_chunk)
return sprintf(page, "%d (%d)\n", mddev->new_chunk, return sprintf(page, "%d (%d)\n", mddev->new_chunk,
mddev->chunk_size); mddev->chunk_sectors << 9);
return sprintf(page, "%d\n", mddev->chunk_size); return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
} }
static ssize_t static ssize_t
...@@ -2889,7 +2889,7 @@ chunk_size_store(mddev_t *mddev, const char *buf, size_t len) ...@@ -2889,7 +2889,7 @@ chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
} else { } else {
mddev->new_chunk = n; mddev->new_chunk = n;
if (mddev->reshape_position == MaxSector) if (mddev->reshape_position == MaxSector)
mddev->chunk_size = n; mddev->chunk_sectors = n >> 9;
} }
return len; return len;
} }
...@@ -3534,9 +3534,9 @@ min_sync_store(mddev_t *mddev, const char *buf, size_t len) ...@@ -3534,9 +3534,9 @@ min_sync_store(mddev_t *mddev, const char *buf, size_t len)
return -EBUSY; return -EBUSY;
/* Must be a multiple of chunk_size */ /* Must be a multiple of chunk_size */
if (mddev->chunk_size) { if (mddev->chunk_sectors) {
sector_t temp = min; sector_t temp = min;
if (sector_div(temp, (mddev->chunk_size>>9))) if (sector_div(temp, mddev->chunk_sectors))
return -EINVAL; return -EINVAL;
} }
mddev->resync_min = min; mddev->resync_min = min;
...@@ -3572,9 +3572,9 @@ max_sync_store(mddev_t *mddev, const char *buf, size_t len) ...@@ -3572,9 +3572,9 @@ max_sync_store(mddev_t *mddev, const char *buf, size_t len)
return -EBUSY; return -EBUSY;
/* Must be a multiple of chunk_size */ /* Must be a multiple of chunk_size */
if (mddev->chunk_size) { if (mddev->chunk_sectors) {
sector_t temp = max; sector_t temp = max;
if (sector_div(temp, (mddev->chunk_size>>9))) if (sector_div(temp, mddev->chunk_sectors))
return -EINVAL; return -EINVAL;
} }
mddev->resync_max = max; mddev->resync_max = max;
...@@ -3665,7 +3665,7 @@ reshape_position_store(mddev_t *mddev, const char *buf, size_t len) ...@@ -3665,7 +3665,7 @@ reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
mddev->delta_disks = 0; mddev->delta_disks = 0;
mddev->new_level = mddev->level; mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout; mddev->new_layout = mddev->layout;
mddev->new_chunk = mddev->chunk_size; mddev->new_chunk = mddev->chunk_sectors << 9;
return len; return len;
} }
...@@ -4007,7 +4007,7 @@ static int do_md_run(mddev_t * mddev) ...@@ -4007,7 +4007,7 @@ static int do_md_run(mddev_t * mddev)
analyze_sbs(mddev); analyze_sbs(mddev);
} }
chunk_size = mddev->chunk_size; chunk_size = mddev->chunk_sectors << 9;
if (chunk_size) { if (chunk_size) {
if (chunk_size > MAX_CHUNK_SIZE) { if (chunk_size > MAX_CHUNK_SIZE) {
...@@ -4406,7 +4406,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) ...@@ -4406,7 +4406,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
mddev->flags = 0; mddev->flags = 0;
mddev->ro = 0; mddev->ro = 0;
mddev->metadata_type[0] = 0; mddev->metadata_type[0] = 0;
mddev->chunk_size = 0; mddev->chunk_sectors = 0;
mddev->ctime = mddev->utime = 0; mddev->ctime = mddev->utime = 0;
mddev->layout = 0; mddev->layout = 0;
mddev->max_disks = 0; mddev->max_disks = 0;
...@@ -4619,7 +4619,7 @@ static int get_array_info(mddev_t * mddev, void __user * arg) ...@@ -4619,7 +4619,7 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
info.spare_disks = spare; info.spare_disks = spare;
info.layout = mddev->layout; info.layout = mddev->layout;
info.chunk_size = mddev->chunk_size; info.chunk_size = mddev->chunk_sectors << 9;
if (copy_to_user(arg, &info, sizeof(info))) if (copy_to_user(arg, &info, sizeof(info)))
return -EFAULT; return -EFAULT;
...@@ -4844,7 +4844,8 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) ...@@ -4844,7 +4844,8 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
} else } else
rdev->sb_start = calc_dev_sboffset(rdev->bdev); rdev->sb_start = calc_dev_sboffset(rdev->bdev);
rdev->sectors = calc_num_sectors(rdev, mddev->chunk_size); rdev->sectors = calc_num_sectors(rdev,
mddev->chunk_sectors << 9);
err = bind_rdev_to_array(rdev, mddev); err = bind_rdev_to_array(rdev, mddev);
if (err) { if (err) {
...@@ -4914,7 +4915,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev) ...@@ -4914,7 +4915,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
else else
rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
rdev->sectors = calc_num_sectors(rdev, mddev->chunk_size); rdev->sectors = calc_num_sectors(rdev, mddev->chunk_sectors << 9);
if (test_bit(Faulty, &rdev->flags)) { if (test_bit(Faulty, &rdev->flags)) {
printk(KERN_WARNING printk(KERN_WARNING
...@@ -5063,7 +5064,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) ...@@ -5063,7 +5064,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
mddev->external = 0; mddev->external = 0;
mddev->layout = info->layout; mddev->layout = info->layout;
mddev->chunk_size = info->chunk_size; mddev->chunk_sectors = info->chunk_size >> 9;
mddev->max_disks = MD_SB_DISKS; mddev->max_disks = MD_SB_DISKS;
...@@ -5082,7 +5083,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) ...@@ -5082,7 +5083,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
get_random_bytes(mddev->uuid, 16); get_random_bytes(mddev->uuid, 16);
mddev->new_level = mddev->level; mddev->new_level = mddev->level;
mddev->new_chunk = mddev->chunk_size; mddev->new_chunk = mddev->chunk_sectors << 9;
mddev->new_layout = mddev->layout; mddev->new_layout = mddev->layout;
mddev->delta_disks = 0; mddev->delta_disks = 0;
...@@ -5192,7 +5193,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) ...@@ -5192,7 +5193,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
mddev->level != info->level || mddev->level != info->level ||
/* mddev->layout != info->layout || */ /* mddev->layout != info->layout || */
!mddev->persistent != info->not_persistent|| !mddev->persistent != info->not_persistent||
mddev->chunk_size != info->chunk_size || mddev->chunk_sectors != info->chunk_size >> 9 ||
/* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
((state^info->state) & 0xfffffe00) ((state^info->state) & 0xfffffe00)
) )
......
...@@ -145,7 +145,7 @@ struct mddev_s ...@@ -145,7 +145,7 @@ struct mddev_s
int external; /* metadata is int external; /* metadata is
* managed externally */ * managed externally */
char metadata_type[17]; /* externally set*/ char metadata_type[17]; /* externally set*/
int chunk_size; int chunk_sectors;
time_t ctime, utime; time_t ctime, utime;
int level, layout; int level, layout;
char clevel[16]; char clevel[16];
......
...@@ -238,10 +238,10 @@ static int create_strip_zones(mddev_t *mddev) ...@@ -238,10 +238,10 @@ static int create_strip_zones(mddev_t *mddev)
* now since we have the hard sector sizes, we can make sure * now since we have the hard sector sizes, we can make sure
* chunk size is a multiple of that sector size * chunk size is a multiple of that sector size
*/ */
if (mddev->chunk_size % queue_logical_block_size(mddev->queue)) { if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
printk(KERN_ERR "%s chunk_size of %d not valid\n", printk(KERN_ERR "%s chunk_size of %d not valid\n",
mdname(mddev), mdname(mddev),
mddev->chunk_size); mddev->chunk_sectors << 9);
goto abort; goto abort;
} }
printk(KERN_INFO "raid0: done.\n"); printk(KERN_INFO "raid0: done.\n");
...@@ -270,10 +270,10 @@ static int raid0_mergeable_bvec(struct request_queue *q, ...@@ -270,10 +270,10 @@ static int raid0_mergeable_bvec(struct request_queue *q,
mddev_t *mddev = q->queuedata; mddev_t *mddev = q->queuedata;
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
int max; int max;
unsigned int chunk_sectors = mddev->chunk_size >> 9; unsigned int chunk_sectors = mddev->chunk_sectors;
unsigned int bio_sectors = bvm->bi_size >> 9; unsigned int bio_sectors = bvm->bi_size >> 9;
if (is_power_of_2(mddev->chunk_size)) if (is_power_of_2(mddev->chunk_sectors))
max = (chunk_sectors - ((sector & (chunk_sectors-1)) max = (chunk_sectors - ((sector & (chunk_sectors-1))
+ bio_sectors)) << 9; + bio_sectors)) << 9;
else else
...@@ -304,11 +304,11 @@ static int raid0_run(mddev_t *mddev) ...@@ -304,11 +304,11 @@ static int raid0_run(mddev_t *mddev)
{ {
int ret; int ret;
if (mddev->chunk_size == 0) { if (mddev->chunk_sectors == 0) {
printk(KERN_ERR "md/raid0: chunk size must be set.\n"); printk(KERN_ERR "md/raid0: chunk size must be set.\n");
return -EINVAL; return -EINVAL;
} }
blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9); blk_queue_max_sectors(mddev->queue, mddev->chunk_sectors);
mddev->queue->queue_lock = &mddev->queue->__queue_lock; mddev->queue->queue_lock = &mddev->queue->__queue_lock;
ret = create_strip_zones(mddev); ret = create_strip_zones(mddev);
...@@ -330,7 +330,8 @@ static int raid0_run(mddev_t *mddev) ...@@ -330,7 +330,8 @@ static int raid0_run(mddev_t *mddev)
* chunksize should be used in that case. * chunksize should be used in that case.
*/ */
{ {
int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE; int stripe = mddev->raid_disks *
(mddev->chunk_sectors << 9) / PAGE_SIZE;
if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
mddev->queue->backing_dev_info.ra_pages = 2* stripe; mddev->queue->backing_dev_info.ra_pages = 2* stripe;
} }
...@@ -381,9 +382,9 @@ static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone, ...@@ -381,9 +382,9 @@ static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
unsigned int sect_in_chunk; unsigned int sect_in_chunk;
sector_t chunk; sector_t chunk;
raid0_conf_t *conf = mddev->private; raid0_conf_t *conf = mddev->private;
unsigned int chunk_sects = mddev->chunk_size >> 9; unsigned int chunk_sects = mddev->chunk_sectors;
if (is_power_of_2(mddev->chunk_size)) { if (is_power_of_2(mddev->chunk_sectors)) {
int chunksect_bits = ffz(~chunk_sects); int chunksect_bits = ffz(~chunk_sects);
/* find the sector offset inside the chunk */ /* find the sector offset inside the chunk */
sect_in_chunk = sector & (chunk_sects - 1); sect_in_chunk = sector & (chunk_sects - 1);
...@@ -413,7 +414,7 @@ static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone, ...@@ -413,7 +414,7 @@ static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
static inline int is_io_in_chunk_boundary(mddev_t *mddev, static inline int is_io_in_chunk_boundary(mddev_t *mddev,
unsigned int chunk_sects, struct bio *bio) unsigned int chunk_sects, struct bio *bio)
{ {
if (likely(is_power_of_2(mddev->chunk_size))) { if (likely(is_power_of_2(mddev->chunk_sectors))) {
return chunk_sects >= ((bio->bi_sector & (chunk_sects-1)) return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
+ (bio->bi_size >> 9)); + (bio->bi_size >> 9));
} else{ } else{
...@@ -444,7 +445,7 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio) ...@@ -444,7 +445,7 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
bio_sectors(bio)); bio_sectors(bio));
part_stat_unlock(); part_stat_unlock();
chunk_sects = mddev->chunk_size >> 9; chunk_sects = mddev->chunk_sectors;
if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) { if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
sector_t sector = bio->bi_sector; sector_t sector = bio->bi_sector;
struct bio_pair *bp; struct bio_pair *bp;
...@@ -455,7 +456,7 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio) ...@@ -455,7 +456,7 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
/* This is a one page bio that upper layers /* This is a one page bio that upper layers
* refuse to split for us, so we need to split it. * refuse to split for us, so we need to split it.
*/ */
if (likely(is_power_of_2(mddev->chunk_size))) if (likely(is_power_of_2(mddev->chunk_sectors)))
bp = bio_split(bio, chunk_sects - (sector & bp = bio_split(bio, chunk_sects - (sector &
(chunk_sects-1))); (chunk_sects-1)));
else else
...@@ -519,7 +520,7 @@ static void raid0_status(struct seq_file *seq, mddev_t *mddev) ...@@ -519,7 +520,7 @@ static void raid0_status(struct seq_file *seq, mddev_t *mddev)
zone_start = conf->strip_zone[j].zone_end; zone_start = conf->strip_zone[j].zone_end;
} }
#endif #endif
seq_printf(seq, " %dk chunks", mddev->chunk_size/1024); seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
return; return;
} }
......
...@@ -2161,10 +2161,10 @@ static int raid1_reshape(mddev_t *mddev) ...@@ -2161,10 +2161,10 @@ static int raid1_reshape(mddev_t *mddev)
int d, d2, err; int d, d2, err;
/* Cannot change chunk_size, layout, or level */ /* Cannot change chunk_size, layout, or level */
if (mddev->chunk_size != mddev->new_chunk || if (mddev->chunk_sectors << 9 != mddev->new_chunk ||
mddev->layout != mddev->new_layout || mddev->layout != mddev->new_layout ||
mddev->level != mddev->new_level) { mddev->level != mddev->new_level) {
mddev->new_chunk = mddev->chunk_size; mddev->new_chunk = mddev->chunk_sectors << 9;
mddev->new_layout = mddev->layout; mddev->new_layout = mddev->layout;
mddev->new_level = mddev->level; mddev->new_level = mddev->level;
return -EINVAL; return -EINVAL;
......
...@@ -461,7 +461,7 @@ static int raid10_mergeable_bvec(struct request_queue *q, ...@@ -461,7 +461,7 @@ static int raid10_mergeable_bvec(struct request_queue *q,
mddev_t *mddev = q->queuedata; mddev_t *mddev = q->queuedata;
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
int max; int max;
unsigned int chunk_sectors = mddev->chunk_size >> 9; unsigned int chunk_sectors = mddev->chunk_sectors;
unsigned int bio_sectors = bvm->bi_size >> 9; unsigned int bio_sectors = bvm->bi_size >> 9;
max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
...@@ -985,7 +985,7 @@ static void status(struct seq_file *seq, mddev_t *mddev) ...@@ -985,7 +985,7 @@ static void status(struct seq_file *seq, mddev_t *mddev)
int i; int i;
if (conf->near_copies < conf->raid_disks) if (conf->near_copies < conf->raid_disks)
seq_printf(seq, " %dK chunks", mddev->chunk_size/1024); seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
if (conf->near_copies > 1) if (conf->near_copies > 1)
seq_printf(seq, " %d near-copies", conf->near_copies); seq_printf(seq, " %d near-copies", conf->near_copies);
if (conf->far_copies > 1) { if (conf->far_copies > 1) {
...@@ -2050,8 +2050,8 @@ static int run(mddev_t *mddev) ...@@ -2050,8 +2050,8 @@ static int run(mddev_t *mddev)
int nc, fc, fo; int nc, fc, fo;
sector_t stride, size; sector_t stride, size;
if (mddev->chunk_size < PAGE_SIZE || if (mddev->chunk_sectors < (PAGE_SIZE >> 9) ||
!is_power_of_2(mddev->chunk_size)) { !is_power_of_2(mddev->chunk_sectors)) {
printk(KERN_ERR "md/raid10: chunk size must be " printk(KERN_ERR "md/raid10: chunk size must be "
"at least PAGE_SIZE(%ld) and be a power of 2.\n", PAGE_SIZE); "at least PAGE_SIZE(%ld) and be a power of 2.\n", PAGE_SIZE);
return -EINVAL; return -EINVAL;
...@@ -2096,8 +2096,8 @@ static int run(mddev_t *mddev) ...@@ -2096,8 +2096,8 @@ static int run(mddev_t *mddev)
conf->far_copies = fc; conf->far_copies = fc;
conf->copies = nc*fc; conf->copies = nc*fc;
conf->far_offset = fo; conf->far_offset = fo;
conf->chunk_mask = (sector_t)(mddev->chunk_size>>9)-1; conf->chunk_mask = mddev->chunk_sectors - 1;
conf->chunk_shift = ffz(~mddev->chunk_size) - 9; conf->chunk_shift = ffz(~mddev->chunk_sectors);
size = mddev->dev_sectors >> conf->chunk_shift; size = mddev->dev_sectors >> conf->chunk_shift;
sector_div(size, fc); sector_div(size, fc);
size = size * conf->raid_disks; size = size * conf->raid_disks;
...@@ -2205,7 +2205,8 @@ static int run(mddev_t *mddev) ...@@ -2205,7 +2205,8 @@ static int run(mddev_t *mddev)
* maybe... * maybe...
*/ */
{ {
int stripe = conf->raid_disks * (mddev->chunk_size / PAGE_SIZE); int stripe = conf->raid_disks *
((mddev->chunk_sectors << 9) / PAGE_SIZE);
stripe /= conf->near_copies; stripe /= conf->near_copies;
if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
mddev->queue->backing_dev_info.ra_pages = 2* stripe; mddev->queue->backing_dev_info.ra_pages = 2* stripe;
......
...@@ -3352,13 +3352,13 @@ static int raid5_mergeable_bvec(struct request_queue *q, ...@@ -3352,13 +3352,13 @@ static int raid5_mergeable_bvec(struct request_queue *q,
mddev_t *mddev = q->queuedata; mddev_t *mddev = q->queuedata;
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
int max; int max;
unsigned int chunk_sectors = mddev->chunk_size >> 9; unsigned int chunk_sectors = mddev->chunk_sectors;
unsigned int bio_sectors = bvm->bi_size >> 9; unsigned int bio_sectors = bvm->bi_size >> 9;
if ((bvm->bi_rw & 1) == WRITE) if ((bvm->bi_rw & 1) == WRITE)
return biovec->bv_len; /* always allow writes to be mergeable */ return biovec->bv_len; /* always allow writes to be mergeable */
if (mddev->new_chunk < mddev->chunk_size) if (mddev->new_chunk < mddev->chunk_sectors << 9)
chunk_sectors = mddev->new_chunk >> 9; chunk_sectors = mddev->new_chunk >> 9;
max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
if (max < 0) max = 0; if (max < 0) max = 0;
...@@ -3372,10 +3372,10 @@ static int raid5_mergeable_bvec(struct request_queue *q, ...@@ -3372,10 +3372,10 @@ static int raid5_mergeable_bvec(struct request_queue *q,
static int in_chunk_boundary(mddev_t *mddev, struct bio *bio) static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
{ {
sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
unsigned int chunk_sectors = mddev->chunk_size >> 9; unsigned int chunk_sectors = mddev->chunk_sectors;
unsigned int bio_sectors = bio->bi_size >> 9; unsigned int bio_sectors = bio->bi_size >> 9;
if (mddev->new_chunk < mddev->chunk_size) if (mddev->new_chunk < mddev->chunk_sectors << 9)
chunk_sectors = mddev->new_chunk >> 9; chunk_sectors = mddev->new_chunk >> 9;
return chunk_sectors >= return chunk_sectors >=
((sector & (chunk_sectors - 1)) + bio_sectors); ((sector & (chunk_sectors - 1)) + bio_sectors);
...@@ -3791,10 +3791,10 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped ...@@ -3791,10 +3791,10 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
* If old and new chunk sizes differ, we need to process the * If old and new chunk sizes differ, we need to process the
* largest of these * largest of these
*/ */
if (mddev->new_chunk > mddev->chunk_size) if (mddev->new_chunk > mddev->chunk_sectors << 9)
reshape_sectors = mddev->new_chunk / 512; reshape_sectors = mddev->new_chunk / 512;
else else
reshape_sectors = mddev->chunk_size / 512; reshape_sectors = mddev->chunk_sectors;
/* we update the metadata when there is more than 3Meg /* we update the metadata when there is more than 3Meg
* in the block range (that is rather arbitrary, should * in the block range (that is rather arbitrary, should
...@@ -4303,7 +4303,7 @@ raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks) ...@@ -4303,7 +4303,7 @@ raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
raid_disks = conf->previous_raid_disks; raid_disks = conf->previous_raid_disks;
} }
sectors &= ~((sector_t)mddev->chunk_size/512 - 1); sectors &= ~((sector_t)mddev->chunk_sectors - 1);
sectors &= ~((sector_t)mddev->new_chunk/512 - 1); sectors &= ~((sector_t)mddev->new_chunk/512 - 1);
return sectors * (raid_disks - conf->max_degraded); return sectors * (raid_disks - conf->max_degraded);
} }
...@@ -4412,7 +4412,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) ...@@ -4412,7 +4412,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
conf->max_nr_stripes = NR_STRIPES; conf->max_nr_stripes = NR_STRIPES;
conf->reshape_progress = mddev->reshape_position; conf->reshape_progress = mddev->reshape_position;
if (conf->reshape_progress != MaxSector) { if (conf->reshape_progress != MaxSector) {
conf->prev_chunk = mddev->chunk_size; conf->prev_chunk = mddev->chunk_sectors << 9;
conf->prev_algo = mddev->layout; conf->prev_algo = mddev->layout;
} }
...@@ -4484,7 +4484,7 @@ static int run(mddev_t *mddev) ...@@ -4484,7 +4484,7 @@ static int run(mddev_t *mddev)
} }
/* here_new is the stripe we will write to */ /* here_new is the stripe we will write to */
here_old = mddev->reshape_position; here_old = mddev->reshape_position;
sector_div(here_old, (mddev->chunk_size>>9)* sector_div(here_old, mddev->chunk_sectors *
(old_disks-max_degraded)); (old_disks-max_degraded));
/* here_old is the first stripe that we might need to read /* here_old is the first stripe that we might need to read
* from */ * from */
...@@ -4499,7 +4499,7 @@ static int run(mddev_t *mddev) ...@@ -4499,7 +4499,7 @@ static int run(mddev_t *mddev)
} else { } else {
BUG_ON(mddev->level != mddev->new_level); BUG_ON(mddev->level != mddev->new_level);
BUG_ON(mddev->layout != mddev->new_layout); BUG_ON(mddev->layout != mddev->new_layout);
BUG_ON(mddev->chunk_size != mddev->new_chunk); BUG_ON(mddev->chunk_sectors << 9 != mddev->new_chunk);
BUG_ON(mddev->delta_disks != 0); BUG_ON(mddev->delta_disks != 0);
} }
...@@ -4533,7 +4533,7 @@ static int run(mddev_t *mddev) ...@@ -4533,7 +4533,7 @@ static int run(mddev_t *mddev)
} }
/* device size must be a multiple of chunk size */ /* device size must be a multiple of chunk size */
mddev->dev_sectors &= ~(mddev->chunk_size / 512 - 1); mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
mddev->resync_max_sectors = mddev->dev_sectors; mddev->resync_max_sectors = mddev->dev_sectors;
if (mddev->degraded > 0 && if (mddev->degraded > 0 &&
...@@ -4582,7 +4582,7 @@ static int run(mddev_t *mddev) ...@@ -4582,7 +4582,7 @@ static int run(mddev_t *mddev)
{ {
int data_disks = conf->previous_raid_disks - conf->max_degraded; int data_disks = conf->previous_raid_disks - conf->max_degraded;
int stripe = data_disks * int stripe = data_disks *
(mddev->chunk_size / PAGE_SIZE); ((mddev->chunk_sectors << 9) / PAGE_SIZE);
if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
mddev->queue->backing_dev_info.ra_pages = 2 * stripe; mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
} }
...@@ -4679,7 +4679,8 @@ static void status(struct seq_file *seq, mddev_t *mddev) ...@@ -4679,7 +4679,8 @@ static void status(struct seq_file *seq, mddev_t *mddev)
raid5_conf_t *conf = (raid5_conf_t *) mddev->private; raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
int i; int i;
seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout); seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
mddev->chunk_sectors / 2, mddev->layout);
seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
for (i = 0; i < conf->raid_disks; i++) for (i = 0; i < conf->raid_disks; i++)
seq_printf (seq, "%s", seq_printf (seq, "%s",
...@@ -4827,7 +4828,7 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors) ...@@ -4827,7 +4828,7 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
* any io in the removed space completes, but it hardly seems * any io in the removed space completes, but it hardly seems
* worth it. * worth it.
*/ */
sectors &= ~((sector_t)mddev->chunk_size/512 - 1); sectors &= ~((sector_t)mddev->chunk_sectors - 1);
md_set_array_sectors(mddev, raid5_size(mddev, sectors, md_set_array_sectors(mddev, raid5_size(mddev, sectors,
mddev->raid_disks)); mddev->raid_disks));
if (mddev->array_sectors > if (mddev->array_sectors >
...@@ -4850,7 +4851,7 @@ static int raid5_check_reshape(mddev_t *mddev) ...@@ -4850,7 +4851,7 @@ static int raid5_check_reshape(mddev_t *mddev)
if (mddev->delta_disks == 0 && if (mddev->delta_disks == 0 &&
mddev->new_layout == mddev->layout && mddev->new_layout == mddev->layout &&
mddev->new_chunk == mddev->chunk_size) mddev->new_chunk == mddev->chunk_sectors << 9)
return -EINVAL; /* nothing to do */ return -EINVAL; /* nothing to do */
if (mddev->bitmap) if (mddev->bitmap)
/* Cannot grow a bitmap yet */ /* Cannot grow a bitmap yet */
...@@ -4878,10 +4879,11 @@ static int raid5_check_reshape(mddev_t *mddev) ...@@ -4878,10 +4879,11 @@ static int raid5_check_reshape(mddev_t *mddev)
* If the chunk size is greater, user-space should request more * If the chunk size is greater, user-space should request more
* stripe_heads first. * stripe_heads first.
*/ */
if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes || if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
> conf->max_nr_stripes ||
(mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) { (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) {
printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n", printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
(max(mddev->chunk_size, mddev->new_chunk) (max(mddev->chunk_sectors << 9, mddev->new_chunk)
/ STRIPE_SIZE)*4); / STRIPE_SIZE)*4);
return -ENOSPC; return -ENOSPC;
} }
...@@ -5054,7 +5056,7 @@ static void raid5_finish_reshape(mddev_t *mddev) ...@@ -5054,7 +5056,7 @@ static void raid5_finish_reshape(mddev_t *mddev)
raid5_remove_disk(mddev, d); raid5_remove_disk(mddev, d);
} }
mddev->layout = conf->algorithm; mddev->layout = conf->algorithm;
mddev->chunk_size = conf->chunk_size; mddev->chunk_sectors = conf->chunk_size >> 9;
mddev->reshape_position = MaxSector; mddev->reshape_position = MaxSector;
mddev->delta_disks = 0; mddev->delta_disks = 0;
} }
...@@ -5183,7 +5185,8 @@ static int raid5_reconfig(mddev_t *mddev, int new_layout, int new_chunk) ...@@ -5183,7 +5185,8 @@ static int raid5_reconfig(mddev_t *mddev, int new_layout, int new_chunk)
} }
if (new_chunk > 0) { if (new_chunk > 0) {
conf->chunk_size = new_chunk; conf->chunk_size = new_chunk;
mddev->chunk_size = mddev->new_chunk = new_chunk; mddev->new_chunk = new_chunk;
mddev->chunk_sectors = new_chunk >> 9;
} }
set_bit(MD_CHANGE_DEVS, &mddev->flags); set_bit(MD_CHANGE_DEVS, &mddev->flags);
md_wakeup_thread(mddev->thread); md_wakeup_thread(mddev->thread);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment