Commit 9db90880 authored by Linus Torvalds

Merge tag 'md-3.7' of git://neil.brown.name/md

Pull md updates from NeilBrown:
 - "discard" support, some dm-raid improvements and other assorted bits
   and pieces.

* tag 'md-3.7' of git://neil.brown.name/md: (29 commits)
  md: refine reporting of resync/reshape delays.
  md/raid5: be careful not to resize_stripes too big.
  md: make sure manual changes to recovery checkpoint are saved.
  md/raid10: use correct limit variable
  md: writing to sync_action should clear the read-auto state.
  md: change resync_mismatches to atomic64_t to avoid races
  md/raid5: make sure to_read and to_write never go negative.
  md: When RAID5 is dirty, force reconstruct-write instead of read-modify-write.
  md/raid5: protect debug message against NULL dereference.
  md/raid5: add some missing locking in handle_failed_stripe.
  MD: raid5 avoid unnecessary zero page for trim
  MD: raid5 trim support
  md/bitmap: Don't use IS_ERR to judge alloc_page().
  md/raid1: Don't release reference to device while handling read error.
  raid: replace list_for_each_continue_rcu with new interface
  add further __init annotations to crypto/xor.c
  DM RAID: Fix for "sync" directive ineffectiveness
  DM RAID: Fix comparison of index and quantity for "rebuild" parameter
  DM RAID: Add rebuild capability for RAID10
  DM RAID: Move 'rebuild' checking code to its own function
  ...
parents 4d7127da 72f36d59
@@ -132,3 +132,12 @@ Here we can see the RAID type is raid4, there are 5 devices - all of
 which are 'A'live, and the array is 2/490221568 complete with recovery.
 Faulty or missing devices are marked 'D'.  Devices that are out-of-sync
 are marked 'a'.
+
+Version History
+---------------
+1.0.0	Initial version.  Support for RAID 4/5/6
+1.1.0	Added support for RAID 1
+1.2.0	Handle creation of arrays that contain failed devices.
+1.3.0	Added support for RAID 10
+1.3.1	Allow device replacement/rebuild for RAID 10
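
For illustration (not from this patch; the start/length figures are hypothetical), a dmsetup status line matching the description above - 5 devices, all 'A'live, 2/490221568 resynced - would look roughly like:

0 1960893648 raid raid4 5 AAAAA 2/490221568
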
@@ -56,11 +56,11 @@ xor_blocks(unsigned int src_count, unsigned int bytes, void *dest, void **srcs)
 EXPORT_SYMBOL(xor_blocks);
 
 /* Set of all registered templates.  */
-static struct xor_block_template *template_list;
+static struct xor_block_template *__initdata template_list;
 
 #define BENCH_SIZE (PAGE_SIZE)
 
-static void
+static void __init
 do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
 {
 	int speed;
...
@@ -163,20 +163,17 @@ static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
	 * As devices are only added or removed when raid_disk is < 0 and
	 * nr_pending is 0 and In_sync is clear, the entries we return will
	 * still be in the same position on the list when we re-enter
-	 * list_for_each_continue_rcu.
+	 * list_for_each_entry_continue_rcu.
	 */
-	struct list_head *pos;
	rcu_read_lock();
	if (rdev == NULL)
		/* start at the beginning */
-		pos = &mddev->disks;
+		rdev = list_entry_rcu(&mddev->disks, struct md_rdev, same_set);
	else {
		/* release the previous rdev and start from there. */
		rdev_dec_pending(rdev, mddev);
-		pos = &rdev->same_set;
	}
-	list_for_each_continue_rcu(pos, &mddev->disks) {
-		rdev = list_entry(pos, struct md_rdev, same_set);
+	list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) {
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* this is a usable devices */
@@ -473,14 +470,10 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
 {
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;
-	int err = -EINVAL;
 
	bitmap->storage.sb_page = alloc_page(GFP_KERNEL);
-	if (IS_ERR(bitmap->storage.sb_page)) {
-		err = PTR_ERR(bitmap->storage.sb_page);
-		bitmap->storage.sb_page = NULL;
-		return err;
-	}
+	if (bitmap->storage.sb_page == NULL)
+		return -ENOMEM;
	bitmap->storage.sb_page->index = 0;
 
	sb = kmap_atomic(bitmap->storage.sb_page);
...
@@ -337,6 +337,84 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
	return 0;
 }
 
+/*
+ * validate_rebuild_devices
+ * @rs
+ *
+ * Determine if the devices specified for rebuild can result in a valid
+ * usable array that is capable of rebuilding the given devices.
+ *
+ * Returns: 0 on success, -EINVAL on failure.
+ */
+static int validate_rebuild_devices(struct raid_set *rs)
+{
+	unsigned i, rebuild_cnt = 0;
+	unsigned rebuilds_per_group, copies, d;
+
+	if (!(rs->print_flags & DMPF_REBUILD))
+		return 0;
+
+	for (i = 0; i < rs->md.raid_disks; i++)
+		if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
+			rebuild_cnt++;
+
+	switch (rs->raid_type->level) {
+	case 1:
+		if (rebuild_cnt >= rs->md.raid_disks)
+			goto too_many;
+		break;
+	case 4:
+	case 5:
+	case 6:
+		if (rebuild_cnt > rs->raid_type->parity_devs)
+			goto too_many;
+		break;
+	case 10:
+		copies = raid10_md_layout_to_copies(rs->md.layout);
+		if (rebuild_cnt < copies)
+			break;
+
+		/*
+		 * It is possible to have a higher rebuild count for RAID10,
+		 * as long as the failed devices occur in different mirror
+		 * groups (i.e. different stripes).
+		 *
+		 * Right now, we only allow for "near" copies.  When other
+		 * formats are added, we will have to check those too.
+		 *
+		 * When checking "near" format, make sure no adjacent devices
+		 * have failed beyond what can be handled.  In addition to the
+		 * simple case where the number of devices is a multiple of the
+		 * number of copies, we must also handle cases where the number
+		 * of devices is not a multiple of the number of copies.
+		 * E.g.	dev1 dev2 dev3 dev4 dev5
+		 *	 A    A    B    B    C
+		 *	 C    D    D    E    E
+		 */
+		rebuilds_per_group = 0;
+		for (i = 0; i < rs->md.raid_disks * copies; i++) {
+			d = i % rs->md.raid_disks;
+			if (!test_bit(In_sync, &rs->dev[d].rdev.flags) &&
+			    (++rebuilds_per_group >= copies))
+				goto too_many;
+			if (!((i + 1) % copies))
+				rebuilds_per_group = 0;
+		}
+		break;
+	default:
+		DMERR("The rebuild parameter is not supported for %s",
+		      rs->raid_type->name);
+		rs->ti->error = "Rebuild not supported for this RAID type";
+		return -EINVAL;
+	}
+
+	return 0;
+
+too_many:
+	rs->ti->error = "Too many rebuild devices specified";
+	return -EINVAL;
+}
+
 /*
  * Possible arguments are...
  *	<chunk_size> [optional_args]
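
For illustration (a stand-alone user-space sketch, not kernel code from this patch), the "near"-layout check added above can be exercised with concrete numbers. With 5 devices and 2 copies the mirror groups are (dev1,dev2) (dev3,dev4) (dev5,dev1) (dev2,dev3) (dev4,dev5), so losing dev1 and dev3 is still rebuildable, while losing dev1 and dev2 wipes out one whole group:

#include <stdio.h>

/* Mirrors the rebuilds_per_group walk in validate_rebuild_devices(). */
static int too_many_rebuilds(const int *failed, int raid_disks, int copies)
{
	int rebuilds_per_group = 0;

	for (int i = 0; i < raid_disks * copies; i++) {
		int d = i % raid_disks;

		if (failed[d] && ++rebuilds_per_group >= copies)
			return 1;	/* an entire mirror group is gone */
		if (!((i + 1) % copies))
			rebuilds_per_group = 0;	/* next mirror group starts */
	}
	return 0;
}

int main(void)
{
	int spread[5]   = { 1, 0, 1, 0, 0 };	/* dev1 and dev3 failed */
	int adjacent[5] = { 1, 1, 0, 0, 0 };	/* dev1 and dev2 failed */

	printf("spread:   %s\n",
	       too_many_rebuilds(spread, 5, 2) ? "too many" : "rebuildable");
	printf("adjacent: %s\n",
	       too_many_rebuilds(adjacent, 5, 2) ? "too many" : "rebuildable");
	return 0;
}

Any C99 compiler will build this; the first call prints "rebuildable", the second "too many", matching the kernel comment's example layout.
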
@@ -365,7 +443,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 {
	char *raid10_format = "near";
	unsigned raid10_copies = 2;
-	unsigned i, rebuild_cnt = 0;
+	unsigned i;
	unsigned long value, region_size = 0;
	sector_t sectors_per_dev = rs->ti->len;
	sector_t max_io_len;
@@ -461,31 +539,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
		/* Parameters that take a numeric value are checked here */
		if (!strcasecmp(key, "rebuild")) {
-			rebuild_cnt++;
-
-			switch (rs->raid_type->level) {
-			case 1:
-				if (rebuild_cnt >= rs->md.raid_disks) {
-					rs->ti->error = "Too many rebuild devices specified";
-					return -EINVAL;
-				}
-				break;
-			case 4:
-			case 5:
-			case 6:
-				if (rebuild_cnt > rs->raid_type->parity_devs) {
-					rs->ti->error = "Too many rebuild devices specified for given RAID type";
-					return -EINVAL;
-				}
-				break;
-			case 10:
-			default:
-				DMERR("The rebuild parameter is not supported for %s", rs->raid_type->name);
-				rs->ti->error = "Rebuild not supported for this RAID type";
-				return -EINVAL;
-			}
-
-			if (value > rs->md.raid_disks) {
+			if (value >= rs->md.raid_disks) {
				rs->ti->error = "Invalid rebuild index given";
				return -EINVAL;
			}
@@ -608,6 +662,9 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
	}
	rs->md.dev_sectors = sectors_per_dev;
 
+	if (validate_rebuild_devices(rs))
+		return -EINVAL;
+
	/* Assume there are no metadata devices until the drives are parsed */
	rs->md.persistent = 0;
	rs->md.external = 1;
@@ -960,6 +1017,19 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
	freshest = NULL;
	rdev_for_each_safe(rdev, tmp, mddev) {
+		/*
+		 * Skipping super_load due to DMPF_SYNC will cause
+		 * the array to undergo initialization again as
+		 * though it were new.  This is the intended effect
+		 * of the "sync" directive.
+		 *
+		 * When reshaping capability is added, we must ensure
+		 * that the "sync" directive is disallowed during the
+		 * reshape.
+		 */
+		if (rs->print_flags & DMPF_SYNC)
+			continue;
+
		if (!rdev->meta_bdev)
			continue;
@@ -1360,7 +1430,7 @@ static void raid_resume(struct dm_target *ti)
 static struct target_type raid_target = {
	.name = "raid",
-	.version = {1, 3, 0},
+	.version = {1, 3, 1},
	.module = THIS_MODULE,
	.ctr = raid_ctr,
	.dtr = raid_dtr,
...
@@ -138,6 +138,7 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
	struct linear_conf *conf;
	struct md_rdev *rdev;
	int i, cnt;
+	bool discard_supported = false;
 
	conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(struct dev_info),
			GFP_KERNEL);
@@ -171,6 +172,8 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
		conf->array_sectors += rdev->sectors;
		cnt++;
+
+		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+			discard_supported = true;
	}
	if (cnt != raid_disks) {
		printk(KERN_ERR "md/linear:%s: not enough drives present. Aborting!\n",
@@ -178,6 +181,11 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
		goto out;
	}
 
+	if (!discard_supported)
+		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+	else
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+
	/*
	 * Here we calculate the device offsets.
	 */
@@ -244,7 +252,9 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
	if (!newconf)
		return -ENOMEM;
 
-	oldconf = rcu_dereference(mddev->private);
+	oldconf = rcu_dereference_protected(mddev->private,
+					    lockdep_is_held(
+						    &mddev->reconfig_mutex));
	mddev->raid_disks++;
	rcu_assign_pointer(mddev->private, newconf);
	md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
@@ -256,7 +266,10 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
 
 static int linear_stop (struct mddev *mddev)
 {
-	struct linear_conf *conf = mddev->private;
+	struct linear_conf *conf =
+		rcu_dereference_protected(mddev->private,
+					  lockdep_is_held(
+						  &mddev->reconfig_mutex));
 
	/*
	 * We do not require rcu protection here since
@@ -326,6 +339,14 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
	bio->bi_sector = bio->bi_sector - start_sector
		+ tmp_dev->rdev->data_offset;
	rcu_read_unlock();
+
+	if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+		     !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
+		/* Just ignore it */
+		bio_endio(bio, 0);
+		return;
+	}
+
	generic_make_request(bio);
 }
...
This diff is collapsed.
@@ -282,7 +282,7 @@ struct mddev {
	sector_t	resync_max_sectors; /* may be set by personality */
-	sector_t	resync_mismatches; /* count of sectors where
+	atomic64_t	resync_mismatches; /* count of sectors where
					    * parity/replica mismatch found
					    */
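
For illustration (a user-space sketch using C11 atomics, not the kernel atomic64 API this patch uses): the point of the atomic64_t is that two resync threads doing a plain "resync_mismatches += sectors" perform a racy read-modify-write that can drop updates, whereas an atomic add cannot:

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static atomic_llong mismatches;	/* stands in for mddev->resync_mismatches */

static void *checker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++)
		atomic_fetch_add(&mismatches, 8);	/* like atomic64_add(8, ...) */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, checker, NULL);
	pthread_create(&b, NULL, checker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* With a plain long long and "+=", this total could come up short. */
	printf("%lld\n", (long long)atomic_load(&mismatches));
	return 0;
}

The update sites in the raid1.c and raid10.c hunks below make the equivalent change with atomic64_add().
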
@@ -540,12 +540,13 @@ static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
		list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
 
 struct md_thread {
-	void			(*run) (struct mddev *mddev);
+	void			(*run) (struct md_thread *thread);
	struct mddev		*mddev;
	wait_queue_head_t	wqueue;
	unsigned long		flags;
	struct task_struct	*tsk;
	unsigned long		timeout;
+	void			*private;
 };
 
 #define THREAD_WAKEUP 0
@@ -584,7 +585,7 @@ static inline void safe_put_page(struct page *p)
 extern int register_md_personality(struct md_personality *p);
 extern int unregister_md_personality(struct md_personality *p);
 extern struct md_thread *md_register_thread(
-	void (*run)(struct mddev *mddev),
+	void (*run)(struct md_thread *thread),
	struct mddev *mddev,
	const char *name);
 extern void md_unregister_thread(struct md_thread **threadp);
@@ -603,7 +604,7 @@ extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
 extern void md_super_wait(struct mddev *mddev);
 extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
			struct page *page, int rw, bool metadata_op);
-extern void md_do_sync(struct mddev *mddev);
+extern void md_do_sync(struct md_thread *thread);
 extern void md_new_event(struct mddev *mddev);
 extern int md_allow_write(struct mddev *mddev);
 extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
...
@@ -335,8 +335,9 @@ static int multipath_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
 *	3.	Performs writes following reads for array syncronising.
 */
-static void multipathd (struct mddev *mddev)
+static void multipathd(struct md_thread *thread)
 {
+	struct mddev *mddev = thread->mddev;
	struct multipath_bh *mp_bh;
	struct bio *bio;
	unsigned long flags;
...
@@ -88,6 +88,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
	char b[BDEVNAME_SIZE];
	char b2[BDEVNAME_SIZE];
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
+	bool discard_supported = false;
 
	if (!conf)
		return -ENOMEM;
@@ -195,6 +196,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
+
+		if (blk_queue_discard(bdev_get_queue(rdev1->bdev)))
+			discard_supported = true;
	}
	if (cnt != mddev->raid_disks) {
		printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
@@ -272,6 +276,11 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
	blk_queue_io_opt(mddev->queue,
			 (mddev->chunk_sectors << 9) * mddev->raid_disks);
+
+	if (!discard_supported)
+		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+	else
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
 
	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;
@@ -423,6 +432,7 @@ static int raid0_run(struct mddev *mddev)
		return -EINVAL;
	blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
	blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
+	blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
 
	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
@@ -510,7 +520,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
		sector_t sector = bio->bi_sector;
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
-		if (bio->bi_vcnt != 1 ||
+		if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
@@ -536,6 +546,13 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
	bio->bi_sector = sector_offset + zone->dev_start +
		tmp_dev->data_offset;
+
+	if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+		     !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
+		/* Just ignore it */
+		bio_endio(bio, 0);
+		return;
+	}
 
	generic_make_request(bio);
	return;
...
@@ -333,9 +333,10 @@ static void raid1_end_read_request(struct bio *bio, int error)
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}
 
-	if (uptodate)
+	if (uptodate) {
		raid_end_bio_io(r1_bio);
-	else {
+		rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
+	} else {
		/*
		 * oops, read error:
		 */
@@ -349,9 +350,8 @@ static void raid1_end_read_request(struct bio *bio, int error)
			 (unsigned long long)r1_bio->sector);
		set_bit(R1BIO_ReadError, &r1_bio->state);
		reschedule_retry(r1_bio);
-		/* don't drop the reference on read_disk yet */
	}
-	rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
 }
 
 static void close_write(struct r1bio *r1_bio)
@@ -781,6 +781,11 @@ static void flush_pending_writes(struct r1conf *conf)
	while (bio) { /* submit pending writes */
		struct bio *next = bio->bi_next;
		bio->bi_next = NULL;
-		generic_make_request(bio);
+		if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+		    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+			/* Just ignore it */
+			bio_endio(bio, 0);
+		else
+			generic_make_request(bio);
		bio = next;
	}
@@ -994,6 +999,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
	const int rw = bio_data_dir(bio);
	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
	const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
+	const unsigned long do_discard = (bio->bi_rw
+						& (REQ_DISCARD | REQ_SECURE));
	struct md_rdev *blocked_rdev;
	struct blk_plug_cb *cb;
	struct raid1_plug_cb *plug = NULL;
@@ -1295,7 +1302,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
			   conf->mirrors[i].rdev->data_offset);
		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
		mbio->bi_end_io	= raid1_end_write_request;
-		mbio->bi_rw = WRITE | do_flush_fua | do_sync;
+		mbio->bi_rw = WRITE | do_flush_fua | do_sync | do_discard;
		mbio->bi_private = r1_bio;
 
		atomic_inc(&r1_bio->remaining);
@@ -1549,6 +1556,8 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
		clear_bit(Unmerged, &rdev->flags);
	}
	md_integrity_add_rdev(rdev, mddev);
+	if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
	print_conf(conf);
	return err;
 }
@@ -1867,7 +1876,7 @@ static int process_checks(struct r1bio *r1_bio)
		} else
			j = 0;
		if (j >= 0)
-			mddev->resync_mismatches += r1_bio->sectors;
+			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
			      && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
			/* No need to write to this device. */
@@ -2220,6 +2229,7 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
		unfreeze_array(conf);
	} else
		md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
+	rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
 
	bio = r1_bio->bios[r1_bio->read_disk];
	bdevname(bio->bi_bdev, b);
@@ -2285,8 +2295,9 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
	}
 }
 
-static void raid1d(struct mddev *mddev)
+static void raid1d(struct md_thread *thread)
 {
+	struct mddev *mddev = thread->mddev;
	struct r1bio *r1_bio;
	unsigned long flags;
	struct r1conf *conf = mddev->private;
@@ -2783,6 +2794,7 @@ static int run(struct mddev *mddev)
	int i;
	struct md_rdev *rdev;
	int ret;
+	bool discard_supported = false;
 
	if (mddev->level != 1) {
		printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
@@ -2812,6 +2824,8 @@ static int run(struct mddev *mddev)
			continue;
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);
+		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+			discard_supported = true;
	}
 
	mddev->degraded = 0;
@@ -2846,6 +2860,13 @@ static int run(struct mddev *mddev)
		mddev->queue->backing_dev_info.congested_fn = raid1_congested;
		mddev->queue->backing_dev_info.congested_data = mddev;
		blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec);
+
+		if (discard_supported)
+			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+						mddev->queue);
+		else
+			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
+						  mddev->queue);
	}
 
	ret = md_integrity_register(mddev);
...
@@ -911,6 +911,11 @@ static void flush_pending_writes(struct r10conf *conf)
	while (bio) { /* submit pending writes */
		struct bio *next = bio->bi_next;
		bio->bi_next = NULL;
-		generic_make_request(bio);
+		if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+		    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+			/* Just ignore it */
+			bio_endio(bio, 0);
+		else
+			generic_make_request(bio);
		bio = next;
	}
@@ -1050,6 +1055,44 @@ static sector_t choose_data_offset(struct r10bio *r10_bio,
		return rdev->new_data_offset;
 }
 
+struct raid10_plug_cb {
+	struct blk_plug_cb	cb;
+	struct bio_list		pending;
+	int			pending_cnt;
+};
+
+static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
+{
+	struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
+						   cb);
+	struct mddev *mddev = plug->cb.data;
+	struct r10conf *conf = mddev->private;
+	struct bio *bio;
+
+	if (from_schedule) {
+		spin_lock_irq(&conf->device_lock);
+		bio_list_merge(&conf->pending_bio_list, &plug->pending);
+		conf->pending_count += plug->pending_cnt;
+		spin_unlock_irq(&conf->device_lock);
+		md_wakeup_thread(mddev->thread);
+		kfree(plug);
+		return;
+	}
+
+	/* we aren't scheduling, so we can do the write-out directly. */
+	bio = bio_list_get(&plug->pending);
+	bitmap_unplug(mddev->bitmap);
+	wake_up(&conf->wait_barrier);
+
+	while (bio) { /* submit pending writes */
+		struct bio *next = bio->bi_next;
+		bio->bi_next = NULL;
+		generic_make_request(bio);
+		bio = next;
+	}
+	kfree(plug);
+}
+
 static void make_request(struct mddev *mddev, struct bio * bio)
 {
	struct r10conf *conf = mddev->private;
@@ -1061,8 +1104,12 @@ static void make_request(struct mddev *mddev, struct bio * bio)
	const int rw = bio_data_dir(bio);
	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
	const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
+	const unsigned long do_discard = (bio->bi_rw
+					& (REQ_DISCARD | REQ_SECURE));
	unsigned long flags;
	struct md_rdev *blocked_rdev;
+	struct blk_plug_cb *cb;
+	struct raid10_plug_cb *plug = NULL;
	int sectors_handled;
	int max_sectors;
	int sectors;
@@ -1081,7 +1128,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
		     || conf->prev.near_copies < conf->prev.raid_disks))) {
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
-		if (bio->bi_vcnt != 1 ||
+		if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
@@ -1410,15 +1457,26 @@ static void make_request(struct mddev *mddev, struct bio * bio)
							      conf->mirrors[d].rdev));
		mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
		mbio->bi_end_io	= raid10_end_write_request;
-		mbio->bi_rw = WRITE | do_sync | do_fua;
+		mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
		mbio->bi_private = r10_bio;
 
		atomic_inc(&r10_bio->remaining);
+
+		cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
+		if (cb)
+			plug = container_of(cb, struct raid10_plug_cb, cb);
+		else
+			plug = NULL;
		spin_lock_irqsave(&conf->device_lock, flags);
-		bio_list_add(&conf->pending_bio_list, mbio);
-		conf->pending_count++;
+		if (plug) {
+			bio_list_add(&plug->pending, mbio);
+			plug->pending_cnt++;
+		} else {
+			bio_list_add(&conf->pending_bio_list, mbio);
+			conf->pending_count++;
+		}
		spin_unlock_irqrestore(&conf->device_lock, flags);
-		if (!mddev_check_plugged(mddev))
+		if (!plug)
			md_wakeup_thread(mddev->thread);
 
		if (!r10_bio->devs[i].repl_bio)
@@ -1439,7 +1497,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
							   conf->mirrors[d].replacement));
		mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
		mbio->bi_end_io	= raid10_end_write_request;
-		mbio->bi_rw = WRITE | do_sync | do_fua;
+		mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
		mbio->bi_private = r10_bio;
 
		atomic_inc(&r10_bio->remaining);
@@ -1638,7 +1696,7 @@ static int raid10_spare_active(struct mddev *mddev)
			   && !test_bit(Faulty, &tmp->rdev->flags)
			   && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
			count++;
-			sysfs_notify_dirent(tmp->rdev->sysfs_state);
+			sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
		}
	}
	spin_lock_irqsave(&conf->device_lock, flags);
@@ -1725,6 +1783,9 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
		clear_bit(Unmerged, &rdev->flags);
	}
	md_integrity_add_rdev(rdev, mddev);
+	if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+
	print_conf(conf);
	return err;
 }
@@ -1952,7 +2013,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
				break;
		if (j == vcnt)
			continue;
-		mddev->resync_mismatches += r10_bio->sectors;
+		atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
			/* Don't fix anything. */
			continue;
@@ -2673,8 +2734,9 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
	}
 }
 
-static void raid10d(struct mddev *mddev)
+static void raid10d(struct md_thread *thread)
 {
+	struct mddev *mddev = thread->mddev;
	struct r10bio *r10_bio;
	unsigned long flags;
	struct r10conf *conf = mddev->private;
@@ -3158,7 +3220,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
				else {
					bad_sectors -= (sector - first_bad);
					if (max_sync > bad_sectors)
-						max_sync = max_sync;
+						max_sync = bad_sectors;
					continue;
				}
			}
@@ -3482,6 +3544,7 @@ static int run(struct mddev *mddev)
	sector_t size;
	sector_t min_offset_diff = 0;
	int first = 1;
+	bool discard_supported = false;
 
	if (mddev->private == NULL) {
		conf = setup_conf(mddev);
@@ -3498,6 +3561,8 @@ static int run(struct mddev *mddev)
	chunk_size = mddev->chunk_sectors << 9;
	if (mddev->queue) {
+		blk_queue_max_discard_sectors(mddev->queue,
+					      mddev->chunk_sectors);
		blk_queue_io_min(mddev->queue, chunk_size);
		if (conf->geo.raid_disks % conf->geo.near_copies)
			blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
@@ -3543,8 +3608,16 @@ static int run(struct mddev *mddev)
				  rdev->data_offset << 9);
 
		disk->head_position = 0;
+
+		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+			discard_supported = true;
	}
 
+	if (discard_supported)
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+	else
+		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+
	/* need to check that every block has at least one working mirror */
	if (!enough(conf, -1)) {
		printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
...
This diff is collapsed.
@@ -298,6 +298,7 @@ enum r5dev_flags {
	R5_WantReplace, /* We need to update the replacement, we have read
			 * data in, and now is a good time to write it out.
			 */
+	R5_Discard,	/* Discard the stripe */
 };
 
 /*
...