Commit 81eb3dd8 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'for-linus' of git://neil.brown.name/md

* 'for-linus' of git://neil.brown.name/md:
  md/raid5: remove unusual use of bio_iovec_idx()
  md/raid5: fix FUA request handling in ops_run_io()
  md/raid5: fix raid5_set_bi_hw_segments
  md:Documentation/md.txt - fix typo
  md/bitmap: remove unused fields from struct bitmap
  md/bitmap: use proper accessor macro
  md: check ->hot_remove_disk when removing disk
  md: Using poll  /proc/mdstat can monitor the events of adding a spare disks
  MD: use is_power_of_2 macro
  MD: raid5 do not set fullsync
  MD: support initial bitmap creation in-kernel
  MD: add sync_super to mddev_t struct
  MD: raid1 changes to allow use by device mapper
  MD: move thread wakeups into resume
  MD: possible typo
  MD: no sync IO while suspended
  MD: no integrity register if no gendisk
parents 3e483f46 fcde9075
...@@ -555,7 +555,7 @@ also have ...@@ -555,7 +555,7 @@ also have
sync_min sync_min
sync_max sync_max
The two values, given as numbers of sectors, indicate a range The two values, given as numbers of sectors, indicate a range
withing the array where 'check'/'repair' will operate. Must be within the array where 'check'/'repair' will operate. Must be
a multiple of chunk_size. When it reaches "sync_max" it will a multiple of chunk_size. When it reaches "sync_max" it will
pause, rather than complete. pause, rather than complete.
You can use 'select' or 'poll' on "sync_completed" to wait for You can use 'select' or 'poll' on "sync_completed" to wait for
......
...@@ -534,6 +534,82 @@ void bitmap_print_sb(struct bitmap *bitmap) ...@@ -534,6 +534,82 @@ void bitmap_print_sb(struct bitmap *bitmap)
kunmap_atomic(sb, KM_USER0); kunmap_atomic(sb, KM_USER0);
} }
/*
 * bitmap_new_disk_sb
 * @bitmap
 *
 * This function is somewhat the reverse of bitmap_read_sb.  bitmap_read_sb
 * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
 * This function verifies 'bitmap_info' and populates the on-disk bitmap
 * structure, which is to be written to disk.
 *
 * Returns: 0 on success, -Exxx on error
 */
static int bitmap_new_disk_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;

	/*
	 * alloc_page() returns NULL on failure, never an ERR_PTR() value,
	 * so checking IS_ERR() here would never fire and a failed
	 * allocation would be dereferenced below.  Test for NULL instead.
	 */
	bitmap->sb_page = alloc_page(GFP_KERNEL);
	if (bitmap->sb_page == NULL)
		return -ENOMEM;
	bitmap->sb_page->index = 0;

	sb = kmap_atomic(bitmap->sb_page, KM_USER0);

	sb->magic = cpu_to_le32(BITMAP_MAGIC);
	sb->version = cpu_to_le32(BITMAP_MAJOR_HI);

	chunksize = bitmap->mddev->bitmap_info.chunksize;
	BUG_ON(!chunksize);
	if (!is_power_of_2(chunksize)) {
		kunmap_atomic(sb, KM_USER0);
		printk(KERN_ERR "bitmap chunksize not a power of 2\n");
		return -EINVAL;
	}
	sb->chunksize = cpu_to_le32(chunksize);

	daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
	/* daemon_sleep is unsigned, so "== 0" and "< 1" are the same test */
	if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
		printk(KERN_INFO "Choosing daemon_sleep default (5 sec)\n");
		daemon_sleep = 5 * HZ;
	}
	sb->daemon_sleep = cpu_to_le32(daemon_sleep);
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;

	/*
	 * FIXME: write_behind for RAID1.  If not specified, what
	 * is a good choice?  We choose COUNTER_MAX / 2 arbitrarily.
	 */
	write_behind = bitmap->mddev->bitmap_info.max_write_behind;
	if (write_behind > COUNTER_MAX)
		write_behind = COUNTER_MAX / 2;
	sb->write_behind = cpu_to_le32(write_behind);
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	memcpy(sb->uuid, bitmap->mddev->uuid, 16);

	/* a freshly-created bitmap has no valid bits yet: mark it stale so
	 * the first sync populates it, and record the current event count */
	bitmap->flags |= BITMAP_STALE;
	sb->state |= cpu_to_le32(BITMAP_STALE);
	bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->mddev->events);

	/* in-kernel creation always uses host-endian bit layout */
	bitmap->flags |= BITMAP_HOSTENDIAN;
	sb->version = cpu_to_le32(BITMAP_MAJOR_HOSTENDIAN);

	kunmap_atomic(sb, KM_USER0);

	return 0;
}
/* read the superblock from the bitmap file and initialize some bitmap fields */ /* read the superblock from the bitmap file and initialize some bitmap fields */
static int bitmap_read_sb(struct bitmap *bitmap) static int bitmap_read_sb(struct bitmap *bitmap)
{ {
...@@ -575,7 +651,7 @@ static int bitmap_read_sb(struct bitmap *bitmap) ...@@ -575,7 +651,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
reason = "unrecognized superblock version"; reason = "unrecognized superblock version";
else if (chunksize < 512) else if (chunksize < 512)
reason = "bitmap chunksize too small"; reason = "bitmap chunksize too small";
else if ((1 << ffz(~chunksize)) != chunksize) else if (!is_power_of_2(chunksize))
reason = "bitmap chunksize not a power of 2"; reason = "bitmap chunksize not a power of 2";
else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT) else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
reason = "daemon sleep period out of range"; reason = "daemon sleep period out of range";
...@@ -1076,8 +1152,8 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) ...@@ -1076,8 +1152,8 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
} }
printk(KERN_INFO "%s: bitmap initialized from disk: " printk(KERN_INFO "%s: bitmap initialized from disk: "
"read %lu/%lu pages, set %lu bits\n", "read %lu/%lu pages, set %lu of %lu bits\n",
bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt); bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt, chunks);
return 0; return 0;
...@@ -1332,7 +1408,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect ...@@ -1332,7 +1408,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
return 0; return 0;
} }
if (unlikely((*bmc & COUNTER_MAX) == COUNTER_MAX)) { if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
DEFINE_WAIT(__wait); DEFINE_WAIT(__wait);
/* note that it is safe to do the prepare_to_wait /* note that it is safe to do the prepare_to_wait
* after the test as long as we do it before dropping * after the test as long as we do it before dropping
...@@ -1404,10 +1480,10 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto ...@@ -1404,10 +1480,10 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
sysfs_notify_dirent_safe(bitmap->sysfs_can_clear); sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
} }
if (!success && ! (*bmc & NEEDED_MASK)) if (!success && !NEEDED(*bmc))
*bmc |= NEEDED_MASK; *bmc |= NEEDED_MASK;
if ((*bmc & COUNTER_MAX) == COUNTER_MAX) if (COUNTER(*bmc) == COUNTER_MAX)
wake_up(&bitmap->overflow_wait); wake_up(&bitmap->overflow_wait);
(*bmc)--; (*bmc)--;
...@@ -1728,9 +1804,16 @@ int bitmap_create(mddev_t *mddev) ...@@ -1728,9 +1804,16 @@ int bitmap_create(mddev_t *mddev)
vfs_fsync(file, 1); vfs_fsync(file, 1);
} }
/* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */ /* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
if (!mddev->bitmap_info.external) if (!mddev->bitmap_info.external) {
/*
* If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
* instructing us to create a new on-disk bitmap instance.
*/
if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
err = bitmap_new_disk_sb(bitmap);
else
err = bitmap_read_sb(bitmap); err = bitmap_read_sb(bitmap);
else { } else {
err = 0; err = 0;
if (mddev->bitmap_info.chunksize == 0 || if (mddev->bitmap_info.chunksize == 0 ||
mddev->bitmap_info.daemon_sleep == 0) mddev->bitmap_info.daemon_sleep == 0)
...@@ -1754,9 +1837,6 @@ int bitmap_create(mddev_t *mddev) ...@@ -1754,9 +1837,6 @@ int bitmap_create(mddev_t *mddev)
bitmap->chunks = chunks; bitmap->chunks = chunks;
bitmap->pages = pages; bitmap->pages = pages;
bitmap->missing_pages = pages; bitmap->missing_pages = pages;
bitmap->counter_bits = COUNTER_BITS;
bitmap->syncchunk = ~0UL;
#ifdef INJECT_FATAL_FAULT_1 #ifdef INJECT_FATAL_FAULT_1
bitmap->bp = NULL; bitmap->bp = NULL;
......
...@@ -85,7 +85,6 @@ ...@@ -85,7 +85,6 @@
typedef __u16 bitmap_counter_t; typedef __u16 bitmap_counter_t;
#define COUNTER_BITS 16 #define COUNTER_BITS 16
#define COUNTER_BIT_SHIFT 4 #define COUNTER_BIT_SHIFT 4
#define COUNTER_BYTE_RATIO (COUNTER_BITS / 8)
#define COUNTER_BYTE_SHIFT (COUNTER_BIT_SHIFT - 3) #define COUNTER_BYTE_SHIFT (COUNTER_BIT_SHIFT - 3)
#define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1))) #define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1)))
...@@ -196,19 +195,10 @@ struct bitmap { ...@@ -196,19 +195,10 @@ struct bitmap {
mddev_t *mddev; /* the md device that the bitmap is for */ mddev_t *mddev; /* the md device that the bitmap is for */
int counter_bits; /* how many bits per block counter */
/* bitmap chunksize -- how much data does each bit represent? */ /* bitmap chunksize -- how much data does each bit represent? */
unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */ unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */
unsigned long chunks; /* total number of data chunks for the array */ unsigned long chunks; /* total number of data chunks for the array */
/* We hold a count on the chunk currently being synced, and drop
* it when the last block is started. If the resync is aborted
* midway, we need to be able to drop that count, so we remember
* the counted chunk..
*/
unsigned long syncchunk;
__u64 events_cleared; __u64 events_cleared;
int need_sync; int need_sync;
......
...@@ -351,6 +351,9 @@ void mddev_resume(mddev_t *mddev) ...@@ -351,6 +351,9 @@ void mddev_resume(mddev_t *mddev)
mddev->suspended = 0; mddev->suspended = 0;
wake_up(&mddev->sb_wait); wake_up(&mddev->sb_wait);
mddev->pers->quiesce(mddev, 0); mddev->pers->quiesce(mddev, 0);
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
} }
EXPORT_SYMBOL_GPL(mddev_resume); EXPORT_SYMBOL_GPL(mddev_resume);
...@@ -1750,6 +1753,18 @@ static struct super_type super_types[] = { ...@@ -1750,6 +1753,18 @@ static struct super_type super_types[] = {
}, },
}; };
/* Write out one rdev's superblock: a device-mapper supplied sync_super
 * hook takes precedence over the in-kernel metadata handler table. */
static void sync_super(mddev_t *mddev, mdk_rdev_t *rdev)
{
	if (mddev->sync_super) {
		mddev->sync_super(mddev, rdev);
	} else {
		BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
		super_types[mddev->major_version].sync_super(mddev, rdev);
	}
}
static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2) static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{ {
mdk_rdev_t *rdev, *rdev2; mdk_rdev_t *rdev, *rdev2;
...@@ -1781,8 +1796,8 @@ int md_integrity_register(mddev_t *mddev) ...@@ -1781,8 +1796,8 @@ int md_integrity_register(mddev_t *mddev)
if (list_empty(&mddev->disks)) if (list_empty(&mddev->disks))
return 0; /* nothing to do */ return 0; /* nothing to do */
if (blk_get_integrity(mddev->gendisk)) if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
return 0; /* already registered */ return 0; /* shouldn't register, or already is */
list_for_each_entry(rdev, &mddev->disks, same_set) { list_for_each_entry(rdev, &mddev->disks, same_set) {
/* skip spares and non-functional disks */ /* skip spares and non-functional disks */
if (test_bit(Faulty, &rdev->flags)) if (test_bit(Faulty, &rdev->flags))
...@@ -2168,7 +2183,6 @@ static void sync_sbs(mddev_t * mddev, int nospares) ...@@ -2168,7 +2183,6 @@ static void sync_sbs(mddev_t * mddev, int nospares)
/* Don't update this superblock */ /* Don't update this superblock */
rdev->sb_loaded = 2; rdev->sb_loaded = 2;
} else { } else {
super_types[mddev->major_version].
sync_super(mddev, rdev); sync_super(mddev, rdev);
rdev->sb_loaded = 1; rdev->sb_loaded = 1;
} }
...@@ -2462,7 +2476,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) ...@@ -2462,7 +2476,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
if (rdev->raid_disk == -1) if (rdev->raid_disk == -1)
return -EEXIST; return -EEXIST;
/* personality does all needed checks */ /* personality does all needed checks */
if (rdev->mddev->pers->hot_add_disk == NULL) if (rdev->mddev->pers->hot_remove_disk == NULL)
return -EINVAL; return -EINVAL;
err = rdev->mddev->pers-> err = rdev->mddev->pers->
hot_remove_disk(rdev->mddev, rdev->raid_disk); hot_remove_disk(rdev->mddev, rdev->raid_disk);
...@@ -4619,9 +4633,6 @@ int md_run(mddev_t *mddev) ...@@ -4619,9 +4633,6 @@ int md_run(mddev_t *mddev)
if (mddev->flags) if (mddev->flags)
md_update_sb(mddev, 0); md_update_sb(mddev, 0);
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
md_new_event(mddev); md_new_event(mddev);
sysfs_notify_dirent_safe(mddev->sysfs_state); sysfs_notify_dirent_safe(mddev->sysfs_state);
sysfs_notify_dirent_safe(mddev->sysfs_action); sysfs_notify_dirent_safe(mddev->sysfs_action);
...@@ -4642,6 +4653,10 @@ static int do_md_run(mddev_t *mddev) ...@@ -4642,6 +4653,10 @@ static int do_md_run(mddev_t *mddev)
bitmap_destroy(mddev); bitmap_destroy(mddev);
goto out; goto out;
} }
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
set_capacity(mddev->gendisk, mddev->array_sectors); set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk); revalidate_disk(mddev->gendisk);
mddev->changed = 1; mddev->changed = 1;
...@@ -5259,6 +5274,8 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) ...@@ -5259,6 +5274,8 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
if (mddev->degraded) if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
if (!err)
md_new_event(mddev);
md_wakeup_thread(mddev->thread); md_wakeup_thread(mddev->thread);
return err; return err;
} }
...@@ -6866,8 +6883,8 @@ void md_do_sync(mddev_t *mddev) ...@@ -6866,8 +6883,8 @@ void md_do_sync(mddev_t *mddev)
* Tune reconstruction: * Tune reconstruction:
*/ */
window = 32*(PAGE_SIZE/512); window = 32*(PAGE_SIZE/512);
printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n", printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
window/2,(unsigned long long) max_sectors/2); window/2, (unsigned long long)max_sectors/2);
atomic_set(&mddev->recovery_active, 0); atomic_set(&mddev->recovery_active, 0);
last_check = 0; last_check = 0;
...@@ -7045,7 +7062,6 @@ void md_do_sync(mddev_t *mddev) ...@@ -7045,7 +7062,6 @@ void md_do_sync(mddev_t *mddev)
} }
EXPORT_SYMBOL_GPL(md_do_sync); EXPORT_SYMBOL_GPL(md_do_sync);
static int remove_and_add_spares(mddev_t *mddev) static int remove_and_add_spares(mddev_t *mddev)
{ {
mdk_rdev_t *rdev; mdk_rdev_t *rdev;
...@@ -7157,6 +7173,9 @@ static void reap_sync_thread(mddev_t *mddev) ...@@ -7157,6 +7173,9 @@ static void reap_sync_thread(mddev_t *mddev)
*/ */
void md_check_recovery(mddev_t *mddev) void md_check_recovery(mddev_t *mddev)
{ {
if (mddev->suspended)
return;
if (mddev->bitmap) if (mddev->bitmap)
bitmap_daemon_work(mddev); bitmap_daemon_work(mddev);
......
...@@ -124,6 +124,7 @@ struct mddev_s ...@@ -124,6 +124,7 @@ struct mddev_s
#define MD_CHANGE_DEVS 0 /* Some device status has changed */ #define MD_CHANGE_DEVS 0 /* Some device status has changed */
#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ #define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
#define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */ #define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */
#define MD_ARRAY_FIRST_USE 3 /* First use of array, needs initialization */
int suspended; int suspended;
atomic_t active_io; atomic_t active_io;
...@@ -330,6 +331,7 @@ struct mddev_s ...@@ -330,6 +331,7 @@ struct mddev_s
atomic_t flush_pending; atomic_t flush_pending;
struct work_struct flush_work; struct work_struct flush_work;
struct work_struct event_work; /* used by dm to report failure event */ struct work_struct event_work; /* used by dm to report failure event */
void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
}; };
......
...@@ -497,21 +497,19 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) ...@@ -497,21 +497,19 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
return best_disk; return best_disk;
} }
static int raid1_congested(void *data, int bits) int md_raid1_congested(mddev_t *mddev, int bits)
{ {
mddev_t *mddev = data;
conf_t *conf = mddev->private; conf_t *conf = mddev->private;
int i, ret = 0; int i, ret = 0;
if (mddev_congested(mddev, bits))
return 1;
rcu_read_lock(); rcu_read_lock();
for (i = 0; i < mddev->raid_disks; i++) { for (i = 0; i < mddev->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) { if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct request_queue *q = bdev_get_queue(rdev->bdev); struct request_queue *q = bdev_get_queue(rdev->bdev);
BUG_ON(!q);
/* Note the '|| 1' - when read_balance prefers /* Note the '|| 1' - when read_balance prefers
* non-congested targets, it can be removed * non-congested targets, it can be removed
*/ */
...@@ -524,7 +522,15 @@ static int raid1_congested(void *data, int bits) ...@@ -524,7 +522,15 @@ static int raid1_congested(void *data, int bits)
rcu_read_unlock(); rcu_read_unlock();
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(md_raid1_congested);
/* congested_fn callback for the block layer: the whole array is congested
 * if either the md device itself or any raid1 member device is. */
static int raid1_congested(void *data, int bits)
{
	mddev_t *mddev = data;

	if (mddev_congested(mddev, bits))
		return 1;
	return md_raid1_congested(mddev, bits);
}
static void flush_pending_writes(conf_t *conf) static void flush_pending_writes(conf_t *conf)
{ {
...@@ -1972,6 +1978,8 @@ static int run(mddev_t *mddev) ...@@ -1972,6 +1978,8 @@ static int run(mddev_t *mddev)
return PTR_ERR(conf); return PTR_ERR(conf);
list_for_each_entry(rdev, &mddev->disks, same_set) { list_for_each_entry(rdev, &mddev->disks, same_set) {
if (!mddev->gendisk)
continue;
disk_stack_limits(mddev->gendisk, rdev->bdev, disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9); rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk /* as we don't honour merge_bvec_fn, we must never risk
...@@ -2013,8 +2021,10 @@ static int run(mddev_t *mddev) ...@@ -2013,8 +2021,10 @@ static int run(mddev_t *mddev)
md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
if (mddev->queue) {
mddev->queue->backing_dev_info.congested_fn = raid1_congested; mddev->queue->backing_dev_info.congested_fn = raid1_congested;
mddev->queue->backing_dev_info.congested_data = mddev; mddev->queue->backing_dev_info.congested_data = mddev;
}
return md_integrity_register(mddev); return md_integrity_register(mddev);
} }
......
...@@ -126,4 +126,6 @@ struct r1bio_s { ...@@ -126,4 +126,6 @@ struct r1bio_s {
*/ */
#define R1BIO_Returned 6 #define R1BIO_Returned 6
extern int md_raid1_congested(mddev_t *mddev, int bits);
#endif #endif
...@@ -129,7 +129,7 @@ static inline int raid5_dec_bi_hw_segments(struct bio *bio) ...@@ -129,7 +129,7 @@ static inline int raid5_dec_bi_hw_segments(struct bio *bio)
static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt) static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
{ {
bio->bi_phys_segments = raid5_bi_phys_segments(bio) || (cnt << 16); bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
} }
/* Find first data disk in a raid6 stripe */ /* Find first data disk in a raid6 stripe */
...@@ -514,7 +514,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) ...@@ -514,7 +514,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
bi = &sh->dev[i].req; bi = &sh->dev[i].req;
bi->bi_rw = rw; bi->bi_rw = rw;
if (rw == WRITE) if (rw & WRITE)
bi->bi_end_io = raid5_end_write_request; bi->bi_end_io = raid5_end_write_request;
else else
bi->bi_end_io = raid5_end_read_request; bi->bi_end_io = raid5_end_read_request;
...@@ -548,13 +548,13 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) ...@@ -548,13 +548,13 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
bi->bi_io_vec[0].bv_offset = 0; bi->bi_io_vec[0].bv_offset = 0;
bi->bi_size = STRIPE_SIZE; bi->bi_size = STRIPE_SIZE;
bi->bi_next = NULL; bi->bi_next = NULL;
if (rw == WRITE && if ((rw & WRITE) &&
test_bit(R5_ReWrite, &sh->dev[i].flags)) test_bit(R5_ReWrite, &sh->dev[i].flags))
atomic_add(STRIPE_SECTORS, atomic_add(STRIPE_SECTORS,
&rdev->corrected_errors); &rdev->corrected_errors);
generic_make_request(bi); generic_make_request(bi);
} else { } else {
if (rw == WRITE) if (rw & WRITE)
set_bit(STRIPE_DEGRADED, &sh->state); set_bit(STRIPE_DEGRADED, &sh->state);
pr_debug("skip op %ld on disc %d for sector %llu\n", pr_debug("skip op %ld on disc %d for sector %llu\n",
bi->bi_rw, i, (unsigned long long)sh->sector); bi->bi_rw, i, (unsigned long long)sh->sector);
...@@ -585,7 +585,7 @@ async_copy_data(int frombio, struct bio *bio, struct page *page, ...@@ -585,7 +585,7 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
init_async_submit(&submit, flags, tx, NULL, NULL, NULL); init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
bio_for_each_segment(bvl, bio, i) { bio_for_each_segment(bvl, bio, i) {
int len = bio_iovec_idx(bio, i)->bv_len; int len = bvl->bv_len;
int clen; int clen;
int b_offset = 0; int b_offset = 0;
...@@ -601,8 +601,8 @@ async_copy_data(int frombio, struct bio *bio, struct page *page, ...@@ -601,8 +601,8 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
clen = len; clen = len;
if (clen > 0) { if (clen > 0) {
b_offset += bio_iovec_idx(bio, i)->bv_offset; b_offset += bvl->bv_offset;
bio_page = bio_iovec_idx(bio, i)->bv_page; bio_page = bvl->bv_page;
if (frombio) if (frombio)
tx = async_memcpy(page, bio_page, page_offset, tx = async_memcpy(page, bio_page, page_offset,
b_offset, clen, &submit); b_offset, clen, &submit);
...@@ -4858,7 +4858,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) ...@@ -4858,7 +4858,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
printk(KERN_INFO "md/raid:%s: device %s operational as raid" printk(KERN_INFO "md/raid:%s: device %s operational as raid"
" disk %d\n", " disk %d\n",
mdname(mddev), bdevname(rdev->bdev, b), raid_disk); mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
} else } else if (rdev->saved_raid_disk != raid_disk)
/* Cannot rely on bitmap to complete recovery */ /* Cannot rely on bitmap to complete recovery */
conf->fullsync = 1; conf->fullsync = 1;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment