Commit bd3813d5 authored by Linus Torvalds

Merge tag 'md/3.14-fixes' of git://neil.brown.name/md

Pull md fixes from Neil Brown:
 "Two bugfixes for md

  both tagged for -stable"

* tag 'md/3.14-fixes' of git://neil.brown.name/md:
  md/raid5: Fix CPU hotplug callback registration
  md/raid1: restore ability for check and repair to fix read errors.
parents c1b8ae03 789b5e03
drivers/md/raid1.c
@@ -1953,11 +1953,15 @@ static int process_checks(struct r1bio *r1_bio)
 	for (i = 0; i < conf->raid_disks * 2; i++) {
 		int j;
 		int size;
+		int uptodate;
 		struct bio *b = r1_bio->bios[i];
 		if (b->bi_end_io != end_sync_read)
 			continue;
-		/* fixup the bio for reuse */
+		/* fixup the bio for reuse, but preserve BIO_UPTODATE */
+		uptodate = test_bit(BIO_UPTODATE, &b->bi_flags);
 		bio_reset(b);
+		if (!uptodate)
+			clear_bit(BIO_UPTODATE, &b->bi_flags);
 		b->bi_vcnt = vcnt;
 		b->bi_iter.bi_size = r1_bio->sectors << 9;
 		b->bi_iter.bi_sector = r1_bio->sector +
@@ -1990,11 +1994,14 @@ static int process_checks(struct r1bio *r1_bio)
 		int j;
 		struct bio *pbio = r1_bio->bios[primary];
 		struct bio *sbio = r1_bio->bios[i];
+		int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags);
 
 		if (sbio->bi_end_io != end_sync_read)
 			continue;
+		/* Now we can 'fixup' the BIO_UPTODATE flag */
+		set_bit(BIO_UPTODATE, &sbio->bi_flags);
 
-		if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
+		if (uptodate) {
 			for (j = vcnt; j-- ; ) {
 				struct page *p, *s;
 				p = pbio->bi_io_vec[j].bv_page;
@@ -2009,7 +2016,7 @@ static int process_checks(struct r1bio *r1_bio)
 		if (j >= 0)
 			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
 		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
-			      && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
+			      && uptodate)) {
			/* No need to write to this device. */
 			sbio->bi_end_io = NULL;
 			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
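
The raid1 fix above addresses a side effect of bio_reset(): resetting a bio for reuse also re-marks it BIO_UPTODATE, which erased the record of which reads had failed, so the check/repair pass no longer saw any errors to fix. The patch snapshots the flag before the reset, clears it again afterwards when the read had failed, and later tests the saved value. Below is a minimal userspace sketch of that save-and-restore pattern; struct fake_bio, fake_bio_reset() and BIT_UPTODATE are illustrative stand-ins for the kernel's struct bio, bio_reset() and BIO_UPTODATE, not real API.

#include <stdio.h>
#include <stdbool.h>

#define BIT_UPTODATE (1u << 0)

struct fake_bio {
	unsigned int flags;
	unsigned int vcnt;
};

static void fake_bio_reset(struct fake_bio *b)
{
	/* like bio_reset(): wipe the descriptor and re-mark it "up to date" */
	b->vcnt = 0;
	b->flags = BIT_UPTODATE;
}

static void fixup_for_reuse(struct fake_bio *b, unsigned int vcnt)
{
	/* snapshot the status bit before the reset destroys it... */
	bool uptodate = b->flags & BIT_UPTODATE;

	fake_bio_reset(b);

	/* ...and restore the failure indication afterwards */
	if (!uptodate)
		b->flags &= ~BIT_UPTODATE;
	b->vcnt = vcnt;
}

int main(void)
{
	struct fake_bio b = { .flags = 0, .vcnt = 4 };	/* a failed read */

	fixup_for_reuse(&b, 8);
	printf("uptodate after reuse: %s\n",
	       (b.flags & BIT_UPTODATE) ? "yes" : "no");	/* prints "no" */
	return 0;
}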
drivers/md/raid5.c
@@ -5514,23 +5514,43 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
 	return sectors * (raid_disks - conf->max_degraded);
 }
 
+static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+	safe_put_page(percpu->spare_page);
+	kfree(percpu->scribble);
+	percpu->spare_page = NULL;
+	percpu->scribble = NULL;
+}
+
+static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+	if (conf->level == 6 && !percpu->spare_page)
+		percpu->spare_page = alloc_page(GFP_KERNEL);
+	if (!percpu->scribble)
+		percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
+
+	if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
+		free_scratch_buffer(conf, percpu);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
 static void raid5_free_percpu(struct r5conf *conf)
 {
-	struct raid5_percpu *percpu;
 	unsigned long cpu;
 
 	if (!conf->percpu)
 		return;
 
-	get_online_cpus();
-	for_each_possible_cpu(cpu) {
-		percpu = per_cpu_ptr(conf->percpu, cpu);
-		safe_put_page(percpu->spare_page);
-		kfree(percpu->scribble);
-	}
 #ifdef CONFIG_HOTPLUG_CPU
 	unregister_cpu_notifier(&conf->cpu_notify);
 #endif
+
+	get_online_cpus();
+	for_each_possible_cpu(cpu)
+		free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
 	put_online_cpus();
 
 	free_percpu(conf->percpu);
@@ -5557,15 +5577,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		if (conf->level == 6 && !percpu->spare_page)
-			percpu->spare_page = alloc_page(GFP_KERNEL);
-		if (!percpu->scribble)
-			percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
-
-		if (!percpu->scribble ||
-		    (conf->level == 6 && !percpu->spare_page)) {
-			safe_put_page(percpu->spare_page);
-			kfree(percpu->scribble);
+		if (alloc_scratch_buffer(conf, percpu)) {
 			pr_err("%s: failed memory allocation for cpu%ld\n",
 			       __func__, cpu);
 			return notifier_from_errno(-ENOMEM);
@@ -5573,10 +5585,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
 		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		safe_put_page(percpu->spare_page);
-		kfree(percpu->scribble);
-		percpu->spare_page = NULL;
-		percpu->scribble = NULL;
+		free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
 		break;
 	default:
 		break;
@@ -5588,40 +5597,29 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
 static int raid5_alloc_percpu(struct r5conf *conf)
 {
 	unsigned long cpu;
-	struct page *spare_page;
-	struct raid5_percpu __percpu *allcpus;
-	void *scribble;
-	int err;
+	int err = 0;
 
-	allcpus = alloc_percpu(struct raid5_percpu);
-	if (!allcpus)
+	conf->percpu = alloc_percpu(struct raid5_percpu);
+	if (!conf->percpu)
 		return -ENOMEM;
-	conf->percpu = allcpus;
-
-	get_online_cpus();
-	err = 0;
-	for_each_present_cpu(cpu) {
-		if (conf->level == 6) {
-			spare_page = alloc_page(GFP_KERNEL);
-			if (!spare_page) {
-				err = -ENOMEM;
-				break;
-			}
-			per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
-		}
-		scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
-		if (!scribble) {
-			err = -ENOMEM;
-			break;
-		}
-		per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
-	}
+
 #ifdef CONFIG_HOTPLUG_CPU
 	conf->cpu_notify.notifier_call = raid456_cpu_notify;
 	conf->cpu_notify.priority = 0;
-	if (err == 0)
-		err = register_cpu_notifier(&conf->cpu_notify);
+	err = register_cpu_notifier(&conf->cpu_notify);
+	if (err)
+		return err;
 #endif
+
+	get_online_cpus();
+	for_each_present_cpu(cpu) {
+		err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
+		if (err) {
+			pr_err("%s: failed memory allocation for cpu%ld\n",
+			       __func__, cpu);
+			break;
+		}
+	}
 	put_online_cpus();
 
 	return err;
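
The raid5 rework fixes the CPU hotplug callback registration sequence. The old code allocated the per-CPU scratch buffers and called register_cpu_notifier() while holding the hotplug lock via get_online_cpus(), an ordering the hotplug core considers deadlock-prone, and it duplicated the allocation logic between the init path and the notifier. The new code registers the notifier first and routes both paths through alloc_scratch_buffer()/free_scratch_buffer(); because the helpers skip buffers that already exist, a CPU that comes online mid-initialization can be handled by either path without double allocation or leaks. A rough userspace sketch of that idempotent, all-or-nothing helper shape (names here are illustrative, not the kernel's):

#include <stdlib.h>
#include <errno.h>

struct scratch {
	void *spare_page;	/* only needed by "level 6" configs */
	void *scribble;
};

static void free_scratch(struct scratch *s)
{
	free(s->spare_page);
	free(s->scribble);
	s->spare_page = NULL;
	s->scribble = NULL;
}

static int alloc_scratch(struct scratch *s, int level, size_t scribble_len)
{
	/* idempotent: skip anything that is already allocated */
	if (level == 6 && !s->spare_page)
		s->spare_page = malloc(4096);
	if (!s->scribble)
		s->scribble = malloc(scribble_len);

	/* all-or-nothing: undo the partial allocation on failure */
	if (!s->scribble || (level == 6 && !s->spare_page)) {
		free_scratch(s);
		return -ENOMEM;
	}
	return 0;
}

int main(void)
{
	struct scratch s = { NULL, NULL };
	int err = alloc_scratch(&s, 6, 1024);

	if (!err)
		free_scratch(&s);
	return err ? 1 : 0;
}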