Commit 90a9ed95 authored by Linus Torvalds, committed by Chris Metcalf

Merge branch 'for-linus' of git://neil.brown.name/md

* 'for-linus' of git://neil.brown.name/md: (24 commits)
  md: clean up do_md_stop
  md: fix another deadlock with removing sysfs attributes.
  md: move revalidate_disk() back outside open_mutex
  md/raid10: fix deadlock with unaligned read during resync
  md/bitmap: separate out loading a bitmap from initialising the structures.
  md/bitmap: prepare for storing write-intent-bitmap via dm-dirty-log.
  md/bitmap: optimise scanning of empty bitmaps.
  md/bitmap: clean up plugging calls.
  md/bitmap: reduce dependence on sysfs.
  md/bitmap: white space clean up and similar.
  md/raid5: export raid5 unplugging interface.
  md/plug: optionally use plugger to unplug an array during resync/recovery.
  md/raid5: add simple plugging infrastructure.
  md/raid5: export is_congested test
  raid5: Don't set read-ahead when there is no queue
  md: add support for raising dm events.
  md: export various start/stop interfaces
  md: split out md_rdev_init
  md: be more careful setting MD_CHANGE_CLEAN
  md/raid5: ensure we create a unique name for kmem_cache when mddev has no gendisk
  ...
parents 8cbd84f2 fd8aa2c1
--- a/crypto/async_tx/Kconfig
+++ b/crypto/async_tx/Kconfig
@@ -22,6 +22,20 @@ config ASYNC_RAID6_RECOV
 	tristate
 	select ASYNC_CORE
 	select ASYNC_PQ
+	select ASYNC_XOR
+
+config ASYNC_RAID6_TEST
+	tristate "Self test for hardware accelerated raid6 recovery"
+	depends on ASYNC_RAID6_RECOV
+	select ASYNC_MEMCPY
+	---help---
+	  This is a one-shot self test that permutes through the
+	  recovery of all the possible two disk failure scenarios for a
+	  N-disk array. Recovery is performed with the asynchronous
+	  raid6 recovery routines, and will optionally use an offload
+	  engine if one is available.
+
+	  If unsure, say N.
+
 config ASYNC_TX_DISABLE_PQ_VAL_DMA
 	bool
...
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -121,7 +121,7 @@ config MD_RAID10
 config MD_RAID456
 	tristate "RAID-4/RAID-5/RAID-6 mode"
 	depends on BLK_DEV_MD
-	select MD_RAID6_PQ
+	select RAID6_PQ
 	select ASYNC_MEMCPY
 	select ASYNC_XOR
 	select ASYNC_PQ
@@ -165,22 +165,6 @@ config MULTICORE_RAID456
 	  If unsure, say N.
 
-config MD_RAID6_PQ
-	tristate
-
-config ASYNC_RAID6_TEST
-	tristate "Self test for hardware accelerated raid6 recovery"
-	depends on MD_RAID6_PQ
-	select ASYNC_RAID6_RECOV
-	---help---
-	  This is a one-shot self test that permutes through the
-	  recovery of all the possible two disk failure scenarios for a
-	  N-disk array. Recovery is performed with the asynchronous
-	  raid6 recovery routines, and will optionally use an offload
-	  engine if one is available.
-
-	  If unsure, say N.
-
 config MD_MULTIPATH
 	tristate "Multipath I/O support"
 	depends on BLK_DEV_MD
...
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -12,13 +12,6 @@ dm-log-userspace-y \
 		+= dm-log-userspace-base.o dm-log-userspace-transfer.o
 md-mod-y	+= md.o bitmap.o
 raid456-y	+= raid5.o
-raid6_pq-y	+= raid6algos.o raid6recov.o raid6tables.o \
-		   raid6int1.o raid6int2.o raid6int4.o \
-		   raid6int8.o raid6int16.o raid6int32.o \
-		   raid6altivec1.o raid6altivec2.o raid6altivec4.o \
-		   raid6altivec8.o \
-		   raid6mmx.o raid6sse1.o raid6sse2.o
-hostprogs-y	+= mktables
 
 # Note: link order is important.  All raid personalities
 # and must come before md.o, as they each initialise
@@ -29,7 +22,6 @@ obj-$(CONFIG_MD_LINEAR)	+= linear.o
 obj-$(CONFIG_MD_RAID0)		+= raid0.o
 obj-$(CONFIG_MD_RAID1)		+= raid1.o
 obj-$(CONFIG_MD_RAID10)		+= raid10.o
-obj-$(CONFIG_MD_RAID6_PQ)	+= raid6_pq.o
 obj-$(CONFIG_MD_RAID456)	+= raid456.o
 obj-$(CONFIG_MD_MULTIPATH)	+= multipath.o
 obj-$(CONFIG_MD_FAULTY)		+= faulty.o
@@ -45,75 +37,6 @@ obj-$(CONFIG_DM_MIRROR)		+= dm-mirror.o dm-log.o dm-region-hash.o
 obj-$(CONFIG_DM_LOG_USERSPACE)	+= dm-log-userspace.o
 obj-$(CONFIG_DM_ZERO)		+= dm-zero.o
 
-quiet_cmd_unroll = UNROLL  $@
-      cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) \
-                   < $< > $@ || ( rm -f $@ && exit 1 )
-
-ifeq ($(CONFIG_ALTIVEC),y)
-altivec_flags := -maltivec -mabi=altivec
-endif
-
 ifeq ($(CONFIG_DM_UEVENT),y)
 dm-mod-objs			+= dm-uevent.o
 endif
-
-targets += raid6int1.c
-$(obj)/raid6int1.c:   UNROLL := 1
-$(obj)/raid6int1.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
-	$(call if_changed,unroll)
-
-targets += raid6int2.c
-$(obj)/raid6int2.c:   UNROLL := 2
-$(obj)/raid6int2.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
-	$(call if_changed,unroll)
-
-targets += raid6int4.c
-$(obj)/raid6int4.c:   UNROLL := 4
-$(obj)/raid6int4.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
-	$(call if_changed,unroll)
-
-targets += raid6int8.c
-$(obj)/raid6int8.c:   UNROLL := 8
-$(obj)/raid6int8.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
-	$(call if_changed,unroll)
-
-targets += raid6int16.c
-$(obj)/raid6int16.c:   UNROLL := 16
-$(obj)/raid6int16.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
-	$(call if_changed,unroll)
-
-targets += raid6int32.c
-$(obj)/raid6int32.c:   UNROLL := 32
-$(obj)/raid6int32.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
-	$(call if_changed,unroll)
-
-CFLAGS_raid6altivec1.o += $(altivec_flags)
-targets += raid6altivec1.c
-$(obj)/raid6altivec1.c:   UNROLL := 1
-$(obj)/raid6altivec1.c:   $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
-	$(call if_changed,unroll)
-
-CFLAGS_raid6altivec2.o += $(altivec_flags)
-targets += raid6altivec2.c
-$(obj)/raid6altivec2.c:   UNROLL := 2
-$(obj)/raid6altivec2.c:   $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
-	$(call if_changed,unroll)
-
-CFLAGS_raid6altivec4.o += $(altivec_flags)
-targets += raid6altivec4.c
-$(obj)/raid6altivec4.c:   UNROLL := 4
-$(obj)/raid6altivec4.c:   $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
-	$(call if_changed,unroll)
-
-CFLAGS_raid6altivec8.o += $(altivec_flags)
-targets += raid6altivec8.c
-$(obj)/raid6altivec8.c:   UNROLL := 8
-$(obj)/raid6altivec8.c:   $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
-	$(call if_changed,unroll)
-
-quiet_cmd_mktable = TABLE   $@
-      cmd_mktable = $(obj)/mktables > $@ || ( rm -f $@ && exit 1 )
-
-targets += raid6tables.c
-$(obj)/raid6tables.c: $(obj)/mktables FORCE
-	$(call if_changed,mktable)
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -13,7 +13,6 @@
  * Still to do:
  *
  * flush after percent set rather than just time based. (maybe both).
- * wait if count gets too high, wake when it drops to half.
  */
 
 #include <linux/blkdev.h>
@@ -30,6 +29,7 @@
 #include "md.h"
 #include "bitmap.h"
+#include <linux/dm-dirty-log.h>
 
 /* debug macros */
 
 #define DEBUG 0
@@ -51,9 +51,6 @@
 #define INJECT_FATAL_FAULT_3 0 /* undef */
 #endif
 
-//#define DPRINTK PRINTK /* set this NULL to avoid verbose debug output */
-#define DPRINTK(x...) do { } while(0)
-
 #ifndef PRINTK
 #  if DEBUG > 0
 #    define PRINTK(x...) printk(KERN_DEBUG x)
@@ -62,12 +59,11 @@
 #  endif
 #endif
 
-static inline char * bmname(struct bitmap *bitmap)
+static inline char *bmname(struct bitmap *bitmap)
 {
 	return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
 }
 
 /*
  * just a placeholder - calls kmalloc for bitmap pages
  */
@@ -78,7 +74,7 @@ static unsigned char *bitmap_alloc_page(struct bitmap *bitmap)
 #ifdef INJECT_FAULTS_1
 	page = NULL;
 #else
-	page = kmalloc(PAGE_SIZE, GFP_NOIO);
+	page = kzalloc(PAGE_SIZE, GFP_NOIO);
 #endif
 	if (!page)
 		printk("%s: bitmap_alloc_page FAILED\n", bmname(bitmap));
@@ -107,7 +103,8 @@ static void bitmap_free_page(struct bitmap *bitmap, unsigned char *page)
  * if we find our page, we increment the page's refcount so that it stays
  * allocated while we're using it
  */
-static int bitmap_checkpage(struct bitmap *bitmap, unsigned long page, int create)
+static int bitmap_checkpage(struct bitmap *bitmap,
+			    unsigned long page, int create)
 __releases(bitmap->lock)
 __acquires(bitmap->lock)
 {
@@ -121,7 +118,6 @@ __acquires(bitmap->lock)
 		return -EINVAL;
 	}
 
-
 	if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
 		return 0;
@@ -131,43 +127,34 @@ __acquires(bitmap->lock)
 	if (!create)
 		return -ENOENT;
 
-	spin_unlock_irq(&bitmap->lock);
-
 	/* this page has not been allocated yet */
 
-	if ((mappage = bitmap_alloc_page(bitmap)) == NULL) {
+	spin_unlock_irq(&bitmap->lock);
+	mappage = bitmap_alloc_page(bitmap);
+	spin_lock_irq(&bitmap->lock);
+
+	if (mappage == NULL) {
 		PRINTK("%s: bitmap map page allocation failed, hijacking\n",
 			bmname(bitmap));
 		/* failed - set the hijacked flag so that we can use the
 		 * pointer as a counter */
-		spin_lock_irq(&bitmap->lock);
 		if (!bitmap->bp[page].map)
 			bitmap->bp[page].hijacked = 1;
-		goto out;
-	}
-
-	/* got a page */
-
-	spin_lock_irq(&bitmap->lock);
-
-	/* recheck the page */
-
-	if (bitmap->bp[page].map || bitmap->bp[page].hijacked) {
+	} else if (bitmap->bp[page].map ||
+		   bitmap->bp[page].hijacked) {
 		/* somebody beat us to getting the page */
 		bitmap_free_page(bitmap, mappage);
 		return 0;
-	}
+	} else {
 
-	/* no page was in place and we have one, so install it */
+		/* no page was in place and we have one, so install it */
 
-	memset(mappage, 0, PAGE_SIZE);
-	bitmap->bp[page].map = mappage;
-	bitmap->missing_pages--;
-out:
+		bitmap->bp[page].map = mappage;
+		bitmap->missing_pages--;
+	}
 	return 0;
 }
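An aside on the "hijack" fallback visible above: when the counter page cannot be allocated, md reuses the storage of the map pointer itself as a pair of 16-bit counters, one per half of the range the page would have covered (bitmap_get_counter(), further down, reads them back the same way). A minimal user-space sketch of that trick, with simplified stand-in types:

/* Hedged user-space sketch; types and field names are simplified
 * stand-ins for the kernel ones. */
#include <stdio.h>
#include <stdint.h>

typedef uint16_t bitmap_counter_t;

struct bitmap_page {
	char *map;			/* normally a page full of counters */
	unsigned int hijacked:1;	/* set when 'map' itself holds counters */
};

int main(void)
{
	struct bitmap_page bp = { .map = NULL, .hijacked = 1 };

	/* With 'hijacked' set, the pointer storage is reinterpreted as
	 * two coarse counters instead of a pointer to many fine ones. */
	bitmap_counter_t *ctr = (bitmap_counter_t *)&bp.map;
	ctr[0] = 2;	/* first half of the range: write in flight */
	ctr[1] = 1;	/* second half: may be cleared soon */

	printf("counters: %u %u\n", ctr[0], ctr[1]);
	return 0;
}

The cost is granularity, not correctness: each hijacked counter covers half a page's worth of chunks, so more blocks get resynced after a crash than strictly necessary.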
 /* if page is completely empty, put it back on the free list, or dealloc it */
 /* if page was hijacked, unmark the flag so it might get alloced next time */
 /* Note: lock should be held when calling this */
@@ -183,26 +170,15 @@ static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page)
 	if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
 		bitmap->bp[page].hijacked = 0;
 		bitmap->bp[page].map = NULL;
-		return;
+	} else {
+		/* normal case, free the page */
+		ptr = bitmap->bp[page].map;
+		bitmap->bp[page].map = NULL;
+		bitmap->missing_pages++;
+		bitmap_free_page(bitmap, ptr);
 	}
-
-	/* normal case, free the page */
-
-#if 0
-/* actually ... let's not. We will probably need the page again exactly when
- * memory is tight and we are flusing to disk
- */
-	return;
-#else
-	ptr = bitmap->bp[page].map;
-	bitmap->bp[page].map = NULL;
-	bitmap->missing_pages++;
-	bitmap_free_page(bitmap, ptr);
-	return;
-#endif
 }
 
 /*
  * bitmap file handling - read and write the bitmap file and its superblock
  */
@@ -220,11 +196,14 @@ static struct page *read_sb_page(mddev_t *mddev, loff_t offset,
 	mdk_rdev_t *rdev;
 	sector_t target;
+	int did_alloc = 0;
 
-	if (!page)
+	if (!page) {
 		page = alloc_page(GFP_KERNEL);
-	if (!page)
-		return ERR_PTR(-ENOMEM);
+		if (!page)
+			return ERR_PTR(-ENOMEM);
+		did_alloc = 1;
+	}
 
 	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		if (! test_bit(In_sync, &rdev->flags)
@@ -242,6 +221,8 @@ static struct page *read_sb_page(mddev_t *mddev, loff_t offset,
 			return page;
 		}
 	}
+	if (did_alloc)
+		put_page(page);
 	return ERR_PTR(-EIO);
 }
@@ -286,49 +267,51 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
 	mddev_t *mddev = bitmap->mddev;
 
 	while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
 		int size = PAGE_SIZE;
 		loff_t offset = mddev->bitmap_info.offset;
 		if (page->index == bitmap->file_pages-1)
 			size = roundup(bitmap->last_page_size,
 				       bdev_logical_block_size(rdev->bdev));
 		/* Just make sure we aren't corrupting data or
 		 * metadata
 		 */
 		if (mddev->external) {
 			/* Bitmap could be anywhere. */
-			if (rdev->sb_start + offset + (page->index *(PAGE_SIZE/512)) >
-			    rdev->data_offset &&
-			    rdev->sb_start + offset <
-			    rdev->data_offset + mddev->dev_sectors +
-			    (PAGE_SIZE/512))
+			if (rdev->sb_start + offset + (page->index
+						       * (PAGE_SIZE/512))
+			    > rdev->data_offset
+			    &&
+			    rdev->sb_start + offset
+			    < (rdev->data_offset + mddev->dev_sectors
+			     + (PAGE_SIZE/512)))
 				goto bad_alignment;
 		} else if (offset < 0) {
 			/* DATA  BITMAP METADATA  */
 			if (offset
 			    + (long)(page->index * (PAGE_SIZE/512))
 			    + size/512 > 0)
 				/* bitmap runs in to metadata */
 				goto bad_alignment;
 			if (rdev->data_offset + mddev->dev_sectors
 			    > rdev->sb_start + offset)
 				/* data runs in to bitmap */
 				goto bad_alignment;
 		} else if (rdev->sb_start < rdev->data_offset) {
 			/* METADATA BITMAP DATA */
 			if (rdev->sb_start
 			    + offset
 			    + page->index*(PAGE_SIZE/512) + size/512
 			    > rdev->data_offset)
 				/* bitmap runs in to data */
 				goto bad_alignment;
 		} else {
 			/* DATA METADATA BITMAP - no problems */
 		}
 		md_super_write(mddev, rdev,
 			       rdev->sb_start + offset
 			       + page->index * (PAGE_SIZE/512),
 			       size,
 			       page);
 	}
 
 	if (wait)
@@ -364,10 +347,9 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
 			bh = bh->b_this_page;
 		}
 
-		if (wait) {
+		if (wait)
 			wait_event(bitmap->write_wait,
 				   atomic_read(&bitmap->pending_writes)==0);
-		}
 	}
 	if (bitmap->flags & BITMAP_WRITE_ERROR)
 		bitmap_file_kick(bitmap);
@@ -424,7 +406,7 @@ static struct page *read_page(struct file *file, unsigned long index,
 	struct buffer_head *bh;
 	sector_t block;
 
-	PRINTK("read bitmap file (%dB @ %Lu)\n", (int)PAGE_SIZE,
+	PRINTK("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
 	       (unsigned long long)index << PAGE_SHIFT);
 
 	page = alloc_page(GFP_KERNEL);
@@ -478,7 +460,7 @@ static struct page *read_page(struct file *file, unsigned long index,
 	}
 out:
 	if (IS_ERR(page))
-		printk(KERN_ALERT "md: bitmap read error: (%dB @ %Lu): %ld\n",
+		printk(KERN_ALERT "md: bitmap read error: (%dB @ %llu): %ld\n",
 			(int)PAGE_SIZE,
 			(unsigned long long)index << PAGE_SHIFT,
 			PTR_ERR(page));
@@ -664,11 +646,14 @@ static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
 	sb = kmap_atomic(bitmap->sb_page, KM_USER0);
 	old = le32_to_cpu(sb->state) & bits;
 	switch (op) {
-	case MASK_SET: sb->state |= cpu_to_le32(bits);
-		break;
-	case MASK_UNSET: sb->state &= cpu_to_le32(~bits);
-		break;
-	default: BUG();
+	case MASK_SET:
+		sb->state |= cpu_to_le32(bits);
+		break;
+	case MASK_UNSET:
+		sb->state &= cpu_to_le32(~bits);
+		break;
+	default:
+		BUG();
 	}
 	kunmap_atomic(sb, KM_USER0);
 	return old;
@@ -710,12 +695,14 @@ static inline unsigned long file_page_offset(struct bitmap *bitmap, unsigned lon
 static inline struct page *filemap_get_page(struct bitmap *bitmap,
 					    unsigned long chunk)
 {
-	if (file_page_index(bitmap, chunk) >= bitmap->file_pages) return NULL;
+	if (bitmap->filemap == NULL)
+		return NULL;
+	if (file_page_index(bitmap, chunk) >= bitmap->file_pages)
+		return NULL;
 	return bitmap->filemap[file_page_index(bitmap, chunk)
 			       - file_page_index(bitmap, 0)];
 }
 
 static void bitmap_file_unmap(struct bitmap *bitmap)
 {
 	struct page **map, *sb_page;
@@ -766,7 +753,6 @@ static void bitmap_file_put(struct bitmap *bitmap)
 	}
 }
-
 /*
  * bitmap_file_kick - if an error occurs while manipulating the bitmap file
  * then it is no longer reliable, so we stop using it and we mark the file
@@ -785,7 +771,6 @@ static void bitmap_file_kick(struct bitmap *bitmap)
 			ptr = d_path(&bitmap->file->f_path, path,
 				     PAGE_SIZE);
 
-
 			printk(KERN_ALERT
 			      "%s: kicking failed bitmap file %s from array!\n",
 			      bmname(bitmap), IS_ERR(ptr) ? "" : ptr);
@@ -803,27 +788,36 @@ static void bitmap_file_kick(struct bitmap *bitmap)
 }
 
 enum bitmap_page_attr {
-	BITMAP_PAGE_DIRTY = 0, // there are set bits that need to be synced
-	BITMAP_PAGE_CLEAN = 1, // there are bits that might need to be cleared
-	BITMAP_PAGE_NEEDWRITE=2, // there are cleared bits that need to be synced
+	BITMAP_PAGE_DIRTY = 0,     /* there are set bits that need to be synced */
+	BITMAP_PAGE_CLEAN = 1,     /* there are bits that might need to be cleared */
+	BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
 };
 
 static inline void set_page_attr(struct bitmap *bitmap, struct page *page,
 				enum bitmap_page_attr attr)
 {
-	__set_bit((page->index<<2) + attr, bitmap->filemap_attr);
+	if (page)
+		__set_bit((page->index<<2) + attr, bitmap->filemap_attr);
+	else
+		__set_bit(attr, &bitmap->logattrs);
 }
 
 static inline void clear_page_attr(struct bitmap *bitmap, struct page *page,
 				enum bitmap_page_attr attr)
 {
-	__clear_bit((page->index<<2) + attr, bitmap->filemap_attr);
+	if (page)
+		__clear_bit((page->index<<2) + attr, bitmap->filemap_attr);
+	else
+		__clear_bit(attr, &bitmap->logattrs);
 }
 
 static inline unsigned long test_page_attr(struct bitmap *bitmap, struct page *page,
 					   enum bitmap_page_attr attr)
 {
-	return test_bit((page->index<<2) + attr, bitmap->filemap_attr);
+	if (page)
+		return test_bit((page->index<<2) + attr, bitmap->filemap_attr);
+	else
+		return test_bit(attr, &bitmap->logattrs);
 }
 
 /*
@@ -836,30 +830,32 @@ static inline unsigned long test_page_attr(struct bitmap *bitmap, struct page *p
 static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
 {
 	unsigned long bit;
-	struct page *page;
+	struct page *page = NULL;
 	void *kaddr;
 	unsigned long chunk = block >> CHUNK_BLOCK_SHIFT(bitmap);
 
 	if (!bitmap->filemap) {
-		return;
-	}
+		struct dm_dirty_log *log = bitmap->mddev->bitmap_info.log;
+		if (log)
+			log->type->mark_region(log, chunk);
+	} else {
 
-	page = filemap_get_page(bitmap, chunk);
-	if (!page)
-		return;
-	bit = file_page_offset(bitmap, chunk);
+		page = filemap_get_page(bitmap, chunk);
+		if (!page)
+			return;
+		bit = file_page_offset(bitmap, chunk);
 
-	/* set the bit */
-	kaddr = kmap_atomic(page, KM_USER0);
-	if (bitmap->flags & BITMAP_HOSTENDIAN)
-		set_bit(bit, kaddr);
-	else
-		ext2_set_bit(bit, kaddr);
-	kunmap_atomic(kaddr, KM_USER0);
-	PRINTK("set file bit %lu page %lu\n", bit, page->index);
-
+		/* set the bit */
+		kaddr = kmap_atomic(page, KM_USER0);
+		if (bitmap->flags & BITMAP_HOSTENDIAN)
+			set_bit(bit, kaddr);
+		else
+			ext2_set_bit(bit, kaddr);
+		kunmap_atomic(kaddr, KM_USER0);
+		PRINTK("set file bit %lu page %lu\n", bit, page->index);
+	}
 	/* record page number so it gets flushed to disk when unplug occurs */
 	set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
 }
 
 /* this gets called when the md device is ready to unplug its underlying
@@ -874,6 +870,16 @@ void bitmap_unplug(struct bitmap *bitmap)
 	if (!bitmap)
 		return;
 
+	if (!bitmap->filemap) {
+		/* Must be using a dirty_log */
+		struct dm_dirty_log *log = bitmap->mddev->bitmap_info.log;
+		dirty = test_and_clear_bit(BITMAP_PAGE_DIRTY, &bitmap->logattrs);
+		need_write = test_and_clear_bit(BITMAP_PAGE_NEEDWRITE, &bitmap->logattrs);
+		if (dirty || need_write)
+			if (log->type->flush(log))
+				bitmap->flags |= BITMAP_WRITE_ERROR;
+		goto out;
+	}
 
 	/* look at each page to see if there are any set bits that need to be
 	 * flushed out to disk */
@@ -892,7 +898,7 @@ void bitmap_unplug(struct bitmap *bitmap)
 			wait = 1;
 		spin_unlock_irqrestore(&bitmap->lock, flags);
 
-		if (dirty | need_write)
+		if (dirty || need_write)
 			write_page(bitmap, page, 0);
 	}
 	if (wait) { /* if any writes were performed, we need to wait on them */
@@ -902,9 +908,11 @@ void bitmap_unplug(struct bitmap *bitmap)
 		else
 			md_super_wait(bitmap->mddev);
 	}
+out:
 	if (bitmap->flags & BITMAP_WRITE_ERROR)
 		bitmap_file_kick(bitmap);
 }
+EXPORT_SYMBOL(bitmap_unplug);
 
 static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
 
 /*
  * bitmap_init_from_disk -- called at bitmap_create time to initialize
@@ -943,12 +951,11 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
 		printk(KERN_INFO "%s: bitmap file is out of date, doing full "
 			"recovery\n", bmname(bitmap));
 
-	bytes = (chunks + 7) / 8;
+	bytes = DIV_ROUND_UP(bitmap->chunks, 8);
 	if (!bitmap->mddev->bitmap_info.external)
 		bytes += sizeof(bitmap_super_t);
-
-	num_pages = (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
+	num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
 
 	if (file && i_size_read(file->f_mapping->host) < bytes) {
 		printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n",
@@ -966,7 +973,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
 
 	/* We need 4 bits per page, rounded up to a multiple of sizeof(unsigned long) */
 	bitmap->filemap_attr = kzalloc(
-		roundup( DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
+		roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
 		GFP_KERNEL);
 	if (!bitmap->filemap_attr)
 		goto err;
@@ -1021,7 +1028,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
 			if (outofdate) {
 				/*
 				 * if bitmap is out of date, dirty the
 				 * whole page and write it out
 				 */
 				paddr = kmap_atomic(page, KM_USER0);
 				memset(paddr + offset, 0xff,
@@ -1052,7 +1059,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
 		}
 	}
 
 	/* everything went OK */
 	ret = 0;
 	bitmap_mask_state(bitmap, BITMAP_STALE, MASK_UNSET);
@@ -1080,21 +1087,16 @@ void bitmap_write_all(struct bitmap *bitmap)
 	 */
 	int i;
 
-	for (i=0; i < bitmap->file_pages; i++)
+	for (i = 0; i < bitmap->file_pages; i++)
 		set_page_attr(bitmap, bitmap->filemap[i],
 			      BITMAP_PAGE_NEEDWRITE);
 }
 
 static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
 {
 	sector_t chunk = offset >> CHUNK_BLOCK_SHIFT(bitmap);
 	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
-
 	bitmap->bp[page].count += inc;
-/*
-	if (page == 0) printk("count page 0, offset %llu: %d gives %d\n",
-			      (unsigned long long)offset, inc, bitmap->bp[page].count);
-*/
 	bitmap_checkfree(bitmap, page);
 }
 
 static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
@@ -1114,6 +1116,7 @@ void bitmap_daemon_work(mddev_t *mddev)
 	struct page *page = NULL, *lastpage = NULL;
 	int blocks;
 	void *paddr;
+	struct dm_dirty_log *log = mddev->bitmap_info.log;
 
 	/* Use a mutex to guard daemon_work against
 	 * bitmap_destroy.
@@ -1138,11 +1141,12 @@ void bitmap_daemon_work(mddev_t *mddev)
 	spin_lock_irqsave(&bitmap->lock, flags);
 	for (j = 0; j < bitmap->chunks; j++) {
 		bitmap_counter_t *bmc;
-		if (!bitmap->filemap)
-			/* error or shutdown */
-			break;
-
-		page = filemap_get_page(bitmap, j);
+		if (!bitmap->filemap) {
+			if (!log)
+				/* error or shutdown */
+				break;
+		} else
+			page = filemap_get_page(bitmap, j);
 
 		if (page != lastpage) {
 			/* skip this page unless it's marked as needing cleaning */
@@ -1197,14 +1201,11 @@ void bitmap_daemon_work(mddev_t *mddev)
 			(sector_t)j << CHUNK_BLOCK_SHIFT(bitmap),
 			&blocks, 0);
 		if (bmc) {
-/*
-  if (j < 100) printk("bitmap: j=%lu, *bmc = 0x%x\n", j, *bmc);
-*/
 			if (*bmc)
 				bitmap->allclean = 0;
 
 			if (*bmc == 2) {
-				*bmc=1; /* maybe clear the bit next time */
+				*bmc = 1; /* maybe clear the bit next time */
 				set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
 			} else if (*bmc == 1 && !bitmap->need_sync) {
 				/* we can clear the bit */
@@ -1214,14 +1215,17 @@ void bitmap_daemon_work(mddev_t *mddev)
 					      -1);
 
 				/* clear the bit */
-				paddr = kmap_atomic(page, KM_USER0);
-				if (bitmap->flags & BITMAP_HOSTENDIAN)
-					clear_bit(file_page_offset(bitmap, j),
-						  paddr);
-				else
-					ext2_clear_bit(file_page_offset(bitmap, j),
-						       paddr);
-				kunmap_atomic(paddr, KM_USER0);
+				if (page) {
+					paddr = kmap_atomic(page, KM_USER0);
+					if (bitmap->flags & BITMAP_HOSTENDIAN)
+						clear_bit(file_page_offset(bitmap, j),
+							  paddr);
+					else
+						ext2_clear_bit(file_page_offset(bitmap, j),
+							       paddr);
+					kunmap_atomic(paddr, KM_USER0);
+				} else
+					log->type->clear_region(log, j);
 			}
 		} else
 			j |= PAGE_COUNTER_MASK;
@@ -1229,12 +1233,16 @@ void bitmap_daemon_work(mddev_t *mddev)
 	spin_unlock_irqrestore(&bitmap->lock, flags);
 
 	/* now sync the final page */
-	if (lastpage != NULL) {
+	if (lastpage != NULL || log != NULL) {
 		spin_lock_irqsave(&bitmap->lock, flags);
 		if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
 			clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
 			spin_unlock_irqrestore(&bitmap->lock, flags);
-			write_page(bitmap, lastpage, 0);
+			if (lastpage)
+				write_page(bitmap, lastpage, 0);
+			else
+				if (log->type->flush(log))
+					bitmap->flags |= BITMAP_WRITE_ERROR;
 		} else {
 			set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
 			spin_unlock_irqrestore(&bitmap->lock, flags);
@@ -1243,7 +1251,7 @@ void bitmap_daemon_work(mddev_t *mddev)
  done:
 	if (bitmap->allclean == 0)
 		bitmap->mddev->thread->timeout =
 			bitmap->mddev->bitmap_info.daemon_sleep;
 	mutex_unlock(&mddev->bitmap_info.mutex);
 }
@@ -1262,34 +1270,38 @@ __acquires(bitmap->lock)
 	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
 	unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
 	sector_t csize;
+	int err;
 
-	if (bitmap_checkpage(bitmap, page, create) < 0) {
+	err = bitmap_checkpage(bitmap, page, create);
+
+	if (bitmap->bp[page].hijacked ||
+	    bitmap->bp[page].map == NULL)
+		csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap) +
+					  PAGE_COUNTER_SHIFT - 1);
+	else
 		csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap));
-		*blocks = csize - (offset & (csize- 1));
+	*blocks = csize - (offset & (csize - 1));
+
+	if (err < 0)
 		return NULL;
-	}
+
 	/* now locked ... */
 
 	if (bitmap->bp[page].hijacked) { /* hijacked pointer */
 		/* should we use the first or second counter field
 		 * of the hijacked pointer? */
 		int hi = (pageoff > PAGE_COUNTER_MASK);
-		csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap) +
-					  PAGE_COUNTER_SHIFT - 1);
-		*blocks = csize - (offset & (csize- 1));
 		return  &((bitmap_counter_t *)
 			  &bitmap->bp[page].map)[hi];
-	} else { /* page is allocated */
-		csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap));
-		*blocks = csize - (offset & (csize- 1));
+	} else /* page is allocated */
 		return (bitmap_counter_t *)
 			&(bitmap->bp[page].map[pageoff]);
-	}
 }
 
 int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
 {
-	if (!bitmap) return 0;
+	if (!bitmap)
+		return 0;
 
 	if (behind) {
 		int bw;
@@ -1322,17 +1334,16 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
 			prepare_to_wait(&bitmap->overflow_wait, &__wait,
 					TASK_UNINTERRUPTIBLE);
 			spin_unlock_irq(&bitmap->lock);
-			blk_unplug(bitmap->mddev->queue);
+			md_unplug(bitmap->mddev);
 			schedule();
 			finish_wait(&bitmap->overflow_wait, &__wait);
 			continue;
 		}
 
-		switch(*bmc) {
+		switch (*bmc) {
 		case 0:
 			bitmap_file_set_bit(bitmap, offset);
-			bitmap_count_page(bitmap,offset, 1);
-			blk_plug_device_unlocked(bitmap->mddev->queue);
+			bitmap_count_page(bitmap, offset, 1);
 			/* fall through */
 		case 1:
 			*bmc = 2;
@@ -1345,16 +1356,19 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
 		offset += blocks;
 		if (sectors > blocks)
 			sectors -= blocks;
-		else sectors = 0;
+		else
+			sectors = 0;
 	}
 	bitmap->allclean = 0;
 	return 0;
 }
+EXPORT_SYMBOL(bitmap_startwrite);
 
 void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors,
 		     int success, int behind)
 {
-	if (!bitmap) return;
+	if (!bitmap)
+		return;
 
 	if (behind) {
 		if (atomic_dec_and_test(&bitmap->behind_writes))
 			wake_up(&bitmap->behind_wait);
@@ -1381,7 +1395,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
 		    bitmap->events_cleared < bitmap->mddev->events) {
 			bitmap->events_cleared = bitmap->mddev->events;
 			bitmap->need_sync = 1;
-			sysfs_notify_dirent(bitmap->sysfs_can_clear);
+			sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
 		}
 
 		if (!success && ! (*bmc & NEEDED_MASK))
@@ -1391,18 +1405,22 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
 			wake_up(&bitmap->overflow_wait);
 
 		(*bmc)--;
-		if (*bmc <= 2) {
+		if (*bmc <= 2)
 			set_page_attr(bitmap,
-				      filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)),
+				      filemap_get_page(
+					      bitmap,
+					      offset >> CHUNK_BLOCK_SHIFT(bitmap)),
 				      BITMAP_PAGE_CLEAN);
-		}
+
 		spin_unlock_irqrestore(&bitmap->lock, flags);
 		offset += blocks;
 		if (sectors > blocks)
 			sectors -= blocks;
-		else sectors = 0;
+		else
+			sectors = 0;
 	}
 }
+EXPORT_SYMBOL(bitmap_endwrite);
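Note that the EXPORT_SYMBOL lines now sit beside each definition instead of in the block this commit deletes at the end of the file. bitmap_startwrite()/bitmap_endwrite() bracket every array write: startwrite marks the region dirty before the I/O is issued, endwrite drops the counter when it completes so the bit can eventually be cleared. A hedged user-space sketch of that calling pattern (the bitmap_* bodies here are printf stubs, and array_write is invented scaffolding):

/* Hedged sketch (user-space mock) of the write-path pairing. */
#include <stdio.h>

typedef unsigned long long sector_t;
struct bitmap { int dummy; };

/* stand-ins for the exported kernel API */
static int bitmap_startwrite(struct bitmap *b, sector_t off,
			     unsigned long sectors, int behind)
{
	printf("mark region dirty: %llu+%lu\n", off, sectors);
	return 0;
}

static void bitmap_endwrite(struct bitmap *b, sector_t off,
			    unsigned long sectors, int success, int behind)
{
	printf("write done (%s): counter drops, bit may clear later\n",
	       success ? "ok" : "failed");
}

static void array_write(struct bitmap *b, sector_t off, unsigned long len)
{
	bitmap_startwrite(b, off, len, 0);	/* before issuing the I/O */
	int success = 1;			/* ... submit bio(s) here ... */
	bitmap_endwrite(b, off, len, success, 0); /* on completion */
}

int main(void)
{
	struct bitmap b;

	array_write(&b, 1024, 8);
	return 0;
}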
 
 static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks,
 			       int degraded)
@@ -1455,14 +1473,14 @@ int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks,
 	}
 	return rv;
 }
+EXPORT_SYMBOL(bitmap_start_sync);
 
 void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted)
 {
 	bitmap_counter_t *bmc;
 	unsigned long flags;
-/*
-	if (offset == 0) printk("bitmap_end_sync 0 (%d)\n", aborted);
-*/
+
 	if (bitmap == NULL) {
 		*blocks = 1024;
 		return;
 	}
@@ -1471,26 +1489,23 @@ void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int ab
 	if (bmc == NULL)
 		goto unlock;
 	/* locked */
-/*
-	if (offset == 0) printk("bitmap_end sync found 0x%x, blocks %d\n", *bmc, *blocks);
-*/
 	if (RESYNC(*bmc)) {
 		*bmc &= ~RESYNC_MASK;
 
 		if (!NEEDED(*bmc) && aborted)
 			*bmc |= NEEDED_MASK;
 		else {
-			if (*bmc <= 2) {
+			if (*bmc <= 2)
 				set_page_attr(bitmap,
 					      filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)),
 					      BITMAP_PAGE_CLEAN);
-			}
 		}
 	}
  unlock:
 	spin_unlock_irqrestore(&bitmap->lock, flags);
 	bitmap->allclean = 0;
 }
+EXPORT_SYMBOL(bitmap_end_sync);
 
 void bitmap_close_sync(struct bitmap *bitmap)
 {
@@ -1507,6 +1522,7 @@ void bitmap_close_sync(struct bitmap *bitmap)
 		sector += blocks;
 	}
 }
+EXPORT_SYMBOL(bitmap_close_sync);
 
 void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
 {
@@ -1526,7 +1542,8 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
 		   atomic_read(&bitmap->mddev->recovery_active) == 0);
 
 	bitmap->mddev->curr_resync_completed = bitmap->mddev->curr_resync;
-	set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
+	if (bitmap->mddev->persistent)
+		set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
 	sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1);
 	s = 0;
 	while (s < sector && s < bitmap->mddev->resync_max_sectors) {
@@ -1536,6 +1553,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
 	bitmap->last_end_sync = jiffies;
 	sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
 }
+EXPORT_SYMBOL(bitmap_cond_end_sync);
 
 static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
 {
@@ -1552,9 +1570,9 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
 		spin_unlock_irq(&bitmap->lock);
 		return;
 	}
-	if (! *bmc) {
+	if (!*bmc) {
 		struct page *page;
-		*bmc = 1 | (needed?NEEDED_MASK:0);
+		*bmc = 1 | (needed ? NEEDED_MASK : 0);
 		bitmap_count_page(bitmap, offset, 1);
 		page = filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap));
 		set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
@@ -1663,15 +1681,17 @@ int bitmap_create(mddev_t *mddev)
 	unsigned long pages;
 	struct file *file = mddev->bitmap_info.file;
 	int err;
-	sector_t start;
-	struct sysfs_dirent *bm;
+	struct sysfs_dirent *bm = NULL;
 
 	BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);
 
-	if (!file && !mddev->bitmap_info.offset) /* bitmap disabled, nothing to do */
+	if (!file
+	    && !mddev->bitmap_info.offset
+	    && !mddev->bitmap_info.log) /* bitmap disabled, nothing to do */
 		return 0;
 
 	BUG_ON(file && mddev->bitmap_info.offset);
+	BUG_ON(mddev->bitmap_info.offset && mddev->bitmap_info.log);
 
 	bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
 	if (!bitmap)
@@ -1685,7 +1705,8 @@ int bitmap_create(mddev_t *mddev)
 
 	bitmap->mddev = mddev;
 
-	bm = sysfs_get_dirent(mddev->kobj.sd, NULL, "bitmap");
+	if (mddev->kobj.sd)
+		bm = sysfs_get_dirent(mddev->kobj.sd, NULL, "bitmap");
 	if (bm) {
 		bitmap->sysfs_can_clear = sysfs_get_dirent(bm, NULL, "can_clear");
 		sysfs_put(bm);
@@ -1719,9 +1740,9 @@ int bitmap_create(mddev_t *mddev)
 	bitmap->chunkshift = ffz(~mddev->bitmap_info.chunksize);
 
 	/* now that chunksize and chunkshift are set, we can use these macros */
 	chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) >>
 			CHUNK_BLOCK_SHIFT(bitmap);
 	pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO;
 
 	BUG_ON(!pages);
@@ -1741,27 +1762,11 @@ int bitmap_create(mddev_t *mddev)
 	if (!bitmap->bp)
 		goto error;
 
-	/* now that we have some pages available, initialize the in-memory
-	 * bitmap from the on-disk bitmap */
-	start = 0;
-	if (mddev->degraded == 0
-	    || bitmap->events_cleared == mddev->events)
-		/* no need to keep dirty bits to optimise a re-add of a missing device */
-		start = mddev->recovery_cp;
-
-	err = bitmap_init_from_disk(bitmap, start);
-
-	if (err)
-		goto error;
-
 	printk(KERN_INFO "created bitmap (%lu pages) for device %s\n",
 		pages, bmname(bitmap));
 
 	mddev->bitmap = bitmap;
 
-	mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
-	md_wakeup_thread(mddev->thread);
-
-	bitmap_update_sb(bitmap);
-
 	return (bitmap->flags & BITMAP_WRITE_ERROR) ? -EIO : 0;
@@ -1770,15 +1775,69 @@ int bitmap_create(mddev_t *mddev)
 	return err;
 }
 
+int bitmap_load(mddev_t *mddev)
+{
+	int err = 0;
+	sector_t sector = 0;
+	struct bitmap *bitmap = mddev->bitmap;
+
+	if (!bitmap)
+		goto out;
+
+	/* Clear out old bitmap info first:  Either there is none, or we
+	 * are resuming after someone else has possibly changed things,
+	 * so we should forget old cached info.
+	 * All chunks should be clean, but some might need_sync.
+	 */
+	while (sector < mddev->resync_max_sectors) {
+		int blocks;
+		bitmap_start_sync(bitmap, sector, &blocks, 0);
+		sector += blocks;
+	}
+	bitmap_close_sync(bitmap);
+
+	if (mddev->bitmap_info.log) {
+		unsigned long i;
+		struct dm_dirty_log *log = mddev->bitmap_info.log;
+
+		for (i = 0; i < bitmap->chunks; i++)
+			if (!log->type->in_sync(log, i, 1))
+				bitmap_set_memory_bits(bitmap,
+						       (sector_t)i << CHUNK_BLOCK_SHIFT(bitmap),
+						       1);
+	} else {
+		sector_t start = 0;
+		if (mddev->degraded == 0
+		    || bitmap->events_cleared == mddev->events)
+			/* no need to keep dirty bits to optimise a
+			 * re-add of a missing device */
+			start = mddev->recovery_cp;
+
+		err = bitmap_init_from_disk(bitmap, start);
+	}
+	if (err)
+		goto out;
+
+	mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
+	md_wakeup_thread(mddev->thread);
+
+	bitmap_update_sb(bitmap);
+
+	if (bitmap->flags & BITMAP_WRITE_ERROR)
+		err = -EIO;
+out:
+	return err;
+}
+EXPORT_SYMBOL_GPL(bitmap_load);
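The initialisation that bitmap_create() used to perform now lives in bitmap_load(): a caller first creates the structures, then populates the in-memory bits later, either from the on-disk bitmap or from a dm dirty log. A hedged user-space sketch of the resulting two-step sequence (both functions are stand-in stubs, and the mddev struct is reduced to one flag):

/* Hedged sketch (user-space mock) of the create/load split. */
#include <stdio.h>

struct mddev { int bitmap_created; };

static int bitmap_create(struct mddev *m)	/* stand-in */
{
	m->bitmap_created = 1;
	printf("structures allocated, nothing read yet\n");
	return 0;
}

static int bitmap_load(struct mddev *m)		/* stand-in */
{
	if (!m->bitmap_created)
		return -1;
	printf("in-memory bits initialised from disk or dirty-log\n");
	return 0;
}

int main(void)
{
	struct mddev m = { 0 };

	if (bitmap_create(&m) == 0 &&	/* during array setup */
	    bitmap_load(&m) == 0)	/* once the array is running */
		printf("bitmap active\n");
	return 0;
}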
 
 static ssize_t
 location_show(mddev_t *mddev, char *page)
 {
 	ssize_t len;
-	if (mddev->bitmap_info.file) {
+	if (mddev->bitmap_info.file)
 		len = sprintf(page, "file");
-	} else if (mddev->bitmap_info.offset) {
+	else if (mddev->bitmap_info.offset)
 		len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
-	} else
+	else
 		len = sprintf(page, "none");
 	len += sprintf(page+len, "\n");
 	return len;
@@ -1867,7 +1926,7 @@ timeout_show(mddev_t *mddev, char *page)
 	ssize_t len;
 	unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
 	unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;
 
 	len = sprintf(page, "%lu", secs);
 	if (jifs)
 		len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
@@ -2049,12 +2108,3 @@ struct attribute_group md_bitmap_group = {
 	.attrs = md_bitmap_attrs,
 };
-
-/* the bitmap API -- for raid personalities */
-EXPORT_SYMBOL(bitmap_startwrite);
-EXPORT_SYMBOL(bitmap_endwrite);
-EXPORT_SYMBOL(bitmap_start_sync);
-EXPORT_SYMBOL(bitmap_end_sync);
-EXPORT_SYMBOL(bitmap_unplug);
-EXPORT_SYMBOL(bitmap_close_sync);
-EXPORT_SYMBOL(bitmap_cond_end_sync);
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -222,6 +222,10 @@ struct bitmap {
 	unsigned long file_pages; /* number of pages in the file */
 	int last_page_size; /* bytes in the last page */
 
+	unsigned long logattrs; /* used when filemap_attr doesn't exist
+				 * because we are working with a dirty_log
+				 */
+
 	unsigned long flags;
 
 	int allclean;
@@ -243,12 +247,14 @@ struct bitmap {
 	wait_queue_head_t behind_wait;
 
 	struct sysfs_dirent *sysfs_can_clear;
 };
 
 /* the bitmap API */
 
 /* these are used only by md/bitmap */
 int  bitmap_create(mddev_t *mddev);
+int bitmap_load(mddev_t *mddev);
 void bitmap_flush(mddev_t *mddev);
 void bitmap_destroy(mddev_t *mddev);
...
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -262,7 +262,7 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
  * Once ->stop is called and completes, the module will be completely
  * unused.
  */
-static void mddev_suspend(mddev_t *mddev)
+void mddev_suspend(mddev_t *mddev)
 {
 	BUG_ON(mddev->suspended);
 	mddev->suspended = 1;
@@ -270,13 +270,15 @@ static void mddev_suspend(mddev_t *mddev)
 	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
 	mddev->pers->quiesce(mddev, 1);
 }
+EXPORT_SYMBOL_GPL(mddev_suspend);
 
-static void mddev_resume(mddev_t *mddev)
+void mddev_resume(mddev_t *mddev)
 {
 	mddev->suspended = 0;
 	wake_up(&mddev->sb_wait);
 	mddev->pers->quiesce(mddev, 0);
 }
+EXPORT_SYMBOL_GPL(mddev_resume);
 
 int mddev_congested(mddev_t *mddev, int bits)
 {
@@ -385,6 +387,51 @@ void md_barrier_request(mddev_t *mddev, struct bio *bio)
 }
 EXPORT_SYMBOL(md_barrier_request);
 
+/* Support for plugging.
+ * This mirrors the plugging support in request_queue, but does not
+ * require having a whole queue
+ */
+static void plugger_work(struct work_struct *work)
+{
+	struct plug_handle *plug =
+		container_of(work, struct plug_handle, unplug_work);
+	plug->unplug_fn(plug);
+}
+
+static void plugger_timeout(unsigned long data)
+{
+	struct plug_handle *plug = (void *)data;
+	kblockd_schedule_work(NULL, &plug->unplug_work);
+}
+
+void plugger_init(struct plug_handle *plug,
+		  void (*unplug_fn)(struct plug_handle *))
+{
+	plug->unplug_flag = 0;
+	plug->unplug_fn = unplug_fn;
+	init_timer(&plug->unplug_timer);
+	plug->unplug_timer.function = plugger_timeout;
+	plug->unplug_timer.data = (unsigned long)plug;
+	INIT_WORK(&plug->unplug_work, plugger_work);
+}
+EXPORT_SYMBOL_GPL(plugger_init);
+
+void plugger_set_plug(struct plug_handle *plug)
+{
+	if (!test_and_set_bit(PLUGGED_FLAG, &plug->unplug_flag))
+		mod_timer(&plug->unplug_timer, jiffies + msecs_to_jiffies(3)+1);
+}
+EXPORT_SYMBOL_GPL(plugger_set_plug);
+
+int plugger_remove_plug(struct plug_handle *plug)
+{
+	if (test_and_clear_bit(PLUGGED_FLAG, &plug->unplug_flag)) {
+		del_timer(&plug->unplug_timer);
+		return 1;
+	} else
+		return 0;
+}
+EXPORT_SYMBOL_GPL(plugger_remove_plug);
+
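The plugger mirrors request_queue plugging for code that has no queue of its own: plugger_set_plug() arms a short timer, the timer hands the registered unplug_fn to kblockd, and plugger_remove_plug() reports whether a plug was pending so the caller can unplug synchronously instead. A hedged user-space sketch of that lifecycle, with the timer and workqueue machinery collapsed into direct calls (everything except the plugger_* call sequence is invented):

/* Hedged user-space sketch of the plugger lifecycle shown above. */
#include <stdio.h>

struct plug_handle {
	int plugged;
	void (*unplug_fn)(struct plug_handle *);
};

static void plugger_init(struct plug_handle *plug,
			 void (*unplug_fn)(struct plug_handle *))
{
	plug->plugged = 0;
	plug->unplug_fn = unplug_fn;	/* kernel version also sets up a timer */
}

static void plugger_set_plug(struct plug_handle *plug)
{
	if (!plug->plugged) {
		plug->plugged = 1;	/* kernel: mod_timer(..., +~3ms) */
		printf("plugged: hold I/O briefly so it can be batched\n");
	}
}

static int plugger_remove_plug(struct plug_handle *plug)
{
	if (plug->plugged) {
		plug->plugged = 0;	/* kernel: del_timer() */
		return 1;
	}
	return 0;
}

static void my_unplug(struct plug_handle *plug)	/* per-array callback */
{
	printf("unplug: flush bitmap, release queued writes\n");
}

int main(void)
{
	struct plug_handle plug;

	plugger_init(&plug, my_unplug);
	plugger_set_plug(&plug);	/* on each incoming write */
	if (plugger_remove_plug(&plug))	/* timer fired, or resync wants I/O now */
		plug.unplug_fn(&plug);
	return 0;
}

The point of the design is that raid5 and friends can keep the batching behaviour of queue plugging during resync/recovery even when driven by dm, where no request_queue belongs to md.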
 static inline mddev_t *mddev_get(mddev_t *mddev)
 {
 	atomic_inc(&mddev->active);
@@ -417,7 +464,7 @@ static void mddev_put(mddev_t *mddev)
 	spin_unlock(&all_mddevs_lock);
 }
 
-static void mddev_init(mddev_t *mddev)
+void mddev_init(mddev_t *mddev)
 {
 	mutex_init(&mddev->open_mutex);
 	mutex_init(&mddev->reconfig_mutex);
@@ -437,6 +484,7 @@ static void mddev_init(mddev_t *mddev)
 	mddev->resync_max = MaxSector;
 	mddev->level = LEVEL_NONE;
 }
+EXPORT_SYMBOL_GPL(mddev_init);
 
 static mddev_t * mddev_find(dev_t unit)
 {
@@ -533,25 +581,31 @@ static void mddev_unlock(mddev_t * mddev)
 		 * an access to the files will try to take reconfig_mutex
 		 * while holding the file unremovable, which leads to
 		 * a deadlock.
-		 * So hold open_mutex instead - we are allowed to take
-		 * it while holding reconfig_mutex, and md_run can
-		 * use it to wait for the remove to complete.
+		 * So hold set sysfs_active while the remove in happeing,
+		 * and anything else which might set ->to_remove or my
+		 * otherwise change the sysfs namespace will fail with
+		 * -EBUSY if sysfs_active is still set.
+		 * We set sysfs_active under reconfig_mutex and elsewhere
+		 * test it under the same mutex to ensure its correct value
+		 * is seen.
 		 */
 		struct attribute_group *to_remove = mddev->to_remove;
 		mddev->to_remove = NULL;
-		mutex_lock(&mddev->open_mutex);
+		mddev->sysfs_active = 1;
 		mutex_unlock(&mddev->reconfig_mutex);
 
-		if (to_remove != &md_redundancy_group)
-			sysfs_remove_group(&mddev->kobj, to_remove);
-		if (mddev->pers == NULL ||
-		    mddev->pers->sync_request == NULL) {
-			sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
-			if (mddev->sysfs_action)
-				sysfs_put(mddev->sysfs_action);
-			mddev->sysfs_action = NULL;
+		if (mddev->kobj.sd) {
+			if (to_remove != &md_redundancy_group)
+				sysfs_remove_group(&mddev->kobj, to_remove);
+			if (mddev->pers == NULL ||
+			    mddev->pers->sync_request == NULL) {
+				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
+				if (mddev->sysfs_action)
+					sysfs_put(mddev->sysfs_action);
+				mddev->sysfs_action = NULL;
+			}
 		}
-		mutex_unlock(&mddev->open_mutex);
+		mddev->sysfs_active = 0;
 	} else
 		mutex_unlock(&mddev->reconfig_mutex);
@@ -1812,11 +1866,9 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
		goto fail;

	ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
	if (sysfs_create_link(&rdev->kobj, ko, "block"))
		/* failure here is OK */;
	rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");

	list_add_rcu(&rdev->same_set, &mddev->disks);
	bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
@@ -2335,8 +2387,8 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
		set_bit(In_sync, &rdev->flags);
		err = 0;
	}
	if (!err)
		sysfs_notify_dirent_safe(rdev->sysfs_state);
	return err ? err : len;
}
static struct rdev_sysfs_entry rdev_state =
@@ -2431,14 +2483,10 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
			rdev->raid_disk = -1;
			return err;
		} else
			sysfs_notify_dirent_safe(rdev->sysfs_state);
		sprintf(nm, "rd%d", rdev->raid_disk);
		if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
			/* failure here is OK */;
		/* don't wakeup anyone, leave that to userspace. */
	} else {
		if (slot >= rdev->mddev->raid_disks)
@@ -2448,7 +2496,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
		clear_bit(Faulty, &rdev->flags);
		clear_bit(WriteMostly, &rdev->flags);
		set_bit(In_sync, &rdev->flags);
		sysfs_notify_dirent_safe(rdev->sysfs_state);
	}
	return len;
}
@@ -2696,6 +2744,24 @@ static struct kobj_type rdev_ktype = {
	.default_attrs	= rdev_default_attrs,
};
void md_rdev_init(mdk_rdev_t *rdev)
{
	rdev->desc_nr = -1;
	rdev->saved_raid_disk = -1;
	rdev->raid_disk = -1;
	rdev->flags = 0;
	rdev->data_offset = 0;
	rdev->sb_events = 0;
	rdev->last_read_error.tv_sec  = 0;
	rdev->last_read_error.tv_nsec = 0;
	atomic_set(&rdev->nr_pending, 0);
	atomic_set(&rdev->read_errors, 0);
	atomic_set(&rdev->corrected_errors, 0);

	INIT_LIST_HEAD(&rdev->same_set);
	init_waitqueue_head(&rdev->blocked_wait);
}
EXPORT_SYMBOL_GPL(md_rdev_init);
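Exporting md_rdev_init lets code outside md.c (the dm bridge this series prepares for) set up an rdev it allocated itself, rather than going through md_import_device. A rough sketch of such a caller; the function name and the direct bdev assignment are hypothetical:

/* Hypothetical external caller; error handling elided. */
static mdk_rdev_t *alloc_external_rdev(struct block_device *bdev)
{
	mdk_rdev_t *rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);

	if (!rdev)
		return NULL;
	md_rdev_init(rdev);	/* lists, waitqueue, counters, -1 slots */
	rdev->bdev = bdev;	/* caller supplies the device */
	return rdev;
}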
/*
 * Import a device. If 'super_format' >= 0, then sanity check the superblock
 *
@@ -2719,6 +2785,7 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
		return ERR_PTR(-ENOMEM);
	}

	md_rdev_init(rdev);
	if ((err = alloc_disk_sb(rdev)))
		goto abort_free;

@@ -2728,18 +2795,6 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
	kobject_init(&rdev->kobj, &rdev_ktype);

	size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
	if (!size) {
		printk(KERN_WARNING
@@ -2768,9 +2823,6 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
		}
	}

	return rdev;

abort_free:
@@ -2961,7 +3013,9 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
	 *  - new personality will access other array.
	 */
	if (mddev->sync_thread ||
	    mddev->reshape_position != MaxSector ||
	    mddev->sysfs_active)
		return -EBUSY;

	if (!mddev->pers->quiesce) {
@@ -3438,7 +3492,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
	if (err)
		return err;
	else {
		sysfs_notify_dirent_safe(mddev->sysfs_state);
		return len;
	}
}
@@ -3736,7 +3790,7 @@ action_store(mddev_t *mddev, const char *page, size_t len)
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	return len;
}
@@ -4282,13 +4336,14 @@ static int md_alloc(dev_t dev, char *name)
		       disk->disk_name);
		error = 0;
	}
	if (mddev->kobj.sd &&
	    sysfs_create_group(&mddev->kobj, &md_bitmap_group))
		printk(KERN_DEBUG "pointless warning\n");
 abort:
	mutex_unlock(&disks_mutex);
	if (!error && mddev->kobj.sd) {
		kobject_uevent(&mddev->kobj, KOBJ_ADD);
		mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
	}
	mddev_put(mddev);
	return error;
@@ -4326,14 +4381,14 @@ static void md_safemode_timeout(unsigned long data)
	if (!atomic_read(&mddev->writes_pending)) {
		mddev->safemode = 1;
		if (mddev->external)
			sysfs_notify_dirent_safe(mddev->sysfs_state);
	}
	md_wakeup_thread(mddev->thread);
}
static int start_dirty_degraded;

int md_run(mddev_t *mddev)
{
	int err;
	mdk_rdev_t *rdev;
@@ -4345,13 +4400,9 @@ static int md_run(mddev_t *mddev)
	if (mddev->pers)
		return -EBUSY;
	/* Cannot run until previous stop completes properly */
	if (mddev->sysfs_active)
		return -EBUSY;

	/*
	 * Analyze all RAID superblock(s)
@@ -4398,7 +4449,7 @@ static int md_run(mddev_t *mddev)
				return -EINVAL;
			}
		}
		sysfs_notify_dirent_safe(rdev->sysfs_state);
	}

	spin_lock(&pers_lock);
@@ -4497,11 +4548,12 @@ static int md_run(mddev_t *mddev)
		return err;
	}
	if (mddev->pers->sync_request) {
		if (mddev->kobj.sd &&
		    sysfs_create_group(&mddev->kobj, &md_redundancy_group))
			printk(KERN_WARNING
			       "md: cannot register extra attributes for %s\n",
			       mdname(mddev));
		mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
	} else if (mddev->ro == 2) /* auto-readonly not meaningful */
		mddev->ro = 0;
@@ -4519,8 +4571,7 @@ static int md_run(mddev_t *mddev)
			char nm[20];
			sprintf(nm, "rd%d", rdev->raid_disk);
			if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
				/* failure here is OK */;
		}

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -4532,12 +4583,12 @@ static int md_run(mddev_t *mddev)
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */

	md_new_event(mddev);
	sysfs_notify_dirent_safe(mddev->sysfs_state);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	sysfs_notify(&mddev->kobj, NULL, "degraded");
	return 0;
}
EXPORT_SYMBOL_GPL(md_run);
static int do_md_run(mddev_t *mddev)
{
@@ -4546,7 +4597,11 @@ static int do_md_run(mddev_t *mddev)
	err = md_run(mddev);
	if (err)
		goto out;
	err = bitmap_load(mddev);
	if (err) {
		bitmap_destroy(mddev);
		goto out;
	}

	set_capacity(mddev->gendisk, mddev->array_sectors);
	revalidate_disk(mddev->gendisk);
	kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
@@ -4574,7 +4629,7 @@ static int restart_array(mddev_t *mddev)
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread);
	sysfs_notify_dirent_safe(mddev->sysfs_state);
	return 0;
}
@@ -4645,9 +4700,10 @@ static void md_clean(mddev_t *mddev)
	mddev->bitmap_info.chunksize = 0;
	mddev->bitmap_info.daemon_sleep = 0;
	mddev->bitmap_info.max_write_behind = 0;
	mddev->plug = NULL;
}

void md_stop_writes(mddev_t *mddev)
{
	if (mddev->sync_thread) {
		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
@@ -4667,11 +4723,10 @@ static void md_stop_writes(mddev_t *mddev)
		md_update_sb(mddev, 1);
	}
}
EXPORT_SYMBOL_GPL(md_stop_writes);
void md_stop(mddev_t *mddev)
{
	mddev->pers->stop(mddev);
	if (mddev->pers->sync_request && mddev->to_remove == NULL)
		mddev->to_remove = &md_redundancy_group;
@@ -4679,6 +4734,7 @@ static void md_stop(mddev_t *mddev)
	mddev->pers = NULL;
	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
}
EXPORT_SYMBOL_GPL(md_stop);
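Taken together, mddev_init/md_run/md_stop_writes/md_stop give an embedding driver a complete start/stop path without going through the ioctl layer. A condensed sketch of the intended call order; the wrapper functions are hypothetical, and locking is elided (md's own callers run these under mddev_lock()):

/* Hypothetical embedding driver, e.g. a dm target wrapping md. */
static int wrap_start(mddev_t *mddev)
{
	mddev_init(mddev);		/* locks, lists, defaults */
	/* ... caller binds rdevs and sets level/layout here ... */
	return md_run(mddev);		/* -EBUSY while sysfs deletes
					 * are still in progress */
}

static void wrap_stop(mddev_t *mddev)
{
	md_stop_writes(mddev);		/* quiesce + final sb update */
	md_stop(mddev);			/* tear down the personality */
}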
static int md_set_readonly(mddev_t *mddev, int is_open)
{
@@ -4698,7 +4754,7 @@ static int md_set_readonly(mddev_t *mddev, int is_open)
		mddev->ro = 1;
		set_disk_ro(mddev->gendisk, 1);
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		sysfs_notify_dirent_safe(mddev->sysfs_state);
		err = 0;
	}
out:
@@ -4712,26 +4768,29 @@ static int md_set_readonly(mddev_t *mddev, int is_open)
 */
static int do_md_stop(mddev_t * mddev, int mode, int is_open)
{
	struct gendisk *disk = mddev->gendisk;
	mdk_rdev_t *rdev;

	mutex_lock(&mddev->open_mutex);
	if (atomic_read(&mddev->openers) > is_open ||
	    mddev->sysfs_active) {
		printk("md: %s still in use.\n",mdname(mddev));
		mutex_unlock(&mddev->open_mutex);
		return -EBUSY;
	}
	if (mddev->pers) {
		if (mddev->ro)
			set_disk_ro(disk, 0);

		md_stop_writes(mddev);
		md_stop(mddev);
		mddev->queue->merge_bvec_fn = NULL;
		mddev->queue->unplug_fn = NULL;
		mddev->queue->backing_dev_info.congested_fn = NULL;

		/* tell userspace to handle 'inactive' */
		sysfs_notify_dirent_safe(mddev->sysfs_state);

		list_for_each_entry(rdev, &mddev->disks, same_set)
			if (rdev->raid_disk >= 0) {
@@ -4741,21 +4800,17 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
			}

		set_capacity(disk, 0);
		mutex_unlock(&mddev->open_mutex);
		revalidate_disk(disk);

		if (mddev->ro)
			mddev->ro = 0;
	} else
		mutex_unlock(&mddev->open_mutex);
	/*
	 * Free resources if final stop
	 */
	if (mode == 0) {
		printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));

		bitmap_destroy(mddev);
@@ -4772,13 +4827,11 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
		kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
		if (mddev->hold_active == UNTIL_STOP)
			mddev->hold_active = 0;
	}
	blk_integrity_unregister(disk);
	md_new_event(mddev);
	sysfs_notify_dirent_safe(mddev->sysfs_state);
	return 0;
}
#ifndef MODULE
@@ -5139,7 +5192,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
		if (err)
			export_rdev(rdev);
		else
			sysfs_notify_dirent_safe(rdev->sysfs_state);

		md_update_sb(mddev, 1);
		if (mddev->degraded)
@@ -5332,8 +5385,11 @@ static int set_bitmap_file(mddev_t *mddev, int fd)
	err = 0;
	if (mddev->pers) {
		mddev->pers->quiesce(mddev, 1);
		if (fd >= 0) {
			err = bitmap_create(mddev);
			if (!err)
				err = bitmap_load(mddev);
		}
		if (fd < 0 || err) {
			bitmap_destroy(mddev);
			fd = -1; /* make sure to put the file */
@@ -5582,6 +5638,8 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
					mddev->bitmap_info.default_offset;
				mddev->pers->quiesce(mddev, 1);
				rv = bitmap_create(mddev);
				if (!rv)
					rv = bitmap_load(mddev);
				if (rv)
					bitmap_destroy(mddev);
				mddev->pers->quiesce(mddev, 0);
@@ -5814,7 +5872,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
	if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
		if (mddev->ro == 2) {
			mddev->ro = 0;
			sysfs_notify_dirent_safe(mddev->sysfs_state);
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			md_wakeup_thread(mddev->thread);
		} else {
@@ -6065,10 +6123,12 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
	mddev->pers->error_handler(mddev,rdev);
	if (mddev->degraded)
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	sysfs_notify_dirent_safe(rdev->sysfs_state);
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	if (mddev->event_work.func)
		schedule_work(&mddev->event_work);
	md_new_event_inintr(mddev);
}
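md_error only schedules mddev->event_work when a handler has been installed, so in-kernel users are unaffected; an embedding driver opts in roughly like this (the callback name is hypothetical):

/* Hypothetical dm-side hookup for failure events. */
static void report_failure_event(struct work_struct *ws)
{
	mddev_t *mddev = container_of(ws, mddev_t, event_work);

	/* forward to dm from process context; md_error() itself
	 * may be called from interrupt context, hence the work item */
}

static void hook_events(mddev_t *mddev)
{
	INIT_WORK(&mddev->event_work, report_failure_event);
}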
@@ -6526,7 +6586,7 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
		spin_unlock_irq(&mddev->write_lock);
	}
	if (did_change)
		sysfs_notify_dirent_safe(mddev->sysfs_state);
	wait_event(mddev->sb_wait,
		   !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
		   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
@@ -6569,7 +6629,7 @@ int md_allow_write(mddev_t *mddev)
		mddev->safemode = 1;
		spin_unlock_irq(&mddev->write_lock);
		md_update_sb(mddev, 0);
		sysfs_notify_dirent_safe(mddev->sysfs_state);
	} else
		spin_unlock_irq(&mddev->write_lock);
@@ -6580,6 +6640,14 @@ int md_allow_write(mddev_t *mddev)
}
EXPORT_SYMBOL_GPL(md_allow_write);
void md_unplug(mddev_t *mddev)
{
	if (mddev->queue)
		blk_unplug(mddev->queue);
	if (mddev->plug)
		mddev->plug->unplug_fn(mddev->plug);
}
#define SYNC_MARKS	10
#define	SYNC_MARK_STEP	(3*HZ)
void md_do_sync(mddev_t *mddev)
@@ -6758,12 +6826,13 @@ void md_do_sync(mddev_t *mddev)
		    >= mddev->resync_max - mddev->curr_resync_completed
			    )) {
			/* time to update curr_resync_completed */
			md_unplug(mddev);
			wait_event(mddev->recovery_wait,
				   atomic_read(&mddev->recovery_active) == 0);
			mddev->curr_resync_completed =
				mddev->curr_resync;
			if (mddev->persistent)
				set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
		}
@@ -6835,7 +6904,7 @@ void md_do_sync(mddev_t *mddev)
		 * about not overloading the IO subsystem. (things like an
		 * e2fsck being done on the RAID array should execute fast)
		 */
		md_unplug(mddev);
		cond_resched();

		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
@@ -6854,7 +6923,7 @@ void md_do_sync(mddev_t *mddev)
	 * this also signals 'finished resyncing' to md_stop
	 */
 out:
	md_unplug(mddev);

	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
@@ -6956,10 +7025,7 @@ static int remove_and_add_spares(mddev_t *mddev)
				sprintf(nm, "rd%d", rdev->raid_disk);
				if (sysfs_create_link(&mddev->kobj,
						      &rdev->kobj, nm))
					/* failure here is OK */;
				spares++;
				md_new_event(mddev);
				set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -7052,7 +7118,7 @@ void md_check_recovery(mddev_t *mddev)
			mddev->safemode = 0;
			spin_unlock_irq(&mddev->write_lock);
			if (did_change)
				sysfs_notify_dirent_safe(mddev->sysfs_state);
		}

		if (mddev->flags)
@@ -7091,7 +7157,7 @@ void md_check_recovery(mddev_t *mddev)
			mddev->recovery = 0;
			/* flag recovery needed just to double check */
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			sysfs_notify_dirent_safe(mddev->sysfs_action);
			md_new_event(mddev);
			goto unlock;
		}
@@ -7153,7 +7219,7 @@ void md_check_recovery(mddev_t *mddev)
				mddev->recovery = 0;
			} else
				md_wakeup_thread(mddev->sync_thread);
			sysfs_notify_dirent_safe(mddev->sysfs_action);
			md_new_event(mddev);
		}
	unlock:
@@ -7162,7 +7228,7 @@ void md_check_recovery(mddev_t *mddev)
			if (test_and_clear_bit(MD_RECOVERY_RECOVER,
					       &mddev->recovery))
				if (mddev->sysfs_action)
					sysfs_notify_dirent_safe(mddev->sysfs_action);
		}
		mddev_unlock(mddev);
	}
@@ -7170,7 +7236,7 @@ void md_check_recovery(mddev_t *mddev)
void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
	sysfs_notify_dirent_safe(rdev->sysfs_state);
	wait_event_timeout(rdev->blocked_wait,
			   !test_bit(Blocked, &rdev->flags),
			   msecs_to_jiffies(5000));
......
@@ -29,6 +29,26 @@
typedef struct mddev_s mddev_t;
typedef struct mdk_rdev_s mdk_rdev_t;
/* generic plugging support - like that provided with request_queue,
 * but does not require a request_queue
 */
struct plug_handle {
	void			(*unplug_fn)(struct plug_handle *);
	struct timer_list	unplug_timer;
	struct work_struct	unplug_work;
	unsigned long		unplug_flag;
};
#define	PLUGGED_FLAG 1
void plugger_init(struct plug_handle *plug,
		  void (*unplug_fn)(struct plug_handle *));
void plugger_set_plug(struct plug_handle *plug);
int plugger_remove_plug(struct plug_handle *plug);
static inline void plugger_flush(struct plug_handle *plug)
{
	del_timer_sync(&plug->unplug_timer);
	cancel_work_sync(&plug->unplug_work);
}
/*
 * MD's 'extended' device
 */
@@ -125,6 +145,10 @@ struct mddev_s
	int				suspended;
	atomic_t			active_io;
	int				ro;
	int				sysfs_active; /* set when sysfs deletes
						       * are happening, so run/
						       * takeover/stop are not safe
						       */
	struct gendisk			*gendisk;
@@ -297,9 +321,14 @@ struct mddev_s
						 * hot-adding a bitmap.  It should
						 * eventually be settable by sysfs.
						 */
		/* When md is serving under dm, it might use a
		 * dirty_log to store the bits.
		 */
		struct dm_dirty_log *log;

		struct mutex	mutex;
		unsigned long	chunksize;
		unsigned long	daemon_sleep; /* how many jiffies between updates? */
		unsigned long	max_write_behind; /* write-behind mode */
		int		external;
	} bitmap_info;
@@ -308,6 +337,8 @@ struct mddev_s
	struct list_head		all_mddevs;

	struct attribute_group		*to_remove;
	struct plug_handle		*plug; /* if used by personality */

	/* Generic barrier handling.
	 * If there is a pending barrier request, all other
	 * writes are blocked while the devices are flushed.
@@ -318,6 +349,7 @@ struct mddev_s
	struct bio			*barrier;
	atomic_t			flush_pending;
	struct work_struct		barrier_work;
	struct work_struct		event_work;	/* used by dm to report failure event */
};
@@ -382,6 +414,18 @@ struct md_sysfs_entry {
};
extern struct attribute_group md_bitmap_group;
static inline struct sysfs_dirent *sysfs_get_dirent_safe(struct sysfs_dirent *sd, char *name)
{
	if (sd)
		return sysfs_get_dirent(sd, NULL, name);
	return sd;
}
static inline void sysfs_notify_dirent_safe(struct sysfs_dirent *sd)
{
	if (sd)
		sysfs_notify_dirent(sd);
}
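These wrappers exist because an array brought up without a sysfs presence (the dm case this series prepares for) has mddev->kobj.sd == NULL, and dirents such as rdev->sysfs_state stay NULL with it; the _safe variants turn both the lookup and the notification into no-ops instead of NULL dereferences. A small usage sketch (the function is hypothetical):

/* Illustrative only: with no sysfs presence (mddev->kobj.sd == NULL),
 * both calls below quietly do nothing instead of dereferencing NULL. */
static void example_state_notify(mddev_t *mddev)
{
	mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd,
						   "array_state");
	sysfs_notify_dirent_safe(mddev->sysfs_state);
}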
static inline char * mdname (mddev_t * mddev)
{
	return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
@@ -474,5 +518,14 @@ extern int md_integrity_register(mddev_t *mddev);
extern void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern void restore_bitmap_write_access(struct file *file);
extern void md_unplug(mddev_t *mddev);
extern void mddev_init(mddev_t *mddev);
extern int md_run(mddev_t *mddev);
extern void md_stop(mddev_t *mddev);
extern void md_stop_writes(mddev_t *mddev);
extern void md_rdev_init(mdk_rdev_t *rdev);
extern void mddev_suspend(mddev_t *mddev);
extern void mddev_resume(mddev_t *mddev);
#endif /* _MD_MD_H */
@@ -825,11 +825,29 @@ static int make_request(mddev_t *mddev, struct bio * bio)
		 */
		bp = bio_split(bio,
			       chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
		/* Each of these 'make_request' calls will call 'wait_barrier'.
		 * If the first succeeds but the second blocks due to the resync
		 * thread raising the barrier, we will deadlock because the
		 * IO to the underlying device will be queued in generic_make_request
		 * and will never complete, so will never reduce nr_pending.
		 * So increment nr_waiting here so no new raise_barriers will
		 * succeed, and so the second wait_barrier cannot block.
		 */
		spin_lock_irq(&conf->resync_lock);
		conf->nr_waiting++;
		spin_unlock_irq(&conf->resync_lock);
		if (make_request(mddev, &bp->bio1))
			generic_make_request(&bp->bio1);
		if (make_request(mddev, &bp->bio2))
			generic_make_request(&bp->bio2);

		spin_lock_irq(&conf->resync_lock);
		conf->nr_waiting--;
		wake_up(&conf->wait_barrier);
		spin_unlock_irq(&conf->resync_lock);

		bio_pair_release(bp);
		return 0;
	bad_map:
......
@@ -201,11 +201,11 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
		if (test_bit(STRIPE_HANDLE, &sh->state)) {
			if (test_bit(STRIPE_DELAYED, &sh->state)) {
				list_add_tail(&sh->lru, &conf->delayed_list);
				plugger_set_plug(&conf->plug);
			} else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
				   sh->bm_seq - conf->seq_write > 0) {
				list_add_tail(&sh->lru, &conf->bitmap_list);
				plugger_set_plug(&conf->plug);
			} else {
				clear_bit(STRIPE_BIT_DELAY, &sh->state);
				list_add_tail(&sh->lru, &conf->handle_list);
@@ -434,7 +434,6 @@ static int has_failed(raid5_conf_t *conf)
}

static void unplug_slaves(mddev_t *mddev);
static struct stripe_head *
get_active_stripe(raid5_conf_t *conf, sector_t sector,
@@ -464,7 +463,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector,
				    < (conf->max_nr_stripes *3/4)
				    || !conf->inactive_blocked),
				   conf->device_lock,
				   md_raid5_unplug_device(conf)
			);
			conf->inactive_blocked = 0;
		} else
@@ -1337,10 +1336,14 @@ static int grow_stripes(raid5_conf_t *conf, int num)
	struct kmem_cache *sc;
	int devs = max(conf->raid_disks, conf->previous_raid_disks);

	if (conf->mddev->gendisk)
		sprintf(conf->cache_name[0],
			"raid%d-%s", conf->level, mdname(conf->mddev));
	else
		sprintf(conf->cache_name[0],
			"raid%d-%p", conf->level, conf->mddev);
	sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);

	conf->active_name = 0;
	sc = kmem_cache_create(conf->cache_name[conf->active_name],
			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
@@ -3614,7 +3617,7 @@ static void raid5_activate_delayed(raid5_conf_t *conf)
			list_add_tail(&sh->lru, &conf->hold_list);
		}
	} else
		plugger_set_plug(&conf->plug);
}

static void activate_bit_delay(raid5_conf_t *conf)
@@ -3655,36 +3658,44 @@ static void unplug_slaves(mddev_t *mddev)
	rcu_read_unlock();
}
void md_raid5_unplug_device(raid5_conf_t *conf)
{
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);

	if (plugger_remove_plug(&conf->plug)) {
		conf->seq_flush++;
		raid5_activate_delayed(conf);
	}
	md_wakeup_thread(conf->mddev->thread);

	spin_unlock_irqrestore(&conf->device_lock, flags);

	unplug_slaves(conf->mddev);
}
EXPORT_SYMBOL_GPL(md_raid5_unplug_device);

static void raid5_unplug(struct plug_handle *plug)
{
	raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug);
	md_raid5_unplug_device(conf);
}

static void raid5_unplug_queue(struct request_queue *q)
{
	mddev_t *mddev = q->queuedata;
	md_raid5_unplug_device(mddev->private);
}

int md_raid5_congested(mddev_t *mddev, int bits)
{
	raid5_conf_t *conf = mddev->private;

	/* No difference between reads and writes.  Just check
	 * how busy the stripe_cache is
	 */
	if (conf->inactive_blocked)
		return 1;
	if (conf->quiesce)
@@ -3694,6 +3705,15 @@ static int raid5_congested(void *data, int bits)
	return 0;
}
EXPORT_SYMBOL_GPL(md_raid5_congested);
static int raid5_congested(void *data, int bits)
{
	mddev_t *mddev = data;

	return mddev_congested(mddev, bits) ||
		md_raid5_congested(mddev, bits);
}
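The split leaves raid5_congested as a thin queue-facing wrapper while md_raid5_congested takes only the conf-owning mddev, so a stacked driver can poll stripe-cache pressure directly. A hypothetical external caller, sketched under the assumption that it stacks on a raid456 mddev:

/* Hypothetical congested_fn in a driver stacking on raid456. */
static int stacked_congested(void *data, int bits)
{
	mddev_t *mddev = data;

	/* non-zero while the stripe cache is saturated, quiesced,
	 * or has no inactive stripes left */
	return md_raid5_congested(mddev, bits);
}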
/* We want read requests to align with chunks where possible,
 * but write requests don't need to.
@@ -4075,7 +4095,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
				 * add failed due to overlap.  Flush everything
				 * and wait a while
				 */
				md_raid5_unplug_device(conf);
				release_stripe(sh);
				schedule();
				goto retry;
@@ -4566,23 +4586,15 @@ raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
	return 0;
}
int
raid5_set_cache_size(mddev_t *mddev, int size)
{
	raid5_conf_t *conf = mddev->private;
	int err;

	if (size <= 16 || size > 32768)
		return -EINVAL;
	while (size < conf->max_nr_stripes) {
		if (drop_one_stripe(conf))
			conf->max_nr_stripes--;
		else
@@ -4591,11 +4603,32 @@ raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
	err = md_allow_write(mddev);
	if (err)
		return err;
	while (size > conf->max_nr_stripes) {
		if (grow_one_stripe(conf))
			conf->max_nr_stripes++;
		else break;
	}
	return 0;
}
EXPORT_SYMBOL(raid5_set_cache_size);
static ssize_t
raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
{
	raid5_conf_t *conf = mddev->private;
	unsigned long new;
	int err;

	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (!conf)
		return -ENODEV;
	if (strict_strtoul(page, 10, &new))
		return -EINVAL;
	err = raid5_set_cache_size(mddev, new);
	if (err)
		return err;
	return len;
}
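With the parsing split out, the sysfs store is reduced to strict_strtoul plus a call into the exported helper, and an in-kernel user gets the same clamped (17..32768) grow/shrink path without sysfs. A hypothetical caller, with locking as for the sysfs path assumed:

/* Hypothetical in-kernel tuning call; no sysfs involved. */
static int double_stripe_cache(mddev_t *mddev)
{
	raid5_conf_t *conf = mddev->private;

	/* same bounds check and grow/shrink loop the sysfs file uses */
	return raid5_set_cache_size(mddev, conf->max_nr_stripes * 2);
}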
@@ -4958,7 +4991,7 @@ static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
static int run(mddev_t *mddev)
{
	raid5_conf_t *conf;
	int working_disks = 0;
	int dirty_parity_disks = 0;
	mdk_rdev_t *rdev;
	sector_t reshape_offset = 0;
@@ -5144,42 +5177,47 @@ static int run(mddev_t *mddev)
							"reshape");
	}
	/* Ok, everything is just fine now */
	if (mddev->to_remove == &raid5_attrs_group)
		mddev->to_remove = NULL;
	else if (mddev->kobj.sd &&
	    sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
		printk(KERN_WARNING
		       "raid5: failed to create sysfs attributes for %s\n",
		       mdname(mddev));
	md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));

	plugger_init(&conf->plug, raid5_unplug);
	mddev->plug = &conf->plug;
	if (mddev->queue) {
		int chunk_size;
		/* read-ahead size must cover two whole stripes, which
		 * is 2 * (datadisks) * chunksize where 'n' is the
		 * number of raid devices
		 */
		int data_disks = conf->previous_raid_disks - conf->max_degraded;
		int stripe = data_disks *
			((mddev->chunk_sectors << 9) / PAGE_SIZE);
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;

		blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);

		mddev->queue->backing_dev_info.congested_data = mddev;
		mddev->queue->backing_dev_info.congested_fn = raid5_congested;
		mddev->queue->queue_lock = &conf->device_lock;
		mddev->queue->unplug_fn = raid5_unplug_queue;

		chunk_size = mddev->chunk_sectors << 9;
		blk_queue_io_min(mddev->queue, chunk_size);
		blk_queue_io_opt(mddev->queue, chunk_size *
				 (conf->raid_disks - conf->max_degraded));

		list_for_each_entry(rdev, &mddev->disks, same_set)
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
	}

	return 0;
abort:
@@ -5200,8 +5238,9 @@ static int stop(mddev_t *mddev)
	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	if (mddev->queue)
		mddev->queue->backing_dev_info.congested_fn = NULL;
	plugger_flush(&conf->plug); /* the unplug fn references 'conf'*/
	free_conf(conf);
	mddev->private = NULL;
	mddev->to_remove = &raid5_attrs_group;
...@@ -5545,10 +5584,7 @@ static int raid5_start_reshape(mddev_t *mddev) ...@@ -5545,10 +5584,7 @@ static int raid5_start_reshape(mddev_t *mddev)
sprintf(nm, "rd%d", rdev->raid_disk); sprintf(nm, "rd%d", rdev->raid_disk);
if (sysfs_create_link(&mddev->kobj, if (sysfs_create_link(&mddev->kobj,
&rdev->kobj, nm)) &rdev->kobj, nm))
printk(KERN_WARNING /* Failure here is OK */;
"md/raid:%s: failed to create "
" link %s\n",
mdname(mddev), nm);
} else } else
break; break;
} }
@@ -5603,7 +5639,7 @@ static void end_reshape(raid5_conf_t *conf)
		/* read-ahead size must cover two whole stripes, which is
		 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices
		 */
		if (conf->mddev->queue) {
			int data_disks = conf->raid_disks - conf->max_degraded;
			int stripe = data_disks * ((conf->chunk_sectors << 9)
						   / PAGE_SIZE);
......
@@ -388,7 +388,7 @@ struct raid5_private_data {
					    * two caches.
					    */
	int			active_name;
	char			cache_name[2][32];
	struct kmem_cache	*slab_cache; /* for allocating stripes */

	int			seq_flush, seq_write;
@@ -398,6 +398,9 @@ struct raid5_private_data {
					    * (fresh device added).
					    * Cleared when a sync completes.
					    */
	struct plug_handle	plug;

	/* per cpu variables */
	struct raid5_percpu {
		struct page	*spare_page; /* Used when checking P/Q in raid6 */
@@ -497,4 +500,8 @@ static inline int algorithm_is_DDF(int layout)
{
	return layout >= 8 && layout <= 10;
}
extern int md_raid5_congested(mddev_t *mddev, int bits);
extern void md_raid5_unplug_device(raid5_conf_t *conf);
extern int raid5_set_cache_size(mddev_t *mddev, int size);
#endif
@@ -7,6 +7,9 @@ config BINARY_PRINTF
menu "Library routines"

config RAID6_PQ
	tristate

config BITREVERSE
	tristate
......
@@ -69,6 +69,7 @@ obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
obj-$(CONFIG_LZO_COMPRESS) += lzo/
obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
obj-$(CONFIG_RAID6_PQ) += raid6/

lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o
lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o
......
obj-$(CONFIG_RAID6_PQ)	+= raid6_pq.o

raid6_pq-y	+= raid6algos.o raid6recov.o raid6tables.o \
		   raid6int1.o raid6int2.o raid6int4.o \
		   raid6int8.o raid6int16.o raid6int32.o \
		   raid6altivec1.o raid6altivec2.o raid6altivec4.o \
		   raid6altivec8.o \
		   raid6mmx.o raid6sse1.o raid6sse2.o

hostprogs-y	+= mktables

quiet_cmd_unroll = UNROLL  $@
      cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) \
                   < $< > $@ || ( rm -f $@ && exit 1 )

ifeq ($(CONFIG_ALTIVEC),y)
altivec_flags := -maltivec -mabi=altivec
endif

targets += raid6int1.c
$(obj)/raid6int1.c:   UNROLL := 1
$(obj)/raid6int1.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
	$(call if_changed,unroll)

targets += raid6int2.c
$(obj)/raid6int2.c:   UNROLL := 2
$(obj)/raid6int2.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
	$(call if_changed,unroll)

targets += raid6int4.c
$(obj)/raid6int4.c:   UNROLL := 4
$(obj)/raid6int4.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
	$(call if_changed,unroll)

targets += raid6int8.c
$(obj)/raid6int8.c:   UNROLL := 8
$(obj)/raid6int8.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
	$(call if_changed,unroll)

targets += raid6int16.c
$(obj)/raid6int16.c:  UNROLL := 16
$(obj)/raid6int16.c:  $(src)/raid6int.uc $(src)/unroll.awk FORCE
	$(call if_changed,unroll)

targets += raid6int32.c
$(obj)/raid6int32.c:  UNROLL := 32
$(obj)/raid6int32.c:  $(src)/raid6int.uc $(src)/unroll.awk FORCE
	$(call if_changed,unroll)

CFLAGS_raid6altivec1.o += $(altivec_flags)
targets += raid6altivec1.c
$(obj)/raid6altivec1.c:   UNROLL := 1
$(obj)/raid6altivec1.c:   $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
	$(call if_changed,unroll)

CFLAGS_raid6altivec2.o += $(altivec_flags)
targets += raid6altivec2.c
$(obj)/raid6altivec2.c:   UNROLL := 2
$(obj)/raid6altivec2.c:   $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
	$(call if_changed,unroll)

CFLAGS_raid6altivec4.o += $(altivec_flags)
targets += raid6altivec4.c
$(obj)/raid6altivec4.c:   UNROLL := 4
$(obj)/raid6altivec4.c:   $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
	$(call if_changed,unroll)

CFLAGS_raid6altivec8.o += $(altivec_flags)
targets += raid6altivec8.c
$(obj)/raid6altivec8.c:   UNROLL := 8
$(obj)/raid6altivec8.c:   $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
	$(call if_changed,unroll)

quiet_cmd_mktable = TABLE   $@
      cmd_mktable = $(obj)/mktables > $@ || ( rm -f $@ && exit 1 )

targets += raid6tables.c
$(obj)/raid6tables.c: $(obj)/mktables FORCE
	$(call if_changed,mktable)
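For orientation, the generated raid6int$N.c files differ only in how many machine words the syndrome loop handles per iteration. Very roughly, the N=2 instantiation behaves like the following hand-written sketch; this illustrates the unrolling and the word-at-a-time GF(2^8) step, it is not the generated code (written here as user-space C assuming 64-bit words):

/* Sketch of the idea behind raid6int2: each pass advances two
 * 64-bit words of P (plain xor parity) and Q (the running parity
 * multiplied through by x in GF(2^8), polynomial 0x11d). */
#include <stdint.h>
#include <stddef.h>

#define NB(x) ((uint64_t)0x0101010101010101ULL * (x))

static inline uint64_t shlbyte(uint64_t v)	/* per-byte v << 1 */
{
	return (v << 1) & NB(0xfe);
}

static inline uint64_t mask(uint64_t v)		/* 0xff where top bit was set */
{
	uint64_t vv = v & NB(0x80);
	return (vv << 1) - (vv >> 7);	/* borrow fills the byte; overflow
					 * into the next byte cancels out */
}

/* disks-2 data blocks of 'bytes' each; the last two are P and Q */
static void raid6_int2_gen(int disks, size_t bytes, void **ptrs)
{
	uint64_t **dptr = (uint64_t **)ptrs;
	uint64_t *p = dptr[disks - 2], *q = dptr[disks - 1];
	int z0 = disks - 3;			/* highest data disk */
	size_t d, words = bytes / sizeof(uint64_t);

	for (d = 0; d < words; d += 2) {	/* 2-way unroll */
		uint64_t wp0 = dptr[z0][d],     wq0 = wp0;
		uint64_t wp1 = dptr[z0][d + 1], wq1 = wp1;
		for (int z = z0 - 1; z >= 0; z--) {
			uint64_t wd0 = dptr[z][d], wd1 = dptr[z][d + 1];
			wp0 ^= wd0;
			wp1 ^= wd1;
			wq0 = (shlbyte(wq0) ^ (mask(wq0) & NB(0x1d))) ^ wd0;
			wq1 = (shlbyte(wq1) ^ (mask(wq1) & NB(0x1d))) ^ wd1;
		}
		p[d] = wp0;	p[d + 1] = wp1;
		q[d] = wq0;	q[d + 1] = wq1;
	}
}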