Commit 2585f3ef authored by NeilBrown

md/bitmap: improve handling of 'allclean'.

The 'allclean' flag is used to cache the fact that there is nothing to
do, so we can avoid waking up and scanning the bitmap regularly.

The two sorts of pages that might need the attention of the bitmap
daemon are BITMAP_PAGE_PENDING and BITMAP_PAGE_NEEDWRITE pages.

So make sure allclean reflects exactly when there are none of those.
So:
  set it before scanning all pages with either bit set.
  clear it whenever these bits are set
  clear it when we desire not to clear one of these bits.
  don't clear it any other time.
Signed-off-by: NeilBrown <neilb@suse.de>
parent 5a537df4
...@@ -1146,6 +1146,7 @@ void bitmap_write_all(struct bitmap *bitmap) ...@@ -1146,6 +1146,7 @@ void bitmap_write_all(struct bitmap *bitmap)
for (i = 0; i < bitmap->file_pages; i++) for (i = 0; i < bitmap->file_pages; i++)
set_page_attr(bitmap, bitmap->filemap[i], set_page_attr(bitmap, bitmap->filemap[i],
BITMAP_PAGE_NEEDWRITE); BITMAP_PAGE_NEEDWRITE);
bitmap->allclean = 0;
} }
static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc) static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
...@@ -1211,10 +1212,8 @@ void bitmap_daemon_work(mddev_t *mddev) ...@@ -1211,10 +1212,8 @@ void bitmap_daemon_work(mddev_t *mddev)
clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
spin_unlock_irqrestore(&bitmap->lock, flags); spin_unlock_irqrestore(&bitmap->lock, flags);
if (need_write) { if (need_write)
write_page(bitmap, page, 0); write_page(bitmap, page, 0);
bitmap->allclean = 0;
}
spin_lock_irqsave(&bitmap->lock, flags); spin_lock_irqsave(&bitmap->lock, flags);
j |= (PAGE_BITS - 1); j |= (PAGE_BITS - 1);
continue; continue;
...@@ -1222,12 +1221,16 @@ void bitmap_daemon_work(mddev_t *mddev) ...@@ -1222,12 +1221,16 @@ void bitmap_daemon_work(mddev_t *mddev)
/* grab the new page, sync and release the old */ /* grab the new page, sync and release the old */
if (lastpage != NULL) { if (lastpage != NULL) {
if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) { if (test_page_attr(bitmap, lastpage,
clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); BITMAP_PAGE_NEEDWRITE)) {
clear_page_attr(bitmap, lastpage,
BITMAP_PAGE_NEEDWRITE);
spin_unlock_irqrestore(&bitmap->lock, flags); spin_unlock_irqrestore(&bitmap->lock, flags);
write_page(bitmap, lastpage, 0); write_page(bitmap, lastpage, 0);
} else { } else {
set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); set_page_attr(bitmap, lastpage,
BITMAP_PAGE_NEEDWRITE);
bitmap->allclean = 0;
spin_unlock_irqrestore(&bitmap->lock, flags); spin_unlock_irqrestore(&bitmap->lock, flags);
} }
} else } else
...@@ -1250,6 +1253,8 @@ void bitmap_daemon_work(mddev_t *mddev) ...@@ -1250,6 +1253,8 @@ void bitmap_daemon_work(mddev_t *mddev)
spin_lock_irqsave(&bitmap->lock, flags); spin_lock_irqsave(&bitmap->lock, flags);
if (!bitmap->need_sync) if (!bitmap->need_sync)
clear_page_attr(bitmap, page, BITMAP_PAGE_PENDING); clear_page_attr(bitmap, page, BITMAP_PAGE_PENDING);
else
bitmap->allclean = 0;
} }
bmc = bitmap_get_counter(bitmap, bmc = bitmap_get_counter(bitmap,
(sector_t)j << CHUNK_BLOCK_SHIFT(bitmap), (sector_t)j << CHUNK_BLOCK_SHIFT(bitmap),
...@@ -1257,8 +1262,6 @@ void bitmap_daemon_work(mddev_t *mddev) ...@@ -1257,8 +1262,6 @@ void bitmap_daemon_work(mddev_t *mddev)
if (!bmc) if (!bmc)
j |= PAGE_COUNTER_MASK; j |= PAGE_COUNTER_MASK;
else if (*bmc) { else if (*bmc) {
bitmap->allclean = 0;
if (*bmc == 1 && !bitmap->need_sync) { if (*bmc == 1 && !bitmap->need_sync) {
/* we can clear the bit */ /* we can clear the bit */
*bmc = 0; *bmc = 0;
...@@ -1280,6 +1283,7 @@ void bitmap_daemon_work(mddev_t *mddev) ...@@ -1280,6 +1283,7 @@ void bitmap_daemon_work(mddev_t *mddev)
} else if (*bmc <= 2) { } else if (*bmc <= 2) {
*bmc = 1; /* maybe clear the bit next time */ *bmc = 1; /* maybe clear the bit next time */
set_page_attr(bitmap, page, BITMAP_PAGE_PENDING); set_page_attr(bitmap, page, BITMAP_PAGE_PENDING);
bitmap->allclean = 0;
} }
} }
} }
...@@ -1294,6 +1298,7 @@ void bitmap_daemon_work(mddev_t *mddev) ...@@ -1294,6 +1298,7 @@ void bitmap_daemon_work(mddev_t *mddev)
write_page(bitmap, lastpage, 0); write_page(bitmap, lastpage, 0);
} else { } else {
set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
bitmap->allclean = 0;
spin_unlock_irqrestore(&bitmap->lock, flags); spin_unlock_irqrestore(&bitmap->lock, flags);
} }
} }
...@@ -1407,7 +1412,6 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect ...@@ -1407,7 +1412,6 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
else else
sectors = 0; sectors = 0;
} }
bitmap->allclean = 0;
return 0; return 0;
} }
EXPORT_SYMBOL(bitmap_startwrite); EXPORT_SYMBOL(bitmap_startwrite);
...@@ -1453,13 +1457,14 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto ...@@ -1453,13 +1457,14 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
wake_up(&bitmap->overflow_wait); wake_up(&bitmap->overflow_wait);
(*bmc)--; (*bmc)--;
if (*bmc <= 2) if (*bmc <= 2) {
set_page_attr(bitmap, set_page_attr(bitmap,
filemap_get_page( filemap_get_page(
bitmap, bitmap,
offset >> CHUNK_BLOCK_SHIFT(bitmap)), offset >> CHUNK_BLOCK_SHIFT(bitmap)),
BITMAP_PAGE_PENDING); BITMAP_PAGE_PENDING);
bitmap->allclean = 0;
}
spin_unlock_irqrestore(&bitmap->lock, flags); spin_unlock_irqrestore(&bitmap->lock, flags);
offset += blocks; offset += blocks;
if (sectors > blocks) if (sectors > blocks)
...@@ -1495,7 +1500,6 @@ static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t ...@@ -1495,7 +1500,6 @@ static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t
} }
} }
spin_unlock_irq(&bitmap->lock); spin_unlock_irq(&bitmap->lock);
bitmap->allclean = 0;
return rv; return rv;
} }
...@@ -1543,15 +1547,16 @@ void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, i ...@@ -1543,15 +1547,16 @@ void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, i
if (!NEEDED(*bmc) && aborted) if (!NEEDED(*bmc) && aborted)
*bmc |= NEEDED_MASK; *bmc |= NEEDED_MASK;
else { else {
if (*bmc <= 2) if (*bmc <= 2) {
set_page_attr(bitmap, set_page_attr(bitmap,
filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)), filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)),
BITMAP_PAGE_PENDING); BITMAP_PAGE_PENDING);
bitmap->allclean = 0;
}
} }
} }
unlock: unlock:
spin_unlock_irqrestore(&bitmap->lock, flags); spin_unlock_irqrestore(&bitmap->lock, flags);
bitmap->allclean = 0;
} }
EXPORT_SYMBOL(bitmap_end_sync); EXPORT_SYMBOL(bitmap_end_sync);
...@@ -1623,9 +1628,9 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n ...@@ -1623,9 +1628,9 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
bitmap_count_page(bitmap, offset, 1); bitmap_count_page(bitmap, offset, 1);
page = filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)); page = filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap));
set_page_attr(bitmap, page, BITMAP_PAGE_PENDING); set_page_attr(bitmap, page, BITMAP_PAGE_PENDING);
bitmap->allclean = 0;
} }
spin_unlock_irq(&bitmap->lock); spin_unlock_irq(&bitmap->lock);
bitmap->allclean = 0;
} }
/* dirty the memory and file bits for bitmap chunks "s" to "e" */ /* dirty the memory and file bits for bitmap chunks "s" to "e" */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment