Commit d592a996 authored by Shaohua Li, committed by NeilBrown

raid5: add an option to avoid copy data from bio to stripe cache

The stripe cache has two goals:
1. Cache data, so that if the data is needed again it can be found in the stripe cache and the disk access avoided.
2. Stabilize data. Data is copied from the bio into the stripe cache, and parity is calculated over the copy. The data written to disk comes from the stripe cache, so if the upper layer changes the bio data afterwards, the data written to disk is not affected.
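
To make goal 2 concrete, here is a minimal userspace sketch (plain C, not kernel code; the buffer names, CHUNK size, and xor_into helper are all illustrative) of why computing parity over a private copy keeps on-disk data and parity consistent even if the caller modifies its buffer mid-write:

/* stable_write_demo.c -- illustrative only, not kernel code. */
#include <stdio.h>
#include <string.h>

#define CHUNK 8

/* XOR 'data' into the running parity, as RAID5 does per stripe. */
static void xor_into(unsigned char *parity, const unsigned char *data)
{
	for (int i = 0; i < CHUNK; i++)
		parity[i] ^= data[i];
}

int main(void)
{
	unsigned char bio_data[CHUNK] = "writeme";   /* upper layer's buffer */
	unsigned char stripe_page[CHUNK];            /* stripe cache page    */
	unsigned char parity[CHUNK] = { 0 };

	/* The copy this patch makes optional: snapshot the bio data first. */
	memcpy(stripe_page, bio_data, CHUNK);
	xor_into(parity, stripe_page);     /* parity covers the snapshot */

	bio_data[0] = 'X';                 /* upper layer changes the bio late */

	/* The disk gets the snapshot, so data and parity still agree. */
	printf("written: %.8s (parity consistent)\n", stripe_page);
	return 0;
}

If the copy is skipped, parity is computed from the bio page at one moment and the disk DMA reads the same page at another; a modification in between leaves the stripe's parity inconsistent with its data. That is why skipping the copy is only safe when the upper layer keeps pages stable.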

In my environment, I can guarantee 2 will not happen (the upper layer never modifies bio data while a write is in flight), and BDI_CAP_STABLE_WRITES can guarantee it too. As for 1, cache hits are not common either: the block plug mechanism already dispatches batches of sequential small requests together, and since I'm using SSDs I use a small chunk size, so it's rare for the stripe cache to be genuinely useful.

So I'd like to avoid the copy from bio to stripe cache, which is very helpful for performance. In my 1M randwrite tests, avoiding the copy improves performance by more than 30%.
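
The commit doesn't record the exact benchmark command; a rough fio equivalent of a 1M random-write test (the device path, I/O engine, queue depth, and runtime are assumptions) might look like:

fio --name=randwrite --filename=/dev/md0 --rw=randwrite --bs=1M \
    --direct=1 --ioengine=libaio --iodepth=32 --runtime=60 --time_based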

Of course, this shouldn't be enabled by default: enabling BDI_CAP_STABLE_WRITES has previously been reported to harm some workloads, so I added an option to control it.
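
With the patch applied, the knob appears as a per-array md sysfs attribute; assuming the array is md0, toggling it looks like this (the store path quiesces the array and flips BDI_CAP_STABLE_WRITES to match):

# Only safe if nothing modifies in-flight write buffers, i.e. if
# BDI_CAP_STABLE_WRITES semantics are acceptable to all users.
echo 1 > /sys/block/md0/md/skip_copy   # skip the bio->stripe-cache copy
cat /sys/block/md0/md/skip_copy        # verify: prints 1
echo 0 > /sys/block/md0/md/skip_copy   # restore default copying behaviour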

Neilb:
  changed BUG_ON to WARN_ON
  removed some assignments from raid5_build_block which are no longer needed
Signed-off-by: Shaohua Li <shli@fusionio.com>
Signed-off-by: NeilBrown <neilb@suse.de>
parent f2e06c58
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -487,6 +487,7 @@ static void shrink_buffers(struct stripe_head *sh)
 	int num = sh->raid_conf->pool_size;
 
 	for (i = 0; i < num ; i++) {
+		WARN_ON(sh->dev[i].page != sh->dev[i].orig_page);
 		p = sh->dev[i].page;
 		if (!p)
 			continue;
@@ -507,6 +508,7 @@ static int grow_buffers(struct stripe_head *sh)
 			return 1;
 		}
 		sh->dev[i].page = page;
+		sh->dev[i].orig_page = page;
 	}
 	return 0;
 }
@@ -863,6 +865,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
 				bi->bi_rw |= REQ_NOMERGE;
 
+			if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
+				WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
+			sh->dev[i].vec.bv_page = sh->dev[i].page;
 			bi->bi_vcnt = 1;
 			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			bi->bi_io_vec[0].bv_offset = 0;
@@ -907,6 +912,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			else
 				rbi->bi_iter.bi_sector = (sh->sector
 						  + rrdev->data_offset);
+			if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
+				WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
+			sh->dev[i].rvec.bv_page = sh->dev[i].page;
 			rbi->bi_vcnt = 1;
 			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			rbi->bi_io_vec[0].bv_offset = 0;
@@ -935,8 +943,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 }
 
 static struct dma_async_tx_descriptor *
-async_copy_data(int frombio, struct bio *bio, struct page *page,
-	sector_t sector, struct dma_async_tx_descriptor *tx)
+async_copy_data(int frombio, struct bio *bio, struct page **page,
+	sector_t sector, struct dma_async_tx_descriptor *tx,
+	struct stripe_head *sh)
 {
 	struct bio_vec bvl;
 	struct bvec_iter iter;
@@ -973,11 +982,16 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
 		if (clen > 0) {
 			b_offset += bvl.bv_offset;
 			bio_page = bvl.bv_page;
-			if (frombio)
-				tx = async_memcpy(page, bio_page, page_offset,
-						  b_offset, clen, &submit);
-			else
-				tx = async_memcpy(bio_page, page, b_offset,
+			if (frombio) {
+				if (sh->raid_conf->skip_copy &&
+				    b_offset == 0 && page_offset == 0 &&
+				    clen == STRIPE_SIZE)
+					*page = bio_page;
+				else
+					tx = async_memcpy(*page, bio_page, page_offset,
+						  b_offset, clen, &submit);
+			} else
+				tx = async_memcpy(bio_page, *page, b_offset,
 						  page_offset, clen, &submit);
 		}
 		/* chain the operations */
@@ -1053,8 +1067,8 @@ static void ops_run_biofill(struct stripe_head *sh)
 			spin_unlock_irq(&sh->stripe_lock);
 			while (rbi && rbi->bi_iter.bi_sector <
 				dev->sector + STRIPE_SECTORS) {
-				tx = async_copy_data(0, rbi, dev->page,
-					dev->sector, tx);
+				tx = async_copy_data(0, rbi, &dev->page,
+					dev->sector, tx, sh);
 				rbi = r5_next_bio(rbi, dev->sector);
 			}
 		}
@@ -1392,6 +1406,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 			BUG_ON(dev->written);
 			wbi = dev->written = chosen;
 			spin_unlock_irq(&sh->stripe_lock);
+			WARN_ON(dev->page != dev->orig_page);
 
 			while (wbi && wbi->bi_iter.bi_sector <
 				dev->sector + STRIPE_SECTORS) {
@@ -1401,9 +1416,15 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 					set_bit(R5_SyncIO, &dev->flags);
 				if (wbi->bi_rw & REQ_DISCARD)
 					set_bit(R5_Discard, &dev->flags);
-				else
-					tx = async_copy_data(1, wbi, dev->page,
-						dev->sector, tx);
+				else {
+					tx = async_copy_data(1, wbi, &dev->page,
+						dev->sector, tx, sh);
+					if (dev->page != dev->orig_page) {
+						set_bit(R5_SkipCopy, &dev->flags);
+						clear_bit(R5_UPTODATE, &dev->flags);
+						clear_bit(R5_OVERWRITE, &dev->flags);
+					}
+				}
 				wbi = r5_next_bio(wbi, dev->sector);
 			}
 		}
@@ -1434,7 +1455,7 @@ static void ops_complete_reconstruct(void *stripe_head_ref)
 		struct r5dev *dev = &sh->dev[i];
 
 		if (dev->written || i == pd_idx || i == qd_idx) {
-			if (!discard)
+			if (!discard && !test_bit(R5_SkipCopy, &dev->flags))
 				set_bit(R5_UPTODATE, &dev->flags);
 			if (fua)
 				set_bit(R5_WantFUA, &dev->flags);
@@ -1847,8 +1868,10 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 		osh = get_free_stripe(conf, hash);
 		unlock_device_hash_lock(conf, hash);
 		atomic_set(&nsh->count, 1);
-		for(i=0; i<conf->pool_size; i++)
+		for(i=0; i<conf->pool_size; i++) {
 			nsh->dev[i].page = osh->dev[i].page;
+			nsh->dev[i].orig_page = osh->dev[i].page;
+		}
 		for( ; i<newsize; i++)
 			nsh->dev[i].page = NULL;
 		nsh->hash_lock_index = hash;
@@ -1904,6 +1927,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 			if (nsh->dev[i].page == NULL) {
 				struct page *p = alloc_page(GFP_NOIO);
 				nsh->dev[i].page = p;
+				nsh->dev[i].orig_page = p;
 				if (!p)
 					err = -ENOMEM;
 			}
@@ -2141,24 +2165,20 @@ static void raid5_end_write_request(struct bio *bi, int error)
 }
 
 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
 
 static void raid5_build_block(struct stripe_head *sh, int i, int previous)
 {
 	struct r5dev *dev = &sh->dev[i];
 
 	bio_init(&dev->req);
 	dev->req.bi_io_vec = &dev->vec;
-	dev->req.bi_vcnt++;
-	dev->req.bi_max_vecs++;
+	dev->req.bi_max_vecs = 1;
 	dev->req.bi_private = sh;
-	dev->vec.bv_page = dev->page;
 
 	bio_init(&dev->rreq);
 	dev->rreq.bi_io_vec = &dev->rvec;
-	dev->rreq.bi_vcnt++;
-	dev->rreq.bi_max_vecs++;
+	dev->rreq.bi_max_vecs = 1;
 	dev->rreq.bi_private = sh;
-	dev->rvec.bv_page = dev->page;
 
 	dev->flags = 0;
 	dev->sector = compute_blocknr(sh, i, previous);
@@ -2758,6 +2778,11 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 		/* and fail all 'written' */
 		bi = sh->dev[i].written;
 		sh->dev[i].written = NULL;
+		if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) {
+			WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
+			sh->dev[i].page = sh->dev[i].orig_page;
+		}
+
 		if (bi) bitmap_end = 1;
 		while (bi && bi->bi_iter.bi_sector <
 		       sh->dev[i].sector + STRIPE_SECTORS) {
@@ -3002,12 +3027,17 @@ static void handle_stripe_clean_event(struct r5conf *conf,
 			dev = &sh->dev[i];
 			if (!test_bit(R5_LOCKED, &dev->flags) &&
 			    (test_bit(R5_UPTODATE, &dev->flags) ||
-			     test_bit(R5_Discard, &dev->flags))) {
+			     test_bit(R5_Discard, &dev->flags) ||
+			     test_bit(R5_SkipCopy, &dev->flags))) {
 				/* We can return any write requests */
 				struct bio *wbi, *wbi2;
 				pr_debug("Return write for disc %d\n", i);
 				if (test_and_clear_bit(R5_Discard, &dev->flags))
 					clear_bit(R5_UPTODATE, &dev->flags);
+				if (test_and_clear_bit(R5_SkipCopy, &dev->flags)) {
+					WARN_ON(test_bit(R5_UPTODATE, &dev->flags));
+					dev->page = dev->orig_page;
+				}
 				wbi = dev->written;
 				dev->written = NULL;
 				while (wbi && wbi->bi_iter.bi_sector <
@@ -3026,6 +3056,8 @@ static void handle_stripe_clean_event(struct r5conf *conf,
 						0);
 			} else if (test_bit(R5_Discard, &dev->flags))
 				discard_pending = 1;
+			WARN_ON(test_bit(R5_SkipCopy, &dev->flags));
+			WARN_ON(dev->page != dev->orig_page);
 		}
 		if (!discard_pending &&
 		    test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) {
@@ -5365,6 +5397,50 @@ raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
 					raid5_show_preread_threshold,
 					raid5_store_preread_threshold);
 
+static ssize_t
+raid5_show_skip_copy(struct mddev *mddev, char *page)
+{
+	struct r5conf *conf = mddev->private;
+
+	if (conf)
+		return sprintf(page, "%d\n", conf->skip_copy);
+	else
+		return 0;
+}
+
+static ssize_t
+raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
+{
+	struct r5conf *conf = mddev->private;
+	unsigned long new;
+
+	if (len >= PAGE_SIZE)
+		return -EINVAL;
+	if (!conf)
+		return -ENODEV;
+
+	if (kstrtoul(page, 10, &new))
+		return -EINVAL;
+	new = !!new;
+	if (new == conf->skip_copy)
+		return len;
+
+	mddev_suspend(mddev);
+	conf->skip_copy = new;
+	if (new)
+		mddev->queue->backing_dev_info.capabilities |=
+						BDI_CAP_STABLE_WRITES;
+	else
+		mddev->queue->backing_dev_info.capabilities &=
+						~BDI_CAP_STABLE_WRITES;
+	mddev_resume(mddev);
+	return len;
+}
+
+static struct md_sysfs_entry
+raid5_skip_copy = __ATTR(skip_copy, S_IRUGO | S_IWUSR,
+					raid5_show_skip_copy,
+					raid5_store_skip_copy);
+
 static ssize_t
 stripe_cache_active_show(struct mddev *mddev, char *page)
 {
@@ -5450,6 +5526,7 @@ static struct attribute *raid5_attrs[] = {
 	&raid5_stripecache_active.attr,
 	&raid5_preread_bypass_threshold.attr,
 	&raid5_group_thread_cnt.attr,
+	&raid5_skip_copy.attr,
 	NULL,
 };
 static struct attribute_group raid5_attrs_group = {
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -232,7 +232,7 @@ struct stripe_head {
 		 */
 		struct bio	req, rreq;
 		struct bio_vec	vec, rvec;
-		struct page	*page;
+		struct page	*page, *orig_page;
 		struct bio	*toread, *read, *towrite, *written;
 		sector_t	sector;		/* sector of this page */
 		unsigned long	flags;
@@ -299,6 +299,7 @@ enum r5dev_flags {
 			 * data in, and now is a good time to write it out.
 			 */
 	R5_Discard,	/* Discard the stripe */
+	R5_SkipCopy,	/* Don't copy data from bio to stripe cache */
 };
 
 /*
@@ -436,6 +437,7 @@ struct r5conf {
 	atomic_t		pending_full_writes; /* full write backlog */
 	int			bypass_count; /* bypassed prereads */
 	int			bypass_threshold; /* preread nice */
+	int			skip_copy; /* Don't copy data from bio to stripe cache */
 	struct list_head	*last_hold; /* detect hold_list promotions */
 	atomic_t		reshape_stripes; /* stripes with pending writes for reshape */