Commit ae1713e2 authored by Artur Paszkiewicz, committed by Shaohua Li

raid5-ppl: partial parity calculation optimization

In the case of read-modify-write, the partial parity is the same as the result
of ops_run_prexor5(), so we can just copy sh->dev[pd_idx].page into
sh->ppl_page instead of calculating it again.
Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
Signed-off-by: Shaohua Li <shli@fb.com>
parent 845b9e22
...@@ -153,7 +153,7 @@ ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu, ...@@ -153,7 +153,7 @@ ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
struct dma_async_tx_descriptor *tx) struct dma_async_tx_descriptor *tx)
{ {
int disks = sh->disks; int disks = sh->disks;
struct page **xor_srcs = flex_array_get(percpu->scribble, 0); struct page **srcs = flex_array_get(percpu->scribble, 0);
int count = 0, pd_idx = sh->pd_idx, i; int count = 0, pd_idx = sh->pd_idx, i;
struct async_submit_ctl submit; struct async_submit_ctl submit;
...@@ -166,18 +166,18 @@ ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu, ...@@ -166,18 +166,18 @@ ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
* differently. * differently.
*/ */
if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
/* rmw: xor old data and parity from updated disks */ /*
for (i = disks; i--;) { * rmw: xor old data and parity from updated disks
struct r5dev *dev = &sh->dev[i]; * This is calculated earlier by ops_run_prexor5() so just copy
if (test_bit(R5_Wantdrain, &dev->flags) || i == pd_idx) * the parity dev page.
xor_srcs[count++] = dev->page; */
} srcs[count++] = sh->dev[pd_idx].page;
} else if (sh->reconstruct_state == reconstruct_state_drain_run) { } else if (sh->reconstruct_state == reconstruct_state_drain_run) {
/* rcw: xor data from all not updated disks */ /* rcw: xor data from all not updated disks */
for (i = disks; i--;) { for (i = disks; i--;) {
struct r5dev *dev = &sh->dev[i]; struct r5dev *dev = &sh->dev[i];
if (test_bit(R5_UPTODATE, &dev->flags)) if (test_bit(R5_UPTODATE, &dev->flags))
xor_srcs[count++] = dev->page; srcs[count++] = dev->page;
} }
} else { } else {
return tx; return tx;
...@@ -188,10 +188,10 @@ ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu, ...@@ -188,10 +188,10 @@ ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
+ sizeof(struct page *) * (sh->disks + 2)); + sizeof(struct page *) * (sh->disks + 2));
if (count == 1) if (count == 1)
tx = async_memcpy(sh->ppl_page, xor_srcs[0], 0, 0, PAGE_SIZE, tx = async_memcpy(sh->ppl_page, srcs[0], 0, 0, PAGE_SIZE,
&submit); &submit);
else else
tx = async_xor(sh->ppl_page, xor_srcs, 0, count, PAGE_SIZE, tx = async_xor(sh->ppl_page, srcs, 0, count, PAGE_SIZE,
&submit); &submit);
return tx; return tx;
......
...@@ -2079,9 +2079,6 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) ...@@ -2079,9 +2079,6 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
async_tx_ack(tx); async_tx_ack(tx);
} }
if (test_bit(STRIPE_OP_PARTIAL_PARITY, &ops_request))
tx = ops_run_partial_parity(sh, percpu, tx);
if (test_bit(STRIPE_OP_PREXOR, &ops_request)) { if (test_bit(STRIPE_OP_PREXOR, &ops_request)) {
if (level < 6) if (level < 6)
tx = ops_run_prexor5(sh, percpu, tx); tx = ops_run_prexor5(sh, percpu, tx);
...@@ -2089,6 +2086,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) ...@@ -2089,6 +2086,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
tx = ops_run_prexor6(sh, percpu, tx); tx = ops_run_prexor6(sh, percpu, tx);
} }
if (test_bit(STRIPE_OP_PARTIAL_PARITY, &ops_request))
tx = ops_run_partial_parity(sh, percpu, tx);
if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) { if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
tx = ops_run_biodrain(sh, tx); tx = ops_run_biodrain(sh, tx);
overlap_clear++; overlap_clear++;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment