Commit bf699c9b authored by Linus Torvalds

Merge branch 'for-linus' of git://neil.brown.name/md

* 'for-linus' of git://neil.brown.name/md:
  async_tx: fix asynchronous raid6 recovery for ddf layouts
  async_pq: rename scribble page
  async_pq: kill a stray dma_map() call and other cleanups
  md/raid6: kill a gcc-4.0.1 'uninitialized variable' warning
  raid6/async_tx: handle holes in block list in async_syndrome_val
  md/async: don't pass a memory pointer as a page pointer.
  md: Fix handling of raid5 array which is being reshaped to fewer devices.
  md: fix problems with RAID6 calculations for DDF.
  md/raid456: downlevel multicore operations to raid_run_ops
  md: drivers/md/unroll.pl replaced with awk analog
  md: remove clumsy usage of do_sync_mapping_range from bitmap code
  md: raid1/raid10: handle allocation errors during array setup.
  md/raid5: initialize conf->device_lock earlier
  md/raid1/raid10: add a cond_resched
  Revert "md: do not progress the resync process if the stripe was blocked"
parents aefba418 da17bf43
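
The async_tx changes in this merge share one convention: a hole in the source list is now a NULL entry in blocks[], rather than a pointer to raid6_empty_zero_page. A minimal caller sketch of that convention (the page variables data0, data2, data3, p_page, q_page and scribble are hypothetical; the API calls and the P/Q placement at blocks[disks-2] and blocks[disks-1] are as documented in async_pq.c below):

	struct page *blocks[6];	/* 4 data sources + P + Q on a 6-device stripe */
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;

	blocks[0] = data0;
	blocks[1] = NULL;	/* hole: all-zero (sync path) or skipped (DMA path) */
	blocks[2] = data2;
	blocks[3] = data3;
	blocks[4] = p_page;	/* P destination, blocks[disks-2] */
	blocks[5] = q_page;	/* Q destination, blocks[disks-1] */

	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, scribble);
	tx = async_gen_syndrome(blocks, 0, 6, PAGE_SIZE, &submit);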
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -26,14 +26,10 @@
 #include <linux/async_tx.h>
 
 /**
- * scribble - space to hold throwaway P buffer for synchronous gen_syndrome
+ * pq_scribble_page - space to hold throwaway P or Q buffer for
+ * synchronous gen_syndrome
  */
-static struct page *scribble;
-
-static bool is_raid6_zero_block(struct page *p)
-{
-	return p == (void *) raid6_empty_zero_page;
-}
+static struct page *pq_scribble_page;
 
 /* the struct page *blocks[] parameter passed to async_gen_syndrome()
  * and async_syndrome_val() contains the 'P' destination address at
@@ -83,7 +79,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
 	 * sources and update the coefficients accordingly
 	 */
 	for (i = 0, idx = 0; i < src_cnt; i++) {
-		if (is_raid6_zero_block(blocks[i]))
+		if (blocks[i] == NULL)
 			continue;
 		dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
 					    DMA_TO_DEVICE);
@@ -160,9 +156,9 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	srcs = (void **) blocks;
 	for (i = 0; i < disks; i++) {
-		if (is_raid6_zero_block(blocks[i])) {
+		if (blocks[i] == NULL) {
 			BUG_ON(i > disks - 3); /* P or Q can't be zero */
-			srcs[i] = blocks[i];
+			srcs[i] = (void*)raid6_empty_zero_page;
 		} else
 			srcs[i] = page_address(blocks[i]) + offset;
 	}
@@ -186,10 +182,14 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
  * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
  * PAGE_SIZE as a temporary buffer of this size is used in the
  * synchronous path.  'disks' always accounts for both destination
- * buffers.
+ * buffers.  If any source buffers (blocks[i] where i < disks - 2) are
+ * set to NULL those buffers will be replaced with the raid6_zero_page
+ * in the synchronous path and omitted in the hardware-asynchronous
+ * path.
  *
  * 'blocks' note: if submit->scribble is NULL then the contents of
- * 'blocks' may be overridden
+ * 'blocks' may be overwritten to perform address conversions
+ * (dma_map_page() or page_address()).
  */
 struct dma_async_tx_descriptor *
 async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
@@ -227,11 +227,11 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	async_tx_quiesce(&submit->depend_tx);
 
 	if (!P(blocks, disks)) {
-		P(blocks, disks) = scribble;
+		P(blocks, disks) = pq_scribble_page;
 		BUG_ON(len + offset > PAGE_SIZE);
 	}
 	if (!Q(blocks, disks)) {
-		Q(blocks, disks) = scribble;
+		Q(blocks, disks) = pq_scribble_page;
 		BUG_ON(len + offset > PAGE_SIZE);
 	}
 	do_sync_gen_syndrome(blocks, offset, disks, len, submit);
@@ -265,8 +265,10 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 						      len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx;
+	unsigned char coefs[disks-2];
 	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
 	dma_addr_t *dma_src = NULL;
+	int src_cnt = 0;
 
 	BUG_ON(disks < 4);
@@ -285,22 +287,32 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 			 __func__, disks, len);
 		if (!P(blocks, disks))
 			dma_flags |= DMA_PREP_PQ_DISABLE_P;
+		else
+			pq[0] = dma_map_page(dev, P(blocks, disks),
+					     offset, len,
+					     DMA_TO_DEVICE);
 		if (!Q(blocks, disks))
 			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
+		else
+			pq[1] = dma_map_page(dev, Q(blocks, disks),
+					     offset, len,
+					     DMA_TO_DEVICE);
 		if (submit->flags & ASYNC_TX_FENCE)
 			dma_flags |= DMA_PREP_FENCE;
-		for (i = 0; i < disks; i++)
+		for (i = 0; i < disks-2; i++)
 			if (likely(blocks[i])) {
-				BUG_ON(is_raid6_zero_block(blocks[i]));
-				dma_src[i] = dma_map_page(dev, blocks[i],
-							  offset, len,
-							  DMA_TO_DEVICE);
+				dma_src[src_cnt] = dma_map_page(dev, blocks[i],
+								offset, len,
+								DMA_TO_DEVICE);
+				coefs[src_cnt] = raid6_gfexp[i];
+				src_cnt++;
 			}
 
 		for (;;) {
 			tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
-							    disks - 2,
-							    raid6_gfexp,
+							    src_cnt,
+							    coefs,
 							    len, pqres,
 							    dma_flags);
 			if (likely(tx))
@@ -373,9 +385,9 @@ EXPORT_SYMBOL_GPL(async_syndrome_val);
 
 static int __init async_pq_init(void)
 {
-	scribble = alloc_page(GFP_KERNEL);
+	pq_scribble_page = alloc_page(GFP_KERNEL);
 
-	if (scribble)
+	if (pq_scribble_page)
 		return 0;
 
 	pr_err("%s: failed to allocate required spare page\n", __func__);
@@ -385,7 +397,7 @@ static int __init async_pq_init(void)
 
 static void __exit async_pq_exit(void)
 {
-	put_page(scribble);
+	put_page(pq_scribble_page);
 }
 
 module_init(async_pq_init);
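
A second sketch for the scribble-page path above: when the caller does not want one of the two results, it passes that destination as NULL and async_gen_syndrome() redirects the unwanted computation to pq_scribble_page on the synchronous path, which is why the BUG_ON(len + offset > PAGE_SIZE) guards exist. Only async_gen_syndrome(), init_async_submit() and the blocks[] layout are real here; the variables are stand-ins:

	/* Regenerate only Q; let the unwanted P land in the scribble page. */
	blocks[disks-2] = NULL;		/* P: don't care */
	blocks[disks-1] = q_page;	/* Q: wanted result */
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, scribble);
	tx = async_gen_syndrome(blocks, 0, disks, PAGE_SIZE, &submit);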
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -131,8 +131,8 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
 }
 
 static struct dma_async_tx_descriptor *
-__2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
-		struct async_submit_ctl *submit)
+__2data_recov_4(int disks, size_t bytes, int faila, int failb,
+		struct page **blocks, struct async_submit_ctl *submit)
 {
 	struct dma_async_tx_descriptor *tx = NULL;
 	struct page *p, *q, *a, *b;
@@ -143,8 +143,8 @@ __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
 	void *cb_param = submit->cb_param;
 	void *scribble = submit->scribble;
 
-	p = blocks[4-2];
-	q = blocks[4-1];
+	p = blocks[disks-2];
+	q = blocks[disks-1];
 
 	a = blocks[faila];
 	b = blocks[failb];
@@ -170,8 +170,8 @@ __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
 }
 
 static struct dma_async_tx_descriptor *
-__2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks,
-		struct async_submit_ctl *submit)
+__2data_recov_5(int disks, size_t bytes, int faila, int failb,
+		struct page **blocks, struct async_submit_ctl *submit)
 {
 	struct dma_async_tx_descriptor *tx = NULL;
 	struct page *p, *q, *g, *dp, *dq;
@@ -181,21 +181,22 @@ __2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks,
 	dma_async_tx_callback cb_fn = submit->cb_fn;
 	void *cb_param = submit->cb_param;
 	void *scribble = submit->scribble;
-	int uninitialized_var(good);
-	int i;
+	int good_srcs, good, i;
 
-	for (i = 0; i < 3; i++) {
+	good_srcs = 0;
+	good = -1;
+	for (i = 0; i < disks-2; i++) {
+		if (blocks[i] == NULL)
+			continue;
 		if (i == faila || i == failb)
 			continue;
-		else {
-			good = i;
-			break;
-		}
+		good = i;
+		good_srcs++;
 	}
-	BUG_ON(i >= 3);
+	BUG_ON(good_srcs > 1);
 
-	p = blocks[5-2];
-	q = blocks[5-1];
+	p = blocks[disks-2];
+	q = blocks[disks-1];
 	g = blocks[good];
 
 	/* Compute syndrome with zero for the missing data pages
@@ -263,10 +264,10 @@ __2data_recov_n(int disks, size_t bytes, int faila, int failb,
 	 * delta p and delta q
 	 */
 	dp = blocks[faila];
-	blocks[faila] = (void *)raid6_empty_zero_page;
+	blocks[faila] = NULL;
 	blocks[disks-2] = dp;
 	dq = blocks[failb];
-	blocks[failb] = (void *)raid6_empty_zero_page;
+	blocks[failb] = NULL;
 	blocks[disks-1] = dq;
 
 	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
@@ -323,6 +324,8 @@ struct dma_async_tx_descriptor *
 async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 			struct page **blocks, struct async_submit_ctl *submit)
 {
+	int non_zero_srcs, i;
+
 	BUG_ON(faila == failb);
 	if (failb < faila)
 		swap(faila, failb);
@@ -334,10 +337,12 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 	 */
 	if (!submit->scribble) {
 		void **ptrs = (void **) blocks;
-		int i;
 
 		async_tx_quiesce(&submit->depend_tx);
 		for (i = 0; i < disks; i++)
-			ptrs[i] = page_address(blocks[i]);
+			if (blocks[i] == NULL)
+				ptrs[i] = (void *) raid6_empty_zero_page;
+			else
+				ptrs[i] = page_address(blocks[i]);
 
 		raid6_2data_recov(disks, bytes, faila, failb, ptrs);
@@ -347,19 +352,30 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 		return NULL;
 	}
 
-	switch (disks) {
-	case 4:
+	non_zero_srcs = 0;
+	for (i = 0; i < disks-2 && non_zero_srcs < 4; i++)
+		if (blocks[i])
+			non_zero_srcs++;
+	switch (non_zero_srcs) {
+	case 0:
+	case 1:
+		/* There must be at least 2 sources - the failed devices. */
+		BUG();
+	case 2:
 		/* dma devices do not uniformly understand a zero source pq
 		 * operation (in contrast to the synchronous case), so
-		 * explicitly handle the 4 disk special case
+		 * explicitly handle the special case of a 4 disk array with
+		 * both data disks missing.
 		 */
-		return __2data_recov_4(bytes, faila, failb, blocks, submit);
-	case 5:
+		return __2data_recov_4(disks, bytes, faila, failb, blocks, submit);
+	case 3:
 		/* dma devices do not uniformly understand a single
 		 * source pq operation (in contrast to the synchronous
-		 * case), so explicitly handle the 5 disk special case
+		 * case), so explicitly handle the special case of a 5 disk
+		 * array with 2 of 3 data disks missing.
 		 */
-		return __2data_recov_5(bytes, faila, failb, blocks, submit);
+		return __2data_recov_5(disks, bytes, faila, failb, blocks, submit);
 	default:
 		return __2data_recov_n(disks, bytes, faila, failb, blocks, submit);
 	}
@@ -385,6 +401,7 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
 	dma_async_tx_callback cb_fn = submit->cb_fn;
 	void *cb_param = submit->cb_param;
 	void *scribble = submit->scribble;
+	int good_srcs, good, i;
 	struct page *srcs[2];
 
 	pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
@@ -394,10 +411,12 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
 	 */
 	if (!scribble) {
 		void **ptrs = (void **) blocks;
-		int i;
 
 		async_tx_quiesce(&submit->depend_tx);
 		for (i = 0; i < disks; i++)
-			ptrs[i] = page_address(blocks[i]);
+			if (blocks[i] == NULL)
+				ptrs[i] = (void*)raid6_empty_zero_page;
+			else
+				ptrs[i] = page_address(blocks[i]);
 
 		raid6_datap_recov(disks, bytes, faila, ptrs);
@@ -407,6 +426,20 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
 		return NULL;
 	}
 
+	good_srcs = 0;
+	good = -1;
+	for (i = 0; i < disks-2; i++) {
+		if (i == faila)
+			continue;
+		if (blocks[i]) {
+			good = i;
+			good_srcs++;
+			if (good_srcs > 1)
+				break;
+		}
+	}
+	BUG_ON(good_srcs == 0);
+
 	p = blocks[disks-2];
 	q = blocks[disks-1];
 
@@ -414,14 +447,13 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
 	 * Use the dead data page as temporary storage for delta q
 	 */
 	dq = blocks[faila];
-	blocks[faila] = (void *)raid6_empty_zero_page;
+	blocks[faila] = NULL;
 	blocks[disks-1] = dq;
 
-	/* in the 4 disk case we only need to perform a single source
-	 * multiplication
+	/* in the 4-disk case we only need to perform a single source
+	 * multiplication with the one good data block.
 	 */
-	if (disks == 4) {
-		int good = faila == 0 ? 1 : 0;
+	if (good_srcs == 1) {
 		struct page *g = blocks[good];
 
 		init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
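
The recovery dispatch above now keys off the number of usable data sources rather than the raw device count. A small illustrative helper (hypothetical, mirroring the loop in async_raid6_2data_recov()) makes the rule explicit:

	/* Count non-hole data sources, capping at 4: the strategies only
	 * distinguish 2, 3 and "4 or more" surviving sources.
	 */
	static int count_nz_srcs(struct page **blocks, int disks)
	{
		int i, nz = 0;

		for (i = 0; i < disks - 2 && nz < 4; i++)
			if (blocks[i])
				nz++;
		return nz;	/* 2: __2data_recov_4(), 3: __2data_recov_5(),
				 * otherwise: __2data_recov_n() */
	}

This is what lets a wide DDF array, whose blocks[] is mostly holes across a degraded range, reuse the special cases originally written for literal 4- and 5-disk arrays.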
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -44,20 +44,23 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 	void *cb_param_orig = submit->cb_param;
 	enum async_tx_flags flags_orig = submit->flags;
 	enum dma_ctrl_flags dma_flags;
-	int xor_src_cnt;
+	int xor_src_cnt = 0;
 	dma_addr_t dma_dest;
 
 	/* map the dest bidrectional in case it is re-used as a source */
 	dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
 	for (i = 0; i < src_cnt; i++) {
+		if (!src_list[i])
+			continue;
 		/* only map the dest once */
 		if (unlikely(src_list[i] == dest)) {
-			dma_src[i] = dma_dest;
+			dma_src[xor_src_cnt++] = dma_dest;
 			continue;
 		}
-		dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
-					  len, DMA_TO_DEVICE);
+		dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset,
+						      len, DMA_TO_DEVICE);
 	}
+	src_cnt = xor_src_cnt;
 
 	while (src_cnt) {
 		submit->flags = flags_orig;
@@ -123,7 +126,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	    int src_cnt, size_t len, struct async_submit_ctl *submit)
 {
 	int i;
-	int xor_src_cnt;
+	int xor_src_cnt = 0;
 	int src_off = 0;
 	void *dest_buf;
 	void **srcs;
@@ -135,8 +138,9 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 
 	/* convert to buffer pointers */
 	for (i = 0; i < src_cnt; i++)
-		srcs[i] = page_address(src_list[i]) + offset;
+		if (src_list[i])
+			srcs[xor_src_cnt++] = page_address(src_list[i]) + offset;
+	src_cnt = xor_src_cnt;
 
 	/* set destination address */
 	dest_buf = page_address(dest) + offset;
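
Both xor paths use the same pack-and-shrink idiom for holes. As a standalone sketch (hypothetical helper, same logic as the synchronous loop above):

	/* Pack the non-NULL sources densely into dst and return the new
	 * effective count, so later stages never see holes in the array.
	 */
	static int pack_srcs(void **dst, struct page **src_list, int src_cnt,
			     unsigned int offset)
	{
		int i, n = 0;

		for (i = 0; i < src_cnt; i++)
			if (src_list[i])
				dst[n++] = page_address(src_list[i]) + offset;
		return n;
	}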
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -46,7 +46,7 @@ obj-$(CONFIG_DM_LOG_USERSPACE)	+= dm-log-userspace.o
 obj-$(CONFIG_DM_ZERO)		+= dm-zero.o
 
 quiet_cmd_unroll = UNROLL  $@
-      cmd_unroll = $(PERL) $(srctree)/$(src)/unroll.pl $(UNROLL) \
+      cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) \
                    < $< > $@ || ( rm -f $@ && exit 1 )
 
 ifeq ($(CONFIG_ALTIVEC),y)
@@ -59,56 +59,56 @@ endif
 
 targets += raid6int1.c
 $(obj)/raid6int1.c:   UNROLL := 1
-$(obj)/raid6int1.c:   $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int1.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 targets += raid6int2.c
 $(obj)/raid6int2.c:   UNROLL := 2
-$(obj)/raid6int2.c:   $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int2.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 targets += raid6int4.c
 $(obj)/raid6int4.c:   UNROLL := 4
-$(obj)/raid6int4.c:   $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int4.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 targets += raid6int8.c
 $(obj)/raid6int8.c:   UNROLL := 8
-$(obj)/raid6int8.c:   $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int8.c:   $(src)/raid6int.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 targets += raid6int16.c
 $(obj)/raid6int16.c:  UNROLL := 16
-$(obj)/raid6int16.c:  $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int16.c:  $(src)/raid6int.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 targets += raid6int32.c
 $(obj)/raid6int32.c:  UNROLL := 32
-$(obj)/raid6int32.c:  $(src)/raid6int.uc $(src)/unroll.pl FORCE
+$(obj)/raid6int32.c:  $(src)/raid6int.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 CFLAGS_raid6altivec1.o += $(altivec_flags)
 targets += raid6altivec1.c
 $(obj)/raid6altivec1.c: UNROLL := 1
-$(obj)/raid6altivec1.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec1.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 CFLAGS_raid6altivec2.o += $(altivec_flags)
 targets += raid6altivec2.c
 $(obj)/raid6altivec2.c: UNROLL := 2
-$(obj)/raid6altivec2.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec2.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 CFLAGS_raid6altivec4.o += $(altivec_flags)
 targets += raid6altivec4.c
 $(obj)/raid6altivec4.c: UNROLL := 4
-$(obj)/raid6altivec4.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec4.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 CFLAGS_raid6altivec8.o += $(altivec_flags)
 targets += raid6altivec8.c
 $(obj)/raid6altivec8.c: UNROLL := 8
-$(obj)/raid6altivec8.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE
+$(obj)/raid6altivec8.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE
 	$(call if_changed,unroll)
 
 quiet_cmd_mktable = TABLE   $@
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1624,10 +1624,11 @@ int bitmap_create(mddev_t *mddev)
 	bitmap->offset = mddev->bitmap_offset;
 	if (file) {
 		get_file(file);
-		do_sync_mapping_range(file->f_mapping, 0, LLONG_MAX,
-				      SYNC_FILE_RANGE_WAIT_BEFORE |
-				      SYNC_FILE_RANGE_WRITE |
-				      SYNC_FILE_RANGE_WAIT_AFTER);
+		/* As future accesses to this file will use bmap,
+		 * and bypass the page cache, we must sync the file
+		 * first.
+		 */
+		vfs_fsync(file, file->f_dentry, 1);
 	}
 	/* read superblock from bitmap file (this sets bitmap->chunksize) */
 	err = bitmap_read_sb(bitmap);
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2631,7 +2631,7 @@ static void analyze_sbs(mddev_t * mddev)
 			rdev->desc_nr = i++;
 			rdev->raid_disk = rdev->desc_nr;
 			set_bit(In_sync, &rdev->flags);
-		} else if (rdev->raid_disk >= mddev->raid_disks) {
+		} else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
 			rdev->raid_disk = -1;
 			clear_bit(In_sync, &rdev->flags);
 		}
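
A worked check of the new bound, with illustrative values for an array mid-reshape from 6 down to 4 devices:

	/* raid_disks already holds the new size; delta_disks is negative
	 * while shrinking, so subtracting min(0, delta_disks) widens the
	 * bound back to the old size until the reshape completes.
	 */
	int raid_disks = 4, delta_disks = -2;
	int limit = raid_disks - min(0, delta_disks);	/* 4 - (-2) == 6 */
	/* rdevs with raid_disk 4 or 5 therefore keep their slot and
	 * In_sync bit instead of being kicked out mid-reshape. */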
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -64,7 +64,7 @@ static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
 	/* allocate a r1bio with room for raid_disks entries in the bios array */
 	r1_bio = kzalloc(size, gfp_flags);
-	if (!r1_bio)
+	if (!r1_bio && pi->mddev)
 		unplug_slaves(pi->mddev);
 
 	return r1_bio;
@@ -1683,6 +1683,7 @@ static void raid1d(mddev_t *mddev)
 				generic_make_request(bio);
 			}
 		}
+		cond_resched();
 	}
 	if (unplug)
 		unplug_slaves(mddev);
@@ -1978,13 +1979,14 @@ static int run(mddev_t *mddev)
 	conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
 	if (!conf->poolinfo)
 		goto out_no_mem;
-	conf->poolinfo->mddev = mddev;
+	conf->poolinfo->mddev = NULL;
 	conf->poolinfo->raid_disks = mddev->raid_disks;
 	conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
 					  r1bio_pool_free,
 					  conf->poolinfo);
 	if (!conf->r1bio_pool)
 		goto out_no_mem;
+	conf->poolinfo->mddev = mddev;
 
 	spin_lock_init(&conf->device_lock);
 	mddev->queue->queue_lock = &conf->device_lock;
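
The r1bio_pool_alloc() and run() hunks cooperate: mempool_create() invokes the allocator immediately to fill its reserve, before conf is fully built, and the allocator's failure path dereferences pi->mddev via unplug_slaves(). Publishing mddev only after the pool exists closes that window; the raid10 hunks below apply the same fix to conf->mddev. A generic sketch of the idiom (all names hypothetical except mempool_create()):

	struct pool_info { struct owner *owner; /* ... */ };

	static int setup_pool(struct pool_info *info, struct owner *owner)
	{
		mempool_t *pool;

		info->owner = NULL;	/* alloc_fn must tolerate NULL here */
		pool = mempool_create(MIN_ITEMS, alloc_fn, free_fn, info);
		if (!pool)
			return -ENOMEM;
		info->owner = owner;	/* publish only once the pool exists */
		return 0;
	}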
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -68,7 +68,7 @@ static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
 	/* allocate a r10bio with room for raid_disks entries in the bios array */
 	r10_bio = kzalloc(size, gfp_flags);
-	if (!r10_bio)
+	if (!r10_bio && conf->mddev)
 		unplug_slaves(conf->mddev);
 
 	return r10_bio;
@@ -1632,6 +1632,7 @@ static void raid10d(mddev_t *mddev)
 				generic_make_request(bio);
 			}
 		}
+		cond_resched();
 	}
 	if (unplug)
 		unplug_slaves(mddev);
@@ -2095,7 +2096,6 @@ static int run(mddev_t *mddev)
 	if (!conf->tmppage)
 		goto out_free_conf;
 
-	conf->mddev = mddev;
 	conf->raid_disks = mddev->raid_disks;
 	conf->near_copies = nc;
 	conf->far_copies = fc;
@@ -2132,6 +2132,7 @@ static int run(mddev_t *mddev)
 		goto out_free_conf;
 	}
 
+	conf->mddev = mddev;
 	spin_lock_init(&conf->device_lock);
 	mddev->queue->queue_lock = &conf->device_lock;
[diff collapsed in the original view: drivers/md/raid5.c]
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -214,12 +214,20 @@ struct stripe_head {
 	int			   disks;	/* disks in stripe */
 	enum check_states	   check_state;
 	enum reconstruct_states    reconstruct_state;
-	/* stripe_operations
+	/**
+	 * struct stripe_operations
 	 * @target - STRIPE_OP_COMPUTE_BLK target
+	 * @target2 - 2nd compute target in the raid6 case
+	 * @zero_sum_result - P and Q verification flags
+	 * @request - async service request flags for raid_run_ops
 	 */
 	struct stripe_operations {
 		int		     target, target2;
 		enum sum_check_flags zero_sum_result;
+		#ifdef CONFIG_MULTICORE_RAID456
+		unsigned long	     request;
+		wait_queue_head_t    wait_for_ops;
+		#endif
 	} ops;
 	struct r5dev {
 		struct bio	req;
@@ -294,6 +302,8 @@ struct r6_state {
 #define	STRIPE_FULL_WRITE	13 /* all blocks are set to be overwritten */
 #define	STRIPE_BIOFILL_RUN	14
 #define	STRIPE_COMPUTE_RUN	15
+#define	STRIPE_OPS_REQ_PENDING	16
+
 /*
  * Operation request flags
  */
@@ -478,7 +488,7 @@ static inline int algorithm_valid_raid6(int layout)
 {
 	return (layout >= 0 && layout <= 5)
 		||
-		(layout == 8 || layout == 10)
+		(layout >= 8 && layout <= 10)
 		||
 		(layout >= 16 && layout <= 20);
 }
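
For context on the algorithm_valid_raid6() change: layouts 8, 9 and 10 are the three DDF RAID6 variants, and the old expression (layout == 8 || layout == 10) wrongly rejected 9. The corresponding constants, as defined earlier in raid5.h:

	#define ALGORITHM_ROTATING_ZERO_RESTART	8  /* DDF PRL=6 RLQ=1 */
	#define ALGORITHM_ROTATING_N_RESTART	9  /* DDF PRL=6 RLQ=2 */
	#define ALGORITHM_ROTATING_N_CONTINUE	10 /* DDF PRL=6 RLQ=3 */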
--- a/drivers/md/raid6altivec.uc
+++ b/drivers/md/raid6altivec.uc
@@ -15,7 +15,7 @@
  *
  * $#-way unrolled portable integer math RAID-6 instruction set
  *
- * This file is postprocessed using unroll.pl
+ * This file is postprocessed using unroll.awk
  *
  * <benh> hpa: in process,
  * you can just "steal" the vec unit with enable_kernel_altivec() (but
--- a/drivers/md/raid6int.uc
+++ b/drivers/md/raid6int.uc
@@ -15,7 +15,7 @@
  *
  * $#-way unrolled portable integer math RAID-6 instruction set
  *
- * This file is postprocessed using unroll.pl
+ * This file is postprocessed using unroll.awk
  */
 
 #include <linux/raid/pq.h>
--- a/drivers/md/raid6test/Makefile
+++ b/drivers/md/raid6test/Makefile
@@ -7,7 +7,7 @@ CC	 = gcc
 OPTFLAGS = -O2			# Adjust as desired
 CFLAGS	 = -I.. -I ../../../include -g $(OPTFLAGS)
 LD	 = ld
-PERL	 = perl
+AWK	 = awk
 AR	 = ar
 RANLIB	 = ranlib
 
@@ -35,35 +35,35 @@ raid6.a: raid6int1.o raid6int2.o raid6int4.o raid6int8.o raid6int16.o \
 raid6test: test.c raid6.a
 	$(CC) $(CFLAGS) -o raid6test $^
 
-raid6altivec1.c: raid6altivec.uc ../unroll.pl
-	$(PERL) ../unroll.pl 1 < raid6altivec.uc > $@
+raid6altivec1.c: raid6altivec.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=1 < raid6altivec.uc > $@
 
-raid6altivec2.c: raid6altivec.uc ../unroll.pl
-	$(PERL) ../unroll.pl 2 < raid6altivec.uc > $@
+raid6altivec2.c: raid6altivec.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=2 < raid6altivec.uc > $@
 
-raid6altivec4.c: raid6altivec.uc ../unroll.pl
-	$(PERL) ../unroll.pl 4 < raid6altivec.uc > $@
+raid6altivec4.c: raid6altivec.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=4 < raid6altivec.uc > $@
 
-raid6altivec8.c: raid6altivec.uc ../unroll.pl
-	$(PERL) ../unroll.pl 8 < raid6altivec.uc > $@
+raid6altivec8.c: raid6altivec.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=8 < raid6altivec.uc > $@
 
-raid6int1.c: raid6int.uc ../unroll.pl
-	$(PERL) ../unroll.pl 1 < raid6int.uc > $@
+raid6int1.c: raid6int.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=1 < raid6int.uc > $@
 
-raid6int2.c: raid6int.uc ../unroll.pl
-	$(PERL) ../unroll.pl 2 < raid6int.uc > $@
+raid6int2.c: raid6int.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=2 < raid6int.uc > $@
 
-raid6int4.c: raid6int.uc ../unroll.pl
-	$(PERL) ../unroll.pl 4 < raid6int.uc > $@
+raid6int4.c: raid6int.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=4 < raid6int.uc > $@
 
-raid6int8.c: raid6int.uc ../unroll.pl
-	$(PERL) ../unroll.pl 8 < raid6int.uc > $@
+raid6int8.c: raid6int.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=8 < raid6int.uc > $@
 
-raid6int16.c: raid6int.uc ../unroll.pl
-	$(PERL) ../unroll.pl 16 < raid6int.uc > $@
+raid6int16.c: raid6int.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=16 < raid6int.uc > $@
 
-raid6int32.c: raid6int.uc ../unroll.pl
-	$(PERL) ../unroll.pl 32 < raid6int.uc > $@
+raid6int32.c: raid6int.uc ../unroll.awk
+	$(AWK) ../unroll.awk -vN=32 < raid6int.uc > $@
 
 raid6tables.c: mktables
 	./mktables > raid6tables.c
--- /dev/null
+++ b/drivers/md/unroll.awk
@@ -0,0 +1,19 @@
+# This filter requires one command line option of form -vN=n
+# where n must be a decimal number.
+#
+# Repeat each input line containing $$ n times, replacing $$ with 0...n-1.
+# Replace each $# with n, and each $* with a single $.
+
+BEGIN {
+	n = N + 0
+}
+{
+	if (/\$\$/) { rep = n } else { rep = 1 }
+	for (i = 0; i < rep; ++i) {
+		tmp = $0
+		gsub(/\$\$/, i, tmp)
+		gsub(/\$\#/, n, tmp)
+		gsub(/\$\*/, "$", tmp)
+		print tmp
+	}
+}
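
A quick demonstration of the filter on a representative line in the .uc dialect (the input line is illustrative, not quoted verbatim from raid6int.uc):

	$ awk -f unroll.awk -vN=2 <<'EOF'
	wp$$ = wq$$ = q[d+$$*NSIZE];
	EOF
	wp0 = wq0 = q[d+0*NSIZE];
	wp1 = wq1 = q[d+1*NSIZE];

Lines without $$ pass through once; $# is replaced by 2 and $* by a literal $ on every line.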
--- a/drivers/md/unroll.pl
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/perl
-#
-# Take a piece of C code and for each line which contains the sequence $$
-# repeat n times with $ replaced by 0...n-1; the sequence $# is replaced
-# by the unrolling factor, and $* with a single $
-#
-
-($n) = @ARGV;
-$n += 0;
-
-while ( defined($line = <STDIN>) ) {
-	if ( $line =~ /\$\$/ ) {
-		$rep = $n;
-	} else {
-		$rep = 1;
-	}
-	for ( $i = 0 ; $i < $rep ; $i++ ) {
-		$tmp = $line;
-		$tmp =~ s/\$\$/$i/g;
-		$tmp =~ s/\$\#/$n/g;
-		$tmp =~ s/\$\*/\$/g;
-		print $tmp;
-	}
-}