Commit 1b956f7a authored by NeilBrown

md/raid5: be more selective about distributing flags across batch.

When a batch of stripes is broken up, we keep some of the flags
that were per-stripe, and copy other flags from the head to all
others.

This only happens while a stripe is being handled, so many of the
flags are irrelevant.

The "SYNC_FLAGS" (which I've renamed to make it clear there are
several) and STRIPE_DEGRADED are set per-stripe and so need to be
preserved.  STRIPE_INSYNC is the only flag that is set on the head
that needs to be propagated to all others.

For safety, add a WARN_ON if others are set, except:
 STRIPE_HANDLE - this is safe and per-stripe, and we are going to set it
      in several cases anyway
 STRIPE_INSYNC
 STRIPE_IO_STARTED - this is just a hint and doesn't hurt.
 STRIPE_ON_PLUG_LIST
 STRIPE_ON_RELEASE_LIST - It is pointless for a batched
           stripe to be on one of these lists, but it can happen
           and can be safely ignored.
Signed-off-by: NeilBrown <neilb@suse.de>
parent 3960ce79
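A minimal userspace sketch of the flag policy described in the message above. It is not the driver code: the bit positions are made up rather than the real enum values from raid5.h, and the plain assignment stands in for the atomic update the driver performs. It only shows which bits a batched stripe keeps and which single bit it inherits from the batch head.

    /* Illustration only: bit positions are invented and the update is not atomic. */
    #include <stdio.h>

    #define EXPAND_SYNC_FLAGS  (0xfUL << 0)  /* stands in for STRIPE_EXPAND_SYNC_FLAGS */
    #define DEGRADED           (1UL << 4)    /* stands in for (1 << STRIPE_DEGRADED)   */
    #define INSYNC             (1UL << 5)    /* stands in for (1 << STRIPE_INSYNC)     */

    /* Distribute flags from the batch head to one batched stripe:
     * keep the stripe's own expand/sync and degraded bits, inherit only INSYNC. */
    static unsigned long distribute_flags(unsigned long sh_state, unsigned long head_state)
    {
            return (sh_state & (EXPAND_SYNC_FLAGS | DEGRADED)) | (head_state & INSYNC);
    }

    int main(void)
    {
            unsigned long sh_state = DEGRADED | INSYNC;  /* stale INSYNC on the member...   */
            unsigned long head_state = 0;                /* ...is dropped unless the head has it */

            printf("%#lx\n", distribute_flags(sh_state, head_state));  /* prints 0x10: DEGRADED kept */
            return 0;
    }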
drivers/md/raid5.c

@@ -3534,10 +3534,27 @@ static void handle_stripe_clean_event(struct r5conf *conf,
                               struct stripe_head, batch_list);
             list_del_init(&sh->batch_list);
-            set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG,
-                          head_sh->state & ~((1 << STRIPE_ACTIVE) |
-                                             (1 << STRIPE_PREREAD_ACTIVE) |
-                                             STRIPE_EXPAND_SYNC_FLAG));
+            WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
+                                      (1 << STRIPE_SYNCING) |
+                                      (1 << STRIPE_REPLACED) |
+                                      (1 << STRIPE_PREREAD_ACTIVE) |
+                                      (1 << STRIPE_DELAYED) |
+                                      (1 << STRIPE_BIT_DELAY) |
+                                      (1 << STRIPE_FULL_WRITE) |
+                                      (1 << STRIPE_BIOFILL_RUN) |
+                                      (1 << STRIPE_COMPUTE_RUN) |
+                                      (1 << STRIPE_OPS_REQ_PENDING) |
+                                      (1 << STRIPE_DISCARD) |
+                                      (1 << STRIPE_BATCH_READY) |
+                                      (1 << STRIPE_BATCH_ERR) |
+                                      (1 << STRIPE_BITMAP_PENDING)));
+            WARN_ON_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) |
+                                           (1 << STRIPE_REPLACED)));
+            set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
+                                        (1 << STRIPE_DEGRADED)),
+                          head_sh->state & (1 << STRIPE_INSYNC));
             sh->check_state = head_sh->check_state;
             sh->reconstruct_state = head_sh->reconstruct_state;
             for (i = 0; i < sh->disks; i++) {
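Assuming set_mask_bits(ptr, mask, bits) atomically computes *ptr = (*ptr & ~mask) | bits (the usual reading of the helper in include/linux/bitops.h; worth confirming against your tree), the new call above, and the identical one in break_stripe_batch_list() below, reduces to:

    /* Non-atomic expansion of the new set_mask_bits() call, for illustration only */
    sh->state = (sh->state & (STRIPE_EXPAND_SYNC_FLAGS |
                              (1 << STRIPE_DEGRADED))) |
                (head_sh->state & (1 << STRIPE_INSYNC));

Every other bit in sh->state is cleared, which is why the WARN_ON_ONCE() calls list the flags that are expected not to be set on a batched stripe (or its head) at this point.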
@@ -3549,7 +3566,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
             spin_lock_irq(&sh->stripe_lock);
             sh->batch_head = NULL;
             spin_unlock_irq(&sh->stripe_lock);
-            if (sh->state & STRIPE_EXPAND_SYNC_FLAG)
+            if (sh->state & STRIPE_EXPAND_SYNC_FLAGS)
                 set_bit(STRIPE_HANDLE, &sh->state);
             release_stripe(sh);
         }
@@ -3559,7 +3576,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
         spin_unlock_irq(&head_sh->stripe_lock);
         if (wakeup_nr)
             wake_up(&conf->wait_for_overlap);
-        if (head_sh->state & STRIPE_EXPAND_SYNC_FLAG)
+        if (head_sh->state & STRIPE_EXPAND_SYNC_FLAGS)
             set_bit(STRIPE_HANDLE, &head_sh->state);
     }
@@ -4246,11 +4263,27 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
         list_del_init(&sh->batch_list);
-        set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG,
-                      head_sh->state & ~((1 << STRIPE_ACTIVE) |
-                                         (1 << STRIPE_PREREAD_ACTIVE) |
-                                         (1 << STRIPE_DEGRADED) |
-                                         STRIPE_EXPAND_SYNC_FLAG));
+        WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
+                                  (1 << STRIPE_SYNCING) |
+                                  (1 << STRIPE_REPLACED) |
+                                  (1 << STRIPE_PREREAD_ACTIVE) |
+                                  (1 << STRIPE_DELAYED) |
+                                  (1 << STRIPE_BIT_DELAY) |
+                                  (1 << STRIPE_FULL_WRITE) |
+                                  (1 << STRIPE_BIOFILL_RUN) |
+                                  (1 << STRIPE_COMPUTE_RUN) |
+                                  (1 << STRIPE_OPS_REQ_PENDING) |
+                                  (1 << STRIPE_DISCARD) |
+                                  (1 << STRIPE_BATCH_READY) |
+                                  (1 << STRIPE_BATCH_ERR) |
+                                  (1 << STRIPE_BITMAP_PENDING)));
+        WARN_ON_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) |
+                                       (1 << STRIPE_REPLACED)));
+        set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
+                                    (1 << STRIPE_DEGRADED)),
+                      head_sh->state & (1 << STRIPE_INSYNC));
         sh->check_state = head_sh->check_state;
         sh->reconstruct_state = head_sh->reconstruct_state;
         for (i = 0; i < sh->disks; i++) {
drivers/md/raid5.h

@@ -342,7 +342,7 @@ enum {
      */
 };
-#define STRIPE_EXPAND_SYNC_FLAG \
+#define STRIPE_EXPAND_SYNC_FLAGS \
     ((1 << STRIPE_EXPAND_SOURCE) |\
     (1 << STRIPE_EXPAND_READY) |\
     (1 << STRIPE_EXPANDING) |\