Commit b6d56144 authored by Logan Gunthorpe, committed by Song Liu

md/raid5: Refactor raid5_get_active_stripe()

Refactor raid5_get_active_stripe(), replacing the gotos with an
explicit infinite loop and some additional nesting.
Suggested-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Song Liu <song@kernel.org>
parent 1727fd50
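For context, the transformation in the diff below follows a common shape: a retry label reached via goto becomes the top of a for (;;) loop, each "goto out" becomes a break, and the label-guarded tail becomes nesting inside the loop. The following standalone sketch illustrates only that shape; the names try_get(), slow_path() and get_resource() are hypothetical placeholders, not the raid5 code.

	/* Minimal sketch of the goto-to-loop refactor, with placeholder names. */
	#include <stdio.h>
	#include <stdbool.h>

	static int attempts;

	static bool try_get(int *out)
	{
		/* Pretend the resource only becomes available on the third try. */
		if (++attempts < 3)
			return false;
		*out = 42;
		return true;
	}

	static void slow_path(void)
	{
		/* Stand-in for waiting or allocating before retrying. */
	}

	static int get_resource(void)
	{
		int val;

		for (;;) {			/* was: retry: */
			if (try_get(&val))
				break;		/* was: goto out; */
			slow_path();
		}				/* was: goto retry; */

		return val;			/* was: out: return val; */
	}

	int main(void)
	{
		printf("got %d after %d attempts\n", get_resource(), attempts);
		return 0;
	}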
@@ -811,54 +811,54 @@ static struct stripe_head *__raid5_get_active_stripe(struct r5conf *conf,
 
 	spin_lock_irq(conf->hash_locks + hash);
 
-retry:
-	if (!noquiesce && conf->quiesce) {
-		/*
-		 * Must release the reference to batch_last before waiting,
-		 * on quiesce, otherwise the batch_last will hold a reference
-		 * to a stripe and raid5_quiesce() will deadlock waiting for
-		 * active_stripes to go to zero.
-		 */
-		if (ctx && ctx->batch_last) {
-			raid5_release_stripe(ctx->batch_last);
-			ctx->batch_last = NULL;
-		}
-
-		wait_event_lock_irq(conf->wait_for_quiescent, !conf->quiesce,
-				    *(conf->hash_locks + hash));
-	}
-
-	sh = find_get_stripe(conf, sector, conf->generation - previous, hash);
-	if (sh)
-		goto out;
-
-	if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
-		goto wait_for_stripe;
-
-	sh = get_free_stripe(conf, hash);
-	if (sh) {
-		r5c_check_stripe_cache_usage(conf);
-		init_stripe(sh, sector, previous);
-		atomic_inc(&sh->count);
-		goto out;
-	}
-
-	if (!test_bit(R5_DID_ALLOC, &conf->cache_state))
-		set_bit(R5_ALLOC_MORE, &conf->cache_state);
-
-wait_for_stripe:
-	if (noblock)
-		goto out;
-
-	set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
-	r5l_wake_reclaim(conf->log, 0);
-	wait_event_lock_irq(conf->wait_for_stripe,
-			    is_inactive_blocked(conf, hash),
-			    *(conf->hash_locks + hash));
-	clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
-	goto retry;
-
-out:
+	for (;;) {
+		if (!noquiesce && conf->quiesce) {
+			/*
+			 * Must release the reference to batch_last before
+			 * waiting, on quiesce, otherwise the batch_last will
+			 * hold a reference to a stripe and raid5_quiesce()
+			 * will deadlock waiting for active_stripes to go to
+			 * zero.
+			 */
+			if (ctx && ctx->batch_last) {
+				raid5_release_stripe(ctx->batch_last);
+				ctx->batch_last = NULL;
+			}
+
+			wait_event_lock_irq(conf->wait_for_quiescent,
+					    !conf->quiesce,
+					    *(conf->hash_locks + hash));
+		}
+
+		sh = find_get_stripe(conf, sector, conf->generation - previous,
+				     hash);
+		if (sh)
+			break;
+
+		if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
+			sh = get_free_stripe(conf, hash);
+			if (sh) {
+				r5c_check_stripe_cache_usage(conf);
+				init_stripe(sh, sector, previous);
+				atomic_inc(&sh->count);
+				break;
+			}
+
+			if (!test_bit(R5_DID_ALLOC, &conf->cache_state))
+				set_bit(R5_ALLOC_MORE, &conf->cache_state);
+		}
+
+		if (noblock)
+			break;
+
+		set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
+		r5l_wake_reclaim(conf->log, 0);
+		wait_event_lock_irq(conf->wait_for_stripe,
+				    is_inactive_blocked(conf, hash),
+				    *(conf->hash_locks + hash));
+		clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
+	}
+
 	spin_unlock_irq(conf->hash_locks + hash);
 
 	return sh;
 }