Commit 66cb1910 authored by Joe Thornber, committed by Mike Snitzer

dm cache: fix a race condition between queuing new migrations and quiescing for a shutdown

The code that was trying to do this was inadequate.  The postsuspend
method (in ioctl context), needs to wait for the worker thread to
acknowledge the request to quiesce.  Otherwise the migration count may
drop to zero temporarily before the worker thread realises we're
quiescing.  In this case the target will be taken down, but the worker
thread may have issued a new migration, which will cause an oops when
it completes.
Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: stable@vger.kernel.org # 3.9+
parent f8e5f01a
...@@ -148,6 +148,9 @@ struct cache { ...@@ -148,6 +148,9 @@ struct cache {
wait_queue_head_t migration_wait; wait_queue_head_t migration_wait;
atomic_t nr_migrations; atomic_t nr_migrations;
wait_queue_head_t quiescing_wait;
atomic_t quiescing_ack;
/* /*
* cache_size entries, dirty if set * cache_size entries, dirty if set
*/ */
...@@ -749,8 +752,9 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, ...@@ -749,8 +752,9 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
static void cleanup_migration(struct dm_cache_migration *mg) static void cleanup_migration(struct dm_cache_migration *mg)
{ {
dec_nr_migrations(mg->cache); struct cache *cache = mg->cache;
free_migration(mg); free_migration(mg);
dec_nr_migrations(cache);
} }
static void migration_failure(struct dm_cache_migration *mg) static void migration_failure(struct dm_cache_migration *mg)
...@@ -1347,34 +1351,51 @@ static void writeback_some_dirty_blocks(struct cache *cache) ...@@ -1347,34 +1351,51 @@ static void writeback_some_dirty_blocks(struct cache *cache)
/*---------------------------------------------------------------- /*----------------------------------------------------------------
* Main worker loop * Main worker loop
*--------------------------------------------------------------*/ *--------------------------------------------------------------*/
static void start_quiescing(struct cache *cache) static bool is_quiescing(struct cache *cache)
{ {
int r;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&cache->lock, flags); spin_lock_irqsave(&cache->lock, flags);
cache->quiescing = 1; r = cache->quiescing;
spin_unlock_irqrestore(&cache->lock, flags); spin_unlock_irqrestore(&cache->lock, flags);
return r;
} }
static void stop_quiescing(struct cache *cache) static void ack_quiescing(struct cache *cache)
{
if (is_quiescing(cache)) {
atomic_inc(&cache->quiescing_ack);
wake_up(&cache->quiescing_wait);
}
}
/*
 * Block (ioctl context) until the worker thread has acknowledged the
 * quiesce request by bumping quiescing_ack.
 */
static void wait_for_quiescing_ack(struct cache *cache)
{
	wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
}
/*
 * Request quiescing (called from ioctl context, e.g. postsuspend).
 * After setting the flag we wait for the worker thread to acknowledge;
 * without that wait the migration count could transiently hit zero
 * while the worker was still about to issue a new migration, oopsing
 * on completion after the target had been taken down.
 */
static void start_quiescing(struct cache *cache)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	cache->quiescing = true;
	spin_unlock_irqrestore(&cache->lock, flags);

	wait_for_quiescing_ack(cache);
}
static bool is_quiescing(struct cache *cache) static void stop_quiescing(struct cache *cache)
{ {
int r;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&cache->lock, flags); spin_lock_irqsave(&cache->lock, flags);
r = cache->quiescing; cache->quiescing = false;
spin_unlock_irqrestore(&cache->lock, flags); spin_unlock_irqrestore(&cache->lock, flags);
return r; atomic_set(&cache->quiescing_ack, 0);
} }
static void wait_for_migrations(struct cache *cache) static void wait_for_migrations(struct cache *cache)
...@@ -1421,16 +1442,15 @@ static void do_worker(struct work_struct *ws) ...@@ -1421,16 +1442,15 @@ static void do_worker(struct work_struct *ws)
struct cache *cache = container_of(ws, struct cache, worker); struct cache *cache = container_of(ws, struct cache, worker);
do { do {
if (!is_quiescing(cache)) if (!is_quiescing(cache)) {
writeback_some_dirty_blocks(cache);
process_deferred_writethrough_bios(cache);
process_deferred_bios(cache); process_deferred_bios(cache);
}
process_migrations(cache, &cache->quiesced_migrations, issue_copy); process_migrations(cache, &cache->quiesced_migrations, issue_copy);
process_migrations(cache, &cache->completed_migrations, complete_migration); process_migrations(cache, &cache->completed_migrations, complete_migration);
writeback_some_dirty_blocks(cache);
process_deferred_writethrough_bios(cache);
if (commit_if_needed(cache)) { if (commit_if_needed(cache)) {
process_deferred_flush_bios(cache, false); process_deferred_flush_bios(cache, false);
...@@ -1443,6 +1463,9 @@ static void do_worker(struct work_struct *ws) ...@@ -1443,6 +1463,9 @@ static void do_worker(struct work_struct *ws)
process_migrations(cache, &cache->need_commit_migrations, process_migrations(cache, &cache->need_commit_migrations,
migration_success_post_commit); migration_success_post_commit);
} }
ack_quiescing(cache);
} while (more_work(cache)); } while (more_work(cache));
} }
...@@ -2006,6 +2029,9 @@ static int cache_create(struct cache_args *ca, struct cache **result) ...@@ -2006,6 +2029,9 @@ static int cache_create(struct cache_args *ca, struct cache **result)
atomic_set(&cache->nr_migrations, 0); atomic_set(&cache->nr_migrations, 0);
init_waitqueue_head(&cache->migration_wait); init_waitqueue_head(&cache->migration_wait);
init_waitqueue_head(&cache->quiescing_wait);
atomic_set(&cache->quiescing_ack, 0);
r = -ENOMEM; r = -ENOMEM;
cache->nr_dirty = 0; cache->nr_dirty = 0;
cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size)); cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment