Commit 306d7122 authored by Mikulas Patocka, committed by Sasha Levin

dm snapshot: suspend origin when doing exception handover

[ Upstream commit b735fede ]

In the function snapshot_resume we perform exception store handover: if
there is another active snapshot target sharing the same COW device, its
exception store is moved to the target that is being resumed.

The problem is that if there is a pending exception at the time of the
handover, it will still point to the old exception store afterwards, and
completing it crashes on the BUG_ON in
dm-snap-persistent.c:get_exception().
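
To make the failure mode concrete, here is a toy userspace model (not dm
code; every name in it is made up for illustration): a pending exception
remembers the exception store it was created against, so if that store is
handed over while the exception is still outstanding, completion runs
against state that no longer belongs to its snapshot:

/*
 * Toy userspace model of the hazard (illustrative names only, not the
 * dm-snap internals).  A pending exception caches the store it was
 * submitted against; after a handover the cached reference is stale and
 * the consistency check fires - in the kernel this is the BUG_ON in
 * dm-snap-persistent.c:get_exception().
 */
#include <assert.h>
#include <stdio.h>

struct exception_store { int chunks_used; };

struct snapshot { struct exception_store *store; };

struct pending_exception {
	struct snapshot *snap;
	struct exception_store *store;	/* cached when the write arrived */
};

/* Models the handover done during resume: the store changes owner. */
static void handover(struct snapshot *src, struct snapshot *dest)
{
	dest->store = src->store;
	src->store = NULL;
}

static void complete(struct pending_exception *pe)
{
	/* Holds before the handover, fails after it. */
	assert(pe->store == pe->snap->store);
	printf("committed to store with %d chunks used\n",
	       pe->store->chunks_used);
}

int main(void)
{
	struct exception_store es = { .chunks_used = 1 };
	struct snapshot old_snap = { .store = &es };
	struct snapshot new_snap = { .store = NULL };
	struct pending_exception pe = { .snap = &old_snap, .store = &es };

	handover(&old_snap, &new_snap);	/* exception store handed over */
	complete(&pe);			/* pending exception is now stale */
	return 0;
}

Suspending the origin before the handover guarantees that no such pending
exception can exist when the store changes hands.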

This bug can be triggered by repeatedly changing snapshot permissions
with "lvchange -p r" and "lvchange -p rw" while there are writes on the
associated origin device.

To fix this bug, we must suspend the origin device when doing the
exception store handover to make sure that there are no pending
exceptions:
- introduce a hash table (_dm_origins) that keeps track of the dm_origin
  structures of active origin targets.
- introduce functions __lookup_dm_origin, __insert_dm_origin and
  __remove_dm_origin that manipulate that hash table.
- modify snapshot_resume so that it calls dm_internal_suspend_fast() and
  dm_internal_resume_fast() on the origin device; a condensed sketch of
  the result follows this list.
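
For reference, the new snapshot_resume() flow, condensed from the hunks
below (this stable backport uses dm_internal_suspend()/dm_internal_resume()
rather than the _fast variants, per the note that follows; error handling
and the handover body are elided):

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
	struct dm_origin *o;
	struct mapped_device *origin_md = NULL;

	down_read(&_origins_lock);

	/* Find the origin target (if any) that maps s->origin->bdev. */
	o = __lookup_dm_origin(s->origin->bdev);
	if (o)
		origin_md = dm_table_get_md(o->ti->table);
	if (origin_md == dm_table_get_md(ti->table))
		origin_md = NULL;	/* don't suspend ourselves */

	/* Quiesce the origin so no pending exceptions are in flight. */
	if (origin_md)
		dm_internal_suspend(origin_md);

	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	/*
	 * ... exception store handover between snap_src and snap_dest,
	 * done under both snapshots' locks (elided here) ...
	 */

	if (origin_md)
		dm_internal_resume(origin_md);

	up_read(&_origins_lock);
	/* ... */
}

Because the origin is suspended while _origins_lock is held for read, and
origin_postsuspend takes the same lock for write before removing the
dm_origin from the hash, the entry found by __lookup_dm_origin cannot go
away underneath snapshot_resume.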

NOTE to stable@ people:

When backporting to kernels 3.12-3.18, use dm_internal_suspend and
dm_internal_resume instead of dm_internal_suspend_fast and
dm_internal_resume_fast.

When backporting to kernels older than 3.12, you need to pick functions
dm_internal_suspend and dm_internal_resume from the commit
fd2ed4d2.
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: stable@vger.kernel.org
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
parent 58533aec
@@ -20,6 +20,8 @@
 #include <linux/log2.h>
 #include <linux/dm-kcopyd.h>
 
+#include "dm.h"
+
 #include "dm-exception-store.h"
 
 #define DM_MSG_PREFIX "snapshots"
@@ -290,6 +292,16 @@ struct origin {
 	struct list_head snapshots;
 };
 
+/*
+ * This structure is allocated for each origin target
+ */
+struct dm_origin {
+	struct dm_dev *dev;
+	struct dm_target *ti;
+	unsigned split_boundary;
+	struct list_head hash_list;
+};
+
 /*
  * Size of the hash table for origin volumes. If we make this
  * the size of the minors list then it should be nearly perfect
@@ -297,6 +309,7 @@ struct origin {
 #define ORIGIN_HASH_SIZE 256
 #define ORIGIN_MASK      0xFF
 static struct list_head *_origins;
+static struct list_head *_dm_origins;
 static struct rw_semaphore _origins_lock;
 
 static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
@@ -310,12 +323,22 @@ static int init_origin_hash(void)
 	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
 			   GFP_KERNEL);
 	if (!_origins) {
-		DMERR("unable to allocate memory");
+		DMERR("unable to allocate memory for _origins");
 		return -ENOMEM;
 	}
 	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
 		INIT_LIST_HEAD(_origins + i);
 
+	_dm_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
+			      GFP_KERNEL);
+	if (!_dm_origins) {
+		DMERR("unable to allocate memory for _dm_origins");
+		kfree(_origins);
+		return -ENOMEM;
+	}
+	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
+		INIT_LIST_HEAD(_dm_origins + i);
+
 	init_rwsem(&_origins_lock);
 
 	return 0;
@@ -324,6 +347,7 @@ static int init_origin_hash(void)
 static void exit_origin_hash(void)
 {
 	kfree(_origins);
+	kfree(_dm_origins);
 }
 
 static unsigned origin_hash(struct block_device *bdev)
@@ -350,6 +374,30 @@ static void __insert_origin(struct origin *o)
 	list_add_tail(&o->hash_list, sl);
 }
 
+static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
+{
+	struct list_head *ol;
+	struct dm_origin *o;
+
+	ol = &_dm_origins[origin_hash(origin)];
+	list_for_each_entry (o, ol, hash_list)
+		if (bdev_equal(o->dev->bdev, origin))
+			return o;
+
+	return NULL;
+}
+
+static void __insert_dm_origin(struct dm_origin *o)
+{
+	struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];
+
+	list_add_tail(&o->hash_list, sl);
+}
+
+static void __remove_dm_origin(struct dm_origin *o)
+{
+	list_del(&o->hash_list);
+}
+
 /*
  * _origins_lock must be held when calling this function.
  * Returns number of snapshots registered using the supplied cow device, plus:
@@ -1841,8 +1889,20 @@ static void snapshot_resume(struct dm_target *ti)
 {
 	struct dm_snapshot *s = ti->private;
 	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
+	struct dm_origin *o;
+	struct mapped_device *origin_md = NULL;
 
 	down_read(&_origins_lock);
+
+	o = __lookup_dm_origin(s->origin->bdev);
+	if (o)
+		origin_md = dm_table_get_md(o->ti->table);
+	if (origin_md == dm_table_get_md(ti->table))
+		origin_md = NULL;
+	if (origin_md)
+		dm_internal_suspend(origin_md);
+
 	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
 	if (snap_src && snap_dest) {
 		down_write(&snap_src->lock);
@@ -1851,6 +1911,10 @@ static void snapshot_resume(struct dm_target *ti)
 		up_write(&snap_dest->lock);
 		up_write(&snap_src->lock);
 	}
+
+	if (origin_md)
+		dm_internal_resume(origin_md);
+
 	up_read(&_origins_lock);
 
 	/* Now we have correct chunk size, reregister */
@@ -2133,11 +2197,6 @@ static int origin_write_extent(struct dm_snapshot *merging_snap,
  * Origin: maps a linear range of a device, with hooks for snapshotting.
  */
 
-struct dm_origin {
-	struct dm_dev *dev;
-	unsigned split_boundary;
-};
-
 /*
  * Construct an origin mapping: <dev_path>
  * The context for an origin is merely a 'struct dm_dev *'
@@ -2166,6 +2225,7 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad_open;
 	}
 
+	o->ti = ti;
 	ti->private = o;
 	ti->num_flush_bios = 1;
 
@@ -2180,6 +2240,7 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 static void origin_dtr(struct dm_target *ti)
 {
 	struct dm_origin *o = ti->private;
+
 	dm_put_device(ti, o->dev);
 	kfree(o);
 }
@@ -2216,6 +2277,19 @@ static void origin_resume(struct dm_target *ti)
 	struct dm_origin *o = ti->private;
 
 	o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);
+
+	down_write(&_origins_lock);
+	__insert_dm_origin(o);
+	up_write(&_origins_lock);
+}
+
+static void origin_postsuspend(struct dm_target *ti)
+{
+	struct dm_origin *o = ti->private;
+
+	down_write(&_origins_lock);
+	__remove_dm_origin(o);
+	up_write(&_origins_lock);
 }
 
 static void origin_status(struct dm_target *ti, status_type_t type,
@@ -2258,12 +2332,13 @@ static int origin_iterate_devices(struct dm_target *ti,
 
 static struct target_type origin_target = {
 	.name    = "snapshot-origin",
-	.version = {1, 8, 1},
+	.version = {1, 9, 0},
 	.module  = THIS_MODULE,
 	.ctr     = origin_ctr,
 	.dtr     = origin_dtr,
 	.map     = origin_map,
 	.resume  = origin_resume,
+	.postsuspend = origin_postsuspend,
 	.status  = origin_status,
 	.merge   = origin_merge,
 	.iterate_devices = origin_iterate_devices,
@@ -2271,7 +2346,7 @@ static struct target_type origin_target = {
 
 static struct target_type snapshot_target = {
 	.name    = "snapshot",
-	.version = {1, 12, 0},
+	.version = {1, 13, 0},
 	.module  = THIS_MODULE,
 	.ctr     = snapshot_ctr,
 	.dtr     = snapshot_dtr,
...
@@ -2887,6 +2887,7 @@ void dm_internal_suspend(struct mapped_device *md)
 	flush_workqueue(md->wq);
 	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
 }
+EXPORT_SYMBOL_GPL(dm_internal_suspend);
 
 void dm_internal_resume(struct mapped_device *md)
 {
@@ -2898,6 +2899,7 @@ void dm_internal_resume(struct mapped_device *md)
 done:
 	mutex_unlock(&md->suspend_lock);
 }
+EXPORT_SYMBOL_GPL(dm_internal_resume);
 
 /*-----------------------------------------------------------------
  * Event notification.
...