Commit 8357422d authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm

* git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm: (33 commits)
  dm mpath: support discard
  dm stripe: support discards
  dm: split discard requests on target boundaries
  dm stripe: optimize sector division
  dm stripe: move sector translation to a function
  dm: error return error for discards
  dm delay: support discard
  dm: zero silently drop discards
  dm: use dm_target_offset macro
  dm: factor out max_io_len_target_boundary
  dm: use common __issue_target_request for flush and discard support
  dm: linear support discard
  dm crypt: simplify crypt_ctr
  dm crypt: simplify crypt_config destruction logic
  dm: allow autoloading of dm mod
  dm: rename map_info flush_request to target_request_nr
  dm ioctl: refactor dm_table_complete
  dm snapshot: implement merge
  dm: do not initialise full request queue when bio based
  dm ioctl: make bio or request based device type immutable
  ...
parents 1021a645 959eb4e5
@@ -445,6 +445,7 @@ Your cooperation is appreciated.
 		233 = /dev/kmview	View-OS A process with a view
 		234 = /dev/btrfs-control	Btrfs control device
 		235 = /dev/autofs	Autofs control device
+		236 = /dev/mapper/control	Device-Mapper control device
 		240-254			Reserved for local use
 		255			Reserved for MISC_DYNAMIC_MINOR
...
This diff is collapsed.
@@ -198,6 +198,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	atomic_set(&dc->may_delay, 1);
 	ti->num_flush_requests = 1;
+	ti->num_discard_requests = 1;
 	ti->private = dc;
 	return 0;
@@ -281,14 +282,13 @@ static int delay_map(struct dm_target *ti, struct bio *bio,
 		bio->bi_bdev = dc->dev_write->bdev;
 		if (bio_sectors(bio))
 			bio->bi_sector = dc->start_write +
-					 (bio->bi_sector - ti->begin);
+					 dm_target_offset(ti, bio->bi_sector);
 		return delay_bio(dc, dc->write_delay, bio);
 	}
 	bio->bi_bdev = dc->dev_read->bdev;
-	bio->bi_sector = dc->start_read +
-			 (bio->bi_sector - ti->begin);
+	bio->bi_sector = dc->start_read + dm_target_offset(ti, bio->bi_sector);
 	return delay_bio(dc, dc->read_delay, bio);
 }
...
@@ -173,7 +173,9 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
 	/* Validate the chunk size against the device block size */
 	if (chunk_size %
-	    (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9)) {
+	    (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9) ||
+	    chunk_size %
+	    (bdev_logical_block_size(dm_snap_origin(store->snap)->bdev) >> 9)) {
 		*error = "Chunk size is not a multiple of device blocksize";
 		return -EINVAL;
 	}
...
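The check above compares quantities in 512-byte sectors: chunk_size is already a sector count, while bdev_logical_block_size() returns bytes, hence the >> 9 before each modulo. A standalone sketch of the arithmetic, with made-up block sizes:

    /* Demo of the chunk-size validation above.  A 4096-byte cow device
     * needs chunk_size to be a multiple of 8 sectors; a 512-byte origin
     * accepts any chunk.  Values are illustrative only. */
    #include <stdio.h>

    int main(void)
    {
        unsigned chunk_size = 16;                       /* sectors (8 KiB) */
        unsigned cow_block = 4096, origin_block = 512;  /* bytes */

        if (chunk_size % (cow_block >> 9) ||
            chunk_size % (origin_block >> 9))
            printf("rejected: not a multiple of device blocksize\n");
        else
            printf("accepted\n");   /* 16 % 8 == 0 and 16 % 1 == 0 */
        return 0;
    }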
@@ -126,8 +126,9 @@ struct dm_exception_store {
 };
 /*
- * Obtain the cow device used by a given snapshot.
+ * Obtain the origin or cow device used by a given snapshot.
 */
+struct dm_dev *dm_snap_origin(struct dm_snapshot *snap);
 struct dm_dev *dm_snap_cow(struct dm_snapshot *snap);
 /*
...
This diff is collapsed.
@@ -53,6 +53,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	}
 	ti->num_flush_requests = 1;
+	ti->num_discard_requests = 1;
 	ti->private = lc;
 	return 0;
@@ -73,7 +74,7 @@ static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)
 {
 	struct linear_c *lc = ti->private;
-	return lc->start + (bi_sector - ti->begin);
+	return lc->start + dm_target_offset(ti, bi_sector);
 }
 static void linear_map_bio(struct dm_target *ti, struct bio *bio)
...
@@ -706,6 +706,7 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
 	if (as->argc < nr_params) {
 		ti->error = "not enough path parameters";
+		r = -EINVAL;
 		goto bad;
 	}
@@ -892,6 +893,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
 	}
 	ti->num_flush_requests = 1;
+	ti->num_discard_requests = 1;
 	return 0;
@@ -1271,6 +1273,15 @@ static int do_end_io(struct multipath *m, struct request *clone,
 	if (error == -EOPNOTSUPP)
 		return error;
+	if (clone->cmd_flags & REQ_DISCARD)
+		/*
+		 * Pass all discard request failures up.
+		 * FIXME: only fail_path if the discard failed due to a
+		 * transport problem.  This requires precise understanding
+		 * of the underlying failure (e.g. the SCSI sense).
+		 */
+		return error;
 	if (mpio->pgpath)
 		fail_path(mpio->pgpath);
...
@@ -445,7 +445,7 @@ static sector_t map_sector(struct mirror *m, struct bio *bio)
 {
 	if (unlikely(!bio->bi_size))
 		return 0;
-	return m->offset + (bio->bi_sector - m->ms->ti->begin);
+	return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector);
 }
 static void map_bio(struct mirror *m, struct bio *bio)
...
@@ -266,7 +266,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
 */
 static chunk_t area_location(struct pstore *ps, chunk_t area)
 {
-	return 1 + ((ps->exceptions_per_area + 1) * area);
+	return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
 }
 /*
@@ -780,8 +780,8 @@ static int persistent_commit_merge(struct dm_exception_store *store,
 	 * ps->current_area does not get reduced by prepare_merge() until
 	 * after commit_merge() has removed the nr_merged previous exceptions.
 	 */
-	ps->next_free = (area_location(ps, ps->current_area) - 1) +
-			(ps->current_committed + 1) + NUM_SNAPSHOT_HDR_CHUNKS;
+	ps->next_free = area_location(ps, ps->current_area) +
+			ps->current_committed + 1;
 	return 0;
 }
...
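The two next_free expressions are equivalent: the old code subtracted the header chunk back out of area_location() and re-added NUM_SNAPSHOT_HDR_CHUNKS at the end, so once the constant is folded into area_location() the correction terms cancel. A standalone check with illustrative values:

    /* Verifies old and new next_free agree, given that the old
     * area_location() hard-coded NUM_SNAPSHOT_HDR_CHUNKS as "1 + ...".
     * exceptions_per_area/area/committed values are made up. */
    #include <assert.h>
    #include <stdio.h>

    #define NUM_SNAPSHOT_HDR_CHUNKS 1

    int main(void)
    {
        unsigned long epa = 254, area = 3, committed = 10;

        unsigned long old_loc = 1 + (epa + 1) * area;
        unsigned long new_loc = NUM_SNAPSHOT_HDR_CHUNKS + (epa + 1) * area;

        unsigned long old_next = (old_loc - 1) + (committed + 1)
                                 + NUM_SNAPSHOT_HDR_CHUNKS;
        unsigned long new_next = new_loc + committed + 1;

        assert(old_next == new_next);
        printf("next_free = %lu\n", new_next);
        return 0;
    }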
@@ -148,6 +148,12 @@ struct dm_snapshot {
 #define RUNNING_MERGE          0
 #define SHUTDOWN_MERGE         1
+struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
+{
+	return s->origin;
+}
+EXPORT_SYMBOL(dm_snap_origin);
+
 struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
 {
 	return s->cow;
@@ -1065,10 +1071,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		origin_mode = FMODE_WRITE;
 	}
-	origin_path = argv[0];
-	argv++;
-	argc--;
 	s = kmalloc(sizeof(*s), GFP_KERNEL);
 	if (!s) {
 		ti->error = "Cannot allocate snapshot context private "
@@ -1077,6 +1079,16 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad;
 	}
+	origin_path = argv[0];
+	argv++;
+	argc--;
+
+	r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
+	if (r) {
+		ti->error = "Cannot get origin device";
+		goto bad_origin;
+	}
+
 	cow_path = argv[0];
 	argv++;
 	argc--;
@@ -1097,12 +1109,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	argv += args_used;
 	argc -= args_used;
-	r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
-	if (r) {
-		ti->error = "Cannot get origin device";
-		goto bad_origin;
-	}
 	s->ti = ti;
 	s->valid = 1;
 	s->active = 0;
@@ -1212,15 +1218,15 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	dm_exception_table_exit(&s->complete, exception_cache);
 bad_hash_tables:
-	dm_put_device(ti, s->origin);
-
-bad_origin:
 	dm_exception_store_destroy(s->store);
 bad_store:
 	dm_put_device(ti, s->cow);
 bad_cow:
+	dm_put_device(ti, s->origin);
+
+bad_origin:
 	kfree(s);
 bad:
@@ -1314,12 +1320,12 @@ static void snapshot_dtr(struct dm_target *ti)
 	mempool_destroy(s->pending_pool);
-	dm_put_device(ti, s->origin);
-
 	dm_exception_store_destroy(s->store);
 	dm_put_device(ti, s->cow);
+	dm_put_device(ti, s->origin);
+
 	kfree(s);
 }
@@ -1686,7 +1692,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
 	chunk_t chunk;
 	if (unlikely(bio_empty_barrier(bio))) {
-		if (!map_context->flush_request)
+		if (!map_context->target_request_nr)
 			bio->bi_bdev = s->origin->bdev;
 		else
 			bio->bi_bdev = s->cow->bdev;
@@ -1899,8 +1905,14 @@ static int snapshot_iterate_devices(struct dm_target *ti,
 				    iterate_devices_callout_fn fn, void *data)
 {
 	struct dm_snapshot *snap = ti->private;
+	int r;
+
+	r = fn(ti, snap->origin, 0, ti->len, data);
+
+	if (!r)
+		r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);
-	return fn(ti, snap->origin, 0, ti->len, data);
+	return r;
 }
@@ -2159,6 +2171,21 @@ static int origin_status(struct dm_target *ti, status_type_t type, char *result,
 	return 0;
 }
+static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+			struct bio_vec *biovec, int max_size)
+{
+	struct dm_dev *dev = ti->private;
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	if (!q->merge_bvec_fn)
+		return max_size;
+
+	bvm->bi_bdev = dev->bdev;
+	bvm->bi_sector = bvm->bi_sector;
+
+	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
+}
+
 static int origin_iterate_devices(struct dm_target *ti,
 				  iterate_devices_callout_fn fn, void *data)
 {
@@ -2176,6 +2203,7 @@ static struct target_type origin_target = {
 	.map     = origin_map,
 	.resume  = origin_resume,
 	.status  = origin_status,
+	.merge	 = origin_merge,
 	.iterate_devices = origin_iterate_devices,
 };
...
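The label shuffle in snapshot_ctr() follows from moving the origin lookup ahead of the cow/store setup: a kernel-style unwind releases resources in reverse order of acquisition, and each bad_* label names the step that failed, falling through the puts for everything acquired earlier. A minimal userspace sketch of the convention (get_*/put_* are stand-ins, not dm functions):

    #include <stdio.h>

    static int get_origin(void)  { puts("get origin"); return 0; }
    static int get_cow(void)     { puts("get cow");    return -1; } /* fails */
    static void put_origin(void) { puts("put origin"); }

    static int ctr(void)
    {
        if (get_origin())
            goto bad_origin;        /* nothing acquired yet */
        if (get_cow())
            goto bad_cow;           /* origin must now be released */
        return 0;

        /* unwind in reverse order of acquisition */
    bad_cow:
        put_origin();
    bad_origin:
        return -1;
    }

    int main(void) { return ctr() ? 1 : 0; }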
@@ -25,6 +25,8 @@ struct stripe {
 struct stripe_c {
 	uint32_t stripes;
+	int stripes_shift;
+	sector_t stripes_mask;
 	/* The size of this target / num. stripes */
 	sector_t stripe_width;
@@ -162,16 +164,22 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	/* Set pointer to dm target; used in trigger_event */
 	sc->ti = ti;
 	sc->stripes = stripes;
 	sc->stripe_width = width;
+
+	if (stripes & (stripes - 1))
+		sc->stripes_shift = -1;
+	else {
+		sc->stripes_shift = ffs(stripes) - 1;
+		sc->stripes_mask = ((sector_t) stripes) - 1;
+	}
+
 	ti->split_io = chunk_size;
 	ti->num_flush_requests = stripes;
+	ti->num_discard_requests = stripes;
+	sc->chunk_shift = ffs(chunk_size) - 1;
 	sc->chunk_mask = ((sector_t) chunk_size) - 1;
-	for (sc->chunk_shift = 0; chunk_size; sc->chunk_shift++)
-		chunk_size >>= 1;
-	sc->chunk_shift--;
 	/*
 	 * Get the stripe destinations.
@@ -207,26 +215,79 @@ static void stripe_dtr(struct dm_target *ti)
 	kfree(sc);
 }
+static void stripe_map_sector(struct stripe_c *sc, sector_t sector,
+			      uint32_t *stripe, sector_t *result)
+{
+	sector_t offset = dm_target_offset(sc->ti, sector);
+	sector_t chunk = offset >> sc->chunk_shift;
+
+	if (sc->stripes_shift < 0)
+		*stripe = sector_div(chunk, sc->stripes);
+	else {
+		*stripe = chunk & sc->stripes_mask;
+		chunk >>= sc->stripes_shift;
+	}
+
+	*result = (chunk << sc->chunk_shift) | (offset & sc->chunk_mask);
+}
+
+static void stripe_map_range_sector(struct stripe_c *sc, sector_t sector,
+				    uint32_t target_stripe, sector_t *result)
+{
+	uint32_t stripe;
+
+	stripe_map_sector(sc, sector, &stripe, result);
+	if (stripe == target_stripe)
+		return;
+	*result &= ~sc->chunk_mask;		/* round down */
+	if (target_stripe < stripe)
+		*result += sc->chunk_mask + 1;	/* next chunk */
+}
+
+static int stripe_map_discard(struct stripe_c *sc, struct bio *bio,
+			      uint32_t target_stripe)
+{
+	sector_t begin, end;
+
+	stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin);
+	stripe_map_range_sector(sc, bio->bi_sector + bio_sectors(bio),
+				target_stripe, &end);
+	if (begin < end) {
+		bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;
+		bio->bi_sector = begin + sc->stripe[target_stripe].physical_start;
+		bio->bi_size = to_bytes(end - begin);
+		return DM_MAPIO_REMAPPED;
+	} else {
+		/* The range doesn't map to the target stripe */
+		bio_endio(bio, 0);
+		return DM_MAPIO_SUBMITTED;
+	}
+}
+
 static int stripe_map(struct dm_target *ti, struct bio *bio,
 		      union map_info *map_context)
 {
-	struct stripe_c *sc = (struct stripe_c *) ti->private;
-	sector_t offset, chunk;
+	struct stripe_c *sc = ti->private;
 	uint32_t stripe;
+	unsigned target_request_nr;
 	if (unlikely(bio_empty_barrier(bio))) {
-		BUG_ON(map_context->flush_request >= sc->stripes);
-		bio->bi_bdev = sc->stripe[map_context->flush_request].dev->bdev;
+		target_request_nr = map_context->target_request_nr;
+		BUG_ON(target_request_nr >= sc->stripes);
+		bio->bi_bdev = sc->stripe[target_request_nr].dev->bdev;
 		return DM_MAPIO_REMAPPED;
 	}
+	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
+		target_request_nr = map_context->target_request_nr;
+		BUG_ON(target_request_nr >= sc->stripes);
+		return stripe_map_discard(sc, bio, target_request_nr);
+	}
-	offset = bio->bi_sector - ti->begin;
-	chunk = offset >> sc->chunk_shift;
-	stripe = sector_div(chunk, sc->stripes);
+	stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector);
+	bio->bi_sector += sc->stripe[stripe].physical_start;
 	bio->bi_bdev = sc->stripe[stripe].dev->bdev;
-	bio->bi_sector = sc->stripe[stripe].physical_start +
-	    (chunk << sc->chunk_shift) + (offset & sc->chunk_mask);
 	return DM_MAPIO_REMAPPED;
 }
...
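stripe_map_sector() keeps the sector_div() path for arbitrary stripe counts but, when the count is a power of two, replaces the 64-bit division with a mask and shift. The two paths agree exactly in the power-of-two case, which a standalone check can confirm:

    /* Checks that "chunk & mask" / "chunk >> shift" match the generic
     * division and modulo that sector_div performs, for a power-of-two
     * stripe count.  ffs() here is the <strings.h> one; the kernel has
     * its own equivalent. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <strings.h>

    int main(void)
    {
        uint32_t stripes = 8;                   /* power of two */
        assert(!(stripes & (stripes - 1)));

        int shift = ffs(stripes) - 1;           /* ffs(8) - 1 == 3 */
        uint64_t mask = (uint64_t)stripes - 1;

        for (uint64_t chunk = 0; chunk < 100000; chunk++) {
            uint32_t stripe_div  = chunk % stripes;  /* sector_div path */
            uint64_t quot_div    = chunk / stripes;
            uint32_t stripe_fast = chunk & mask;     /* fast path */
            uint64_t quot_fast   = chunk >> shift;
            assert(stripe_div == stripe_fast && quot_div == quot_fast);
        }
        puts("shift/mask path matches division");
        return 0;
    }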
@@ -54,6 +54,8 @@ struct dm_table {
 	sector_t *highs;
 	struct dm_target *targets;
+	unsigned discards_supported:1;
+
 	/*
 	 * Indicates the rw permissions for the new logical
 	 * device.  This should be a combination of FMODE_READ
@@ -203,6 +205,7 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
 	INIT_LIST_HEAD(&t->devices);
 	atomic_set(&t->holders, 0);
+	t->discards_supported = 1;
 	if (!num_targets)
 		num_targets = KEYS_PER_NODE;
@@ -245,7 +248,7 @@ void dm_table_destroy(struct dm_table *t)
 		msleep(1);
 	smp_mb();
-	/* free the indexes (see dm_table_complete) */
+	/* free the indexes */
 	if (t->depth >= 2)
 		vfree(t->index[t->depth - 2]);
@@ -770,6 +773,9 @@ int dm_table_add_target(struct dm_table *t, const char *type,
 	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
+	if (!tgt->num_discard_requests)
+		t->discards_supported = 0;
+
 	return 0;
 bad:
@@ -778,7 +784,7 @@ int dm_table_add_target(struct dm_table *t, const char *type,
 	return r;
 }
-int dm_table_set_type(struct dm_table *t)
+static int dm_table_set_type(struct dm_table *t)
 {
 	unsigned i;
 	unsigned bio_based = 0, request_based = 0;
@@ -900,7 +906,7 @@ static int setup_indexes(struct dm_table *t)
 /*
 * Builds the btree to index the map.
 */
-int dm_table_complete(struct dm_table *t)
+static int dm_table_build_index(struct dm_table *t)
 {
 	int r = 0;
 	unsigned int leaf_nodes;
@@ -919,6 +925,55 @@ int dm_table_complete(struct dm_table *t)
 	return r;
 }
+/*
+ * Register the mapped device for blk_integrity support if
+ * the underlying devices support it.
+ */
+static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
+{
+	struct list_head *devices = dm_table_get_devices(t);
+	struct dm_dev_internal *dd;
+
+	list_for_each_entry(dd, devices, list)
+		if (bdev_get_integrity(dd->dm_dev.bdev))
+			return blk_integrity_register(dm_disk(md), NULL);
+
+	return 0;
+}
+
+/*
+ * Prepares the table for use by building the indices,
+ * setting the type, and allocating mempools.
+ */
+int dm_table_complete(struct dm_table *t)
+{
+	int r;
+
+	r = dm_table_set_type(t);
+	if (r) {
+		DMERR("unable to set table type");
+		return r;
+	}
+
+	r = dm_table_build_index(t);
+	if (r) {
+		DMERR("unable to build btrees");
+		return r;
+	}
+
+	r = dm_table_prealloc_integrity(t, t->md);
+	if (r) {
+		DMERR("could not register integrity profile.");
+		return r;
+	}
+
+	r = dm_table_alloc_md_mempools(t);
+	if (r)
+		DMERR("unable to allocate mempools");
+
+	return r;
+}
+
 static DEFINE_MUTEX(_event_lock);
 void dm_table_event_callback(struct dm_table *t,
 			     void (*fn)(void *), void *context)
@@ -1086,6 +1141,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	else
 		queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
+	if (!dm_table_supports_discards(t))
+		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+	else
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+
 	dm_table_set_integrity(t);
 	/*
@@ -1232,6 +1292,39 @@ struct mapped_device *dm_table_get_md(struct dm_table *t)
 	return t->md;
 }
+static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
+				  sector_t start, sector_t len, void *data)
+{
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	return q && blk_queue_discard(q);
+}
+
+bool dm_table_supports_discards(struct dm_table *t)
+{
+	struct dm_target *ti;
+	unsigned i = 0;
+
+	if (!t->discards_supported)
+		return 0;
+
+	/*
+	 * Ensure that at least one underlying device supports discards.
+	 * t->devices includes internal dm devices such as mirror logs
+	 * so we need to use iterate_devices here, which targets
+	 * supporting discard must provide.
+	 */
+	while (i < dm_table_get_num_targets(t)) {
+		ti = dm_table_get_target(t, i++);
+
+		if (ti->type->iterate_devices &&
+		    ti->type->iterate_devices(ti, device_discard_capable, NULL))
+			return 1;
+	}
+
+	return 0;
+}
+
 EXPORT_SYMBOL(dm_vcalloc);
 EXPORT_SYMBOL(dm_get_device);
 EXPORT_SYMBOL(dm_put_device);
...
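Discard support is thus decided at two levels: every target in the table must opt in via num_discard_requests (a single opt-out clears discards_supported at dm_table_add_target() time), and at least one underlying device must advertise discards, probed through iterate_devices. A small userspace model of that aggregation, with stand-in structures rather than the real dm types:

    #include <stdbool.h>
    #include <stdio.h>

    struct target { unsigned num_discard_requests; bool dev_discards; };

    static bool table_supports_discards(const struct target *t, int n)
    {
        bool any_dev = false;

        for (int i = 0; i < n; i++) {
            if (!t[i].num_discard_requests)
                return false;       /* one opt-out disables the table */
            if (t[i].dev_discards)
                any_dev = true;     /* one capable device is enough */
        }
        return any_dev;
    }

    int main(void)
    {
        struct target tbl[] = {
            { .num_discard_requests = 1, .dev_discards = false },
            { .num_discard_requests = 1, .dev_discards = true  },
        };
        printf("discards: %s\n",
               table_supports_discards(tbl, 2) ? "on" : "off");
        return 0;
    }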
@@ -113,6 +113,11 @@ void dm_unregister_target(struct target_type *tt)
 */
 static int io_err_ctr(struct dm_target *tt, unsigned int argc, char **args)
 {
+	/*
+	 * Return error for discards instead of -EOPNOTSUPP
+	 */
+	tt->num_discard_requests = 1;
+
 	return 0;
 }
...
@@ -22,6 +22,11 @@ static int zero_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		return -EINVAL;
 	}
+	/*
+	 * Silently drop discards, avoiding -EOPNOTSUPP.
+	 */
+	ti->num_discard_requests = 1;
+
 	return 0;
 }
...
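With num_discard_requests set, a discard bio now reaches zero's map function like any other write and can be completed on the spot, so callers never see -EOPNOTSUPP. A sketch of the idea, not the verbatim dm-zero code, using the 2.6.36-era bio API:

    /* Sketch: how a bio-based target can complete a discard without
     * touching any device.  Names follow the 2.6.36-era kernel API. */
    static int zero_map_sketch(struct dm_target *ti, struct bio *bio,
                               union map_info *map_context)
    {
        if (bio_data_dir(bio) == READ)
            zero_fill_bio(bio);         /* reads return zeroes */

        /* writes, including discards, are silently dropped */
        bio_endio(bio, 0);              /* complete with success */
        return DM_MAPIO_SUBMITTED;      /* nothing goes downstream */
    }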
This diff is collapsed.
@@ -59,13 +59,20 @@ void dm_table_postsuspend_targets(struct dm_table *t);
 int dm_table_resume_targets(struct dm_table *t);
 int dm_table_any_congested(struct dm_table *t, int bdi_bits);
 int dm_table_any_busy_target(struct dm_table *t);
-int dm_table_set_type(struct dm_table *t);
 unsigned dm_table_get_type(struct dm_table *t);
 bool dm_table_request_based(struct dm_table *t);
+bool dm_table_supports_discards(struct dm_table *t);
 int dm_table_alloc_md_mempools(struct dm_table *t);
 void dm_table_free_md_mempools(struct dm_table *t);
 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
+void dm_lock_md_type(struct mapped_device *md);
+void dm_unlock_md_type(struct mapped_device *md);
+void dm_set_md_type(struct mapped_device *md, unsigned type);
+unsigned dm_get_md_type(struct mapped_device *md);
+int dm_setup_md_queue(struct mapped_device *md);
+
 /*
 * To check the return value from dm_table_find_target().
 */
@@ -122,6 +129,11 @@ void dm_linear_exit(void);
 int dm_stripe_init(void);
 void dm_stripe_exit(void);
+/*
+ * mapped_device operations
+ */
+void dm_destroy(struct mapped_device *md);
+void dm_destroy_immediate(struct mapped_device *md);
+
 int dm_open_count(struct mapped_device *md);
 int dm_lock_for_deletion(struct mapped_device *md);
...
@@ -22,7 +22,7 @@ typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
 union map_info {
 	void *ptr;
 	unsigned long long ll;
-	unsigned flush_request;
+	unsigned target_request_nr;
 };
 /*
@@ -174,12 +174,18 @@ struct dm_target {
 	 * A number of zero-length barrier requests that will be submitted
 	 * to the target for the purpose of flushing cache.
 	 *
-	 * The request number will be placed in union map_info->flush_request.
+	 * The request number will be placed in union map_info->target_request_nr.
 	 * It is a responsibility of the target driver to remap these requests
 	 * to the real underlying devices.
 	 */
 	unsigned num_flush_requests;
+	/*
+	 * The number of discard requests that will be submitted to the
+	 * target. map_info->request_nr is used just like num_flush_requests.
+	 */
+	unsigned num_discard_requests;
+
 	/* target specific data */
 	void *private;
@@ -392,6 +398,12 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
 #define dm_array_too_big(fixed, obj, num) \
 	((num) > (UINT_MAX - (fixed)) / (obj))
+/*
+ * Sector offset taken relative to the start of the target instead of
+ * relative to the start of the device.
+ */
+#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)
+
 static inline sector_t to_sector(unsigned long n)
 {
 	return (n >> SECTOR_SHIFT);
...
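dm_target_offset() simply rebases a sector from device-relative to target-relative, which is the first step of every linear-style remap replaced earlier in this series. A standalone illustration with made-up numbers:

    /* Demo of dm_target_offset(): a target beginning at sector 2048 of
     * the dm device sees a bio at sector 3072 as offset 1024, and a
     * linear-style target then adds its own start on the underlying
     * device.  The struct is a stand-in for dm_target. */
    #include <stdio.h>

    typedef unsigned long long sector_t;
    struct dm_target_model { sector_t begin; };

    #define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

    int main(void)
    {
        struct dm_target_model ti = { .begin = 2048 };
        sector_t bi_sector = 3072;      /* sector within the dm device */
        sector_t lc_start = 8192;       /* target's start on the disk */

        printf("offset in target: %llu\n",
               dm_target_offset(&ti, bi_sector));          /* 1024 */
        printf("sector on underlying device: %llu\n",
               lc_start + dm_target_offset(&ti, bi_sector)); /* 9216 */
        return 0;
    }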
@@ -11,6 +11,7 @@
 #include <linux/types.h>
 #define DM_DIR "mapper"	/* Slashes not supported */
+#define DM_CONTROL_NODE "control"
 #define DM_MAX_TYPE_NAME 16
 #define DM_NAME_LEN 128
 #define DM_UUID_LEN 129
@@ -266,9 +267,9 @@ enum {
 #define DM_DEV_SET_GEOMETRY	_IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
 #define DM_VERSION_MAJOR	4
-#define DM_VERSION_MINOR	17
+#define DM_VERSION_MINOR	18
 #define DM_VERSION_PATCHLEVEL	0
-#define DM_VERSION_EXTRA	"-ioctl (2010-03-05)"
+#define DM_VERSION_EXTRA	"-ioctl (2010-06-29)"
 /* Status bits */
 #define DM_READONLY_FLAG	(1 << 0) /* In/Out */
...
@@ -38,6 +38,7 @@
 #define KVM_MINOR		232
 #define BTRFS_MINOR		234
 #define AUTOFS_MINOR		235
+#define MAPPER_CTRL_MINOR	236
 #define MISC_DYNAMIC_MINOR	255
 struct device;
...
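Reserving minor 236 is what makes autoloading possible: device-mapper can register its control node on a fixed misc minor and advertise module aliases, so udev creates /dev/mapper/control up front and the first open demand-loads dm-mod. A sketch of the registration side (the actual change lands in the collapsed dm-ioctl.c diff; _ctl_fops is assumed from there):

    #include <linux/miscdevice.h>

    static struct miscdevice _dm_misc = {
        .minor    = MAPPER_CTRL_MINOR,          /* fixed minor 236 */
        .name     = DM_NAME,
        .nodename = DM_DIR "/" DM_CONTROL_NODE, /* "mapper/control" */
        .fops     = &_ctl_fops,                 /* assumed ioctl fops */
    };

    /* aliases let userspace demand-load dm-mod by minor or devname */
    MODULE_ALIAS_MISCDEV(MAPPER_CTRL_MINOR);
    MODULE_ALIAS("devname:mapper/control");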